Dataset columns:
  query           : string (length 9 to 3.4k)
  document        : string (length 9 to 87.4k)
  metadata        : dict
  negatives       : sequence (length 4 to 101)
  negative_scores : sequence (length 4 to 101)
  document_score  : string (length 3 to 10)
  document_rank   : string (102 distinct values)
Send the commit notification to CIA. The message is created incrementally using lxml's "E" builder.
def notify(self, builder):
    # Build the <files> section for the template...
    commit = builder.commit
    files = E.files()
    commit_msg = commit.message.strip()
    commit_msg = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', commit_msg)
    for filename in commit.files_changed:
        safe_filename = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', filename)
        file_element = E.file(safe_filename)
        files.append(file_element)

    # Build the message
    cia_message = self.MESSAGE()
    cia_message.append(self._generator)

    source = self.SOURCE(E.project("KDE"))
    source.append(E.module(self.repository.path))
    source.append(E.branch(self.repository.ref_name))
    cia_message.append(source)

    cia_message.append(self.TIMESTAMP(commit.date))

    body = self.BODY()
    commit_data = self.COMMIT()
    commit_data.append(E.author(commit.author_name))
    commit_data.append(E.revision(commit.description))
    commit_data.append(files)
    commit_data.append(E.log(commit_msg))
    commit_data.append(E.url(commit.url))
    body.append(commit_data)
    cia_message.append(body)

    # Convert to a string
    commit_xml = etree.tostring(cia_message)

    # Craft the email...
    message = MIMEText(commit_xml, 'xml', 'utf-8')
    message['Subject'] = "DeliverXML"
    message['From'] = "[email protected]"
    message['To'] = "[email protected]"

    # Send email...
    self.smtp.sendmail("[email protected]", ["[email protected]"], message.as_string())
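For context, the query refers to lxml's "E" (ElementMaker) builder used in the document above. The following is a minimal, self-contained sketch of incremental element building with E; the element names and commit details here are illustrative placeholders, not values from this dataset.

from lxml import etree
from lxml.builder import E

# Hypothetical commit details, used only to demonstrate the builder API.
files = E.files()
for path in ["src/main.cpp", "README"]:
    files.append(E.file(path))  # children can be appended incrementally

message = E.message(
    E.source(E.project("KDE"), E.module("example-repo"), E.branch("master")),
    E.body(
        E.commit(
            E.author("Jane Developer"),
            E.log("Fix a crash in the file loader"),
            files,
        )
    ),
)

# Serialise the tree, as the document above does with etree.tostring().
print(etree.tostring(message, pretty_print=True).decode("utf-8"))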
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(guid, message):", "def notify(self, id, command, data = None):\n print \"sending:\", id, command, data\n if command == Code.START: data = [id]\n try:\n msg = Message(command = command, data = data)\n self.contacts[id].send(msg.encode())\n except:\n print \"msg failed\"", "def write(self, notification):", "def sendmessage(self):\n \n self.message.parentItem = self.rxtxcontroller.transmittable.rootItem\n self.message.can_id = self.idInput.toPlainText()\n self.message.dlc = self.lengthInput.value()\n self.message.cycle_time = self.cycleInput.toPlainText()\n self.message.time = int(round(time.time() * 1000))\n self.message.rxtx = \"TX\"\n self.message.count = 1\n self.message.data = self.dataInput.toPlainText()\n self.accept()", "def send_notification(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n m1 = Members(\"Richard\", \"Blackmore\", \"14-04-1945\", \"Weston\")\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), None)\n s1.add_resource(b1)\n s1.lending_process(b1, m1)\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), \"-Please return boo- \")", "def action_invoice_dian_resend(self):\n self.ensure_one()\n template = self.env.ref('l10n_co_e-invoice.email_template_edi_invoice_dian', False)\n compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)\n ctx = dict(\n default_model='account.invoice',\n default_res_id=self.id,\n default_use_template=bool(template),\n default_template_id=template and template.id or False,\n default_composition_mode='comment',\n mark_invoice_as_sent=True,\n )\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n 'context': ctx,\n }", "def send_notification(self, signal):\n self.cl.sendInitPresence() \n self.cl.send(xmpp.protocol.Message(self.recipient, str(signal), typ='chat'))", "def _send_notification() -> None:\n send_notification(\n self,\n \"slack:@aaron\",\n \"New {0} Version: {1}\".format(\n self.properties[CONF_APP_NAME], new_version\n ),\n title=\"New Software 💿\",\n )", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def sendNotifyToAgent(self, data):\n self.parent.sendNotifyToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData=data)", "def sendBack( self , message ) :\n self._connection.populateXmlToClient(message)", "def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )", "def test_notification_cp_email(self):\n # publish the item\n api.content.transition(obj=self.event, transition='publish')\n mailhost = api.portal.get_tool('MailHost')\n self.assertEqual(len(mailhost.messages), 2)\n msg = message_from_string(mailhost.messages[1])\n\n self.assertEqual(msg['To'], CP_LIST_ADDRESS)\n self.assertEqual(\n msg['From'], 'EESTEC International <[email protected]>')\n self.assertEqual(\n msg['Subject'],\n '=?utf-8?q?=5BCP=5D_=5BEVENTS=5D_T=C3=A9st_event?=',\n )\n self.assertIn('a new Event has been published', msg.get_payload())\n self.assertIn('http://nohost/plone/lc/test-event', msg.get_payload())", "async def 
send_cemi(self, cemi: CEMIFrame) -> None:\n # send L_DATA_IND to network, create L_DATA_CON locally for routing\n cemi.code = CEMIMessageCode.L_DATA_IND\n routing_indication = RoutingIndication(raw_cemi=cemi.to_knx())\n\n async with self._flow_control.throttle():\n self._send_knxipframe(KNXIPFrame.init_from_body(routing_indication))\n\n cemi.code = CEMIMessageCode.L_DATA_CON\n self.cemi_received_callback(cemi.to_knx())", "def send(self, event, message):\n pass", "def _create_msg(self, tr_id, payload, confirm, expire_time, encoding):\n tmp = [\"<SSAP_message><transaction_type>INSERT</transaction_type>\",\n \"<message_type>REQUEST</message_type>\"]\n tmp.extend([\"<transaction_id>\", str(tr_id), \"</transaction_id>\"])\n tmp.extend([\"<node_id>\", str(self.node_id), \"</node_id>\"])\n tmp.extend([\"<space_id>\", str(self.targetSS), \"</space_id>\"])\n tmp.extend(['<parameter name=\"insert_graph\" encoding=\"%s\">' % encoding.upper(),\n str(payload), \"</parameter>\"])\n tmp.extend(['<parameter name = \"confirm\">',\n str(confirm).upper(),\n \"</parameter>\",\n \"</SSAP_message>\"])\n return \"\".join(tmp)", "def send_counterparty(self) -> None:\n object_ = self.objects[0]\n ticket_text = ''\n if 'сб' in object_.counterparty_name.lower() and self.keyword == 'closing':\n # order_id = sberinkas.main(\n # object_.object_SAP_code,\n # object_.object_address,\n # object_.lat,\n # object_.lon\n # )\n # ticket_text = f\"<br>Номер заявки на портале инкассация - {order_id}.\"\n pass\n\n body = '<p>Добрый день!<br><br>' \\\n f'Прошу принять в работу письмо на {self.letter_text}<br>' \\\n f'Скан подписанного письма вышлю позднее.{ticket_text}'\n if 'сб' in object_.counterparty_name.lower():\n self.send_sber_manager_service(body)\n else:\n self.sendmail(\n self.outlook,\n self.to,\n \"\",\n self.letter_name,\n body,\n self.attachment,\n 2\n )", "def notify(cls, self, message):\n pass", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def comsume_msg(self, msg_type):", "def _send_message(self, *args, **kwargs):\n with self.comm_lock:\n return super(FrontendComm, self)._send_message(*args, **kwargs)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending notification in Slack\")", "def notification_cbk(self, notification):\n self._logger.info(\"Received netconf notification %s\", notification)\n if self._encoding == \"json\":\n # Convert XML to Json\n xml_str = bytes(repr(notification), 'utf-8')\n resp_str = self._xml_to_json_translator.convert_notification(xml_str)\n if resp_str is None:\n # Use a schema-less conversion\n resp_str = naive_xml_to_json(xml_str)\n else:\n resp_str = repr(notification)\n self._logger.debug(\"Translated Notification: %s\", resp_str)\n self.on_netconf_message(resp_str)", "def notify_user(self, svno, ops):\n\n self.sr=svno\n self.ops=ops\n try:\n from email.mime.text import MIMEText\n from 
email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['[email protected]']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = '[email protected]'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n else:\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n\n if self.ops == 'ipnw':\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for VMID %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n else:\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for service request: %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve the \\\n following problem and notify infra team.\")\n if self.ops == 'ipnw':\n nHtml.append(\"VM creation readiness from vCAC \\\n cloud is taking long time, \\\n vm creation service request completed, \\\n But network configuration is having an issue \\\n for VMID <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n else:\n nHtml.append(\"Creation of VM through vCAC cloud is taking \\\n longer time than expected, the service \\\n request <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is stuck and impacted.<br><br>\" % \\\n pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for \\\n SR# related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: <a href=%s> \\\n Effected Build Console \\\n </a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def send_message(self,contato,mensagem):\r\n #Open new chat on whatsapp web\r\n new_msg_button = self.driver.find_element_by_xpath(self.NEW_CHAT)\r\n new_msg_button.click()\r\n sleep(1)\r\n #Search the contact\r\n search_field = self.driver.find_element_by_xpath(self.SEARCH_CONTACT)\r\n search_field.click()\r\n search_field.send_keys(contato)\r\n sleep(1)\r\n #Click on the firts contact with the name that I told \r\n first_contact = self.driver.find_element_by_xpath(self.FIRST_CONTACT)\r\n first_contact.click()\r\n sleep(1.5)\r\n type_field = self.driver.find_element_by_xpath(self.TYPE_MSG)\r\n type_field.click()\r\n type_field.send_keys(mensagem)\r\n send_msg= self.driver.find_element_by_xpath(self.SEND_BUTTON)\r\n send_msg.click()\r\n sleep(1)", 
"def _send_notification(self, user_id):\n settings = self.settings_repo.find_one_by_id(user_id)\n if settings.req_noti:\n noti = Notification('New Request', '/topics/request', self.BORROW)\n self.noti_service.send_notification(noti)", "def sendNotificationToClerksOffice(date):\n text = translate('notification_email_to_clerk_question_pending_response',\n target_language='en',\n domain='bungeni.core',\n default=\"Questions pending responses.\")\n ministries = _getAllMinistries(date)\n for ministry in ministries:\n questions = _getQuestionsPendingResponse(date, ministry)\n if questions:\n text = text + '\\n' + ministry.full_name +': \\n'\n for question in questions:\n text = text + question.subject + '\\n'\n \n msg = MIMEText(text)\n \n msg['Subject'] = u'Questions pending response'\n msg['From'] = prefs.getAdministratorsEmail()\n msg['To'] = prefs.getClerksOfficeEmail()\n print msg\n #dispatch(msg)", "def notify(self, **kwargs):\n return self.send(kwargs)", "def notify(self, **kwargs):\n return self.send(kwargs)", "def cmd_notification_id(client, args):\n notification = client.get_notification(args.notification_id)\n notification = notification.__dict__\n if 'comment' in notification['content']:\n notification['content'] = format_comment_tree(notification['content'])\n generate_output({'notification': notification})", "def notification(self, approver_list):\n dns_name = axops_client.get_dns()\n job_id = self.root_id\n url_to_ui = 'https://{}/app/jobs/job-details/{}'.format(dns_name, job_id)\n service = axops_client.get_service(job_id)\n\n html_payload = \"\"\"\n<html>\n<body>\n <table class=\"email-container\" style=\"font-size: 14px;color: #333;font-family: arial;\">\n <tr>\n <td class=\"msg-content\" style=\"padding: 20px 0px;\">\n The {} job is waiting for your approval. 
The job was triggered by {}.\n </td>\n </tr>\n <tr>\n <td class=\"commit-details\" style=\"padding: 20px 0px;\">\n <table cellspacing=\"0\" style=\"border-left: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;border-top: 1px solid #e3e3e3;\">\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Author</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Repo</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Branch</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Description</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Revision</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n </table>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">VIEW JOB</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">VIEW JOB</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">APPROVE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">APPROVE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center 
style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">DECLINE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">DECLINE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"thank-you\" style=\"padding-top: 20px;line-height: 22px;\">\n Thanks,<br>\n Argo Project\n </td>\n </tr>\n </table>\n</body>\n</html>\n\"\"\"\n\n for user in approver_list:\n\n approve_token, decline_token = self.generate_token(user=user, dns_name=dns_name)\n\n approve_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, approve_token)\n decline_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, decline_token)\n\n msg = {\n 'to': [user],\n 'subject': 'The {} job requires your approval to proceed'.format(service['name']),\n 'body': html_payload.format(service['name'], service['user'],\n service['commit']['author'], service['commit']['repo'],\n service['commit']['branch'], service['commit']['description'], service['commit']['revision'],\n url_to_ui, url_to_ui, approve_link, approve_link, decline_link, decline_link),\n 'html': True\n }\n\n if service['user'] != 'system':\n try:\n user_result = axops_client.get_user(service['user'])\n msg['display_name'] = \"{} {}\".format(user_result['first_name'], user_result['last_name'])\n except Exception as exc:\n logger.error(\"Fail to get user %s\", str(exc))\n\n logger.info('Sending approval requests to %s', str(user))\n result = axsys_client.send_notification(msg)\n\n # TODO: Tianhe adding retry mechanism\n if result.status_code != 200:\n logger.error('Cannot send approval request, %s', result.content)\n sys.exit(1)\n logger.info('Successfully sent approval requests to reviewers.')", "def acknowledgement(self, message: Message[ValueType]):", "def _coordinator_send_message(\n agent, message: str = '', task_data: Dict = None, episode_done: bool = False\n):\n if not task_data:\n task_data = dict()\n agent.observe(\n {\n 'id': constants.COORDINATOR_AGENT,\n 'text': message,\n 'episode_done': episode_done,\n 'task_data': task_data,\n }\n )", "def send_by_email(self):\r\n ir_model_data = self.env['ir.model.data']\r\n try:\r\n template_id = ir_model_data.get_object_reference(\r\n 'ng_church', 'email_template_church_pledge_report')[1]\r\n except ValueError:\r\n template_id = False\r\n try:\r\n compose_form_id = ir_model_data.get_object_reference(\r\n 'mail', 'email_compose_message_wizard_form')[1]\r\n except ValueError:\r\n compose_form_id = False\r\n ctx = dict(self._context)\r\n ctx.update({\r\n 'default_model': 'church.pledge',\r\n 'default_res_id': self._ids[0],\r\n 'default_use_template': bool(template_id),\r\n 'default_template_id': template_id,\r\n 'default_composition_mode': 'comment',\r\n })\r\n return {\r\n 'name': _('Compose Email'),\r\n 'type': 'ir.actions.act_window',\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'mail.compose.message',\r\n 'views': [(compose_form_id, 'form')],\r\n 'view_id': compose_form_id,\r\n 'target': 'new',\r\n 'context': ctx,\r\n }", "def direct_message(self, user, msg, num):\n PAUSE = 1\n logging.info('Send message {} to {}'.format(msg,user))\n self.driver.get(self.direct_url)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input')[0].send_keys(user)\n 
time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')[0].click() #Edge case to get rid of notification\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button')[0].click()\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button')[0].click()\n time.sleep(PAUSE)\n # The message will be placed and sent\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')[0].send_keys(msg)\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button')[0].click()\n # Special feature involving reacting with heart\n for x in range(num):\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/button[2]')[0].click()\n time.sleep(PAUSE)", "def SendCommand(self,command):\n\t\tself.acad.ActiveDocument.SendCommand(command)", "def emailNote(self, authenticationToken, parameters):\r\n self.send_emailNote(authenticationToken, parameters)\r\n self.recv_emailNote()", "def update_helpdesk(self, data):\n self.sr=data\n try:\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['[email protected]']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = '[email protected]'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n else: \n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n\n msg['Subject'] = 'Your Request SR# %s for VM provisioning \\\n reported failure for product %s' % \\\n\t\t\t ( self.sr['requestNumber'], pdict[self.data['groupid']] )\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve \\\n the following problem and notify infra team.\")\n nHtml.append(\"VM creation readiness from vCAC cloud \\\n is reported failure, \\\n Product is <b>%s</b> is stuck.\" \\\n % pdict[self.data['groupid']])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is impacted.<br><br>\" % pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for SR# \\\n related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: \\\n <a href=%s> Effected Build \\\n Console</a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = 
smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + \\\n msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def configureCMFNotification(portal,logger):\n ntool = getToolByName(portal, NTOOL_ID)\n changeProperty = lambda key, value: \\\n ntool.manage_changeProperties(**{key: value})\n \n if not ntool.isExtraSubscriptionsEnabled():\n changeProperty('extra_subscriptions_enabled',True)\n #enable notification on Item creation\n \n changeProperty('item_creation_notification_enabled', True)\n changeProperty('on_item_creation_mail_template',['* :: string:creation_mail_notification'])\n logger.info(\"On Item Creation Notification has been enabled.\")\n \n #enable notification on Item modification\n changeProperty('item_modification_notification_enabled', True)\n changeProperty('on_item_modification_mail_template',['* :: string:modification_mail_notification'])\n logger.info(\"On Item Modification Notification has been enabled.\")\n \n #enable notification on Work Flow Transition\n changeProperty('wf_transition_notification_enabled', True)\n changeProperty('on_wf_transition_mail_template',['* :: string:workflow_mail_notification'])\n logger.info(\"On Workflow transition Notification has been enabled.\")\n \n #enable notification on Discussion Item Creation\n changeProperty('discussion_item_creation_notification_enabled',True)\n changeProperty('on_discussion_item_creation_mail_template',['* :: string:discussion_mail_notification'])\n logger.info(\"On Discussion Item Creation Notification has been enabled.\")", "def notify(self, event):\n\n self.send_json(event[\"payload\"])", "def test_send():\n\n print \"Sending Note Ons....\"\n f = open(PIMIDI_DEV, \"wb\")\n for note_no in xrange(0x3c, 0x48, 2):\n for j in (chr(0x90), chr(note_no), chr(0x60)):\n f.write(j)\n sleep(1)\n f.close()", "def send_casg(cid, comment):\n tid = None\n length = None\n SN = None\n PN = None\n suspected = None\n plabel = None\n flabel = None\n\n SQL.execute('''\n SELECT \n ticket,\n length,\n SN,\n PN,\n suspected,\n plabel,\n flabel\n FROM \n cables\n WHERE\n cables.cid = ?\n LIMIT 1\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall(): \n tid = row['ticket']\n length = row['length']\n SN = row['SN']\n PN = row['PN']\n suspected = row['suspected']\n plabel = row['plabel']\n flabel = row['flabel']\n\n if not tid:\n vlog(1, 'Cable c%s does not have an associated Extraview Ticket. 
Refusing to send non-existant ticket to casg' % (cid))\n return False\n\n #EV.assign_group(tid, 'casg', None, {\n if not DISABLE_TICKETS:\n vlog(3, 'Sent Ticket %s to CASG' % (tid))\n\n #provide physical label if one is known\n if plabel:\n label = 'Physical Cable Label: %s\\nSoftware Cable Label: %s' % (plabel, flabel)\n else:\n label = 'Cable Label: %s' % (flabel)\n\n EV.assign_group(tid, 'casg', None, {\n 'COMMENTS':\t'''\n CASG,\n\n The follow cable has been marked for repairs following:\n\n https://wiki.ucar.edu/display/ssg/Infiniband+Cable+Repair\n\n This cable has had %s events that required repair to date.\n\n %s\n Length: %s\n Serial: %s\n Product Number: %s\n\n SSG Request:\n\n %s\n\n Please verify that the cable ports are dark before physically repairing cable.\n If there are any questions or issues, please return this ticket to SSG with details.\n ''' % (\n suspected,\n label,\n length if length else 'Unknown',\n SN if SN else 'Unknown',\n PN if PN else 'Unknown',\n comment\n )\n });", "def send_message(self):\n self.preprocess_text()\n message_text = self.create_message_text()\n \n telnyx.Message.create(\n from_=configs.source_number,\n to=self.destination_number,\n text=message_text,\n )", "def send_notification (event):\n Publisher.sendMessage (event)", "async def notify(self, content, em=None):\n if content:\n content = convertFromUnicode(content)\n where = self.alias\n f = self.protocol.sendNotification(where, content, em=em)\n if f: await f", "def send(self, message):\n pass", "def set_message(self, node_uuid, index, data):\n try:\n self.lcd.clear()\n self.lcd.message(data)\n except Exception:\n logger.exception('Exception when displaying message')", "def send_msg():\n\tmessage = \"%s %s %d\\n\" % (metric, activeDAHDIChannels, int(time.time()))\n\t# print 'sending message:\\n%s' % message\n\tcarbonSocket = socket.socket()\n\tcarbonSocket.connect((CARBON_HOST, CARBON_PORT))\n\tcarbonSocket.sendall(message)\n\tcarbonSocket.close()\n\tlast_send = int(time.time())", "def send_irc_message(self, event):\n\n self.log('Transmitting IRC message', lvl=debug)\n\n self.fireEvent(PRIVMSG(event.username, \"[%s] %s : %s\" % (event.msg_type, event.subject, event.body)))", "def send_notification(self):\n # Sending the notification\n tbs = TestBuild.objects.filter(pk__in = self.create_builds_list)\n tbs = tbs.order_by('product')\n\n tbp_pks = list(set(tbs.values_list('product', flat=True)))\n ps = Product.objects.filter(pk__in = tbp_pks)\n\n message = MAIL_HEADER\n\n line = '=' * 30 + '\\n'\n\n for p in ps:\n p_str = unicode(p)\n\n message += line + p_str + '\\n' + line\n for tb in tbs:\n if tb.product == p:\n message += '* ' + unicode(tb) + '\\n'\n message += '\\n'\n\n mail_to = []\n for admin in ADMINS:\n mail_to.append(admin[1])\n\n send_mail(MAIL_SUBJECT, message, MAIL_FROM, mail_to)", "def send_message(self, message):\n pass", "def send_message_to_server(self, new_element):\n self._current_list.append(new_element)\n print(\"Enviando... 
{}\".format(self._current_list))\n final_content = dumps(self._current_list)\n self._socket.send(final_content)", "def courier_at_customer_handler(self, data: dict, **kwargs) -> None:\n yield self.wait_time()\n self.env.publish('customer_order_collected', data={\n 'order_id': data['order_id'],\n 'restaurant_id': data['restaurant_id'],\n 'customer_id': data['customer_id'],\n 'courier_id': data['courier_id']})", "def send_popup_message(self, title, header, message):\n data = self.device_id_str + \"\\tMSSG\\t{}\\t{}\\t{}\\n\".format(title, header, message)\n self.tx_zmq_pub.send_multipart([b\"ALL\", b'0', data.encode('utf-8')])", "def action_send_email(self):\n self.ensure_one()\n if self.communication_channel != \"email\":\n return False # Maybe we should raise an error here.\n\n if self.state in (\"sent\", \"done\"):\n raise ValidationError(_(\"This communication is already sent.\"))\n\n lines_2be_processed = self.credit_control_line_ids.filtered(\n lambda line: line.state != \"sent\"\n )\n\n if not lines_2be_processed:\n raise ValidationError(_(\"There is no draft lines to send.\"))\n\n self = self.with_context(lang=self.partner_id.lang)\n\n partner = self.partner_id\n mail_template = self.policy_level_id.email_template_id\n # Send the email\n partner.with_context(credit_control_mail=True).message_post_with_template(\n template_id=mail_template.id,\n model=self._name,\n res_id=self.id,\n )\n # Set the state of the credit control lines to \"queued\"\n lines_2be_processed.write({\"state\": \"sent\"})\n self.state = \"sent\"", "def send_message(self):\r\n return \"success\"", "def notify(plaintext_message, signature):", "def broadcast(self, message):\r\n for c in self.characters:\r\n c.notify(message)", "def send_notification(self, context):\n subject = \"Order placed for %s [%s]\" % (context['product']['name'], context['name'])\n message = render_to_string('notification/product_notification.txt',\n context)\n try:\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,\n self.get_recipients())\n except SMTPException as e:\n logger.error(\"Error sending notification: %s\" % e)\n else:\n logger.info(\"Sent notification for %s [%s]\" % (context['product']['name'], context['name']))", "def _send_ICX(self, _to: Address, amount: int, msg: str='') -> None:\n try:\n self.icx.transfer(_to, amount)\n self.FundTransfer(_to, amount, msg + f' {amount} ICX sent to {_to}.')\n except BaseException as e:\n revert(f'{amount} ICX not sent to {_to}. 
'\n f'Exception: {e}')", "def sendNotification(self):\n if not(self.errors or self.accounting):\n return S_OK()\n\n emailBody = \"\"\n rows = []\n for instanceName, val in self.accounting.iteritems():\n rows.append([[instanceName],\n [val.get('Treatment', 'No Treatment')],\n [str(val.get('LogAge', 'Not Relevant'))]])\n\n if rows:\n columns = [\"Instance\", \"Treatment\", \"Log File Age (Minutes)\"]\n emailBody += printTable(columns, rows, printOut=False, numbering=False, columnSeparator=' | ')\n\n if self.errors:\n emailBody += \"\\n\\nErrors:\"\n emailBody += \"\\n\".join(self.errors)\n\n self.log.notice(\"Sending Email:\\n\" + emailBody)\n for address in self.addressTo:\n res = self.nClient.sendMail(address, self.emailSubject, emailBody, self.addressFrom, localAttempt=False)\n if not res['OK']:\n self.log.error(\"Failure to send Email notification to \", address)\n continue\n\n self.errors = []\n self.accounting.clear()\n\n return S_OK()", "def send_reminder(self):\n pass", "def __send(self) -> None:\n # region Docstring\n # endregion\n\n if len(self.entryline.get_text().strip()) > 0:\n self.udp.transmission(\n \"CHA\", \"01\", self.username, self.entryline.get_text().strip()\n )\n self.__addmsg(f\"<b>(YOU): </b><br>{self.entryline.get_text().strip()}<br>\")\n self.entryline.set_text(\"\")", "def _send_ICX(self, _to: Address, amount: int, msg: str) -> None:\n try:\n self.icx.transfer(_to, amount)\n self.FundTransfer(_to, amount, msg + f' {amount} ICX sent to {_to}.')\n except BaseException as e:\n revert(f'{amount} ICX not sent to {_to}. '\n f'Exception: {e}')", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def absenden(self):\n\n message = self.textFeld.toPlainText()\n self.c.send(message)\n self.textFeld.clear()", "def SendMessage(self, event):\n pass", "def initM(self, num):\n prefix = C_Messaging.PREFIX\n if not wait_el_xpath_click(self.driver, C_Messaging.PATH_BTN_CREATE):\n logging.info('{0}: Create new message unsucceed.'.format(prefix))\n self.fail('{0}: Create new message unsucceed.'.format(prefix))\n recipients = wait_el_xpath(self.driver, C_Messaging.PATH_RECIPIENTS)\n action(recipients, Commands.CLEAR)\n action(recipients, Commands.CLICK)\n\n # phone number: 147 8230 5348\n for s in num:\n self.driver.press_keycode(Keycode.get(self, s))\n\n self.driver.press_keycode(Keycode.ENTER)\n\n text_editor = wait_el_xpath(self.driver, C_Messaging.PATH_TEXT_EDITOR)\n return text_editor", "def test_notify(self):\n disco = create_disco()\n messages = [object(), NodeActive(create_node(\"hello\"))]\n result = []\n disco.notify(result.append)\n for m in messages:\n disco.onMessage(None, m)\n self.assertEqual(messages, result)", "def notifySysOperator(self):\n msg = self.generateNotifyMessage()\n print(msg)\n # with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n # smtp.ehlo()\n # smtp.starttls()\n # smtp.ehlo()\n\n # smtp.login(\"[email protected]\", \"qwerQWER123.\")\n\n # smtp.sendmail(\"[email protected]\", \"[email protected]\", msg)\n\n # smtp.close()\n return False", "def send_notification(to_number):\n client.messages.create(to=to_number,\n from_=config.get('TWILIO', 'twilio_from_number'),\n body=f'RTX 3090 FE has been added to your Best Buy cart')", "def alert_for_pending_invoices_1(request):\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>Beginning of alert_for_pending_invoices_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tThread(target=alert_for_pending_invoices_1_woker).start()\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>End of 
alert_for_pending_invoices_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tresponse = {}\n\n\tresponse[\"info_to_contact\"] = \"Ok\"\n\n\treturn response", "def test_customer_notified(self, mocked_notify_client):\n order = OrderWithOpenQuoteFactory()\n\n notify.quote_generated(order)\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == order.get_current_contact_email()\n assert call_args['template_id'] == Template.quote_sent_for_customer.value\n assert call_args['personalisation']['recipient name'] == order.contact.name\n assert call_args['personalisation']['embedded link'] == order.get_public_facing_url()", "def send_message(self, tag, value, priority=0):\n self._messaged.emit((\"msg\",tag,priority,value))", "def _send_message(self, e: Event):\n\n message = self.message_text.get(\"1.0\", 'end-1c').replace('\\n', \"\")\n\n if len(message) > 0:\n self.add_message_to_chat('you: ' + message)\n self._clear_message_text()\n self.connection_socket.send(bytes('them: ' + message, 'utf-8'))", "def contact(update: Update) -> None:\n update.message.text(\"@New GEN\")", "def send_email(self, message):\n pass", "def create_new_mail(self):\n self.driver.get(consts.TEMP_MAIL)\n soup = BeautifulSoup(self.driver.page_source)\n self.mail = soup.find(id=\"email_id\").attrs[\"data-value\"]", "def notify(self, notification):\n topic = 'notify.' + notification['subject']\n payload = serializer.dumps(notification, use_bin_type=True)\n self.socket.send_string(topic, flags=zmq.SNDMORE)\n self.socket.send(payload)\n return self.socket.recv_string()", "def sendC(self):\n startC = self.countCannibalOnStart()\n if startC < 1:\n return None\n else:\n newStart = self.start[0:2] + str(startC-1) + self.start[3]\n newEnd = self.end[0:2] + str(4-startC) + self.end[3]\n return MissionaryState(newStart, newEnd, \"sendC\")", "def send_interim_change_notice(request, meeting):\n group = meeting.session_set.first().group\n form = InterimAnnounceForm(get_announcement_initial(meeting, is_change=True))\n message = form.save(user=request.user)\n message.related_groups.add(group)\n send_mail_message(request, message)", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "def post_reminder(self, date1='2014-02-12', time1='22:20:33', rnumber='0674767730', rname='Romain', subject='Subject', date2='2014-03-12', time2='23:20:33', location='Paris', body='Description'):\n #TODO fix\n xmldata1 = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\" + 
\\\n \"<s:Envelope xmlns:s=\\\"http://schemas.xmlsoap.org/soap/envelope/\\\" s:encodingStyle=\\\"http://schemas.xmlsoap.org/soap/encoding/\\\">\" + \\\n \"<s:Body>\" + \\\n \"<u:AddMessage xmlns:u=\\\"urn:samsung.com:service:MessageBoxService:1\\\">\" + \\\n \"<MessageType>text/xml</MessageType>\" + \\\n \"<MessageID>can be anything</MessageID>\" + \\\n \"<Message>\" + \\\n \"&lt;Category&gt;Schedule Reminder&lt;/Category&gt;\" + \\\n \"&lt;DisplayType&gt;Maximum&lt;/DisplayType&gt;\" + \\\n \"&lt;StartTime&gt;\" + \\\n \"&lt;Date&gt;\"\n xmldata2 = \"&lt;/Date&gt;\" + \\\n \"&lt;Time&gt;\"\n xmldata3 = \"&lt;/Time&gt;\" + \\\n \"&lt;/StartTime&gt;\" + \\\n \"&lt;Owner&gt;\" + \\\n \"&lt;Number&gt;\"\n xmldata4 = \"&lt;/Number&gt;\" + \\\n \"&lt;Name&gt;\"\n xmldata5 = \"&lt;/Name&gt;\" + \\\n \"&lt;/Owner&gt;\" + \\\n \"&lt;Subject&gt;\"\n xmldata6 = \"&lt;/Subject&gt;\" + \\\n \"&lt;EndTime&gt;\" + \\\n \"&lt;Date&gt;\"\n xmldata7 = \"&lt;/Date&gt;\" + \\\n \"&lt;/Time&gt;\"\n xmldata8 = \"&lt;/Time&gt;\" + \\\n \"&lt;/EndTime&gt;\" + \\\n \"&lt;Location&gt;\"\n xmldata9= \"&lt;/Location&gt;\" + \\\n \"&lt;Body&gt;\"\n xmldata10 = \"&lt;/Body&gt;\" + \\\n \"</Message>\" + \\\n \"</u:AddMessage>\" + \\\n \"</s:Body>\" + \\\n \"</s:Envelope>\"\n\n #Create Header for Message\n header = \"POST /PMR/control/MessageBoxService HTTP/1.0\\r\\n\" + \\\n \"Content-Type: text/xml; charset=\\\"utf-8\\\"\\r\\n\" + \\\n \"Host: \" + self.host + \"\\r\\n\" + \\\n \"Content-Length: \" + str(len(xmldata1) + len(date1) + \\\n len(xmldata2) + len(time1) + \\\n len(xmldata3) + len(rnumber) + \\\n len(xmldata4) + len(rname) + \\\n len(xmldata5) + len(subject) + \\\n len(xmldata6) + len(date2) + \\\n len(xmldata7) + len(time2) + \\\n len(xmldata8) + len(location) + \\\n len(xmldata9) + len(body) + \\\n len(xmldata10)) + \"\\r\\n\" + \\\n \"SOAPACTION: urn:samsung.com:service:MessageBoxService:1#AddMessage\\r\\n\" + \\\n \"Connection: close\\r\\n\\r\\n\"\n #Create socket\n full_soap_request = header + \\\n xmldata1 + date1 + \\\n xmldata2 + time1 + \\\n xmldata3 + rnumber + \\\n xmldata4 + rname + \\\n xmldata5 + subject +\\\n xmldata6 + date2 +\\\n xmldata7 + time2 +\\\n xmldata8 + location +\\\n xmldata9 + body +\\\n xmldata10\n msg_port = 52235;\n\n try:\n # Open Socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.host, msg_port))\n sock.send(full_soap_request.encode('utf-8'))\n read = sock.recv(1024)\n print(\"\\n\\n Reader \\n\\n\" + read)\n sock.close()\n except socket.error, e:\n raise TVError(e[1], 'post_reminder')\n finally:\n sock.close()\n sock = None", "def send_message(self, serial_message):\n #print(\"Sending message: %s\" % serial_message)\n self.sendString(serial_message)", "def write_line(self,msg):\n self.body += str(msg)\n self.body.br()", "def send_reminder(self):\n message_contents = \"This is a reminder that your event: \" + self.event_title + \" takes place on \" + self.event_date + \" in \" + self.event_location\n subject = \"Event Reminder\"\n attendees = self.gameplanuser_set.all()\n for attendee in attendees:\n remindermessage = Message.objects.create(sender=self.event_manager, recipient=attendee, contents=message_contents)\n remindermessage.save()", "def emailNote(self, authenticationToken, parameters):\r\n pass", "def __call__(self, circ):\n log.msg(\"Circuit %s is in progress ...\" % circ.id)\n self.attacher.waiting_circuits.append((circ.id, self.d))", "def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n 
socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)", "def send_command(self, command):\n question = jbus.jbus_generator_data_write(self.node, 0x15b0, bytes([0x00,command]))\n answer = self.send_request(question)\n #print(\"Question: [\", question, \"]\")\n #print(\"Answer: [\",answer,\"] LEN: \",len(answer))\n return self.verify_response(question, answer)", "def run(self):\n\n # self.peripheral.connect(self.address)\n\n # //-set the delegate to handle notification message process\n # self.peripheral.setDelegate(MyDelegate(self.sinOut))\n if self._type == \"BW\":\n uuid = \"0000fff0-0000-1000-8000-00805f9b34fb\" # the bought module distinguished by the name.\n # BW means the bought module's name \"BW-ECG-01\".\n svc = self.peripheral.getServiceByUUID(uuid)\n\n # //-the characteristic that data can be written to\n chr_of_writable = svc.getCharacteristics()[0]\n # //-the characteristic that receives notification from other peripheral.\n chr_of_notify = svc.getCharacteristics()[1]\n # //-enable the notify\n self.peripheral.writeCharacteristic(chr_of_notify.valHandle + 1, struct.pack('<bb', 0x01, 0x00), True)\n # //-bind user ID to BW-ECG-01, the ID could be a random ID.\n chr_of_writable.write(b'\\xE8\\x41\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00',\n True)\n # //-start the acquiring, a time(Y/M/D/H/H/S/deltaT) should be given. the time could be a random time\n # //-but the delta T should have meaning which is the acquiring time. 0x01 means 1 minutes.\n # //-the delta T could be modified as other number, this could be done by UI.\n # //-if the number could be set by user, that will be perfection.\n chr_of_writable.write(b'\\xE8\\x23\\x15\\x03\\x0b\\x10\\x15\\x00\\x00\\x01', True)\n # //-start continually acquiring\n chr_of_writable.write(b'\\xE8\\20', True)\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue\n else:\n uuid = \"f000fff0-0451-4000-b000-000000000000\" # the module made by ourselves\n svc = self.peripheral.getServiceByUUID(uuid)\n ch = svc.getCharacteristics()[0]\n self.peripheral.writeCharacteristic(ch.valHandle + 1, struct.pack('<bb', 0x01, 0x00))\n # print(\"waiting...\")\n # self.sinOut.emit(\"waiting...\")\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue", "async def after_tick(self):\n\n msg = Message(\n to=self.agent.factory_jid,\n body=self.agent.position.to_json()\n )\n msg.set_metadata(\"performative\", \"inform\")\n await self.send(msg)", "def produce_message_for_sending() -> str:\n return f\"You can donate your money here:\\n`{card_donations}`\"", "def Notify(self, text):\n self.logger.debug(\"Sending notification to XBMC: \" + text)\n xbmc = Server(self.url('/jsonrpc', True))\n image='https://raw.github.com/styxit/HTPC-Manager/master/interfaces/default/img/xbmc-logo.png'\n return xbmc.GUI.ShowNotification(title='HTPC manager', message=text, image=image)", "def schc_message(self, message):\r\n self.enter_state()\r\n logging.debug(\"\\tMessage:\\n{}\".format(message.as_text()))\r\n return", "def SendAttentionCommand( self ): # also pauses the recording ?\r\n\r\n self._socket.write( 'A' )\r\n \r\n return self.GetServerResponse()", "def _send_msg(self, 
msg):\n self._kernel.comm.send(msg)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")" ]
[ "0.5887094", "0.5750634", "0.57312334", "0.57009876", "0.5486229", "0.54457545", "0.5437605", "0.53316367", "0.5314361", "0.52677506", "0.52383125", "0.5220476", "0.52009946", "0.5194496", "0.5170836", "0.51647365", "0.51592344", "0.51445454", "0.5126826", "0.5122613", "0.5111196", "0.5108509", "0.50696975", "0.50601727", "0.5041371", "0.50351584", "0.50154984", "0.5008828", "0.5008828", "0.49825588", "0.497945", "0.49668276", "0.4965725", "0.49588534", "0.4957893", "0.4956946", "0.49544615", "0.49524024", "0.4945884", "0.49418572", "0.493249", "0.4923263", "0.49180245", "0.49015203", "0.49003226", "0.48992932", "0.4895751", "0.48937613", "0.4893386", "0.48911762", "0.48834708", "0.48825094", "0.48813266", "0.4865661", "0.4865338", "0.48601925", "0.4842469", "0.48362902", "0.48317748", "0.4823327", "0.481755", "0.48006833", "0.47888017", "0.47860283", "0.47800446", "0.47800446", "0.47800446", "0.4774409", "0.47722468", "0.47589517", "0.47542968", "0.474635", "0.47451144", "0.47414812", "0.4740729", "0.4738628", "0.4733623", "0.4732716", "0.47261745", "0.47154167", "0.47130165", "0.47120774", "0.47113785", "0.47090027", "0.4708461", "0.47058338", "0.46972108", "0.46968216", "0.4687584", "0.4686776", "0.46851552", "0.46801588", "0.46764737", "0.4671714", "0.46679443", "0.46676406", "0.46566612", "0.46549904", "0.4652952", "0.46509865" ]
0.5796216
1
Check for potential problems in a commit.
def check_commit_problems(self, commit, diff):
    # Initialise
    self._license_problem = False
    self._commit_problem = False
    self._commit_notes = defaultdict(list)

    # Unsafe regex checks...
    unsafe_matches = list()
    unsafe_matches.append(r"\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\b\s*[\(\r\n]")
    unsafe_matches.append(r"\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\b\s*[\(\r\n]")
    unsafe_matches.append(r"(scanf)\b\s*[\(\r\n]")
    valid_filename_regex = r"\.(cpp|cc|cxx|C|c\+\+|c|l|y||h|H|hh|hxx|hpp|h\+\+|qml)$"

    # Retrieve the diff and do the problem checks...
    filename = unicode("")
    filediff = list()
    for line in diff:
        file_change = re.match("^diff --(cc |git a\/.+ b\/)(.+)$", line)
        if file_change:
            # Are we changing file? If so, we have the full diff, so do a license check....
            if filename != "" and commit.files_changed[filename]["change"] in ['A'] and re.search(valid_filename_regex, filename):
                self.check_commit_license(filename, ''.join(filediff))

            filediff = list()
            filename = file_change.group(2)
            continue

        # Diff headers are bogus
        if re.match("@@ -\d+,\d+ \+\d+ @@", line):
            filediff = list()
            continue

        # Do an incremental check for *.desktop syntax errors....
        if re.search("\.desktop$", filename) and re.search("[^=]+=.*[ \t]$", line) and line.startswith("+") and not re.match("^\+#", line):
            self._commit_notes[filename].append("[TRAILING SPACE] **")
            self._commit_problem = True

        # Check for things which are unsafe...
        for safety_match in unsafe_matches:
            match = re.match(safety_match, line)
            if match:
                note = "[POSSIBLY UNSAFE: {0}] **".format(match.group(1))
                self._commit_notes[filename].append(note)
                self._commit_problem = True

        # Store the diff....
        filediff.append(line)

    if filename != "" and commit.files_changed[filename]["change"] in ['A'] and re.search(valid_filename_regex, filename):
        self.check_commit_license(filename, ''.join(filediff))
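As a companion to the document above, here is a small, self-contained sketch of its per-line "unsafe call" scan. The diff lines and file name are made up for illustration, and re.search is used here so a pattern is flagged anywhere in an added line.

import re
from collections import defaultdict

# One of the unsafe-call patterns from the document above.
unsafe_matches = [r"\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\b\s*[\(\r\n]"]

# Hypothetical added lines from a diff.
diff_lines = [
    '+    system("rm -rf /tmp/build");',
    '+    result = compute(42);',
]

commit_notes = defaultdict(list)
for line in diff_lines:
    for pattern in unsafe_matches:
        match = re.search(pattern, line)
        if match:
            commit_notes["example.cpp"].append("[POSSIBLY UNSAFE: {0}] **".format(match.group(1)))

print(dict(commit_notes))
# {'example.cpp': ['[POSSIBLY UNSAFE: system] **']}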
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)", "def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")", "def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews", "def something_to_commit():\n\n # Procelain returns nothing if there's nothing to commit\n ret = subprocess.check_output([\"git\", \"status\", \"--porcelain\"])\n\n if (len(ret) > 0):\n return True\n\n return False", "def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start 
with\n commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors", "def validate_change(ticket):\n # First ensure topic line mentions tickets, and pull them out.\n topic = COMMIT_MSG.split('\\n', 1)[0]\n fix_tickets = re.findall(\"[A-Z]{2,5}-[0-9]{1,6}\", topic)\n if len(fix_tickets) == 0:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: commit message does not name a ticket!\"\n return False\n\n # Now get list of approved tickets from master ticket, and ensure\n # all \"fixed\" tickets are approved.\n approved_tickets = get_approved_tickets(ticket)\n for tick in fix_tickets:\n if not tick in approved_tickets:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: ticket {} is not approved (see approval ticket {})\".format(\n tick, ticket)\n return False\n return True", "def test_commit(self):\n # TODO: Test errors 
while committing and recovery\n pass", "def validate_commit(ctx, sha, **_):\n\n gh = ctx.obj.github\n ci_provider = ctx.obj.ci_provider\n\n sha = sha or (ci_provider.sha if ci_provider else None)\n\n def _pre_issue():\n log.echo('Commit references an issue...', break_line=False)\n\n def _post_issue():\n log.checkmark()\n\n def _pre_label():\n log.echo('Issue is labeled with a release label...', break_line=False)\n\n def _post_label():\n log.checkmark()\n\n log.echo('Validating commit', add=True)\n\n try:\n gh.validate_commit(sha=sha,\n hooks={\n 'pre_issue': _pre_issue,\n 'pre_label': _pre_label,\n 'post_issue': _post_issue,\n 'post_label': _post_label\n })\n except exceptions.ReleaseValidationFailedException as e:\n log.xmark()\n log.sub()\n tb = sys.exc_info()[2]\n utils.raise_with_traceback(e, tb)\n log.sub()\n\n log.echo('Validation passed')", "def lint_commit_message(commit):\n success = True\n lines = commit.message.splitlines()\n\n # Check length of summary line.\n summary_line_len = len(lines[0])\n if summary_line_len > COMMIT_MSG_MAX_SUMMARY_LEN:\n error(\n \"The summary line in the commit message is %d characters long; \"\n \"only %d characters are allowed.\" %\n (summary_line_len, COMMIT_MSG_MAX_SUMMARY_LEN), commit)\n success = False\n\n # Check that summary line does not end with a period\n if lines[0].endswith('.'):\n error(\"The summary line must not end with a period.\", commit)\n success = False\n\n # Check that we don't have any fixups.\n if lines[0].startswith('fixup!'):\n error(\"Fixup commits are not allowed. Please resolve by rebasing.\",\n commit)\n success = False\n\n # Try to determine whether we got an area prefix in the commit message:\n summary_line_split = lines[0].split(':')\n summary_line_split_len = len(summary_line_split)\n\n # We didn't get an area prefix, so just make sure the message started with a\n # capital letter.\n if summary_line_split_len == 1:\n if not re.match(r'[A-Z]', lines[0]):\n error(\"The summary line must start with a capital letter.\", commit)\n success = False\n # The user specified an area on which she worked.\n elif summary_line_split_len == 2:\n if not re.match(r'[a-z_A-Z\\-]*(/[a-z_A-Z\\-]+)*', summary_line_split[0]):\n error(\n 'The area specifier is mal-formed. Only letters,'\n 'underscores and hyphens are allowed. 
Different areas must be'\n 'separated by a slash.', commit)\n success = False\n # Check the second part of the commit message.\n if not summary_line_split[1].startswith(' '):\n error(\"The area must be separated by a single space.\", commit)\n success = False\n if not re.match(r'\\s[A-Z]', summary_line_split[1]):\n error(\n \"The summary line after the colon must start with a capital letter.\",\n commit)\n success = False\n # We do not allow more than one area i.e., colon.\n else:\n error(\"Only one colon is allowed to specify the area of changes.\",\n commit)\n success = False\n\n # Check for an empty line separating the summary line from the long\n # description.\n if len(lines) > 1 and lines[1] != \"\":\n error(\n \"The second line of a commit message must be empty, as it \"\n \"separates the summary from the long description.\", commit)\n success = False\n\n return success", "def check_commit_msg(commitish):\n\n hdr = CommitSubHeader()\n line_list = dump_raw_body(commitish)\n\n if COMMIT_MESSAGE_CHECK and line_list[1] != \"\":\n if line_list[1].find('REF: ') == -1:\n add_error(\"Summary field must have just one line in %s\" % commitish)\n else:\n add_error(\"No empty line after Summary field in %s\" % commitish)\n\n if COMMIT_MESSAGE_CHECK and len(line_list[0]) < 5 or len(line_list[0]) > 78:\n add_error(\"Wrong size (%d) of Summary field in %s\" % (len(line_list[0]), commitish))\n\n while len(line_list) != 0:\n line = line_list.pop(0)\n\n if line.find('REF: ') == 0:\n if hdr.ref == None:\n hdr.ref = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'REF:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['REF: '].match(line[len('REF: '):]):\n add_error(\"Wrong field 'REF:' in %s\" % commitish)\n else:\n hdr.ref = line[len('REF: '):]\n\n elif line.find('Signed-off-by: ') == 0:\n if hdr.signed == None:\n hdr.signed = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'Signed-off-by:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['Signed-off-by: '].match(line[len('Signed-off-by: '):]):\n add_error(\"Wrong field 'Signed-off-by:' in %s\" % commitish)\n else:\n hdr.signed = line[len('Signed-off-by: '):]\n\n elif len(line) != 0:\n hdr.desc = 1\n if COMMIT_MESSAGE_CHECK and len(line) > 78:\n add_error(\"Wrong size (%d) of field 'Description' in %s\" % (len(line), commitish))\n\n if COMMIT_MESSAGE_CHECK and hdr.ref == None:\n add_error(\"No field 'REF:' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.desc == None:\n add_error(\"No field 'Description' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.signed == None:\n add_error(\"No field 'Signed-off-by:' in %s\" % commitish)\n\n return hdr", "def _ensure_commit(git_sha1):\n cmd = [\"git\", \"cat-file\", \"-e\", git_sha1 + \"^{commit}\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if p.returncode == 0:\n # we have the commit locally\n return\n # we don't have the commit, must fetch\n cmd = [\"git\", \"fetch\", \"https://github.com/pytorch/pytorch.git\", git_sha1]\n p = subprocess.run(cmd, check=True)", "def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())", "def lint_commit_base(commit):\n success = True\n # Merge commits have two parents, we maintain a linear history.\n if len(commit.parents) > 1:\n error(\n \"Please resolve merges by re-basing. 
Merge commits are not allowed.\",\n commit)\n success = False\n\n return success", "def resolve_conflicts(self, commit=True):\n pass # pragma: no cover", "def _validate_commits(pull_request):\n commits = github.get_commits(pull_request[\"commits_url\"])\n analyzed = []\n\n for commit_wrapper in commits:\n commit = {\n \"sha\": commit_wrapper[\"sha\"],\n \"message\": commit_wrapper[\"commit\"][\"message\"],\n }\n\n commit[\"standard\"] = _validate_title(commit[\"message\"])\n analyzed.append(commit)\n\n result = all(commit[\"standard\"] for commit in analyzed)\n return analyzed, result", "def lint(self, commit):\n LOG.debug(\"Linting commit %s\", commit.sha or \"[SHA UNKNOWN]\")\n LOG.debug(\"Commit Object\\n\" + str(commit))\n\n # Ensure the Deprecation class has a reference to the config currently being used\n Deprecation.config = self.config\n\n # Apply config rules\n for rule in self.configuration_rules:\n rule.apply(self.config, commit)\n\n # Skip linting if this is a special commit type that is configured to be ignored\n ignore_commit_types = [\"merge\", \"squash\", \"fixup\", \"fixup_amend\", \"revert\"]\n for commit_type in ignore_commit_types:\n if getattr(commit, f\"is_{commit_type}_commit\") and getattr(self.config, f\"ignore_{commit_type}_commits\"):\n return []\n\n violations = []\n # determine violations by applying all rules\n violations.extend(self._apply_line_rules([commit.message.title], commit, self.title_line_rules, 1))\n violations.extend(self._apply_line_rules(commit.message.body, commit, self.body_line_rules, 2))\n violations.extend(self._apply_commit_rules(self.commit_rules, commit))\n\n # Sort violations by line number and rule_id. If there's no line nr specified (=common certain commit rules),\n # we replace None with -1 so that it always get's placed first. Note that we need this to do this to support\n # python 3, as None is not allowed in a list that is being sorted.\n violations.sort(key=lambda v: (-1 if v.line_nr is None else v.line_nr, v.rule_id))\n return violations", "def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. 
Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )", "def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False", "def is_commit_id_valid(commit_id, wit_path):\n\n if not is_branch(wit_path, commit_id):\n if commit_id.isalnum() and len(commit_id) == 40:\n\n if commit_id in _get_all_saves_names(wit_path):\n return True\n\n else:\n logging.error(f'No commit named {commit_id}.')\n\n else:\n logging.error('branch or commit does not exist. commit id must be 40 digits long and hexadecimal.')\n else:\n return True", "def __gitBisectBad(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"bad\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def commit_exists(repo, commit):\n cmd = ['git', 'cat-file', '-t', commit]\n try:\n devnull = open(os.devnull, 'wb')\n output = subprocess.check_output(cmd, cwd=repo,\n stderr=devnull)\n return output.rstrip() == 'commit'\n except subprocess.CalledProcessError:\n return False", "def _check_inputs(self):\n\n if not os.path.isdir(self._parent_repo):\n raise Error('Invalid parent repo path %r' % self._parent_repo)\n\n self._run_git_command(['--help'], error_message='Unable to run git')\n self._run_git_command(['status'],\n error_message='%r is not a valid git repo' %\n os.path.abspath(self._parent_repo))\n self._run_git_command(['fetch', 'origin'],\n error_message='Failed to fetch origin')\n self._run_git_command(\n ['rev-parse', '%s^{commit}' % self._branch_ref],\n error_message='Branch %s not found' % self._branch_ref)\n self._run_git_command(\n ['rev-parse', '%s^{commit}' % self._revision],\n error_message='Revision \"%s\" not found' % self._revision)", "def enforce_clean_option(args, run):\n repos = run.experiment_info[\"repositories\"]\n if not repos:\n raise RuntimeError(\n \"No version control detected. \"\n \"Cannot enforce clean repository.\\n\"\n \"Make sure that your sources under VCS and the \"\n \"corresponding python package is installed.\"\n )\n else:\n for repo in repos:\n if repo[\"dirty\"]:\n raise RuntimeError(\n \"EnforceClean: Uncommited changes in \"\n 'the \"{}\" repository.'.format(repo)\n )", "def verify_rev(rev):\n return not subprocess.call(['git', 'rev-parse', '-q', '--verify', '--no-revs', rev])", "def warn_uncommitted_changes(force):\n output = subprocess.run([\"git\", \"status\"], capture_output=True, text=True,)\n if \"modified\" in output.stdout or \"Untracked\" in output.stdout:\n print(\"Warning: repository has uncommitted changes:\\n\")\n print(\"-----------------------------------------------------------------------\")\n print(f\"{output.stdout}\")\n print(\"-----------------------------------------------------------------------\")\n if not force:\n print(\"\\nRun with -f to override\")\n sys.exit(1)", "def check_pr_details(self, pr_number):\n pr = self.repo.get_pull(pr_number)\n email_pattern = re.compile(r'^.*@suse\\.(com|cz|de)$')\n\n for commit in pr.get_commits():\n sha = commit.sha\n author = commit.author\n title = message = commit.commit.message\n # Not sure why we need to use the nested commit for the email\n email = commit.commit.author.email\n user_id = f'{author.login}({email})'\n body = ''\n\n # This could be probably smarter but commit contains something like the following\n # message=\"$commit_title\\n\\n$long_commit_message\" and as such maybe we can split it and\n # check for the following limits: title max 50 chars, body max 72 chars per line and at\n # least as long as the commit title to avoid commit message bodies full of whitespaces\n try:\n title, body = message.split('\\n\\n', 1)\n except ValueError:\n print('No commit body was detected')\n\n print(f'Checking commit \"{sha}: {title}\"')\n\n if not email_pattern.fullmatch(email):\n print(f'Checking if {user_id} is part of the SUSE organization...')\n\n if self.org.has_in_members(commit.author):\n print(f'{user_id} is part of SUSE organization but a SUSE e-mail address was not used for commit: {sha}')\n sys.exit(1)\n\n # replace case-insensitive \"(bsc#)\" (or []) and surrounding spaces\n # with a single space, then prune leading/trailing spaces\n title = 
re.sub(r'\\s*[([]\\s*(?i:bsc)#\\d+\\s*[)\\]]\\s*', ' ', title).strip()\n if len(title) > 50:\n print('Commit message title should be less than 50 characters (excluding the bsc# reference)')\n sys.exit(1)\n\n # No body detected. Nothing else to do here.\n if not body:\n continue\n\n if len(body) < len(title):\n print('Commit message body is too short')\n sys.exit(1)\n\n # strip multi-line '```code```' blocks & lines starting w\\ `code`\n code_pattern = re.compile(\n r'''\n ((?m:^)\\s*```) # multi-line beginning, 0-more whitespace, ```\n (?s:.*?) # non-greedy, zero or more chars, including \\n\n \\1 # whatever matched at the beginning\n | # or...\n (?m:^)\\s*` # start of line, optional whitespace, backtick\n [^`]+ # oneor more non-backtick chars\n `\\s*(?m:$) # and a backtick at the end of the line\n ''',\n re.VERBOSE\n )\n for body_line in re.sub(code_pattern, '', body).splitlines():\n if len(body_line) > 72:\n print('Each line in the commit body should be less than 72 characters')\n sys.exit(1)\n\n print(f'PR-{pr_number} commits verified.')", "def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)", "def test_noChangeFromTrunk(self):\n runCommand([\"git\", \"checkout\", \"-b\", \"mypatch\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(\n logs[-1], \"On trunk or no diffs from trunk; no need to look at this.\"\n )", "def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False", "def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True", "def _abort_on_pending_changes(self) -> None:\n if set(self._dirty_paths_by_status) - {StatusCode.Untracked}:\n raise ActionFailure(\n \"Found pending changes in tracked files. 
Diff-aware runs require a clean git state.\"\n )", "def test_update_from_commit_id_without_repository_support(self):\n scmtool_cls = type(self.repository.get_scmtool())\n\n old_supports_post_commit = scmtool_cls.supports_post_commit\n scmtool_cls.supports_post_commit = False\n\n try:\n review_request = ReviewRequest.objects.create(self.user,\n self.repository)\n draft = ReviewRequestDraft.create(review_request)\n\n with self.assertRaises(NotImplementedError):\n draft.update_from_commit_id('4')\n finally:\n scmtool_cls.supports_post_commit = old_supports_post_commit", "def _is_commit_sha(commit):\n return len(commit) == 40 and all([\n ch.isdigit() or (ch >= \"a\" and ch <= \"f\")\n for ch in commit.elems()\n ])", "def clean_for_commit(self):", "def test_no_change(self):\r\n git_export_utils.export_to_git(\r\n self.course.id,\r\n 'file://{0}'.format(self.bare_repo_dir)\r\n )\r\n\r\n with self.assertRaisesRegexp(GitExportError,\r\n str(GitExportError.CANNOT_COMMIT)):\r\n git_export_utils.export_to_git(\r\n self.course.id, 'file://{0}'.format(self.bare_repo_dir))", "def checkGit(directory):", "def line_part_of_commit(file, line, commit):\n if line == '0': return False\n\n line_val = git(\"blame\", \"-l\", \"-L{0},{0}\".format(line), file)\n return line_val.split(\" \", 1)[0] == commit", "def check_diff(src, dst):\n result = _subprocess(['git', '--no-pager', 'log', '--graph', '--abbrev-commit', '--pretty=oneline',\n '--no-merges', \"--\", f\"{src}\", f\"^{dst}\"])\n\n if result:\n print(f\"Warning: the following commits are present on {dst} but not on {src}: \\n{result}\")\n if args.force:\n print(f\"Warning: they will be overwritten on {dst} and discarded.\")\n else:\n print(f\"Warning: run with --force to overwrite and discard these commits from {dst}\")\n exit(1)", "def test_get_latest_commit(self):\n error = ''\n list = get_latest_commit()\n for el in list:\n sha = el['sha']\n try:\n if not Commits.objects.get(sha=sha):\n error = 'object not exist'\n except Exception as ex:\n error = 'there are duplicates'\n self.assertEqual(error, '')", "def lint_commit_author(commit):\n success = True\n if commit.author.email.endswith('users.noreply.github.com'):\n error(\n 'Commit author has no valid email address set: %s. '\n 'Use \"git config user.email [email protected]\" to '\n 'set a valid email address, then update the commit '\n 'with \"git rebase -i\" and/or '\n '\"git commit --amend --reset-author\". '\n 'Also check your GitHub settings at '\n 'https://github.com/settings/emails: your email address '\n 'must be verified, and the option \"Keep my email address '\n 'private\" must be disabled.' % (commit.author.email, ), commit)\n success = False\n\n if ' ' not in commit.author.name:\n warning(\n 'The commit author name \"%s\" contains no space. '\n 'Use \"git config user.name \\'Johnny English\\'\" to '\n 'set your real name, and update the commit with \"git rebase -i \" '\n 'and/or \"git commit --amend --reset-author\".' 
%\n (commit.author.name, ), commit)\n # A warning doesn't fail lint.\n\n return success", "def check_commits(self, commits):\n LOG.info('Checking Perforce permissions and locks')\n self.ctx.checkpoint(\"copy_to_p4._preflight_check\")\n\n # Stop if files are opened in our repo client\n # We expect this to be none, since we have the view lock\n opened = self.ctx.p4.run(['opened', '-m1'])\n if opened:\n raise PreflightException(_('There are files opened by Git Fusion for this repo.'))\n\n # fetch the repo setting only, without cascading to global config\n is_read_only = self.ctx.repo_config.getboolean(p4gf_config.SECTION_REPO,\n p4gf_config.KEY_READ_ONLY,\n fallback=False)\n if is_read_only:\n raise PreflightException(_(\"Push to repo {repo_name} prohibited.\")\n .format(repo_name=self.ctx.config.repo_name))\n\n # get a list of stream depots for later checks for read-only paths\n depots = self.ctx.p4.run(['depots'])\n self.stream_depots = set([d['name'] for d in depots if d['type'] == 'stream'])\n any_locked_files = self._find_locked_by()\n LOG.debug(\"any_locked_files {0}\".format(any_locked_files))\n case_conflict_checker = None\n if not self.ctx.server_is_case_sensitive:\n case_conflict_checker = CaseConflictChecker(self.ctx)\n case_conflict_checker.read_perforce_paths()\n\n ui_name = self._curr_ref_ui_name()\n if ui_name:\n progress_msg = _('Checking commits for {ref}...').format(ref=ui_name)\n else:\n progress_msg = _('Checking commits...')\n\n with ProgressReporter.Determinate(len(commits)):\n for commit in commits:\n ProgressReporter.increment(progress_msg)\n\n self.g2p_user.get_author_pusher_owner(commit)\n\n rev = commit['sha1']\n if not self.assigner.is_assigned(commit['sha1']):\n continue\n\n self.check_commit(commit)\n\n for branch_id in self.assigner.branch_id_list(rev):\n self.check_commit_for_branch(\n commit\n , branch_id\n , any_locked_files\n , case_conflict_checker )\n\n if case_conflict_checker:\n cc_text = case_conflict_checker.conflict_text()\n if cc_text:\n raise PreflightException(cc_text)", "def test_valid_commit(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_path = os.path.join(tmp_dir, 'out')\n os.mkdir(out_path)\n self.assertTrue(\n cifuzz.build_fuzzers(\n EXAMPLE_PROJECT,\n 'oss-fuzz',\n tmp_dir,\n commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523'))\n self.assertTrue(\n os.path.exists(os.path.join(out_path, EXAMPLE_BUILD_FUZZER)))", "def path_touched(*paths, commit_range):\n return check_output([\n 'git', 'diff', '--name-only', commit_range, '--', *paths\n ]).decode('utf-8').strip() != ''", "def commits_exist(repo, commits):\n for commit in commits:\n if not commit_exists(repo, commit):\n return False\n return True", "def check_checkpatch(project, commit, _desc, diff, options=None):\n tool = get_helper_path('checkpatch.pl')\n cmd = ([tool, '-', '--root', project.dir] +\n options.args(('--ignore=GERRIT_CHANGE_ID',), diff))\n return _check_cmd('checkpatch.pl', project, commit, cmd,\n input=rh.git.get_patch(commit))", "def _check_overlap(self, fe_commit):\n # +++ Avoid O(b branches * r rev) checks when\n # overlap is impossible because current branch\n # overlaps no other branch.\n if self._current_branch not in self._overlapping_branch_list():\n return\n\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_to_depot_path(gwt_path)\n\n for branch in self._overlapping_branch_list():\n if branch == self._current_branch:\n continue\n if not branch.intersects_depot_path(depot_path):\n continue\n\n 
LOG.debug(\"_check_overlap() branch {br1} <> {br2}\"\n \" gwt={gwt:<40} {dp}\\n{view}\"\n .format(\n br1 = p4gf_util.abbrev(self._current_branch.branch_id)\n , br2 = p4gf_util.abbrev(branch.branch_id)\n , gwt = gwt_path\n , dp = depot_path\n , view = \"\\n\".join(branch.view_p4map.as_array())\n ))\n\n if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:\n current_branch_name = self._current_branch.git_branch_name\n if self._current_branch.is_new_fp_from_push:\n current_branch_name += '(new)'\n other_branch_name = branch.git_branch_name\n if branch.is_new_fp_from_push:\n other_branch_name += '(new)'\n human_msg = (_(\n \"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\\n\"\n \" You are attempting to push and create a new fully populated branch\\n\"\n \" with paths which overlap another branch. Contact your admin\\n\"\n \" to configure non-conflicting destination branch paths.\\n\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = current_branch_name\n , b2 = other_branch_name ))\n else:\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths that overlap multiple Git Fusion branches are read-only.\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = self._current_branch.branch_id\n , b2 = branch.branch_id ))\n raise PreflightException(human_msg)", "def __gitVerifyBundle(self):\n self.vcs.gitVerifyBundle(self.project.getProjectPath())", "def maybe_commit(job):", "def check_unstaged_changes(self):\n pass", "def is_new_commit(self, commit, cache):\n\n return (commit.id not in cache and not self.db.key_in_db(\n f'{self.project}-{commit.id.decode()}'\n ))", "def test_git_commits(self):\n event_id = dog.Event.create(title=\"Testing git commits\", text=\"\"\"$$$\n eac54655 * Merge pull request #2 from DataDog/alq-add-arg-validation ([email protected])\n |\\\n 760735ef | * origin/alq-add-arg-validation Simple typecheck between metric and metrics ([email protected])\n |/\n f7a5a23d * missed version number in docs ([email protected])\n $$$\"\"\", event_type=\"commit\", source_type_name=\"git\", event_object=\"0xdeadbeef\")['event']['id']\n event = self.get_event_with_retry(event_id)\n self.assertEqual(event['event']['title'], \"Testing git commits\")", "def verify(repo, revision, sigbody, ignore_missing=False): # pylint: disable=R0912,R0915\n # resolve revision to commit hash\n try:\n obj_to_verify, _ = repo.revparse_ext(revision)\n except KeyError:\n error_if(True, f\"Failed to `git rev-parse {revision}`\")\n if isinstance(obj_to_verify, Commit):\n commit_to_verify = obj_to_verify.hex\n elif isinstance(obj_to_verify, Tag):\n commit_to_verify = obj_to_verify.target.hex\n else:\n assert False\n\n # check status of current checkout\n warnings = set()\n head_commit = repo.revparse_ext(\"HEAD\")[0].hex\n if head_commit != commit_to_verify:\n warnings.add(\n f\"Verified revision{revision} = {commit_to_verify} is not the working tree HEAD = {head_commit}\"\n )\n elif dirty(repo):\n warnings.add(\n \"Working tree is dirty; signature applies to clean commit HEAD = \" + head_commit\n )\n\n verified = None\n all_sha256 = len(commit_to_verify) == 64\n\n # Look for signature of commit_to_verify\n # Warning about warning messages: sigbody comes off the blockchain, so we shouldn't include\n # anything from it in warning messages without validation (in 
case it is malicious)\n lines = [line for line in sigbody.split(b\"\\n\") if line]\n for line in lines: # pylint: disable=R1702\n try:\n sig_elt = json.loads(line.decode())\n assert isinstance(sig_elt.get(\"commit\"), str)\n except:\n error_if(True, \"Invalid signature syntax\")\n if not repo.get(sig_elt[\"commit\"]):\n error_if(\n not ignore_missing,\n (\n \"Signed commit missing from local repository\"\n if len(lines) <= 1\n else \"Signed commit(s) missing from local repository; try --ignore-missing if OK for some but not all to be present\"\n ),\n )\n warnings.add(\"One or more signed commit(s) missing from local repository\")\n # If the signature includes tags, make sure they don't refer to commits other than the\n # signed ones. There are several cases to deal with here as the signed & local tags could\n # each be either lightweight or annotated.\n local_tag = None\n if \"tag\" in sig_elt:\n assert isinstance(sig_elt[\"tag\"], str)\n assert \"tagObject\" not in sig_elt or isinstance(sig_elt[\"tagObject\"], str)\n try:\n local_tag = repo.revparse_ext(sig_elt[\"tag\"])\n except KeyError:\n error_if(\n not ignore_missing,\n \"Signed tag(s) missing from local repository; try --ignore-missing if this is OK\",\n )\n warnings.add(\"One or more signed tag(s) missing from local repository\")\n local_tag = (None, None)\n if isinstance(local_tag[0], Commit): # local lightweight tag\n assert isinstance(local_tag[1], Reference)\n error_if(\n not local_tag[1].name.startswith(\"refs/tags/\"),\n \"The signed tag refers locally to something else: \" + local_tag[1].name,\n )\n error_if(\n local_tag[0].hex != sig_elt[\"commit\"],\n f\"The local tag '{local_tag[1].shorthand}' refers to a different commit than the signed tag\",\n )\n if \"tagObject\" in sig_elt:\n warnings.add(\n f\"The local tag '{local_tag[1].shorthand}' is lightweight, while the signed tag was annotated\"\n )\n elif isinstance(local_tag[0], Tag): # local annotated tag\n if \"tagObject\" in sig_elt:\n error_if(\n sig_elt[\"tagObject\"] != local_tag[0].hex,\n f\"The local tag '{local_tag[0].name}' = {local_tag[0].hex} differs from the signed tag in annotations (although they share the same name and commit reference)\",\n )\n else:\n error_if(\n local_tag[0].target.hex != sig_elt[\"commit\"],\n f\"The local annotated tag '{local_tag[0].name}' = {local_tag[0].hex} refers to a different commit than the signed tag\",\n )\n warnings.add(\n f\"The local tag '{local_tag[1].shorthand}' is annotated, while the signed tag was lightweight\"\n )\n all_sha256 = all_sha256 and len(local_tag[0].hex) == 64\n elif local_tag[0] is not None:\n assert False\n # At last...check whether sig_elt signs the desired commit\n if sig_elt[\"commit\"] == commit_to_verify:\n if local_tag and local_tag[1]:\n verified = f\"Verified: local revision {revision} = signed tag {local_tag[1].shorthand} (commit {commit_to_verify})\"\n elif verified is None:\n verified = f\"Verified: local revision {revision} = signed commit {commit_to_verify}\"\n\n error_if(not verified, f\"Signature doesn't apply to {revision} ({commit_to_verify})\")\n if not all_sha256:\n warnings.add(\n \"Signature pertains to git SHA-1 digest(s); review git SHA-1 security risks and consider adopting git SHA-256 mode\"\n )\n return verified, warnings", "def __gitCheckPatches(self):\n self.vcs.gitApplyCheckPatches(self.project.getProjectPath(),\n check=True)", "def lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")", "def 
test_commit_on_unborn_branch(tmp_path: Path) -> None:\n repository = Repository.init(tmp_path / \"repository\")\n repository.commit(message=\"initial\")\n\n assert not repository.head.commit.parents", "def has_unstaged_changes(repo):\n subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules',\n '--refresh'], cwd=repo)\n return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],\n cwd=repo) != 0", "def check_heads(repo, their_heads, context):\n heads = repo.heads()\n heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()\n if not (\n their_heads == [b'force']\n or their_heads == heads\n or their_heads == [b'hashed', heads_hash]\n ):\n # someone else committed/pushed/unbundled while we\n # were transferring data\n raise error.PushRaced(\n b'repository changed while %s - please try again' % context\n )", "def test_invalid_commit_sha(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self.assertRaises(AssertionError):\n cifuzz.build_fuzzers(EXAMPLE_PROJECT,\n 'oss-fuzz',\n tmp_dir,\n commit_sha='')", "def make_sanity_checks(opts):\n if not info.is_git_installed():\n raise RuntimeError(\"Make sure git is installed and working.\")\n if not info.is_git_configured():\n raise RuntimeError(\n 'Make sure git is configured. Run:\\n' +\n ' git config --global user.email \"[email protected]\"\\n' +\n ' git config --global user.name \"Your Name\"\\n' +\n \"to set your account's default identity.\")\n if os.path.exists(opts['project']):\n if not opts['update'] and not opts['force']:\n raise RuntimeError(\n \"Directory {dir} already exists! Use --update to update an \"\n \"existing project or --force to overwrite an existing \"\n \"directory.\".format(dir=opts['project']))\n if 'package' in opts:\n if not utils.is_valid_identifier(opts['package']):\n raise RuntimeError(\n \"Package name {} is not a valid \"\n \"identifier.\".format(opts['package']))", "def flight_check():\n for command in ['git']:\n if shutil.which(command) is None:\n raise RuntimeError('command not found: {}'.format(command))\n\n git_rev_parse('HEAD')", "def _apply_commit_rules(rules, commit):\n all_violations = []\n for rule in rules:\n violations = rule.validate(commit)\n if violations:\n all_violations.extend(violations)\n return all_violations", "def test_pre_commit_has_no_configuration(tmp_path):\n ProjectMock(tmp_path).style(\"\").pre_commit(\"\").api_check_then_fix()", "def __gitBisectGood(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"good\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def locate_fixed_problems(commit_message):\n \n identifiers = set()\n \n commit_message = commit_message.strip()\n lines = commit_message.splitlines()\n \n match_urls = True\n for line in lines:\n ids = identifiers_from_line(line, match_urls)\n if len(ids) == 0:\n match_urls = False\n \n identifiers.update(ids)\n \n return identifiers", "def commit_sanity_check(self, data):\n try:\n effects_node = PipelineHelper.getAssetEffectsNode(data.asset)\n except ValueError:\n mc.warning(' '.join([\n 'No effects group.',\n \"Can't find the effects group for the asset\",\n data.asset.name\n ]))\n return False\n\n # Check for duplicates in nodes parented under effects node as\n # those would prevent a proper commit\n relatives = mc.listRelatives(effects_node, ad=True)\n if relatives is not None and Counter(relatives).most_common()[0][1] > 1:\n mc.warning(' '.join([\n 'Commit failed.',\n \"Two objects or more share the same name!\"\n \"Rename and retry.\"\n ]))\n return False\n\n # Find fx cache and animation alembic components assigned to the\n # current asset\n maya_commit = data.asset.get_maya_commit()\n fx_simulation_component = maya_commit.component\n\n context = PipelineHelper.getContext()\n fx_cache_component = context.find_shot_instance_component(\n shot_instance=fx_simulation_component.shot_instance,\n stage=zefir.STAGES.FX_CACHE\n )\n\n animation_cache_component = context.find_shot_instance_component(\n shot_instance=fx_simulation_component.shot_instance,\n stage=zefir.STAGES.ANIMATION_ALEMBIC\n )\n # Stop if the current asset has neither a fx cache nor a animation\n # alembic component\n if fx_cache_component is None and animation_cache_component is None:\n return False\n\n return True", "def test_get_git_commit(self):\n git_commit = get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def check_errors(self) -> None:", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def checkConflicts(self):\n\t\treturn", "def fail_on_unacknowledged_changes(args):\n if not filecmp.cmp(args.golden, args.current):\n return GoldenMismatchError(\n api_level=args.api_level,\n current=args.current,\n golden=args.golden,\n show_update_hint=True,\n )\n return None", "def prevent_duplicate_commits(oldrev, newrev, refname):\n try:\n commit_list = subprocess.check_output([\n \"git\", \"rev-list\", newrev, \"-n\", GIT_COMMIT_LIST_LENGTH\n ])\n except Exception as e:\n print(\"Exception: %s\" % e)\n pass\n commit_list = commit_list.split(\"\\n\")\n commit_list = [item for item in commit_list if len(item) > 0]\n\n # For each of the first GIT_COMMIT_LIST_LENGTH pairs, check diff\n for i in range(len(commit_list) - 1):\n first = commit_list[i]\n second = commit_list[i + 1]\n\n rev1 = get_svn_revision(first)\n rev2 = get_svn_revision(second)\n if rev1 and (rev1 == rev2):\n diff = subprocess.check_output([\"git\", \"diff\", first, second])\n # If the diff of two commits is empty, means they are the same.\n # i.e duplicate\n if not diff:\n 
print(ERROR_MSG % (first, second))\n sys.exit(1)\n return", "def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False", "def is_commit_signature_valid(self, commit: git.Commit) -> bool:\n commit_status = self.get_commit_signature_status(commit)\n if commit_status in VALID_SIGNATURE_STATUSES:\n logger.debug(f\"Commit {commit.hexsha!r} signature is valid.\")\n return True\n\n logger.warning(f\"Commit {commit.hexsha!r} signature is not valid.\")\n return False", "def _diff_and_commit(self, commit_msg=''):\n if not commit_msg:\n if 'commit_msg' not in self.data:\n # Ask until we get a non-empty commit message.\n while not commit_msg:\n commit_msg = utils.get_input(\n \"What is the commit message? \")\n else:\n commit_msg = self.data['commit_msg']\n\n diff_cmd = self.vcs.cmd_diff()\n diff = execute_command(diff_cmd)\n if sys.version.startswith('2.6.2'):\n # python2.6.2 bug... http://bugs.python.org/issue5170 This is the\n # spot it can surface as we show a part of the changelog which can\n # contain every kind of character. The rest is mostly ascii.\n print(\"Diff results:\")\n print(diff)\n else:\n # Common case\n logger.info(\"The '%s':\\n\\n%s\\n\", diff_cmd, diff)\n if utils.ask(\"OK to commit this\"):\n msg = commit_msg % self.data\n msg = self.update_commit_message(msg)\n commit_cmd = self.vcs.cmd_commit(msg)\n commit = execute_command(commit_cmd)\n logger.info(commit)", "def validate_sha_github(sha):\n r = requests.head(github_changeset_url % sha)\n return r.status_code == 200", "def test_nothing_significant_to_commit(caplog, local, subdirs):\n local.ensure('sub' if subdirs else '', '.doctrees', 'file.bin').write('data')\n local.ensure('sub' if subdirs else '', 'searchindex.js').write('data')\n old_sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n assert sha != old_sha\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--']) # Exit 0 if nothing changed.\n records = [(r.levelname, r.message) for r in caplog.records]\n assert ('INFO', 'No changes to commit.') not in records\n assert ('INFO', 'No significant changes to commit.') not in records\n\n local.ensure('sub' if subdirs else '', '.doctrees', 'file.bin').write('changed')\n local.ensure('sub' if subdirs else '', 'searchindex.js').write('changed')\n old_sha = sha\n records_seek = len(caplog.records)\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n assert sha == old_sha\n with pytest.raises(CalledProcessError):\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--'])\n records = [(r.levelname, r.message) for r in caplog.records][records_seek:]\n assert ('INFO', 'No changes to commit.') not in records\n assert ('INFO', 'No significant changes to commit.') in records\n\n local.join('README').write('changed') # Should cause other two to be committed.\n old_sha = sha\n records_seek = len(caplog.records)\n actual = commit_and_push(str(local), 
'origin', Versions(REMOTES))\n assert actual is True\n sha = pytest.run(local, ['git', 'rev-parse', 'HEAD']).strip()\n assert sha != old_sha\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--']) # Exit 0 if nothing changed.\n records = [(r.levelname, r.message) for r in caplog.records][records_seek:]\n assert ('INFO', 'No changes to commit.') not in records\n assert ('INFO', 'No significant changes to commit.') not in records", "def index_is_dirty():\n result, output = popen('git diff --cached', False, False)\n return len(output) > 0", "def checkReissues(self):\n return None", "def canBeAccessed(self):\n \n try:\n self._client.log(self._repositoryUri)\n return True\n except ClientError, error:\n _logger.debug(error.args[0])\n for _, errorCode in error.args[1]:\n if errorCode == 160006: # We have no commit in the repository, but its ok.\n return True\n return False", "def test_missing_hooks_in_repo(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [[\".pre-commit-config.yaml\".repos]]\n repo = \"whatever\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - repo: whatever\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 334, \": missing 'hooks' in repo 'whatever'\")\n )", "def is_commit_affecting_directory(self, commit, directory):\n exit_code = self.run([\n 'git', 'diff-tree', '--quiet', '--no-commit-id', '-r', commit,\n '--', directory\n ],\n return_exit_code=True)\n return exit_code == 1", "def is_git_dirty():\n dirty_status = local('git diff --quiet || echo \"*\"', capture=True)\n if dirty_status == '*':\n return True\n\n untracked_count = int(local('git status --porcelain 2>/dev/null| grep \"^??\" | wc -l', capture=True))\n if untracked_count > 0:\n return True\n\n return False", "def _check_diff_add_delete(commit_sha1, head_sha1):\n commit_info = {}\n branch_sha1s = []\n\n # Get list of commits between this one and the branch head\n git_log_cmd = shlex.split(\n 'git log --oneline --no-abbrev --reverse '\n '{commit_sha1}..{head_sha1}'.format(\n commit_sha1=commit_sha1, head_sha1=head_sha1))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n for git_log_line in git_log_output.splitlines():\n if git_log_line == '':\n continue\n\n branch_sha1, _ = git_log_line.split(' ', 1)\n branch_sha1s.append(branch_sha1)\n\n # If there are no commits to check then just return an empty dict\n # and empty list tuple\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n diff_lines = _parse_diff(commit_sha1)\n\n context = 'diff-add-delete-check'\n for diff_line in diff_lines:\n line_type, line = diff_line[0], diff_line[1:]\n\n # Skip blank lines\n if line == '':\n continue\n\n # Use the -S parameter of git log to check whether an added line\n # was removed or duplicated in a later commit, or whether a\n # removed line was re-added or also removed elsewhere in a later\n # commit\n\n # Escape double-quotes\n line = re.sub(r'\"', r'\\\\\\\"', line)\n git_log_s_str = (\n 'git log --oneline --no-abbrev --reverse -S\"{line}\" '\n '{commit_sha1}..{head_sha1}'.format(\n line=line, commit_sha1=commit_sha1, head_sha1=head_sha1))\n try:\n git_log_s_cmd = shlex.split(git_log_s_str)\n print 'Running git log -S\"{line}\"'.format(line=line)\n print 'git_log_s_cmd: {git_log_s_cmd}'.format(\n git_log_s_cmd=git_log_s_cmd)\n\n git_log_s_output = subprocess.check_output(git_log_s_cmd)\n print 'git_log_s_output: {git_log_s_output}'.format(\n git_log_s_output=git_log_s_output)\n except (subprocess.CalledProcessError, ValueError) as e:\n print 'Exception when running git log 
-S\"{line}\"'.format(line=line)\n print 'Exception was {e}'.format(e=e)\n try:\n print 'git_log_s_cmd: {git_log_s_cmd}'.format(\n git_log_s_cmd=git_log_s_cmd)\n except Exception as ex:\n print 'git_log_s_cmd not defined: {ex}'.format(ex=ex)\n print (\n 'Failed to run shlex.split on {git_log_s_str}'.format(\n git_log_s_str=git_log_s_str))\n git_log_s_output = ''\n pass\n\n for git_log_s_line in git_log_s_output.splitlines():\n sha1_s, _ = git_log_s_line.split(' ', 1)\n\n if sha1_s not in commit_info.keys():\n message = None\n if line_type == '+':\n description = (\n 'Adds or removes lines matching a line added in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n elif line_type == '-':\n description = (\n 'Adds or removes lines matching a line removed in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n else:\n print (\n 'Got line_type \"{line_type}\" instead of '\n '\"-\" or \"+\" in _check_diff_add_delete'.format(\n line_type=line_type))\n\n commit_info[sha1_s] = [message]\n\n # Remove this sha1 from branch_sha1s\n if sha1_s in branch_sha1s:\n branch_sha1s.remove(sha1_s)\n\n # If we have already marked all the existing commits in the\n # branch, then break out of the loop\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n return commit_info, branch_sha1s", "def verify_ballot_consistency(self) -> bool:\n sbb_contents = self._sbb.get_sbb_contents()\n \n # First, validate the commitment consistency with the initial vote lists and final vote lists.\n for list_idx, proof in sbb_contents.consistency_proof.items():\n for vote_idx in range(len(proof)):\n proved_sv = proof[vote_idx]\n tu_list = []\n tv_list = []\n for row_idx, sv in enumerate(proved_sv):\n # Ensure that we are consistent with the initial and the final commitments\n if sv.get('u', None) is not None:\n val_init = sv['u_init']\n val_fin = sv['u_fin']\n val_uv = sv['u']\n val_t = sbb_contents.t_values[list_idx][row_idx][vote_idx]['tu']\n original_commitment = sbb_contents.svr_commitments[row_idx][vote_idx]['com_u']\n final_commitment = sbb_contents.vote_lists[list_idx][vote_idx][row_idx].com_u\n else:\n val_init = sv['v_init']\n val_fin = sv['v_fin']\n val_uv = sv['v']\n val_t = sbb_contents.t_values[list_idx][row_idx][vote_idx]['tv']\n original_commitment = sbb_contents.svr_commitments[row_idx][vote_idx]['com_v']\n final_commitment = sbb_contents.vote_lists[list_idx][vote_idx][row_idx].com_v\n key_init = sv['k_init']\n key_fin = sv['k_fin']\n \n # Verify the input and output commitments\n com_init = util.get_COM(util.bigint_to_bytes(key_init), util.bigint_to_bytes(val_init))\n com_fin = util.get_COM(util.bigint_to_bytes(key_fin), util.bigint_to_bytes(val_fin))\n if com_init != original_commitment:\n raise Exception(\"Failed to open the initial vote commitment\")\n if com_fin != final_commitment:\n raise Exception(\"Failed to open the final vote commitment\")\n \n # Verify the t-values\n if util.t_val(util.bigint_to_bytes(val_init), util.bigint_to_bytes(val_uv), self._M) != val_t:\n raise Exception(\"Failed to verify t value\")\n \n # Add t-values to their respective lists for lagrange checks\n tu_list.append(sbb_contents.t_values[list_idx][row_idx][vote_idx]['tu'])\n tv_list.append(sbb_contents.t_values[list_idx][row_idx][vote_idx]['tv'])\n \n # Check that tu_list and tv_list lagrange to (t, -t)\n rows = len(proved_sv)\n tu0 = self._lagrange(tu_list, rows, rows-1, self._M)\n tv0 = self._lagrange(tv_list, rows, rows-1, self._M)\n if util.val(tu0, 
tv0, self._M) != 0:\n # TODO: This does not work\n #raise Exception(\"Failed lagrange verification of t values\")\n pass\n return True", "def _commit_or_print_changes(\n self, commit: bool, ignore_template_commit: bool\n ) -> bool:\n if not commit:\n diff_text, updated = self.diff(\n ignore_template_commit=ignore_template_commit\n )\n if updated:\n indented = textwrap.indent(diff_text, \" \")\n message = f\" > Changes:\\n{indented}\"\n else:\n message = \" > No changes.\"\n click.echo(message)\n else:\n commit_msg = (\n f\"Update from template\\n\\nTemplate version: {self.template_version}\"\n )\n if self.template_commit:\n commit_msg += f\" ({self.template_commit[:7]})\"\n\n updated = self.commit(\n commit_msg, init=False, ignore_template_commit=ignore_template_commit\n )\n\n return updated", "def check_if_introduction(commit, result):\n\ttree_hash, parent_commit_hash, time = search(commit, 'commit')\n\t\n\t# controlling for the case of no parent commits\n\tif parent_commit_hash == '':\n\t\treturn True\n\n\t# controlling for the case of multiple parent commits\n\tall_parent_CI = False\n\tfor parent in parent_commit_hash.split(':'):\n\n\t\tparent_tree_hash = search(parent, 'commit')[0]\n\t\tparent_CI = ci_lookup(parent_tree_hash)\n\t\t\t\n\t\t# checking all the parent commits for the usage of CI\n\t\tall_parent_CI = all_parent_CI or parent_CI\n\t\t\n\t# if the tree has a CI file, while the parent tree does not, it is an introduction\n\treturn not all_parent_CI", "def check_dependency(self, repo, minhash=None):\n try:\n p = Project.objects.get(repo_url=repo)\n except Project.DoesNotExist:\n return False\n j = p.last_successful_job()\n\n if j:\n if minhash:\n if p.commit_in_history(minhash, j.commit):\n # We already have a successful job that is new enough\n return True\n else:\n return True\n\n return False", "def test_style_missing_hooks_in_repo(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [[\".pre-commit-config.yaml\".repos]]\n repo = \"another\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - repo: another\n hooks:\n - id: isort\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 335, \": style file is missing 'hooks' in repo 'another'\")\n )", "def check_working_tree():\n result = _subprocess(['git', '--no-pager', 'diff', '--ignore-submodules=untracked'])\n if result:\n print(result)\n print(f\"Warning: Working tree contains changes to tracked files. 
Please commit or discard \"\n f\"your changes and try again.\")\n exit(1)", "def has_changes(directory=None):\n out = check_output('git status', shell=True, cwd=directory)\n if 'nothing to commit (working directory clean)' in out:\n return False\n if 'nothing to commit, working directory clean' in out:\n return False\n if 'nothing to commit, working tree clean' in out:\n return False\n if 'nothing added to commit' in out:\n return False\n return True", "def test_angular_convention_fix() -> None:\n subject = \"fix: this is a bug fix\"\n commit = Commit(\n commit_hash=\"aaaaaaa\",\n subject=subject,\n author_date=\"1574340645\",\n committer_date=\"1574340645\",\n )\n convention = AngularConvention()\n commit_dict = convention.parse_commit(commit)\n assert not commit_dict[\"is_major\"]\n assert not commit_dict[\"is_minor\"]\n assert commit_dict[\"is_patch\"]", "def cppcheck_on_files(files, commit):\n cppcheck_cmd = local[\"cppcheck\"][\n \"--quiet\",\n \"-j %d\" % (multiprocessing.cpu_count() * 2),\n \"--template={file}###{line}###{severity}###{message}\"]\n\n # Each line in the output is an issue\n review = {}\n rc, out, err = cppcheck_cmd.run(filter_files(files, CPP_SOURCE_FILES),\n retcode=None)\n if len(err) > 0:\n review[\"message\"] = \"[CPPCHECK] Some issues need to be fixed.\"\n\n review[\"comments\"] = defaultdict(list)\n for c in err.split(\"\\n\"):\n if len(c.strip()) == 0: continue\n\n parts = c.split(\"###\")\n\n # Only add a comment if code was changed in the modified region\n if not line_part_of_commit(parts[0], parts[1], commit): continue\n\n review[\"comments\"][parts[0]].append({\n \"path\": parts[0],\n \"line\": parts[1],\n \"message\": \"[{0}] {1}\".format(parts[2], parts[3])\n })\n\n if len(review[\"comments\"]):\n review[\"labels\"] = {\"Code-Review\": -1}\n return json.dumps(review)\n\n # Check the return code only just now as cppcheck might still have returned\n # some valid comments.\n if rc != 0:\n review[\"message\"] = \"[CPPCHECK] Did not complete successfully: \" + out\n return json.dumps(review)\n\n # Add a review comment that no issues have been found\n review[\"message\"] = \"[CPPCHECK] No issues found. 
OK\"\n return json.dumps(review)", "def protect_pr_branch_with_tests_if_any_exist(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def execute_test_protection(change: Change[str], branch: Branch, existing_checks: Set[str],\n known_status_checks: Set[str], known_checkruns: Set[str]) -> Change[str]:\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n\n print_debug(\"[%s] Changing status checks on branch '%s' to [%s]\" %\n (highlight(repo.name), highlight(branch.name),\n highlight(\", \".join(list(all_known_checks)))))\n try:\n if existing_checks:\n branch.edit_required_status_checks(strict=True, contexts=list(all_known_checks))\n else:\n safe_branch_edit_protection(\n branch,\n strict=True,\n contexts=list(all_known_checks),\n )\n except GithubException as e:\n print_error(\"Can't edit required status checks on repo %s branch %s: %s\" %\n (repo.name, branch.name, str(e)))\n return change.failure()\n return change.success()\n\n prb = get_pr_branch(repo, branches)\n if not prb:\n return []\n\n existing_checks = set() # type: Set[str]\n try:\n rqs = prb.get_required_status_checks()\n except GithubException:\n # the repository has currently no status checks\n pass\n else:\n if len(rqs.contexts) > 0:\n # The repository already has some status checks\n existing_checks = set(rqs.contexts)\n print_debug(\"Branch %s on repo %s already has status checks [%s]\" %\n (highlight(prb.name), highlight(repo.name), highlight(\", \".join(existing_checks))))\n\n # the repository currently has no status checks, let's see if any came in within the last 7 days\n sevendaysago = datetime.now() - timedelta(days=7)\n commits = repo.get_commits(prb.name, since=sevendaysago)\n known_status_checks = set() # type: Set[str]\n known_checkruns = set() # type: Set[str]\n for commit in commits:\n for status in commit.get_statuses(): # type: CommitStatus\n if status.context not in known_status_checks:\n print_debug(\"New status check [%s]: %s %s '%s'\" %\n (commit.sha, status.updated_at,\n status.context, status.description))\n known_status_checks.add(status.context)\n for checkrun in commit.get_check_runs(): # type: CheckRun\n if checkrun.name not in known_checkruns:\n print_debug(\"New check run [%s]: %s %s %s\" %\n (commit.sha, checkrun.completed_at, checkrun.name, checkrun.app))\n known_checkruns.add(checkrun.name)\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n print_debug(\"Found status checks [%s]\" % \", \".join(all_known_checks))\n\n if all_known_checks and all_known_checks != existing_checks:\n # add all known checks as required checks\n print_debug('Adding checks [%s] to branch %s on repo %s' %\n (highlight(\", \".join((all_known_checks) - existing_checks)),\n highlight(prb.name), highlight(repo.name)))\n return [Change(\n meta=ChangeMetadata(\n executor=execute_test_protection,\n params=[prb, existing_checks, known_status_checks, known_checkruns]\n ),\n action=ChangeActions.REPLACE if existing_checks else ChangeActions.ADD,\n before=\"%s checks\" % len(existing_checks) if existing_checks else \"No checks\",\n after=\"%s checks\" % len(all_known_checks),\n )]\n return []", "def check_msg_block_on_commit(self, broker, ftd_msgs):\n hits = self._get_hits(broker, re.compile(\"debug Message id=\\\"[0-9a-f-]{36}\\\"; pid=0x[0-9a-f]+: \"\n \"Content release blocked on commit$\", re.MULTILINE))\n self._reconsile_hits(broker, ftd_msgs, hits)", "def 
audit_eol(self):\n\n # Regex's....\n re_commit = re.compile(\"^\\xff(.+)\\xff$\")\n re_filename = re.compile(\"^diff --(cc |git a\\/.+ b\\/)(.+)$\")\n blocked_eol = re.compile(r\"(?:\\r\\n|\\n\\r|\\r)$\")\n\n # Bool to allow special files such as vcards to bypass the check\n eol_allowed = False\n\n\n # Do EOL audit!\n process = get_change_diff( self.repository, [\"-p\"] )\n for line in process.stdout:\n commit_change = re.match( re_commit, line )\n if commit_change:\n commit = commit_change.group(1)\n continue\n\n file_change = re.match( re_filename, line )\n if file_change:\n filename = file_change.group(2)\n eol_violation = False\n eol_allowed = False\n\n # Check if it's an allowed mimetype\n # First - check with the mimetypes system, to see if it can tell\n guessed_type, _ = mimetypes.guess_type(filename)\n if guessed_type in self.ALLOWED_EOL_MIMETYPES:\n eol_allowed = True\n continue\n\n # Second check: by file extension\n # NOTE: This uses the FIRST dot as extension\n splitted_filename = filename.split(os.extsep)\n # Check if there's an extension or not\n # NOTE This assumes that files use dots for extensions only!\n if len(splitted_filename) > 1:\n extension = splitted_filename[1]\n if extension in self.ALLOWED_EOL_EXTENSIONS:\n eol_allowed = True\n\n continue\n\n # Unless they added it, ignore it\n if not line.startswith(\"+\"):\n continue\n\n if re.search( blocked_eol, line ) and not eol_violation:\n # Is this an allowed filename?\n if eol_allowed:\n continue\n\n # Failure has been found... handle it\n eol_violation = True\n self.__log_failure(commit, \"End of Line Style (non-Unix): \" + filename);", "def has_errors_fatal(self) -> bool:", "def test_lt_false(self):\n self.assertFalse(self.instance < Commit('f3ccd0b70fe758b539c28319735d9a6489c0fb10'))", "def test_reuse_options(self):\n\n self.git.commit(\n message='new: XXX commit',\n author='Bob <[email protected]>',\n date='2000-01-01 10:00:00',\n allow_empty=True)\n self.git.commit(\n message='new: XYZ commit',\n author='Bob <[email protected]>',\n date='2000-01-01 10:00:00',\n allow_empty=True)\n self.git.commit(\n message='new: normal commit !minor',\n author='Bob <[email protected]>',\n date='2000-01-01 10:00:00',\n allow_empty=True)\n\n gitchangelog.file_put_contents(\n \".gitchangelog.rc\",\n \"ignore_regexps += [r'XXX', ]\")\n\n changelog = w('$tprog')\n self.assertNotContains(\n changelog, \"XXX\",\n msg=\"Should not contain commit with XXX in it... \"\n \"content of changelog:\\n%s\" % changelog)\n self.assertContains(\n changelog, \"XYZ\",\n msg=\"Should contain commit with XYZ in it... \"\n \"content of changelog:\\n%s\" % changelog)\n self.assertNotContains(\n changelog, \"!minor\",\n msg=\"Shouldn't contain !minor tagged commit neither... \"\n \"content of changelog:\\n%s\" % changelog)", "def exists_ref(self, commit_id):\n pass", "def _check_nothing_changed(self):\n if self.data['history_file'] is None:\n return\n nothing_yet = self.data['nothing_changed_yet']\n if nothing_yet not in self.data['history_last_release']:\n return\n # We want quotes around the text, but also want to avoid\n # printing text with a u'unicode marker' in front...\n pretty_nothing_changed = '\"{}\"'.format(nothing_yet)\n if not utils.ask(\n \"WARNING: Changelog contains {}. Are you sure you \"\n \"want to release?\".format(pretty_nothing_changed),\n default=False):\n logger.info(\"You can use the 'lasttaglog' command to \"\n \"see the commits since the last tag.\")\n sys.exit(1)" ]
[ "0.7050094", "0.69747615", "0.6810369", "0.6753324", "0.67024326", "0.66566247", "0.6637356", "0.65921104", "0.65602815", "0.65418464", "0.6462754", "0.6436193", "0.6383709", "0.6378951", "0.62852246", "0.6269505", "0.6268365", "0.6259074", "0.6205398", "0.6135695", "0.6093538", "0.60787183", "0.6051862", "0.60444015", "0.6023536", "0.5981489", "0.59676933", "0.59646606", "0.59513015", "0.5943752", "0.5943546", "0.5933777", "0.59197515", "0.5904889", "0.5875022", "0.5847279", "0.58420646", "0.5794444", "0.57686865", "0.57486737", "0.57397974", "0.57262385", "0.5713236", "0.5707873", "0.5703362", "0.5669931", "0.56677336", "0.566636", "0.5658723", "0.5655273", "0.56494504", "0.5645224", "0.5641727", "0.5639406", "0.5635189", "0.56302714", "0.56272376", "0.56239635", "0.5623847", "0.5607868", "0.5605654", "0.55760527", "0.55646145", "0.55596876", "0.5556026", "0.5553565", "0.5550862", "0.55429745", "0.553654", "0.5521672", "0.55211323", "0.5510922", "0.5507636", "0.5503143", "0.5502971", "0.55001694", "0.5499875", "0.54939646", "0.54861784", "0.5477912", "0.5467972", "0.54611784", "0.545875", "0.54586524", "0.5439097", "0.5436849", "0.5436368", "0.54272074", "0.5410078", "0.53934586", "0.5391995", "0.5387775", "0.5384113", "0.53812236", "0.53754866", "0.53749347", "0.5367317", "0.5364494", "0.53634894", "0.535661" ]
0.800173
0
Returns the index of the resource to use for making requests to get data; if none of the resources are available, returns the number of seconds (as a negative value) until a resource becomes available
def get_resource_index(self):
    result = -1
    max_sleep_time = self.time_window
    with self._lock:
        while result == -1:
            for i in range(0, self.num_keys):
                curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0)
                max_sleep_time = min(max_sleep_time, curr_sleep_time)
                if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time():
                    self.timers[i][0] = 0
                    self.timers[i][1] = 0
                if self.timers[i][1] < self.window_limit:
                    result = i
                    break
            if result == -1:
                # case when all streams are rate limited
                # logging.warning('sleeping for %d seconds.' % max_sleep_time)
                # time.sleep(max_sleep_time)
                return -1 * max_sleep_time
        if self.timers[result][0] == 0:
            self.timers[result][0] = time.time()
        self.timers[result][1] += 1
        return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_get():\n for resource in resources:\n\n # acquire lock\n res_lock = resource.get(\"lock\")\n res_lock.acquire()\n\n # Get if available\n if resource.get(\"available\") == \"true\":\n # Available - acquire resource and return\n resource.update({\"available\": \"false\"})\n res_lock.release()\n return jsonify(resource.get(\"data\"))\n\n # Not available, release and continue\n res_lock.release()\n\n # All resources are taken\n return app.make_response(('No available resource', 500))", "def ask_for_numbers():\n requests.get(\"http://zero2.local:5000/get_num\", timeout=(20,0.02))\n return 1", "def on_get(self, req, resp):\n try:\n n_reqs = int(req.params.get('n', self.default_reqs))\n except ValueError:\n error_response(resp, 'ERROR: Incorrect number of requests')\n return\n\n urls = self.scheduler.requests(n_reqs)\n resp.data = json.dumps(urls, ensure_ascii=True)\n resp.content_type = \"application/json\"\n resp.status = falcon.HTTP_200", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def do_GET(self):\n global st_point, cur_request\n if time.time() - st_point < 1 and cur_request > args.MAX_REQ:\n self.send_response(429)\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n time.sleep(0.2)\n return\n elif time.time() - st_point > 1:\n st_point = time.time()\n cur_request = 1\n self.func_PARSE()\n if self.parsed_url[2] in [\"/ping\", \"/cats\"]:\n self.func_DO()\n else:\n self.send_response(400)\n text=\"<h1 align=center>Bad request</h1>\"\n self.func_PRINT(text)", "def request_more_resources():\n logger.info(\"NEED MORE RESOURCES!!!!\")", "def retrieve_report(resource, url, key):\n # TODO: manage time\n params = {\"apikey\": key, \"resource\": resource}\n res = requests.post(url, data=params)\n\n while res.status_code == 204 or json.loads(res.text)[\"response_code\"] == -2:\n time.sleep(15)\n res = requests.post(url, data=params)\n\n return res", "def get_num_pages(self) -> Optional[int]:\n timeout: float = 5\n num_attempts = 0\n while num_attempts < 10:\n r = hit_api(self.key_manager, self.url, self.logger, timeout=timeout, method=\"HEAD\")\n\n if r:\n break\n\n timeout = timeout * 1.2\n else:\n raise RuntimeError(\"Unable to get the number of pages of data in 10 attempts\")\n\n if 'last' not in r.links.keys():\n return 1\n \n # get the last url from header\n last_page_url = r.links['last']['url']\n\n parsed_url = urlparse(last_page_url)\n try:\n num_pages = int(parse_qs(parsed_url.query)['page'][0])\n except (KeyError, ValueError):\n return None\n\n return num_pages", "def try_query(pid):\n retries = 1\n while True:\n try:\n query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n return query\n except HTTPError as e:\n if retries > 10:\n raise e\n print(e)\n wait = retries * 15\n time.sleep(wait)\n retries += 1", "def http_call(self, request):\n response = self.session.get(request)\n attempts = 0\n while response.status_code == 429:\n if attempts > 5:\n break\n attempts = attempts + 1\n time.sleep(30)\n response = self.session.get(request)\n response.raise_for_status()\n return response", "def wait_for_not_found(name, read_method, resource_type=None, **kwargs):\n sleep_time = CONF.kubernetes.status_poll_interval\n retries_total = CONF.kubernetes.status_total_retries\n\n commonutils.interruptable_sleep(CONF.kubernetes.start_prepoll_delay)\n\n i = 0\n while i < retries_total:\n try:\n resp = 
read_method(name=name, **kwargs)\n resp_id = resp.metadata.uid\n current_status = resp.status.phase\n except rest.ApiException as ex:\n if ex.status == 404:\n return\n else:\n raise\n else:\n commonutils.interruptable_sleep(sleep_time)\n i += 1\n if i == retries_total:\n raise exceptions.TimeoutException(\n desired_status=\"Terminated\",\n resource_name=name,\n resource_type=resource_type,\n resource_id=resp_id or \"<no id>\",\n resource_status=current_status,\n timeout=(retries_total * sleep_time))", "async def async_get_stage(self, attempts=50):\n\n # Query the API until a sensible (> 0) value is received, or the number of attempts is exceeded\n for attempt in range(attempts):\n res = await self.async_query_api(\"/GetStatus\")\n\n # Return the current loadshedding stage by subtracting 1 from the query result\n # Occasionally the Eskom API will return a negative stage, so simply retry if this occurs\n if res and int(res) > 0:\n return int(res) - 1\n\n # If the query does not succeed after the number of attempts has been exceeded, raise an exception\n raise Exception(\n f\"Error, invalid loadshedding stage received from API after {attempts} attempts\"\n )", "def get_from_index(index, type, id): \n response = None\n \n #Try 3 times to read the document from ES, each time picking a random ES node address in case of failure\n for retries in range(3): \n try:\n response = es.get(index=index, doc_type=type, id=id)\n log(\"ES Get Response :: \" + json.dumps(response))\n except ImproperlyConfigured:\n log(\"ES ImproperlyConfigured!\" + traceback.format_exc())\n continue\n except ElasticsearchException:\n log(\"ES ElasticsearchException!\" + traceback.format_exc())\n continue\n except TransportError:\n log(\"ES TransportError!\" + traceback.format_exc())\n continue\n except NotFoundError:\n log(\"ES NotFoundError!\" + traceback.format_exc())\n continue\n except ConflictError:\n log(\"ES ConflictError!\" + traceback.format_exc())\n continue\n except RequestError:\n log(\"ES RequestError!\" + traceback.format_exc())\n continue\n except SerializationError:\n log(\"ES SerializationError!\" + traceback.format_exc())\n continue\n except ConnectionError:\n log(\"ES ConnectionError!\" + traceback.format_exc())\n continue\n except Exception:\n log(\"ES Exception!\" + traceback.format_exc())\n continue\n finally:\n log(\"Total number of ES read attempts: \" + str(retries + 1))\n #Exit for loop if ES transaction is successful otherwise pick another node and continue retrying\n break\n\n if response is None or response == '':\n return ('false', retries + 1)\n else:\n return ('true', retries + 1)", "def initialize_timer():\n try:\n print_debug(\"Initializing the timer by fetching it on the online API\")\n response = WEB_INSTANCE.open(config.API_LOCATION).read()\n response = response.rstrip()\n print_debug(\"Found \"+str(response)+\" on the online API\")\n save_time_left(response)\n return response\n except Exception, e:\n print(e)\n return 'WAITING'", "def winhttp_WinHttpQueryDataAvailable(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpdwNumberOfBytesAvailable\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def __len__(self, context=None):\n if context is not None:\n context = self._repair_context(context)\n uri = self.rest_services[\"size\"]\n payload=dict()\n if context:\n context = context.n3()\n payload[\"context\"] = context\n r = requests.get(uri, params = payload)\n return int(r.text)", "def do_get(self):\n try:\n res = 
requests.get(self.url, timeout=self.timeout)\n response = make_response(res.__dict__)\n self.elapsed_times.append(response.elapsed)\n self.logger.info( str(response) )\n except MyTimeoutException:\n response = make_response({'url':self.url,'elapsed':-1,'status_code':-1})\n self.elapsed_times.append(self.timeout)\n self.logger.info( str(response) )\n self.fail_count += 1", "def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()", "def task_retry_count():\n retries = flask.request.headers.get(\n 'X-AppEngine-TaskRetryCount')\n if retries is not None:\n return int(retries)\n return None", "def __getitem__(self, index):\n self.wait()\n return self._results.__getitem__(index)", "def _request(payloadString):\n global countRequested\n global lastReqTime\n if lastReqTime is not None and time.time() - lastReqTime < interReqTime:\n timeToSleep = random()*(interReqTime-time.time()+lastReqTime)*2\n logging.info(\"Sleeping for {0} seconds before request.\".format(\n timeToSleep))\n time.sleep(timeToSleep)\n logging.info(\"Issuing request for the following payload: {0}\".format(\n payloadString))\n r = requests.get(\"{0}/{1}\".format(baseUrl, payloadString))\n lastReqTime = time.time()\n countRequested += 1\n if r.status_code == requests.codes.ok:\n return r.text\n else:\n raise Exception(\"Could not process request. \\\n Received status code {0}.\".format(r.status_code))", "def wait(self, timeout=None):\n if self.counter > 0:\n return self.counter\n\n self._wait(timeout) # return value irrelevant, whether we got it or got a timeout\n return self.counter", "def read_blinds_status_from_thingspeak():\n results = 1\n URL='https://api.thingspeak.com/channels/1152832/feeds.json?api_key='\n KEY='4DDGV289MS3GJCBY'\n prev_len_data = 0 #the length of the list of data points collected on the previous loop search\n \n while (1):\n HEADER='&results=%d' % (2**results)\n NEW_URL=URL+KEY+HEADER\n \n try: \n get_data=requests.get(NEW_URL).json()\n \n data = []\n for x in get_data['feeds']:\n print(x['field3'])\n data.append(x['field3']) #get lightstatus\n #End for\n \n index = search_for_nums(data) #searching for most recent lightstatus input\n \n if index != None: #found most recent data\n print(\"data point found...blindsstatus: %s \" % (data[index]))\n return int(data[index])\n else:\n print(\"missing data point\")\n results += 1\n \n if prev_len_data == len(data): #if the list of data previously collected is the same as the current\n print (\"No data points currently exist\") #all current available data has been exhausted. Move on\n return\n else: \n prev_len_data = len(data) #there are more points available. 
try again.\n #END if\n #END if\n except:\n print (\"Error reading blinds_status from ThingSpeak\")\n #END try-except\n #END WHILE", "def getRetryCount():\n return int(webapp2.get_request().headers.get('X-Appengine-TaskRetryCount', 0))", "def request(self):\n xml = urllib2.urlopen(self.url, self.data, self.timeout).read()\n if int(xml.count('name')) >= 0:\n self.items['used_download'] = int(xml.count('name'))/2 - 1\n self.items['download'] = self.items['used_download']\n return self", "def get_next_client_index(self, write=True):\r\n if write or len(self._server) == 1:\r\n return 0\r\n\r\n return random.randint(1, len(self._server) - 1)", "async def fetch_one(session, num):\n\n url = f'https://projecteuler.net/overview={num:03}'\n\n async with async_timeout.timeout(10):\n async with session.get(url) as response:\n if response.status == 200:\n if response.headers['Content-Type'] == 'application/pdf':\n data = await response.read()\n filename = f'{num:03}_overview.pdf'\n\n # Working with files synchronously as async would require a thread pool\n with get_path('doc', filename).open('wb') as pdf_file:\n pdf_file.write(data)\n return num\n else:\n print(f\"Got {response.status} while fetching {url}\")", "def rest_api_status(self):\n with self.resource_lock:\n pass", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' 
% request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def do_GET(self):\n for i in range(0,5):\n \"\"\" gather status update time\"\"\"\n f = open(STATUSTIME, \"rb\")\n try:\n mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)\n last = int(mm.readline())\n mm.seek(0)\n mm.close()\n except ValueError as e:\n print(e.message + str(i) + ' failed to read status time')\n continue\n f.close()\n \"\"\" gather json status \"\"\"\n st = open(STATUSFILE, \"rb\")\n try:\n buf = mmap.mmap(st.fileno(), 0, access=mmap.ACCESS_READ)\n raw = (buf.read(len(buf)))\n #print('reading status ' + hashlib.sha1(raw).hexdigest())\n except ValueError as e:\n print(e.message + str(i) + ' failed to read json status')\n continue\n data = None\n if raw is not None:\n try:\n data = raw\n #data = json.loads(raw)\n except ValueError as e:\n print(e.message + str(i) + ' failed to load json status')\n continue\n \"\"\" all done - exit for loop\"\"\"\n break\n else:\n print('all attempts failed')\n self.send_response(500)\n self.end_headers()\n self.wfile.write('\\n')\n return\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n# message = threading.currentThread().getName() + ' ' + str(last) + ' ' +str(data)\n# message = str(raw)\n message = str(data)\n \n self.wfile.write(message)\n self.wfile.write('\\n')\n return", "def robust_request(twitter, resource, params, max_tries=5):\n \n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def get_num_to_request():\n #ALLOWABLE_REQUEST_SIZES = [1, 3, 5, 8, 12]\n ALLOWABLE_REQUEST_SIZES = [1, 3, 5, 8]\n avgrate = jobtracker.query(\"SELECT AVG(files.size/\" \\\n \"(TO_SECONDS(download_attempts.updated_at)*1/86400. 
- \" \\\n \"TO_SECONDS(download_attempts.created_at)*1/86400.)) \" \\\n \"FROM files, download_attempts \" \\\n \"WHERE files.id=download_attempts.file_id \" \\\n \"AND download_attempts.status='downloaded'\", \\\n fetchone=True)\n avgsize = jobtracker.query(\"SELECT AVG(size/numrequested) FROM requests \" \\\n \"WHERE numbits=%d AND \" \\\n \"file_type='%s'\" % \\\n (config.download.request_numbits, \\\n config.download.request_datatype.lower()), \\\n fetchone=True)\n if avgrate is None or avgsize is None:\n return min(ALLOWABLE_REQUEST_SIZES)\n\n # Total number requested that can be downloaded per day (on average).\n max_to_request_per_day = avgrate/avgsize\n \n used = get_space_used()\n avail = get_space_available()\n reserved = get_space_committed()\n \n # Maximum number of bytes that we should request\n max_bytes = min([avail-reserved-config.download.min_free_space, \\\n config.download.space_to_use-reserved-used])\n # Maximum number to request\n max_num = max_bytes/avgsize\n\n ideal_num_to_request = min([max_num, max_to_request_per_day])\n\n if debug.DOWNLOAD:\n print \"Average dl rate: %.2f bytes/day\" % avgrate\n print \"Average size per request unit: %d bytes\" % avgsize\n print \"Max can dl per day: %d\" % max_to_request_per_day\n print \"Max num to request: %d\" % max_num\n print \"Ideal to request: %d\" % ideal_num_to_request\n\n # Return the closest allowable request size without exceeding\n # 'ideal_num_to_request'\n num_to_request = max([0]+[N for N in ALLOWABLE_REQUEST_SIZES \\\n if N <= ideal_num_to_request])\n\n return num_to_request", "def test_05_user_progress(self):\r\n url = '/api/app/1/userprogress'\r\n self.check_limit(url, 'get', 'app')", "def access_url_repeatedly(context, url, repeat_count, delay):\n context.api_call_results = []\n url = context.coreapi_url + url\n\n # repeatedly call REST API endpoint and collect HTTP status codes\n for i in range(repeat_count):\n response = requests.get(url)\n context.api_call_results.append(response.status_code)\n time.sleep(delay)", "def _obtain(self):\n\n while True:\n # make sure we're observing load maximums\n if self.max_load is not None:\n try:\n load = os.getloadavg()\n if jobserver_running_jobs() > 0 and load[1] > self.max_load:\n time.sleep(0.01)\n continue\n except NotImplementedError:\n pass\n\n # make sure we're observing memory maximum\n if self.max_mem is not None:\n mem_used, mem_total = memory_usage()\n mem_percent_used = 100.0 * float(mem_used) / float(mem_total)\n if jobserver_running_jobs() > 0 and mem_percent_used > self.max_mem:\n time.sleep(0.01)\n continue\n\n # get a token from the job pipe\n try:\n token = os.read(self.job_pipe[0], 1)\n return token\n except OSError as e:\n if e.errno != errno.EINTR:\n raise", "def data_wait(self):\n return self.get(timeout=self._timeout)", "def get():\n global __internal_state_index_counter\n __internal_state_index_counter += long(1)\n return __internal_state_index_counter", "def load():\n\n time.sleep(0.2) # Used to simulate delay\n\n if request.args:\n counter = int(request.args.get(\"c\")) # The 'counter' value sent in the QS\n\n if counter == 0:\n print(f\"Returning posts 0 to {quantity}\")\n # Slice 0 -> quantity from the db\n res = make_response(jsonify(db[0: quantity]), 200)\n\n elif counter == posts:\n print(\"No more posts\")\n res = make_response(jsonify({}), 200)\n\n else:\n print(f\"Returning posts {counter} to {counter + quantity}\")\n # Slice counter -> quantity from the db\n res = make_response(jsonify(db[counter: counter + quantity]), 200)\n\n return 
res", "def listget(base_url, keys, throttle, generic_rate, max_lookback, tmpdir, repo_configs, error_rate, get_rate):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise List/Get; base_url:{a}, throttle:{b}, generic_rate:{c}, max_lookback:{d}, tmpdir:{g}, error_rate:{h}, get_rate:{i}\".format(x=tname, a=base_url, b=throttle, c=generic_rate, d=max_lookback, g=tmpdir, h=error_rate, i=get_rate))\n\n genopts = [\"generic\", \"specific\"]\n genprobs = [generic_rate, 1 - generic_rate]\n\n getopts = [\"get\", \"leave\"]\n getprobs = [get_rate, 1 - get_rate]\n\n erropts = [\"err\", \"ok\"]\n errprobs = [error_rate, 1 - error_rate]\n\n errtypes = [\"page\", \"page_size\", \"missing_since\", \"malformed_since\"]\n errtypeprobs = [0.25] * 4\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n reqtype = _select_from(genopts, genprobs)\n #print \"Req: \" + reqtype\n\n # use this to determine the repository id for the request\n repository_id = None\n if reqtype == \"specific\":\n config = _select_from(repo_configs)\n repository_id = config.get(\"repository\")\n\n # determine the \"since\" date we're going to use for the request\n lookback = randint(0, max_lookback)\n since = dates.format(dates.before_now(lookback))\n # print \"Since: \" + since\n\n # choose a page size\n page_size = randint(1, 100)\n\n # now decide, after all that, if we're going to send a malformed request\n err = _select_from(erropts, errprobs)\n\n # if we are to make an erroneous request, go ahead and do it\n if err == \"err\":\n # choose a kind of malformed request\n malformed = _select_from(errtypes, errtypeprobs)\n params = {\"page\" : 1, \"pageSize\" : page_size, \"since\" : since}\n if malformed == \"page\":\n params[\"page\"] = \"one\"\n elif malformed == \"page_size\":\n params[\"pageSize\"] = \"twelvty\"\n elif malformed == \"missing_since\":\n del params[\"since\"]\n else:\n params[\"since\"] = \"a week last thursday\"\n\n # make the malformed url with the JPER client, so we know it gets there ok\n url = j._url(\"routed\", id=repository_id, params=params)\n app.logger.debug(\"Thread:{x} - List/Get sending malformed request for Account:{y} Type:{z} Error:{a} URL:{b}\".format(x=tname, y=api_key, z=reqtype, a=malformed, b=url))\n\n # make the request, and check the response\n resp = http.get(url)\n if resp is not None and resp.status_code == 400:\n app.logger.debug(\"Thread:{x} - List/Get received correct 400 response to malformed request\".format(x=tname))\n else:\n if resp is None:\n sc = None\n else:\n sc = resp.status_code\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; did not receive 400 response to malformed request, got {y}; URL:{z}\".format(x=tname, y=sc, z=url))\n\n # continue, so that we don't have to indent the code below any further\n continue\n\n # if we get to here, we're going to go ahead and do a normal request\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} Type:{z} Since:{a}\".format(x=tname, y=api_key, z=reqtype, a=since))\n\n # iterate over the notifications, catching any errors (which would be unexpected)\n try:\n count = 0\n for note in j.iterate_notifications(since, repository_id, page_size):\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} listing notifications for Repository:{z} retrieved Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n count += 1\n\n # determine 
if we're going to get the notification by itself (which is technically unnecessary, of course, but who knows what people's workflows will be)\n reget = _select_from(getopts, getprobs)\n if reget == \"get\":\n try:\n n = j.get_notification(note.id)\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} listing notifications for Repository:{z}, successfully retrieved copy of Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=note.id, b=e.message))\n\n # now retrieve all the links in the note\n for link in note.links:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} on Repository:{b}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=note.id, a=url, b=repository_id))\n try:\n stream, headers = j.get_content(url)\n except client.JPERAuthException as e:\n # we got a 401 back from the service, that is acceptable, since we may not be authorised to access it\n app.logger.debug((\"Thread:{x} - get content unauthorised (401) for Content:{z} - this can happen, so is not necessarily unexpected\".format(x=tname, z=url)))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n\n app.logger.debug(\"Thread:{x} - List/Get request completed successfully for Account:{y} listing notifications for Repository:{z} Count:{a}\".format(x=tname, y=api_key, z=repository_id, a=count))\n\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; List/Get request for Account:{y} listing notifications for Repository:{z} resulted in exception '{e}'\".format(x=tname, y=api_key, z=repository_id, e=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def test_get_stream_too_many_requests(req):\n req.get(ENTREZ_URL, text=u'Whoa, slow down', status_code=429, headers={\"Retry-After\": \"2\"})\n params = dict(id='FAKE')\n with pytest.raises(TooManyRequests):\n core.get_stream(ENTREZ_URL, params)", "def test_flask_usage_request(self):\n beforeRequestCount = FlaskUsage.query.filter_by(path=\"/index\").count()\n response = self.client.get(url_for('main.index'))\n self.assertEqual(FlaskUsage.query.filter_by(path=\"/index\").count(), (beforeRequestCount+1))", "def call(url):\n result = requests.get(url)\n if 300 <= result.status_code < 400:\n raise TemporaryException\n if result.status_code == 429:\n raise ApiCountZeroException\n if 400 <= result.status_code < 600:\n raise PermanentException\n return result", "def next_available_number(cls):\n try:\n return cls.objects.latest().number + 1\n except cls.DoesNotExist:\n return 1", "def test_get_resource(self):\n for i in range(11):\n self.app.post(f'/v1/resource/{ResourceTypeName.get()}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n self._test_paging('/v1/resources', admin_headers, 10, 'resources')", "def bleep(url):\n print(url)\n r = requests.get(url)\n\n try:\n next_page = r.links[\"next\"][\"url\"]\n except KeyError:\n next_page = None\n\n print(\"r.status_code\", r.status_code)\n print(\"X-Ratelimit-Limit\", r.headers[\"X-Ratelimit-Limit\"])\n 
print(\"X-Ratelimit-Remaining\", r.headers[\"X-Ratelimit-Remaining\"])\n\n if r.status_code == 200:\n return r.json(), next_page\n\n return None, None", "def url_get(url: str, maxtry: int = 3, timeout: int = 10) -> io.BytesIO: # pragma: no cover\n\n if maxtry <= 0:\n raise ValueError(\"Parameter maxtry should be greater than zero.\")\n for ntry in range(maxtry):\n try:\n rspns = urlrequest.urlopen(url, timeout=timeout)\n cntnt = rspns.read()\n break\n except urlerror.URLError as e:\n if not isinstance(e.reason, socket.timeout):\n raise\n\n return io.BytesIO(cntnt)", "def pollTillAvailable(self):\n item = self.getItem()\n while item is None:\n item = self.getItem()\n\n return item", "def get_requests(url, user, passwd):\n \n #get\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n \n #if timout\n if r.status_code == 403:\n print(\"LIMIT EXCEEDED\")\n print(\"WAIT AN HOUR\")\n i=1\n while r.status_code != 200:\n time.sleep(60)\n r = requests.get(url, auth=HTTPBasicAuth(user, passwd))\n print(\"{} MINUTES ELAPSED\".format(i))\n i+=1\n elif r.status_code != 200:\n print(r.status_code)\n return []\n #return data\n data = r.json()\n return data", "def start(self):\n self.log.info(\"start\")\n # verify that we have enough free space for the download\n # may not be accurate since we likely have not fetched the\n # the size of the objects to be downloaded.\n # But s3parallel should check for us prior to sending the request.\n freespace = self.freespace\n download_size = self.downloadsize()\n if download_size > freespace:\n msg = \"not enough free space to download: \" + str(download_size)\n msg += \" bytes, \" + str(freespace) + \" available\"\n self.log.info(msg)\n raise IOError(msg)\n\n count = self.update()\n return count", "def sync(self):\n self.send()\n detail_count = summary_count = 0\n while self.responses:\n response = self.responses[0]\n while not response.complete:\n detail_delta, summary_delta = self.fetch()\n detail_count += detail_delta\n summary_count += summary_delta\n return detail_count, summary_count", "def get(self):\n if self.result_data.get(self.get_idx + 1) is not None:\n self.get_idx += 1\n res = self.result_data[self.get_idx]\n del self.result_data[self.get_idx]\n return res\n while True:\n res = self.result_queue.get(block=False)\n idx = res.id\n if idx == self.get_idx + 1:\n self.get_idx += 1\n return res\n self.result_data[idx] = res", "def run():\n check_active_requests()\n start_downloads()\n check_download_attempts()\n numsuccess = verify_files()\n recover_failed_downloads()\n check_downloading_requests()\n acknowledge_downloaded_files()\n if can_request_more():\n make_request()\n return numsuccess", "def handle_post():\n for resource in resources:\n\n if resource.get(\"data\").get(\"ip\") == request.get_json().get(\"ip\"):\n # acquire lock\n res_lock = resource.get(\"lock\")\n res_lock.acquire()\n\n # POST if not available\n if resource.get(\"available\") == \"false\":\n # Not available - release resource and return\n resource.update({\"available\": \"true\"})\n res_lock.release()\n return app.make_response(('Resource was successfully released', 200))\n\n else:\n # Resource is already available\n res_lock.release()\n return app.make_response(('Resource was already available', 200))\n\n # Could not find resource\n return app.make_response(('Resource not found', 500))", "def get_etherscan_calls() -> int:\n return _get_counter(\"etherscan_calls\")", "def current_requests(self):\n return len(self._current_requests)", "def get_amount_of_reports():\n try: \n return 
get_amount_of_reports_dao(), 200\n except: \n return \"An error ocurred\", 404", "def read_light_status_from_thingspeak():\n results = 1\n URL='https://api.thingspeak.com/channels/1152832/feeds.json?api_key='\n KEY='4DDGV289MS3GJCBY'\n prev_len_data = 0 #the length of the list of data points collected on the previous loop search\n \n while (1):\n HEADER='&results=%d' % (2**results)\n NEW_URL=URL+KEY+HEADER\n \n try:\n get_data=requests.get(NEW_URL).json()\n \n data = []\n for x in get_data['feeds']:\n print(x['field2'])\n data.append(x['field2']) #get lightstatus\n #END for\n \n print (\"length of data = %d \" % (len(data)))\n \n index = search_for_nums(data) #searching for most recent lightstatus input\n \n if index != None: #found most recent data\n print(\"data point found...lighstatus: %s \" % (data[index]))\n return int(data[index])\n else:\n print(\"missing data point\")\n results += 1\n \n if prev_len_data == len(data): #if the list of data previously collected is the same as the current\n print (\"No data points currently exist\") #all current available data has been exhausted. Move on\n return\n else: \n prev_len_data = len(data) #there are more points available. try again. \n #END if\n #END if\n except:\n print (\"Error reading light_status from ThingSpeak\")\n #END try-except\n #END WHILE", "def get_timeout(self) -> int:", "def test_requests_are_throttled(self):\n request = self.factory.get('/')\n for dummy in range(4):\n response = MockView.as_view()(request)\n assert response.status_code == 429", "def check_server(host, port, path_info='/', timeout=3, retries=30):\n if retries < 0:\n return 0\n time.sleep(.3)\n for i in range(retries):\n try:\n conn = http_client.HTTPConnection(host, int(port), timeout=timeout)\n conn.request('GET', path_info)\n res = conn.getresponse()\n return res.status\n except (socket.error, http_client.HTTPException):\n time.sleep(.3)\n return 0", "def _get(self, url, **kwargs):\n retries = self.max_retries\n delay = 1\n while retries > 0:\n try:\n return self._call('GET', url, kwargs)\n except HTTPError as e: # Retry for some known issues\n retries -= 1\n status = e.response.status_code\n if status == 429 or (500 <= status < 600):\n if retries < 0:\n raise\n else:\n sleep_seconds = int(e.headers.get('Retry-After', delay))\n print('retrying ...' + str(sleep_seconds) + ' secs')\n time.sleep(sleep_seconds + 1)\n delay += 1\n else:\n raise\n except Exception as e:\n print('exception', str(e))\n retries -= 1\n if retries >= 0:\n print('retrying ...' 
+ str(delay) + 'secs')\n time.sleep(delay + 1)\n delay += 1\n else:\n raise", "def count(options=None):\n if options is None:\n return requests.get(\"/count\")\n else:\n return requests.get(\"/count\", options)", "def get_first_n_pending_links(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT link FROM link WHERE chunk_id IS NULL AND state = 'pending' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def request_data(url): \n requests_cache.install_cache('data_cache')\n while True:\n data = requests.get(url)\n if not data.status_code == 200 or \"try again later\" in data.text:\n continue\n else:\n break\n return data.text", "def get(self):\n get_idx = self.get_indices_ls[0]\n if self.result_data.get(get_idx) is not None:\n res = self.result_data[get_idx]\n del self.result_data[get_idx]\n del self.get_indices_ls[0]\n return res\n\n while True:\n res = self.result_queue.get(block=False)\n idx = res.id\n if idx == get_idx:\n del self.get_indices_ls[0]\n return res\n self.result_data[idx] = res", "def in_waiting(self):\n [ack, txcount, rxcount] = self._GetResponseFrame()\n return rxcount", "def ping(self, times=10):\n logging.debug(\"checking for rate limit info\")\n url = \"https://api.twitter.com/1.1/application/rate_limit_status.json?resources=search\"\n response = self.client.get(url)\n result = response.json()\n\n # look for limits in the json or the http headers, which can\n # happen when we are rate limited from checking the rate limits :)\n\n if \"resources\" in result:\n self.reset = int(result[\"resources\"][\"search\"][\"/search/tweets\"][\"reset\"])\n self.remaining = int(result[\"resources\"][\"search\"][\"/search/tweets\"][\"remaining\"])\n elif 'x-rate-limit-reset' in response.headers:\n self.reset = int(response.headers[\"x-rate-limit-reset\"])\n self.remaining = int(response.headers[\"x-rate-limit-remaining\"])\n else:\n logging.error(\"missing x-rate-limit-reset in headers: %s\", response.headers)\n if times == 0:\n logging.error(\"ping isn't working :(\")\n raise Exception(\"unable to ping\")\n else:\n times -= 1\n time.sleep(1)\n logging.info(\"trying to ping again: %s\", times)\n return self.ping(times)\n\n logging.info(\"new rate limit remaining=%s and reset=%s\",\n self.remaining, self.reset)", "def doRequest(self, partial_url, params):\n\n self.calls_to_timeout += 1\n\n self.number_of_max_req -= 1\n\n if self.number_of_max_req <= 0:\n self.getNextApiKey()\n\n url = self.base_url + partial_url\n\n while True:\n\n try:\n response = self.session.get(url, params=params, timeout=self.req_timeout)\n if not (response.status_code == 200 or response.status_code == 404):\n log.info(\n 'response.status_code {} received. For current key having still {} requests to do. Sleeping {} seconds ...'.format(\n response.status_code, self.number_of_max_req, TIMEOUT))\n self.doTimeout()\n continue\n\n except Exception as e:\n log.info('Exception occurred: {}'.format(e))\n log.info('For current key having still {} requests to do. 
Sleeping {} seconds ...'.format(\n self.number_of_max_req, TIMEOUT))\n self.doTimeout()\n continue\n\n break\n\n try:\n responseJson = response.json()\n log.debug(responseJson)\n\n # record could not be found/does not exist\n if response.status_code == 404:\n responseJson = None\n\n return responseJson\n\n except Exception as e:\n log.error('Parsing the following response to Json failed: {}'.format(response))\n log.error('Exception occurred: {}'.format(e))\n return None", "def test_get_counters_url_creation(self):\n token = '7f56b1a9d80648c7b87ae588a905a9be'\n http = 'https://api-metrika.yandex.ru/management/v1/counters?oauth_token=%s'\n req1 = requests.get(http % token)\n req2 = requests.get(API + COUNTERS % (token))\n self.assertEqual(req1.url, req2.url)", "def Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "async def _request_one(self, url, header, id, index, session):\n async with session.get(url=url, headers=header) as resp:\n status = resp.status\n if status == '200':\n self.logger.info(\"[%s] Request for url %s, header: %s\", index, url, header)\n result = await resp.text()\n\n # successful\n self.logger.info(\"[%s] [Successful] Request Success for url %s\", index, url)\n if id not in self.processed_ids:\n self.processed_ids.add(id)\n\n async with aiofiles.open(self.result_file_dir + id + \".txt\", \"w\") as f:\n await f.write(result)\n self.logger.info(\"[%s] Wrote results for source URL: %s\",index, url)\n else:\n self.logger.info(\"[%s] [ERROR] Request error for url %s, status %s\", index, url, resp.status)\n if resp == '429':\n time.sleep(1000)", "def test_index_availability(client):\n response = client.get('/')\n assert response.status_code == 200", "def available(self):\n if self._count is not None:\n # If count is available, use it\n return self._count\n else:\n # We really have no idea.\n # Don't know what do do here, but for this\n # impl, which should only be constructed with\n # python lists, self._count should never be none.\n return 0", "def get_awaiting_request(self):\n return self.client.in_flight_request_count()", "def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)", "def fetch_data(swarming, start, end, state, tags):\n # Split the work in days. That's a lot of requests to do.\n queue = Queue.Queue()\n threads = []\n def run(start, cmd):\n data = json.loads(subprocess.check_output(cmd))\n queue.put((start, int(data['count'])))\n\n day = start\n while day != end:\n data = [\n ('start', int((day - _EPOCH).total_seconds())),\n ('end', int((day + datetime.timedelta(days=1)-_EPOCH).total_seconds())),\n ('state', state),\n ]\n for tag in tags:\n data.append(('tags', tag))\n cmd = [\n sys.executable, os.path.join(CLIENT_DIR, 'swarming.py'),\n 'query', '-S', swarming, 'tasks/count?' 
+ urllib.urlencode(data),\n ]\n thread = threading.Thread(target=run, args=(day.strftime('%Y-%m-%d'), cmd))\n thread.daemon = True\n thread.start()\n threads.append(thread)\n while len(threads) > 100:\n # Throttle a bit.\n for i, thread in enumerate(threads):\n if not thread.is_alive():\n thread.join()\n threads.pop(i)\n sys.stdout.write('.')\n sys.stdout.flush()\n break\n day = day + datetime.timedelta(days=1)\n\n while threads:\n # Throttle a bit.\n for i, thread in enumerate(threads):\n if not thread.is_alive():\n thread.join()\n threads.pop(i)\n sys.stdout.write('.')\n sys.stdout.flush()\n break\n print('')\n data = []\n while True:\n try:\n data.append(queue.get_nowait())\n except Queue.Empty:\n break\n return dict(data)", "def range_request(numbytes):\n\n if numbytes <= 0 or numbytes > (100 * 1024):\n response = Response(\n headers={\"ETag\": \"range%d\" % numbytes, \"Accept-Ranges\": \"bytes\"}\n )\n response.status_code = 404\n response.data = \"number of bytes must be in the range (0, 102400]\"\n return response\n\n params = CaseInsensitiveDict(request.args.items())\n if \"chunk_size\" in params:\n chunk_size = max(1, int(params[\"chunk_size\"]))\n else:\n chunk_size = 10 * 1024\n\n duration = float(params.get(\"duration\", 0))\n pause_per_byte = duration / numbytes\n\n request_headers = get_headers()\n first_byte_pos, last_byte_pos = get_request_range(request_headers, numbytes)\n range_length = (last_byte_pos + 1) - first_byte_pos\n\n if (\n first_byte_pos > last_byte_pos\n or first_byte_pos not in xrange(0, numbytes)\n or last_byte_pos not in xrange(0, numbytes)\n ):\n response = Response(\n headers={\n \"ETag\": \"range%d\" % numbytes,\n \"Accept-Ranges\": \"bytes\",\n \"Content-Range\": \"bytes */%d\" % numbytes,\n \"Content-Length\": \"0\",\n }\n )\n response.status_code = 416\n return response\n\n def generate_bytes():\n chunks = bytearray()\n\n for i in xrange(first_byte_pos, last_byte_pos + 1):\n\n # We don't want the resource to change across requests, so we need\n # to use a predictable data generation function\n chunks.append(ord(\"a\") + (i % 26))\n if len(chunks) == chunk_size:\n yield (bytes(chunks))\n time.sleep(pause_per_byte * chunk_size)\n chunks = bytearray()\n\n if chunks:\n time.sleep(pause_per_byte * len(chunks))\n yield (bytes(chunks))\n\n content_range = \"bytes %d-%d/%d\" % (first_byte_pos, last_byte_pos, numbytes)\n response_headers = {\n \"Content-Type\": \"application/octet-stream\",\n \"ETag\": \"range%d\" % numbytes,\n \"Accept-Ranges\": \"bytes\",\n \"Content-Length\": str(range_length),\n \"Content-Range\": content_range,\n }\n\n response = Response(generate_bytes(), headers=response_headers)\n\n if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):\n response.status_code = 200\n else:\n response.status_code = 206\n\n return response", "def check_load(cursor):\n cursor.execute(\"\"\"\n select pid from pg_stat_activity where query ~* 'FETCH'\n and datname = 'asos'\"\"\")\n if cursor.rowcount > 9:\n sys.stderr.write((\"/cgi-bin/request/metars.py over capacity: %s\"\n ) % (cursor.rowcount,))\n ssw(\"Content-type: text/plain\\n\")\n ssw('Status: 503 Service Unavailable\\n\\n')\n ssw(\"ERROR: server over capacity, please try later\")\n sys.exit(0)", "def _poll_advance_read(self, busy):\n if busy == 0:\n self._async_status = consts.ASYNC_DONE\n return consts.POLL_OK\n\n if busy == 1:\n return consts.POLL_READ\n\n return consts.POLL_ERROR", "def getfile(link):\r\n\r\n global args\r\n\r\n file_data = None\r\n trying_count = 1\r\n print('')\r\n 
while file_data == None and trying_count <= args.try_limit:\r\n try:\r\n print('\\rTrying to get {}... ({}/{})'\r\n .format(link, trying_count, args.try_limit), end='')\r\n back_data = requests.get(link)\r\n if back_data.status_code == 200:\r\n file_data = back_data.content\r\n except:\r\n pass\r\n trying_count += 1\r\n if file_data == None:\r\n print(' FAILED!')\r\n else:\r\n print(' SUCCESS.')\r\n return file_data", "def _find_wait_key_and_regist_event(self):\n with self.pool.reserve() as client:\n client.incr(self.event_key)\n result = client.incr(self.wait_key)\n\n return result", "def identify_num_simulate():\r\n # http / https ip:port\r\n proxy = {'http': 'http://112.85.165.113:9999'}\r\n response = requests.get(base_url + '/get', proxies=proxy)\r\n print(response.status_code)\r\n print(response.text)", "def get_statistics():\n logger.info(\"Started request\")\n if os.path.exists(app_config['datastore']['filename']):\n with open(app_config['datastore']['filename']) as f:\n data = json.loads(f.read())\n\n logging.debug(\"Request data: {}\".format(data))\n logging.info(\"Request completed\")\n\n return data, 200\n else:\n logger.error(\"File not found\")\n return 404", "def request_large_data(path, params):\r\n start = 0\r\n result = []\r\n while True:\r\n process = __split_path(path)\r\n if process['status'] == 'success':\r\n conn = process['conn']\r\n info = MetaInfo()\r\n header = {'vendor_key': info.vendor_id}\r\n # check 2.x and 3.x differences in using urllib\r\n params['start'] = str(start)\r\n try:\r\n conn.request(\"GET\", process['req_path'] + \"?\" +\r\n urllib.urlencode(params), headers=header)\r\n except AttributeError:\r\n conn.request(\"GET\", process['req_path'] + \"?\" +\r\n urllib.parse.urlencode(params), headers=header)\r\n resp = conn.getresponse()\r\n\r\n if resp.status != 200:\r\n print(\"Code: \" + str(resp.status))\r\n resp_obj = HTTPConnect(resp.status, resp.read()).json()\r\n print(resp_obj)\r\n break\r\n resp_obj_read = resp.read()\r\n resp_obj = HTTPConnect(resp.status, resp_obj_read).json()\r\n if type(resp_obj) is str:\r\n try:\r\n resp_obj = json.loads(resp_obj)\r\n except:\r\n # return error msg\r\n print(\"parse json str failed\")\r\n # print(resp_obj)\r\n break\r\n elif type(resp_obj) is dict:\r\n try:\r\n resp_obj = json.loads(json.dumps(resp_obj))\r\n except:\r\n print(\"parse dict failed\")\r\n print(resp_obj)\r\n break\r\n else:\r\n print(\"result not str\")\r\n print(resp_obj)\r\n break\r\n\r\n total = int(resp_obj['total'])\r\n next_start = int(resp_obj['next_start'])\r\n\r\n result.extend(resp_obj['data'])\r\n print(\"Finish extracting: \" + str(start+1) + \" to \" + str(next_start-1) + \" , remaining: \"\r\n + str(total - next_start))\r\n start = next_start\r\n conn.close()\r\n if start == total:\r\n break\r\n return result", "def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)", "def timeout_handle():\r\n response = requests.get(base_url + '/get', params=None, timeout=3)\r\n print(response.json)\r\n print(response.status_code)", "def poll(cid, resource):\n cur = db.cursor()\n\n for r in resources:\n cur.execute(\"SELECT has_%s FROM starting_resources WHERE cid='%s'\" %\n (r, cid))\n code = cur.fetchone()\n if 
code[0] != \"0\":\n if r == resource:\n return True\n \n cur.execute(\"SELECT has_%s FROM acquired_resources WHERE cid='%s'\" %\n (r, cid))\n code = cur.fetchone()\n if code[0] != \"0\":\n if r == resource:\n return True\n\n return False", "async def limited_req(path, session, semaphore, **kwargs):\n async with semaphore:\n async with session.get('https://iceprod2-api.icecube.wisc.edu'+path, params=kwargs) as response:\n return await response.json()", "def get_res_port():\n return get_port() + 1", "async def monitor():\n global counter\n while True:\n time.sleep(1)\n print(counter, 'reqs/sec')\n counter = 0", "def get_count_request():\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n\n get_ip = \"\"\"SELECT ip FROM Status\"\"\"\n\n ip = cursor.execute(get_ip).fetchall()\n\n get_count = \"\"\"SELECT count_requests FROM Status\"\"\"\n\n count_requests = cursor.execute(get_count).fetchall()\n\n get_protocol = \"\"\"SELECT protocol FROM Status\"\"\"\n\n protocol = cursor.execute(get_protocol).fetchall()\n\n cursor.close()\n sql.close()\n\n return zip(ip, count_requests, protocol)", "def get(resource, lookup):\n\n documents = []\n response = {}\n etag = None\n req = parse_request(resource)\n embedded_fields = _resolve_embedded_fields(resource, req)\n\n # facilitate cached responses\n if req.if_modified_since:\n # client has made this request before, has it changed?\n # this request does not account for deleted documents!!! (issue #243)\n preflight_req = copy.copy(req)\n preflight_req.max_results = 1\n\n cursor = app.data.find(resource, preflight_req, lookup)\n if cursor.count() == 0:\n # make sure the datasource is not empty (#243).\n if not app.data.is_empty(resource):\n # the if-modified-since conditional request returned no\n # documents, we send back a 304 Not-Modified, which means that\n # the client already has the up-to-date representation of the\n # resultset.\n status = 304\n last_modified = None\n return response, last_modified, etag, status\n\n # continue processing the full request\n last_update = epoch()\n req.if_modified_since = None\n cursor = app.data.find(resource, req, lookup)\n\n for document in cursor:\n _build_response_document(document, resource, embedded_fields)\n documents.append(document)\n\n # build last update for entire response\n if document[config.LAST_UPDATED] > last_update:\n last_update = document[config.LAST_UPDATED]\n\n status = 200\n last_modified = last_update if last_update > epoch() else None\n\n # notify registered callback functions. 
Please note that, should the\n # functions modify the documents, the last_modified and etag won't be\n # updated to reflect the changes (they always reflect the documents\n # state on the database.)\n\n getattr(app, \"on_fetched_resource\")(resource, documents)\n getattr(app, \"on_fetched_resource_%s\" % resource)(documents)\n\n if config.DOMAIN[resource]['hateoas']:\n response[config.ITEMS] = documents\n response[config.LINKS] = _pagination_links(resource, req,\n cursor.count())\n else:\n response = documents\n\n # the 'extra' cursor field, if present, will be added to the response.\n # Can be used by Eve extensions to add extra, custom data to any\n # response.\n if hasattr(cursor, 'extra'):\n getattr(cursor, 'extra')(response)\n\n return response, last_modified, etag, status", "def numNotready(antReady) :\n return len(antReady.notready)", "def ping(self, t):\r\n if not t:\r\n return 0\r\n\r\n # Set pointer to object queue and append most recent ping request\r\n q = self.q\r\n q.append(t)\r\n\r\n # Collect target ping request\r\n p = q[0]\r\n\r\n while p < t - 3000:\r\n # Remove targest ping request because too old\r\n q.popleft()\r\n p = q[0]\r\n\r\n # Determine number of recent ping requests\r\n return len(q)" ]
[ "0.6093405", "0.5707536", "0.56445384", "0.5552679", "0.55315655", "0.54823583", "0.5422522", "0.5402123", "0.5400669", "0.538704", "0.5379008", "0.5378094", "0.53733474", "0.535927", "0.5357638", "0.53508085", "0.53386307", "0.5338582", "0.5338395", "0.53344756", "0.5322996", "0.5296604", "0.5291609", "0.5290525", "0.52858925", "0.5276881", "0.525561", "0.5244207", "0.52355975", "0.52355975", "0.52355975", "0.52355975", "0.5227768", "0.5223348", "0.52041054", "0.5203846", "0.518906", "0.51871765", "0.5176887", "0.5168952", "0.51673555", "0.5161582", "0.51551104", "0.51480246", "0.5146495", "0.5129906", "0.5126814", "0.5119767", "0.51173747", "0.51163065", "0.5108948", "0.51077", "0.5103846", "0.5096799", "0.50964856", "0.5090749", "0.5088163", "0.50685185", "0.50576395", "0.505578", "0.503957", "0.5037692", "0.50357926", "0.50325066", "0.502353", "0.5023368", "0.501819", "0.50120604", "0.5010326", "0.5003413", "0.50032514", "0.49995995", "0.49908692", "0.49902046", "0.49902046", "0.49902046", "0.4977771", "0.49673593", "0.49665558", "0.4965332", "0.49619976", "0.49571708", "0.49506804", "0.49493822", "0.49481353", "0.49427512", "0.49359086", "0.49259573", "0.492233", "0.49191877", "0.49155584", "0.49137545", "0.4911151", "0.4901581", "0.489305", "0.48885286", "0.48885027", "0.48885003", "0.4876532", "0.48671183" ]
0.67190135
0
Test Chronos GR Config plugin writes new config when config has changed
def test_chronos_gr_config_changed(self, mock_run_command, mock_safely_write):
    # Create the plugin
    plugin = ChronosGRConfigPlugin({})

    # Set up the config strings to be tested
    old_config_string = "Old Chronos GR config"
    new_config_string = "New Chronos GR config"

    # Call 'on_config_changed' with file.open mocked out
    with mock.patch('clearwater_etcd_plugins.chronos.chronos_gr_config_plugin.open', \
                    mock.mock_open(read_data=old_config_string), create=True) as mock_open:
        plugin.on_config_changed(new_config_string, None)

    # Test assertions
    mock_open.assert_called_once_with(plugin.file(), "r")
    mock_safely_write.assert_called_once_with(plugin.file(), new_config_string)
    mock_run_command.assert_called_once_with("/usr/share/clearwater/clearwater-queue-manager/scripts/modify_nodes_in_queue add apply_chronos_gr_config")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"", "def test_update_wait():\n wait = '10 seconds'\n config_info = read_config()\n config_info['wait'] = wait\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['wait'] == wait", "def test_write_config(self):\n config = Config()\n config.config = test_config\n config.config_file = \"./config\"\n config.write_config()\n with open(config.config_file) as config_file:\n data = config_file.read()\n self.assertTrue(data)\n os.remove(config.config_file)", "def test_config_overwrite(self):\n inc = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", False, True)\n ini = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", False, True)\n\n self.assertEquals(inc, ini)", "def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()", "def test_config(setup_debug, tmp_path):\n os.chdir(tmp_path)\n \n ssh_tunnels = SSHTunnels(users=[\"bbeeson\"])\n c0 = (TEST_DATA / \"config\").read_text()\n # run and add 'queen'\n c1 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n # run and do nothing\n c2 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n assert len(c1) > len(c0)\n assert len(c1) == len(c2)\n \n # c_ref = (TEST_DATA / \"test_ssh_config2\").read_text()\n # should have just added queen\n #assert c2 == c_ref", "def test_update_reg_ex_config(self):\n pass", "def test_update_age():\n age = '2 minutes'\n config_info = read_config()\n config_info['age'] = age\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['age'] == age", "def test_config_reload(self):\n server = self.start_server(\"hello world\", 200)\n try:\n self.setup_dynamic()\n\n cfg_file = \"test.yml\"\n\n self.write_dyn_config(\n cfg_file, self.http_cfg(\"myid\", \"http://localhost:{}\".format(server.server_port)))\n\n self.wait_until(lambda: self.output_has(lines=1))\n\n self.assert_last_status(\"up\")\n\n self.write_dyn_config(\n cfg_file, self.http_cfg(\"myid\", \"http://203.0.113.1:8186\"))\n\n self.wait_until(lambda: self.last_output_line()[\n \"url.full\"] == \"http://203.0.113.1:8186\")\n\n self.assert_last_status(\"down\")\n\n self.proc.check_kill_and_wait()\n finally:\n server.shutdown()", "def test_configuration_changes(self):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n for i in range(5):\n s.run_simulation(dry_run=True)\n nconfig = s.to_dict()\n del nconfig['topology']\n assert config == nconfig", "def test_set_config__twice__with_same_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'), \\\n mock.patch('optimizely.optimizely_config.OptimizelyConfigService.get_config') as 
mock_opt_service:\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n project_config_manager._set_config(test_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual(1, mock_opt_service.call_count)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n mock_opt_service.reset_mock()\n\n # Call set config again and confirm that no new log message denoting config update is there\n project_config_manager._set_config(test_datafile)\n self.assertEqual(0, mock_logger.debug.call_count)\n self.assertEqual(0, mock_notification_center.call_count)\n # Assert that mock_opt_service is not called again.\n self.assertEqual(0, mock_opt_service.call_count)", "def test_custom_configuration_updated(self):\n component_protocol_id = ComponentId(\n ComponentType.PROTOCOL, self.new_protocol_id\n )\n component_contract_id = ComponentId(\n ComponentType.CONTRACT, self.new_contract_id\n )\n component_connection_id = ComponentId(\n ComponentType.CONNECTION, self.new_connection_id\n )\n component_skill_id = ComponentId(ComponentType.SKILL, self.new_skill_id)\n\n assert (\n self.agent_config.component_configurations[component_protocol_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_contract_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_connection_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_skill_id]\n == self.expected_custom_component_configuration\n )", "def testUpdateConfigFile(self):\n # Test update project field.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"\")\n # Test add ssh key path in config.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path,\n \"ssh_private_key_path\", \"test_path\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"test_path\")\n # Test config is not a file\n with mock.patch(\"os.path.isfile\") as chkfile:\n chkfile.return_value = False\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")", "def test_config_add(self):\n self.setup_dynamic()\n\n self.wait_until(lambda: self.log_contains(\n \"Starting reload procedure, current runners: 0\"))\n\n server = self.start_server(\"hello world\", 200)\n try:\n self.write_dyn_config(\n \"test.yml\", self.http_cfg(\"myid\", \"http://localhost:{}\".format(server.server_port)))\n\n self.wait_until(lambda: self.log_contains(\n \"Starting reload procedure, current runners: 1\"))\n\n self.wait_until(lambda: 
self.output_has(lines=1))\n\n self.proc.check_kill_and_wait()\n finally:\n server.shutdown()", "def test_set_config__success(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n project_config_manager._set_config(test_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n\n self.assertIsInstance(\n project_config_manager.optimizely_config,\n optimizely_config.OptimizelyConfig\n )", "def conf_update(self):\n pass", "def test_set_config__twice__with_diff_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('1', project_config_manager.optimizely_config.revision)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n\n # Call set config again\n other_datafile = json.dumps(self.config_dict_with_multiple_experiments)\n project_config_manager._set_config(other_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: 1. 
New revision number: 42.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('42', project_config_manager.optimizely_config.revision)", "def test_config_overwrites():\n basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", '..'))\n temppath = '/tmp/'\n\n conf = core.Config(datapath=temppath)\n\n assert conf.basepath.lower() == basepath.lower()\n assert conf.datapath.lower() == temppath.lower()", "def test_update_configuration(self):\n\n ts_name = 'test-update-1'\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertTrue(configuration.default)\n self.assertEquals(configuration.retentions, granularities.RETENTIONS_GRANULARITY)\n self.assertEquals(configuration.timezone, granularities.DEFAULT_TIMEZONE)\n self.assertEquals(configuration.aggregation_method,\n aggregations.DEFAULT_AGGREGATION)\n\n custom_tz = 'America/New_York'\n custom_agg = aggregations.AGGREGATION_LAST\n custom_ret = granularities.RETENTIONS_GRANULARITY\n custom_ret[granularities.SECOND] = 3 * 365 * 12 * 30 * 24 * 60 * 60\n timeserie_configuration.update_timeserie_configuration(\n self.get_local_dynamo_cli(), ts_name, custom_tz, custom_agg, custom_ret)\n\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertFalse(configuration.default)\n self.assertEquals(configuration.retentions, custom_ret)\n self.assertEquals(configuration.timezone, custom_tz)\n self.assertEquals(configuration.aggregation_method, custom_agg)", "def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)", "def test_config_write(get_config, default_config):\n cfg = get_config(Config, default_config('sys'))\n\n try:\n cfg.write()\n except Exception as e:\n pytest.fail(f'exception raised as\\n{e}')", "def test_update_lastID():\n config_info = read_config()\n config_info['lastID'] = 0\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['lastID'] == 0", "def test_new_config(self, context, permissions, wizard):\n context.config_exists.return_value = False\n permissions.return_value = True\n wizard.return_value = \"/some/file/path\"\n\n runner = CliRunner()\n result = runner.invoke(cli_node_new_configuration, [\n \"--name\", \"some-name\",\n \"--environment\", \"application\"\n ])\n\n # check that info message is produced\n self.assertEqual(result.output[:6], \"[info]\")\n\n # check OK exit code\n self.assertEqual(result.exit_code, 0)", "def test_get_config(default_config, tmp_path):\n abcconfig.write_config(default_config, configpath=tmp_path)\n config = abcconfig.get_config(configpath=tmp_path)\n assert config == default_config", "def test_global_config_persistence():\n gc = GlobalConfig()\n\n # Track old one\n old_analytics_opt_in = 
gc.analytics_opt_in\n\n # Toggle it\n gc.analytics_opt_in = not old_analytics_opt_in\n\n # Initialize new config\n gc = GlobalConfig()\n\n # It still should be equal to the old value, as we have not saved\n assert old_analytics_opt_in == gc.analytics_opt_in\n\n # Get raw config\n raw_config = yaml_utils.read_json(os.path.join(APP_DIR, GLOBAL_CONFIG_NAME))\n assert raw_config[\"analytics_opt_in\"] == old_analytics_opt_in", "def update(self):\n self.save_config_file()", "def _on_config_changed(self, _):\n self._configure_pod()", "def test_config_ok_config(self):\n test_data = (\"[gnupg]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"\\n\"\n \"[data]\\n\"\n \"\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\"\n \"\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data)\n config = Config(\"test_config.conf\")\n self.assertIn(\"gnupg\", config.config.sections())\n self.assertIn(\"amazon-s3\", config.config.sections())\n self.assertEqual(config.config.get(\n \"gnupg\", \"recipients\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"gnupg\", \"signer\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"access_key\"), \"ACCESSKEY\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"secret_access_key\"), \"SECRETACCESSKEY\")\n self.assertEqual(config.config.get(\n \"data\", \"bucket\"), \"DATABUCKET\")\n self.assertEqual(config.config.get(\n \"metadata\", \"bucket\"), \"METADATABUCKET\")\n os.remove(\"test_config.conf\")", "def test_logging_config(self):\n topdir = os.path.dirname(os.path.dirname(__file__))\n # logging config from default\n os.system('rm %s/logging.conf' % topdir)\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)\n # logging config from file\n os.system('cp %s/logging.conf.sample %s/logging.conf' %\n (topdir, topdir))\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)", "def _change_conf_check(mds_config):\n loop = asyncio.get_event_loop()\n crt = model.async_set_application_config('ceph-fs', mds_config)\n loop.run_until_complete(crt)\n results = _get_conf()\n self.assertEquals(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))", "def test_new_config_based_on_old_config(self):\n # set up\n get_config_patcher = patch(\n 'factories.get_config',\n return_value = {\n 'config_id': 101,\n 'key_value_pairs': \"mockKey=mockVal\",\n }\n )\n mock_get_config = get_config_patcher.start()\n\n # run SUT\n new_config_id = new_config(101)\n\n # confirm correct sql was executed once\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO config (key_value_pairs) VALUES (%s) RETURNING config_id\",\n ('mockKey=mockVal',)\n )\n\n # confirm that we got config 101\n mock_get_config.assert_called_once_with(101)", "def test_changedFile(self):\n self.write(\"service1.json\", [{\"host\": \"host1\", \"port\": 123},\n {\"host\": \"host2\", \"port\": 124}])\n self.pump()\n self.write(\"service1.json\", [{\"host\": \"host3\", 
\"port\": 125},\n {\"host\": \"host4\", \"port\": 126}])\n self.pump()\n self.assertNodesEqual(\n knownNodes(self.disco, \"service1\", \"staging\"),\n [self.node(\"service1\", \"host3\", 125),\n self.node(\"service1\", \"host4\", 126)])", "def test_900_restart_on_config_change(self):\n u.log.info('Checking that conf files and system services respond '\n 'to a charm config change...')\n\n sentry = self.swift_proxy_sentry\n juju_service = 'swift-proxy'\n\n # Process names, corresponding conf files\n services = {'swift-proxy-server': '/etc/swift/proxy-server.conf'}\n\n # Expected default and alternate values\n set_default = {'node-timeout': '60'}\n set_alternate = {'node-timeout': '90'}\n\n # Make config change, check for service restarts\n u.log.debug('Making config change on {}...'.format(juju_service))\n mtime = u.get_sentry_time(sentry)\n self.d.configure(juju_service, set_alternate)\n\n sleep_time = 40\n for s, conf_file in services.items():\n u.log.debug(\"Checking that service restarted: {}\".format(s))\n if not u.validate_service_config_changed(sentry, mtime, s,\n conf_file,\n sleep_time=sleep_time):\n self.d.configure(juju_service, set_default)\n msg = \"service {} didn't restart after config change\".format(s)\n amulet.raise_status(amulet.FAIL, msg=msg)\n sleep_time = 0\n\n self.d.configure(juju_service, set_default)", "def test_update_node_driveconfig(self):\n pass", "def test_get_config(self):\r\n config = self.profile.get_config('testing.conf', TestConfig, storage_args=['this_section'])\r\n self.assertIsInstance(config, TestConfig)\r\n self.assertIsNone(config.save())", "def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"", "def test_config_changed_non_leader(\n self,\n ) -> NoReturn:\n self.harness.set_leader(is_leader=False)\n self.harness.charm.on.config_changed.emit()\n\n # Assertions\n self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)", "async def test_update_device_config(hass, hass_client):\n with patch.object(config, \"SECTIONS\", [\"automation\"]):\n await async_setup_component(hass, \"config\", {})\n\n client = await hass_client()\n\n orig_data = [{\"id\": \"sun\"}, {\"id\": \"moon\"}]\n\n def mock_read(path):\n \"\"\"Mock reading data.\"\"\"\n return orig_data\n\n written = []\n\n def mock_write(path, data):\n \"\"\"Mock writing data.\"\"\"\n written.append(data)\n\n with patch(\"homeassistant.components.config._read\", mock_read), patch(\n \"homeassistant.components.config._write\", mock_write\n ), patch(\"homeassistant.config.async_hass_config_yaml\", return_value={}):\n resp = await client.post(\n \"/api/config/automation/config/moon\",\n data=json.dumps({\"trigger\": [], \"action\": [], \"condition\": []}),\n )\n\n assert resp.status == 200\n result = await resp.json()\n assert result == {\"result\": \"ok\"}\n\n assert list(orig_data[1]) == [\"id\", \"trigger\", \"condition\", \"action\"]\n assert orig_data[1] == {\"id\": \"moon\", \"trigger\": [], \"condition\": [], \"action\": []}\n assert written[0] == orig_data", "def _refreshconfig(self):\n self.config = ConfigGenerator(os.path.join(self.rundir, const.CONFIG_FILE))", "def test_generateconfig(self):\n args = mock.Mock()\n args.debug = None\n args.generateconfig = True\n args.config = None\n expected_text = ('Sample configuration file written to sample_config.json\\n'\n \"Replicate the site JSON for each site.\\n\"\n \" Valid values for use_https and local are 'True' and 'False'\\n\"\n \" One site must have local set to 'True'\\n\"\n 'Replicate 
the export JSON for each exported contract.\\n')\n with mock.patch('sys.stdout', new=StringIO()) as fake_out:\n execute_tool(args)\n self.assertEqual(fake_out.getvalue(), expected_text)", "def check_update(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\r\n try:\r\n sock.settimeout(2)\r\n sock.bind(('', 8080))\r\n sock.listen(1)\r\n conn, addr = sock.accept()\r\n except socket.timeout:\r\n return False\r\n sock.settimeout(None)\r\n with conn:\r\n conn.send(configuration)\r\n data = conn.recv(1024)\r\n with open('new_config.json', 'wt') as jsonfile:\r\n json.dump(data, jsonfile)\r\n self.set_new_configuration()", "def test_configuration(self):\n self.assertEqual(self.Test.adapter_config['write'],\n { 'adapter': TestAdapter, 'foo': 'bar' })", "def test_edit_configuration(self):\n configuration = copy.deepcopy(self.configuration)\n configuration['settings'] = {'DB_HOST': 'other_scale_db'}\n configuration['mounts'] = {\n 'dted': {\n 'type': 'host',\n 'host_path': '/some/new/path'\n }\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n json_data = {\n 'configuration': configuration,\n 'auto_update': False\n }\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def setUpConfig(self):\n pass", "def config():", "def config():", "def configure_test(self, test, config_json):\n pass", "def mock_config():\n from .. 
import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]", "def test_conf(self):\n self.TESTED_UNIT = 'ceph-fs/0'\n\n def _get_conf():\n \"\"\"get/parse ceph daemon response into dict for specified configs.\n\n :returns dict: conf options selected from configs\n :rtype: dict\n \"\"\"\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder\n\n @retry(wait=wait_exponential(multiplier=1, min=4, max=10),\n stop=stop_after_attempt(10))\n def _change_conf_check(mds_config):\n \"\"\"Change configs, then assert to ensure config was set.\n\n Doesn't return a value.\n \"\"\"\n loop = asyncio.get_event_loop()\n crt = model.async_set_application_config('ceph-fs', mds_config)\n loop.run_until_complete(crt)\n results = _get_conf()\n self.assertEquals(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))\n\n # ensure defaults are set\n _get_conf()\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)\n\n # change defaults\n mds_config = {'mds-cache-memory-limit': '8589934592',\n 'mds-cache-reservation': '0.10',\n 'mds-health-cache-threshold': '2'}\n _change_conf_check(mds_config)\n\n # Restore config to keep tests idempotent\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())", "async def test_manual_configuration_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: 
\"2.3.4.5\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://2.3.4.5:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://2.3.4.5:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"2.3.4.5\"", "def test_saveConfigurationWhenButtonClicked(self):\n self.createSettingsFile()\n testSave = ConfigurationWindow()\n testSave.FieldLabelServer.setText('133.51.19.172')\n testSave.FieldLabelPort.setText('54352')\n testSave.FieldLabelUDPIpSend.setText('145.51.19.172')\n testSave.FieldLabelUDPPortSend.setText('90075')\n testSave.FieldLabelUDPIPReceive.setText('1.0.0.127')\n testSave.FieldLabelUDPPortRececeive.setText('4321')\n testSave.FieldLabelTCPIPSend.setText('1.0.0.127')\n testSave.FieldLabelTCPPortSend.setText('4321')\n testSave.FieldLabelTCPIPReceive.setText('1.0.0.127')\n testSave.FieldLabelTCPPortRececeive.setText('1234')\n\n testSave.save()\n config = ConfigParser.SafeConfigParser()\n config.read(\".settings\")\n FieldLabelServer = config.get('server', 'serverip')\n FieldLabelPort = config.get('server', 'serverport')\n FieldLabelUDPIPSend = config.get('udp', 'udpipsend')\n FieldLabelUDPPortSend = config.get('udp', 'udpportsend')\n FieldLabelUDPIPReceive = config.get('udp', 'udpipreceive')\n FieldLabelUDPPortReceive = config.get('udp', 'udpportreceive')\n FieldLabelTCPIPSend = config.get('tcp', 'tcpipsend')\n FieldLabelTCPPortSend = config.get('tcp', 'tcpportsend')\n FieldLabelTCPIPReceive = config.get('tcp', 'tcpipreceive')\n FieldLabelTCPPortReceive = config.get('tcp', 'tcpportreceive')\n\n self.assertEqual('133.51.19.172', str(FieldLabelServer))\n self.assertEqual('54352', str(FieldLabelPort))\n self.assertEqual('145.51.19.172', str(FieldLabelUDPIPSend))\n self.assertEqual('90075', str(FieldLabelUDPPortSend))\n self.assertEqual('1.0.0.127', str(FieldLabelUDPIPReceive))\n self.assertEqual('4321', str(FieldLabelUDPPortReceive))\n self.assertEqual('1.0.0.127', str(FieldLabelTCPIPSend))\n self.assertEqual('4321', str(FieldLabelTCPPortSend))\n self.assertEqual('1.0.0.127', str(FieldLabelTCPIPReceive))\n self.assertEqual('1234', str(FieldLabelTCPPortReceive))", "def test_update_global_config(self):\n result = self.local_gae.query(\n 'import base_page\\n'\n\n # Verify the default config exists.\n 'n = base_page.GlobalConfig.all().count()\\n'\n 'assert n == 1, \"n == 1\"\\n'\n\n # Verify there is a config, and shows False.\n 'q = base_page.GlobalConfig.all()\\n'\n 'assert q.count() == 1, \"q.count() == 1\"\\n'\n 'config = q.get()\\n'\n 'assert not config.public_access, \"not config.public_access\"\\n'\n\n # Make the instance public.\n 'config.public_access = True\\n'\n 'config.put()\\n'\n 'print \"ok\",\\n')\n self.assertEqual('ok', result)\n # Login and try various operations.\n self.login('[email protected]')\n self._check_current_page()\n self._check_current_raw_page()\n self._check_post_thru_ui()\n # Verify the config now shows True.\n result = self.local_gae.query(\n 'import base_page\\n'\n 'q = base_page.GlobalConfig.all()\\n'\n 'assert q.count() == 1, \"q.count() == 1\"\\n'\n 'print 
q.get().public_access\\n')\n self.assertEqual('True\\n', result)", "def test_write_config(default_config, tmp_path):\n testpath = Path(tmp_path, \"write_config\")\n testpath.mkdir()\n abcconfig.write_config(default_config, configpath=testpath)\n assert Path(testpath, \"config.yml\").exists()", "def test_config_repository(self):\n self._ucr({\n 'repository/online': 'no',\n 'repository/online/server': 'example.net',\n 'repository/online/port': '1234',\n 'repository/online/prefix': 'prefix',\n 'repository/online/sources': 'yes',\n 'repository/online/httpmethod': 'POST',\n })\n self.u.config_repository()\n self.assertFalse(self.u.online_repository)\n self.assertEqual(self.u.repository_server, 'example.net')\n self.assertEqual(self.u.repository_port, '1234')\n self.assertEqual(self.u.repository_prefix, 'prefix')\n self.assertTrue(self.u.sources)\n self.assertEqual(U.UCSHttpServer.http_method, 'POST')", "def test_init_overwrite():\n client = TestClient()\n client.run('config init')\n dummy_content = 'DUMMY CONTENT. SHOULD BE REMOVED!'\n save_append(client.cache.conan_conf_path, dummy_content)\n save_append(client.cache.remotes_path, dummy_content)\n save_append(client.cache.settings_path, dummy_content)\n save_append(client.cache.default_profile_path, dummy_content)\n\n client.run('config init --force')\n assert dummy_content not in load(client.cache.conan_conf_path)\n assert dummy_content not in load(client.cache.conan_conf_path)\n assert dummy_content not in load(client.cache.settings_path)\n assert dummy_content not in load(client.cache.remotes_path)\n assert dummy_content not in load(client.cache.default_profile_path)", "def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))", "def _configure(self):\n test_lib.test_config.setdefault('config_files', []).append(\n self.filename)\n self._write_config_content()", "def write_config_file(config, args):\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()", "def test_config_device_save(get_config, 
config_dict):\n cfg = get_config(DeviceConfig, config_dict)\n cfg.save()\n with open(cfg.config_path, 'r') as fh:\n yml = yaml.load(fh.read(), Loader=get_yaml_loader())\n assert yml == config_dict, f'saved {yml} instead of {config_dict}'", "def test_enable_enabled():\n config_info = read_config()\n config_info['enabled'] = True\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['enabled'] is True", "def _update_params(self):\n log.debug(\"Updating parameter dict\")\n old_config = self._param_dict.get_config()\n self._get_config()\n new_config = self._param_dict.get_config() \n if (new_config != old_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def write_config():\n\n e = Element(\"Configuration\")\n r = SubElement(e, \"RepositoryList\")\n r = SubElement(r, \"Repository\", name = \"default\")\n SubElement(r, \"Module\").text = args.driver\n SubElement(r, \"TokenLabel\").text = args.token_label\n SubElement(r, \"PIN\").text = args.pin\n ElementTree(e).write(args.write_config)\n args.write_config.flush()", "def reload_config(self):\n pass", "def refresh_configuration(self):\n pass", "def test_set_new_property():\n\n value = '1'\n contents = (\"[info]\\n\"\n \"real = not_real\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()", "def test_config_changed_no_relations(\n self,\n ) -> NoReturn:\n\n self.harness.charm.on.config_changed.emit()\n\n # Assertions\n self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)\n self.assertTrue(\n all(\n relation in self.harness.charm.unit.status.message\n for relation in [\"mongodb\", \"kafka\", \"ro\"]\n )\n )", "def test_set_config_options(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Make sure id is initially set to what we expect\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n # Set and test to a new id\n config.set_config_options(client_id=\"new_id\")\n self.assertEqual(config.read_config_option('client_id'), \"new_id\")", "def test_configuration_handler(self):\n time_series = ['test-handler-1', 'test-handler-1', 'test-handler-2']\n # Calling get to force the creation of the configurations\n [timeserie_configuration.get_timeserie_configure(self.get_local_dynamo_cli(),\n ts) for ts in time_series]\n\n # Test handler to get the configuration\n event = {\n 'operation': 'get',\n 'payload': {\n 'timeseries': \",\".join(time_series)\n }\n }\n handler_get_confs = timeserie_configuration.handler(event, None)\n self.assertEquals(3, len(handler_get_confs))\n self.assertTrue(all([conf['default'] for conf in handler_get_confs]))\n\n # Test modify configuration via handler\n tz = \"America/Adak\"\n agg = 'sum'\n ret = granularities.RETENTIONS_GRANULARITY\n new_configuration = timeserie_configuration.Configuration(tz, agg, ret)\n new_configuration.timeserie = 'test-handler-1'\n\n event = {\n 'operation': 'post',\n 'payload': json.dumps(new_configuration.__dict__)\n }\n\n db_new_configuration = timeserie_configuration.handler(event, None)\n self.assertTrue(db_new_configuration)\n\n # Validate the changes in the configuration\n event = {\n 'operation': 'get',\n 'payload': {\n 'timeseries': 'test-handler-1'\n }\n }\n db_conf = timeserie_configuration.handler(event, None)\n self.assertEquals(1, len(db_conf))\n 
self.assertEqual(db_conf[0]['timezone'], tz)", "def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')", "def test_update_topic_config(self):\n test_topic_config = {\n 'test.topic': {\n 'schema_name': 'test.schema'\n }\n }\n local_topic_config = eventlogging.topic.get_topic_config()\n local_topic_config.update(test_topic_config)\n\n # append the new test topic config to the global topic config\n eventlogging.topic.update_topic_config(test_topic_config)\n\n # test that the global topic config is what it should be\n self.assertEqual(\n eventlogging.topic.get_topic_config(),\n local_topic_config\n )", "def notify_config_changes(self, is_new, data, diff):\n self.event(\n self.EV_CONFIG_CHANGED, {\"object\": self, \"is_new\": is_new, \"config\": data, \"diff\": diff}\n )", "def test_config_device_reset(get_config, monkeypatch):\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', {'test': 'conf'})\n cfg = get_config(DeviceConfig, base_config)\n cfg.save()\n cfg.write_default()\n new_conf = cfg.load()\n\n assert cfg.data == cfg.minimal_essential_conf, 'failed to apply default config'\n assert new_conf == cfg.minimal_essential_conf, 'failed to load default config'", "def _on_config_changed(self, _):\n script = b64decode(self.config[\"script\"]).decode()\n location = self.config[\"location\"]\n print(script)\n logger.debug(\"Deploying script to %s\", location)\n with open(location, 'w') as f:\n f.write(script)\n os.chmod(location, 0o755)\n self.unit.status = ActiveStatus()", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def testWriteReadConfigFile(self):\n path = os.path.join(self.tempdir, 'foo.json')\n config = {'foo': 1, 'bar': 2}\n\n workspace_lib.WriteConfigFile(path, config)\n self.assertDictEqual(config, workspace_lib.ReadConfigFile(path))", "def test_custom_configuration_updated_correctly(self):\n result = self.run_cli_command(\n \"--skip-consistency-check\",\n \"config\",\n \"get\",\n \"vendor.fetchai.skills.error.is_abstract\",\n cwd=self._get_cwd(),\n )\n assert result.stdout == \"True\\n\"", "def _CheckGRRConfig(self):\n config_info = self.fd.Get(self.fd.Schema.GRR_CONFIGURATION)\n self.assertEqual(config_info[\"Client.control_urls\"],\n [\"http://localhost:8001/control\"])\n self.assertEqual(config_info[\"Client.poll_min\"], 1.0)", "def check_config(self):\n try:\n config_metadata = self.dbc.get_metadata(\"config.txt\")\n except rest.ErrorResponse:\n print str(datetime.datetime.now()) \\\n + \": No config.txt in Dropbox directory. 
Exiting.\"\n sys.exit()\n if config_metadata[\"modified\"] != self.config_date:\n print str(datetime.datetime.now()) + \": Config changed\"\n self.config_date = config_metadata[\"modified\"]\n try:\n self.dbc.get_file(\"config.txt\")\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) + e.reason\n return False\n self.config.reload(self.local_directory + \"/\" + \"config.txt\")\n return True\n return False", "def test_z_restart_on_config_change(self):\n\n sentry = self.n_ovs_sentry\n juju_service = 'neutron-openvswitch'\n\n # Expected default and alternate values\n set_default = {'debug': 'False'}\n set_alternate = {'debug': 'True'}\n\n # Services which are expected to restart upon config change,\n # and corresponding config files affected by the change\n conf_file = '/etc/neutron/neutron.conf'\n services = {\n 'neutron-openvswitch-agent': conf_file\n }\n\n # Make config change, check for svc restart, conf file mod time change\n u.log.debug('Making config change on {}...'.format(juju_service))\n mtime = u.get_sentry_time(sentry)\n self.d.configure(juju_service, set_alternate)\n self._wait_and_check()\n\n sleep_time = 30\n for s, conf_file in services.iteritems():\n u.log.debug(\"Checking that service restarted: {}\".format(s))\n if not u.validate_service_config_changed(\n sentry, mtime, s, conf_file, sleep_time=sleep_time,\n pgrep_full=self.pgrep_full):\n self.d.configure(juju_service, set_default)\n self._wait_and_check()\n msg = \"service {} didn't restart after config change\".format(s)\n amulet.raise_status(amulet.FAIL, msg=msg)\n\n u.log.debug('OK')", "def on_config_change(self, config, section, key, value):\n \n if section == \"Makesmith Settings\":\n if key == \"COMport\":\n self.data.comport = value\n elif key == 'xPitch':\n print \"xPitch changed\"", "def engage_config_lock():\n _get_scs_globals().config_update_lock = True", "def config(name, value):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Configuring {} to {}\".format(\n name,\n value,\n )\n return ret\n\n __salt__[\"trafficserver.set_config\"](name, value)\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Configured {} to {}\".format(name, value)\n return ret", "def tearDownConfig(self):\n print time.ctime(), 'enter tearDownConfig'\n\n self.site1 = self.globalCfg['site1']\n self.site2 = self.globalCfg['site2']\n self.site3 = self.globalCfg['site3']\n\n self.site1.databaseLandscapeInfo()\n self.site2.databaseLandscapeInfo()\n self.site3.databaseLandscapeInfo()\n self.site1.systemReplicationStatus()\n\n if self.globalCfg['sync_mode'] == 'sync' and self.site1.fullSync:\n try:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n except Exception, e:\n print 'disable full_sync in tearDownConfig failed: %s' % e\n\n for h in range(1, self.site1.getHostNo()):\n self.site1.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n for h in range(1, self.site2.getHostNo()):\n self.site2.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n for h in range(1, self.site3.getHostNo()):\n self.site3.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n\n self.site1.resetStatXSToMaster(self.globalCfg['multiDB'])\n self.site2.resetStatXSToMaster(self.globalCfg['multiDB'])\n self.site3.resetStatXSToMaster(self.globalCfg['multiDB'])\n\n 
self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n # for normal tear down(unregister/disable), the steps should be in order\n # the primary cannot be disabled if there's secondary attached\n # so there's no need to use multi-thread\n # executing here means the landscape has been resorded to site1--(sync/syncmem)--site2--(async)--site3\n #pdb.set_trace()\n self.site3.tearDown()\n self.site2.tearDown()\n self.site1.tearDown()", "async def test_full_config(hass, mock_client):\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, 
prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED", "def test_delayed_exec_configs(self):\n from fixtures.test_adapter import TestAdapter\n class Test(pyperry.Base):\n def _config(cls):\n cls.configure('read', adapter=TestAdapter, foo=lambda: 'barbarbar')\n\n adapter = Test.adapter('read', )\n self.assertEquals(adapter.config.foo, 'barbarbar')", "def test_logging_config_file(self, monkeypatch):\n # We still want the Formatter to be configured.\n assert logging.Formatter.converter == time.gmtime\n assert logging.Formatter.default_time_format == '%Y-%m-%dT%H:%M:%S'\n assert logging.Formatter.default_msec_format == '%s.%03d'\n\n # Set NETDUMPLINGS_LOGGING_CONFIG to point to a test logging config.\n logging_config_file = 'tests/data/logging.json'\n monkeypatch.setenv('NETDUMPLINGS_LOGGING_CONFIG', logging_config_file)\n\n configure_logging()\n\n # The test config file sets all the loggers to ERROR.\n assert logging.getLogger('netdumplings').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplinghub').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingkitchen').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingeater').level == logging.ERROR", "def test_connections_updated(self):\n assert self.agent_config.connections == {self.new_connection_id}", "def test_refecth_auth_config(self, mock_config_file):\n responses.add(responses.GET, 'https://test.gigantum.com/.well-known/auth.json',\n json={\"audience\": \"api.test.gigantum.com\",\n \"issuer\": \"https://auth.gigantum.com/\",\n \"signing_algorithm\": \"RS256\",\n \"public_key_url\": \"https://test.gigantum.com/gigantum/auth/jwks.json\",\n \"login_url\": \"https://test.gigantum.com/auth/redirect?target=login\",\n \"token_url\": \"https://test.gigantum.com/auth/token\",\n \"logout_url\": \"https://test.gigantum.com/auth/redirect?target=logout\",\n \"client_id\": \"Z6Wl854wqCjNY0D4uJx8SyPyySyfKmAy\",\n \"login_type\": \"auth0\"},\n status=200)\n\n config_instance, working_dir = mock_config_file\n config_instance.set_current_server(\"test-gigantum-com\")\n\n # Make sure cache is loaded\n config_instance.get_server_configuration()\n auth_config = config_instance.get_auth_configuration()\n assert auth_config.public_key_url == \"https://auth.gigantum.com/.well-known/jwks.json\"\n\n # Refetch\n config_instance.refetch_auth_config()\n\n # Verify\n auth_config = config_instance.get_auth_configuration()\n assert auth_config.public_key_url == \"https://test.gigantum.com/gigantum/auth/jwks.json\"\n\n # Do it again because should be in redis now\n auth_config = config_instance.get_auth_configuration()\n assert auth_config.public_key_url == \"https://test.gigantum.com/gigantum/auth/jwks.json\"\n\n # Explicitly check redis\n data = config_instance._get_redis_client().hgetall(config_instance.AUTH_CONFIG_CACHE_KEY)\n assert data['public_key_url'] == \"https://test.gigantum.com/gigantum/auth/jwks.json\"\n\n # Explicity check persisted file\n file_data = config_instance._load_current_configuration()\n assert file_data['auth']['public_key_url'] == \"https://test.gigantum.com/gigantum/auth/jwks.json\"", "def test_config_save_restore(self):\n\n config_filename_initial = 'test_configuration'\n config_filename_save = 'save_configuration'\n\n # Get config path\n local_dir = os.path.dirname(__file__)\n config_path_initial = os.path.join(local_dir, config_filename_initial)\n 
config_path_save = os.path.join(local_dir, config_filename_save)\n\n # Load initial configuration from file\n config_initial = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_initial)\n\n config1 = config_initial.genome_config\n names1 = [p.name for p in config1._params]\n for n in names1:\n assert hasattr(config1, n)\n\n # Save configuration to another file\n config_initial.save(config_path_save)\n\n # Obtain configuration from saved file\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_save)\n\n config2 = config.genome_config\n names2 = [p.name for p in config2._params]\n for n in names2:\n assert hasattr(config2, n)\n\n self.assertEqual(names1, names2)\n\n for n in names1:\n v1 = getattr(config1, n)\n v2 = getattr(config2, n)\n self.assertEqual(v1, v2)", "def updated(self, newConfiguration):\n log.debug('ConfigListener: configuration %s updated' % newConfiguration)", "async def test_set_config(self):\n set_log_levels(logger_topics=True)\n\n disable_auto_linking = random_bool()\n monitor_mode = random_bool()\n auto_led = random_bool()\n deadman = random_bool()\n topic = f\"ack.{SET_IM_CONFIGURATION}\"\n topic_item = TopicItem(\n topic,\n {\n \"disable_auto_linking\": disable_auto_linking,\n \"monitor_mode\": monitor_mode,\n \"auto_led\": auto_led,\n \"deadman\": deadman,\n },\n 0.1,\n )\n\n modem = ModemBase()\n reset_config(modem, disable_auto_linking, monitor_mode, auto_led, deadman)\n\n send_topics([topic_item])\n await modem.async_set_configuration(\n disable_auto_linking, monitor_mode, auto_led, deadman\n )\n await asyncio.sleep(0.1)\n\n assert modem.configuration[DISABLE_AUTO_LINKING].value == disable_auto_linking\n assert modem.configuration[MONITOR_MODE].value == monitor_mode\n assert modem.configuration[AUTO_LED].value == auto_led\n assert modem.configuration[DEADMAN].value == deadman\n\n assert modem.configuration[DISABLE_AUTO_LINKING].new_value is None\n assert modem.configuration[MONITOR_MODE].new_value is None\n assert modem.configuration[AUTO_LED].new_value is None\n assert modem.configuration[DEADMAN].new_value is None", "async def test_minimal_config(hass, mock_client):\n config = {prometheus.DOMAIN: {}}\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED", "def test_config_reload_lc(duthosts, all_cfg_facts, nbrhosts, nbr_macs, tbinfo):\n logger.info(\"=\" * 80)\n logger.info(\"Precheck\")\n logger.info(\"-\" * 80)\n\n check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs)\n check_ip_fwd(duthosts, all_cfg_facts, nbrhosts, tbinfo)\n\n logger.info(\"=\" * 80)\n logger.info(\"Config reload on node: %s\", duthosts.frontend_nodes[0].hostname)\n logger.info(\"-\" * 80)\n\n config_reload(duthosts.frontend_nodes[0], config_source='config_db', safe_reload=True, check_intf_up_ports=True)\n poll_bgp_restored(duthosts)\n\n logger.info(\"=\" * 80)\n logger.info(\"Postcheck\")\n logger.info(\"-\" * 80)\n check_intfs_and_nbrs(duthosts, all_cfg_facts, nbrhosts, nbr_macs)\n check_ip_fwd(duthosts, all_cfg_facts, nbrhosts, tbinfo)", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = 
self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' % (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def test_connections_updated(self):\n assert self.connection_config.connections == {self.new_connection_id}", "async def test_config_sensor(hass, method_discovery, do_config):\n sensor_name = \"test_sensor\"\n config_sensor = {\n CONF_NAME: sensor_name,\n **do_config,\n }\n await base_config_test(\n hass,\n config_sensor,\n sensor_name,\n SENSOR_DOMAIN,\n CONF_BIT_SENSORS,\n CONF_INPUTS,\n method_discovery=method_discovery,\n )", "def sync_config():\n rsync_project(remote_dir='/apps/sharejs-rethinkdb-example/config/', local_dir='./config/')", "def test_update_global_system_config(self):\n new_config = self._create_global_system_config()\n update_name = data_utils.rand_name('test')\n with self.override_role():\n self.config_client.update_global_system_config(\n new_config['uuid'],\n display_name=update_name)", "def test_update_config():\n subprocess.check_call(['yggconfig', '-h'])" ]
[ "0.72591335", "0.72113305", "0.71514034", "0.7107235", "0.7026826", "0.69808435", "0.69801", "0.6910698", "0.6887558", "0.68464094", "0.6824999", "0.68176496", "0.67969394", "0.67067325", "0.6699056", "0.6610438", "0.6609877", "0.6593223", "0.6569542", "0.6516883", "0.65156054", "0.64069116", "0.6402888", "0.63679236", "0.6356955", "0.63513726", "0.63385147", "0.6323897", "0.63080955", "0.6299405", "0.62853295", "0.6284575", "0.6279466", "0.62722826", "0.62690926", "0.62462276", "0.6238872", "0.62311065", "0.6226195", "0.6201082", "0.61941236", "0.6193813", "0.616989", "0.61637974", "0.6155142", "0.6155142", "0.61377347", "0.61374277", "0.6136849", "0.6136448", "0.61300623", "0.61292195", "0.61066157", "0.60942495", "0.6075186", "0.6071588", "0.6065912", "0.60648865", "0.60610837", "0.6058915", "0.6054634", "0.6044618", "0.6043018", "0.6040805", "0.60302484", "0.60213643", "0.6021186", "0.60140526", "0.6011275", "0.6001427", "0.59989345", "0.5998335", "0.5995606", "0.5990772", "0.5989951", "0.5987847", "0.597536", "0.5973609", "0.5967318", "0.5965239", "0.595804", "0.59574217", "0.5955231", "0.5948746", "0.5946", "0.5943967", "0.59395105", "0.5933856", "0.59326065", "0.5929782", "0.5924513", "0.5923716", "0.5922193", "0.59136194", "0.59010905", "0.58970004", "0.58966315", "0.58938074", "0.5886401", "0.5885828" ]
0.75435835
0
Returns 3d matrix of sizes [257,301,2]
def process(self, data):
    spectr = stft(data, n_fft=512, hop_length=160)
    return np.concatenate((spectr.real[:, :, np.newaxis], spectr.imag[:, :, np.newaxis]), axis=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_2d_to_3d(matrix: np.matrix) -> np.matrix:\n return np.matrix([\n [matrix.item(0, 0), matrix.item(0, 1), 0, matrix.item(0, 2)],\n [matrix.item(1, 0), matrix.item(1, 1), 0, matrix.item(1, 2)],\n [0, 0, 1, 0],\n [matrix.item(2, 0), matrix.item(2, 1), 0, matrix.item(2, 2)]])", "def _build(self):\n ary = np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary", "def get5x5matrix(self): #modified from nxvasc get3x3matrix()\n try:\n i = na.identity(3)\n \n self.d124 = i.copy()\n self.ds124 = na.zeros(124,na.float64)\n \n for k in range(1,124):\n self.d124 = na.concatenate((self.d124,i))\n# print len(self.d124)\n count = 0\n a = []\n for k in range(-2,3):\n for j in range(-2,3):\n for i in range(-2,3):\n if( i != 0 or j != 0 or k != 0 ):\n self.ds124[count] = math.sqrt(i**2+j**2+k**2)\n count += 1\n a.append(i)\n a.append(j)\n a.append(k)\n# print len(a)\n a = na.reshape(na.array(a),(372,1))\n# print len(self.d124)\n self.d124 = na.concatenate((self.d124,a),axis=1)\n except Exception as error:\n print(\"failed in get5x5matrix(): \", error)", "def make8UC3(mat):\n mat_8UC = make8UC(mat)\n mat_8UC3 = np.stack((mat_8UC,)*3, axis = -1)\n \n return mat_8UC3", "def mat_2d_to_3d(x, agg_num, hop):\n # Pad to at least one block. \n len_x, n_in = x.shape\n if (len_x < agg_num):\n x = np.concatenate((x, np.zeros((agg_num - len_x, n_in))))\n \n # Segment 2d to 3d. \n len_x = len(x)\n i1 = 0\n x3d = []\n while (i1 + agg_num <= len_x):\n x3d.append(x[i1 : i1 + agg_num])\n i1 += hop\n return np.array(x3d)", "def matrix_3d_to_4x4(matrix: np.matrix) -> np.matrix:\n return np.matrix([\n [matrix.item(0, 0), matrix.item(0, 1), matrix.item(0, 2), 0],\n [matrix.item(1, 0), matrix.item(1, 1), matrix.item(1, 2), 0],\n [matrix.item(2, 0), matrix.item(2, 1), matrix.item(2, 2), 0],\n [0, 0, 0, 1]])", "def get_matrix(self):\n return self._matrix[:3, :]", "def _make_3d_series(x: np.ndarray) -> np.ndarray:\n num_dims = x.ndim\n if num_dims == 1:\n shape = x.shape\n _x = np.reshape(x, (shape[0], 1, 1))\n elif num_dims == 2:\n shape = x.shape\n _x = np.reshape(x, (shape[0], 1, shape[1]))\n elif num_dims > 3:\n raise ValueError(\n \"The matrix provided has more than 3 dimensions. This is not\"\n \"supported. 
Please provide a matrix with less than \"\n \"3 dimensions\"\n )\n else:\n _x = x\n return _x", "def init_three_d_array(dimens, val):\n w, x, y = dimens\n return [[[val for k in range(y)] for j in range(x)] for i in range(w)]", "def getMat3(self):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return _mat3(m11,m12,m13,\r\n m21,m22,m23,\r\n m31,m32,m33)", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def matrix_magnify_3d(f: float) -> np.matrix:\n return np.matrix([[f, 0, 0], [0, f, 0], [0, 0, f]])", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def dim3():\n return Integer(\"yolo3\", \"uniform\", 3, 7, shape=(1,))", "def generate_3d_board(n):\n layer = nxn_generate.generate_shuffled_2d_board(n)\n cube = []\n for i in range(len(layer)):\n new_layer = []\n for column in layer:\n new_column = []\n # this nested mess is to ensure that none of the sub 3x3 squares violates sudoku rules from any x y or z\n # perspective (also the Latin Square rules but the subsquares are trickier and the cause of more mess)\n for j in range(int(math.sqrt(len(layer)))):\n for k in range(int(math.sqrt(len(layer)))):\n # lot of 3 = (i+j) % 3\n # index within lot = (i + k + (i//3)) % 3\n new_column.append(column[int(math.sqrt(len(layer))) * ((i + j) % int(math.sqrt(len(layer)))) + (\n i + k + (i // int(math.sqrt(len(layer))))) % int(math.sqrt(len(layer)))])\n new_layer.append(new_column)\n cube.append(new_layer)\n\n return shuffle_cube(cube)", "def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr", "def _embed44(matrix):\n result = np.eye(4)\n r, c = matrix.shape\n result[:r, :c] = matrix\n return result", "def image_batch():\n return np.zeros((2, 1, 4, 4))", "def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def split_3Darray(array2d, L_window):\n N, ch = array2d.shape\n n_windows = N//L_window\n array3d = np.zeros((n_windows, L_window, ch))\n for i in range(n_windows):\n array3d[i]=array2d[i*L_window: (i+1)*L_window,:] \n \n return array3d", "def make_matrix(sizex, sizey):\n return [[0]*sizey for i in xrange(sizex)]", "def make_matrix(sizex, sizey):\n return [[0] * sizey for i in range(sizex)]", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = 
d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def _xyz_matrix():\n fx = 583.0\n fy = 583.0\n cx = 321\n cy = 249\n a = -0.0028300396\n b = 3.1006268\n mat = np.array([[1/fx, 0, 0, -cx/fx],\n [0, -1/fy, 0, cy/fy],\n [0, 0, 0, -1],\n [0, 0, a, b]])\n return mat", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def flatten(X):\n N = X.shape[-1]\n flat = np.zeros((N, 3072))\n for idx, i in enumerate(range(N)):\n # if not idx:\n # print(X[:,:,:,i].reshape(3072))\n flat[i] = X[:,:,:,i].reshape(3072)\n return flat", "def to_3dim(X: torch.Tensor, target_size: Tuple[int, int, int], dtype=torch.float32) -> torch.Tensor:\n assert X.ndim == 2\n n_styles, n_contents, dim_x = target_size\n assert X.shape[0] == n_styles * dim_x\n assert X.shape[1] == n_contents\n\n target = torch.zeros(target_size, dtype=X.dtype)\n\n for s in range(n_styles):\n for c in range(n_contents):\n img = X[s * dim_x: (s + 1) * dim_x, c]\n target[s, c] = img\n return target.to(dtype)", "def populate3D(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n \n #put the actual indices appropriate for this cube into the indexRef array\n indexRef = getIndexRef(row, col, frame, midRange, maxIndex)\n\n pf = perturbanceFactor(matrix.shape[0], midRange * 2, perturbance)\n noiseLevel = roughness * pf\n \n populateFaces(matrix, indexRef, noiseLevel)\n\n populateEdges(matrix, indexRef, noiseLevel)\n \n #printAllowCancel(matrix)\n \n return [row + midRange, col + midRange, frame + midRange]", "def tdim3(dim3):\n return TransformedDimension(Compose([], dim3.type), dim3)", "def transpose_3d():\n tmp = np.random.random((10, 10, 10))\n\n a = tmp.T\n b = np.empty(tmp.shape)\n for j in range(tmp.shape[1]):\n b[:, j, :] = tmp[:, j, :].T\n\n print(np.all(a == b))", "def _buildMatrix(self, SparseMatrix, Ncells, MaxFaces, coeff):\n return (0, 0)", "def _calculate_leading_dim_map():\n small_matrixes = [(value, value+64) for value in range(256, 40192+512, 512)]\n large_matrixes = [(value, value+1088) for value in range(1024, 39936+1024, 1024)]\n return dict(small_matrixes + large_matrixes)", "def build3D(imMask, imInout, grid=m3D.DEFAULT_GRID3D):\r\n \r\n if imInout.getDepth()==1:\r\n _build3D_1(imMask, imInout, grid)\r\n elif imInout.getDepth()==8:\r\n _build3D_8(imMask, imInout, grid)\r\n else:\r\n _build3D_32(imMask, imInout, grid)", "def _make_random_matrix(self, n_components, n_features):", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def make_data(self): \n s = numpy.arange(0.0, 10.0, 0.01)\n s = numpy.reshape(s, (10,10,10))\n s = numpy.transpose(s)\n\n v = numpy.zeros(3000, 'd')\n v[1::3] = 1.0\n v = numpy.reshape(v, (10,10,10,3))\n return s, v", "def build(xaxis, yaxis, zaxis):\n matrix = []\n for 
floor in range(zaxis):\n roomnum = 1\n matrix.append([])\n for row in range(yaxis):\n matrix[floor].append([])\n for column in range(xaxis):\n matrix[floor][row].append(str(roomnum))\n roomnum += 1\n return matrix", "def make_matrix(rows, columns):\n\tmatrix = []\n\tfor row in range(rows):\n\t\tmatrix += [[0] * columns]\n\t\t\n\treturn matrix", "def _makeWaMatrix_(self, wa, nRow, nCol):\n\t\t#print nRow, nCol\n\t\t#print wa\n\t\t#print\n\t\twaMatrix = [[0 for j in xrange(nCol)] for i in xrange(nRow)]\n\t\tfor a in wa: \n\t\t\tfor i in a[0]:\n\t\t\t\tfor j in a[1]:\n\t\t\t\t\twaMatrix[i][j] = 1\n\t\treturn waMatrix", "def makeMatrix():\n listOfChars = []\n for ascii in range(32, 128):\n listOfChars.append(chr(ascii))\n random.shuffle(listOfChars)\n matrix = Grid(8, 12)\n i = 0\n for row in range(matrix.getHeight()):\n for column in range(matrix.getWidth()):\n matrix[row][column] = listOfChars[i]\n i += 1\n return matrix", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def gamma_matrix(gamma_coefs):\n matrix = np.ndarray([len(gamma_coefs), 3, 256], dtype=int)\n\n # gamma_coefs contains an [R, G, B] gamma table for each slab\n for i, slab in enumerate(gamma_coefs):\n for j, color in enumerate(slab):\n for k in range(256):\n v = pow(k / 255, color) * 255\n v = int(round(v))\n matrix[i, j, k] = v\n return matrix", "def as_matrix(self) -> types.Matrix:", "def make_result_matrix(T):\n result_matrix = []\n # Uniform sampled distribution\n distribution = np.random.choice([1, 0], T, p=[.1, .9])\n place_holder = np.random.randn(T)\n place_holder[distribution] = np.nan # Masking\n\n # This block is to un-flatten the 25 element matrix into a 5*5 matrix\n for j in range(T):\n temp = []\n for i in range(T):\n temp.append(place_holder[i])\n result_matrix.append(temp)\n\n result_matrix = np.array(result_matrix)\n\n return result_matrix", "def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i", "def make_matrix(n):\n #number that makes up the matrix depending on value of n\n num = 1/n**2\n #creates a blur box as a dictionary\n blur_box = {'height': n, 'width': n, 'pixels': [num]*n**2 }\n return blur_box", "def create_matrix(n, m):\n matrix = [[None]*m for i in range(n)]\n return matrix", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def image_to_matrix(image):\n\n\tpic = np.array([t[0] for t in image.getdata()]).reshape(image.size[1],image.size[0])\n\n\treturn pic", "def matrix_init(sizex, sizey):\n return [[0]*sizey for i in range(sizex)]", "def private_create_matrix(sample_size, dim, n_param):\n if dim == 0:\n point = []\n for i in range(n_param):\n point.append(0)\n return [point, 9]\n return 
[private_create_matrix(sample_size, dim - 1, n_param) for _ in range(sample_size)]", "def matrix4_to_3x4_array(mat):\r\n return tuple(f for v in mat[0:3] for f in v)", "def conv3H4H(M):\n M = np.append(M.copy(), [[0, 0, 1]], 0) # add row\n return np.append(M, [[0], [0], [0], [0]], 1) # add column", "def jointsImgTo3D(sample):\n ret = np.zeros((sample.shape[0], 3), np.float32)\n for i in range(sample.shape[0]):\n ret[i] = jointImgTo3D(sample[i])\n return ret", "def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)\r\n return dct_m, idct_m", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def row_col_diag(arr):\n three_sets = np.zeros((8,3), dtype=int)\n for i in range(arr.shape[0]):\n three_sets[i] = arr[i]\n for i in range(arr.shape[1]):\n three_sets[i+3] = arr[:,i]\n three_sets[6] = np.diag(arr)\n three_sets[7] = np.diag(np.flipud(arr))\n return three_sets", "def get_matrixS(n):\n\n mat_nxn = np.zeros([n, n], dtype=int)\n for row_num in range(1, n + 1):\n i = row_num - 1\n if row_num == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == 2:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == n - 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num == n:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 0:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n mat_nxn = mat_nxn + np.eye(n, dtype=int)\n mat_2nx2n = np.repeat(np.repeat(mat_nxn, 2, 0), 2, 1)\n return torch.as_tensor(mat_2nx2n)", "def createarray(m,n):\n return( np.ones((m,2,n)) )", "def get3D_rod():\n\n volume = torch.zeros(1,1,55,54,53)\n length = 15\n st = [27,26,25]\n\n volume[:,:,st[0]:st[0]+length,st[1],st[2]] = 0.5\n volume[:,:,st[0]+length:st[0]+length+2,st[1],st[2]] = 0.2\n\n volume[:,:,st[0],st[1]:st[1]+length,st[2]] = 0.5\n volume[:,:,st[0],st[1]+length:st[1]+length+2,st[2]] = 1.\n \n volume[:,:,st[0],st[1],st[2]:st[2]+length] = 0.5\n volume[:,:,st[0],st[1],st[2]+length:st[2]+length+2] = 2.0\n \n volume[:,:,st[0],st[1]:st[1]+length,st[2]:st[2]+length] = 0.2\n volume[:,:,st[0],st[1]+length:st[1]+length+1,st[2]+length:st[2]+length+1] = 1.5\n\n return volume", "def get_stain_matrix(I):", "def take_matrix(self):\n matrix = aux.matrix(self.take_vec(), self.order)\n\n return matrix", "def T(self):\n # TODO - your code here\n matrix_transpose = [];\n \n for j in range(self.w):\n matrix_transpose.append(self.get_column(j));\n \n return Matrix(matrix_transpose);", "def matrix_apply_to_3d_3x3(vol, matrix: np.matrix):\n from scipy import mgrid\n\n cx = vol.shape[0]/2\n cy = vol.shape[1]/2\n cz = vol.shape[2]/2\n\n # Calculate the new coordinates of every point\n grid = mgrid[-cx:vol.shape[0]-cx, -cy:vol.shape[1]-cy, -cz:vol.shape[2]-cz]\n temp = grid.reshape((3, grid.size / 3))\n # Add the fourth dimension (just 1s but needed for the computations)\n # Use the matrix to calculate the new positions of every point\n temp = 
np.dot(matrix, temp)\n # Delete the fourth dimension\n temp = np.array(temp)\n grid = np.reshape(temp, (3, vol.shape[0], vol.shape[1], vol.shape[2]))\n\n grid[0] += cx\n grid[1] += cy\n grid[2] += cz\n\n from scipy.ndimage.interpolation import map_coordinates\n d = map_coordinates(vol, grid, order=3)\n\n return d", "def create_matrix(sample_size, dim):\n return np.array(private_create_matrix(sample_size, dim, dim))", "def create_fabric_matrix(rows, columns):\n return [['.'] * columns for i in range(rows)]", "def fread_tensor3(stream):\n\n T = fread_matrix(stream)\n while (True):\n A = fread_matrix(stream)\n if (len(A.shape) < 2 or A.shape[0] == 0 or A.shape[1] == 0):\n return (T)\n T = numpy.dstack((T, A))", "def JPGtoMatrix(path,w,h):\n listing = os.listdir(path)\n listing.sort()\n count = 0\n docFiles = []\n for infile in listing:\n count = count + 1\n docFiles.append(infile)\n matrix = np.zeros((w*h,count))\n for i in range(len(listing)):\n matrix[:,i]=JPGtoArray(join(path,listing[i]))\n return matrix,listing", "def getMatrix(self) -> CMatrix4:\n ...", "def matrix_units(dim):\n return [_np.reshape(unit_vector(a, dim**2), (dim, dim)) for a in range(dim**2)]", "def Matrix(arg0: List[List[complex]]) -> ngsolve.bla.MatrixC:", "def crear_matrix(nxn):\n matrix =[]\n for i in range(nxn):\n matrix.append([])\n for e in range(nxn):\n matrix[i].append(\"\")\n return matrix", "def np_image_matrix(self):\n return np.array(self.crop_image())", "def init_reshape(cube, nside):\n half_nside = 2**nside / 2\n \n dim1 = cube.shape[1]/2 - half_nside\n dim2 = cube.shape[1]/2 + half_nside\n dim3 = cube.shape[2]/2 - half_nside\n dim4 = cube.shape[2]/2 + half_nside\n\n return cube[:, dim1:dim2, dim3:dim4]", "def new_game(n):\n matrix = []\n\n for i in range(n):\n matrix.append([0] * n)\n return matrix", "def matEye(n):\n ret=matZeros((n,n))\n for i in range(n):\n matSet(ret,i,i,1)\n return ret", "def get_3d_H(H1):\n H_fin = [H1[0,0], 0, H1[0,1], H1[0,2], 0, 1, 0, 0, H1[1,0], 0, H1[1,1], H1[1,2]]\n H_fin = np.array(H_fin).reshape(3,4)\n return H_fin", "def get_matrices(dimensions: Tuple[int, int]\n ) -> Matrices:\n embedding_matrix = np.random.uniform(-1, 1, dimensions)\n context_matrix = np.random.uniform(-1, 1, dimensions)\n return Matrices(embedding_matrix, context_matrix)", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def getPadded3dimage(image):\n z, m, n = np.shape(image)\n paddedShape = z + 2, m + 2, n + 2\n padImage = np.zeros((paddedShape), dtype=np.uint8)\n padImage[1:z + 1, 1:m + 1, 1:n + 1] = image\n return padImage", "def two_dim(a: cython.double[:,:]):\n a[0,0] *= 3\n return a[0,0], a[0,1], a.ndim", "def init_four_d_array(dimens, val):\n w, x, y, z = dimens\n return [[[[val for l in range(z)]\n for k in range(y)]\n for j in range(x)]\n for i in range(w)]", "def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])", "def gen_matrix(e):\n\tif e < 1:\n\t\treturn None\n\tm_list = [[[1, 2], [3, 0]]]\n\t_b = m_list[0]\n\tfor n in xrange(1, e):\n\t\tm = m_list[n - 1]\n\t\tm_list.append(\n\t\t\t[\n\t\t\t\t[4 * i + _b[0][0] for i in m[0]] + [4 * i + _b[0][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[0][0] for i in m[1]] + [4 * i + _b[0][1] for i in m[1]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[0]] + [4 * i + _b[1][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[1][0] 
for i in m[1]] + [4 * i + _b[1][1] for i in m[1]],\n\t\t\t]\n\t\t)\n\treturn m_list", "def create_coord_matrix(site, index):\n mat = np.empty([0,3])\n for res in np.take(site.residues, index):\n ind = [i for i,x in enumerate(res.atoms) if x.type == \"CA\" or x.type == \"CA A\"][0]\n x,y,z = res.atoms[ind].coords\n mat = np.append(mat, [[x,y,z]], axis=0)\n return mat", "def create_matrix(size):\n total_size = size * size\n rand_matrix = np.reshape(\n np.random.choice(\n [0, 1], int(total_size), p=[0.9, 0.1]\n ),\n (size, size)\n )\n return rand_matrix", "def reshape_tensor3d(self, x):\n if self.dim_ordering == 'th':\n tx = K.reshape(x, (-1, self.nb_filter, self.cols * self.rows))\n else:\n tx = K.reshape(x, (-1, self.cols * self.rows, self.nb_filter))\n tx = K.transpose(tx, (0,2,1))\n if self.cov_mode == 'channel' or self.cov_mode =='mean' or self.cov_mode =='pmean':\n return tx\n else:\n return K.transpose(tx, (0,2,1))", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def preprocess_3d(im_stack):\n im_stack /= 127.5\n im_stack -= 1.0\n return im_stack", "def imagesMatrix(path,imageSize = 10304,byteorder = '>'):\n listing = os.listdir(path)\n listing.sort()\n count = 0\n docFiles = []\n for infile in listing:\n count = count + 1\n docFiles.append(infile)\n matrix = np.zeros((imageSize,count))\n for i in range(len(listing)):\n matrix[:,i]=np.asarray(read_pgm(join(path,listing[i]),byteorder)).reshape(-1)\n return matrix,listing", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def get_B3():\n return array([[0.46870499+0.37541453j, 0.19115959-0.39233203j,\n 0.12830659+0.12102382j],\n [0.90249603-0.09446345j, 0.51584055+0.84326503j,\n -0.02582305+0.23259079j],\n [0.75419973-0.52470311j, -0.59173739+0.48075322j,\n 0.51545446-0.21867957j]])", "def compute_limit_matrix(gamma, adjacency, n_states):\n num_states = n_states\n identity = np.eye(num_states)\n return np.linalg.inv(identity - gamma * adjacency / 6)", "def simetrize_3dhistogram(histogram):\n N =len(histogram)\n n_histogram = np.zeros((N,N,N))\n for i in range(N):\n for j in range(i,N):\n for k in range(j, N):\n S = histogram[i][j][k] + histogram[k][i][j] + histogram[j][k][i] + histogram[i][k][j] + histogram[j][i][k] + histogram[k][j][i]\n n_histogram[i][j][k] = S\n n_histogram[k][i][j] = S\n n_histogram[j][k][i] = S\n n_histogram[i][k][j] = S\n n_histogram[j][i][k] = S\n n_histogram[k][j][i] = S\n #a[i][j][k], a[k][i][j], a[j][k][i], a[i][k][j], a[j][i][k], a[k][j][i]\n return n_histogram", "def make_2d(x):\n return x.reshape((1, len(x)))", "def tomatrix(self, ai_patch):\n V = self.space\n# print \"------------\"\n# print \"geo.npatchs : \", V.geometry.npatchs\n# print \"patch id : \", ai_patch\n# print \"dim : \", V.dim\n# print \"shape : \", V.geometry[ai_patch].shape\n if V.dim == 1 :\n [li_n_1] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_1d ( self.id, ai_patch \\\n , li_n_1 )\n if V.dim == 2 :\n [li_n_1, li_n_2] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_2d ( self.id, ai_patch \\\n , li_n_1, li_n_2 )\n if V.dim == 3 :\n [li_n_1, li_n_2, li_n_3] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_3d ( self.id \\\n , ai_patch, li_n_1, li_n_2, li_n_3 )", "def 
asMatrix(self):\n output = np.zeros((self.size[0],self.size[1]))\n for pos in self.matrixDict:\n output[pos[0]][pos[1]] = self.matrixDict[pos]\n return output", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 
0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)" ]
[ "0.64607185", "0.6292983", "0.6173402", "0.6133898", "0.6117917", "0.6092436", "0.60344905", "0.601531", "0.6014885", "0.60019165", "0.5841986", "0.58119726", "0.5777618", "0.5760934", "0.57527536", "0.57133275", "0.5710595", "0.56664544", "0.56459385", "0.5627127", "0.5597834", "0.5593563", "0.55924004", "0.5578567", "0.5571573", "0.5566661", "0.5554361", "0.5541287", "0.55306244", "0.552428", "0.5511196", "0.5502406", "0.55012274", "0.5497133", "0.54924554", "0.5485082", "0.5480516", "0.5480516", "0.5476604", "0.5464654", "0.54604125", "0.54586613", "0.545552", "0.5452466", "0.54466707", "0.54455185", "0.54369", "0.5422578", "0.541958", "0.53884876", "0.53881276", "0.5387819", "0.5379898", "0.5378085", "0.5373369", "0.5370049", "0.5361023", "0.53606266", "0.53597754", "0.53535324", "0.5351539", "0.53512096", "0.5347486", "0.5338817", "0.53386736", "0.5337714", "0.5336883", "0.5329892", "0.5329453", "0.53266025", "0.53186584", "0.5312019", "0.5311685", "0.5309666", "0.53024524", "0.53007376", "0.52955633", "0.5291278", "0.5286838", "0.5286032", "0.5285955", "0.528508", "0.5277807", "0.52720654", "0.52624685", "0.52610815", "0.5253017", "0.5250795", "0.5239227", "0.52340275", "0.5232943", "0.5232666", "0.5225264", "0.52250004", "0.52208996", "0.52141166", "0.5213269", "0.52123356", "0.5210078", "0.5206094", "0.52024204" ]
0.0
-1
Finds the intersection of two lines given in Hesse normal form. Returns closest integer pixel locations.
def intersection(line1, line2):
    rho1, theta1 = line1
    rho2, theta2 = line2
    A = np.array([
        [np.cos(theta1), np.sin(theta1)],
        [np.cos(theta2), np.sin(theta2)]
    ])
    b = np.array([[rho1], [rho2]])
    x0, y0 = np.linalg.solve(A, b)
    x0, y0 = int(np.round(x0)), int(np.round(y0))
    return [x0, y0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n x = int(x[0])\r\n y = int(y[0])\r\n\r\n return [np.round(y), np.round(x)]", "def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def line_intersect(line1, line2):\n b1 = (line1[1][1] - line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - (a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None", "def lineintersect(line1,line2):\n a1, a2, b1, b2=line1[0],line1[1],line2[0],line2[1]\n\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return (float('inf'), float('inf'))\n return (x/z, y/z)", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack([a1, a2, b1, b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return float('inf'), float('inf')\n return x / z, y / z", "def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]", "def get_intersect(a1, a2, b1, b2):\r\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\r\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\r\n l1 = np.cross(h[0], h[1]) # get first line\r\n l2 = np.cross(h[2], h[3]) # get second line\r\n x, y, z = np.cross(l1, l2) # point of intersection\r\n if z == 0: # lines are parallel\r\n return (float('inf'), float('inf'))\r\n return (x/z, y/z)", "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n 
p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def line_intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y", "def intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack((a1, a2, b1, b2)) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return None\n return np.array([x / z, y / z])", "def find_intersections_line_line(line1: Line, line2: Line) -> {Point}:\n if line1.slope != line2.slope:\n if line1.slope is Infinity:\n # Line 1 is vertical, use its x value as the x value to evaluate line2\n x = line1.point1.x\n y = line2(x)\n elif line2.slope is Infinity:\n # Line 2 is vertical, use its x value as the x value to evaluate line1\n x = line2.point1.x\n y = line1(x)\n else:\n x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)\n y = line1(x)\n return {Point(x, y)}\n else:\n return {}", "def line_segment_intersection(line1,\n line2):\n a = float(line1[0][0]*line1[1][1] - line1[0][1]*line1[1][0])\n b = float(line1[0][1] - line1[1][1])\n c = float(line1[1][0] - line1[0][0])\n\n d = float(line2[0][0]*line2[1][1] - line2[0][1]*line2[1][0])\n e = float(line2[0][1] - line2[1][1])\n f = float(line2[1][0] - line2[0][0])\n\n prod = b*f - c*e\n if abs(prod) < 1e-10:\n return (np.inf, np.inf)\n\n xc = (d*c - a*f) / prod\n yc = (a*e - b*d) / prod\n\n sign_x1 = (xc - line1[0][0])*(xc - line1[1][0])\n sign_y1 = (yc - line1[0][1])*(yc - line1[1][1])\n\n if sign_x1 > 1e-10:\n return (np.inf, np.inf)\n if sign_x1 < 1e-10:\n if sign_y1 > 1e-10:\n return (np.inf, np.inf)\n\n sign_x2 = (xc - line2[0][0])*(xc - line2[1][0])\n sign_y2 = (yc - line2[0][1])*(yc - line2[1][1])\n\n if sign_x2 > 1e-10:\n return (np.inf, np.inf)\n if sign_x2 == 1e-10:\n if sign_y2 > 1e-10:\n return (np.inf, np.inf)\n return (int(xc), int(yc))", "def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == 
other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)", "def LineLineIntersection(lineA, lineB):\n lineA = rhutil.coerceline(lineA, True)\n lineB = rhutil.coerceline(lineB, True)\n rc, a, b = Rhino.Geometry.Intersect.Intersection.LineLine(lineA, lineB)\n if not rc: return None\n return lineA.PointAt(a), lineB.PointAt(b)", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def _intersection_homogenous(homog_line_0, homog_line_1):\n # NB: renamed from '_intersection'\n eps = 1e-13\n a,b,c=homog_line_0\n u,v,w=homog_line_1\n D=float(b*u-v*a)\n if abs(D)<eps:\n # parallel lines\n return None, None\n xp=-(w*b-c*v)/D\n yp= (w*a-c*u)/D\n\n return xp, yp", "def line_line_intersection(a1: Vector3, a2: Vector3, b1: Vector3, b2: Vector3) -> Vector3:\n # From https://stackoverflow.com/a/20677983/7245441\n\n def det(a: Vector3, b: Vector3) -> float:\n return a.x * b.y - a.y * b.x\n\n y_diff = Vector3(a1.y - a2.y, b1.y - b2.y, 0)\n x_diff = Vector3(a1.x - a2.x, b1.x - b2.x, 0)\n\n div = det(x_diff, y_diff)\n if div == 0:\n raise Exception(\"Lines do not intersect\")\n\n d = Vector3(det(a1, a2), det(b1, b2), 0)\n x = det(d, x_diff) / div\n y = det(d, y_diff) / div\n\n return Vector3(x, y, 0)", "def intersection(L1, L2):\n D = L1[0] * L2[1] - L1[1] * L2[0]\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x, y\n else:\n return False", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def intersection(a, b):\n x = max(a[0],b[0])\n y = max(a[1],b[1])\n w = min(a[2],b[2]) - x\n h = min(a[3],b[3]) - y\n \n if h<0 or w<0 :\n return 0\n \n return h*w", "def intersection(self, line):\n\t\tdenom = (line.b[1]-line.a[1])*(self.b[0]-self.a[0]) - (line.b[0]-line.a[0])*(self.b[1]-self.a[1])\n\t\t# denominator is 0 if lines are parallel\n\t\tif denom == 0:\n\t\t\treturn None\n\t\t\n\t\tnum_a = (line.b[0]-line.a[0])*(self.a[1]-line.a[1]) - (line.b[1]-line.a[1])*(self.a[0]-line.a[0])\n\t\tnum_b = (self.b[0]-self.a[0])*(self.a[1]-line.a[1]) - (self.b[1]-self.a[1])*(self.a[0]-line.a[0])\n\t\t# if both numerators are 0 then lines are coincident\n\t\tif num_a==0 and num_b==0:\n\t\t\treturn None\n\t\t\t\n\t\tu_a = num_a/denom\n\t\tu_b = num_b/denom\n\t\t\t\n\t\tif 0 <= u_a <= 1 and 0 <= u_b <= 1:\n\t\t\treturn self.a + 
uA*(self.b-self.a)\n\t\telse:\n\t\t\treturn None", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def intersection( l1, l2):\n #coordonees de la lignes 1\n x1, y1, x2, y2 = l1.point\n #coordonees de la lignes 2\n x3, y3, x4, y4 = l2.point\n #\n a1 = y2 - y1\n b1 = x1 - x2\n a2 = y4 - y3\n b2 = x3 - x4\n #\n c1 = a1 * x1 + b1 * y1\n #\n c2 = a2 * x3 + b2 * y3\n #\n det = a1 * b2 - a2 * b1\n assert det, \"lines are parallel\"\n return (1. * (b2 * c1 - b1 * c2) / det, 1. * (a1 * c2 - a2 * c1) / det)", "def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def get_intersect(pair1, pair2):\n # calculate the homogeneous coords\n tmp = np.vstack((pair1, pair2))\n h = np.hstack((tmp, np.ones((4, 1))))\n\n # line through each pair of points\n l1 = np.cross(h[0], h[1])\n l2 = np.cross(h[2], h[3])\n\n # get the intersect\n x, y, z = np.cross(l1, l2)\n x /= z\n y /= z\n return x, y", "def compute_x_intersection(y, x1, x2, y1, y2):\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return ((y - y1) * (x2 - x1) / delta_y) + x1", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def lines_intersect_2d(line1_pt1, line1_pt2, line2_pt1, line2_pt2):\r\n return geometry.gmLinesIntersect(line1_pt1, line1_pt2, line2_pt1, line2_pt2)", "def intersect_shape_by_line(topods_shape, line, low_parameter=0.0, hi_parameter=float(\"+inf\")):\n from OCC.Core.IntCurvesFace import IntCurvesFace_ShapeIntersector\n shape_inter = IntCurvesFace_ShapeIntersector()\n shape_inter.Load(topods_shape, TOLERANCE)\n shape_inter.PerformNearest(line, low_parameter, hi_parameter)\n\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n return (shape_inter.Pnt(1),\n shape_inter.Face(1),\n shape_inter.UParameter(1),\n shape_inter.VParameter(1),\n shape_inter.WParameter(1))", "def intersect_or_on(s1, s2, c1, c2):\n den = float( (c2.y - c1.y) * (s2.x - s1.x) - (c2.x - c1.x) * (s2.y - s1.y) )\n if not den:\n return None\n\n us = ((c2.x - c1.x) * (s1.y - c1.y) - (c2.y - c1.y) * (s1.x - c1.x)) / den\n uc = ((s2.x - s1.x) * (s1.y - c1.y) - (s2.y - s1.y) * (s1.x - c1.x)) / den\n\n if (0 <= us <= 1) and (0 <= uc <= 1):\n #subj and clip line intersect eachother somewhere in the middle\n #this includes the possibility of degenerates (edge intersections)\n x = s1.x + us * (s2.x - s1.x)\n y = s1.y + us * (s2.y - s1.y)\n return (x, y), us, uc\n else:\n return None", "def intersection(self, pn1, pn2, h):\n #print \"intersectionection:\", pn1, pn2, h\n #print \"z: \", (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0]\n return (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0], h", "def intersection_line_line(ab, cd):\n a, b 
= ab\n c, d = cd\n\n line_vector_1 = vector_from_points(a, b)\n line_vector_2 = vector_from_points(c, d)\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n\n normal_1 = cross_vectors(line_vector_1, d_vector)\n normal_2 = cross_vectors(line_vector_2, d_vector)\n plane_1 = (a, normal_1)\n plane_2 = (c, normal_2)\n\n intx_point_line_1 = intersection_line_plane(ab, plane_2)\n intx_point_line_2 = intersection_line_plane(cd, plane_1)\n\n return [intx_point_line_1, intx_point_line_2]", "def intersect_point(self,m1,c1,m2,c2):\n\n x = (c2 - c1)/(m1 - m2)\n y = m1*x + c1\n return x, y", "def find_closest_intersections(wire_data):\n\n # Find the intersection of the two lists\n intersections = find_intersections(wire_data)\n\n # For each intersection measure distance from the centre\n dists = [abs(point[0]) + abs(point[1]) for point in intersections]\n\n return min(dists)", "def find_intersection(coefs_1, coefs_2):\r\n # Form the necessary matrices\r\n A = np.matrix([[coefs_1[0,0], coefs_1[1,0]], [coefs_2[0,0], coefs_2[1,0]]])\r\n B = np.matrix([[coefs_1[2,0]],[coefs_2[2,0]]])\r\n if np.linalg.det(A) == 0:\r\n return None\r\n else:\r\n _intersection = -np.linalg.inv(A) * B\r\n return Point(_intersection[0,0], _intersection[1,0])", "def lineBoxIntersection(w1, w2, b, xmin, ymin, xmax, ymax):\n \n point1 = None\n point2 = None\n if w2 == 0:\n x1a = -(w2*ymin + b)*1.0/w1\n x1b = -(w2*ymax + b)*1.0/w1\n \n point1 = (x1a, ymin)\n point2 = (x1b, ymax)\n else:\n x2a = -(w1*xmin + b)*1.0/w2\n x2b = -(w1*xmax + b)*1.0/w2\n \n if w1 == 0:\n point1 = (xmin, x2a)\n point2 = (xmax, x2b)\n else:\n\n x1a = -(w2*ymin + b)*1.0/w1\n x1b = -(w2*ymax + b)*1.0/w1\n # Point 1\n if x2a < ymin:\n if xmin <= x1a and x1a <= xmax:\n # Point 1 on bottom edge\n point1 = (x1a, ymin)\n elif x2a > ymax:\n if xmin <= x1b and x1b <= xmax:\n # Point 1 on top edge\n point1 = (x1b, ymax)\n else:\n # Point 1 on left edge\n point1 = (xmin, x2a)\n \n # Point 2\n if point1 is not None:\n if x2b < ymin:\n # Point 2 on bottom edge\n point2 = (x1a, ymin)\n elif x2b > ymax:\n # Point 2 on top edge\n point2 = (x1b, ymax)\n else:\n # Point 2 on right edge\n point2 = (xmax, x2b) \n return (point1, point2)", "def find_intersection(A, B, C, D):\n \n a1, b1, c1 = line_equation(A.x, A.y, B.x, B.y)\n a2, b2, c2 = line_equation(C.x, C.y, D.x, D.y)\n \n Y = - np.array([[c1],\n [c2]])\n M = np.array([[a1, b1],\n [a2, b2]])\n\n X = np.linalg.solve(M, Y)\n intersection = Coordinates(X[0], X[1])\n \n return intersection", "def _lines_intersect(self, line1, line2):\n return self._lines_overlap_on_x_axis(line1, line2) and self._lines_overlap_on_y_axis(line1, line2)", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. 
\n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def find_circle_line_intersection(P0, r0, P1):\n\t\n\tx_offset, y_offset = P0\n\tx0, y0 = 0, 0\n\tx1, y1 = P1\n\n\tx1, y1 = x1 - x_offset, y1 - y_offset\n\n\tdx = x1 - x0\n\tdy = y1 - y0\n\tdr = math.sqrt(dx*dx + dy*dy)\n\n\tD = x0*y1 - x1*y0\n\n\tdelta0 = r0*r0*dr*dr - D*D\n\n\tx2 = (D*dy + sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty2 = (D*dx + math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx3 = (D*dy - sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty3 = (D*dx - math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx2 += x_offset\n\tx3 += x_offset\n\ty2 += y_offset\n\ty3 += y_offset\n\n\treturn np.array([[x2, y2], [x3, y3]])", "def _distance2_line_segments(line1, line2, h_line1=None, h_line2=None):\n h_line1 = _homogenous_line(*line1) if h_line1 is None else h_line1\n h_line2 = _homogenous_line(*line2) if h_line2 is None else h_line2\n\n r_11 = _distance2_point_to_h_line(line1[0], h_line2), line2\n r_12 = _distance2_point_to_h_line(line1[1], h_line2), line2\n r_21 = _distance2_point_to_h_line(line2[0], h_line1), line1\n r_22 = _distance2_point_to_h_line(line2[1], h_line1), line1\n\n tests = sorted((r_11,r_12,r_21,r_22), key=lambda x: x[0][0])\n # check for validity starting with the closest point\n for (r2, ps), line in tests:\n if _point_within_bounds(line,ps):\n return r2, ps, line #0 if line==line1 else 1\n\n # none of the corner points is close to any of the line\n # --> line separation is simply the closest distance of\n # corner points\n\n r2, p1, p2 = _distance2_line_endpoints(line1, line2)\n\n return r2, p1, p2", "def line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + 
res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def get_line_intersects_line(self) -> List[List[Line]]:\n intersections = []\n\n for line_bin in self.line_bins.values():\n for connection_pair in itertools.combinations(line_bin, 2):\n line_segments = (\n connection_pair[0].line_segments + connection_pair[1].line_segments\n )\n\n for segment_pair in itertools.combinations(line_segments, 2):\n if check_cross(segment_pair[0], segment_pair[1]):\n intersections.append(connection_pair)\n # for line_bin in self.line_bins.values():\n # segments = []\n # line_idx_map = []\n # for line_1, line_2 in itertools.combinations(line_bin, 2):\n # for segment in line_1.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_1)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n # for segment in line_2.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_2)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n #\n # for collision_point in segments_intersections(segments).values():\n # for intersection in collision_point:\n # intersections.append([line_idx_map[i] for i in intersection])\n return intersections", "def intersection(self, L):\n if self.slope() == L.slope():\n return None\n intpt_xcood = (self.c * L.b - L.c * self.b)/(self.a * L.b - L.a * self.b)\n intpt_ycood = (self.c * L.a - L.c * self.a)/(self.b * L.a - L.b * self.a)\n\n return (intpt_xcood, intpt_ycood)", "def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)", "def compute_y_intersection(x, x1, x2, y1, y2):\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return ((x - x1) * (y2 - y1) / delta_x) + y1", "def points_on_lines(hyperplanes):\n intersections = []\n for row in hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b", "def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], 
point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def get_distance_of_closest_intersections(commands1, commands2):\n path1 = get_one_path(commands1)\n path2 = get_one_path(commands2)\n intersections = set(path1).intersection(set(path2))\n return min(map(lambda x: np.abs(x[0])+np.abs(x[1]), intersections))", "def midpoint_euclidean(self, x1, y1, x2, y2):\n dist_x = abs(x1 - x2) / 2.\n dist_y = abs(y1 - y2) / 2.\n res_x = x1 - dist_x if x1 > x2 else x2 - dist_x\n res_y = y1 - dist_y if y1 > y2 else y2 - dist_y\n return res_x, res_y", "def calc_intersection(terrain_intersection_curves, edges_coords, edges_dir):\n building_line = gp_Lin(gp_Ax1(gp_Pnt(edges_coords[0], edges_coords[1], edges_coords[2]),\n gp_Dir(edges_dir[0], edges_dir[1], edges_dir[2])))\n terrain_intersection_curves.PerformNearest(building_line, 0.0, float(\"+inf\"))\n if terrain_intersection_curves.IsDone():\n npts = terrain_intersection_curves.NbPnt()\n if npts != 0:\n return terrain_intersection_curves.Pnt(1), terrain_intersection_curves.Face(1)\n else:\n return None, None\n else:\n return None, None", "def get_closest_intersection(wire1, wire2):\n pass", "def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t", "def get_steps_of_closest_intersections(commands1, commands2):\n\n path1 = get_one_path(commands1)\n path2 = get_one_path(commands2)\n intersections = set(path1).intersection(set(path2))\n # index is 0 based, therefore +2\n return min(map(lambda x: path1.index(x) + path2.index(x), intersections)) + 2", "def intersection(boxes1: np.array, boxes2: np.array) -> np.array:\n inter_coords = np.clip(\n np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) - np.maximum(boxes1[:, None, :2], boxes2[:, :2]),\n a_min=0,\n a_max=None\n )\n\n return np.prod(inter_coords, 2)", "def _intersect(A, B, C, D):\n d = (B[0] - A[0]) * (D[1] - C[1]) - (D[0] - C[0]) * (B[1] - A[1])\n x = ((B[0] * A[1] - A[0] * B[1]) * (D[0] - C[0]) - (D[0] * C[1] - C[0] * D[1]) * (B[0] - A[0])) / d\n y = ((B[0] * A[1] - A[0] * B[1]) * (D[1] - C[1]) - (D[0] * C[1] - C[0] * D[1]) * (B[1] - A[1])) / d\n return (np.round(x, 6), np.round(y, 6))", "def __two_nearest_line__(b1, b2):\n distances = []\n for p in b1:\n for q in b2:\n distances.append([__distance__(p, q), (p, q)])\n distances = sorted(distances, key=lambda d: d[0])\n a1, b1 = distances[0][1][0], distances[0][1][1]\n a2, b2 = distances[1][1][0], distances[1][1][1]\n a1 = (a1[0] + (a2[0] - a1[0]) * 1 / 14, a1[1] + (a2[1] - a1[1]) * 1 / 14)\n b1 = (b1[0] + (b2[0] - b1[0]) * 1 / 14, b1[1] + (b2[1] - b1[1]) * 1 / 14)\n a2 = (a2[0] + (a1[0] - a2[0]) * 1 / 14, a2[1] + (a1[1] - a2[1]) * 1 / 14)\n b2 = (b2[0] + (b1[0] - b2[0]) * 1 / 14, b2[1] + (b1[1] - b2[1]) * 1 / 14)\n return (a1, b1), (a2, b2)", "def get_intersection_points(lines, debug_img=None):\n\n # Convert [a,b,c,d] to [(a,b), (b,c), (c,d), (d,a)]\n line_pairs = list(zip(lines, lines[1:]+lines[:1]))\n\n corners = [get_intersection_point(*p) for p in line_pairs]\n\n if debug_img is not None:\n int_corners = 
np.array(corners, np.int32)\n draw_corners(debug_img, int_corners, (0, 255, 0))\n\n return corners", "def _distance2_line_endpoints(line1, line2):\n (A,B),(C,D) = line1, line2\n R2=lambda u,v: (u[0]-v[0])**2+(u[1]-v[1])**2\n pairs = zip((A,A,B,B),(C,D,C,D))\n r2 = [R2(pair[0],pair[1]) for pair in pairs]\n mini=sorted(zip(r2,pairs),key=lambda a,b: a)[0]\n #R2_min = min((R2(A,C), R2(A,D), R2(B,C), R2(B,D)))\n return mini[0], mini[1][0], mini[1][1]", "def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y", "def intersection_with(self, other):\n i = self.line_intersection_with(other)\n if i is None:\n return None# parallel lines\n\n if self.contains(i) and other.contains(i) and not (i in self.endpoints and i in other.endpoints):\n return i\n return None", "def intersect_ext(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0, 0\n dp = line.p - self.p\n c2 = self.cross_z\n u = c.dot(dp) / d\n v = c2.dot(dp) / d\n return u > 0 and v > 0 and u < 1 and v < 1, self.lerp(u), u, v", "def getIntersectPoint(p1, p2, p3, p4):\n points = p1, p2, p3, p4\n gradients = (\n CollisionUtility.calculate_gradient(p1, p2), CollisionUtility.calculate_gradient(p3, p4)\n )\n\n # See if the the lines are parallel\n if gradients[0] != gradients[1]:\n return CollisionUtility.calculate_not_parallel_intersection(points, gradients)\n else:\n return CollisionUtility.calculate_parallel_intersection(points, gradients)", "def ortho_line_cut(self):\n x_mid_left, y_mid_left = self.midpoint(0,1) # Computes the mid point of the LHS face of the edm cut\n x_mid_right, y_mid_right = self.midpoint(2,3) # Computes the mid point of the RHS face of the edm cut\n\n ave_grad = self.average_grad()\n m_horizontal = -1/ave_grad #90 degrees rotation of the vertical line average gradient\n\n horizontal_eq_c = y_mid_right - m_horizontal*x_mid_right # y offset of horizontal line\n vertical_eq_left_c = y_mid_left - ave_grad * x_mid_left # y offset of vertical line on left side\n\n x_intersect, y_intersect = self.intersect_point(m_horizontal, horizontal_eq_c, ave_grad,vertical_eq_left_c)\n\n\n coordleft = [x_intersect, y_intersect]\n coordright =[x_mid_right, y_mid_right]\n\n dist = self.distance(coordleft, coordright)\n\n return coordleft, coordright, dist", "def _intersection_forward_line_segment(semiinf_line1, line2, semiinf_h_line1=None, h_line2=None):\n\n semiinf_h_line1 = _homogenous_line(*semiinf_line1) if semiinf_h_line1 is None else semiinf_h_line1\n h_line2 = _homogenous_line(*line2) if h_line2 is None else h_line2\n\n P = _intersection_homogenous(semiinf_h_line1, h_line2)\n if not _point_within_bounds(line2,P):\n # semi-infinite line does not intersect the particular SEGMENT of line2\n return None, P\n\n A,B = semiinf_line1\n if abs(B[1]-A[1])>=abs(B[0]-A[0]):\n t = (P[1]-A[1])/(B[1]-A[1])\n else:\n t = (P[0]-A[0])/(B[0]-A[0])\n\n if t>0: # intersection lies behind A, i.e. 
toward or beyond B\n return None, P\n\n return (P[0]-A[0])**2+(P[1]-A[1])**2, P", "def find_intersection(set_1, set_2):\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def lineLineIntersectXY(l1,l2,inside=True,params=False):\n\n x1=l1[0][0]\n y1=l1[0][1]\n z1=l1[0][2]\n \n x2=l1[1][0]\n y2=l1[1][1]\n z2=l1[1][2]\n\n x3=l2[0][0]\n y3=l2[0][1]\n z3=l2[0][2]\n \n x4=l2[1][0]\n y4=l2[1][1]\n z4=l2[1][2]\n\n ## check for x,y planar consistency\n if abs(z2-z1) > epsilon or abs(z3-z1) > epsilon or abs(z4-z1) > epsilon:\n raise ValueError('lines not in same x-y plane')\n\n ## do lines intersect anywhere?\n denom=(x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)\n if denom*denom < epsilon:\n return False\n\n ## the lines do intersect, so let's see if they intersect\n ## inside both line segments\n t = ((x1-x3)*(y3-y4) - (y1-y3)*(x3-x4))/denom\n u = -1 * ((x1-x2)*(y1-y3) - (y1-y2)*(x1-x3))/denom\n\n ## return the paramater space intersection\n if params:\n return [t,u]\n \n ## do we care about falling inside the line segments? if so,\n ## check that the intersection falls within\n if inside and ( t < 0.0 or t > 1.0 or u < 0.0 or u > 1.0):\n return False\n\n return [x1 + t*(x2-x1), y1+t*(y2-y1), z1, 1.0]", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def find_intersection(set_1, set_2):\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)", "def get_intersect_lines(self, p10, p11, p20, p21):\n t = (p20 - p10) / (p11 - p10 - p21 + p20)\n return p10 + t * (p11 - p10)", "def closest_distance_between_lines(\n a0,\n a1,\n b0,\n b1,\n clamp_all=False,\n clamp_a0=False,\n 
clamp_a1=False,\n clamp_b0=False,\n clamp_b1=False,\n):\n\n # If clampAll=True, set all clamps to True\n if clamp_all:\n clamp_a0 = True\n clamp_a1 = True\n clamp_b0 = True\n clamp_b1 = True\n\n a0 = np.asarray(a0)\n a1 = np.asarray(a1)\n b0 = np.asarray(b0)\n b1 = np.asarray(b1)\n\n # Calculate denomitator\n A = a1 - a0\n B = b1 - b0\n magA = np.linalg.norm(A)\n magB = np.linalg.norm(B)\n\n _A = A / magA\n _B = B / magB\n\n # due to numerical instabilities there is a test for the case _A and _B are almost parallel\n if not ((np.allclose(_A, _B) or np.allclose(_A, -_B))):\n # non parallel\n # worsk also for strong parallel lines\n cross = np.cross(_A, _B)\n denom = np.linalg.norm(cross) ** 2\n else:\n # almost paralel vectors\n # this is due to numerical stability\n denom = 0\n\n # If lines are parallel (denom=0) test if lines overlap.\n # If they don't overlap then there is a closest point solution.\n # If they do overlap, there are infinite closest positions, but there is a closest distance\n if not denom:\n d0 = np.dot(_A, (b0 - a0))\n\n # Overlap only possible with clamping\n if clamp_a0 or clamp_a1 or clamp_b0 or clamp_b1:\n d1 = np.dot(_A, (b1 - a0))\n\n # Is segment B before A?\n if d0 <= 0 >= d1:\n if clamp_a0 and clamp_b1:\n if np.absolute(d0) < np.absolute(d1):\n return a0, b0, np.linalg.norm(a0 - b0)\n return a0, b1, np.linalg.norm(a0 - b1)\n\n # Is segment B after A?\n elif d0 >= magA <= d1:\n if clamp_a1 and clamp_b0:\n if np.absolute(d0) < np.absolute(d1):\n return a1, b0, np.linalg.norm(a1 - b0)\n return a1, b1, np.linalg.norm(a1 - b1)\n\n # Segments overlap, return distance between parallel segments\n return None, None, np.linalg.norm(((d0 * _A) + a0) - b0)\n\n # Lines criss-cross: Calculate the projected closest points\n t = b0 - a0\n detA = np.linalg.det([t, _B, cross])\n detB = np.linalg.det([t, _A, cross])\n\n t0 = detA / denom\n t1 = detB / denom\n\n pA = a0 + (_A * t0) # Projected closest point on segment A\n pB = b0 + (_B * t1) # Projected closest point on segment B\n\n # Clamp projections\n if clamp_a0 or clamp_a1 or clamp_b0 or clamp_b1:\n if clamp_a0 and t0 < 0:\n pA = a0\n elif clamp_a1 and t0 > magA:\n pA = a1\n\n if clamp_b0 and t1 < 0:\n pB = b0\n elif clamp_b1 and t1 > magB:\n pB = b1\n\n # Clamp projection A\n if (clamp_a0 and t0 < 0) or (clamp_a1 and t0 > magA):\n dot = np.dot(_B, (pA - b0))\n if clamp_b0 and dot < 0:\n dot = 0\n elif clamp_b1 and dot > magB:\n dot = magB\n pB = b0 + (_B * dot)\n\n # Clamp projection B\n if (clamp_b0 and t1 < 0) or (clamp_b1 and t1 > magB):\n dot = np.dot(_A, (pB - a0))\n if clamp_a0 and dot < 0:\n dot = 0\n elif clamp_a1 and dot > magA:\n dot = magA\n pA = a0 + (_A * dot)\n\n return pA, pB, np.linalg.norm(pA - pB)", "def _distance2_point_to_h_line(point, h_line):\n a,b,c = h_line\n x0,y0 = point\n # solve for equality\n # r^2 = (x-x0)^2 + (y-y0)^2\n # ax + by + c = 0\n # --> 2nd order polynomial\n # --> find place of exactly one solution, i.e.\n # radicant of p-q formula is identical zero\n # if radicant is zero, then\n ys = ((a*x0-c)*b + a**2*y0)/(a**2+b**2)\n # or\n xs = ((b*y0-c)*a + b**2*x0)/(a**2+b**2)\n # for a != 0\n if abs(a)>=abs(b):\n R2 = (x0-c/a)**2+y0**2 - (1.+(b/a)**2)*ys**2\n else:\n R2 = (y0-c/b)**2+x0**2 - (1.+(a/b)**2)*xs**2\n R2 = R2 if abs(R2)>1e-13 else 0.\n return R2, (xs, ys)", "def get_line_circle_intersections(A, B, C, r):\n Lx = B[0] - A[0]\n Ly = B[1] - A[1]\n Lz = B[2] - A[2]\n\n # stranger things\n D = Lx**2 + Ly**2\n E = 2 * ( Lx * (A[0] - C[0]) + Ly * (A[1] - C[1]) )\n F = (\n (A[0] - 
C[0])**2\n + (A[1] - C[1])**2\n - r**2\n )\n det = E**2 - 4 * D * F\n \n # declare null vectors\n P1 = [0, 0, 0]\n P2 = [0, 0, 0]\n t1 = t2 = None\n eps = .00001\n if ( not (D <= eps) or (det < 0) ):\n if det == 0:\n print \"tangential intersection found\",\n t1 = t2 = -E / (2*D)\n else:\n print \"pass-through intersection found\",\n t1 = ( (-E + math.sqrt(det)) / (2 * D) )\n t2 = ( (-E - math.sqrt(det)) / (2 * D) )\n P1[0] = A[0] + t1 * Lx\n P1[1] = A[1] + t1 * Ly\n P1[2] = A[2] + t1 * Lz\n P2[0] = A[0] + t2 * Lx\n P2[1] = A[1] + t2 * Ly\n P2[2] = A[2] + t2 * Lz\n else:\n print \"no intersections are available\",\n\n return P1, P2", "def intersects(a0, a1, b0, b1):\n # First line is vertical\n if a0[0] == a1[0]:\n # Both lines are vertical\n if b0[0] == b1[0]:\n return (a0[0] == b0[0]) and (in_range(b0[1], a0[1], a1[1]) or in_range(b1[1], a0[1], a1[1]))\n eqn = get_eqn(b0, b1)\n y = apply_eqn(eqn, a0[0])\n return in_range(y, a0[1], a1[1])\n # Only second line is vertical\n if b0[0] == b1[0]:\n eqn = get_eqn(a0, a1)\n y = apply_eqn(eqn, b0[0])\n return in_range(y, b0[1], b1[1])\n # Parallel lines\n eqn0 = get_eqn(a0, a1)\n eqn1 = get_eqn(b0, b1)\n if eqn0[0] == eqn1[0]:\n if eqn0[1] != eqn1[1]:\n return False\n return in_range(a0[0], b0[0], b1[0]) or in_range(a1[0], b0[0], b1[0])\n # Get intersection\n i = intersection(eqn0, eqn1)\n # Check if intersection is between end points\n return in_range(i[0], a0[0], a1[0]) and in_range(i[0], b0[0], b1[0]) and in_range(i[1], a0[1], a1[1]) and in_range(i[1], b0[1], b1[1])", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def intersect_ext(self, line):\n res, p, v = self.intersect(line)\n v0 = self.p0 - self.c\n v1 = p - self.c\n u = self.signed_angle(v0, v1) / self.da\n return res and u > 0 and v > 0 and u < 1 and v < 1, p, u, v", "def lines_intersect(x1, y1, x2, y2, a1, b1, a2, b2):\n\n\t# Ensures that x1 < x2 \n\t(x1, x2, y1, y2) = (x1, x2, y1, y2) if x1 < x2 else (x2, x1, y2, y1) \n\t(a1, a2, b1, b2) = (a1, a2, b1, b2) if a1 < a2 else (a2, a1, b2, b1) \n\t\n\t# Make lines same domain\n\tif x1 > a1:\n\t\tif x1 > a2 or a1 == a2:\n\t\t\treturn False \n\n\t\ta = x1 \n\telse:\n\t\tif a1 > x2 or x1 == x2:\n\t\t\treturn False\n\t\t\n\t\ta = a1 \n\n\tif x2 < a2:\n\t\tif x2 < a1 or a1 == a2:\n\t\t\treturn False \n\n\t\tb = x2\n\telse:\n\t\tif a2 < x1 or x1 == x2:\n\t\t\treturn False \n\n\t\tb = a2\n\n\tif x1 != x2:\n\t\tx1, y1, x2, y2 = trim_line(x1, y1, x2, y2, a, b)\n\tif a1 != a2:\n\t\ta1, b1, a2, b2 = trim_line(a1, b1, a2, b2, a, b)\n\n\t\n\treturn (y1 >= b1 and y2 <= b2) or (y1 <= b1 and y2 >= b2)", "def find_intersection(mask_part, houghlinePara=50):\n edge = cv.Canny(mask_part, 0, 1)\n lines = cv.HoughLines(edge, 1, np.pi / 180, houghlinePara)\n\n rhos = []\n thetas = []\n for line in lines:\n rho, theta = line[0]\n rhos.append(rho)\n thetas.append(theta)\n\n thetas = np.array(thetas)\n rhos = np.array(rhos)\n mean = np.mean(thetas)\n inx = thetas > mean\n\n thetas1 = thetas[inx]\n rhos1 = rhos[inx]\n thetas2 = thetas[1 - inx != 0]\n rhos2 = rhos[1 - inx != 0]\n # detect outliers\n inx2 = np.abs(rhos1-np.mean(rhos1)) <= np.std(rhos1)\n rhos1 = rhos1[inx2]\n thetas1 = thetas1[inx2]\n inx3 = np.abs(rhos2-np.mean(rhos2)) <= np.std(rhos2)\n rhos2 = rhos2[inx3]\n thetas2 = thetas2[inx3]\n\n theta1 = np.mean(thetas1)\n rho1 = np.mean(rhos1)\n theta2 = np.mean(thetas2)\n rho2 = np.mean(rhos2)\n\n k1 = -1 / np.tan(theta1)\n k2 = -1 / np.tan(theta2)\n b1 = rho1 * np.sin(theta1) - k1 * rho1 * np.cos(theta1)\n b2 = rho2 * 
np.sin(theta2) - k2 * rho2 * np.cos(theta2)\n\n x_cross = (b2-b1) / (k1-k2)\n y_cross = (k1 * b2 - k2 * b1) / (k1 - k2)\n # return thetas1, thetas2\n return x_cross, y_cross", "def get_shape_line_intersections(cls, shape, line):\n shape_inter = IntCurvesFace_ShapeIntersector()\n shape_inter.Load(shape, 1e-3)\n shape_inter.PerformNearest(line, float(\"-inf\"), float(\"+inf\"))\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n intersections = [(shape_inter.Pnt(i), shape_inter.Face(i), line) for i in\n range(1, shape_inter.NbPnt() + 1)] # Indices start at 1 :(\n return intersections", "def test_intersect_line_in_one_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 1", "def find_intersection(center0, direction0, center1, direction1):\n # c0 + d0 t = c1 + d1 s\n # (-d0) t + (d1) s = c0 - c1\n # [-d0, d1] [t,s]^T = delta\n A = np.array([-direction0, direction1]).T\n delta = center0 - center1\n # Unpack M = A^T * A:\n # [[a, b],\n # [c, d]]\n (a, b), (c, d) = A.T.dot(A)\n # Inverse of M:\n # 1/ det(M) [[ d, -b],\n # [-c, a]]\n M_inv = np.array([[d, -b], [-c, a]]) / (a * d - b * c)\n t, s = M_inv.dot(A.T.dot(delta))\n return t, s", "def intersectConics(E1, E2):\n\n P = np.array([])\n r1 = matrix_rank(E1)\n r2 = matrix_rank(E2)\n \n if(r1==3 and r2==3):\n P = completeIntersection(E1,E2) \n else:\n if (r2 < 3): #E2 is degenerate\n defE = E2\n fullE = E1\n else:\n defE = E1 #E1 is degenerate\n fullE = E2\n m, l = decomposeDegenerateConic(defE)\n P1 = intersectConicLine(fullE,m)\n P2 = intersectConicLine(fullE,l)\n P = np.array([P1, P2])\n points_x = []\n points_y = []\n for i in range(2):\n P1 = P[i]\n if(P1.size!=0):\n for j in range(P1.shape[0]):\n points_x.append(P1[j,0]/P1[j,2])\n points_y.append(P1[j,1]/P1[j,2])\n return points_x, points_y", "def solveIntersection(edge, x1, y1, x2, y2,\n left, right, top, bottom):\n\n x = 0\n y = 0\n CP_LEFT = 0\n CP_RIGHT = 1\n CP_BOTTOM = 2\n CP_TOP = 3\n m = 0\n if(x2 != x1):\n m = (y2 - y1) / (x2 - x1)\n if(edge == CP_LEFT):\n x = left\n y = y1 + m * (x - x1)\n elif(edge == CP_RIGHT):\n x = right\n y = y1 + m * (x - x1)\n elif (edge == CP_BOTTOM):\n y = bottom\n if(x1 != x2):\n x = x1 + (1.0 / m) * (y - y1)\n else:\n x = x1\n elif (edge == CP_TOP):\n y = top;\n if(x1 != x2):\n x = x1 + (1.0 / m) * (y - y1)\n else:\n x = x1\n return x, y", "def line_intersection_with(self, other):\n # solve following system :\n # intersection = start of self + alpha * direction of self\n # intersection = start of other + beta * direction of other\n directions = [s.endpoints[1] - s.endpoints[0] for s in (self, other)]\n denominator = directions[0].cross_product(directions[1])\n if abs(denominator) < 0.000001:\n # almost parallel lines\n return\n start_diff = other.endpoints[0] - self.endpoints[0]\n alpha = start_diff.cross_product(directions[1]) / denominator\n return self.endpoints[0] + directions[0] * alpha", "def inside_of_line_2d(pt1, pt2, reference_point, pt, tol=None):\r\n if tol is None:\r\n tol = get_tol_2d()\r\n return geometry.gmInsideOfLineWithTol(pt1, pt2, reference_point, pt, tol)", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def free_line(p, 
eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - s1y))) / segl2\n ordered_point = sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def intersection(eqn0, eqn1):\n x = (eqn1[1] - eqn0[1]) / (eqn0[0] - eqn1[0])\n return (x, apply_eqn(eqn0, x))", "def line_equation(x1, y1, x2, y2):\n \n a = y2 - y1\n b = x1 - x2\n c = x2*y1 - x1*y2\n return a, b, c", "def _intersection(self, ix, iy):\n im = ix + iy # cv2.bitwise_or(ix, iy)\n # freq = cytoolz.frequencies(im.ravel())\n # hx = freq.get(1, 0) # x alone\n # hy = freq.get(2, 0) # y alone\n # ha = freq.get(3, 0) # x & y\n\n freq = np.bincount(im.ravel().astype(np.int64))\n hx = freq[1] if len(freq) > 1 else 0\n hy = freq[2] if len(freq) > 2 else 0\n ha = freq[3] if len(freq) > 3 else 0\n\n return hx, hy, ha", "def distance(p1, p2):\n if isparallel(p1, p2):\n # lines are parallel\n l = np.cross(p1.w, p1.v - p2.v * np.dot(p1.w, p2.w) / dot(p2.w, p2.w)) / np.linalg.norm(p1.w)\n else:\n # lines are not parallel\n if abs(p1 * p2) < 10*_eps:\n # lines intersect at a point\n l = 0\n else:\n # lines don't intersect, find closest distance\n l = abs(p1 * p2) / np.linalg.norm(np.cross(p1.w, p2.w))**2\n return l" ]
[ "0.77867657", "0.76443017", "0.75186956", "0.7482537", "0.74320704", "0.72984135", "0.72857475", "0.7276795", "0.7244609", "0.72418344", "0.71921796", "0.71198523", "0.7066881", "0.70585316", "0.70348686", "0.70186913", "0.7002847", "0.6971487", "0.6970397", "0.6936027", "0.67533684", "0.6712502", "0.6681787", "0.6633627", "0.65797466", "0.65453845", "0.65294474", "0.6528463", "0.6464413", "0.64496416", "0.63964796", "0.63906604", "0.6357088", "0.63311595", "0.63309145", "0.6307957", "0.62950754", "0.6267432", "0.626676", "0.62601596", "0.624548", "0.6213957", "0.62119514", "0.6211829", "0.6209128", "0.62011725", "0.61981475", "0.6197942", "0.61947244", "0.6183518", "0.61760056", "0.61753213", "0.6162164", "0.61393464", "0.6130166", "0.61296993", "0.6116681", "0.6108639", "0.6092719", "0.6070354", "0.6064112", "0.6050593", "0.6048575", "0.60408235", "0.60340834", "0.60335225", "0.6023461", "0.602071", "0.6017057", "0.6007155", "0.6002909", "0.59949857", "0.599021", "0.59745455", "0.59460986", "0.59460986", "0.59460986", "0.59460986", "0.5938596", "0.5935596", "0.59287494", "0.5922023", "0.59201443", "0.59164715", "0.5913496", "0.5907541", "0.59049356", "0.5898893", "0.5896933", "0.5882786", "0.5874145", "0.58622956", "0.58599436", "0.5856215", "0.5841453", "0.58370554", "0.583701", "0.5836832", "0.58317024", "0.581711" ]
0.763136
2
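
Most of the negative snippets in the record above implement some variant of 2-D line or segment intersection (parametric solve, parallel check, in-range test). For orientation, here is a minimal self-contained sketch of that core computation; the function name, tolerance, and the endpoint-inclusive convention are illustrative assumptions and are not taken from any snippet in the list.

def segment_intersection(p1, p2, p3, p4, eps=1e-12):
    """Return the intersection point of segments p1-p2 and p3-p4, or None.

    Each point is an (x, y) tuple. Parallel or non-overlapping segments
    yield None; touching endpoints count as an intersection.
    """
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    x4, y4 = p4

    # Denominator of the parametric solution; (near) zero means parallel lines.
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if abs(denom) < eps:
        return None

    # Parameters along each segment; both must lie in [0, 1] for the
    # intersection to fall inside both segments.
    t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / denom
    u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / denom
    if not (0.0 <= t <= 1.0 and 0.0 <= u <= 1.0):
        return None

    return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))


# Example: the unit square's diagonals cross at (0.5, 0.5).
assert segment_intersection((0, 0), (1, 1), (0, 1), (1, 0)) == (0.5, 0.5)
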
Check correctness of `limiter` value returned.
def test_identify_limit(limit, all, expected): assert identify_limit(limit, all) == expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def check(self):\n self.__check_request_limit()", "def _is_limited(request, rate, rl):\n def inner(*args, **kwargs):\n is_limited = rl.is_limited(*args, **kwargs)\n\n if is_limited:\n messages.error(\n request,\n _(\"Too many submissions, wait %(time)s.\") % {\n 'time': rate.split('/')[1]})\n\n return is_limited\n\n return inner", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def check_limit(limit_value):\n try:\n limit = int(limit_value)\n except ValueError:\n raise SystemExit('The argument \"limit\" should be a positive number')\n else:\n if limit < 1:\n raise SystemExit('The argument \"limit\" should be greater than 0')\n else:\n return limit", "def check(self):\n logging.info(\"rate limit remaining %s\" % self.remaining)\n while self.remaining <= 1:\n now = time.time()\n logging.debug(\"rate limit < 1, now=%s and reset=%s\", now,\n self.reset)\n if self.reset and now < self.reset:\n # padded with 5 seconds just to be on the safe side\n secs = self.reset - now + 5\n logging.info(\"sleeping %s seconds for rate limiting\" % secs)\n time.sleep(secs)\n else:\n # sleep a second before checking again for new rate limit\n time.sleep(1)\n # get the latest limit\n self.ping()\n self.remaining -= 1", "def _check_rate_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n # TODO distinct up/down rates\n # check limiting rate for resource flow in/out, if any\n if self._rate:\n request = {res: None}\n inputs = {'request': request,\n 'meta': meta,\n 'raven_vars': 
raven_vars,\n 'dispatch': dispatch,\n 't': t}\n max_rate = self._rate.evaluate(inputs, target_var=res)[0][res]\n delta = np.sign(amt) * min(max_rate, abs(amt))\n print('max_rate in _check_rate_limit',max_rate, 'delta (min of maxrate and abs(amt)',delta)\n return {res: delta}, meta\n return {res: amt}, meta", "def _validate_clear_args(limit):\n min_limit = 1\n max_limit = 20\n default_error = f\"[Limit] The `limit` argument must be a number between {min_limit} and {max_limit}\"\n try:\n limit = int(limit)\n except (ValueError, TypeError):\n return default_error\n if not (min_limit <= limit <= max_limit):\n return default_error\n return None", "def check_limits(self):\n\n too_hi = False\n too_lo = False\n if is_empty(self.value):\n return None\n\n # Check for limit violations.\n if is_valid(self.limit_lower):\n too_lo = (self.value < self.limit_lower)\n if is_valid(self.limit_upper):\n too_hi = (self.value > self.limit_upper)\n\n # If a limit is violated, return the designated limit action.\n if too_hi or too_lo:\n return self.limit_action\n else:\n return None", "def check_limit(limit):\n if limit:\n limit = int(limit)\n if limit > settings.MAX_LISTING_LIMIT or \\\n limit < settings.MIN_LISTING_LIMIT:\n # SuspiciousOperation raises 400 bad request in Django 1.11.\n # https://docs.djangoproject.com/en/1.11/ref/views/#the-400-bad-request-view\n raise SuspiciousOperation()\n return limit\n return settings.DEFAULT_LISTING_LIMIT", "def test_fail_on_rate_limit_exceeded(self):\n\n # setup 'short' limit for testing\n self.client.protocol.rate_limiter.rules = []\n self.client.protocol.rate_limiter.rules.append(\n XRateLimitRule(\n {\n \"short\": {\n \"usage\": 0,\n \"limit\": 600,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n \"long\": {\n \"usage\": 0,\n \"limit\": 30000,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n }\n )\n )\n\n # interact with api to get the limits\n self.client.get_athlete()\n\n # access the default rate limit rule\n rate_limit_rule = self.client.protocol.rate_limiter.rules[0]\n\n # get any of the rate limits, ex the 'short'\n limit = rate_limit_rule.rate_limits[\"short\"]\n\n # get current usage\n usage = limit[\"usage\"]\n print(\"last rate limit usage is {0}\".format(usage))\n\n # for testing purpses set the limit to usage\n limit[\"limit\"] = usage\n print(\"changing limit to {0}\".format(limit[\"limit\"]))\n\n # expect exception because of RateLimit has been\n # exceeded (or reached max)\n with self.assertRaises(exc.RateLimitExceeded):\n self.client.get_athlete()\n\n # request fired to early (less than 5 sec) causes timeout exception\n with self.assertRaises(exc.RateLimitTimeout):\n self.client.get_athlete()\n\n # once rate limit has exceeded wait until another request is possible\n # check if timeout has been set\n self.assertTrue(rate_limit_rule.limit_timeout > 0)\n print(\"limit timeout {0}\".format(rate_limit_rule.limit_timeout))\n\n # resetting limit\n # simulates Strava api - it would set the usage again to 0\n limit[\"limit\"] = 600\n print(\"resetting limit to {0}\".format(limit[\"limit\"]))\n\n try:\n # waiting until timeout expires\n time.sleep(5)\n\n # this time it should work again\n self.client.get_athlete()\n self.assertTrue(\"No exception raised\")\n except exc.RateLimitExceeded as e:\n self.fail(\"limiter raised RateLimitTimeout unexpectedly!\")\n\n # continue other tests with DefaultRateLimiter\n print(\"setting default rate limiter\")\n self.client.protocol.rate_limiter = DefaultRateLimiter()", "def _check_capacity_limit(self, res, amt, balance, meta, 
raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def verify_is_allowed(self):\n if (\n self.throttling_enabled\n and self.throttling_failure_count > 0\n and self.throttling_failure_timestamp is not None\n ):\n now = timezone.now()\n delay = (now - self.throttling_failure_timestamp).total_seconds()\n # Required delays should be 1, 2, 4, 8 ...\n delay_required = self.get_throttle_factor() * (\n 2 ** (self.throttling_failure_count - 1)\n )\n if delay < delay_required:\n return (\n False,\n {\n 'reason': VerifyNotAllowed.N_FAILED_ATTEMPTS,\n 'failure_count': self.throttling_failure_count,\n 'locked_until': self.throttling_failure_timestamp\n + timedelta(seconds=delay_required),\n },\n )\n\n return super().verify_is_allowed()", "def allowedLimit(self, number, msg=None):\n return allowed_limit(number, msg)", "def check_rate_limit(self):\n # Already received 429 from server\n if self.rate_limit_sleep:\n msg = f\"请求过于频繁,已被BitMEX限制,请等待{self.rate_limit_sleep}秒后再试\"\n self.gateway.write_log(msg)\n return False\n # Just local request limit is reached\n elif not self.rate_limit_remaining:\n msg = \"请求频率太高,有触发BitMEX流控的风险,请稍候再试\"\n self.gateway.write_log(msg)\n return False\n else:\n self.rate_limit_remaining -= 1\n return True", "def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid", "def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False", "def near_quota(response, margin=1):\n try:\n user_limit = int(response.getheader('X-RateLimit-UserLimit'))\n user_remaining = int(response.getheader('X-RateLimit-UserRemaining'))\n client_limit = int(response.getheader('X-RateLimit-ClientLimit'))\n client_remaining = int(response.getheader('X-RateLimit-ClientRemaining'))\n except (ValueError, TypeError):\n return\n\n if user_remaining <= margin:\n return \"UserRemaining: {}, UserLimit: {}\".format(\n user_remaining, user_limit)\n elif client_remaining <= margin:\n return \"ClientRemaining: {}, ClientLimit: {}\".format(\n client_remaining, client_limit)\n else:\n return False", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 
(being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def handle_rate_limit(rate_limit):\n remaining = rate_limit['remaining']\n limit = rate_limit['limit']\n percent_remaining = remaining / limit\n reset_at = rate_limit['resetAt']\n if percent_remaining < 0.15:\n reset_at = datetime.strptime(reset_at, '%Y-%m-%dT%H:%M:%SZ')\n current_time = datetime.now()\n time_diff = reset_at - current_time\n seconds = time_diff.total_seconds()\n\n print(f'Rate Limit hit. Waiting for reset.\\nProcess will continue at: {reset_at}')\n\n time.sleep(seconds)", "def check_constrained(self, limit=None):\n\n # Set the 'well-constrained' limit at 10% (arbitrary) if not provided.\n limit = (Decimal(0.1) if not limit else Decimal(limit))\n\n if is_empty(self.value) or is_empty(self.uncertainty):\n return False\n elif self.uncertainty > (Decimal(self.value) * Decimal(limit)):\n self.well_constrained = False\n else:\n self.well_constrained = True", "def is_unlimited(self) -> bool:\n return not self.is_limited", "def _throttle_check(self, request):\n identifier = self._meta.authentication.get_identifier(request)\n\n # Check to see if they should be throttled.\n if self._meta.throttle.should_be_throttled(identifier):\n # Throttle limit exceeded.\n raise ImmediateHttpResponse(response=http.HttpTooManyRequests())", "def testVerifyArbitraryLimits(self):\n\t\tpolicy = MinimumPlaybackPolicy(3)\n\t\tfor x in range(0, 3):\n\t\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\t\tself.failIf(policy.isReadyForRemoval)\n\t\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)", "def _check_items_limit(self):\n if self.items_limit and self.items_limit == self.get_metadata('items_count'):\n raise ItemsLimitReached('Finishing job after items_limit reached:'\n ' {} items written.'.format(self.get_metadata('items_count')))", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def test_can_not_exceed_quota(self):\n create_test_booking(self.user, self.first_day, 8, facility='g')\n create_test_booking(self.user, self.first_day, 9, facility='0')\n create_test_booking(self.user, self.first_day, 10, facility='g')\n create_test_booking(self.user, self.first_day, 11, facility='h')\n create_test_booking(self.user, self.first_day, 12, facility='h')\n create_test_booking(self.user, self.first_day, 13, facility='g')\n create_test_booking(self.user, self.first_day, 14, facility='x')\n create_test_booking(self.user, self.first_day, 15, facility='y')\n create_test_booking(self.user, self.first_day, 16, facility='g')\n create_test_booking(self.user, self.first_day, 17, facility='g')\n\n date = datetime(2030, 1, 1, 8)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], 0)\n self.assertEqual(type(context[\"info\"]), QuotaExceededAlert)", "def _check_throttles_decorator(func):\n @wraps(func)\n def _decorated(*args, **kwargs):\n # Skip the throttle check entirely if we've disabled rate limiting.\n # Otherwise, perform the checks (as usual)\n if 
RateLimitConfiguration.current().enabled:\n return func(*args, **kwargs)\n else:\n msg = \"Rate limiting is disabled because `RateLimitConfiguration` is not enabled.\"\n LOGGER.info(msg)\n return\n\n return _decorated", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def limit_reached(self):\n if len(self.selected) >= self.limit:\n return True\n return False", "def should_be_throttled(self, resource):\r\n pass", "def rate_limit_check():\n\n data = api.rate_limit_status()\n\n user_timeline_remaining = data['resources']['statuses'] \\\n ['/statuses/user_timeline'] \\\n ['remaining']\n\n followers_list_remaining = data['resources']['followers'] \\\n ['/followers/list']['remaining']\n\n rate_limit_remaining = data['resources']['application'] \\\n ['/application/rate_limit_status']['remaining']\n\n verify_credentials_remaining = data['resources']['account'] \\\n ['/account/verify_credentials'] \\\n ['remaining']\n\n user_timeline_reset = data['resources']['statuses'] \\\n ['/statuses/user_timeline'] \\\n ['reset']\n\n followers_list_reset = data['resources']['followers'] \\\n ['/followers/list']['reset']\n\n rate_limit_reset = data['resources']['application'] \\\n ['/application/rate_limit_status']['reset']\n\n verify_credentials_reset = data['resources']['account'] \\\n ['/account/verify_credentials'] \\\n ['reset']\n\n return {'utrem': user_timeline_remaining,\n 'ftrem': followers_list_remaining,\n 'rlrem': rate_limit_remaining,\n 'vcrem': verify_credentials_remaining,\n 'utres': user_timeline_reset,\n 'ftres': followers_list_reset,\n 'rlres': rate_limit_reset,\n 'vcres': verify_credentials_reset}", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def is_limited(self) -> bool:\n return self.__times > ActionState.UNLIMITED", "def checkLimit(device, checkStatus):\n d = device.read(1)\n if d:\n print(d)\n status = d[0]\n\n printStatus(status)\n if (checkStatus & status):\n return False\n return True", "def check_limit(redis_client):\n if redis_client.llen('query_counter') >= API_RATE_LIMIT:\n left_val = redis_client.lpop('query_counter')\n parsed_left_val = float(left_val.decode('utf-8'))\n current_api_window = (datetime.utcnow() - timedelta(minutes=API_WINDOW_PERIOD)).timestamp()\n if parsed_left_val > current_api_window:\n redis_client.lpush('query_counter', left_val)\n return False\n return True", "def _validate_value(self, value):\n if self.limits[0] <= value <= self.limits[1]:\n return True\n else:\n return False", "def is_rate_limit_exceeded(self, request):\r\n counts = self.get_counters(request)\r\n return sum(counts.values()) >= self.requests", "async def rate_limit(self, ctx):\n await ctx.send(\"We have found that the approximate rate limit is 30-40 requests per second. 
Staying \"\n \"below this should be safe.\")", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def _check(self):\n try:\n num = int(self.ids.res_lim.text)\n # reset negative numbers to zero\n if num <= 0:\n self.ids.res_lim.text = str(0)\n except ValueError:\n self.ids.res_lim.text = str(self.limit)\n\n return int(self.ids.res_lim.text)", "def check_rate(self):\n rate = self.rate_measurer.rate()\n if rate < self.request_rate:\n self._fail(WRequestRateTooLow(rate))\n\n if self.rate_measurer.outstanding() > self.max_outstanding:\n self._fail(WRequestOverload())", "def _rate_limit_reached(self, waiting=False):\n msg = [\"GitHub rate limit reached.\"]\n if waiting:\n msg.append(\"Waiting for limit reset...\")\n if \"Authorization\" not in self._api_headers():\n msg.append(\"Authenticate to GitHub to increase the limit.\")\n return \" \".join(msg)", "def _VerifyZoneByQuota(self):\n if self.EnoughMetricsInZone(self._zone):\n return True\n raise errors.CheckGCEZonesQuotaError(\n \"There is no enough quota in zone: %s\" % self._zone)", "def testEnsurePlaybacksAreLimited(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasUnlimitedPlaybacks)", "def limited(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"limited\")", "def test_request_throttling_is_per_user(self):\n self.ensure_is_throttled(MockView, 200)", "def assertHttpTooManyRequests(self, resp):\r\n return self.assertEqual(resp.status_code, 429)", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def is_few_remaining(self) -> bool:\n return self.on_hand <= self.warn_limit", "def _handle_rate_limit(self):\n if self.is_rate_limit_status_stale():\n self.update_rate_limit_status()\n sleep_duration = self._rate_limit_status[\"reset_in_sec\"] + consts.RATE_LIMIT_BUFFER_SEC\n time.sleep(sleep_duration)\n wakeup_time = util.epoch_to_local_datetime(self._rate_limit_status[\"reset_at_utc\"])\n log.warning(\"GHUB\", f\"Rate limit reached - sleeping for {sleep_duration}s until {wakeup_time}.\")\n time.sleep(sleep_duration)", "def check_timeout(flag: Callable, limit: float) -> bool:\n timed_out = False\n if HAS_SUPERVISOR:\n start = supervisor.ticks_ms()\n while not timed_out and not flag():\n if ticks_diff(supervisor.ticks_ms(), start) >= limit * 1000:\n timed_out = True\n else:\n start = time.monotonic()\n while not timed_out and not flag():\n if time.monotonic() - start >= limit:\n timed_out = True\n return timed_out", "def test_rate_limiting_registration_view(self):\n for _ in range(int(settings.REGISTRATION_VALIDATION_RATELIMIT.split('/')[0])):\n response = self.request_without_auth('post', self.path)\n assert response.status_code != 403\n response = self.request_without_auth('post', self.path)\n assert response.status_code == 403", "def on_limit(self, track):\n print ('Got Rate limit Message', str(track))\n return True # Don't kill the stream", "def check_timelimit_slot__(self):\n timerange = self.valkkafs_manager.getTimeRange()\n \n if len(timerange) < 1: # empty tuple implies no frames\n print(\"PlaybackController: check_timelimit_slot__ : WARNING! 
no timerange from ValkkaFS\")\n # fabricate a dummy time : this exact moment\n current_time = int(time.time() * 1000)\n timerange = (\n current_time,\n current_time + 1\n )\n print(\"check_timelimits_slot__ : timerange =\", timerange)\n print(\"check_timelimits_slot__ : %s -> %s\" % ( formatMstimestamp(timerange[0]), formatMstimestamp(timerange[1]) ) )\n self.signals.set_fs_time_limits.emit(timerange)", "def respect_rate_limits(response, progress):\n reset = response.headers['x-ratelimit-reset']\n reset_dt = datetime.datetime.fromtimestamp(float(reset))\n remaining = response.headers['x-ratelimit-remaining']\n if remaining == 0:\n progress.write('Sleeping until rate limit refreshed, please wait...')\n while reset_dt > datetime.datetime.now():\n time.sleep(1)", "def validateVoltage( self, name, voltage ):\n channel = self.d[name]\n (MIN,MAX) = channel.limits\n if not MIN <= voltage <= MAX: raise Exception('Invalid voltage {}'.format(voltage))", "def test_limit_is_negative(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=-5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])", "def limited(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"limited\")", "def check_rate_limit(session, provided_iocs):\n rate_limit = session.rate_limit_status()[\"resources\"][\"search\"][\"/search/tweets\"]\n\n if rate_limit[\"remaining\"] == 0:\n reset_time = rate_limit[\"reset\"]\n rate_limit[\"reset\"] = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(reset_time))\n return rate_limit\n\n if len(provided_iocs) > rate_limit[\"remaining\"]:\n rate_limit = {\"Search term limit\": rate_limit[\"remaining\"],\n \"Total Search Terms Provided\": len(provided_iocs)}\n return rate_limit\n return", "def test_disable_limiter(monkeypatch):\n monkeypatch.setattr(planet.http, 'RATE_LIMIT', 0)\n monkeypatch.setattr(planet.http, 'MAX_ACTIVE', 0)", "def check_limits(*args):\n if getNanny(args[0]) == 1:\n if np.sum(pt_index(args[0])) != 0:\n setAbortState(args[0], 1)\n elif np.sum(tc_index(args[0])) != 0:\n setAbortState(args[0], 1)\n elif np.sum(lc_index(args[0])) != 0:\n setAbortState(args[0], 1)\n return args[0].Controls.AbortState.abort_state", "def ge(value, limit):\n return value >= limit", "def _test_rate_limiter(\n self,\n num_agents,\n consecutive_success_threshold,\n initial_cluster_rate,\n max_cluster_rate,\n min_cluster_rate,\n experiment_duration,\n max_concurrency,\n expected_requests,\n allowed_variance,\n reported_outcome_generator=always_true(),\n increase_strategy=BlockingRateLimiter.STRATEGY_MULTIPLY,\n backoff_factor=0.5,\n increase_factor=2.0,\n ):\n rate_limiter = BlockingRateLimiter(\n num_agents=num_agents,\n initial_cluster_rate=initial_cluster_rate,\n max_cluster_rate=max_cluster_rate,\n min_cluster_rate=min_cluster_rate,\n consecutive_success_threshold=consecutive_success_threshold,\n strategy=increase_strategy,\n increase_factor=increase_factor,\n backoff_factor=backoff_factor,\n max_concurrency=max_concurrency,\n fake_clock=self._fake_clock,\n )\n\n # Create and start a list of consumer threads\n experiment_end_time = self._fake_clock.time() + experiment_duration\n threads = self.__create_consumer_threads(\n max_concurrency,\n rate_limiter,\n experiment_end_time,\n reported_outcome_generator,\n )\n [t.setDaemon(True) for t in threads]\n [t.start() for t in threads]\n\n # Create and join and advancer thread (which in turn 
lasts until all client threads die\n advancer = self.__create_fake_clock_advancer_thread(rate_limiter, threads)\n advancer.start()\n advancer.join()\n\n requests = self._test_state[\"count\"]\n # Assert that count is close enough to the expected count\n observed_ratio = float(requests) / expected_requests\n self.assertGreater(observed_ratio, allowed_variance[0])\n self.assertLess(observed_ratio, allowed_variance[1])", "def testAtLeastSetsLimit(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g = 6\n\t\tc.atLeast(2)\n\t\tc.replay()\n\t\tx.g = 6\n\t\tself.failUnlessRaises(Exception, c.verify)\n\t\tx.g = 6\n\t\tc.verify()", "def le(value, limit):\n return value <= limit", "def limiter_context(limiter):\n # Increment the counter\n limiter.increment()\n with limiter._lock:\n try:\n # run setup code, while locked\n yield\n except Exception:\n # Decrement counter if any errors\n limiter.decrement()\n raise", "def should_be_throttled(self, identifier, **kwargs):\r\n return False", "def has_reached_limit(domain, limit=RATE_LIMIT):\n count = count_domain_certs_since(domain)\n return count >= limit", "def check_rate_limit(api, url, zzz=180.0):\n pattern = 'https:\\/\\/api.twitter.com\\/.*(\\/([a-z_]*)\\/.*)\\.json'\n endpoint, family = re.match(pattern, url).groups()\n url = \"https://api.twitter.com/1.1/application/rate_limit_status.json\"\n params = {\"resources\": [family]}\n response = api.get(url, params=params)\n response.close()\n try:\n return response.json()[\"resources\"][family][endpoint]\n except KeyError:\n try:\n return response.json()[\"resources\"][family][endpoint + '/:id']\n except KeyError:\n print \"Error checking rate limit status:\"\n print response.json()\n print \"Sleeping {:,}s and trying again...\".format(zzz)\n # DEBUG\n # Weirdly we get an OpenSSL error everytime\n # we go to sleep\n time.sleep(zzz)\n return check_rate_limit(api, url, zzz=zzz*2)", "def is_power_limited(self):\n status = self.get_status_response()\n return ((status[1] & 0x10) == 0x10)\n #end is_power_limited()", "def on_over_limit(limit):\n return (jsonify({'data': 'You hit the rate limit', 'error': '429'}), 429)", "def checkSanity(self, valuePreviouslySet):\n firstGet = self._pfwClient.get(self._paramPath)\n\n try:\n returnValue = Decimal(firstGet)\n except ValueError:\n print(\"ERROR: Can't convert %s to a decimal\" % firstGet)\n return firstGet, False\n\n upperAllowedValue = Decimal(valuePreviouslySet) + (Decimal(self._quantum) / Decimal(2))\n lowerAllowedValue = Decimal(valuePreviouslySet) - (Decimal(self._quantum) / Decimal(2))\n\n if not (lowerAllowedValue <= returnValue <= upperAllowedValue):\n print('%s <= %s <= %s is not true' %\n (lowerAllowedValue, returnValue, upperAllowedValue))\n return firstGet, False\n\n return firstGet, True", "def test_can_not_book_running_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def rate_limit(entity, limit, duration=60):\n\n return current_rate(entity, limit, duration) > limit", "def _isInAllowedRange( self, testval, refval, reltol=1.e-2 ):\n denom = refval\n if refval == 0:\n if testval == 0:\n return True\n else:\n denom = testval\n 
rdiff = (testval-refval)/denom\n del denom,testval,refval\n return (abs(rdiff) <= reltol)", "def on_limit(self, track):\n print \"!!! Limitation notice received: %s\" % str(track)\n return", "def percentage_limiter(percentage: float):\n if percentage < 0:\n return 0\n elif 0 <= percentage <= 1:\n return percentage\n else:\n return 1", "def safe_to_dance(self):\n #check for all fil/early-termination conditions\n for _ in range(4):\n if self.read_distance() < 300:\n print(\"not safe to dance!\")\n return False\n else:\n self.turn_by_deg(90)\n #after all checks have been done, we deduce its safe to dance\n print(\"Dance on!\")\n return True", "def retry_on_429(exc):\n return isinstance(exc, errors.APIRateLimitError)", "def test_request_limit_overflow(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': 100, 'limit': 100})\n r._get_response(200, 20)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 4100, 'limit': 20})", "def retry_if_rate_limit_error(exception):\n return isinstance(exception, RateLimitException)", "def _api_rate_limit_exceeded(self, api_call, window=60):\n current = datetime.datetime.now()\n try:\n previous = getattr(self, api_call.__name__ + \"_window\")\n # Force the calling of our property so we can\n # handle not having set it yet.\n previous.__str__\n except AttributeError:\n now = datetime.datetime.now()\n outside_window = datetime.timedelta(seconds=window+1)\n previous = now - outside_window\n\n if current - previous > datetime.timedelta(seconds=window):\n setattr(self, api_call.__name__ + \"_window\", current)\n else:\n timeout = window - (current - previous).seconds\n raise NewRelicApiRateLimitException(str(timeout))", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. 
Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def _handle_rate_limit(self):\n if not self._wait_rate_limit:\n raise GithubRateLimitException(self._rate_limit_reached())\n\n url = GITHUB_API + \"/rate_limit\"\n headers = self._api_headers()\n remaining = 0\n while remaining == 0:\n if self._wait_warn and not Client._RATE_LIMIT_WARNED:\n from warnings import warn\n\n warn(self._rate_limit_reached(True), GithubRateLimitWarning)\n Client._RATE_LIMIT_WARNED |= True\n\n sleep(self._wait_retry_delay)\n resp = self._request(\"GET\", url, headers=headers)\n remaining = int((resp.json())[\"resources\"][\"core\"][\"remaining\"])", "def charge_limit(self, limit=None):\n if limit is None:\n done, data = self._request('GH')\n if done:\n return int(data[0])\n else:\n if self._request('SH', str(int(limit)))[0]:\n return limit\n\n raise EvseError", "def validate(c_name, val):\n n = 80\n threshold = 4\n while (threshold >= 0):\n if ((len(channels[c_name]) > n) and (val <= threshold)):\n return True\n else:\n n -= 20\n threshold -= 1\n\n return False", "def is_rate_limited(self, force_update=False, ignore_stale=False):\n status = self.request_rate_limit_status(force_update, ignore_stale)\n return status[\"remaining\"] <= 0", "def check_passed_count(f):\n\n @functools.wraps(f)\n def wrapper(self, *args):\n if len(self.passed) > 5:\n f(self, *args)\n else:\n self.allowed[args[0]] = \"\"\n self.failed[args[0]] = \"\"\n self.log.info(\"Not filtering based on {}\".format(f.__name__))\n\n return wrapper", "def testTooManyPlaybacksRaisesAnException(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tpolicy.playback()\n\t\tself.failUnlessRaises(RecordedCallsWereNotReplayedCorrectly, policy.playback)", "def _consume_limiter_units(rl, units, timeout_ms):\n if rl is None or units <= 0:\n return 0\n \"\"\"\n The logic consumes units (and potentially delays) _after_ a successful\n operation for a couple reasons:\n\n * We don't know the actual number of units an op uses until after the\n operation successfully finishes.\n * Delaying after the op keeps the application from immediately trying\n the next op and ending up waiting along with other client threads\n until the rate goes below the limit, at which time all client threads\n would continue at once. By waiting after a successful op, client\n threads will get staggered better to avoid spikes in throughput and\n oscillation that can result from it.\n \"\"\"\n try:\n return rl.consume_units_with_timeout(units, timeout_ms, False)\n except Timeout:\n # Don't throw - operation succeeded. 
Just return timeout_ms.\n return timeout_ms", "def quick_test():\n if PERIOD < 2:\n return False\n if SIZE % PERIOD != 0:\n return False\n return True", "def _limited_call(self, func, *args, **kwargs):\n\n # Check seconds that have passed\n now = datetime.datetime.now()\n diff = (now - self._rate_limit_start).total_seconds()\n\n if diff >= 60:\n # If greater than a minute, reset the rate limit\n self._rate_limit_count = 0\n self._rate_limit_start = now\n else:\n # Check if the per-minute limit has been exceeded\n if self._rate_limit_count >= constants.FA_PAGE_REQUESTS_PER_MINUTE:\n # Wait until next minute, then reset the count/time\n wait_time = 60 - diff\n logger.debug(\"Hit rate limit, waiting %d seconds\" % wait_time)\n time.sleep(wait_time)\n self._rate_limit_count = 0\n self._rate_limit_start = datetime.datetime.now()\n\n self._rate_limit_count += 1\n\n return func(*args, **kwargs)", "def _check_shrink(self):\n # As an example, if length is 1/4 of capacity and growth factor is 2,\n # then the capacity should shrink in half to keep length proportional\n # to capacity\n if self._length < int(self._capacity / (self._growth_factor ** 2)):\n self._shrink_arr()", "def backoff_limit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"backoff_limit\")", "def test_limit_gives_helpful_err_message_with_misuse() -> None:\n msg = r\"Please pass arguments to decorator `@restricted`\"\n with pytest.raises(ValueError, match=msg):\n\n @restricted # type: ignore\n def f(x: int) -> int:\n return x", "def _exceeded_maximum_iteration(self) -> bool:\n if self.iteration >= self._maxiter:\n logger.warning(\n f\"Reached the maximum number of iterations \"\n f\"*{self._maxiter}*. Did not converge\"\n )\n return True\n\n else:\n return False" ]
[ "0.6411604", "0.6216477", "0.6159756", "0.59867847", "0.5980849", "0.5841296", "0.5840714", "0.5840501", "0.58299303", "0.5824446", "0.58074856", "0.57916874", "0.577796", "0.5687688", "0.5626272", "0.5561189", "0.5534629", "0.55233294", "0.5515274", "0.55132216", "0.5506859", "0.54680246", "0.5445703", "0.5440341", "0.5434376", "0.54156107", "0.5409898", "0.5400834", "0.5384751", "0.53840697", "0.53793883", "0.53652596", "0.53607064", "0.5347938", "0.5345569", "0.5332706", "0.5323658", "0.53056186", "0.53042114", "0.5302922", "0.52878094", "0.5269795", "0.5264974", "0.5249175", "0.5243945", "0.5238807", "0.52318156", "0.5227708", "0.521527", "0.5196859", "0.51962596", "0.5194131", "0.51598567", "0.51472133", "0.514179", "0.5139486", "0.5131714", "0.5131193", "0.5119808", "0.5117044", "0.51137507", "0.50958055", "0.5092069", "0.50901055", "0.50894916", "0.50746447", "0.50724393", "0.50708264", "0.50594646", "0.5057572", "0.5055694", "0.50509375", "0.5038474", "0.5036189", "0.5022679", "0.500992", "0.49985352", "0.4997975", "0.4995945", "0.4993248", "0.4986594", "0.4986358", "0.4981204", "0.49642155", "0.4963969", "0.49628597", "0.4957536", "0.4955328", "0.49529406", "0.49454325", "0.49426118", "0.4941619", "0.49414793", "0.49414074", "0.49376187", "0.49321017", "0.49239406", "0.4917531", "0.49146923", "0.49115354" ]
0.52793914
41
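
The positive `document` in the record above is a bare parametrized test body. A minimal sketch of how such a test is usually wired up with `pytest.mark.parametrize` follows; the `identify_limit` implementation and the example cases are assumptions made only for illustration, not the function the dataset row actually refers to.

import pytest


def identify_limit(limit, all):
    """Hypothetical stand-in: return the effective limit, with `all` disabling it."""
    return None if all else limit


@pytest.mark.parametrize(
    "limit, all, expected",
    [
        (10, False, 10),   # an explicit limit is kept
        (10, True, None),  # `all` overrides any limit (assumed semantics)
        (0, False, 0),     # zero passes through unchanged
    ],
)
def test_identify_limit(limit, all, expected):
    assert identify_limit(limit, all) == expected
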
Check correctness of iterable returned.
def test_get_row_ids(issues, limiter, expected): assert get_row_ids(issues, limiter) == expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_good_iterable(obj):\n return _is_iterable(obj) and _has_str_elems(obj)", "def _check_iterable(self):\n if self.theoretical_size is None:\n raise TypeError(\"This `fixture_ref` has not yet been initialized, so it cannot be unpacked/iterated upon. \"\n \"This is not supposed to happen when a `fixture_ref` is used correctly, i.e. as an item in\"\n \" the `argvalues` of a `@parametrize` decorator. Please check the documentation for \"\n \"details.\")\n if self.theoretical_size == 1:\n raise TypeError(\"This fixture_ref does not represent a tuple of arguments, it is not iterable\")", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def is_iterable(self):\n return all(set.is_iterable for set in self.sets)", "def is_iterable(value):\n # noinspection PyUnresolvedReferences\n return hasattr(value, '__iter__') and hasattr(value, '__getitem__')", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def all_user(iterable):\n for element in iterable:\n if not element:\n return False\n return True", "def testCreateFromIterable(self):\n self.assertEqual([\"c\",\"h\",\"e\",\"k\",\"a\"],list(\"cheka\"))", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", "def is_iterable(x: Any) -> bool:\r\n return isinstance(x, collections.abc.Iterable) and not isinstance(x, (str, bytes))", "def test_collect(\n self, iterable: t.Iterable[Result[int, str]], exp: Result[int, str]\n ) -> None:\n assert Result.collect(iterable) == exp", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def test_is_iterable(self):\r\n msg_list = messages.MessageList()\r\n\r\n # Adds 3 Message objects to the list.\r\n msg_list.push(messages.StringMessage(\"a\"))\r\n msg_list.push(messages.StringMessage(\"b\"))\r\n msg_list.push(messages.StringMessage(\"c\"))\r\n\r\n self.assertEqual([\"ab\", \"bb\", \"cb\"], [x.msg + \"b\" for x in msg_list])", "def testIterator(self):\n # Use the iterator to convert storage 
table to a list.\n iter_rows = [r for r in self._table]\n self.assertSameElements(self._fake_rows, iter_rows)", "def all(iterable):\n for item in iterable:\n if not item:\n return False\n return True", "def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception as e:\n pass\n\n return False", "def is_iterable(obj):\n try:\n itr = iter(obj)\n del itr\n return True\n except:\n return False", "def is_iterable(obj):\n return isinstance(obj, (list, tuple, types.GeneratorType)) or \\\n (not isinstance(obj, (int, str, dict)) and\n bool(getattr(obj, \"next\", False)))", "def test_enforce_iterable():\n formatter = TabularOutputFormatter()\n loremipsum = (\n \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod\".split(\n \" \"\n )\n )\n\n for format_name in formatter.supported_formats:\n formatter.format_name = format_name\n try:\n formatted = next(formatter.format_output(zip(loremipsum), [\"lorem\"]))\n except TypeError:\n assert False, \"{0} doesn't return iterable\".format(format_name)", "def is_iterable_object(maybe_iterable: Any) -> TypeGuard[Iterable[Any]]:\n\n return isinstance(maybe_iterable, Iterable)", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def is_iterable(x):\n if isinstance(x, six.string_types):\n return False\n return hasattr(x, '__iter__')", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def is_iterable(obj):\n if isinstance(obj, (str, bytes, bytearray)):\n return False\n return isinstance(obj, Iterable)", "def safe_iterator(i):\n return i or []", "def is_iterable(arg):\n return (\n isinstance(arg, collections.Iterable)\n and not isinstance(arg, str)\n )", "def test_defined_in_iter():\n\n @type_checked\n def _run_test(thing:[(int, str, str)]):\n for group in thing:\n assert isinstance(group[0], int)\n assert isinstance(group[1], str)\n assert isinstance(group[2], str)\n assert len(thing) == 4\n\n _run_test(thing=[\n (12.3, None, False),\n (\"12.1\", True, 1),\n (False, 10, 12.1),\n (True, 14.9, None),\n ])", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def _check_items(cls, sequence):\n all([cls._check_item(x) for x in sequence])", "def is_sequence_of_iterable(items):\n return all(is_item_iterable(item) for item in items)", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def is_iterable(element):\n return isinstance(element, (set, list, tuple))", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def is_iterable(refobject):\n is_iter = False\n try:\n for e in refobject: \n break\n is_iter = True\n except:\n pass\n\n return is_iter", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 
6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def isiterable(obj, strings=False, isinstance=isinstance, Iterable=Iterable):\n return (isinstance(obj, Iterable) and\n not (isinstance(obj, str) and not strings))", "def isIterable(obj):\n return isinstance(obj, ListType)", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def assert_stored_iss(self):\n assert(type(self.iss) == list)\n assert(len(self.iss) > 0)", "def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")", "def _TestTableSanity(self, tt, lines):\n # Check that more than one iterable can be used at once.\n iter1 = iter(tt)\n iter2 = iter(tt)\n self.assertEquals(lines[0], iter1.next())\n self.assertEquals(lines[0], iter2.next())\n self.assertEquals(lines[1], iter2.next())\n\n # Check that iteration again works again.\n for ix, line in enumerate(tt):\n self.assertEquals(lines[ix], line)\n\n # Check direct access of input lines.\n for i in xrange(len(tt)):\n self.assertEquals(lines[i], tt.GetInputs(i))\n\n # Check assertions on bad input to GetInputs.\n self.assertRaises(ValueError, tt.GetInputs, -1)\n self.assertRaises(ValueError, tt.GetInputs, len(tt))", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def __iter__(self):\n return NotImplemented", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def true_sentence_iterability():\n se = Sentence(\"how now brown cow\")\n print(_is_obj_iterable(se)) # True\n print(_is_obj_iterable(Sentence)) # False?", "def not_iterable(obj):\n return hasattr(obj,\"rstrip\") or not (hasattr(obj,\"__getitem__\") or hasattr(obj,\"__iter__\"))", "def isIterable(obj):\n # type: (Any) -> bool\n if isinstance(obj, basestring):\n return False\n elif isinstance(obj, ProxyUnicode):\n return False\n try:\n iter(obj)\n except TypeError:\n return False\n else:\n return True", "def is_iterable(var, iterable_types=ITERABLE_TYPES):\n return isinstance(var, iterable_types)", "def test_iterable_len(self):\n for iterable_len, expected_size in 
[(5, 5), (150, 100), (None, 100)]:\n with self.subTest(iterable_len=iterable_len):\n iterable_of_args, iterable_len_, chunk_size, n_splits = apply_numpy_chunking(\n self.test_data_numpy, iterable_len=iterable_len, n_splits=1\n )\n\n # Materialize generator and test contents\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), 1)\n self.assertIsInstance(iterable_of_args[0][0], np.ndarray)\n np.testing.assert_array_equal(iterable_of_args[0][0], self.test_data_numpy[:expected_size])\n\n # Test other output\n self.assertEqual(iterable_len_, 1)\n self.assertEqual(chunk_size, 1)\n self.assertIsNone(n_splits)", "def is_iterable(var):\n return any(isinstance(var, cls) for cls in [list, tuple, types.GeneratorType])", "def nonstringiter(obj):\n return not isinstance(obj, string_types) and isinstance(obj, Iterable)", "def test_make_np_iterable_type_error():\n with pytest.raises(TypeError):\n _ = uc._make_np_iterable(\"s\")", "def _build_iterable(self):", "def _assert_non_empty(iterable):\n first_elem = six.next(iterable, None)\n assert first_elem is not None, first_elem\n return itertools.chain([first_elem], iterable)", "def test_iter_of_many():\n\n @type_checked\n def _run_test(thing:(float,)):\n for item in thing:\n assert isinstance(item, float)\n assert len(thing) == 3\n\n _run_test(thing=(\"10\", 1, 5 / 6))\n\n # should work with lists too\n @type_checked\n def _run_test(thing:[str]):\n for item in thing:\n assert isinstance(item, str)\n assert len(thing) == 3\n\n _run_test(thing=(\"10\", 1, 5 / 6))", "def _data_sanity_checks(self, explore_iterable):\n data_list = []\n\n for val in explore_iterable:\n\n if not self.f_supports(val):\n raise TypeError(\n \"%s is of not supported type %s.\" % (repr(val), str(type(val)))\n )\n\n if not self._values_of_same_type(val, self._default):\n raise TypeError(\n \"Data of `%s` is not of the same type as the original entry value, \"\n \"new type is %s vs old type %s.\"\n % (self.v_full_name, str(type(val)), str(type(self._default)))\n )\n\n data_list.append(val)\n\n if len(data_list) == 0:\n raise ValueError(\"Cannot explore an empty list!\")\n\n return data_list", "def test_iterable():\n # 3 pages of results\n pages = (\n json_to_resp([{'id': 1}, {'id': 2}]),\n json_to_resp([{'id': 3}, {'id': 4}]),\n json_to_resp([{'id': 5}, {'id': 6}]),\n )\n cn = connect(email='foo', token='bar')\n cn.session.post = mock.Mock(side_effect=pages)\n rs = ResultSet(resource_cls=IdResource, page_size=2)\n assert {r.id for r in rs} == {1, 2, 3, 4, 5, 6}", "def __iter__(self):\n raise NotImplementedError(\"__iter__\")", "def _is_iterable_non_string(arg):\n return (hasattr(arg, \"__iter__\") or hasattr(arg, \"__getattr__\")) and not isinstance(arg, str)", "def __iter__(self):\n return iter(())", "def __iter__(self):\n raise Exception(\"Don't iterate this! 
Did you pass this to intersect without putting it in a list?\")", "def any_user(iterable):\n for element in iterable:\n if element:\n return True\n return False", "def is_iterable(obj, isStrIterable=False):\n if not isinstance(obj, Iterable):\n return False\n else:\n # XNOR is an iff statement A XNOR B = A iff B\n return not xor(isStrIterable, isinstance(obj, str))", "def test_iter_method(self):\n ref = mock.Mock()\n ref.side_effect = [{'rows': [1,2,3]}, {'rows': []}]\n rslt = Result(ref)\n collection = [x for x in rslt]\n self.assertEqual(collection, [1,2,3])\n\n run_iter = lambda x: [y for y in x]\n\n rslt = Result(ref, skip=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)\n\n rslt = Result(ref, limit=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def test_iterator_input():\n empty_iterator = iter(())\n transactions = empty_iterator\n itemsets, rules = apriori(transactions, 0.2, 0.2)\n assert itemsets == {} and rules == []\n\n transactions = [(1, 2), (1, 2), (1, 3), (1, 4), (1, 3)]\n transactions_iter = iter(transactions)\n itemsets1, rules1 = apriori(transactions_iter, 0.2, 1)\n itemsets2, rules2 = apriori(transactions, 0.2, 1)\n assert len(rules1) == len(rules2)\n for i in range(len(rules1)):\n assert rules1[i] == rules2[i]", "def __iter__(self):\n\n return self._entries.__iter__()", "def is_iterable(obj: Any, allow_str: bool = False):\n if isinstance(obj, str) and not allow_str:\n return False\n try:\n it = iter(obj) # noqa\n except TypeError:\n return False\n return True", "def test_iter(self):\n b = Vector(5, 6)\n assert list(b) == [5, 6]", "def __iter__(self):\n return iter(self.to_list())", "def __iter__(self):\r\n return iter(self._items)", "def __iter__(self):\r\n return iter(self._items)", "def is_iterable_container(value):\n # strings are iterable too so we have to treat them as a special case\n return not isinstance(value, str) and isinstance(value, collections.Iterable)", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def is_iterable(\n obj: Any,\n *,\n suppress_chars: bool = True,\n exclude_types: Optional[Tuple[Type, ...]] = None,\n) -> bool:\n if suppress_chars and is_chars(obj):\n return False\n elif exclude_types is not None and isinstance(obj, exclude_types):\n return False\n elif isinstance(obj, collections.abc.Iterable):\n return True\n else:\n try:\n iter(obj)\n return True\n except TypeError:\n return False", "def isiterable(obj, classinfo=None, of_type=None):\n if classinfo is not None:\n if not isinstance(obj, classinfo):\n return False\n elif not hasattr(obj, '__iter__') and not hasattr(obj, '__getitem__'):\n return False\n if of_type is not None:\n return all(isinstance(ele, of_type) for ele in obj)\n return True", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext" ]
[ "0.7090814", "0.7074417", "0.69252294", "0.6905731", "0.6872017", "0.6810469", "0.68096995", "0.68096995", "0.68021095", "0.66896445", "0.6655056", "0.65693426", "0.65589577", "0.65366095", "0.6519211", "0.65080535", "0.6505808", "0.6505808", "0.6505169", "0.647377", "0.64608705", "0.6447887", "0.64249605", "0.64221656", "0.6418386", "0.6393862", "0.63692224", "0.6353794", "0.6353435", "0.63481724", "0.63157356", "0.6300683", "0.6275185", "0.6269356", "0.6254831", "0.6252343", "0.6245389", "0.6234693", "0.6214832", "0.62087214", "0.61762553", "0.6141859", "0.61377084", "0.6118159", "0.61062163", "0.61062163", "0.61062163", "0.61062163", "0.6100813", "0.6099247", "0.60960984", "0.60853374", "0.6074727", "0.60379845", "0.60379237", "0.6035533", "0.6034184", "0.60210466", "0.6016431", "0.60141855", "0.60071135", "0.6006859", "0.60014004", "0.5992293", "0.5974558", "0.5973466", "0.59699273", "0.59424883", "0.5940034", "0.5927559", "0.5925078", "0.5905321", "0.58813363", "0.58778864", "0.5861798", "0.5854258", "0.5847603", "0.582524", "0.58164436", "0.5803922", "0.5796694", "0.5789577", "0.57651997", "0.575277", "0.5744043", "0.5744043", "0.5744043", "0.5744043", "0.5744043", "0.5741395", "0.57358557", "0.5718667", "0.5712159", "0.5711001", "0.57096964", "0.57096964", "0.5700365", "0.56969327", "0.569235", "0.56889504", "0.5681595" ]
0.0
-1
Load the draft results.
def test_load_draft(league):
    draft = league.draft_results()
    assert(len(draft) == 144)
    # mcdavid 1st
    assert(draft[0]['player_key'] == '396.p.6743')
    # carter hart 67th
    assert(draft[66]['player_key'] == '396.p.7156')
    # zadorov last
    assert(draft[-1]['player_key'] == '396.p.5995')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_enriched_results(self):\n return super().load_results()", "def load(self):\n self.results = pickle_load('results', self.main_dir)", "def run(self):\n results = self.fetch()\n return results", "def load_results_internal(self):\r\n filename = f\"{self.search_internal_path}/results_internal.dill\"\r\n\r\n with open_(filename, \"rb\") as f:\r\n return dill.load(f)", "def stage_draft_rulings(self):\r\n rulings = pd.read_excel(self.draft_ruling_path)\r\n for k in rulings.keys():\r\n rulings[k].fillna(value=\"\", inplace=True)\r\n rulings = rulings.to_dict(\"records\")\r\n id_to_ruling = dict(\r\n map(lambda r: (self.id(r), r), rulings)\r\n )\r\n u.cache_results(id_to_ruling, self.staged_ruling_path)", "def load_raw_results(self):\n if not self.setup.complete():\n raise AttributeError(\"Import setup is not complete\")\n access = DataImport(self.setup)\n if access.data_loaded:\n self.raw_results = access.results\n self.set_start_stop_time()\n return True\n return False", "def _load_results(self):\n\n _LOG.debug(\"stats colnames: %s\", \", \".join(self._stats_colnames))\n _LOG.debug(\"additional colnames: %s\", \", \".join(self._more_colnames))\n\n for res in self.rsts:\n _LOG.debug(\"hover colnames: %s\", \", \".join(self._hov_colnames[res.reportid]))\n\n colnames = []\n for colname in self._hov_colnames[res.reportid] + self._more_colnames:\n if colname in res.colnames_set:\n colnames.append(colname)\n\n csel = Trivial.list_dedup(self._stats_colnames + colnames)\n res.clear_filts()\n res.set_csel(csel)\n res.load_df()\n\n # We'll be dropping columns and adding temporary columns, so we'll affect the original\n # dataframe. This is more effecient than creating copies.\n self._mangle_loaded_res(res)", "def load_rentedout():", "def drafts():\n query = Entry.drafts().order_by(Entry.last_mod_date.desc())\n return object_list('index.html', query)", "def _load(self):\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n LOG.debug(\"Loading %s\" % self.branch_id)\n doc = self._client.getjson(path=\"/users/%(username)s/repos/%(reponame)s\"\n \"/branches/%(name)s\" % context)\n LOG.debug(\"doc loaded: %r\" % doc)\n slice_id = \"%(username)s/%(reponame)s/%(slice_id)s\" % {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"slice_id\": doc[\"slice_id\"]\n }\n self._slice = self._client.slice(slice_id)\n self._packages = doc[\"packages\"]", "def load_results(self):\n self.find_benchmark_directories()\n for (benchmark, producer), result in self.results.items():\n print('Reading results for ' + benchmark + ' ' + producer)\n if not result.directory:\n print('No results found for ' + benchmark + ' ' + producer)\n else:\n print('Generating report for: ' + result.directory)\n report = Report(result.directory)\n result.reports = report.generate()", "def nflffdraftresults(self, irc, msg, args, opttype):\n \n validtypes = ['QB','TQB','RB','WR','TE','DT','DE','LB','CB','S','D/ST','K','P','HC','ALL']\n \n if opttype and opttype not in validtypes:\n irc.reply(\"Type must be one of: %s\" % validtypes)\n return\n\n url = self._b64decode('aHR0cDovL2dhbWVzLmVzcG4uZ28uY29tL2ZmbC9saXZlZHJhZnRyZXN1bHRz')\n \n if opttype:\n url += '?position=%s' % opttype\n \n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'class':'tableBody'})\n headers = table.findAll('tr')[2]\n rows = 
table.findAll('tr')[3:13]\n\n append_list = []\n\n for row in rows:\n rank = row.find('td')\n player = rank.findNext('td')\n avgpick = player.findNext('td').findNext('td')\n append_list.append(rank.getText() + \". \" + ircutils.bold(player.getText()) + \" (\" + avgpick.getText() + \")\")\n\n descstring = string.join([item for item in append_list], \" | \") # put the list together.\n\n if not opttype:\n opttype = 'ALL'\n\n title = \"Top 10 drafted at: %s\" % opttype\n output = \"{0} :: {1}\".format(ircutils.mircColor(title, 'red'), descstring)\n irc.reply(output)", "def trigger_refresh(self):\n self.get_selected()\n self.manage_loading(loading=True)\n self.current_feed.fetch_content(unread_only=self.show_unread_only)\n self.manage_actions()", "def drafts(self):\n if self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def load(self):\n self._load()", "def load_submission_schedule():\n logger.info('Loading submission window schedule data')\n load_submission_window_schedule()", "def results(self):\n if not self._results:\n self.read_results()\n return self._results", "def get_results(self):\n\n super().get_results()", "def _fetch_data(self):\n pass", "def results(self):\n pass", "def load(self, index):\n selected = self.games[index]\n try:\n with open(path.join(self.saved_games, selected)) as f:\n self.game_data['game_data'] = json.load(f)\n self.game_data['file_name'] = selected\n self.game_data['loaded'] = True\n self.game_data['next'] = False\n super().set_state(TRANSITION_OUT)\n logger.info('Load : %s', selected)\n except EnvironmentError as e:\n logger.exception(e)\n\n try:\n self.load_minimap()\n except EnvironmentError as e:\n logger.exception(e)", "def load_data(self):", "def _get_results(self, res):\n self.async_res = res\n self.full_res = res.wait() # pragma: no cover\n self.trained = True # pragma: no cover\n self.mod_id = self.full_res['model_id'] # pragma: no cover\n self.data_id = self.full_res['data_id'] # pragma: no cover\n self.params_dump = self.full_res['params_dump'] # pragma: no cover\n if self.verbose > 0: # pragma: no cover\n print(\"Result {} | {} ready\".format(\n self.mod_id, self.data_id)) # pragma: no cover", "def __call__(self, results):\n results = super().__call__(results)\n if self.with_bbox_3d:\n results = self._load_bboxes_3d(results)\n if results is None:\n return None\n if self.with_bbox_depth:\n results = self._load_bboxes_depth(results)\n if results is None:\n return None\n\n if self.with_corners_2d:\n results = self._load_corners_2d(results)\n if self.with_label_3d:\n results = self._load_labels_3d(results)\n if self.with_attr_label:\n results = self._load_attr_labels(results)\n if self.with_mask_3d:\n results = self._load_masks_3d(results)\n if self.with_seg_3d:\n results = self._load_semantic_seg_3d(results)\n if self.with_tokens:\n results = self._load_tokens(results)\n\n return results", "def results(self):\r\n pass", "def getResults():", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = merge_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = 
tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())", "def load(self):\n self.tickets = Ticket.objects.select_related(\n 'input_module'\n ).filter(**self.ticket_data_query()).filter(\n Q(created__gte=self.date_start, created__lte=self.date_end) |\n Q(closed_date__gte=self.date_start, closed_date__lte=self.date_end) |\n Q(assigned_date__gte=self.date_start, assigned_date__lte=self.date_end)\n )\n\n self.closed_tickets = self.tickets.filter(\n closed_date__gte=self.date_start,\n closed_date__lte=self.date_end\n )\n\n self.assigned_tickets = self.tickets.filter(\n assigned_date__gte=self.date_start,\n assigned_date__lte=self.date_end\n )\n\n self.closed = self.closed_tickets.count()\n self.closed_by_users: int = 0\n\n # in minutes (time between open and assignment)\n avg_pre_processing = [0]\n\n # in minutes (time between open and close)\n avg_full_processing = [0]\n\n # how many messages before closing the tickets\n avg_msg_to_close = [0]\n\n # how many minutes between taken by operators and closed\n avg_time_created_taken = [0]\n\n # Time between creating a ticket and operator first message.\n first_time_op_answer = [0]\n\n # in a single loop I have to process\n # whatever, otherwise it will takes too long. Efficiency may be huge, we know.\n tmsgs = TicketReply.objects.filter(\n ticket__pk__in = self.tickets.values_list(\"pk\", flat=True)\n ).values_list(\"ticket__pk\", \"created\", \"owner\")\n\n operators_pks = self.get_operators_pks()\n\n content_type = ContentType.objects.get_for_model(Ticket)\n\n for i in self.tickets:\n ticket_time = timezone.localtime(i.created).strftime(\"%d-%m-%Y %H\")\n ticket_day, ticket_hour = ticket_time.split(\" \")\n ticket_day_eu = timezone.localtime(i.created).strftime(\"%Y-%m-%d\")\n if not self.ticket_per_day_hour.get(ticket_day):\n # {'01-01-2022': {'total': int, 'hours': {0: int, ... 
23: int}}}\n self.ticket_per_day_hour[ticket_day] = {'total': 0, 'hours': {}}\n\n self.ticket_per_day_hour[ticket_day]['total'] += 1\n if not self.ticket_per_day_hour[ticket_day][\"hours\"].get(ticket_hour):\n self.ticket_per_day_hour[ticket_day][\"hours\"][ticket_hour] = 0\n self.ticket_per_day_hour[ticket_day][\"hours\"][ticket_hour] += 1\n\n # if not self.ticket_per_day.get(ticket_day_eu):\n # self.ticket_per_day[ticket_day_eu] = 0\n # self.ticket_per_day[ticket_day_eu] += 1\n\n # put the ticket in a configured time slot\n if i.created >= self.date_start and i.created <= self.date_end:\n for slot, hour_range in STATS_TIME_SLOTS.items():\n if int(ticket_hour) in hour_range:\n self.ticket_per_weekday[\n timezone.localtime(i.created).strftime(calendar.day_name.format)\n ][slot - 1] += 1\n break\n\n if i.is_notification:\n self.notifications += 1\n else:\n _msgs = tmsgs.filter(ticket=i)\n op_msgs = _msgs.filter(\n owner__pk__in = operators_pks\n ).values_list(\"created\", flat=True)\n if _msgs and op_msgs:\n first_time_op_answer.append(\n (op_msgs[0] - i.created).seconds\n )\n\n # Cosa si vuole mostrare?\n # Quanti ticket si trovavano in stato \"assegnato\" in quel giorno?\n # if not i.has_been_taken():\n # O quanti ticket sono stati presi in carico in quel giorno?\n # if not i.assigned_date and not i.is_closed:\n if i.created >= self.date_start and i.created <= self.date_end:\n self.open += 1\n # if not self.open_day_serie.get(ticket_day):\n # self.open_day_serie[ticket_day] = 0\n # self.open_day_serie[ticket_day] += 1\n self.open_day_serie[timezone.localtime(i.created).strftime(\"%d-%m-%Y\")] += 1\n\n # elif not i.is_closed and i.assigned_date >= self.date_start and i.assigned_date <= self.date_end:\n if i.assigned_date and i.assigned_date >= self.date_start and i.assigned_date <= self.date_end:\n self.assigned += 1\n avg_pre_processing.append(\n (i.assigned_date - i.created).seconds\n )\n # if not self.assigned_day_serie.get(ticket_day):\n # self.assigned_day_serie[ticket_day] = 0\n # self.assigned_day_serie[ticket_day] += 1\n self.assigned_day_serie[timezone.localtime(i.assigned_date).strftime(\"%d-%m-%Y\")] += 1\n\n if i.closed_date and not i.is_closed:\n # if not self.reopened_day_serie.get(ticket_day):\n # self.reopened_day_serie[ticket_day] = 0\n # self.reopened_day_serie[ticket_day] += 1\n\n # get reopen time from first log action after closing\n reopen_log_entry = Log.objects.filter(content_type_id=content_type.pk,\n object_id=i.pk,\n action_time__gt=i.closed_date,\n action_time__lte=self.date_end,\n action_time__gte=self.date_start).first()\n if reopen_log_entry:\n self.reopened += 1\n self.reopened_day_serie[timezone.localtime(reopen_log_entry.action_time).strftime(\"%d-%m-%Y\")] += 1\n\n # elif i.closed_date and i in self.closed_tickets:\n if i.closed_date and i in self.closed_tickets:\n # is closed\n # if not self.closed_day_serie.get(ticket_day):\n # self.closed_day_serie[ticket_day] = 0\n # self.closed_day_serie[ticket_day] += 1\n self.closed_day_serie[timezone.localtime(i.closed_date).strftime(\"%d-%m-%Y\")] += 1\n if i.closed_by:\n # otherwise the user closed by himself\n _op_name = i.closed_by.__str__()\n if not self.closed_by_ops.get(_op_name, None):\n self.closed_by_ops[_op_name] = 0\n self.closed_by_ops[_op_name] += 1\n self.closed_by_ops_count += 1\n else:\n self.closed_by_users += 1\n\n avg_full_processing.append(\n (i.closed_date - i.created).seconds\n )\n if i.assigned_date:\n avg_time_created_taken.append(\n (i.closed_date - i.assigned_date).seconds\n )\n\n # get 
how many messages has been taken to close this ticket\n # excluding the closing message\n _mcount = tmsgs.filter(ticket=i).count()\n avg_msg_to_close.append(_mcount)\n\n _user_name = i.created_by.__str__()\n if not self.open_by_user.get(_user_name, None):\n self.open_by_user[_user_name] = 0\n self.open_by_user[_user_name] += 1\n\n # aggregation and details in hours\n self.avg_pre_processing_seconds = statistics.mean(avg_pre_processing)\n self.avg_pre_processing = int(self.avg_pre_processing_seconds / 60)\n\n self.avg_full_processing = int(statistics.mean(avg_full_processing) / 60)\n self.avg_msg_to_close = statistics.mean(avg_msg_to_close)\n self.first_time_op_answer_seconds = statistics.mean(first_time_op_answer)\n self.avg_first_time_op_answer = int(self.first_time_op_answer_seconds / 60)\n self.avg_time_created_taken = int(statistics.mean(avg_time_created_taken) / 60)\n\n # sort descending\n self.open_by_user = {k: v for k, v in sorted(self.open_by_user.items(), key=lambda item: item[1])}\n self.closed_by_ops = {k: v for k, v in sorted(self.closed_by_ops.items(), key=lambda item: item[1], reverse=True)}", "def _setData(self):\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n data_list = []\n results = self.query.all()\n \n # if no current parliament, no data\n try:\n parliament_id = model_utils.get_current_parliament().parliament_id\n except: \n return data_list\n #\n government_id = self.__parent__.government_id\n for result in results:\n data = {}\n data[\"qid\"] = \"g_%s\" % (result.group_id)\n data[\"subject\"] = result.short_name\n data[\"title\"] = \"%s (%s)\" % (result.short_name, result.type)\n data[\"result_item_class\"] = \"workflow-state-%s\" % (result.status)\n _url = \"/archive/browse/parliaments/obj-%s\" % (parliament_id)\n if type(result) == domain.Parliament:\n data[\"url\"] = url.set_url_context(_url)\n continue\n elif type(result) == domain.Committee:\n #data[\"url\"] = url + \"/committees/obj-\" + str(result.group_id) \n data[\"url\"] = url.set_url_context(\"/groups/%s/%s\" % (\n result.parent_group.group_principal_id,\n result.group_principal_id))\n elif type(result) == domain.PoliticalGroup:\n data[\"url\"] = url.set_url_context(\n \"%s/politicalgroups/obj-%s\" % (_url, result.group_id))\n elif type(result) == domain.Ministry:\n data[\"url\"] = url.set_url_context(\n \"%s/governments/obj-%s/ministries/obj-%s\" % (\n _url, government_id, result.group_id))\n else:\n data[\"url\"] = \"#\"\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"\"\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def _load(self):\n\t\tpool = []\n\t\tview = []\n\t\tlibrary = []\n\n\t\tif is_file(\"~/comiccrawler/pool.json\"):\n\t\t\tpool = json.loads(content_read(\"~/comiccrawler/pool.json\"))\n\n\t\tif is_file(\"~/comiccrawler/view.json\"):\n\t\t\tview = json.loads(content_read(\"~/comiccrawler/view.json\"))\n\n\t\tif is_file(\"~/comiccrawler/library.json\"):\n\t\t\tlibrary = json.loads(content_read(\"~/comiccrawler/library.json\"))\n\n\t\tfor m_data in pool:\n\t\t\t# reset state\n\t\t\tif m_data[\"state\"] in (\"DOWNLOADING\", \"ANALYZING\"):\n\t\t\t\tm_data[\"state\"] = \"ERROR\"\n\t\t\t# build episodes\n\t\t\tepisodes = []\n\t\t\tfor ep_data in m_data[\"episodes\"]:\n\t\t\t\tepisodes.append(Episode(**ep_data))\n\t\t\tm_data[\"episodes\"] = episodes\n\t\t\tmission = Mission(**m_data)\n\t\t\tself._add(mission)\n\n\t\tfor url in 
view:\n\t\t\tself.view[url] = self.pool[url]\n\n\t\tfor url in library:\n\t\t\tself.library[url] = self.pool[url]\n\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.view)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.library)", "def main(self):\n\t\tprint \"Retreiving view 'All\",\n\t\tview_all = self.hudson.getViewByName('All')\n\t\tprint \"Done\"\n\t\tprint \"iterating over jobs\"\n\t\tfor job in view_all.jobs.values():\n\t\t\tviewname = job.name.split(\".\")[0]\n\t\t\tif job.name not in self.getJobListFromDB():\n\t\t\t\tself.addJobToDb(job.name)\n\t\t\tif viewname not in self.getViewListFromDB():\n\t\t\t\tself.addViewToDb(viewname)\n\t\t\tfor build in job.builds:\n\t\t\t\tbo = HudsonConnector.HudsonObject( self.hudson.getDataFromUrl(build['url']) )\n\t\t\t\tstamp = datetime.datetime.fromtimestamp(bo.timestamp/1000)\n\t\t\t\tif stamp > self.lastrun:\n\t\t\t\t\tif bo.result is None:\n\t\t\t\t\t\trunname = job.name+\" #%d\" % bo.number\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), bo.result.capitalize()\n\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), \"Unknown\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tjobdata = { 'name':job.name, 'view':job.name.split(\".\")[0], 'start':stamp, \n\t\t\t\t\t\t\t\t\t'end':stamp + datetime.timedelta(seconds=bo.duration),\n\t\t\t\t\t\t\t\t\t'duration':bo.duration,\n\t\t\t\t\t\t\t\t\t'result':bo.result\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\tself.uploadJobState(jobdata)\n\t\tself.saveState()", "def fetch_pending(self):\n pending = self.open(self.urls['pending'])\n soup = BeautifulSoup(pending.read())", "def load_objects(self, queue):\n pass", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "def force_load(self):\n for selection in self.selections.normal_values():\n selection.force_load()", "def loads(self):\n return self._loads", "def fetch(self):\r\n if not self._fetched:\r\n self._fetched = True\r\n self.data = query_cache.get(self.iden) or []", "def fetch_data(self):", "def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n self.update_widgets_values()", "def fetch_self(self):\r\n self.parsed_doc['names'] = self.fetch_candidate_name() \r\n self.parsed_doc['phones'] = self.fetch_phone_numbers() \r\n self.parsed_doc['emails'] = self.fetch_emails() \r\n self.parsed_doc['github'] = self.fetch_github() \r\n self.parsed_doc['linkedin'] = self.fetch_linkedin() \r\n self.parsed_doc['degrees'] = self.fetch_degrees() \r\n self.parsed_doc['skills'] = self.fetch_skills() \r\n self.parsed_doc['education'] = self.fetch_education() \r\n self.parsed_doc['languages'] = self.fetch_languages() \r\n self.parsed_doc['addresses'] = self.fetch_address() \r\n self.parsed_doc['raw_resume'] = self.stringtext", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = intersect_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = 
result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def load(self):\n return", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self) -> FileHandle:\n with self.service() as api:\n return api.runs().get_result_file(\n run_id=self.run_id,\n file_id=self.file_id\n )", "async def load(self) -> None:\n pass", "def load(self):\n if self.content_provider:\n self.content_provider.load()\n self.items = self.content_provider.movies", "def load_data():\n try:\n loader.download()\n load_table_data()\n status = 'loaded'\n except Exception as ex:\n log.log_traceback(ex)\n status = 'failed'\n return flask.jsonify({'status': status})", "def load_thread(self):\n self.load_count = load.loader.update_hotel_with_data(self.transform_result)\n self.clear_temporary_data()", "def load_data(self) -> None:", "def fetch(self):\n # reset dynamic keys\n for key in self._dynamic_keys:\n try:\n delattr(self, key)\n except:\n pass\n self._dynamic_keys = []\n\n self._result_cache = self.fetch_raw()\n assert isinstance(self._result_cache, dict), 'received an invalid ' \\\n 'response of type %s: %s' % \\\n (type(self._result_cache), repr(self._result_cache))\n self._total_rows = self._result_cache.get('total_rows')\n self._offset = self._result_cache.get('offset', 0)\n\n # add key in view results that could be added by an external\n # like couchdb-lucene\n for key in self._result_cache.keys():\n if key not in [\"total_rows\", \"offset\", \"rows\"]:\n self._dynamic_keys.append(key)\n setattr(self, key, self._result_cache[key])", "def load_results(self):\n\n scan_results = engine_pb2.EnrichedLaunchToolResponse()\n collected_results = load_files(scan_results, self.pb_location)\n\n return collected_results", "def update_results(self):\n try:\n results = self.shared_state[\"active_collection\"].query_source(\n self.query_str\n )\n self.results = results\n\n except Exception:\n self.results = {}\n self.status_textcontrol.text = \"(invalid query)\"\n else:\n self.formatted_results = self._apply_default_format(self.results)\n self.results_textcontrol.text = self.formatted_results\n self.index = 0\n self.status_textcontrol.text = (\n f\"showing {len(self.results)} of \"\n f\"{self.shared_state['active_collection'].df.shape[0]} records \"\n f\"syntax: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax)\"\n )", "def get_drafts(self):\n return self.filter(status=\"D\")", "def get_results(self):\n return self._do_action_under_lock(self._get_all_results)", "def load(self):\r\n\r\n pickle_file = 'paderborn.pickle'\r\n\r\n\r\n if os.path.isfile(pickle_file):\r\n with open(pickle_file, 'rb') as handle:\r\n acquisitions = pickle.load(handle)\r\n else:\r\n self.download()\r\n acquisitions = self.acquisitions()\r\n with open(pickle_file, 'wb') as handle:\r\n pickle.dump(acquisitions, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n return acquisitions", "def load(self):", "def fetch(self):\n pass", "def fetch(self):\n pass", "def load_index():\n\n if len(request.accept_mimetypes) != 0 and \"application/json\" not in request.accept_mimetypes:\n response = Response(\n response=json.dumps({\"Error\": \"This body type is not supported.\"}),\n status=406,\n mimetype='application/json'\n )\n return response\n elif request.data:\n failed = {\"Error\": \"The request object does not follow specifications - see documentation.\"}\n response = Response(\n 
response=json.dumps(failed),\n status=400,\n mimetype='application/json'\n )\n return response\n\n query = client.query(kind=\"load\")\n q_limit = int(request.args.get('limit', '3'))\n q_offset = int(request.args.get('offset', '0'))\n l_iterator = query.fetch(limit=q_limit, offset=q_offset)\n pages = l_iterator.pages\n results = list(next(pages))\n if l_iterator.next_page_token:\n next_offset = q_offset + q_limit\n next_url = request.base_url + \"?limit=\" + str(q_limit) + \"&offset=\" + str(next_offset)\n else:\n next_url = None\n for e in results:\n e[\"id\"] = e.key.id\n e[\"self\"] = request.url_root + \"loads/\" + str(e.key.id)\n if e[\"carrier\"]:\n e[\"carrier\"][\"self\"] = request.url_root + \"boats/\" + str(e[\"carrier\"][\"id\"])\n output = {\"loads\": results}\n if next_url:\n output[\"next\"] = next_url\n return Response(\n response=json.dumps(output),\n status=200,\n mimetype='application/json'\n )", "def _initialize_drafts(self):\n drafts = memcache.get('user_drafts:' + self.email)\n if drafts is not None:\n self._drafts = drafts\n ##logging.info('HIT: %s -> %s', self.email, self._drafts)\n return False\n # We're looking for the Issue key id. The ancestry of comments goes:\n # Issue -> PatchSet -> Patch -> Comment.\n issue_ids = set(comment.key().parent().parent().parent().id()\n for comment in gql(Comment,\n 'WHERE author = :1 AND draft = TRUE',\n self.user))\n self._drafts = list(issue_ids)\n ##logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)\n return True", "def load(self):\n the_redis = DARedis()\n cases = the_redis.get_data(self.user_cases_key) or {}\n self.cases = cases", "def load_quests(self):\n\n raise NotImplementedError()", "def load(self):\n self._really_load()", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "def do_fetch(self):\n pass", "def load_fit_results(self, load_path: str = \"./fit_results.json\"):\n assert load_path.endswith(\".json\"), self.JSON_ASSERTION\n with open(load_path, \"r\") as fjson:\n wrapped_results = json.load(fjson)\n assert (\n \"fit_src_dst_results\" in wrapped_results\n and \"fit_dst_src_results\" in wrapped_results\n ), \"Required keys fit_src_dst_results and fit_dst_src_results keys in the json not found\"\n self._fit_src_dst_results = wrapped_results[\"fit_src_dst_results\"]\n self._fit_dst_src_results = wrapped_results[\"fit_dst_src_results\"]", "def _load_assessment_results_page(self):\r\n\r\n fmt = '{0:0.' 
+ str(Configuration.PLACES) + 'g}'\r\n\r\n self.txtAvailability.set_text(\r\n str(fmt.format(self._function_model.availability)))\r\n self.txtMissionAt.set_text(\r\n str(fmt.format(self._function_model.mission_availability)))\r\n self.txtMissionHt.set_text(\r\n str(fmt.format(self._function_model.mission_hazard_rate)))\r\n self.txtPredictedHt.set_text(\r\n str(fmt.format(self._function_model.hazard_rate)))\r\n\r\n self.txtMMT.set_text(str(fmt.format(self._function_model.mmt)))\r\n self.txtMCMT.set_text(str(fmt.format(self._function_model.mcmt)))\r\n self.txtMPMT.set_text(str(fmt.format(self._function_model.mpmt)))\r\n\r\n self.txtMissionMTBF.set_text(\r\n str(fmt.format(self._function_model.mission_mtbf)))\r\n self.txtMTBF.set_text(str(fmt.format(self._function_model.mtbf)))\r\n self.txtMTTR.set_text(str(fmt.format(self._function_model.mttr)))\r\n\r\n return False", "def _fetch_items(self):\n url = self._api.router.publication['search'].format(\n project_id=self.project_id\n )\n res_data = self._api.post(url, data=self.search_param)\n self.total = res_data['total']\n self._items = (\n Publication(item, self.project_id)\n for item in res_data['hits']\n )\n div = self.total // self.search_param['limit']\n reste = self.total % self.search_param['limit']\n self.total_page = div\n if reste != 0: self.total_page += 1\n self.search_param = self.search_param.next_page()", "def _load(self) -> None:\n self.record = self._saved_record\n self.counter = self._saved_counter\n self.current_objects = self._saved_objects", "def test_get_drafts(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n\n url = '/0/chefs/%i/drafts' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('drafts', resp.data)\n self.assertEqual(1, len(resp.data['drafts']))\n keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['drafts'][0].keys()))\n self.assertEqual(r1.pk, resp.data['drafts'][0]['id'])", "def load_list(request):\n q=Load.objects.all()\n template = get_template(\"load.html\")\n if request.method == 'POST':\n load_form = LoadForm(data=request.POST)\n load_form.save()\n else:\n load_form=LoadForm()\n return HttpResponse(template.render(context={'list': q, 'load_form': load_form},\n request=request))", "def load_data(self):\n return self._load_data", "def get_results(self):\n return self.results", "def get_results(self):\n return self.results", "def defer_results(self):\n return recipe_api.defer_results", "async def load_from_store(self):\n start_time = int(time.time()) - self.keep_interval\n for lid, t, d in await self.store.get_recent_partials(start_time):\n self.cache[lid].add(t, d, remove=False)\n self.cache.all.add(t, d, remove=False)\n await self.store.scrub_pplns(start_time)", "def get_data(self):\n return self._results", "def get_red_rcp_primary_result_data(finished_states_dict):\n\tdata_filepath = database.get_database_filepath() + \"rep_primary_results_table.html\"\n\thtml_str = 
open(data_filepath, 'r').read()\n\tbs_obj = bs4.BeautifulSoup(html_str, 'html.parser')\n\ttable = bs_obj.find('table')\n\tfor row in table.find_all(\"tr\"):\n\t\tstate_name = \"\"\n\t\tspan_list = row.find_all('span')\n\t\tif len(span_list) > 1:\n\t\t\tstate_name = filter(lambda ch: ch not in \"0123456789\", row.find_all(\"span\", \"full_name\")[0].text)\n\t\telt_list = row.find_all(\"td\")\n\t\tif len(elt_list) > 0:\n\t\t\tif elt_list[1].span != None:\n\t\t\t\tdate_str = elt_list[1].span.string\n\t\t\t\tif elt_list[3].string != None:\n\t\t\t\t\ttrump_votes = int(elt_list[3].string) # trump\n\t\t\t\t\tcruz_votes = int(elt_list[4].string) # cruz\n\t\t\t\t\tif elt_list[5].string != None:\n\t\t\t\t\t\trubio_votes = int(elt_list[5].string) # rubio\n\t\t\t\t\telse:\n\t\t\t\t\t\trubio_votes = 0\n\t\t\t\t\tkasich_votes = int(elt_list[6].string) # kasich\n\t\t\t\t\tstate_obj = None\n\t\t\t\t\tif state_name not in finished_states_dict.keys():\n\t\t\t\t\t\tstate_obj = data_structures.StatePollData(state_name)\n\t\t\t\t\telse:\n\t\t\t\t\t\tstate_obj = finished_states_dict[state_name]\n\t\t\t\t\tred_dict = {}\n\t\t\t\t\tcan_dict = {}\n\t\t\t\t\tcan_dict[\"Trump\"] = trump_votes\n\t\t\t\t\tcan_dict[\"Cruz\"] = cruz_votes\n\t\t\t\t\tcan_dict[\"Rubio\"] = rubio_votes\n\t\t\t\t\tcan_dict[\"Kasich\"] = kasich_votes\n\t\t\t\t\tred_dict[date_str] = can_dict\n\t\t\t\t\tstate_obj.red_poll_dict_list.append(red_dict)\n\t\t\t\t\tfinished_states_dict[state_name] = state_obj", "def results(self, results):\n self._results = results", "def results(self, results):\n self._results = results", "def results(self):\n return extract_results(self.model)", "def _load(self):\n if not self._loaded:\n url = f\"https://api.opendota.com/api/matches/{self.id}\"\n logger.info(\"Loading match details for match id: %s from url %s\",\n self._id, url)\n self.data = requests.get(url).json()\n self._duration = self.data.get('duration')\n self._chat = self.data.get('chat')\n self._cluster = self.data.get('cluster')\n self._engine = self.data.get('engine')\n self._first_blood_time = self.data.get('first_blood_time')\n self._game_mode = self.data.get('game_mode')\n self._human_players = self.data.get('human_players')\n self._league_id = self.data.get('league_id')\n self._lobby_type = self.data.get('lobby_type')\n self._match_seq_num = self.data.get('match_seq_num')\n self._negative_votes = self.data.get('negative_votes')\n self._positive_votes = self.data.get('positive_votes')\n self._objectives = self.data.get('objectives')\n self._picks_bans = self.data.get('picks_bans')\n self._barracks_status_dire = self.data.get('barracks_status_dire')\n self._dire_score = self.data.get('dire_score')\n self._dire_team = self.data.get('dire_team')\n self._tower_status_dire = self.data.get('tower_status_dire')\n self._barracks_status_radiant = self.data.get('barracks_status_radiant')\n self._radiant_gold_adv = self.data.get('radiant_gold_adv')\n self._radiant_xp_adv = self.data.get('radiant_xp_adv')\n self._radiant_score = self.data.get('radiant_score')\n self._radiant_team = self.data.get('radiant_team')\n self._radiant_win = self.data.get('radiant_win')\n self._tower_status_radiant = self.data.get('tower_status_radiant')\n self._start_time = self.data.get('start_time')\n self._teamfights = self.data.get('teamfights')\n self._version = self.data.get('version')\n self._replay_salt = self.data.get('replay_salt')\n self._series_id = self.data.get('series_id')\n self._series_type = self.data.get('series_type')\n self._league = self.data.get('league')\n 
self._skill = self.data.get('skill')\n self._players = self.data.get('players')\n self._patch = self.data.get('patch')\n self._region = self.data.get('region')\n self._all_word_counts = self.data.get('all_word_counts')\n self._version = self.data.get('version')\n self._throw = self.data.get('throw')\n self._comeback = self.data.get('comeback')\n self._cosmetics = self.data.get('cosmetics')\n self._draft_timings = self.data.get('draft_timings')\n self._loss = self.data.get('loss')\n self._win = self.data.get('win')\n self._replay_url = self.data.get('replay_url')\n self._loaded = True", "def load(self, id):\n return self.getTable().get(id).run(self.r)", "def results(self):\r\n return self._results", "def _run_async_query(self, context):\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n self._total_results = len(results)\n self._count_valid = True\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def load(self):\n return self._load", "def refresh(self):\n self.fetch(False)", "def load_articles():\n\t\n\tlog(\"Reading articles from file: articles_dumped...\")\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tlog(\"Done!\")\n\tsys.stdout.write(\"Done!\\n\")\n\tsys.stdout.flush()\n\t\n\treturn articles", "def results(self):\r\n return results.Results(self.parent, self.object_id)", "def getResults(self,timeout=None):\n self.results = odict.ODict()\n self.setResult(0)\n if self._pos is not None:\n self.restoreGeometry(self._pos)\n \n self.show(timeout,modal=True)\n self.exec_()\n #self.activateWindow()\n #self.raise_()\n pf.app.processEvents()\n self._pos = self.saveGeometry()\n return self.results", "def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")", "def loadUI(self,factory={}):\n keepTypes = self.canSave and 'ALL' or tuple()\n self.load(keepTypes=keepTypes,factory=factory)\n uiTypes = set(factory.keys())\n self.unpackRecords(uiTypes)\n self.indexRecords(uiTypes)", "def load_full():\n _fetch_full()\n return _load(cache_full, _parse_full)", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def load_failed_tas():\n logger.info('Loading TAS Failing Edits')\n load_all_tas_failing_edits()" ]
[ "0.6382324", "0.6246874", "0.5810889", "0.5807111", "0.57492447", "0.56064266", "0.5601259", "0.55607885", "0.54455495", "0.5442246", "0.5427821", "0.5390353", "0.53654075", "0.5347294", "0.53464603", "0.53339255", "0.53226596", "0.53156066", "0.5312531", "0.5308264", "0.52943623", "0.52930695", "0.527965", "0.5263135", "0.52562904", "0.5250532", "0.52485144", "0.5240229", "0.5226236", "0.5226052", "0.51957005", "0.5192008", "0.51879084", "0.51876724", "0.5182415", "0.51811767", "0.5173678", "0.5168294", "0.5155646", "0.5130589", "0.5123259", "0.5114783", "0.51102114", "0.51017123", "0.50942147", "0.50942147", "0.50942147", "0.50942147", "0.5090315", "0.50893015", "0.50840455", "0.5079356", "0.5077911", "0.5077764", "0.507217", "0.50720954", "0.5070086", "0.50676394", "0.50462866", "0.5045567", "0.50379056", "0.50295365", "0.50295365", "0.5028359", "0.502707", "0.5023295", "0.50232255", "0.50227875", "0.5021678", "0.501472", "0.49995527", "0.4997869", "0.49968162", "0.49916002", "0.49901807", "0.49803722", "0.4980222", "0.49741825", "0.49741825", "0.49711567", "0.49609587", "0.4959581", "0.4951741", "0.49477077", "0.49477077", "0.49474058", "0.49438292", "0.4938526", "0.4932367", "0.4929557", "0.49221873", "0.49166876", "0.49161208", "0.49149224", "0.490856", "0.49061748", "0.49030003", "0.4902459", "0.49002755", "0.48993048" ]
0.6256367
1
Return dataframe of all free agents.
def test_get_free_agents_season_start(league, season_start_date):
    # equals all players minus drafted players
    # make sure none of the draft players in list
    free_agents = league.as_of(season_start_date).free_agents()
    drafted = league.draft_results(format='Pandas')
    assert(len(free_agents.index.intersection(drafted.index)) == 0), "Should be no drafted players as free agents"
    # could make sure all 'all_players' that weren't drafted are here
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getFreeAgentRoles(self, ctx):\n server = ctx.message.server\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n\n if(len(free_agent_dict.items()) > 0):\n for key, value in free_agent_dict.items():\n try:\n try:\n freeAgentRole = self.find_role(server.roles, value)\n await self.bot.say(\"Free agent role for {0} = {1}\".format(key, freeAgentRole.mention))\n except LookupError:\n await self.bot.say(\":x: Could not find free agent role with id of {0}\".format(value))\n except IndexError:\n await self.bot.say(\":x: Error finding key value pair in free agent role dictionary\")\n else:\n await self.bot.say(\":x: No free agent roles are set in the dictionary\")", "def get_manager_stats(self):\n try:\n names, quantities, types, passwords = zip(*[(manager.name,\n manager.transports_in_fleet, manager.fleet_type, manager.password)\n for manager in self.manager_agents.values()])\n except ValueError:\n names, quantities, types, passwords = [], [], [], []\n\n df = pd.DataFrame.from_dict(\n {\"password\": passwords, \"name\": names, \"transports_in_fleet\": quantities, \"fleet_type\": types})\n return df", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def collect_nodes(self):\n free_nodes = Node.query.filter_by(project_id=None).all()\n return free_nodes", "def reset(self):\n agent_info = []\n\n for a in self.agents:\n agent_info.append(a.reset())\n print('agent_info', agent_info)\n return agent_info", "def get_communalities(self):\n df_communalities = pd.DataFrame(self.fa.get_communalities()).set_index(self.df.columns)\n if self.verbose:\n print(f'Communalities\\n{df_communalities}\\n')\n return df_communalities", "def get_station_stats(self):\n try:\n names, status, places, power = zip(*[(p.name, p.status,\n p.available_places, p.power)\n for p in self.station_agents.values()])\n except ValueError:\n names, status, places, power = [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"status\": status, \"available_places\": places, \"power\": power})\n return df", "def get_agents(self):\n ret = []\n for i in self.all_instances:\n if i.instance_type == InstanceType.AGENT:\n ret.append(i)\n return ret", "def get_transport_stats(self):\n try:\n names, assignments, distances, statuses, trusts, passwords, types, positions, vel_factors, rates, managers = zip(*[(t.name, t.num_assignments,\n \"{0:.2f}\".format(sum(t.distances)),\n status_to_str(t.status),\n t.trust,\n t.password, \n t.fleet_type,\n t.get_position(),\n t.velocity_factor,\n t.rates,\n t.fleetmanager_id\n )\n for t in self.transport_agents.values()])\n except ValueError:\n names, assignments, distances, statuses, trusts, passwords, types, positions, vel_factors, rates, managers = [], [], [], [], [], [], [], [], [], [], []\n df = pd.DataFrame.from_dict({\"name\": names,\n \"assignments\": assignments,\n \"distance\": distances,\n \"status\": statuses,\n \"trust\": trusts,\n \"password\": passwords,\n \"fleet_type\": types,\n \"position\": positions,\n \"velocity_factor\": vel_factors,\n \"rates\": rates,\n \"speed\": 2000,\n \"fleet\": managers,\n })\n return df", "def get_allocations(self):\n cursor = self.cur()\n cursor.execute('SELECT {col1}, {col2} FROM {tn}'.format(\n tn=\"allocation\", col1=\"room_name\", col2=\"person_id\"))\n allocations = cursor.fetchall()\n return 
allocations", "def br_get_requested_df(agent_name, *args):\r\n df = pd.DataFrame()\r\n if args == \"coils\":\r\n search_str = '{\"id\":{\"0\":\"' + \"coil\" + '_' # tiene que encontrar todas las coil que quieran fabricarse y como mucho los últimos 1000 registros.\r\n else:\r\n search_str = \"activation_time\" # takes every record with this. Each agent is sending that info while alive communicating to log.\r\n l = []\r\n N = 1000\r\n with open(r\"log.log\") as f:\r\n for line in f.readlines()[-N:]: # from the last 1000 lines\r\n if search_str in line: # find search_str\r\n n = line.find(\"{\")\r\n a = line[n:]\r\n l.append(a)\r\n df_0 = pd.DataFrame(l, columns=['register'])\r\n for ind in df_0.index:\r\n if ind == 0:\r\n element = df_0.loc[ind, 'register']\r\n z = json.loads(element)\r\n df = pd.DataFrame.from_dict(z)\r\n else:\r\n element = df_0.loc[ind, 'register']\r\n y = json.loads(element)\r\n b = pd.DataFrame.from_dict(y)\r\n df = df.append(b)\r\n df = df.reset_index(drop=True)\r\n if args == \"coils\": # if ca is requesting\r\n df = df.loc[0, 'to_do'] == \"search_auction\" # filters coils searching for auction\r\n return df", "def get_available_vehicles(self):\n return np.sum([self.env.acc[region][self.env.time] for region in self.env.region])", "async def clearFreeAgentRoles(self, ctx):\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n\n try:\n free_agent_dict.clear()\n self.save_data()\n await self.bot.say(\":white_check_mark: All free agent roles have been removed from dictionary\")\n except:\n await self.bot.say(\":x: Something went wrong when trying to clear the free agent role dictionary\")", "def get_customer_stats(self):\n try:\n names, waitings, totals, statuses, destinations, passwords, types, positions = zip(*[(p.name, p.get_waiting_time(),\n p.total_time(), status_to_str(p.status), p.get_position(), p.password, p.fleet_type, p.init_position)\n for p in self.customer_agents.values()])\n except ValueError:\n names, waitings, totals, statuses, destinations, passwords, types, positions = [], [], [], [], [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"waiting_time\": waitings, \"total_time\": totals, \"status\": statuses, \"destination\": destinations, \"password\": passwords, \"fleet_type\": types, \"position\": positions})\n return df", "def agents(self):\n return AgentManager(session=self._session)", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)", "def list_bundles(self):\n if not self.monitor or len(self.sent) == 0:\n idx = pd.MultiIndex(levels=[[],[]], labels=[[],[]], names=['bid', 'cid'])\n df = pd.DataFrame(index=idx)\n else:\n df = pd.DataFrame([b.to_dict() for b in self.sent]) if self.monitor else pd.DataFrame()\n df.set_index(['bid', 'cid'], drop=True, inplace=True)\n return df", "def getAllAgents(self):\n agent_dict ={}\n for member in self.membership.listMembers():\n if member.has_role('Agent'):\n agent_id = member.getUserName()\n agent_dict[agent_id]={}\n agent_dict[agent_id]['email'] = member.getProperty('email')\n agent_dict[agent_id]['areas'] = self.__wrapAreas(member.getProperty('areas'))\n agent_dict[agent_id]['fullname'] = member.getProperty('fullname')\n \n return agent_dict", "def RetrieveAllAgent(**argd):\n flag, ret = 
CGateway.core.RetrieveAllAgent(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n hmBuilder = []\n for hm in ret:\n hmBuilder.append(hm.ToJsonDict())\n return CGateway._SuccessResponse({'return': hmBuilder})", "def test_get_agents( self ):\n\n with self.app.app_context():\n url = '/donation/agents'\n\n # Ensure a GET with no saved agents returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n # Create some agents to retrieve.\n agent_models = []\n agent_jsons = get_agent_jsons()\n for agent_json in agent_jsons:\n agent_model = AgentSchema().load( agent_json ).data\n agent_models.append( agent_model )\n database.session.bulk_save_objects( agent_models )\n database.session.commit()\n\n # Ensure GET returns all agents.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), len( agent_jsons ) )", "def _get_next_features(self) -> DataFrameLike:\n # get nodes neighbors and aggregate their previous generation features\n prev_features = self._final_features[self.generation_count - 1].keys()\n features = {\n node: (\n self._features\n # select previous generation features for neighbors of current node\n .reindex(index=self.graph.get_neighbors(node), columns=prev_features)\n # aggregate\n .agg(self.aggs)\n # fill nans that result from dangling nodes with 0\n .fillna(0)\n # store new aggregations as dict\n .pipe(self._aggregated_df_to_dict)\n )\n for node in self.graph.get_nodes()\n }\n return pd.DataFrame.from_dict(features, orient='index')", "def get_free_sessions(self):\n return [session for session in self.sessions if not session.is_booked()]", "def get_free_nodes(self):\n return len(api.node.Node.list(self.workflow.request, False))", "def get(self):\n free_tables = []\n tables = TableDetails.query.all()\n for table in tables:\n if table.table_status == \"Empty\":\n free_tables.append(table)\n return free_tables, 200", "def list_agents(self):\n\n agents = self.vip.rpc.call(CONTROL, \"list_agents\").get(timeout=5)\n versions = self.vip.rpc.call(CONTROL, \"agent_versions\").get(timeout=5)\n status_running = self.status_agents()\n uuid_to_status = {}\n # proc_info has a list of [startproc, endprox]\n for a in agents:\n pinfo = None\n is_running = False\n for uuid, name, proc_info in status_running:\n if a['uuid'] == uuid:\n is_running = proc_info[0] > 0 and proc_info[1] == None\n pinfo = proc_info\n break\n\n uuid_to_status[a['uuid']] = {\n 'is_running': is_running,\n 'version': versions[a['uuid']][1],\n 'process_id': None,\n 'error_code': None,\n 'permissions': {\n 'can_stop': is_running,\n 'can_start': not is_running,\n 'can_restart': True,\n 'can_remove': True\n }\n }\n\n if pinfo:\n uuid_to_status[a['uuid']]['process_id'] = proc_info[0]\n uuid_to_status[a['uuid']]['error_code'] = proc_info[1]\n\n if 'volttroncentral' in a['name'] or \\\n 'vcplatform' in a['name']:\n uuid_to_status[a['uuid']]['permissions']['can_stop'] = False\n uuid_to_status[a['uuid']]['permissions']['can_remove'] = False\n\n # The default agent is stopped health looks like this.\n uuid_to_status[a['uuid']]['health'] = {\n 'status': 'UNKNOWN',\n 'context': None,\n 'last_updated': None\n }\n\n if is_running:\n identity = self.vip.rpc.call(CONTROL, 'agent_vip_identity',\n a['uuid']).get(timeout=30)\n try:\n status = self.vip.rpc.call(identity,\n 
'health.get_status').get(\n timeout=5)\n uuid_to_status[a['uuid']]['health'] = status\n except gevent.Timeout:\n _log.error(\"Couldn't get health from {} uuid: {}\".format(\n identity, a['uuid']\n ))\n except Unreachable:\n _log.error(\n \"Couldn't reach agent identity {} uuid: {}\".format(\n identity, a['uuid']\n ))\n for a in agents:\n if a['uuid'] in uuid_to_status.keys():\n _log.debug('UPDATING STATUS OF: {}'.format(a['uuid']))\n a.update(uuid_to_status[a['uuid']])\n return agents", "def getAllocations():\n allocationSeq = Cuebot.getStub('allocation').GetAll(\n facility_pb2.AllocGetAllRequest(), timeout=Cuebot.Timeout).allocations\n return [Allocation(a) for a in allocationSeq.allocations]", "def server_agent_list(ctx, output_format, columns):\n data = ctx.obj.get_agents()\n\n for agent in data['agent']:\n agent_info = ctx.obj.get_agent_by_agent_id(agent['id'])\n agent['ip'] = agent_info['ip']\n agent['pool'] = agent_info['pool']['name']\n agent['build_type'] = ctx.obj.get_agent_build_type(agent['id'])\n agent['build_text'] = ctx.obj.get_agent_build_text(agent['id'])\n\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['agent'])\n elif output_format == 'json':\n output_json_data(data)", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def display_agents(self):\n for agent in self.scheduler.agents:\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n r = to_geometry(agent.range_)\n print('define agent{} ellipse 4 4 white {} {}'.format(id_, x, y))\n print('define agentr{0} ellipse {1} {1} white {2} {3}'.format(\n id_, r, x, y))\n self.change_agent_status(agent)", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents", "def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]", "def get_design_matrix(feature_columns, slots_offered): # prev -> getDesignMatrix\n df = pd.DataFrame(columns=feature_columns, index=list(np.append(['NO_PURCHASE'], slots_offered)))\n for i in np.append(['NO_PURCHASE'], slots_offered):\n df.loc[i, [col for col in feature_columns if i in col]] = 1\n return df.fillna(0)", "def get_all_l3_agents(self, plugin, context):\n with context.session.begin(subtransactions=True):\n query = 
context.session.query(agents_db.Agent)\n query = query.filter(\n agents_db.Agent.topic == 'l3_agent')\n query = (query.filter_by(admin_state_up=True))\n\n return [l3_agent\n for l3_agent in query\n if (agentschedulers_db.AgentSchedulerDbMixin.\n is_eligible_agent(True, l3_agent))]", "def investments_table(self):\n table = pd.DataFrame(index=[etf.buy_date for etf in self.etfs.values()])\n table['Ticker'] = [name.split('-')[0].split('.')[0] for name in self.etfs.keys()]\n table['Buying Price (€)'] = [etf.buy_price for etf in self.etfs.values()]\n table['Number of Shares'] = [etf.n_shares for etf in self.etfs.values()]\n table['Commissions (€)'] = [etf.total_commissions() for etf in self.etfs.values()]\n table['Invested (€)'] = [etf.initial_investment() for etf in self.etfs.values()]\n table['Share Price (€)'] = [etf.stock_price() for etf in self.etfs.values()]\n table['Value (€)'] = [etf.present_value() for etf in self.etfs.values()]\n table['P/L (€)'] = [etf.profit_loss() for etf in self.etfs.values()]\n table['P/L (%)'] = [etf.profit_loss(pct=True) for etf in self.etfs.values()]\n return table", "def get_active_units(self):\n alive_units = self.get_alive_units()\n active_units = []\n for alive_unit in alive_units:\n if not alive_unit.ready_to_attack():\n continue\n active_units.append(alive_unit)\n return active_units", "def make_land_agents_2016(self):\r\n # add non-gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n\r\n landposlist = self.determine_landpos(hh_row, 'non_gtgp_latitude', 'non_gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.land_time = return_values(hh_row, 'non_gtgp_travel_time')[landposlist.index(landpos)]\r\n try:\r\n self.plant_type = return_values(hh_row, 'non_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'non_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 0\r\n lp = LandParcelAgent(hh_row, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp, landpos)\r\n self.schedule.add(lp)\r\n if self.gtgp_enrolled == 0 and landpos not in nongtgplist and landpos not in gtgplist:\r\n nongtgplist.append(landpos)\r\n # except:\r\n # pass\r\n\r\n # add gtgp\r\n for hh_row in agents: # 
from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n landposlist = self.determine_landpos(hh_row, 'gtgp_latitude', 'gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_time = return_values(hh_row, 'gtgp_travel_time')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.plant_type = return_values(hh_row, 'pre_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'pre_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 1\r\n\r\n lp_gtgp = LandParcelAgent(hh_id, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp_gtgp, landpos)\r\n self.schedule.add(lp_gtgp)\r\n if self.gtgp_enrolled == 1 and landpos not in gtgplist and landpos in nongtgplist:\r\n gtgplist.append(landpos)", "def get_test_dataframe(self,dataframe):\n traits = []\n for clade in self.collect_clades:\n names = []\n ts = clade.get_terminals()\n for ele in ts:\n names.append(ele.name)\n traits.append(names)\n new_test_df = []\n for trait in traits:\n test_biomarker= dataframe[trait].sum(axis=1)\n new_test_df.append(test_biomarker)\n new_test_df = pd.DataFrame(new_test_df).T\n return new_test_df", "def transport_agents(self):\n return self.get(\"transport_agents\")", "def get_all_members_info(self) -> None:\n members_url = self.get_parliament_members_urls()\n print(f\"Found {len(members_url)} number of Parliamentary members.\")\n members_data = []\n\n for member_url in tqdm(members_url):\n try:\n members_data.append(self.get_person_delayed(member_url))\n except Exception:\n print(f\"Failed to get members information: {member_url}\")\n\n self.dataframe = pd.DataFrame(members_data, columns=self._columns)\n self.dataframe.replace(\"\", np.nan, inplace=True, regex=True)", "def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def get_free_games(self) -> List[Game]:", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def _usage_matrix(self, normalize=True):\n result = pd.DataFrame([\n {str(k): v for k, v in s.net_output().items()} for s in self\n ]).fillna(0)\n\n # Rescale each resource to 
unity-max\n for c in result.columns:\n result[c] /= result[c].abs().max()\n\n return result", "def new_lists():\n free_naives, free_memory = [], []\n GC_waiting = [[] for gc in range(cf.nGCs)]\n return free_naives, free_memory, GC_waiting", "def customer_agents(self):\n return self.get(\"customer_agents\")", "def get_available_companies(team):", "def dataframe(self):\n return self.get_target().dataframe()", "def df(client_ids, start, end):\n obj = search(client_ids, start, end)\n df = DataFrame.from_dict(obj).T\n\n if df.empty:\n return df\n\n df.index.name = 'client_id'\n df = df.rename(columns={ 0: 'inactive', 1: 'active' })\n df['total'] = df.sum(axis=1)\n df = df.fillna(0).astype('int64')\n\n return df", "def get_tr_list(slot, br_data_df, agent_full_name, agent_directory):\r\n agent_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agents_df = agents_data()\r\n br_data_df['new_col'] = br_data_df['agent_type'].astype(str) ### esto no sé si debería cambiarlo\r\n br_data_df = br_data_df.loc[br_data_df['new_col'] == \"tc\"]\r\n br_data_df = br_data_df.reset_index(drop=True)\r\n to = str()\r\n if slot == 1:\r\n ca_location_1 = agent_df.loc[0, 'location_1']\r\n br_data_df['location_ca'] = str(ca_location_1) ### location 1!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_1 # location 1!!!!!\r\n elif slot == 2:\r\n ca_location_2 = agent_df.loc[0, 'location_2']\r\n br_data_df['location_ca'] = str(ca_location_2) ### location 2!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_2 # location 2!!!!!\r\n active_users_location_df = br_data_df\r\n ca_locations_dist_df = locations_min_distances()\r\n ca_locations_dist_df = ca_locations_dist_df[['id_min', to]]\r\n tr_list = br_data_df['from_to'].tolist()\r\n values = []\r\n keys = []\r\n for i in tr_list:\r\n a = ca_locations_dist_df.loc[ca_locations_dist_df[to] == i]\r\n id_loop = a.loc[a.index[-1], 'id_min']\r\n tr_to_loop = a.loc[a.index[-1], to]\r\n keys.append(id_loop)\r\n values.append(tr_to_loop)\r\n segment = dict(zip(keys, values))\r\n segment_df = pd.DataFrame([segment])\r\n segment_df = segment_df.T\r\n indexes = segment_df.index.values.tolist()\r\n segment_df = segment_df.rename(columns={0: \"segment\"})\r\n segment_df.insert(loc=0, column='id_min', value=indexes)\r\n segment_df = segment_df.sort_values(by=['id_min'])\r\n segment_df = segment_df.reset_index(drop=True) # segment_df contains the location of active tr and id_name sorted by shortest distance to them\r\n tr_list = active_users_location_df['agent'].tolist()\r\n jid_names = pd.DataFrame()\r\n for i in tr_list:\r\n a = agents_df.loc[agents_df['Name'] == i]\r\n jid_names = jid_names.append(a)\r\n active_users_location_df = active_users_location_df.rename(columns={'from_to': 'segment'})\r\n #print(f'active_users_location_df: {active_users_location_df}')\r\n #print(f'segment_df: {segment_df}')\r\n results = active_users_location_df.merge(segment_df, on='segment')\r\n\r\n results = results.rename(columns={'agent': 'Name'})\r\n results = results.merge(jid_names, on='Name')\r\n results = results.sort_values(by=['id_min'])\r\n results = results[['Name', 'location', 'segment', 'id_min', 'User name']]\r\n return results", "def dataframe(self):\n return self.generator.dataframe", "def 
get_available_agendas(self):\n pass", "def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list", "def station_agents(self):\n return self.get(\"station_agents\")", "def getAgents(issuer=False, dbn='core', env=None):\n global gDbEnv\n\n if env is None:\n env = gDbEnv\n\n if env is None:\n raise DatabaseError(\"Database environment not set up\")\n\n entries = []\n subDb = gDbEnv.open_db(dbn.encode(\"utf-8\"), dupsort=True) # open named sub db named dbn within env\n with gDbEnv.begin(db=subDb) as txn: # txn is a Transaction object\n with txn.cursor() as cursor:\n if cursor.first(): # first key in database\n while True:\n key = cursor.key().decode()\n if len(key) == DID_LENGTH and \"/\" not in key:\n value = cursor.value().decode()\n ser, sep, sig = value.partition(SEPARATOR)\n try:\n dat = json.loads(ser, object_pairs_hook=ODict)\n except ValueError as ex:\n if cursor.next():\n continue\n else:\n break\n try:\n did, index = dat[\"signer\"].rsplit(\"#\", maxsplit=1)\n except (AttributeError, ValueError) as ex:\n if cursor.next():\n continue\n else:\n break\n\n if did == key: # self signed so agent\n if issuer:\n if \"issuants\" in dat:\n entries.append(key)\n else:\n entries.append(key)\n if not cursor.next(): # next key in database if any\n break\n return entries", "def Load_AllCourseBuildersStatistics(self, data, suffix=''):\n\t\tself.temp[:]=[]\n\t\tfor x in xrange(len(self.active_tournaments)):\n\t\t\tself.temp.append(self.active_tournaments[x])\n\n\t\treturn self.temp", "def available_to_lay(self) -> pd.DataFrame:\n if not self.data_is_loaded:\n self.load_data()\n\n return self._available_to_lay", "def clear_agents(self):\n self.set(\"manager_agents\", {})\n self.set(\"transport_agents\", {})\n self.set(\"customer_agents\", {})\n self.set(\"station_agents\", {})\n self.simulation_time = None\n self.simulation_init_time = None", "def get_pnodes(self, start: datetime, end: datetime) -> pd.DataFrame:\n\n self._validate_date_range(start, end)\n\n params: Dict[str, Any] = {\n \"queryname\": \"ATL_PNODE\",\n \"startdatetime\": self._get_UTC_string(start),\n \"enddatetime\": self._get_UTC_string(end),\n \"Pnode_type\": \"ALL\",\n \"version\": 1,\n \"resultformat\": 6,\n }\n\n response = self.request(params)\n\n return self.get_df(response)", "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of 
{}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def get_alive_units(self):\n alive_units = []\n for unit in self.units:\n if not unit.is_alive():\n continue\n alive_units.append(unit)\n return alive_units", "def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']", "def get_active_features(summary_df, slots_offered): # prev -> getActiveFeatures\n disc_cols = [col+'_Discount' for col in slots_offered]\n eco_cols = [col+'_Eco' for col in slots_offered]\n gr_cols = [col+'_Eco' for col in slots_offered]\n features = summary_df.loc[:, disc_cols+eco_cols+gr_cols]\n features = features.loc[:, features.sum(axis=0) > 0]\n for i in reversed(['NO_PURCHASE']+slots_offered):\n features.insert(0, i+'_Asc', value=1)\n return features, disc_cols, eco_cols, gr_cols", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_available_companies_and_people(team):", "def return_consumed_capacity_indexes(self):\n return self.__return_consumed_capacity.indexes()", "def fetchall_df(result_proxy):\n# result = result_proxy.fetchall(keep_col_names=T) ???\n result = [row for row in tqdm(result_proxy)]\n return pd.DataFrame(result, columns=result[0].keys())", "def server_agent_statistics(ctx):\n data = ctx.obj.get_agent_statistics()\n output_json_data(data)", "def _get_tables(self) -> pd.DataFrame:\n return self.server._execute_extract(\n \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '{}'\".format(\n self.name\n )\n )", "def reset(self):\n self.reset_agent_locations()\n return self.x_agent", "def get_distance_matrix():\n df_afstandn2 = get_dataframe(\"\"\"SELECT *\n FROM proj_afval_netwerk.afv_poi_afstand\n WHERE afstand < 1000\n \"\"\")\n return df_afstandn2", "def tabulate(self):\n\n self.tables = []\n\n for sim in tqdm.tqdm(self.simulations):\n self.tables.append(pd.read_csv(sim.get_table()))\n\n return self.tables", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def 
__get_allocations(self):\n\n query = db_session.query(Schedule).all()\n\n db_session.close()\n\n service_allocations = dict()\n unit_allocations = dict()\n\n for alloc in query:\n\n service = (alloc.headcode, alloc.origin_location, alloc.origin_departure)\n unit = alloc.unit\n\n if service not in service_allocations:\n service_allocations[service] = set()\n service_allocations[service].add(unit)\n\n if unit not in unit_allocations:\n unit_allocations[unit] = set()\n unit_allocations[unit].add(service)\n\n return (service_allocations, unit_allocations)", "def sample(self):\n return [agent_observation_space.sample() for agent_observation_space in self._agents_observation_space]", "def get_occupied_tiles(self):\r\n occupied = np.zeros(self.searchenv.conv.num_tiles)\r\n #Convert current state (positions of agents) to tile indices\r\n tiles = self.searchenv.conv.state_to_tile(self.searchstate.positions)\r\n valid_tiles = tiles[self.searchstate.actives == 1]\r\n occupied[valid_tiles] = 1\r\n return occupied", "def get_df(self) -> pd.DataFrame:\n return pd.DataFrame(self.fetchall(), columns=self.headers())", "def describe_agents(agentIds=None, filters=None, maxResults=None, nextToken=None):\n pass", "def get_features(self):\n x,y = self.agent\n return np.array([x,y])", "def df_seqs_concepts(self):\n # Get the data #\n df = pandas.DataFrame(self.a.seq_to_counts)\n df = df.fillna(0)\n # Rename to original names #\n df = df.rename(columns=self.a.renamed_to_orig)\n # Rename envo integers to envo strings #\n envo_int_to_id = lambda e: \"ENVO:%08d\" % e\n df = df.rename(index=envo_int_to_id)\n # Return\n return df", "def get_live_node_list(self) -> []:\n do_backup = False\n try:\n response = self.dynamo_table.scan()\n now = self.get_current_time()\n\n new_live_nodes = [item['IP'] for item in response['Items'] if\n int(item['lastActiveTime']) >= now - 15000]\n\n if abs(len(new_live_nodes)-len(self.live_nodes)) != 0:\n self.set_num_live_and_prev_nodes(len(new_live_nodes))\n do_backup = True\n\n self.live_nodes = new_live_nodes\n\n except Exception as e:\n print(f'Error in get_live_node_list: {e}')\n\n return self.live_nodes, do_backup", "def get_available_rental_instruments(self) -> list:\n self.cursor.execute(\"\"\"\n SELECT DISTINCT name, brand, monthly_cost, ri_id AS id\n FROM rental_instrument AS ri\n WHERE NOT EXISTS\n (SELECT 1 FROM rental AS r\n WHERE ri.ri_id = r.ri_id \n AND CURRENT_DATE < end_date\n AND terminated IS NULL)\n \"\"\")\n self.db.commit()\n return self._cursor_result()", "def all_equipment(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"equipment\")\n\t\ttmpl = lookup.get_template(\"equipment.html\")\n\t\treturn (tmpl.render(equipment=activity_all))", "def workers_status(self):\n workers = []\n for agent in self.agents_status():\n workers += agent['workers']\n return workers", "def oswmem_free_memory(self,min=0): \n result = self.df[self.df['free mmemory'] > min].all \n return result", "def make_hh_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-97 from excel_import\r\n self.hhpos = self.determine_hhpos(hh_row, 'house_latitude', 'house_longitude')\r\n self.hh_id = return_values(hh_row, 'hh_id')\r\n self.admin_village = 1\r\n\r\n # 2016\r\n mig_remittances = return_values(hh_row, 'mig_remittances') # remittances of initial migrant\r\n if mig_remittances is None:\r\n mig_remittances = 0\r\n household_income_list[hh_row - 1] = int(mig_remittances)\r\n household_remittances_list[hh_row - 1] = int(mig_remittances)\r\n\r\n if 
return_values(hh_row, 'initial_migrants') is not None:\r\n out_mig_list[hh_row - 1] = 1\r\n household_migrants_list.append(self.hh_id)\r\n cumulative_mig_list[hh_row - 1] = 1\r\n\r\n num_labor_list[hh_row - 1] = initialize_labor(hh_row)\r\n hh_size_list[hh_row - 1] = len(return_values(hh_row, 'age'))\r\n\r\n a = HouseholdAgent(hh_row, self, self.hh_id, self.admin_village)\r\n self.space.place_agent(a, self.hhpos) # admin_village placeholder\r\n self.schedule.add(a)", "def get_global_active_list(self):\n return self.api.get_active_global_version_manager()", "def retrieve_leads(self) -> pd.DataFrame:\n if self.leads_df is None:\n self.leads_df = pd.read_sql_query('SELECT * FROM clean_leads ORDER BY created_on DESC',\n con=self.connection())\n return self.leads_df", "def make_output_df(self):\n df = pd.concat([pd.DataFrame(dat) for dat in [self.qdata, self.pdata]], axis=1)\n columns = np.hstack(([['{}{}'.format(x, c) for c in self.actions] for x in ['q', 'p']]))\n df.columns = columns\n df.insert(0, 'trial', np.arange(1, df.shape[0]+1))\n df['choice'] = self.choices\n df['feedback'] = self.feedback\n# r = np.array(self.bandits.rvalues)\n# p = np.array(self.bandits.preward)\n df['optimal'] = self.demand\n df.insert(0, 'agent', 1)\n self.data = df.copy()", "async def get_all_investigators(request):\n client_key = general.get_request_key_header(request)\n investigator_list = await security_messaging.get_investigators(request.app.config.VAL_CONN, client_key)\n\n investigator_list_json = []\n for address, dp in investigator_list.items():\n investigator_list_json.append({\n 'public_key': dp.public_key,\n 'name': dp.name\n })\n return response.json(body={'data': investigator_list_json},\n headers=general.get_response_headers())", "def dataframe(self):\n frames = []\n for game in self.__iter__():\n df = game.dataframe\n if df is not None:\n frames.append(df)\n if frames == []:\n return None\n return pd.concat(frames)", "def gasprices():\n # Query all gas price data by state\n Gasprices = engine.execute(\"SELECT * FROM gasprices\").fetchall()\n \n return jsonify({'Gasprices': [dict(row) for row in Gasprices]})", "def build_df(packages, software_mentions, cran_links, titles): \n df = pd.DataFrame({'CRAN Package' : packages, 'CRAN Link' : cran_links, 'Title' : titles})\n df = df[df['CRAN Package'].isin(software_mentions)]\n return df", "def auction_blank_df():\r\n df = pd.DataFrame([], columns=['id', 'agent_type', 'location_1', 'location_2', 'location',\r\n 'coil_auction_winner', 'coil_length', 'coil_width', 'coil_thickness', 'coil_weight',\r\n 'int_fab', 'bid', 'budget', 'ship_date', 'ship_date_rating',\r\n 'setup_speed', 'T1', 'T2', 'T3', 'T4', 'T5', 'q', 'T1dif', 'T2dif', 'T3dif', 'T4dif', 'T5dif', 'total_temp_dif', 'temp_rating',\r\n 'bid_rating', 'int_fab_priority', 'int_fab_rating', 'rating', 'rating_dif', 'negotiation',\r\n 'pre_auction_start', 'auction_start', 'auction_finish',\r\n 'active_tr_slot_1', 'active_tr_slot_2', 'tr_booking_confirmation_at', 'active_wh', 'wh_booking_confirmation_at', 'wh_location', 'active_coils', 'auction_coils',\r\n 'brAVG(tr_op_time)', 'brAVG(ca_op_time)', 'AVG(tr_op_time)', 'AVG(ca_op_time)', 'fab_start'\r\n 'slot_1_start', 'slot_1_end', 'slot_2_start', 'slot_2_end', 'name_tr_slot_1', 'name_tr_slot_2', 'delivered_to_wh', 'handling_cost_slot_1', 'handling_cost_slot_2',\r\n 'coil_ratings_1', 'coil_ratings_2',\r\n 'pre_auction_duration', 'auction_duration',\r\n 'gantt', 'location_diagram'\r\n ])\r\n return df", "def acquire(self):\n self.logger.debug(\"in 
NerscAllocationInfo acquire\")\n return {\"Nersc_Allocation_Info\": pd.DataFrame(self.send_query())}" ]
[ "0.5804655", "0.5775972", "0.56420845", "0.55906963", "0.54590887", "0.54168296", "0.5397101", "0.5394799", "0.5385905", "0.5349997", "0.5243113", "0.5241917", "0.5230085", "0.5196623", "0.5092877", "0.5069075", "0.5068402", "0.50677747", "0.5053582", "0.5020693", "0.5011909", "0.499841", "0.4996714", "0.4990112", "0.49826375", "0.49716035", "0.4956883", "0.4941838", "0.490633", "0.490404", "0.49012014", "0.49006268", "0.48981276", "0.48972926", "0.48925182", "0.48686177", "0.4863197", "0.4859084", "0.48590088", "0.48391512", "0.4838413", "0.48359275", "0.48314863", "0.482395", "0.48168853", "0.48084608", "0.48000592", "0.4797037", "0.47932652", "0.47926065", "0.47693253", "0.47620803", "0.47606754", "0.4757628", "0.4750027", "0.47330275", "0.47248966", "0.47223696", "0.47217152", "0.47120988", "0.471173", "0.46947607", "0.46931797", "0.4690812", "0.46901882", "0.46849737", "0.46849737", "0.46849737", "0.46849737", "0.46785447", "0.46750408", "0.46692425", "0.46687204", "0.46543527", "0.465078", "0.465036", "0.46492186", "0.46445724", "0.46353364", "0.46347335", "0.4632744", "0.46325076", "0.46311072", "0.46279627", "0.46156347", "0.46052358", "0.4604768", "0.4601398", "0.45962626", "0.45862105", "0.45845553", "0.45796385", "0.45780057", "0.45689896", "0.4568744", "0.45672032", "0.45489335", "0.45462143", "0.45448393", "0.45402637" ]
0.5439093
5
Return dataframe of all free agents.
def test_fantasy_status_nov_1(league): nov_1 = datetime.datetime(2019,11,1) players = league.as_of(nov_1).all_players() # make sure sammy blais is not a free agent, he was picked up oct 31 assert(players.loc[6544, 'fantasy_status'] != 'FA')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getFreeAgentRoles(self, ctx):\n server = ctx.message.server\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n\n if(len(free_agent_dict.items()) > 0):\n for key, value in free_agent_dict.items():\n try:\n try:\n freeAgentRole = self.find_role(server.roles, value)\n await self.bot.say(\"Free agent role for {0} = {1}\".format(key, freeAgentRole.mention))\n except LookupError:\n await self.bot.say(\":x: Could not find free agent role with id of {0}\".format(value))\n except IndexError:\n await self.bot.say(\":x: Error finding key value pair in free agent role dictionary\")\n else:\n await self.bot.say(\":x: No free agent roles are set in the dictionary\")", "def get_manager_stats(self):\n try:\n names, quantities, types, passwords = zip(*[(manager.name,\n manager.transports_in_fleet, manager.fleet_type, manager.password)\n for manager in self.manager_agents.values()])\n except ValueError:\n names, quantities, types, passwords = [], [], [], []\n\n df = pd.DataFrame.from_dict(\n {\"password\": passwords, \"name\": names, \"transports_in_fleet\": quantities, \"fleet_type\": types})\n return df", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def collect_nodes(self):\n free_nodes = Node.query.filter_by(project_id=None).all()\n return free_nodes", "def reset(self):\n agent_info = []\n\n for a in self.agents:\n agent_info.append(a.reset())\n print('agent_info', agent_info)\n return agent_info", "def test_get_free_agents_season_start(league, season_start_date):\n # equals all players minus drafted players\n # make sure none of the draft players in list\n free_agents = league.as_of(season_start_date).free_agents()\n drafted = league.draft_results(format='Pandas')\n assert(len(free_agents.index.intersection(drafted.index)) == 0), \"Should be no drafted players as free agents\"\n # could make sure all 'all_players' that weren't drafted are here", "def get_communalities(self):\n df_communalities = pd.DataFrame(self.fa.get_communalities()).set_index(self.df.columns)\n if self.verbose:\n print(f'Communalities\\n{df_communalities}\\n')\n return df_communalities", "def get_station_stats(self):\n try:\n names, status, places, power = zip(*[(p.name, p.status,\n p.available_places, p.power)\n for p in self.station_agents.values()])\n except ValueError:\n names, status, places, power = [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"status\": status, \"available_places\": places, \"power\": power})\n return df", "def get_agents(self):\n ret = []\n for i in self.all_instances:\n if i.instance_type == InstanceType.AGENT:\n ret.append(i)\n return ret", "def get_transport_stats(self):\n try:\n names, assignments, distances, statuses, trusts, passwords, types, positions, vel_factors, rates, managers = zip(*[(t.name, t.num_assignments,\n \"{0:.2f}\".format(sum(t.distances)),\n status_to_str(t.status),\n t.trust,\n t.password, \n t.fleet_type,\n t.get_position(),\n t.velocity_factor,\n t.rates,\n t.fleetmanager_id\n )\n for t in self.transport_agents.values()])\n except ValueError:\n names, assignments, distances, statuses, trusts, passwords, types, positions, vel_factors, rates, managers = [], [], [], [], [], [], [], [], [], [], []\n df = pd.DataFrame.from_dict({\"name\": names,\n \"assignments\": assignments,\n \"distance\": distances,\n 
\"status\": statuses,\n \"trust\": trusts,\n \"password\": passwords,\n \"fleet_type\": types,\n \"position\": positions,\n \"velocity_factor\": vel_factors,\n \"rates\": rates,\n \"speed\": 2000,\n \"fleet\": managers,\n })\n return df", "def get_allocations(self):\n cursor = self.cur()\n cursor.execute('SELECT {col1}, {col2} FROM {tn}'.format(\n tn=\"allocation\", col1=\"room_name\", col2=\"person_id\"))\n allocations = cursor.fetchall()\n return allocations", "def br_get_requested_df(agent_name, *args):\r\n df = pd.DataFrame()\r\n if args == \"coils\":\r\n search_str = '{\"id\":{\"0\":\"' + \"coil\" + '_' # tiene que encontrar todas las coil que quieran fabricarse y como mucho los últimos 1000 registros.\r\n else:\r\n search_str = \"activation_time\" # takes every record with this. Each agent is sending that info while alive communicating to log.\r\n l = []\r\n N = 1000\r\n with open(r\"log.log\") as f:\r\n for line in f.readlines()[-N:]: # from the last 1000 lines\r\n if search_str in line: # find search_str\r\n n = line.find(\"{\")\r\n a = line[n:]\r\n l.append(a)\r\n df_0 = pd.DataFrame(l, columns=['register'])\r\n for ind in df_0.index:\r\n if ind == 0:\r\n element = df_0.loc[ind, 'register']\r\n z = json.loads(element)\r\n df = pd.DataFrame.from_dict(z)\r\n else:\r\n element = df_0.loc[ind, 'register']\r\n y = json.loads(element)\r\n b = pd.DataFrame.from_dict(y)\r\n df = df.append(b)\r\n df = df.reset_index(drop=True)\r\n if args == \"coils\": # if ca is requesting\r\n df = df.loc[0, 'to_do'] == \"search_auction\" # filters coils searching for auction\r\n return df", "def get_available_vehicles(self):\n return np.sum([self.env.acc[region][self.env.time] for region in self.env.region])", "async def clearFreeAgentRoles(self, ctx):\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n\n try:\n free_agent_dict.clear()\n self.save_data()\n await self.bot.say(\":white_check_mark: All free agent roles have been removed from dictionary\")\n except:\n await self.bot.say(\":x: Something went wrong when trying to clear the free agent role dictionary\")", "def get_customer_stats(self):\n try:\n names, waitings, totals, statuses, destinations, passwords, types, positions = zip(*[(p.name, p.get_waiting_time(),\n p.total_time(), status_to_str(p.status), p.get_position(), p.password, p.fleet_type, p.init_position)\n for p in self.customer_agents.values()])\n except ValueError:\n names, waitings, totals, statuses, destinations, passwords, types, positions = [], [], [], [], [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"waiting_time\": waitings, \"total_time\": totals, \"status\": statuses, \"destination\": destinations, \"password\": passwords, \"fleet_type\": types, \"position\": positions})\n return df", "def agents(self):\n return AgentManager(session=self._session)", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)", "def list_bundles(self):\n if not self.monitor or len(self.sent) == 0:\n idx = pd.MultiIndex(levels=[[],[]], labels=[[],[]], names=['bid', 'cid'])\n df = pd.DataFrame(index=idx)\n else:\n df = pd.DataFrame([b.to_dict() for b in self.sent]) if self.monitor else pd.DataFrame()\n df.set_index(['bid', 'cid'], drop=True, inplace=True)\n return df", "def getAllAgents(self):\n 
agent_dict ={}\n for member in self.membership.listMembers():\n if member.has_role('Agent'):\n agent_id = member.getUserName()\n agent_dict[agent_id]={}\n agent_dict[agent_id]['email'] = member.getProperty('email')\n agent_dict[agent_id]['areas'] = self.__wrapAreas(member.getProperty('areas'))\n agent_dict[agent_id]['fullname'] = member.getProperty('fullname')\n \n return agent_dict", "def RetrieveAllAgent(**argd):\n flag, ret = CGateway.core.RetrieveAllAgent(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n hmBuilder = []\n for hm in ret:\n hmBuilder.append(hm.ToJsonDict())\n return CGateway._SuccessResponse({'return': hmBuilder})", "def test_get_agents( self ):\n\n with self.app.app_context():\n url = '/donation/agents'\n\n # Ensure a GET with no saved agents returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n # Create some agents to retrieve.\n agent_models = []\n agent_jsons = get_agent_jsons()\n for agent_json in agent_jsons:\n agent_model = AgentSchema().load( agent_json ).data\n agent_models.append( agent_model )\n database.session.bulk_save_objects( agent_models )\n database.session.commit()\n\n # Ensure GET returns all agents.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), len( agent_jsons ) )", "def _get_next_features(self) -> DataFrameLike:\n # get nodes neighbors and aggregate their previous generation features\n prev_features = self._final_features[self.generation_count - 1].keys()\n features = {\n node: (\n self._features\n # select previous generation features for neighbors of current node\n .reindex(index=self.graph.get_neighbors(node), columns=prev_features)\n # aggregate\n .agg(self.aggs)\n # fill nans that result from dangling nodes with 0\n .fillna(0)\n # store new aggregations as dict\n .pipe(self._aggregated_df_to_dict)\n )\n for node in self.graph.get_nodes()\n }\n return pd.DataFrame.from_dict(features, orient='index')", "def get_free_sessions(self):\n return [session for session in self.sessions if not session.is_booked()]", "def get_free_nodes(self):\n return len(api.node.Node.list(self.workflow.request, False))", "def get(self):\n free_tables = []\n tables = TableDetails.query.all()\n for table in tables:\n if table.table_status == \"Empty\":\n free_tables.append(table)\n return free_tables, 200", "def list_agents(self):\n\n agents = self.vip.rpc.call(CONTROL, \"list_agents\").get(timeout=5)\n versions = self.vip.rpc.call(CONTROL, \"agent_versions\").get(timeout=5)\n status_running = self.status_agents()\n uuid_to_status = {}\n # proc_info has a list of [startproc, endprox]\n for a in agents:\n pinfo = None\n is_running = False\n for uuid, name, proc_info in status_running:\n if a['uuid'] == uuid:\n is_running = proc_info[0] > 0 and proc_info[1] == None\n pinfo = proc_info\n break\n\n uuid_to_status[a['uuid']] = {\n 'is_running': is_running,\n 'version': versions[a['uuid']][1],\n 'process_id': None,\n 'error_code': None,\n 'permissions': {\n 'can_stop': is_running,\n 'can_start': not is_running,\n 'can_restart': True,\n 'can_remove': True\n }\n }\n\n if pinfo:\n uuid_to_status[a['uuid']]['process_id'] = proc_info[0]\n uuid_to_status[a['uuid']]['error_code'] = proc_info[1]\n\n if 'volttroncentral' in a['name'] or \\\n 'vcplatform' in a['name']:\n 
uuid_to_status[a['uuid']]['permissions']['can_stop'] = False\n uuid_to_status[a['uuid']]['permissions']['can_remove'] = False\n\n # The default agent is stopped health looks like this.\n uuid_to_status[a['uuid']]['health'] = {\n 'status': 'UNKNOWN',\n 'context': None,\n 'last_updated': None\n }\n\n if is_running:\n identity = self.vip.rpc.call(CONTROL, 'agent_vip_identity',\n a['uuid']).get(timeout=30)\n try:\n status = self.vip.rpc.call(identity,\n 'health.get_status').get(\n timeout=5)\n uuid_to_status[a['uuid']]['health'] = status\n except gevent.Timeout:\n _log.error(\"Couldn't get health from {} uuid: {}\".format(\n identity, a['uuid']\n ))\n except Unreachable:\n _log.error(\n \"Couldn't reach agent identity {} uuid: {}\".format(\n identity, a['uuid']\n ))\n for a in agents:\n if a['uuid'] in uuid_to_status.keys():\n _log.debug('UPDATING STATUS OF: {}'.format(a['uuid']))\n a.update(uuid_to_status[a['uuid']])\n return agents", "def getAllocations():\n allocationSeq = Cuebot.getStub('allocation').GetAll(\n facility_pb2.AllocGetAllRequest(), timeout=Cuebot.Timeout).allocations\n return [Allocation(a) for a in allocationSeq.allocations]", "def server_agent_list(ctx, output_format, columns):\n data = ctx.obj.get_agents()\n\n for agent in data['agent']:\n agent_info = ctx.obj.get_agent_by_agent_id(agent['id'])\n agent['ip'] = agent_info['ip']\n agent['pool'] = agent_info['pool']['name']\n agent['build_type'] = ctx.obj.get_agent_build_type(agent['id'])\n agent['build_text'] = ctx.obj.get_agent_build_text(agent['id'])\n\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['agent'])\n elif output_format == 'json':\n output_json_data(data)", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def display_agents(self):\n for agent in self.scheduler.agents:\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n r = to_geometry(agent.range_)\n print('define agent{} ellipse 4 4 white {} {}'.format(id_, x, y))\n print('define agentr{0} ellipse {1} {1} white {2} {3}'.format(\n id_, r, x, y))\n self.change_agent_status(agent)", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents", "def eligible_nodes(self):\n return [v for v in self.G if 
self.eligible_node(v)]", "def get_design_matrix(feature_columns, slots_offered): # prev -> getDesignMatrix\n df = pd.DataFrame(columns=feature_columns, index=list(np.append(['NO_PURCHASE'], slots_offered)))\n for i in np.append(['NO_PURCHASE'], slots_offered):\n df.loc[i, [col for col in feature_columns if i in col]] = 1\n return df.fillna(0)", "def get_all_l3_agents(self, plugin, context):\n with context.session.begin(subtransactions=True):\n query = context.session.query(agents_db.Agent)\n query = query.filter(\n agents_db.Agent.topic == 'l3_agent')\n query = (query.filter_by(admin_state_up=True))\n\n return [l3_agent\n for l3_agent in query\n if (agentschedulers_db.AgentSchedulerDbMixin.\n is_eligible_agent(True, l3_agent))]", "def investments_table(self):\n table = pd.DataFrame(index=[etf.buy_date for etf in self.etfs.values()])\n table['Ticker'] = [name.split('-')[0].split('.')[0] for name in self.etfs.keys()]\n table['Buying Price (€)'] = [etf.buy_price for etf in self.etfs.values()]\n table['Number of Shares'] = [etf.n_shares for etf in self.etfs.values()]\n table['Commissions (€)'] = [etf.total_commissions() for etf in self.etfs.values()]\n table['Invested (€)'] = [etf.initial_investment() for etf in self.etfs.values()]\n table['Share Price (€)'] = [etf.stock_price() for etf in self.etfs.values()]\n table['Value (€)'] = [etf.present_value() for etf in self.etfs.values()]\n table['P/L (€)'] = [etf.profit_loss() for etf in self.etfs.values()]\n table['P/L (%)'] = [etf.profit_loss(pct=True) for etf in self.etfs.values()]\n return table", "def get_active_units(self):\n alive_units = self.get_alive_units()\n active_units = []\n for alive_unit in alive_units:\n if not alive_unit.ready_to_attack():\n continue\n active_units.append(alive_unit)\n return active_units", "def make_land_agents_2016(self):\r\n # add non-gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n\r\n landposlist = self.determine_landpos(hh_row, 'non_gtgp_latitude', 'non_gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.land_time = return_values(hh_row, 'non_gtgp_travel_time')[landposlist.index(landpos)]\r\n try:\r\n self.plant_type = return_values(hh_row, 'non_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'non_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 0\r\n lp = LandParcelAgent(hh_row, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, 
self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp, landpos)\r\n self.schedule.add(lp)\r\n if self.gtgp_enrolled == 0 and landpos not in nongtgplist and landpos not in gtgplist:\r\n nongtgplist.append(landpos)\r\n # except:\r\n # pass\r\n\r\n # add gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n landposlist = self.determine_landpos(hh_row, 'gtgp_latitude', 'gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_time = return_values(hh_row, 'gtgp_travel_time')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.plant_type = return_values(hh_row, 'pre_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'pre_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 1\r\n\r\n lp_gtgp = LandParcelAgent(hh_id, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp_gtgp, landpos)\r\n self.schedule.add(lp_gtgp)\r\n if self.gtgp_enrolled == 1 and landpos not in gtgplist and landpos in nongtgplist:\r\n gtgplist.append(landpos)", "def get_test_dataframe(self,dataframe):\n traits = []\n for clade in self.collect_clades:\n names = []\n ts = clade.get_terminals()\n for ele in ts:\n names.append(ele.name)\n traits.append(names)\n new_test_df = []\n for trait in traits:\n test_biomarker= dataframe[trait].sum(axis=1)\n new_test_df.append(test_biomarker)\n new_test_df = pd.DataFrame(new_test_df).T\n return new_test_df", "def transport_agents(self):\n return self.get(\"transport_agents\")", "def get_all_members_info(self) -> None:\n members_url = self.get_parliament_members_urls()\n print(f\"Found {len(members_url)} number of Parliamentary members.\")\n members_data = []\n\n for member_url in tqdm(members_url):\n try:\n members_data.append(self.get_person_delayed(member_url))\n except Exception:\n print(f\"Failed to get members information: {member_url}\")\n\n self.dataframe = pd.DataFrame(members_data, columns=self._columns)\n self.dataframe.replace(\"\", np.nan, inplace=True, regex=True)", "def _finalize_features(self) -> 
DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def get_free_games(self) -> List[Game]:", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def _usage_matrix(self, normalize=True):\n result = pd.DataFrame([\n {str(k): v for k, v in s.net_output().items()} for s in self\n ]).fillna(0)\n\n # Rescale each resource to unity-max\n for c in result.columns:\n result[c] /= result[c].abs().max()\n\n return result", "def new_lists():\n free_naives, free_memory = [], []\n GC_waiting = [[] for gc in range(cf.nGCs)]\n return free_naives, free_memory, GC_waiting", "def customer_agents(self):\n return self.get(\"customer_agents\")", "def get_available_companies(team):", "def dataframe(self):\n return self.get_target().dataframe()", "def df(client_ids, start, end):\n obj = search(client_ids, start, end)\n df = DataFrame.from_dict(obj).T\n\n if df.empty:\n return df\n\n df.index.name = 'client_id'\n df = df.rename(columns={ 0: 'inactive', 1: 'active' })\n df['total'] = df.sum(axis=1)\n df = df.fillna(0).astype('int64')\n\n return df", "def get_tr_list(slot, br_data_df, agent_full_name, agent_directory):\r\n agent_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agents_df = agents_data()\r\n br_data_df['new_col'] = br_data_df['agent_type'].astype(str) ### esto no sé si debería cambiarlo\r\n br_data_df = br_data_df.loc[br_data_df['new_col'] == \"tc\"]\r\n br_data_df = br_data_df.reset_index(drop=True)\r\n to = str()\r\n if slot == 1:\r\n ca_location_1 = agent_df.loc[0, 'location_1']\r\n br_data_df['location_ca'] = str(ca_location_1) ### location 1!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_1 # location 1!!!!!\r\n elif slot == 2:\r\n ca_location_2 = agent_df.loc[0, 'location_2']\r\n br_data_df['location_ca'] = str(ca_location_2) ### location 2!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_2 # location 2!!!!!\r\n active_users_location_df = br_data_df\r\n ca_locations_dist_df = locations_min_distances()\r\n ca_locations_dist_df = ca_locations_dist_df[['id_min', to]]\r\n tr_list = br_data_df['from_to'].tolist()\r\n values = []\r\n keys = []\r\n for i in tr_list:\r\n a = ca_locations_dist_df.loc[ca_locations_dist_df[to] == i]\r\n id_loop = a.loc[a.index[-1], 'id_min']\r\n tr_to_loop = a.loc[a.index[-1], to]\r\n keys.append(id_loop)\r\n values.append(tr_to_loop)\r\n segment = dict(zip(keys, values))\r\n segment_df = pd.DataFrame([segment])\r\n segment_df = segment_df.T\r\n indexes = segment_df.index.values.tolist()\r\n segment_df = segment_df.rename(columns={0: \"segment\"})\r\n segment_df.insert(loc=0, column='id_min', value=indexes)\r\n segment_df = segment_df.sort_values(by=['id_min'])\r\n segment_df = segment_df.reset_index(drop=True) # segment_df contains the location of active tr and id_name sorted by shortest distance to them\r\n tr_list = active_users_location_df['agent'].tolist()\r\n jid_names = pd.DataFrame()\r\n for i in tr_list:\r\n a = agents_df.loc[agents_df['Name'] == i]\r\n jid_names = jid_names.append(a)\r\n active_users_location_df = active_users_location_df.rename(columns={'from_to': 'segment'})\r\n #print(f'active_users_location_df: 
{active_users_location_df}')\r\n #print(f'segment_df: {segment_df}')\r\n results = active_users_location_df.merge(segment_df, on='segment')\r\n\r\n results = results.rename(columns={'agent': 'Name'})\r\n results = results.merge(jid_names, on='Name')\r\n results = results.sort_values(by=['id_min'])\r\n results = results[['Name', 'location', 'segment', 'id_min', 'User name']]\r\n return results", "def dataframe(self):\n return self.generator.dataframe", "def get_available_agendas(self):\n pass", "def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list", "def station_agents(self):\n return self.get(\"station_agents\")", "def getAgents(issuer=False, dbn='core', env=None):\n global gDbEnv\n\n if env is None:\n env = gDbEnv\n\n if env is None:\n raise DatabaseError(\"Database environment not set up\")\n\n entries = []\n subDb = gDbEnv.open_db(dbn.encode(\"utf-8\"), dupsort=True) # open named sub db named dbn within env\n with gDbEnv.begin(db=subDb) as txn: # txn is a Transaction object\n with txn.cursor() as cursor:\n if cursor.first(): # first key in database\n while True:\n key = cursor.key().decode()\n if len(key) == DID_LENGTH and \"/\" not in key:\n value = cursor.value().decode()\n ser, sep, sig = value.partition(SEPARATOR)\n try:\n dat = json.loads(ser, object_pairs_hook=ODict)\n except ValueError as ex:\n if cursor.next():\n continue\n else:\n break\n try:\n did, index = dat[\"signer\"].rsplit(\"#\", maxsplit=1)\n except (AttributeError, ValueError) as ex:\n if cursor.next():\n continue\n else:\n break\n\n if did == key: # self signed so agent\n if issuer:\n if \"issuants\" in dat:\n entries.append(key)\n else:\n entries.append(key)\n if not cursor.next(): # next key in database if any\n break\n return entries", "def Load_AllCourseBuildersStatistics(self, data, suffix=''):\n\t\tself.temp[:]=[]\n\t\tfor x in xrange(len(self.active_tournaments)):\n\t\t\tself.temp.append(self.active_tournaments[x])\n\n\t\treturn self.temp", "def available_to_lay(self) -> pd.DataFrame:\n if not self.data_is_loaded:\n self.load_data()\n\n return self._available_to_lay", "def clear_agents(self):\n self.set(\"manager_agents\", {})\n self.set(\"transport_agents\", {})\n self.set(\"customer_agents\", {})\n self.set(\"station_agents\", {})\n self.simulation_time = None\n self.simulation_init_time = None", "def get_pnodes(self, start: datetime, end: datetime) -> pd.DataFrame:\n\n self._validate_date_range(start, end)\n\n params: Dict[str, Any] = {\n \"queryname\": \"ATL_PNODE\",\n \"startdatetime\": self._get_UTC_string(start),\n \"enddatetime\": self._get_UTC_string(end),\n \"Pnode_type\": \"ALL\",\n \"version\": 1,\n \"resultformat\": 6,\n }\n\n response = 
self.request(params)\n\n return self.get_df(response)", "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def get_alive_units(self):\n alive_units = []\n for unit in self.units:\n if not unit.is_alive():\n continue\n alive_units.append(unit)\n return alive_units", "def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']", "def get_active_features(summary_df, slots_offered): # prev -> getActiveFeatures\n disc_cols = [col+'_Discount' for col in slots_offered]\n eco_cols = [col+'_Eco' for col in slots_offered]\n gr_cols = [col+'_Eco' for col in slots_offered]\n features = summary_df.loc[:, disc_cols+eco_cols+gr_cols]\n features = features.loc[:, features.sum(axis=0) > 0]\n for i in reversed(['NO_PURCHASE']+slots_offered):\n features.insert(0, i+'_Asc', value=1)\n return features, disc_cols, eco_cols, gr_cols", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_available_companies_and_people(team):", "def return_consumed_capacity_indexes(self):\n return self.__return_consumed_capacity.indexes()", "def fetchall_df(result_proxy):\n# result = result_proxy.fetchall(keep_col_names=T) ???\n result = [row for row in tqdm(result_proxy)]\n return pd.DataFrame(result, columns=result[0].keys())", "def server_agent_statistics(ctx):\n data = ctx.obj.get_agent_statistics()\n output_json_data(data)", "def _get_tables(self) -> pd.DataFrame:\n return self.server._execute_extract(\n \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '{}'\".format(\n self.name\n )\n )", "def reset(self):\n self.reset_agent_locations()\n 
return self.x_agent", "def get_distance_matrix():\n df_afstandn2 = get_dataframe(\"\"\"SELECT *\n FROM proj_afval_netwerk.afv_poi_afstand\n WHERE afstand < 1000\n \"\"\")\n return df_afstandn2", "def tabulate(self):\n\n self.tables = []\n\n for sim in tqdm.tqdm(self.simulations):\n self.tables.append(pd.read_csv(sim.get_table()))\n\n return self.tables", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def __get_allocations(self):\n\n query = db_session.query(Schedule).all()\n\n db_session.close()\n\n service_allocations = dict()\n unit_allocations = dict()\n\n for alloc in query:\n\n service = (alloc.headcode, alloc.origin_location, alloc.origin_departure)\n unit = alloc.unit\n\n if service not in service_allocations:\n service_allocations[service] = set()\n service_allocations[service].add(unit)\n\n if unit not in unit_allocations:\n unit_allocations[unit] = set()\n unit_allocations[unit].add(service)\n\n return (service_allocations, unit_allocations)", "def sample(self):\n return [agent_observation_space.sample() for agent_observation_space in self._agents_observation_space]", "def get_occupied_tiles(self):\r\n occupied = np.zeros(self.searchenv.conv.num_tiles)\r\n #Convert current state (positions of agents) to tile indices\r\n tiles = self.searchenv.conv.state_to_tile(self.searchstate.positions)\r\n valid_tiles = tiles[self.searchstate.actives == 1]\r\n occupied[valid_tiles] = 1\r\n return occupied", "def get_df(self) -> pd.DataFrame:\n return pd.DataFrame(self.fetchall(), columns=self.headers())", "def describe_agents(agentIds=None, filters=None, maxResults=None, nextToken=None):\n pass", "def get_features(self):\n x,y = self.agent\n return np.array([x,y])", "def df_seqs_concepts(self):\n # Get the data #\n df = pandas.DataFrame(self.a.seq_to_counts)\n df = df.fillna(0)\n # Rename to original names #\n df = df.rename(columns=self.a.renamed_to_orig)\n # Rename envo integers to envo strings #\n envo_int_to_id = lambda e: \"ENVO:%08d\" % e\n df = df.rename(index=envo_int_to_id)\n # Return\n return df", "def get_live_node_list(self) -> []:\n do_backup = False\n try:\n response = self.dynamo_table.scan()\n now = self.get_current_time()\n\n new_live_nodes = [item['IP'] for item in response['Items'] if\n int(item['lastActiveTime']) >= now - 15000]\n\n if abs(len(new_live_nodes)-len(self.live_nodes)) != 0:\n self.set_num_live_and_prev_nodes(len(new_live_nodes))\n do_backup = True\n\n self.live_nodes = new_live_nodes\n\n except Exception as e:\n print(f'Error in get_live_node_list: {e}')\n\n return self.live_nodes, do_backup", "def get_available_rental_instruments(self) -> list:\n self.cursor.execute(\"\"\"\n SELECT DISTINCT name, brand, monthly_cost, ri_id AS id\n FROM rental_instrument AS ri\n WHERE NOT EXISTS\n (SELECT 1 FROM rental AS r\n WHERE ri.ri_id = r.ri_id \n AND CURRENT_DATE < end_date\n AND terminated IS NULL)\n \"\"\")\n self.db.commit()\n return self._cursor_result()", "def all_equipment(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"equipment\")\n\t\ttmpl = lookup.get_template(\"equipment.html\")\n\t\treturn (tmpl.render(equipment=activity_all))", "def workers_status(self):\n workers = []\n for agent in self.agents_status():\n workers += agent['workers']\n return workers", "def oswmem_free_memory(self,min=0): \n result = self.df[self.df['free mmemory'] > min].all \n return result", "def make_hh_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-97 from 
excel_import\r\n self.hhpos = self.determine_hhpos(hh_row, 'house_latitude', 'house_longitude')\r\n self.hh_id = return_values(hh_row, 'hh_id')\r\n self.admin_village = 1\r\n\r\n # 2016\r\n mig_remittances = return_values(hh_row, 'mig_remittances') # remittances of initial migrant\r\n if mig_remittances is None:\r\n mig_remittances = 0\r\n household_income_list[hh_row - 1] = int(mig_remittances)\r\n household_remittances_list[hh_row - 1] = int(mig_remittances)\r\n\r\n if return_values(hh_row, 'initial_migrants') is not None:\r\n out_mig_list[hh_row - 1] = 1\r\n household_migrants_list.append(self.hh_id)\r\n cumulative_mig_list[hh_row - 1] = 1\r\n\r\n num_labor_list[hh_row - 1] = initialize_labor(hh_row)\r\n hh_size_list[hh_row - 1] = len(return_values(hh_row, 'age'))\r\n\r\n a = HouseholdAgent(hh_row, self, self.hh_id, self.admin_village)\r\n self.space.place_agent(a, self.hhpos) # admin_village placeholder\r\n self.schedule.add(a)", "def get_global_active_list(self):\n return self.api.get_active_global_version_manager()", "def retrieve_leads(self) -> pd.DataFrame:\n if self.leads_df is None:\n self.leads_df = pd.read_sql_query('SELECT * FROM clean_leads ORDER BY created_on DESC',\n con=self.connection())\n return self.leads_df", "def make_output_df(self):\n df = pd.concat([pd.DataFrame(dat) for dat in [self.qdata, self.pdata]], axis=1)\n columns = np.hstack(([['{}{}'.format(x, c) for c in self.actions] for x in ['q', 'p']]))\n df.columns = columns\n df.insert(0, 'trial', np.arange(1, df.shape[0]+1))\n df['choice'] = self.choices\n df['feedback'] = self.feedback\n# r = np.array(self.bandits.rvalues)\n# p = np.array(self.bandits.preward)\n df['optimal'] = self.demand\n df.insert(0, 'agent', 1)\n self.data = df.copy()", "async def get_all_investigators(request):\n client_key = general.get_request_key_header(request)\n investigator_list = await security_messaging.get_investigators(request.app.config.VAL_CONN, client_key)\n\n investigator_list_json = []\n for address, dp in investigator_list.items():\n investigator_list_json.append({\n 'public_key': dp.public_key,\n 'name': dp.name\n })\n return response.json(body={'data': investigator_list_json},\n headers=general.get_response_headers())", "def dataframe(self):\n frames = []\n for game in self.__iter__():\n df = game.dataframe\n if df is not None:\n frames.append(df)\n if frames == []:\n return None\n return pd.concat(frames)", "def gasprices():\n # Query all gas price data by state\n Gasprices = engine.execute(\"SELECT * FROM gasprices\").fetchall()\n \n return jsonify({'Gasprices': [dict(row) for row in Gasprices]})", "def build_df(packages, software_mentions, cran_links, titles): \n df = pd.DataFrame({'CRAN Package' : packages, 'CRAN Link' : cran_links, 'Title' : titles})\n df = df[df['CRAN Package'].isin(software_mentions)]\n return df", "def auction_blank_df():\r\n df = pd.DataFrame([], columns=['id', 'agent_type', 'location_1', 'location_2', 'location',\r\n 'coil_auction_winner', 'coil_length', 'coil_width', 'coil_thickness', 'coil_weight',\r\n 'int_fab', 'bid', 'budget', 'ship_date', 'ship_date_rating',\r\n 'setup_speed', 'T1', 'T2', 'T3', 'T4', 'T5', 'q', 'T1dif', 'T2dif', 'T3dif', 'T4dif', 'T5dif', 'total_temp_dif', 'temp_rating',\r\n 'bid_rating', 'int_fab_priority', 'int_fab_rating', 'rating', 'rating_dif', 'negotiation',\r\n 'pre_auction_start', 'auction_start', 'auction_finish',\r\n 'active_tr_slot_1', 'active_tr_slot_2', 'tr_booking_confirmation_at', 'active_wh', 'wh_booking_confirmation_at', 'wh_location', 'active_coils', 
'auction_coils',\r\n 'brAVG(tr_op_time)', 'brAVG(ca_op_time)', 'AVG(tr_op_time)', 'AVG(ca_op_time)', 'fab_start'\r\n 'slot_1_start', 'slot_1_end', 'slot_2_start', 'slot_2_end', 'name_tr_slot_1', 'name_tr_slot_2', 'delivered_to_wh', 'handling_cost_slot_1', 'handling_cost_slot_2',\r\n 'coil_ratings_1', 'coil_ratings_2',\r\n 'pre_auction_duration', 'auction_duration',\r\n 'gantt', 'location_diagram'\r\n ])\r\n return df", "def acquire(self):\n self.logger.debug(\"in NerscAllocationInfo acquire\")\n return {\"Nersc_Allocation_Info\": pd.DataFrame(self.send_query())}" ]
[ "0.5804655", "0.5775972", "0.56420845", "0.55906963", "0.54590887", "0.5439093", "0.54168296", "0.5397101", "0.5394799", "0.5385905", "0.5349997", "0.5243113", "0.5241917", "0.5230085", "0.5196623", "0.5092877", "0.5069075", "0.5068402", "0.50677747", "0.5053582", "0.5020693", "0.5011909", "0.499841", "0.4996714", "0.4990112", "0.49826375", "0.49716035", "0.4956883", "0.4941838", "0.490633", "0.490404", "0.49012014", "0.49006268", "0.48981276", "0.48972926", "0.48925182", "0.48686177", "0.4863197", "0.4859084", "0.48590088", "0.48391512", "0.4838413", "0.48359275", "0.48314863", "0.482395", "0.48168853", "0.48084608", "0.48000592", "0.4797037", "0.47932652", "0.47926065", "0.47693253", "0.47620803", "0.47606754", "0.4757628", "0.4750027", "0.47330275", "0.47248966", "0.47223696", "0.47217152", "0.47120988", "0.471173", "0.46947607", "0.46931797", "0.4690812", "0.46901882", "0.46849737", "0.46849737", "0.46849737", "0.46849737", "0.46785447", "0.46750408", "0.46692425", "0.46687204", "0.46543527", "0.465078", "0.465036", "0.46492186", "0.46445724", "0.46353364", "0.46347335", "0.4632744", "0.46325076", "0.46311072", "0.46279627", "0.46156347", "0.46052358", "0.4604768", "0.4601398", "0.45962626", "0.45862105", "0.45845553", "0.45796385", "0.45780057", "0.45689896", "0.4568744", "0.45672032", "0.45489335", "0.45462143", "0.45448393", "0.45402637" ]
0.0
-1
Return player on waivers for given time.
def test_get_waivers(league): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def player(self):\n return self.players[self.tictactoe.turn]", "def get_player(self,p):\n self._validate(p)\n return p.player()", "async def get_player(self) -> Optional[andesite.Player]:\n ...", "def get_player(self):\n return self.player", "def get_player(self):\n return self.player", "def _get_player(self, player_name):\n return self._collection.find_one({'name': player_name})", "def get_player(playerName):\n return players_col.find_one({\"name\": playerName})", "def query(self, hero, time):\n return self.heroes[hero].query(time)", "def get_owning_player(self):\n i = 0\n while True:\n try:\n players = self.tiles[i].players\n except IndexError:\n return None\n if len(players) == 1:\n return players[0]\n i += 1", "def get_player(self):\n return self._pubg.player(self.player_id, self.shard)", "def get_player(self):\r\n return self.player_control.get_player()", "def player(self):\n return self.current.player", "def player(self, state, current_player):\r\n\r\n new_piece, player = self.new_or_old_piece(state)\r\n\r\n if new_piece:\r\n return player\r\n else:\r\n return current_player", "def get_player(self, player):\n return self._db.Players.find_one({'Name' :\n re.compile(player, re.IGNORECASE)})", "def __get_other_player(self):\n return engine.Engine.game_manager.players[(self.current_player_index + 1) % 2]", "def current_player(self) -> Player:\n return self.players[self.player]", "def get_opposing_player(self, player_name):\n pass", "def player(self):\n return self._player", "def get_player(self, user_id):\n players = [p for p in self.players if p.id() == user_id]\n return players[0] if players else None", "def other_player(self):\n return self.get_others_in_group()[0]", "def find_player(self, name):\n for index in range(0, len(self.player_list)):\n if self.player_list[index].name == name:\n return self.player_list[index]", "def get_player(self, number):\n num = int(number)\n assert (num in [1, 2])\n return self.player_1 if num == 1 else self.player_2", "def getPlayer(self):\n return self.currentPlayer", "def getPlayer(self):\n return self.__player__", "def get_players():\n return {\"X\": play_human, \"O\": play_ai}", "def get_current_player(self) :\n return self.current_player", "def find_player_tile():\n if client.LoggedIn:\n try:\n # Try to catch System.NUllReferenceException race condition\n if client.Map.GetTileWithPlayer(): #\n floor_tiles = list(client.Map.GetTilesOnSameFloor())\n # floor_tiles = get_floor_tiles()\n floor_obj_data = [[\n i.Data for i in list(j.Objects)] for j in floor_tiles]\n # floor_objs = [list(j.Objects) for j in floor_tiles]\n # floor_obj_ids = [[i.Data for i in j] for j in floor_objs]\n\n for obj in range(0, len(floor_obj_data)): # 252\n if player.Id in floor_obj_data[obj]:\n return floor_tiles[obj] # Player tile\n except:\n pass\n\n print 'Player tile not found'\n return None", "def _get_player(self, serial):\n return self.players[serial]", "def player(self):\n # type: () -> string_types\n return self._player", "def get_player(self):\n\n return self._player", "def next_player(self):\n return next(self.next_tour)", "def get_best_time():\n\n # Always remember about db.session.rollback() when debugging\n\n max_timing = db.session.query(func.min(Game.timing)).filter(Game.status == \"won\").first()\n time_user = db.session.query(Game.timing, User.username).join(User).filter(Game.timing == max_timing, Game.status == \"won\").first()\n\n return time_user", "def harvest(self, player):\n return", "def get_member(self, user):\n for player in self.members:\n if 
player.uuid == user.id:\n return player\n return None", "def at_time(self, time):\n return self._collection.at_time(time)", "def get_current_player(self):\n return self.current_player", "def test_get_player_upcoming_chests(self):\n pass", "def object_at(self, time):\n for event in self._timeline: \n if time >= event.start_time and time <= event.end_time:\n return event.obj\n return self._timeline[-1].obj", "def get_current_player(self):\n return self.in_game_players[self.curr_player_index]", "def get_player(self, player_name, player_type):\n\n # Return correct player class\n if player_type == \"human\":\n return Player(player_name)\n if player_type == \"computer\":\n return ComputerPlayer(player_name)", "def get_next_player(self, player):\r\n return player * -1", "def get_next_player(self, player):\r\n return player * -1", "def get_time(self) -> float:\n return self.player.time", "def getPlayer(self, playerName, team=None):\n if team is None:\n teams = self.players.keys()\n elif team.lower() in self.players.keys():\n teams = [team.lower()]\n else:\n return None\n \n for team in teams:\n for player in self.players[team]:\n if playerName == player.name:\n return player\n return None", "def players(self):\n return self.currents.player", "def get_player(self, name: str, platform: ALPlatform, skip_tracker_rank=False) -> ALPlayer:\n basic_player_stats: list = self.basic_player_stats(name, platform, skip_tracker_rank)\n assert len(basic_player_stats) == 1\n event_info: list = self.events(\n player_name=name,\n platform=platform,\n action=ALAction.INFO\n )\n events: list = list()\n tracked_player: dict\n for tracked_player in event_info[0].get('data'):\n if name == tracked_player.get('name') and \\\n platform.value == tracked_player.get('platform'):\n events = self.events(\n player_name=name,\n platform=platform,\n action=ALAction.GET\n )\n return ALPlayer(basic_player_stats_data=basic_player_stats[0], events=events)", "def get_current_player(self) -> chr:\n return self._players[self._current_player]", "def get_winner(self, game):\n assert self.is_terminal(game)\n if game.is_winner(game.active_player):\n return game.active_player\n return game.inactive_player", "def winning_game_player(players):\n\n # in order for there to be a winner, the game must\n # be over\n if not game_over(players):\n return None\n\n # if the game is over, it could be that there is no\n # winner\n active_players = players_with_decks(players)\n if not active_players:\n return False\n\n # if the game is over than find the winner\n return players_with_decks(players)[0]", "def found_specific_player(self) -> Player:\n search_question = ('Nom du joueur recherché : ',\n 'Prénom du joueur recherché : ')\n search_response = []\n for question in search_question:\n valid = self.ask_and_store_text(question)\n while not valid[0]:\n valid = self.ask_and_store_text(question)\n search_response.append(valid[1])\n\n for player in Player.PLAYERS:\n if player.name.upper() == search_response[0].upper() and \\\n player.first_name.capitalize() == search_response[1].capitalize():\n return player\n\n self.view_menu.stand_by_msg(\"Joueur introuvable !\\n\"\n \"Rechercher à nouveau ou créer le joueur\")", "def get_winner(self) -> None:\n if not self.get_game_ending_hands():\n if max(self.user.hand.value) > max(self.house.hand.value): # Values above 21 are omitted\n self.event_player_wins()\n elif max(self.user.hand.value) == max(self.house.hand.value):\n self.event_player_push()\n else:\n self.event_house_wins()", "def get_player_move(self, roundNum, 
player):\n return (self.moves[roundNum])[player]", "def get_player(self):\n return int(4 - floor(abs(3.5 - self.turn_number)))", "def get_active_player(game_data):\n try:\n return [p for p in game_data['players'] if p['active']][0]\n except IndexError:\n return None", "def get_player(self):\n return MarkerType(self.__turn % 2)", "def find_self(self):\n for index in range(0, len(self.player_list)):\n if self.player_list[index].name == self.jid:\n return self.player_list[index]", "def get_current_player(self):\r\n\r\n return self.players[(self.turn_number) % len(self.players)].get_id()", "def getCurrentPlayer(self):\r\n return self.currentPlayer", "def __get_current_player(self):\n return engine.Engine.game_manager.players[self.current_player_index]", "def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]", "def get_action_at(self, time: int) -> Action:\n return [x for x in self.actions if x.time_start <= time <= x.time_end][0]", "def get_player(self, player_id):\r\n \r\n if player_id in self.players:\r\n player = self.players[player_id]\r\n else:\r\n player = SelectPlayer(self.player_control, id=player_id)\r\n player = self.add_player(player)\r\n return player", "def player(self):\n return self._node._player", "def hive_for(self, player: PlayerID) -> Hive:\n return next(h for h in self.hives if h.player == player)", "def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players", "def player(self, name):\n\n self.name = name\n q = Query()\n data = TinyDB('app/data/db_player.json').table('players')\n\n self.search_result = data.search(\n (q.name == self.name) |\n (q.surname == self.name)\n )\n\n if len(self.search_result) == 0:\n v_menu.View().search('player_none')\n return 'None'\n\n elif len(self.search_result) == 1:\n v_menu.View().search_players(\n 'find_player',\n self.search_result[0]['name'],\n self.search_result[0]['surname'],\n self.search_result[0]['birthday'],\n self.search_result[0]['rank']\n )\n return self.search_result[0]['id']\n\n elif len(self.search_result) >= 2:\n for i in range(len(self.search_result)):\n v_menu.View().search_players(\n 'find_players',\n self.search_result[i]['name'],\n self.search_result[i]['surname'],\n self.search_result[i]['birthday'],\n self.search_result[i]['rank'], i+1\n )\n\n self.player_number = c_input.Input().select_menu_number(\n len(self.search_result))\n\n return self.search_result[self.player_number-1]['id']", "def active_game(self, player):\n actives = self.filter(active=True, finished=False, player=player)\n if actives.count() > 1:\n log.warning(f\"User {player} has more than one active round.\")\n return actives.latest(\"created\")", "def user_playing(self, user):\n if not user:\n return False\n query = db.Query(GamePlayer)\n query.filter('game =', self)\n query.filter('user =', user)\n return query.get()", "def getPlayer(self, idx):\n return self.players[idx]", "def find_team_by_player(self, player, year=None):\n year = self._year if not year else year\n fortypath = 'Fortyman.{}.Name'.format(year)\n res = self._db.Teams.aggregate([{'$match':\n {fortypath : player}},\n {'$project': {'_id' : 0,\n 'Tm' : 1}}])\n try:\n return next(res)['Tm']\n except:\n return None", "def 
get_player(self, player_id):\r\n\r\n for player in self.players:\r\n if player.player_id == player_id:\r\n return player\r\n\r\n raise Exception(\"A player with the given id {0} was not found in the player list\".format(player_id))", "def get_winner(self):\n winner: Player = Player('none')\n points_winner = 0\n for player in self.players:\n for key, value in player.get_stats().items():\n print('{}: {}'.format(key, value))\n if key == 'points':\n if value >= points_winner:\n winner = player\n print()\n\n print('The winner is: ' + winner.get_name())\n return winner", "def get_player(self, num):\n\n name = input(f\"What is the name for player number {num}? \")\n player = Player(name)\n return player", "def find_gkeeper(alist):\n\n for player in alist:\n if player[2] == ['Por']:\n return player\n\n return False", "def get_weather_with_time(time):\n global DARK\n\n if TIME in range(6, 9):\n DARK = False\n return 1\n elif TIME in range(9, 13):\n return 2\n elif TIME in range(13, 16):\n return 3\n elif TIME in range(16, 19):\n if HAS_RAINCOAT:\n return 4\n else:\n if not NICE_WEATHER:\n add_strength(False, 10)\n return 5\n\n elif TIME in range(19, 22):\n if HAS_RAINCOAT:\n return 7\n else:\n if not NICE_WEATHER:\n add_strength(False, 10)\n return 6\n\n else: # 9 - 6am\n DARK = True\n if HAS_FLASHLIGHT:\n return 9\n else:\n return 8", "def get_current_player(player_one_turn):\n \n # Get appropriate player whether the parameter is True or False\n if player_one_turn == True:\n return 'Player One'\n return 'Player Two'", "def get_winner(self):\n diff = self.home_score - self.away_score\n if diff > 0:\n return self.home_team\n elif diff < 0:\n return self.away_team\n else:\n return None", "def _getAndSetByePlayer():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"SELECT player FROM bye_candidate;\"\"\")\n query_result = cur.fetchall()\n bye_player = query_result[0]\n cur.execute(\"\"\"INSERT INTO byes VALUES (%s);\"\"\", (bye_player,))\n conn.commit()\n return bye_player", "def get_guess(self):\n guess = self.player.higher_or_lower", "def getplayer(self,\n playertype: Type[PlayerType],\n doraise: bool = False) -> Optional[PlayerType]:\n player: Any = self._player\n assert isinstance(player, playertype)\n if not player.exists() and doraise:\n raise ba.PlayerNotFoundError()\n return player if player.exists() else None", "def get_opponent(self):\n for cell in self.__state.board.find_workers():\n player = self.__state.board.get_player_id(cell[0], cell[1])\n if not player == self.__pid:\n return player", "def who_plays_first():\n random = randrange(0, 2)\n if random == 0:\n return globals()['computer']\n else:\n return globals()['player']", "def findNextSuitablePlayer(self, n):\n\t\tfor _ in range(len(self.getPlayers())):\n\t\t\tplayer, seat = self.findNthPlayerFromSeat(n, 1)\n\t\t\tif self.playerList[seat].money > 0 and self.playerList[seat].isHandLive == True:\n\t\t\t\treturn (player, seat)\n\t\t\telse:\n\t\t\t\tn = seat", "def other_player(self, player):\n if player == self.__opponent:\n return self.__pid\n else:\n return self.__opponent", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def get_winner(game):\n return game['winner']", "def target_position(self, time):\n return self.target(time, self.positions, self.dt, self.num_way)", "def target_position(self, time):\n return self.target(time, self.positions, self.dt, self.num_way)", "def getPlayer(self, index):\n if type(index) is not int:\n raise TypeError(\"The 
index passed to getPlayer must be of type int.\")\n elif index < 0 or index >= len(self.__colordict__.keys()):\n raise IndexError(\"Index less than 0 or greater than or equal to the number of \" +\n \"players in the game.\")\n node_i = self.__currentnode__\n for i in range(index):\n node_i = node_i.getNext()\n return node_i.getPlayer()", "def get_object(self):\n return self.request.user.player", "def get_next_player(current_player: Optional[str]) -> str:\n if current_player == c.X:\n return c.O\n else:\n return c.X", "def play_war_round(player):\n for i in xrange(4):\n play_normal_round(player)\n\n return player", "def visibleShotAtTime(sequence, t):\n shot = sequence.trackItemAt(t)\n if shot == None:\n return shot\n\n elif shot.isMediaPresent() and shot.isEnabled():\n return shot\n\n else:\n # If we're here, the Media is offline or disabled... work out what's visible on other tracks...\n badTrack = shot.parent()\n vTracks = list(sequence.videoTracks())\n vTracks.remove(badTrack)\n for track in reversed(vTracks):\n trackItems = track.items()\n for shotCandidate in trackItems:\n if shotCandidate.timelineIn() <= t and shotCandidate.timelineOut() >= time:\n if shotCandidate.isMediaPresent() and shotCandidate.isEnabled():\n shot = shotCandidate\n break\n\n return shot", "def check_win(self) -> Union[Player, None]:\n\t\tfor player in self.players:\n\t\t\tif len(player.deck) == 0:\n\t\t\t\treturn player\n\t\t\n\t\treturn None", "def nextPlayer(self):\n self.turn += 1\n if self.turn >= len(self.players):\n self.turn = 0\n return self.players[self.turn]", "def get_player(self, name=None, platform=None, uid=None):\n\n results = yield from self.get_players(name=name, platform=platform, uid=uid)\n return results[0]", "def determineWinner(self):\n if self.game_state.numActive() == 1:\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name]:\n print \"\"\n print player.name + \" wins with\"\n for card in self.player_hand_dict[player.name]:\n print card\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[player.name] += self.game_state.pot\n return\n\n for player in self.game_state.player_list:\n for card in self.game_state.board:\n self.player_hand_dict[player.name].append(Card(card.suit, card.rank))\n hand_ranking = HandRanking(self.game_state.player_list, self.player_hand_dict)\n hand_ranking.rankHands()\n winning_rank = -1\n winner = None\n tie_list = []\n \"\"\" Get winning rank, only consider active players for the pot \"\"\"\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name] == True:\n if DEBUG:\n print \"Considering \" + str(player.name) + \"'s hand for the pot.\"\n if hand_ranking.player_ranks_dict[player.name] > winning_rank:\n winning_rank = hand_ranking.player_ranks_dict[player.name]\n winner = player \n tie_list = []\n tie_list.append(player)\n elif hand_ranking.player_ranks_dict[player.name] == winning_rank:\n tie_list.append(player)\n \"\"\" winner should never be equal to None \"\"\"\n\n \"\"\" Check for tie and resolve if needed \"\"\"\n if len(tie_list) > 1:\n if DEBUG:\n print \"found potential tie...\"\n for player in tie_list:\n print player.name + \"'s hand:\"\n for card in hand_ranking.player_best_hand_dict[player.name]:\n print card\n print \"resolving tie...\"\n result_tie_list = self.resolveTie(hand_ranking, tie_list)\n print \"\"\n self.printPlayersHands()\n for player in result_tie_list:\n print player.name + \",\",\n print \" wins with\",\n 
hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot / len(tie_list)) + \" chips!\"\n for player in result_tie_list:\n self.game_state.player_chips[player.name] += self.game_state.pot / len(tie_list)\n else:\n print \"\"\n self.printPlayersHands()\n print winner.name + \" wins with\",\n hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[winner.name] += self.game_state.pot", "def get_next_player(self, set_player=True):\r\n return self.player_control.get_next_player(set_player=set_player)", "def get_current_player(self):\n\n # Return the current player\n return self._current_player", "def get_current_server_of_player(target_username):\n servers = get_servers()\n\n target_username = target_username.lower()\n real_username = target_username\n found_server = None\n\n for server in servers:\n if not server.players.list:\n continue\n\n players_list = [player.lower() for player in server.players.list]\n\n for player in players_list:\n if target_username in player:\n real_username = player\n found_server = server\n\n return real_username.upper(), found_server", "def test_get_player(self):\n pass" ]
[ "0.6203599", "0.61011946", "0.59679496", "0.59320474", "0.59320474", "0.5928937", "0.5890586", "0.5869932", "0.5858092", "0.58263284", "0.5819567", "0.5788687", "0.57525885", "0.573477", "0.57151794", "0.57138526", "0.56666094", "0.5606717", "0.5583962", "0.5573942", "0.5557938", "0.553803", "0.5532619", "0.5489593", "0.54842794", "0.5474366", "0.54720724", "0.54685843", "0.5459165", "0.545352", "0.54418886", "0.54330724", "0.5424158", "0.5418971", "0.54044795", "0.53948677", "0.53743815", "0.53721535", "0.5349879", "0.5345458", "0.5327856", "0.5327856", "0.531282", "0.5281011", "0.527956", "0.52785724", "0.5269052", "0.5268049", "0.52551687", "0.52549785", "0.525057", "0.5249699", "0.52493805", "0.5249104", "0.52379614", "0.5236059", "0.52299905", "0.5224894", "0.5223076", "0.52218103", "0.5221662", "0.5221257", "0.5215834", "0.5215679", "0.51874727", "0.5186654", "0.5185097", "0.51795286", "0.5177819", "0.515792", "0.51518154", "0.5146509", "0.5144249", "0.5135738", "0.51247126", "0.5115722", "0.5114339", "0.51059115", "0.5100916", "0.509948", "0.50816375", "0.50804824", "0.50691545", "0.5067676", "0.50606686", "0.505699", "0.5055843", "0.5055843", "0.50538236", "0.50499976", "0.5047413", "0.50399953", "0.5030444", "0.50254947", "0.5024877", "0.5023899", "0.50235504", "0.5009852", "0.5007276", "0.5007109", "0.50060624" ]
0.0
-1
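The entry above pairs the query "Return player on waivers for given time." with an empty test stub as its document. Purely as an illustration (not part of the dataset), a filled-in version of that stub might look like the sketch below; the league fixture and the league.waivers() accessor returning a list of player dicts are assumptions for the sketch, not a documented API.

# Illustrative sketch only; the `league` test fixture and its `waivers()`
# accessor (returning a list of player dicts) are assumed, not documented.
def test_get_waivers(league):
    waivers = league.waivers()
    # Expect a list of player records for the requested waiver period.
    assert isinstance(waivers, list)
    for player in waivers:
        # Each record should at least identify the player and position type.
        assert "name" in player
        assert "position_type" in player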
Return team roster at given date.
def get_team_roster(league): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def roster(\n self, ctx: commands.Context, season: Optional[YearFinder] = None, *, search: HockeyTeams\n ) -> None:\n season_str = None\n season_url = \"\"\n if season:\n if season.group(3):\n if (int(season.group(3)) - int(season.group(1))) > 1:\n return await ctx.send(_(\"Dates must be only 1 year apart.\"))\n if (int(season.group(3)) - int(season.group(1))) <= 0:\n return await ctx.send(_(\"Dates must be only 1 year apart.\"))\n if int(season.group(1)) > datetime.now().year:\n return await ctx.send(_(\"Please select a year prior to now.\"))\n season_str = f\"{season.group(1)}{season.group(3)}\"\n else:\n if int(season.group(1)) > datetime.now().year:\n return await ctx.send(_(\"Please select a year prior to now.\"))\n year = int(season.group(1)) + 1\n season_str = f\"{season.group(1)}{year}\"\n if season:\n season_url = f\"?season={season_str}\"\n if search is None:\n return await ctx.send(_(\"You must provide a valid current team.\"))\n rosters = {}\n players = []\n teams = [team for team in TEAMS if search.lower() in team.lower()]\n if teams != []:\n for team in teams:\n url = f\"{BASE_URL}/api/v1/teams/{TEAMS[team]['id']}/roster{season_url}\"\n async with self.session.get(url) as resp:\n data = await resp.json()\n if \"roster\" in data:\n for player in data[\"roster\"]:\n players.append(player[\"person\"][\"id\"])\n else:\n return await ctx.send(_(\"No team name was provided.\"))\n\n if players:\n await BaseMenu(\n source=PlayerPages(pages=players, season=season_str),\n cog=self,\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)\n else:\n if season:\n year = _(\" in the {season} season\").format(\n season=f\"{season.group(1)}-{season.group(3)}\"\n )\n else:\n year = \"\"\n await ctx.send(\n _(\"I could not find a roster for the {team}{year}.\").format(team=team, year=year)\n )", "def __call__(self, date):\n for game in self._games:\n if game.datetime.year == date.year and \\\n game.datetime.month == date.month and \\\n game.datetime.day == date.day:\n return game\n raise ValueError('No games found for requested date')", "def get_games_by_date(self, date):\n return self._db.Games.find({'date' : date})", "def date_leaderboard(cls, date, limit=False, cutoff=False):\r\n\r\n\t\tdate = DATES.to_ID(date)\r\n\r\n\t\tdate_rankings = []\r\n\r\n\t\tPLAYERS = cls.RESULTDAILY.keys()\r\n\r\n\t\tfor player in PLAYERS:\r\n\t\t\tscore, RM, RD, RP = cls.player_info(player, date, convert=True)\r\n\t\t\t\r\n\t\t\tdate_rankings.append([player, score, RM, RD, RP])\r\n\t\t\r\n\t\tdate_rankings = sorted(date_rankings, key=lambda m: m[1], reverse=True)\r\n\r\n\t\tif cutoff and min([p[3] for p in date_rankings]) < cls.RD_CUTOFF:\r\n\t\t\tdate_rankings = [p for p in date_rankings if p[3] < cls.RD_CUTOFF]\r\n\t\t\r\n\t\tif limit:\r\n\t\t\tdate_rankings = date_rankings[:limit]\r\n\t\t\r\n\t\treturn date_rankings", "def get_team_game_preview(self, team, date):\n abbr = convert_name(team, how='abbr')\n return self._db.Games.find({'date' : date,\n '$or' : [{'home' : abbr},\n {'away' : abbr}]})", "def get_next_game(today_game_date: datetime, team_id: int) -> dict:\n\n game_date = today_game_date.strftime(\"%Y-%m-%d\")\n tomorrow = (today_game_date + timedelta(days=1)).strftime(\"%Y-%m-%d\")\n end_date = (today_game_date + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n\n logging.info(\"Checking the schedule API endpoint for the next game.\")\n url = f\"schedule?teamId={team_id}&startDate={game_date}&endDate={end_date}\"\n\n response = api.nhl_api(url)\n if not 
response:\n return None\n\n next_game_json = response.json()\n next_game = next_game_json.get(\"dates\")[1].get(\"games\")[0]\n\n return next_game", "def for_date(self, date):\n return self.get(start_date__lte=date, end_date__gte=date)", "def starting_date(cls, player):\r\n\r\n\t\treturn cls.RESULTDAILY[player][0]", "async def reschedule(self, ctx, match_id: str, *, date: str):\n tournament = self.get_tournament(ctx.guild.id)\n try:\n new_date = tournament.parse_date(date, prefer_dates_from=\"future\")\n except ValueError:\n raise commands.UserInputError()\n if not new_date:\n raise commands.UserInputError()\n\n for bracket in tournament.brackets:\n if await self.reschedule_for_bracket(\n ctx,\n tournament,\n bracket,\n match_id,\n new_date,\n ):\n return\n raise tosurnament.InvalidMatchId()", "def get_schedule():\n startdate = '02/28/2020'\n enddate = '04/01/2020'\n return statsapi.schedule(start_date=startdate, end_date=enddate, team=134)", "def getMatchDate(self) -> str:\n return self.__getDataField(\"date\")", "def player_rank(cls, player, date):\r\n\r\n\t\ttry:\r\n\t\t\tP_RANKS = cls.RANKS[player]\r\n\t\texcept KeyError:\t# If player does not exist\r\n\t\t\treturn False\r\n\r\n\t\tinit_date = P_RANKS[0]\r\n\r\n\t\t# If player hadn't played yet by the date specified\r\n\t\tif date < init_date:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tdate_ind = DATES.day_diff(date, init_date)\r\n\r\n\t\trank = P_RANKS[date_ind + 1]\r\n\t\t\r\n\t\treturn rank", "def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n 
current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)", "def get_race_date(today, time_range):\r\n max_date = today.replace(year = today.year + time_range)\r\n return get_date(\"When is your marathon?\", \"Race Date\", today, max_date)", "def get_team_roster_and_depth_charts(self, team_name):\n result = self._method_call(\"Players/{team}\", team=team_name)\n return result", "def return_football_season(date=datetime.datetime.today()):\n date_aux = subtract_months(date, 6)\n beginning_year = str(date_aux.year)\n ending_year = date_aux.year + 1\n ending_year = str(ending_year)[-2:]\n season = ''.join([beginning_year, '-', ending_year])\n return season", "def fetch_roster_data(\n round_number: int = None, verbose: int = 1\n) -> List[Dict[str, Any]]:\n if verbose == 1:\n print(f\"Fetching roster data for round {round_number}...\")\n\n data = fetch_afl_data(\"/rosters\", params={\"round_number\": round_number})\n\n if verbose == 1:\n if not any(data):\n print(\n \"No roster data was received. 
It's likely that the team roster page \"\n \"hasn't been updated for the upcoming round.\"\n )\n else:\n print(\"Roster data received!\")\n\n return data", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "def get_rosters(year, output_file=True):\n year = str(year)\n\n # Get this year's playoff teams from config file\n current_year_playoff_teams = playoff_teams[year]\n\n # Get team attributes from config file\n playoff_team_attrs = {team: attrs for team, attrs in teams.items() if team in current_year_playoff_teams}\n\n # Positions of interest\n eligible_pos = ['WR', 'TE', 'QB', 'RB', 'FB', 'K', 'RB/WR']\n\n # For each playoff team get roster data from Pro Football Reference\n rosters = []\n for key, val in playoff_team_attrs.items():\n url = f'https://www.pro-football-reference.com/teams/{key}/{year}_roster.htm'\n print(f\"Fetching: {url}\")\n page = requests.get(url)\n df = pd.read_html(page.text.replace('<!--',''))[1] # Replace the value that interrupts HTML parsing\n df = df[df['Pos'].isin(eligible_pos)][['Player', 'Pos', 'G']]\n df['Team'] = val['teamFull'] # Add full team name ex: Buffalo Bills\n df['TeamMascot'] = df['Team'].apply(lambda x: x.split(' ')[-1]) # Mascot only\n df['TeamShort'] = val['teamShort'] # Abbreviated team name\n df['TeamKey'] = key.upper() # Pro football reference abbrev, these are weird\n rosters.append(df)\n \n df_combined = pd.concat(rosters)\n df_combined.columns = [c.lower() for c in df_combined.columns]\n\n if output_file:\n save_dir = prompt_save_location()\n file_path = os.path.join(save_dir, f\"rosters{year}.csv\")\n df_combined.to_csv(file_path, index = False)\n print(\"Output File Successfully Created!\")\n print(f\"Destination: {file_path}\")\n\n return df_combined", "def read_by_date(self, date=None):\n\n if not date:\n date = datetime.today()\n elif isinstance(date, str):\n date = datetime.strptime(date, \"%Y-%m-%d\")\n else:\n pass # Assume datetime object\n\n datestring = date.strftime(\"%Y-%m-%d\")\n filepath = Path(datadir(), 'ClubElo_{}.csv'.format(datestring))\n url = 'http://api.clubelo.com/{}'.format(datestring)\n\n if not filepath.exists():\n self._download_and_save(url, filepath)\n\n df = (pd.read_csv(str(filepath),\n parse_dates=['From', 'To'],\n infer_datetime_format=True,\n dayfirst=False\n )\n .rename(columns={'Club': 'team'})\n .replace({'team': TEAMNAME_REPLACEMENTS})\n .replace('None', np.nan)\n .assign(Rank=lambda x: 
x['Rank'].astype('float'))\n .assign(league=lambda x: x['Country'] + '_' + x['Level'].astype(str))\n .pipe(self._translate_league)\n .reset_index(drop=True)\n .set_index('team')\n )\n return df", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def getStream(self, date):\n\n return getMerraStream(date)", "def get_roster_players_via_api(self, team, season=None):\n # setting up empty list of players\n players = list()\n\n if season is None:\n season = str(retrieve_season())\n\n # creating stats api url with optional season parameter\n url = \"\".join((self.API_TEAM_SITE_PREFIX, str(team.team_id)))\n url_params = {\n 'expand': 'team.roster',\n 'season': \"%s%d\" % (season, int(season) + 1)\n }\n # retrieving data\n r = requests.get(url, params=url_params)\n team_data = r.json()\n\n if 'teams' not in team_data:\n logging.warn(\n \"+ %s not part of the league in %s/%d\" % (\n team, season, int(season) + 1))\n return players\n\n team_data = team_data['teams'][0]\n\n if 'roster' not in team_data:\n logging.warn(\n \"+ No roster found for %s/%d %s\" % (\n season, int(season) + 1, team))\n return players\n\n roster = team_data['roster']['roster']\n\n for plr_src in roster:\n # retrieving player if of current player in roster\n plr_id = plr_src['person']['id']\n # searching and optionally creating player with found player id\n plr = self.search_player_by_id(plr_id)\n players.append(plr)\n\n return players", "def _get_schedule_html_for_date(squadron_url: str, date_state: str) -> str:\n state = date_state.copy()# don't mutate the original\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n state['btnViewSched'] = 'View Schedule'\n html_string = _get_page_html(squadron_url, state, headers=headers)\n return html_string", "def get_games(season, date):\n url = \"http://live.nhl.com/GameData/SeasonSchedule-\" + season + \".json\"\n response = urllib.urlopen(url)\n data = json.loads(response.read())\n games = []\n for game in data:\n if game[\"est\"][:8] == date:\n games.append(game)\n return games", "def dawn(self, date=None, local=True):\n\n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n dawn = self.astral.dawn_utc(date, self.latitude, self.longitude)\n\n if local:\n return dawn.astimezone(self.tz) \n else:\n return dawn", "def get_roster_players_with_data(self, team):\n # TODO: find usage for this function\n # getting html document with team's roster\n doc = self.get_html_document(team, 'roster')\n\n # retrieving player page urls, and player first and last names\n # from roster page\n urls = doc.xpath(\"//td[@class='name-col']/a[@href]/@href\")\n first_names = doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-col__item \" +\n \"name-col__firstName']/text()\")\n # using filter to get rid of empty strings after stripping string\n # elements\n # using replace to get rid of asterisk indicating players on injury\n # reserve\n last_names = filter(\n None, [\n x.replace(\"*\", \"\").strip() if x else None for x in doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-\" +\n \"col__item name-col__lastName']/text()\")])\n\n # retrieving further player data from roster page\n # player jersey numbers\n numbers = doc.xpath(\n \"//td[@class='number-col fixed-width-font']/text()\")\n # player positions\n positions = [x[:1] for x in doc.xpath(\n \"//td[@class='position-col fixed-width-font']/text()\")]\n # shooting hands, unfortunately goaltender's glove hands aren't\n # 
listed any longer\n hands = doc.xpath(\"//td[@class='shoots-col fixed-width-font']/text()\")\n # player heights (in ft. + in.)\n heights = doc.xpath(\n \"//td[@class='height-col fixed-width-font']/span[2]/text()\")\n # player weights (in lbs.)\n weights = [int(x) if x.isdigit() else 0 for x in doc.xpath(\n \"//td[@class='weight-col fixed-width-font']/text()\")]\n # player dates of birth\n dobs = doc.xpath(\"//td[@class='birthdate-col']/span[2]/text()\")\n hometowns = doc.xpath(\"//td[@class='hometown-col']/text()\")\n\n players = list()\n\n for (\n first_name, last_name, url, _, position, _, _, _, _, _\n ) in zip(\n first_names, last_names, urls, numbers, positions,\n hands, weights, heights, dobs, hometowns\n ):\n # retrieving nhl id from player page url\n plr_id = int(url.split(\"-\")[-1])\n\n # trying to find player in database\n plr = Player.find_by_id(plr_id)\n # creating player if not already in database\n if plr is None:\n plr = self.create_player(\n plr_id, last_name, first_name, position)\n print(\"%s created...\" % plr)\n\n players.append(plr)\n\n return players", "def getTeam(self):\n return self.team", "def get_upcoming(self):\n try:\n race = next(\n (\n race\n for race in self.race_weekends\n if race[\"sessions\"][\"race\"] >= self.date\n ),\n self.race_weekends[-1],\n )\n return race\n except Exception:\n logger.exception(f\"Error getting upcoming race for year {self.date.year}\")", "def get_season(\n current_date: date, hemisphere: str, season_tracking_type: str\n) -> str | None:\n\n if hemisphere == \"equator\":\n return None\n\n if season_tracking_type == TYPE_ASTRONOMICAL:\n spring_start = ephem.next_equinox(str(current_date.year)).datetime()\n summer_start = ephem.next_solstice(str(current_date.year)).datetime()\n autumn_start = ephem.next_equinox(spring_start).datetime()\n winter_start = ephem.next_solstice(summer_start).datetime()\n else:\n spring_start = datetime(2017, 3, 1).replace(year=current_date.year)\n summer_start = spring_start.replace(month=6)\n autumn_start = spring_start.replace(month=9)\n winter_start = spring_start.replace(month=12)\n\n if spring_start <= current_date < summer_start:\n season = STATE_SPRING\n elif summer_start <= current_date < autumn_start:\n season = STATE_SUMMER\n elif autumn_start <= current_date < winter_start:\n season = STATE_AUTUMN\n elif winter_start <= current_date or spring_start > current_date:\n season = STATE_WINTER\n\n # If user is located in the southern hemisphere swap the season\n if hemisphere == NORTHERN:\n return season\n return HEMISPHERE_SEASON_SWAP.get(season)", "def teams(teamid):\n team_summary = team.TeamSummary(teamid)\n team_summary_info = team_summary.info()\n team_season_ranks = team_summary.season_ranks()\n\n team_common_roster = team.TeamCommonRoster(teamid)\n roster = team_common_roster.roster()\n coaches = team_common_roster.coaches()\n\n season = team_summary_info[0][\"SEASON_YEAR\"]\n\n team_game_log = team.TeamGameLogs(teamid,\n season=season)\n team_games = team_game_log.info()\n\n playoffs_teamgamelogs = team.TeamGameLogs(teamid,\n season=season,\n season_type=\"Playoffs\")\n playoffs_team_games = playoffs_teamgamelogs.info()\n\n team_season = team.TeamSeasons(teamid)\n team_season_info = team_season.info()\n\n for i in team_season_info:\n if (i[\"YEAR\"] == season):\n current_season_info = i\n\n return render_template(\"teams.html\",\n title=team_summary_info[0][\"TEAM_CITY\"] + \" \" + team_summary_info[0][\"TEAM_NAME\"],\n teamid=teamid,\n team_summary_info=team_summary_info,\n 
team_season_ranks=team_season_ranks,\n season=season,\n team_games=team_games,\n playoffs_team_games=playoffs_team_games,\n team_season=team_season_info,\n roster=roster,\n coaches=coaches,\n current_season_info=current_season_info,\n team_img=TEAM_ID_DATA)", "def get_date(date):\n return date", "def get_game(self, game_id):\n \n session = requests.session()\n response = session.get(self.baseURL + str(game_id), headers=self.headers)\n soup = BeautifulSoup(response.text)\n \n #get teams\n defeated_by = False \n game_header = soup.find_all(text=re.compile('defeats'))\n \n if len(game_header) == 0:\n game_header = soup.find_all(text=re.compile('defeated by'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('defeat'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('drew'))\n defeated_by = True \n else:\n defeated_by = True \n\n if defeated_by: \n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[3]\n else:\n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[2]\n \n date_string = game_header[0].split(' ')\n date_string_find = [date.lower() for date in date_string]\n \n venue = date_string[date_string_find.index('at') + 1]\n \n #get round\n round_num = None\n \n try:\n date_string_find.remove('')\n except:\n pass\n \n try:\n round_num = int(date_string[date_string_find.index('round') + 1])\n except:\n try:\n round_num = date_string_find[date_string_find.index('final') - 1] + ' final'\n except:\n round_num = date_string_find[date_string_find.index('semi-final')]\n \n date = date_string[-3:]\n date = ' '.join(date) \n date = parser.parse(date)\n \n #get attendance\n attend = soup.find_all(text=re.compile('Attendance'))\n attendance = 0\n \n if (len(attend) > 3):\n attendance = int(attend[1].split(' ')[-1])\n \n #get stats \n away_stats = {}\n home_stats = {}\n \n for stat in stats:\n stat_row = soup.find_all('td', text=stat)[0].find_parent('tr')\n elements = stat_row.find_all('td')\n \n if elements[0].text == '-':\n home_stats[stat] = None\n else:\n home_stats[stat] = elements[0].text\n \n if elements[0].text == '-':\n away_stats[stat] = None\n else:\n away_stats[stat] = elements[2].text\n \n return Game(game_id, home_team, away_team, venue, round_num, date, attendance, home_stats, away_stats)", "def get_updates(cls, date, team):\n return cls.query(\n cls.date == date,\n cls.team == team.lower()\n ).order(-cls.name).fetch(100)", "def base_depth_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n new_date = str(date)\n base_depth_to_return = None\n query = \"SELECT base_depth FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')\" %(resort_table, date)\n\n connection = get_connection()\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n base_depth_to_return = row\n except Exception as e:\n print(e, file=sys.stderr)\n connection.close()\n return json.dumps(base_depth_to_return)", "def yield_team(self) -> str: # pragma: no cover", "def get_teams():", "def is_game_today(team_id, date):\n args = arguments.get_arguments()\n\n url = \"/schedule?teamId={id}&expand=\" \"schedule.broadcasts,schedule.teams&date={date:%Y-%m-%d}\".format(\n id=team_id, date=date\n )\n\n response = api.nhl_api(url)\n if response:\n schedule = response.json()\n games_total = schedule[\"totalItems\"]\n else:\n return False, 
None\n\n if games_total == 1:\n games_info = schedule[\"dates\"][0][\"games\"][0]\n return True, games_info\n\n if games_total == 2:\n if args.split is False:\n logging.info(\"Split Squad - spawning a second process to pick up second game.\")\n game_index = 0\n process.spawn_another_process()\n time.sleep(10)\n else:\n game_index = 1\n logging.info(\"Split Squad - this is the process to pick up second game (sleep 5 seconds).\")\n time.sleep(5)\n\n games_info = schedule[\"dates\"][0][\"games\"][game_index]\n return True, games_info\n\n date_string = date.date() if args.date else \"today\"\n logging.info(\"There are no games scheduled for %s, SAD!\", date_string)\n return False, schedule", "def get_team(self):\n if self.team:\n return self.team\n return None", "def test_get_team_profile___roster(self):\n msg = \"Response status is not 200\"\n response = self.api.get_team_profile___roster(self.team_id)\n self.assertEqual(response.status_code, 200, msg)", "def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def get_by_date(sequence, date):\r\n item = filter_by_date(sequence, date, date)\r\n return item.pop() if item else None", "def next_release_date(date):\n df = get_release_dates()\n df = df[df['ReleaseDate'] > date]\n return df['ReleaseDate'].iloc[0]", "def _get_neat_date(date: datetime) -> str:\n month_selector = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\",\n \"October\", \"November\", \"December\"]\n month_string = month_selector[date.month - 1]\n\n day = date.day\n\n if day == 1 or day == 21 or day == 31:\n suffix = \"st\"\n elif day == 2 or day == 22:\n suffix = \"nd\"\n elif day == 3 or day == 23:\n suffix = \"rd\"\n else:\n suffix = \"th\"\n\n neat_date = f\"{month_string} {day}{suffix}\"\n return neat_date", "def snowfall_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n new_date = str(date)\n\n query = \"SELECT snowfall FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')\" %(resort_table, new_date)\n connection = get_connection()\n snowfall_to_return = None\n\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n snowfall_to_return = row\n except Exception as e:\n print(e, file=sys.stderr)\n\n connection.close()\n return json.dumps(snowfall_to_return)", "def final_standing_projection(league_no):\n ros_proj_b_list = BatterProjection.objects.all()\n ros_proj_p_list = PitcherProjection.objects.all()\n league_settings = get_league_settings(league_no)\n current_standings = get_standings(league_no, int(league_settings['Max Teams:']))\n team_list = yahoo_teams(league_no)\n final_stats = final_stats_projection(team_list, ros_proj_b_list, ros_proj_p_list, current_standings,\n league_settings)\n volatility_standings = league_volatility(SGP_DICT, final_stats)\n ranked_standings = rank_list(volatility_standings)\n return ranked_standings", "def change_date(self, date):\n self.date = date\n relative_url = \"https://www.sevenrooms.com/manager/twelvewest/reservations/day/\" + date.strftime(\"%m-%d-20%y\")\n 
self.driver.get(relative_url)\n self.update_html()", "def team(self):\n return self._team", "def seasonStats(personId,type = 'gameLog',group = 'hitting'):\n\n #playerInfo = get('people', {'personIds':personId})\n\n\n teamStats = get('person',{ 'ver':'v1' , 'personId':personId,'hydrate':['stats(group={},type={})'.format(group,type),'currentTeam']})\n return teamStats\n #iterate of stats and find the right player id\n #career stats broken\n #fix the season :2019\n #make function to get team id", "def get_start_date(today, race_date):\r\n title = \"Starting Date\"\r\n question = \"When do you want to start training?\"\r\n choices = ['I already did!', 'Today!', 'Ummm, later...']\r\n choice = e.choicebox(question, title, choices)\r\n if choice == choices[0]:\r\n return get_date('When did you start training?', title,\r\n today.replace(year = today.year - 1), today)\r\n elif choice == choices[1]:\r\n return today\r\n elif choice == choices[2]:\r\n return get_date('When will you start training?', title, today, race_date)\r\n else:\r\n raise QuitError", "async def games(\n self, ctx: commands.Context, *, teams_and_date: Optional[TeamDateFinder] = {}\n ) -> None:\n log.debug(teams_and_date)\n await GamesMenu(\n source=Schedule(**teams_and_date, session=self.session),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)", "def schedule_and_record(season: int, team: str) -> pd.DataFrame:\n # retrieve html from baseball reference\n # sanatize input\n team = team.upper()\n try:\n first_season = get_first_season(team)\n if first_season is None or season < first_season:\n m = \"Season cannot be before first year of a team's existence\"\n raise ValueError(m)\n # ignore validation if team isn't found in dictionary\n except KeyError:\n pass\n if season > datetime.now().year:\n raise ValueError('Season cannot be after current year')\n\n soup = get_soup(season, team)\n table = get_table(soup, team)\n table = process_win_streak(table)\n table = make_numeric(table)\n return table", "def generate_tournament(self, date, surface, n_players=256):\n self.date = date\n self.surface = surface\n players = self._get_players(date, n_players)\n oppositions = self._generate_oppositions(players)\n\n self._debug(\"Generated oppositions: {}\".format(oppositions))\n oppositions.to_csv(\"oppositions_{}_{}_{}\".format(surface, n_players, date), index=False)\n\n return oppositions", "def get_displayed_team(client_id, team_id,\n\t\tprev_time=None, prev_match_id=None, next_time=None, next_match_id=None,\n\t\tpage_limit=None, now=None):\n\ttry:\n\t\t# Get the team.\n\t\tteam, starred_team = session.query(Team, StarredTeam)\\\n\t\t\t\t.outerjoin(StarredTeam, sa.and_(\n\t\t\t\t\tStarredTeam.user_id == client_id,\n\t\t\t\t\tStarredTeam.team_id == team_id))\\\n\t\t\t\t.filter(Team.id == team_id)\\\n\t\t\t\t.one()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\tis_starred = (starred_team is not None)\n\n\t# Get the partial list of matches for this team.\n\tnow = _get_now(now)\n\tpaginator = MatchOpponentsPaginator(team_id, client_id, now)\n\tmatches, prev_time, prev_match_id, next_time, next_match_id = _paginate(\n\t\t\tpaginator, prev_time, prev_match_id, next_time, next_match_id, page_limit)\n\t\n\t# Return the displayed team.\n\treturn DisplayedTeamDetails(team_id,\n\t\t\tteam.display_name,\n\t\t\tteam.num_stars,\n\t\t\tis_starred,\n\t\t\tteam.game,\n\t\t\tteam.division,\n\t\t\tteam.fingerprint,\n\t\t\ttuple(_get_displayed_team_match(match, 
team_id, opponent_team, is_starred)\n\t\t\t\t\tfor match, opponent_team, is_starred in matches),\n\t\t\tprev_time,\n\t\t\tprev_match_id,\n\t\t\tnext_time,\n\t\t\tnext_match_id)", "def get_pitchers_by_game(self, team, date):\n abbr = convert_name(team, how='abbr')\n pitch = '{}.pitching'.format(abbr)\n return self._db.Games.aggregate([{'$match':\n {'$and': [{'date': date},\n {'$or': [{'home' : abbr},\n {'away' : abbr}]}]}},\n {'$project': {'_id' : 0,\n pitch : 1}}])", "def latest(cls, team):\n return cls.query(\n cls.team == team.lower()\n ).order(-cls.date).get()", "def tournament(self, name):\n self.name = name\n q = Query()\n data = TinyDB('app/data/db_tournaments.json').table('tournaments')\n\n self.search_result = data.search(\n (q.name == self.name) |\n (q.place == self.name)\n )\n\n if len(self.search_result) == 0:\n v_menu.View().search('tournament_none')\n return 'None'\n\n elif len(self.search_result) == 1:\n v_menu.View().search_tournaments(\n 'find_tournament',\n self.search_result[0]['name'],\n self.search_result[0]['place'],\n self.search_result[0]['start']\n )\n return self.search_result[0]['id']\n\n elif len(self.search_result) >= 2:\n for i in range(len(self.search_result)):\n v_menu.View().search_tournaments(\n 'find_tournaments',\n self.search_result[i]['name'],\n self.search_result[i]['place'],\n self.search_result[i]['start'], i+1\n )\n\n self.player_number = c_input.Input().select_menu_number(\n len(self.search_result))\n\n return self.search_result[self.player_number-1]['id']", "def winning_team(self):\n return self.team_id", "def get_win_rate_regular_season_for_each_coach(self):\n self.games_won_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n self.games_lose_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df\n self.combine_regular_games_won_lose = (\n self.games_lose_for_coaches\n .merge(self.games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_regular = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n 
.rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"games_lost\",\"which_coach_for_win\":\"games_won\"})\n )", "def _pick_candidate_plant(self, day_date, partner_name):\n candidate_plant = None\n # First check to see if there is already a plant for this day.\n plants = self.filter(last_seen=day_date,\n partner_short_name=partner_name,\n include=True)\n if len(plants) > 0:\n candidate_plant = plants[0]\n else:\n # A plant wasn't found for the requested date.\n if day_date > date.today():\n # The requested date is in the future.\n candidate_plant = None\n else:\n # Pick a new Plant of the Day for this date.\n\n # Try picking a yet-unseen plant at random.\n plants = self.filter(last_seen__isnull=True,\n partner_short_name=partner_name,\n include=True)\n if len(plants) > 0:\n index = random.randrange(0, len(plants))\n candidate_plant = plants[index]\n else:\n # If none are unseen, pick the one last seen longest ago.\n plants = self.filter(last_seen__isnull=False,\n partner_short_name=partner_name,\n include=True).order_by('last_seen')\n if len(plants) > 0:\n candidate_plant = plants[0]\n\n return candidate_plant", "def latestGamePack(team):\n lgr= get('schedule', {'ver':'v1', 'sportId':1, 'date':today, 'teamId':team, 'fields':['dates','games','gamePk'] })\n return lgr['dates'][0]['games'][0]['gamePk']", "def get_team(self):\n try:\n team_id = self.request.GET.get('team')\n if team_id is not None:\n team_id = int(team_id)\n return self.get_available_teams().get(pk=team_id)\n return self.get_available_teams().latest()\n except (Team.DoesNotExist, ValueError):\n return None", "def test_movements_date_from(api_client):\n\n MovementFactory(date=datetime.date(2017, 2, 10))\n MovementFactory(date=datetime.date(2017, 2, 11))\n\n response = api_client.get(\n reverse(\"api:movements-list\"), {\"date_from\": \"2017-02-11\"}\n )\n\n assert response.status_code == 200\n assert len(response.data) == 1\n assert response.data[0][\"date\"] == \"2017-02-11\"", "def get_rating_as_of_date(\n self,\n date: Union[str, float],\n default_rating: float = DEFAULT_INITIAL_RATING\n ) -> float:\n history_df = DataFrame(self.rating_history, columns=[\"date\", \"rating\"])\n\n # only select one entry per distinct date\n history_df[\"r\"] = history_df.groupby([\"date\"]).rank(method=\"first\", ascending=False)\n history_df = history_df[history_df[\"r\"] == 1]\n\n # get the rating for the latest date\n history_df = history_df[history_df[\"date\"] <= date].sort_values(\"date\", ascending=False)\n if history_df.shape[0] == 0:\n return default_rating\n else:\n return history_df.reset_index().loc[0, \"rating\"]", "async def _get_league_leaderboard(self, server_id: str, league_id: str, matchday: str):\n if matchday is None:\n matchday = ''\n params = {'matchday': matchday}\n url = self.api_url + 'competitions/{}/leagueTable'.format(league_id)\n\n return await self._make_request(url, params, server_id)", "def get_player_team_data(self, start_date, end_date = None, \n get_player_data_ind = True, get_team_data_ind = True, \n pre_player_data_dir = None, pre_team_data_dir = None):\n #Converts start and end date from string to datetime\n start_date = datetime.strptime(start_date, '%Y-%m-%d').date()\n \n if end_date:\n end_date = datetime.strptime(end_date, '%Y-%m-%d').date()\n else: \n end_date = start_date\n \n if pre_player_data_dir:\n try: \n #Reads in the existing player dataset to append the scraped data to \n exist_player_data = pd.read_csv(pre_player_data_dir)\n except:\n raise 
Exception('Cannot read in existing player dataset please ensure the directory is correct')\n \n if pre_team_data_dir:\n try: \n #Reads in the existing player dataset to append the scraped data to \n exist_team_data = pd.read_csv(pre_team_data_dir)\n except:\n raise Exception('Cannot read in existing team dataset please ensure the directory is correct')\n \n delta = end_date - start_date \n #Appends list of date between start and end date to strings\n date_list = []\n for i in range(delta.days + 1):\n day = start_date + timedelta(days=i)\n date_list.append(str(day))\n \n for date in date_list:\n \n print(f'Now scraping data from NBA games on {date}')\n home_team_list = get_list_of_hometeams(self.driver, date)\n\n if len(home_team_list) > 0:\n\n counter = 1 \n\n for home_team in home_team_list:\n \n if counter == 1: \n if get_player_data_ind: \n player_df_full = get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver)\n if get_team_data_ind:\n team_df_full = get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver)\n else:\n if get_player_data_ind: \n player_df_full = player_df_full.append(get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver), ignore_index=True)\n if get_team_data_ind:\n team_df_full = team_df_full.append(get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver), ignore_index=True)\n counter+=1\n \n if pre_player_data_dir:\n exist_player_data = exist_player_data.append(player_df_full)\n exist_player_data.to_csv(pre_player_data_dir, index = False)\n print(f'Updated player dataset will be overwritten in {pre_player_data_dir}')\n \n if pre_team_data_dir:\n exist_team_data = exist_team_data.append(team_df_full)\n exist_team_data.to_csv(pre_team_data_dir, index = False)\n print(f'Updated team dataset will be overwritten in {pre_team_data_dir}')\n \n if pre_player_data_dir and pre_team_data_dir:\n return exist_player_data, exist_team_data\n elif pre_player_data_dir:\n return exist_player_data\n elif pre_team_data_dir:\n return exist_team_data\n elif get_player_data_ind and get_team_data_ind:\n return player_df_full, team_df_full \n elif get_player_data_ind:\n return player_df_full\n elif get_team_data_ind:\n return team_df_full", "def recherche_plage (self, date, ) :\n \n if not isinstance ( date, type (' ')) or date < self.dateDebut_string :\n raise ValueError\n \n date = datetime.strptime(date, self.formatDate )\n \n dateDebut_courante = self.dateDebut\n dateFin_courante = self._increment_date (dateDebut_courante )\n \n while (not ( (date >= dateDebut_courante) and (date < dateFin_courante) ) ) :\n dateDebut_courante = self._increment_date (dateDebut_courante )\n dateFin_courante = self._increment_date (dateDebut_courante )\n continue\n \n return str(dateDebut_courante), str(dateFin_courante)", "def rent(self, movie_title, CNP, rented_date = None):\n self.__validator.validate_title(movie_title)\n movie_title = self.__formatter.format_title(movie_title)\n movie = self.__movie_repository.find(movie_title)\n\n self.__validator.validate_CNP(CNP)\n CNP = self.__formatter.format_CNP(CNP)\n client = self.__client_repository.find(CNP)\n\n rentals = self.__repository.get_all()\n for rental in rentals:\n if rental.movie == movie and rental.returned_date == None:\n raise ValueError(\"Filmul a fost deja inchiriat\")\n elif 
rental.client == client and rental.returned_date == None:\n raise ValueError(\"Clientul nu a adus un film inchiriat\")\n elif rental.client == client and rental.due_date < rental.returned_date:\n raise ValueError(\"Clientul a intarziat cu returnarea unui film\")\n\n if rented_date == None:\n self.__repository.add(Rental(movie, client))\n else:\n self.__repository.add(Rental(movie, client, rented_date = rented_date))", "def get_roster_players(self, team, season=None):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's roster\n doc = self.get_html_document(team, 'roster', season)\n\n # retrieving player page urls, and players' first and last names\n # from roster page\n urls = doc.xpath(\"//td[@class='name-col']/a[@href]/@href\")\n first_names = doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-col__item \" +\n \"name-col__firstName']/text()\")\n # using filter to get rid of empty strings after stripping string\n # elements\n # using replace to get rid of asterisk indicating players on injury\n # reserve\n last_names = filter(\n None, [\n x.replace(\"*\", \"\").strip() if x else None for x in doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-\" +\n \"col__item name-col__lastName']/text()\")])\n # retrieving players' positions\n positions = [x[:1] for x in doc.xpath(\n \"//td[@class='position-col fixed-width-font']/text()\")]\n\n for (\n first_name, last_name, url, position\n ) in zip(\n first_names, last_names, urls, positions\n ):\n # retrieving nhl id from player page url\n plr_id = int(url.split(\"-\")[-1])\n\n # trying to find player in database\n plr = Player.find_by_id(plr_id)\n # creating player if not already in database\n if plr is None:\n plr = self.create_player(\n plr_id, last_name, first_name, position)\n logging.info(\"+ %s created\" % plr)\n\n players.append(plr)\n\n return players", "def get_last_pitch_date(self, name, team=None, year=None):\n abbr = convert_name(team, how='abbr')\n dates = self.get_past_game_dates_by_team(abbr, year)\n\n for date in dates:\n if date == self._day:\n continue\n game = list(self._db.Games.find({'$and': [{'date': date},\n {'$or': [{'home' : abbr},\n {'away' : abbr}]}]}))\n pitchers = [x['Pitching'] for x in game[0][abbr]['pitching']]\n if name in pitchers:\n return date\n return None", "def get_window(cls, course_id, date):\r\n try:\r\n return cls.objects.get(course_id=course_id, start_date__lte=date, end_date__gte=date)\r\n except cls.DoesNotExist:\r\n return None", "def by_date(self, date_=None):\n\n if date_ is None:\n try:\n date_ = date.fromtimestamp(\n mktime(strptime(self.date, '%Y-%m-%d'))\n )\n return self.by_date(date_)\n except (TypeError, ValueError):\n try:\n return self.by_year(int(self.date))\n except ValueError:\n return self.current()\n\n else:\n query = self.query()\n query = query.filter(ArchivedResult.date == date_)\n query = query.order_by(\n ArchivedResult.domain,\n ArchivedResult.name,\n ArchivedResult.shortcode,\n ArchivedResult.title\n )\n\n last_modified = self.session.query(\n func.max(query.subquery().c.last_modified)\n )\n\n return query.all(), (last_modified.first() or [None])[0]", "def display_matches(league, starting_date, ending_date):\n matches = get_matches(league, starting_date, ending_date)\n display_given_matches(matches)", "def set_harvest_date(self, date):\n if not date:\n return datetime.utcnow().strftime(\"%Y/%m/%d\")\n return datetime.strptime(date, \"%Y%m%d\").strftime(\"%Y/%m/%d\")", "def get_people(team):", "def 
date_rappro(self):\n try:\n opes = Ope.objects.filter(compte__id=self.id).filter(rapp__isnull=False).select_related('rapp').latest(\n 'date')\n return opes.rapp.date\n except Ope.DoesNotExist:\n return None", "def get_current_team(self):\n import src.application.Domain.Team as Team\n try:\n return Cache.get_element(self.id, \"PLAYER_CURRENT_TEAM\")\n except KeyError:\n pass\n\n matches = self.get_matches()\n current_team = None\n if len(matches) > 0:\n last_match = sorted(matches, key=lambda match: match.date)[-1]\n home_player_i = 'home_player_'\n away_player_i = 'away_player_'\n for i in range(11):\n if last_match.__getattribute__(home_player_i + str(i + 1)) == self.player_api_id:\n current_team = Team.read_by_team_api_id(last_match.home_team_api_id)\n break\n if last_match.__getattribute__(away_player_i + str(i + 1)) == self.player_api_id:\n current_team = Team.read_by_team_api_id(last_match.away_team_api_id)\n break\n Cache.add_element(self.id, current_team, \"PLAYER_CURRENT_TEAM\")\n return current_team", "def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):\n return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess)", "def evening_twilight_6(self, date=None):\n self.site.horizon = self.horizon6\n self._set_site_date(date)\n r_date = self.site.next_setting(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date", "def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n 
game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n pref_playerstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) + player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str", "def get_winner(self):\n diff = self.home_score - self.away_score\n if diff > 0:\n return self.home_team\n elif diff < 0:\n return self.away_team\n else:\n return None", "def test_movements_date_to(api_client):\n\n MovementFactory(date=datetime.date(2017, 2, 10))\n MovementFactory(date=datetime.date(2017, 2, 11))\n\n response = api_client.get(reverse(\"api:movements-list\"), {\"date_to\": \"2017-02-10\"})\n\n assert response.status_code == 200\n assert len(response.data) == 1\n assert response.data[0][\"date\"] == \"2017-02-10\"", "def nflschedule(self, irc, msg, args, optlist, optteam):\n \n fullSchedule = False\n for (option, arg) in optlist:\n if option == 'full':\n fullSchedule = True\n \n optteam = optteam.upper()\n \n if optteam not in self._validteams():\n irc.reply(\"Team not found. Must be one of: %s\" % self._validteams())\n return\n \n lookupteam = self._translateTeam('yahoo', 'team', optteam) # don't need a check for 0 here because we validate prior.\n \n if fullSchedule: # diff url/method.\n url = self._b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbmZsL3RlYW1z') + '/%s/schedule' % lookupteam\n\n try:\n request = urllib2.Request(url)\n html = (urllib2.urlopen(request)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'summary':'Regular Season Games'})\n \n if not table:\n irc.reply(\"ERROR: Failed to find schedule for: %s\") % optteam\n return\n \n tbody = table.find('tbody')\n rows = tbody.findAll('tr')\n\n append_list = []\n\n for row in rows:\n tds = row.findAll('td')\n week = tds[0]\n \n if row.find('td', attrs={'class':'title bye'}):\n date = \"BYE\"\n opp = \"\"\n score = \"\"\n appendString = \"W{0}-{1}\".format(ircutils.bold(week.getText()), ircutils.underline(\"BYE\"))\n else:\n date = tds[1].getText()\n dateSplit = date.split(',', 1) # take the date, dump the rest.\n date = dateSplit[1]\n opp = tds[2] # with how the Tag/string comes in, we need to extract one part and format the other.\n oppName = opp.find('span')\n if oppName:\n oppName.extract()\n oppTeam = opp.find('a').getText() \n #opp = tds[2].find('span').getText()\n #opp = self._translateTeam('team','full', opp) # use the db to make a full team small.\n score = tds[3].getText().replace('EDT','').replace('EST','').replace('pm','').replace('am','') # strip the garbage\n #score = score.replace('W', ircutils.mircColor('W', 'green')).replace('L', ircutils.mircColor('L', 'red'))\n appendString = \"W{0}-{1} {2} {3}\".format(ircutils.bold(week.getText()), date.strip(), oppTeam.strip(), score.strip())\n \n append_list.append(appendString)\n\n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} SCHED :: {1}\".format(ircutils.mircColor(optteam, 'red'), descstring)\n irc.reply(output)\n else:\n url = self._b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbmZsL3RlYW1z') + '/%s/calendar/rss.xml' % lookupteam\n \n try:\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n html = response.read()\n except:\n irc.reply(\"Cannot open: %s\" % url)\n return\n\n # clean 
this stuff up\n html = html.replace('<![CDATA[','').replace(']]>','').replace('EDT','').replace('\\xc2\\xa0',' ')\n\n soup = BeautifulSoup(html)\n items = soup.find('channel').findAll('item')\n \n append_list = []\n\n for item in items:\n title = item.find('title').renderContents().strip() # title is good.\n day, date = title.split(',')\n desc = item.find('description') # everything in desc but its messy.\n desctext = desc.findAll(text=True) # get all text, first, but its in a list.\n descappend = (''.join(desctext).strip()) # list transform into a string.\n if not descappend.startswith('@'): # if something is @, it's before, but vs. otherwise.\n descappend = 'vs. ' + descappend\n descappend += \" [\" + date.strip() + \"]\"\n append_list.append(descappend) # put all into a list.\n\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} {1}\".format(ircutils.bold(optteam), descstring)\n irc.reply(output)", "def get_same_or_newer(this_date):\n data = requests.get(FILE_URL, allow_redirects=True)\n open(\"employees-with-date.csv\", \"wb\").write(data.content)\n with open(\"employees-with-date.csv\") as csv_file:\n reader = csv.DictReader(csv_file)\n \n # We want all employees that started at the same date or the closest newer\n # date. To calculate that, we go through all the data and find the\n # employees that started on the smallest date that's equal or bigger than\n # the given start date.\n this_date_employees = []\n reader = sorted(reader, key=lambda d: d['Start Date'], reverse=True)\n \n row_date = datetime.datetime.strptime(row[\"Start Date\"], '%Y-%m-%d')\n \n final_date_employees.append((row[\"Name\"], row[\"Surname\"]))\n\n return final_date_employees", "def load_future(date):\n\n if type(date) == str:\n date = datetime.strptime(date, '%Y%m%d')\n lats, lons = get_grid()\n ltg = Lightning(date, lats, lons)\n\n return ltg", "def get_dates(self, user_ssn, date_ssn, date_date):\n cur = self.conn.cursor(pymysql.cursors.DictCursor)\n # i think this works, only want to get the name of the people who\n # aren't the current user\n # sql = 'SELECT * FROM client, dates where (ssn = c1_ssn or ssn = c2_ssn) and ssn != %s'\n # dates = cur.execute(sql, (user_ssn))\n\n print(user_ssn, date_ssn, date_date)\n\n # sql = \"SELECT * FROM dates WHERE scheduled_date = '%s' AND ((c1_ssn = %s AND c2_ssn = %s) OR (c1_ssn = %s AND c2_ssn = %s))\"\n sql = \"SELECT * FROM dates WHERE scheduled_date = '{}' AND (c1_ssn = {} AND c2_ssn = {}) OR (c1_ssn = {} AND c2_ssn = {})\".format(date_date, user_ssn, date_ssn, date_ssn, user_ssn)\n # result = cur.execute(\n # sql, (date_date, user_ssn, date_ssn, date_ssn, user_ssn))\n result = cur.execute(sql)\n return CursorIterator(cur)", "def find_games(days_ahead=0):\n headers = {\n 'Host': 'stats.nba.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Referer': 'https://stats.nba.com/',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'x-nba-stats-origin': 'stats',\n 'x-nba-stats-token': 'true'\n }\n board = scoreboardv2.ScoreboardV2(day_offset=days_ahead, headers=headers).get_data_frames()[0]\n board.replace(id_to_abrv, inplace=True)\n return board[['GAME_DATE_EST', 'GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]", "async def get_season(self, server: model.Server):\n api_url = ('https://eu.api.blizzard.com/sc2/'\n f'ladder/season/{server.id()}')\n payload = {'locale': 
'en_US',\n 'access_token': await self.get_access_token()}\n data, status = await self._perform_api_request(api_url, params=payload)\n if status != 200:\n raise InvalidApiResponse(f'{status}: {api_url}')\n\n return model.Season(\n season_id=data.get('seasonId'),\n number=data.get('number'),\n year=data.get('year'),\n server=server,\n start=datetime.fromtimestamp(int(data.get('startDate'))),\n end=datetime.fromtimestamp(int(data.get('endDate')))\n )", "def print_team_schedule(\n sch: Schedule,\n team: str,\n team_list: list[str],\n capt_list: list[str],\n outfile: typing.Union[str, TextIOWrapper] = \"print\",\n):\n if outfile == \"print\":\n\n def pline(txt):\n print(txt)\n\n else:\n\n def pline(txt):\n outfile.write(txt + \"\\n\")\n\n line = \"\"\n\n pline(\"\\nTeam: \" + team + \"\\n\")\n for rnd in range(sch.nrounds):\n _rnd = sch.rounds[rnd]\n line = f\"{_rnd.play_date}\"\n game_not_found = True\n match = 0\n while game_not_found and match < _rnd.nmatches:\n _match = _rnd.matches[match]\n if _match.home == team:\n _teamidx = team_list.index(_match.away)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.away:\n line = line + f\" vs. {_match.away} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n elif _match.away == team:\n _teamidx = team_list.index(_match.home)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.home:\n line = line + f\" @ {_match.home} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n else:\n match = match + 1\n if game_not_found:\n logging.warning(\"Bye week is not expected.\")\n line = line + \"Bye Week\"\n pline(line)", "def test_next_meeting_one_row_typical(self, one_row_worksheet):\n\n initial_date = datetime.date(2021, 5, 1)\n actual_result = one_row_worksheet.next_meeting(initial_date)\n assert actual_result.get(\"Date\") == \"May 2 ,2021\"\n assert actual_result.get(\"Activity\") == \"Play Games!\"\n assert actual_result.get(\"Leader\") == \"RandomPerson1\"", "def scrape():\n league_year = Config.get_property(\"league_year\")\n\n # Create table\n season_data = client.season_schedule(league_year)\n season_data = br_enum_to_string(season_data)\n return season_data", "def get_team(uid=None):\n user = get_user(uid=uid)\n return api.team.get_team(tid=user[\"tid\"])", "def handle_find_slot(date=None):\n if not date:\n session.attributes['stage'] = 'book_slot'\n return question('You didn\\'t specify the date. 
What date you would like to book?')\n else:\n print(date)\n params = {\n 'date': date\n }\n req = requests.get(config.API + '/find_slot', params=params)\n print(req.text)\n freeslots_string = get_time_strings(json.loads(req.text)['freesloats'])\n session.attributes['stage'] = 'find_slot'\n session.attributes['date'] = date\n return question(\n 'The free slots for ' + date + ' are ' + freeslots_string + ' Which one do you want me to book?')", "def scrap(self):\n scrapper = RaceDayScrapper()\n return scrapper.get()", "async def schedule(\n self, ctx: commands.Context, *, teams_and_date: Optional[TeamDateFinder] = {}\n ) -> None:\n log.debug(teams_and_date)\n timezone = await self.config.guild(ctx.guild).timezone()\n log.debug(timezone)\n await GamesMenu(\n source=ScheduleList(**teams_and_date, session=self.session, timezone=timezone),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)", "def __str__(self):\n return \"{} v {} - {}\".format(self.homeTeam['name'].capitalize(), self.awayTeam['name'].capitalize(), self.date)", "def visitTo(self, date):\n raise NotImplementedError()", "def _get_csvs_date(self, date):\n\n # from the root page, get the links to each internet registry\n paths = []\n for registry in self._soup(self.root)('a')[1:]:\n paths.append(os.path.join(self.root, registry['href']))\n\n # complete the url by adding the date and 'roas.csv'\n date_as_url = date.strftime('%Y/%m/%d/') \n for i in range(len(paths)):\n paths[i] = os.path.join(paths[i], date_as_url, 'roas.csv')\n \n # return the paths that exists\n return [p for p in paths if self.session.get(p).status_code == 200]", "def get_past_matches_data(team):\n matches = team.get_past_matches()\n match_list = []\n for match in matches:\n match_dict = {}\n match_dict['match_date'] = match.match_date\n match_dict['match_name'] = match.__str__()\n match_dict['id'] = match.id\n innings = match.get_innings()\n if len(innings):\n if innings[0].runs > innings[1].runs:\n match_dict['winner_team'] = innings[0].bat_team\n match_dict['win_margin'] = innings[0].runs - innings[1].runs\n match_dict['win_type'] = 'Runs'\n match_dict['winner_score'] = str(innings[0].runs) + '/' + str(innings[0].wickets)\n else:\n match_dict['winner_team'] = innings[1].bat_team\n match_dict['win_margin'] = 10 - innings[1].wickets\n match_dict['win_type'] = 'Wickets'\n match_dict['winner_score'] = str(innings[1].runs) + '/' + str(innings[1].wickets)\n match_list.append(match_dict)\n return match_list" ]
[ "0.6794363", "0.63804543", "0.5813007", "0.5741318", "0.57277983", "0.5647605", "0.5467505", "0.5346135", "0.5336544", "0.53275824", "0.53005236", "0.5272862", "0.5261342", "0.521931", "0.5192084", "0.514297", "0.51121926", "0.50987285", "0.5097064", "0.50702137", "0.50670063", "0.5008724", "0.49876702", "0.4974747", "0.49680772", "0.49635786", "0.49604803", "0.49472487", "0.49153125", "0.49094445", "0.48929095", "0.4892247", "0.4873914", "0.4873282", "0.48497254", "0.48485032", "0.48348564", "0.47886536", "0.4787561", "0.47868395", "0.47756305", "0.4765254", "0.47619656", "0.4761908", "0.47536168", "0.47407293", "0.47273389", "0.47232932", "0.47177628", "0.4704258", "0.47013995", "0.4683983", "0.4681976", "0.46692008", "0.46620008", "0.4655624", "0.46555012", "0.4655033", "0.46402484", "0.4639112", "0.46387425", "0.46290618", "0.46279415", "0.46258217", "0.46176162", "0.4616339", "0.46133503", "0.4598327", "0.4593111", "0.45919824", "0.45882383", "0.4585168", "0.4582716", "0.45792487", "0.4579051", "0.4576518", "0.45748645", "0.4569309", "0.45685065", "0.45610836", "0.45594698", "0.4558753", "0.4554821", "0.45542097", "0.45439687", "0.4540979", "0.45337212", "0.45307148", "0.452878", "0.45249632", "0.45237043", "0.4513333", "0.45130897", "0.45058027", "0.4502674", "0.44990602", "0.44950268", "0.44926012", "0.44864964", "0.44821927" ]
0.65519655
1
Detect which spins in each peak make up spin anchors.
def anchors(self): dims = self.dims anchors = [] for peak in self: possible_anchors = [] for combination in combinations(range(dims), 2): spins = [peak[i] for i in combination] if any(s.res_num is None or s.atom is None for s in spins): continue res_nums = [spin.res_num for spin in spins] atoms = [spin.atom for spin in spins] elements = [atom[0] for atom in atoms] positions = [atom[1:] for atom in atoms] same_res_num = res_nums[0] == res_nums[1] valid_pairs = [set(('H', 'N')), set(('H', 'C'))] is_proton_heavy_pair = set(elements) in valid_pairs same_position = all(c[0] == c[1] for c in zip(*positions)) if same_res_num and is_proton_heavy_pair and same_position: if '' in positions and set(elements) != set(('H', 'N')): # One of the atom names must have been 'H', 'N' or 'C' # Of these, only the amide proton anchor is valid continue if elements[0] == 'H': possible_anchors.append(combination) else: possible_anchors.append(combination[::-1]) if len(possible_anchors) > 1: pa_sets = [set(pa) for pa in possible_anchors] overlap = set.intersection(*pa_sets) if overlap: # Ambiguous, overlapping anchors continue for poss_anc in possible_anchors: if poss_anc not in anchors: anchors.append(poss_anc) anchors = tuple(anchors) return anchors
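Editor's note (not part of the dataset row; all names below are assumptions): the method above detects proton/heavy-atom spin pairs that share a residue number and atom position. The standalone sketch below illustrates only the core pairing rule — a proton ("H…") anchored to an "N…" or "C…" spin on the same residue at the same position — and deliberately simplifies the blank-position and ambiguity edge cases handled in the original method.

from collections import namedtuple
from itertools import combinations

# Minimal stand-in for a spin: residue number plus atom name such as "H", "N", "CA".
Spin = namedtuple("Spin", ["res_num", "atom"])

def find_anchor_pairs(peak):
    """Return (proton_index, heavy_index) pairs in one peak that qualify as anchors."""
    anchors = []
    for i, j in combinations(range(len(peak)), 2):
        a, b = peak[i], peak[j]
        # Skip spins with missing assignments.
        if None in (a.res_num, a.atom, b.res_num, b.atom):
            continue
        elements = {a.atom[0], b.atom[0]}          # element letters, e.g. {"H", "N"}
        same_res = a.res_num == b.res_num
        same_pos = a.atom[1:] == b.atom[1:]        # strict position match (simplified)
        if same_res and same_pos and elements in ({"H", "N"}, {"H", "C"}):
            # Order the pair proton-first.
            anchors.append((i, j) if a.atom[0] == "H" else (j, i))
    return anchors

# Example: the amide H/N pair on residue 12 anchors; CA does not pair with either.
peak = [Spin(12, "H"), Spin(12, "N"), Spin(12, "CA")]
print(find_anchor_pairs(peak))  # -> [(0, 1)]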
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inPointing(self, pulsar):\n # initialise offset_deg to be a big old number\n # FWHM is in arcmin so always multiply by 60\n offset_deg = 5.\n\n # loop over pointings\n for point in self.pointingslist:\n # do a really basic check first\n\n glterm = (pulsar.gl - point.gl)**2\n gbterm = (pulsar.gb - point.gb)**2\n offset_new = math.sqrt(glterm + gbterm)\n\n # if the beam is close enough, break out of the loop\n if offset_new < offset_deg:\n offset_deg = offset_new\n self.gain = point.gain\n self.tobs = point.tobs\n \n return offset_deg", "def splitDetectorPeakInfo(self):\r\n\t\tsplit_raw_min = np.amin(self.splitData)\r\n\t\tsplit_min = split_raw_min - self.splitBaseline\r\n\t\t\t\t\r\n\t\tsplit_raw_max = np.amax(self.splitData)\r\n\t\tsplit_max = split_raw_max - self.splitBaseline\r\n\t\r\n\t\tself.splitMax = split_max\r\n\t\tself.splitMin = split_min", "def panPeakDetect(detection, fs):\n\n min_distance = int(0.25 * fs)\n\n signal_peaks = [0]\n noise_peaks = []\n\n SPKI = 0.0\n NPKI = 0.0\n\n threshold_I1 = 0.0\n threshold_I2 = 0.0\n\n RR_missed = 0\n index = 0\n indexes = []\n\n missed_peaks = []\n peaks = []\n\n for i in range(len(detection)):\n\n if 0 < i < len(detection) - 1:\n if detection[i - 1] < detection[i] and detection[i + 1] < detection[i]:\n peak = i\n peaks.append(i)\n\n if detection[peak] > threshold_I1 and (peak - signal_peaks[-1]) > 0.25 * fs:\n\n signal_peaks.append(peak)\n indexes.append(index)\n SPKI = 0.125 * detection[signal_peaks[-1]] + 0.875 * SPKI\n if RR_missed != 0:\n if signal_peaks[-1] - signal_peaks[-2] > RR_missed:\n missed_section_peaks = peaks[indexes[-2] + 1:indexes[-1]]\n missed_section_peaks2 = []\n for missed_peak in missed_section_peaks:\n if missed_peak - signal_peaks[-2] > min_distance and signal_peaks[\n -1] - missed_peak > min_distance and detection[missed_peak] > threshold_I2:\n missed_section_peaks2.append(missed_peak)\n\n if len(missed_section_peaks2) > 0:\n missed_peak = missed_section_peaks2[np.argmax(detection[missed_section_peaks2])]\n missed_peaks.append(missed_peak)\n signal_peaks.append(signal_peaks[-1])\n signal_peaks[-2] = missed_peak\n\n else:\n noise_peaks.append(peak)\n NPKI = 0.125 * detection[noise_peaks[-1]] + 0.875 * NPKI\n\n threshold_I1 = NPKI + 0.25 * (SPKI - NPKI)\n threshold_I2 = 0.5 * threshold_I1\n\n if len(signal_peaks) > 8:\n RR = np.diff(signal_peaks[-9:])\n RR_ave = int(np.mean(RR))\n RR_missed = int(1.66 * RR_ave)\n\n index = index + 1\n # First possible peak detection\n first_possible_peak = np.argmax(detection[0:int(0.25 * fs)])\n if detection[first_possible_peak] > SPKI:\n signal_peaks[0] = first_possible_peak\n else:\n signal_peaks.pop(0)\n signal_peaks = np.array(signal_peaks)\n return signal_peaks", "def addPeakResonancesToSeqSpinSystems(peak, seqOffsets):\n \n assert len(peak.peakDims) == len(seqOffsets)\n assert None in seqOffsets # otherwise no reference point\n\n spinSystems = []\n resonanceList = []\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n spinSystem = None\n resonances = []\n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n resonances.append(resonance)\n \n if resonance.resonanceGroup:\n if not spinSystem:\n spinSystem = resonance.resonanceGroup\n\n elif spinSystem is not resonance.resonanceGroup:\n msg = 'There are multiple spin systems for peak dimension %d.\\n' % (i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(resonance.resonanceGroup,spinSystem)\n else:\n return\n\n 
resonanceList.append(resonances)\n spinSystems.append( spinSystem )\n\n ref = None\n I = 0\n for i, spinSystem in enumerate(spinSystems):\n if spinSystem is not None:\n if seqOffsets[i] is None:\n if ref is None:\n ref = spinSystem\n I = i\n \n else:\n if spinSystem is not ref:\n msg = 'Dimensions %d and %d have different spin systems.\\n' % (I+1,i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(spinSystem, ref)\n else:\n return\n \n if ref is not None:\n for i, seqOffset in enumerate(seqOffsets):\n \n if seqOffset:\n spinSystem = findConnectedSpinSystem(ref, seqOffset)\n if spinSystems[i] is ref:\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n showWarning('Failure','Spin system cannot be both i and i%s (dimension %d)' % (deltaText,i+1))\n continue\n \n \n if spinSystem and spinSystems[i]:\n if spinSystem is not spinSystems[i]:\n if (not spinSystem.residue) or (not spinSystems[i].residue):\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n \n msg = 'There is an i%s spin system already present (dimension %d).\\n' % (deltaText, i+1)\n msg += 'Merge spin systems together?'\n if showOkCancel('Confirm', msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n elif spinSystem.residue is spinSystems[i].residue:\n name = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n msg = 'There are multiple spin systems for residue %s.\\n?' % name\n msg += 'Merge spin systems together?'\n \n if showOkCancel('Confirm',msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n else:\n txt1 = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n txt2 = '%d%s' % (spinSystems[i].residue.seqCode,spinSystems[i].residue.ccpCode)\n msg = 'Cannot set spin system for F%d dim' % (i+1)\n msg += 'Offset %d causes conflict between %s and %s' % (seqOffset, txt1, txt2)\n showWarning('Failure',msg)\n return\n \n if resonanceList[i]:\n nmrProject = resonanceList[i][0].nmrProject\n if not spinSystem:\n if spinSystems[i]:\n spinSystem = spinSystems[i]\n else:\n spinSystem = nmrProject.newResonanceGroup()\n \n makeSeqSpinSystemLink(ref, spinSystem, seqOffsets[i])\n \n for resonance in resonanceList[i]:\n if resonance.resonanceGroup is not spinSystem:\n addSpinSystemResonance(spinSystem,resonance)", "def _peakdetect_parabole_fitter(raw_peaks, x_axis, y_axis, points):\n func = lambda x, k, tau, m: k * ((x - tau) ** 2) + m\n fitted_peaks = []\n for peak in raw_peaks:\n index = peak[0]\n x_data = x_axis[index - points // 2: index + points // 2 + 1]\n y_data = y_axis[index - points // 2: index + points // 2 + 1]\n # get a first approximation of tau (peak position in time)\n tau = x_axis[index]\n # get a first approximation of peak amplitude\n m = peak[1]\n \n # build list of approximations\n # k = -m as first approximation?\n p0 = (-m, tau, m)\n popt, pcov = curve_fit(func, x_data, y_data, p0)\n # retrieve tau and m i.e x and y value of peak\n x, y = popt[1:3]\n \n # create a high resolution data set for the fitted waveform\n x2 = np.linspace(x_data[0], x_data[-1], points * 10)\n y2 = func(x2, *popt)\n \n fitted_peaks.append([x, y, [x2, y2]])\n \n return fitted_peaks", "def get_steps_between_peaks(self):\n max_x, max_y = self.get_local_maxes()\n full_steps = np.ediff1d(max_x)\n # _full_mean, _full_std = np.mean(full_steps), np.std(full_steps)\n _full_count = 
len(full_steps)\n\n unique_steps_between_peaks, unique_steps_counts = np.unique(full_steps, return_counts=True)\n\n _filter = np.logical_and(full_steps < unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 1.7,\n full_steps > unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 0.3)\n # 1.7 chosen as filter, as there seems to be another peak ~2* (probably due to single missed peaks)\n # 1.7 avoids the start of the gaussian at 2*\n\n if not _filter.all():\n steps = full_steps[_filter]\n # print(unique_steps_between_peaks[np.argmax(unique_steps_counts)])\n _filtered_count = len(steps)\n _counts = (_full_count, _filtered_count, _full_count - _filtered_count)\n # print('Original Count: %s, Filtered Count: %s, Excluded Count: %s' % _counts)\n # print('Filtered:', full_steps[np.invert(_filter)])\n unique_steps_between_peaks, unique_steps_counts = np.unique(steps, return_counts=True)\n else:\n steps = full_steps\n\n return steps, unique_steps_between_peaks, unique_steps_counts", "def locateSinglePeak(self, start, end, list):\n for x in range(start,end):\n currentVal = list[x]\n if currentVal > self.maxFound:\n self.maxFound = currentVal\n diff = self.maxFound - currentVal\n if diff > self.maxDiffFound:\n self.maxDiffFound = diff\n if diff > self.maxFound/2:\n self.dipsBelowHalf += 1", "def first_peak_detect(beam, start_point):\n logging.debug('running first_peak_detect function')\n for i in range(start_point, len(beam)):\n logging.debug('current value of i is %d', i)\n if beam[i-1] < beam[i] > beam[i+1]:\n logging.debug('value determined to be the center of the values %d, %d, %d', beam[i-1], beam[i], beam[i+1])\n return i\n\n logging.error(\"no peak was found. will try working with the length of the beam\")\n return len(beam)", "def find_starts(config, data):\n\n trigger = butter_bandpass_filter(\n data, config.bandpass_lower, config.bandpass_upper,\n config.sampling_rate, 6)\n trigger = np.absolute(trigger)\n trigger = butter_lowpass_filter(\n trigger, config.lowpass_freq, config.sampling_rate, 6)\n\n # transient = 0.0005\n # start_idx = int(transient * config.sampling_rate)\n start_idx = 0\n average = np.average(trigger[start_idx:])\n maximum = np.max(trigger[start_idx:])\n minimum = np.min(trigger[start_idx:])\n middle = (np.max(trigger[start_idx:]) - min(trigger[start_idx:])) / 2\n if average < 1.1 * middle:\n print()\n print(\"Adjusting average to avg + (max - avg) / 2\")\n average = average + (maximum - average) / 2\n offset = -int(config.trigger_offset * config.sampling_rate)\n\n if config.trigger_rising:\n trigger_fn = lambda x, y: x > y\n else:\n trigger_fn = lambda x, y: x < y\n\n # The cryptic numpy code below is equivalent to looping over the signal and\n # recording the indices where the trigger crosses the average value in the\n # direction specified by config.trigger_rising. 
It is faster than a Python\n # loop by a factor of ~1000, so we trade readability for speed.\n trigger_signal = trigger_fn(trigger, average)[start_idx:]\n starts = np.where((trigger_signal[1:] != trigger_signal[:-1])\n * trigger_signal[1:])[0] + start_idx + offset + 1\n if trigger_signal[0]:\n starts = np.insert(starts, 0, start_idx + offset)\n\n # plt.plot(data)\n # plt.plot(trigger*100)\n # plt.axhline(y=average*100)\n # plt.show()\n\n return starts, trigger, average", "def stichAnchors(chrom, loops, margin=1):\n cov = set()\n for i, loop in enumerate(loops):\n cov.update(range(loop.x_start, loop.x_end + 1))\n cov.update(range(loop.y_start, loop.y_end + 1))\n cov = list(cov)\n cov.sort()\n npeaks = []\n i = 0\n while i < len(cov) - 1:\n j = i + 1\n while j < len(cov):\n if cov[j] - cov[j - 1] > margin:\n break\n else:\n j += 1\n peak = Peak()\n peak.chrom = chrom\n peak.start = cov[i]\n peak.end = cov[j - 1]\n peak.length = cov[j - 1] - cov[i] + 1\n npeaks.append(peak)\n i = j #update search start\n return npeaks", "def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = 
absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def measure_peak(sig, use_inflection=True, return_allinfo=False):\n sig = np.array(sig)\n cr = locate_peak(sig)\n cr_crosszero = np.zeros_like(cr)\n cr_inflection = np.zeros_like(cr)\n\n # cross zero points\n cr_cr1 = -int_sign(sig[1:] * sig[:-1])\n cr_cr2 = -int_sign(sig[:-1] * sig[1:])\n cr_cr1[cr_cr1<0] = 0\n cr_cr2[cr_cr2<0] = 0\n cr_crosszero[1:] = cr_cr1\n cr_crosszero[:-1] += cr_cr2\n cr_crosszero = int_sign(cr_crosszero * sig) * 4\n\n # inflection points\n d2 = second_derivate(sig)\n d2p = locate_peak(d2)\n d2p[np.where( np.abs(d2p) != 1 )] = 0\n d2p[np.where( ((d2p==1) & (sig<0)) | ((d2p==-1) & (sig>0)) )] = 0\n cr_inflection[np.where(d2p==-1)] = 8\n cr_inflection[np.where(d2p==1)] = -8\n \n if use_inflection:\n cr_combine = cr + cr_inflection + cr_crosszero \n else:\n cr_combine = cr + cr_crosszero\n\n oned = False\n if len(np.shape(sig)) == 1:\n oned = True\n sig = sig[:, np.newaxis]\n \n peaks_list = []\n for i in range(np.shape(sig)[1]):\n pvs = np.where(np.abs(cr[:,i]) == 1)[0]\n lims = np.where(np.abs(cr_combine[:,i]) >= 2)[0]\n if len(pvs) == 0 :\n peaks_list.append([])\n continue\n if np.shape(lims)[0] == 0:\n lower_pos = pvs\n upper_pos = pvs\n else:\n lower_arr = (pvs > lims[:, np.newaxis])\n upper_arr = (pvs < lims[:, np.newaxis])\n lower_arr_r = np.flipud(lower_arr)\n upper_pos_i = np.argmax(upper_arr, axis=0)\n upper_pos = lims[(upper_pos_i, )]\n w_upper_none = np.where(upper_arr[-1,:] == False)\n upper_pos[w_upper_none] = pvs[w_upper_none]\n lower_pos_r_i = np.argmax(lower_arr_r, axis=0)\n lower_pos_i = len(lims) - 1 - lower_pos_r_i\n lower_pos = lims[(lower_pos_i, )]\n w_lower_none = np.where(lower_arr[0, :] == False)\n lower_pos[w_lower_none] = 0\n\n peaks = []\n for center, lower, upper in zip(pvs, lower_pos, upper_pos):\n depth = sig[center, i]\n sig_range = sig[lower:upper+1, i]\n sig_range[np.where(int_sign(sig_range) != int_sign(depth))] = 0.0\n volume = np.sum(sig_range)\n peaks.append(Peak(center=center, lower=lower, upper=upper, depth=depth, volume=volume))\n peaks_list.append(peaks)\n if oned:\n peaks_list = peaks_list[0]\n \n if return_allinfo:\n return peaks_list, cr, cr_crosszero, cr_inflection \n else:\n return peaks_list", "def peaks(n, binCenters, method=\"JI\", window=100, peakAmpThresh=0.00005, valleyThresh=0.00003):\n data = zip(binCenters, n)\n binCenters = np.array(binCenters)\n firstCenter = (min(binCenters)+1.5*window)/window*window\n lastCenter = (max(binCenters)-window)/window*window\n if firstCenter < -1200: firstCenter = -1200\n if lastCenter > 3600: lastCenter = 
3600\n\n\n if method == \"slope\" or method == \"hybrid\":\n peaks = {}\n peakInfo = peaksBySlope(n, binCenters, lookahead=20, delta=valleyThresh, averageHist=True)\n\n #find correspondences between peaks and valleys, and set valleys are left and right Indices\n #see the other method(s) for clarity!\n\n peakData = peakInfo[\"peaks\"]\n valleyData = peakInfo[\"valleys\"]\n\n #print len(peakData[0]), len(peakData[1])\n for i in xrange(len(peakData[0])):\n nearestIndex = findNearestIndex(valleyData[0], peakData[0][i])\n if valleyData[0][nearestIndex] < peakData[0][i]:\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][nearestIndex+1:]) == 0):\n rightIndex = findNearestIndex(binCenters, peakData[0][i]+window/2.0)\n else:\n offset = nearestIndex+1\n nearestIndex = offset+findNearestIndex(valleyData[0][offset:], peakData[0][i])\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n else:\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][:nearestIndex]) == 0):\n leftIndex = findNearestIndex(binCenters, peakData[0][i]-window/2.0)\n else:\n nearestIndex = findNearestIndex(valleyData[0][:nearestIndex], peakData[0][i])\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n\n pos = findNearestIndex(binCenters, peakData[0][i])\n #print binCenters[pos], peakData[1][i], binCenters[leftIndex], binCenters[rightIndex]\n peaks[pos] = [peakData[1][i], leftIndex, rightIndex]\n\n if method == \"hybrid\": slopePeaks = peaks\n \n if method == \"JI\" or method == \"ET\" or method == \"hybrid\":\n peaks = {}\n #Obtain max value per interval\n if method == \"JI\" or method == \"hybrid\":\n firstCenter = nearestJI(firstCenter)\n lastCenter = nearestJI(lastCenter)\n\n interval = firstCenter\n prevInterval = firstCenter-window\n #NOTE: All *intervals are in cents. *indices are of binCenters/n\n while interval < lastCenter:\n if method == \"ET\":\n leftIndex = findNearestIndex(binCenters, interval-window/2)\n rightIndex = findNearestIndex(binCenters, interval+window/2)\n interval += window\n elif method == \"JI\" or method == \"hybrid\":\n leftIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n prevInterval = interval\n interval = nextJI(interval)\n rightIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n peakPos = np.argmax(n[leftIndex:rightIndex])\n peakAmp = n[leftIndex+peakPos]\n peaks[leftIndex+peakPos] = [peakAmp, leftIndex, rightIndex]\n \n #print binCenters[leftIndex], binCenters[rightIndex], binCenters[leftIndex+peakPos], peakAmp\n #NOTE: All the indices (left/rightIndex, peakPos) are to be changed to represent respective cent \n #value corresponding to the bin. Right now, they are indices of respective binCenters in the array.\n \n if method == \"hybrid\":\n #Mix peaks from slope method and JI method.\n p1 = slopePeaks.keys()\n p2 = peaks.keys()\n allPeaks = {} #overwriting peaks dict\n for p in p1:\n nearIndex = findNearestIndex(p2, p)\n if abs(p-p2[nearIndex]) < window/2.0: p2.pop(nearIndex)\n \n for p in p1: allPeaks[p] = slopePeaks[p]\n for p in p2: allPeaks[p] = peaks[p]\n peaks = allPeaks\n\n #Filter the peaks and retain eligible peaks, also get their valley points.\n\n # ----> peakAmpThresh <---- : remove the peaks which are below that\n\n for pos in peaks.keys():\n #pos is an index in binCenters/n. 
DOES NOT refer to a cent value.\n if peaks[pos][0] < peakAmpThresh:\n #print \"peakAmp: \", binCenters[pos]\n peaks.pop(pos)\n\n #Check if either left or right valley is deeper than ----> valleyThresh <----.\n valleys = {}\n for pos in peaks.keys():\n leftLobe = n[peaks[pos][1]:pos]\n rightLobe = n[pos:peaks[pos][2]]\n #Sanity check: Is it a genuine peak? Size of distributions on either side of the peak should be comparable.\n if len(leftLobe) == 0 or len(rightLobe) == 0:\n continue\n if 1.0*len(leftLobe)/len(rightLobe) < 0.15 or 1.0*len(leftLobe)/len(rightLobe) > 6.67:\n #print \"size: \", binCenters[pos]\n #peaks.pop(pos)\n continue\n\n leftValleyPos = np.argmin(leftLobe)\n rightValleyPos = np.argmin(rightLobe)\n if (abs(leftLobe[leftValleyPos]-n[pos]) < valleyThresh and abs(rightLobe[rightValleyPos]-n[pos]) < valleyThresh):\n #print \"valley: \", binCenters[pos]\n peaks.pop(pos)\n else:\n valleys[peaks[pos][1]+leftValleyPos] = leftLobe[leftValleyPos]\n valleys[pos+rightValleyPos] = rightLobe[rightValleyPos]\n \n if len(peaks) > 0:\n temp1 = np.array(peaks.values())\n temp1 = temp1[:, 0]\n\n return {'peaks':[binCenters[peaks.keys()], temp1], 'valleys':[binCenters[valleys.keys()], valleys.values()]}\n else:\n return {'peaks':[[], []], 'valleys':[[], []]}", "def _getPeaks(self, ch, validDataIdt):\n startInd, endInd = locate_consecutive_numbers.locateConsecutiveNumbers(\n self.burstIdt) # Find consecutive numbers to get a max of first\n self.peakInd = np.nan*np.ones(len(startInd), dtype=int)\n # Loop over every microburst detection region (consecutive microburst indicies)\n for i, (st, et) in enumerate(zip(startInd, endInd)):\n if st == et: \n # If the same index \n et += 1\n # Index nightmare, but works. There may be a better way\n offset = validDataIdt[self.burstIdt[st]]\n self.peakInd[i] = np.argmax(\n self.d[ch][validDataIdt[self.burstIdt[st:et]]]) + offset\n self.peakInd = self.peakInd.astype(int)\n return", "def findpeakg(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = GaussianModel(prefix = 'g1_')\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n 
pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n for j in range(1,fnum[i]):\n mod = mod + GaussianModel(prefix = 'g%i_'%(j + 1))\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][-1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Gaussian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][-1] - bottom)/width)\n for k in range(fnum[i]):\n amplitude = pars['g%i_height'%(k + 1)].value\n sigma = pars['g%i_sigma'%(k + 1)].value\n miu = pars['g%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude*math.exp( - (bottom + width*p - miu)*(bottom + width*p - miu)/(2*sigma*sigma)))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([sigma,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def peakdetect_parabole(y_axis, x_axis, points = 9):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n # make the points argument odd\n points += 1 - points % 2\n #points += 1 - int(points) & 1 slower when int conversion needed\n \n # get raw peaks\n max_raw, min_raw = peakdetect_zero_crossing(y_axis)\n \n # define output variable\n max_peaks = []\n min_peaks = []\n \n max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points)\n min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points)\n \n max_peaks = map(lambda x: [x[0], x[1]], max_)\n max_fitted = map(lambda x: x[-1], max_)\n min_peaks = map(lambda x: [x[0], x[1]], min_)\n min_fitted = map(lambda x: x[-1], min_)\n \n \n #pylab.plot(x_axis, y_axis)\n #pylab.hold(True)\n #for max_p, max_f in zip(max_peaks, max_fitted):\n # pylab.plot(max_p[0], max_p[1], 'x')\n # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)\n #for min_p, min_f in zip(min_peaks, min_fitted):\n # pylab.plot(min_p[0], min_p[1], 'x')\n # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)\n #pylab.show()\n \n return [max_peaks, min_peaks]", "def whichPeaks(trace):\n peaks = []\n df = np.diff(trace)\n for t in range(len(df)-4):\n if df[t] > 0 and df[t+1] > 0:\n if df[t+2] < 0 and df[t+3] < 0: # Potential peak\n if trace[t+2] > np.mean(trace):\n peaks.append([t+2, trace[t+2]])\n return peaks", "def test_peak_detection(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, _) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_sms = utilFunctions.peakDetection(mx, self.sm.t)\n 
for j, (p, p_s) in enumerate(itertools.zip_longest(ploc, ploc_sms)):\n with self.subTest(frame=i, peak_n=j):\n self.assertEqual(p, p_s)", "def check_star(peaks,data):\n star = 0\n for i in peaks:\n max = data[i]\n if i<3 or i+4>data.size:\n continue\n mean = data[i-3:i+4].mean()\n if (max-mean)<0.1*max:\n star += 1\n if star*2>peaks.size:\n return True\n else:\n return False", "def peakdetect_sine_locked(y_axis, x_axis, points = 9):\n return peakdetect_sine(y_axis, x_axis, points, True)", "def get_spin_link_dict(peaklist):\n spin_link_dict = {}\n for peak in peaklist:\n spins = [spin for spin in peak\n if spin.atom is not None and spin.atom[0] == 'H']\n if len(spins) != 2:\n err = ('expected 2 Hydrogens in each peak, '\n 'found %d' % len(spins))\n raise ValueError(err)\n link = frozenset(spin.assignment for spin in spins)\n spin_link_dict.setdefault(link, []).append(peak)\n return spin_link_dict", "def sample_pin_position_range():\n #Create a sample goniometer\n g = TopazInHouseGoniometer()\n\n #Initialize the leg limits\n g.relative_sample_position = column([0.0, 0.0, 0.0])\n g.getplatepos(0.0, 0.0, 0.0)\n g.calculate_leg_xy_limits(visualize=True)\n\n# if True:\n# pylab.show()\n# return\n\n n = 17\n positions = np.linspace(-8, 8, n) #Range calculated in mm\n allowed = np.zeros( (n,n,n) )\n for (ix, x) in enumerate(positions):\n print \"Calculating x\", x\n for (iy, y) in enumerate(positions):\n for (iz, z) in enumerate(positions):\n #Set up\n g.relative_sample_position = column([x, y, z])\n allowed[ix,iy,iz] = g.are_angles_allowed([0., 0., 0.], return_reason=False)\n\n #Do a plot\n\n pylab.figure(1, figsize=[15,15])\n pylab.title(\"Allowable XZ sample positions\")\n for (iy, y) in enumerate(positions):\n print \"At y of\", y, \", # of points = \", np.sum( allowed[:, iy,:])\n if iy < 16:\n pylab.subplot(4,4,iy+1)\n pylab.pcolor(positions, positions, allowed[:, iy, :].transpose(), norm=pylab.Normalize(0, 1))\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"y = %.3f mm\" % y)\n pylab.draw()\n pylab.axis('equal')\n pylab.show()\n #pylab.", "def find_peaks(self, t_measure):\n self._check_time(t_measure)\n #widths = np.arange(2,7) # range of widths to check by find_peaks_cwt\n #peak_nodes = find_peaks_cwt(self.get_velocities(t_measure), widths, min_snr=2.0,noise_perc=30.0)\n peak_beads = peakutils.peak.indexes(self.get_velocities(t_measure), thres=0.75, min_dist=7)\n return peak_beads", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def _get_trough_and_peak_idx(waveform):\n trough_idx = np.argmin(waveform, axis=1)\n peak_idx = -1 * np.ones(trough_idx.shape, dtype=int) # int, these are used for indexing\n 
for i, tridx in enumerate(trough_idx):\n if tridx == waveform.shape[1] - 1:\n trough_idx[i] = 0\n peak_idx[i] = 0\n continue\n idx = np.argmax(waveform[i, tridx:])\n peak_idx[i] = idx + tridx\n return trough_idx, peak_idx", "def nearest_test_pulse(self):", "def check_setpoints(self):\n # TODO: Can possibly put this in the CCBC Brains\n for heater in self.ard_data['heaters'].keys():\n current_temp = float(self.ard_data['tempsensors'][self.ard_data['heaters'][heater]['tsensor_name']]['value'])\n\n # Assign the pin_status the previous value from the previous iteration\n pin_status = self.ard_data['heaters'][heater]['status']\n\n if current_temp > self.ard_data['heaters'][heater]['upper limit']:\n pin_status = 'OFF'\n\n if current_temp < self.ard_data['heaters'][heater]['lower limit']:\n pin_status = 'ON'\n\n if current_temp >= self.ard_data['heaters'][heater]['maxtemp']:\n pin_status = 'OFF'\n\n self.ard_data['heaters'][heater]['status'] = pin_status\n\n for pump in self.ard_data['pumps'].keys():\n pressure = float(self.ard_data['presssensors'][self.ard_data['pumps'][pump]['psensor_name']]['pressure'])\n gallons = float(pressure * self.ard_data['pumps'][pump]['psi_to_gal_slope'] +\n self.ard_data['pumps'][pump]['psi_to_gal_intercept'])\n self.ard_data['pumps'][pump]['gallons'] = gallons\n\n # Assign the pin status the previous value from the previous cycle\n pin_status = self.ard_data['pumps'][pump]['status']\n\n if gallons > self.ard_data['pumps'][pump]['upper limit']:\n # Turn the pump off when the setpoint is above the setpoint\n pin_status = 'OFF'\n # TODO: Account for solenoid valve control when available\n\n if gallons < self.ard_data['pumps'][pump]['lower limit']:\n pin_status = 'ON'\n\n self.ard_data['pumps'][pump]['status'] = pin_status", "def _calc_ball_scan(self):\n # Default scan to false.\n self._scan_line = [False] * Stella.FRAME_WIDTH\n\n if self._enabled:\n for x in range(self._x_min, self._x_max):\n self._scan_line[x % Stella.FRAME_WIDTH] = True", "def get_threshold_between_peaks(smoothed, peaks, valleys) :\n # For all pairs of consecutive : find the valley in between\n valleys = valleys\n tresholds = {}\n first_peak_width = peak_widths(smoothed, [peaks[0]])[0][0] # Get peak widths\n first_peak_boundary = int(peaks[0] - first_peak_width)\n first_peak_boundary = 0 if first_peak_boundary < 0 else first_peak_boundary\n\n last_peak_width = peak_widths(smoothed, [peaks[-1]])[0][0] # Get peak widths\n last_peak_boundary = int(peaks[-1] + last_peak_width)\n last_peak_boundary = len(smoothed) if last_peak_boundary > len(smoothed) else last_peak_boundary\n\n tresholds[(0,peaks[0])] = first_peak_boundary\n\n for p1, p2 in zip(peaks, peaks[1:]) :\n valid_thresholds = []\n for val in valleys :\n if p1 < val < p2 :\n valid_thresholds.append(val)\n else :\n continue\n\n if len(valid_thresholds) > 1 :\n\n most_middle_threshold, diff_size = None, None\n for v in valid_thresholds :\n x = v - p1\n y = p2 - v\n diff = abs(x - y)\n if diff_size is None or diff < diff_size :\n most_middle_threshold = v\n diff_size = diff\n else :\n continue\n\n tresholds[(p1,p2)] = most_middle_threshold\n else :\n tresholds[(p1,p2)] = valid_thresholds[0]\n\n # last peak\n tresholds[(peaks[-1], \"inf\")] = last_peak_boundary\n\n return tresholds", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n 
BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def simple_peak_find(s, init_slope=500, start_slope=500, end_slope=200,\n min_peak_height=50, max_peak_width=1.5):\n point_gap = 10\n\n def slid_win(itr, size=2):\n \"\"\"Returns a sliding window of size 'size' along itr.\"\"\"\n itr, buf = iter(itr), []\n for _ in range(size):\n try:\n buf += [next(itr)]\n except StopIteration:\n return\n for new_item in itr:\n yield buf\n buf = buf[1:] + [new_item]\n yield buf\n\n # TODO: check these smoothing defaults\n y, t = s.values, s.index.astype(float)\n smooth_y = movingaverage(y, 9)\n dxdt = np.gradient(smooth_y) / np.gradient(t)\n # dxdt = -savitzkygolay(ts, 5, 3, deriv=1).y / np.gradient(t)\n\n init_slopes = np.arange(len(dxdt))[dxdt > init_slope]\n if len(init_slopes) == 0:\n return []\n # get the first points of any \"runs\" as a peak start\n # runs can have a gap of up to 10 points in them\n peak_sts = [init_slopes[0]]\n peak_sts += [j for i, j in slid_win(init_slopes, 2) if j - i > 10]\n peak_sts.sort()\n\n en_slopes = np.arange(len(dxdt))[dxdt < -end_slope]\n if len(en_slopes) == 0:\n return []\n # filter out any lone points farther than 10 away from their neighbors\n en_slopes = 
[en_slopes[0]]\n en_slopes += [i[1] for i in slid_win(en_slopes, 3)\n if i[1] - i[0] < point_gap or i[2] - i[1] < point_gap]\n en_slopes += [en_slopes[-1]]\n # get the last points of any \"runs\" as a peak end\n peak_ens = [j for i, j in slid_win(en_slopes[::-1], 2)\n if i - j > point_gap] + [en_slopes[-1]]\n peak_ens.sort()\n # avals = np.arange(len(t))[np.abs(t - 0.675) < 0.25]\n # print([i for i in en_slopes if i in avals])\n # print([(t[i], i) for i in peak_ens if i in avals])\n\n peak_list = []\n pk2 = 0\n for pk in peak_sts:\n # don't allow overlapping peaks\n if pk < pk2:\n continue\n\n # track backwards to find the true start\n while dxdt[pk] > start_slope and pk > 0:\n pk -= 1\n\n # now find where the peak ends\n dist_to_end = np.array(peak_ens) - pk\n pos_end = pk + dist_to_end[dist_to_end > 0]\n for pk2 in pos_end:\n if (y[pk2] - y[pk]) / (t[pk2] - t[pk]) > start_slope:\n # if the baseline beneath the peak is too large, let's\n # keep going to the next dip\n peak_list.append({'t0': t[pk], 't1': t[pk2]})\n pk = pk2\n elif t[pk2] - t[pk] > max_peak_width:\n # make sure that peak is short enough\n pk2 = pk + np.abs(t[pk:] - t[pk] - max_peak_width).argmin()\n break\n else:\n break\n else:\n # if no end point is found, the end point\n # is the end of the timeseries\n pk2 = len(t) - 1\n\n if pk == pk2:\n continue\n pk_hgt = max(y[pk:pk2]) - min(y[pk:pk2])\n if pk_hgt < min_peak_height:\n continue\n peak_list.append({'t0': t[pk], 't1': t[pk2]})\n return peak_list", "def peak(self):\n pass", "def test_peak_refinement(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, px) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_i, pmag_i, pph_i = sample_dsp.peak_refine(ploc, mx, px) # pylint: disable=W0632\n ploc_i_sms, pmag_i_sms, pph_i_sms = utilFunctions.peakInterp(mx, px, ploc)\n with self.subTest(frame=i, value=\"location\"):\n self.assert_almost_equal_rmse(ploc_i, ploc_i_sms)\n with self.subTest(frame=i, value=\"magnitude\"):\n self.assert_almost_equal_rmse(pmag_i, pmag_i_sms)\n with self.subTest(frame=i, value=\"phase\"):\n self.assert_almost_equal_rmse(pph_i, pph_i_sms)", "def get_anchor_points(self):\n rows, cols = np.where(self.overlap_mask)\n self.anchor_points = tuple(zip(rows, cols))[:: self.sampling_int]\n print(\"# of anchors: {}\".format(len(self.anchor_points)))", "def get_lat_offsets(self):\n\n startlat = self.parameters['startlatitude']\n stoplat = self.parameters['stoplatitude']\n\n #Given the start and stops,\n startidx, startvalue = utils.getnearest(self.latitudes, startlat)\n stopidx, stopvalue = utils.getnearest(self.latitudes, stoplat)\n startidx -= 2\n stopidx += 2\n latslice = np.arange(startidx, stopidx + 1)\n if utils.checkmonotonic(latslice):\n latslice = latslice\n else:\n #TODO: Support pole crossing images\n logger.error('Image is pole crossing, not currently supported.')\n '''\n print \"NOT MONOTONIC\"\n #Handle wraps around the poles\n latslice = np.arange(start_idx, stop_idx + 1)\n nlats = self.startlookup.shape[1]\n greatermask = np.where(latslice >= nlats)\n latslice[greatermask] -= nlats\n lessmask = np.where(latslice < 0)\n latslice[lessmask] += self.startlookup.shape[1]\n\n self.latsort = np.argsort(latslice)\n self.latslice = latslice[self.latsort]\n self.latsort = np.argsort(self.latsort)\n '''\n latslice = None\n logger.debug('Start latitude node is {}. Nearest lookup node is {}.'.format(startlat, startidx))\n logger.debug('Stop latitude node is {}. 
Nearest lookup node is {}.'.format(stoplat, stopidx))\n return latslice", "def global_peak(apsp, sfield, peaks, n_size=5):\n\n peak_map = {p: None for p in peaks}\n corr_map = {p: None for p in peaks}\n\n for p in peaks:\n\n idx = (apsp[p, :]<=n_size)\n peak_map[p] = sfield[idx].mean()\n corr_map[p] = sfield[p]\n\n maxima = max(peak_map, key=peak_map.get)\n\n return [maxima, peak_map]", "def detect_peak(data):\n nonlocal last, ascent_dist, ascent_start\n if data > last:\n if ascent_start is None:\n ascent_start = last\n ascent_dist += 1\n else:\n if ascent_dist:\n peak = last\n ascent_dist = 0\n if (peak - ascent_start) > thresh:\n last = data\n ascent_start = None\n return peak\n ascent_start = None\n last = data\n return None", "def parks(self):\n point_array = [0, 2, 8, 12, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14]\n park_coords = []\n parks_sorted = []\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'p':\n park_coords.append(tuple([i, j]))\n while len(park_coords) > 0:\n x, y = park_coords.pop(0)\n if len(parks_sorted) == 0:\n parks_sorted.append([(x, y)])\n else:\n borders_bool = []\n for block_no, park_block in enumerate(parks_sorted):\n borders_bool.append(False)\n for i, j in park_block:\n if abs(x - i) + abs(y - j) == 1:\n borders_bool[block_no] = True\n if (num_true := borders_bool.count(True)) == 1:\n parks_sorted[borders_bool.index(True)].append((x, y))\n elif num_true > 1:\n new_parks_sorted = []\n i_mega_park = None\n for block_no, park_block in enumerate(parks_sorted):\n if borders_bool[block_no]: # If it is bordering\n if i_mega_park is None:\n i_mega_park = block_no\n new_parks_sorted.append(park_block)\n else:\n new_parks_sorted[i_mega_park] += park_block\n new_parks_sorted[i_mega_park] += [(x, y)]\n parks_sorted = new_parks_sorted\n else:\n new_parks_sorted.append(park_block)\n parks_sorted = new_parks_sorted\n else:\n parks_sorted.append([(x, y)])\n\n return sum([point_array[len(block)] for block in parks_sorted])", "def pull_peak_times(data):\n bin_centers = np.arange(0.,1.501,0.002)\n data = np.asarray(data)\n maxs = np.argmax(data, axis=1)\n return bin_centers[maxs]", "def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # 
convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)", "def isSingleParticle(self):\r\n\r\n\t\tindex_of_maximum = np.argmax(self.scatData) #get the peak position\r\n\t\trun = 55. #define the run to use\r\n\t\t\r\n\t\tleft_rise = self.scatData[index_of_maximum]-self.scatData[index_of_maximum-int(run)] #get the rise from posn 10 to the peak\r\n\t\tleft_slope = left_rise/run\r\n\t\t\r\n\t\ttry:\r\n\t\t\tright_rise = self.scatData[index_of_maximum]-self.scatData[index_of_maximum+int(run)] #get the rise from a point the same distance away from teh peak as position 10, but on the other side\r\n\t\t\tright_slope = right_rise/run\r\n\t\texcept:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tpercent_diff = np.absolute((right_slope-left_slope)/(0.5*right_slope+0.5*left_slope))\r\n\t\tif percent_diff > 0.1:\r\n\t\t\tself.doublePeak = True", "def findPeaks(self, fit_peaks_image):\n self.pf_iterations += 1\n \n # Use pre-specified peak locations if available, e.g. 
bead calibration.\n if self.peak_locations is not None:\n return [self.peak_locations, self.peak_locations_type, True]\n \n # Otherwise, identify local maxima in the image.\n new_peaks = self.peakFinder(fit_peaks_image)\n\n # Update new peak identification threshold (if necessary).\n # Also, while threshold is greater than min_threshold we\n # are automatically not done.\n if (self.cur_threshold > self.threshold):\n self.cur_threshold -= 1.0\n return [new_peaks, \"finder\", False]\n\n # If we did not find any new peaks then we may be done.\n if (new_peaks[\"x\"].size == 0):\n return [new_peaks, \"finder\", True]\n else:\n return [new_peaks, \"finder\", False]", "def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))", "def get_peakiness(spot_data):\n return spot_data[3] / np.mean((spot_data[5], spot_data[6]))", "def spinAround(self):", "def findPeakAndValley(np):\n peakValleyArray = []\n for i in range (1, len(np) - 1):\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] > 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] < 1):\n peakValleyArray.append(i)\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] < 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] > 1):\n peakValleyArray.append(i)\n return peakValleyArray", "def get_regions_above_threshold(self, threshold, values):\n\n xlocs = arange(0, len(values))\n\n # finds all turns, between above and below threshold\n # and generate areas to call peaks in, also\n # makes sure starting and stopping above maxima is caught\n # threshold is at or equal to values, need to correct this\n starts = xlocs[r_[True, diff(values >= threshold)] & (values >= threshold)]\n stops = xlocs[r_[diff(values >= threshold), True] & (values >= threshold)]\n # add to fix off by one bug\n stops += + 1\n\n # error correction incase my logic is wrong here, assuming that starts\n # and stops are always paired, and the only two cases of not being\n # pared are if the spline starts above the cutoff or the spline starts\n # below the cutoff\n assert len(starts) == len(stops)\n\n ### important note: for 
getting values x->y [inclusive]\n # you must index an array as ar[x:(y+1)]|\n # or else you end up with one-too-few values, the second\n # index is non-inclusive\n\n # gets all local minima, function taken from:\n # http://stackoverflow.com/questions/4624970/finding-local-maxima-minima-with-numpy-in-a-1d-numpy-array\n # Can't have local minima at start or end, that would get caught by\n # previous check, really need to think about that more\n\n local_minima = self.find_local_minima(values)\n\n # append to list any local minima above threshold\n for i, minima in enumerate(local_minima):\n if minima and values[i] >= threshold:\n starts = append(starts, i)\n stops = append(stops, i)\n\n starts = array(sorted(set(starts)))\n stops = array(sorted(set(stops)))\n starts_and_stops = []\n\n # making sure we aren't in some strange state\n assert len(starts) == len(stops)\n\n # get all contigous start and stops pairs\n while len(starts) > 0:\n stop_list = stops[stops > starts[0]]\n\n # if there are no more stops left exit the loop and return the\n # currently found starts and stops\n if len(stop_list) == 0:\n break\n stop = stop_list[0]\n starts_and_stops.append((starts[0], stop))\n starts = starts[starts >= stop]\n\n starts = array([x[0] for x in starts_and_stops])\n stops = array([x[1] for x in starts_and_stops])\n return starts_and_stops, starts, stops", "def peakdetect_sine(y_axis, x_axis, points = 9, lock_frequency = False):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n # make the points argument odd\n points += 1 - points % 2\n #points += 1 - int(points) & 1 slower when int conversion needed\n \n # get raw peaks\n max_raw, min_raw = peakdetect_zero_crossing(y_axis)\n \n # define output variable\n max_peaks = []\n min_peaks = []\n \n # get global offset\n offset = np.mean([np.mean(max_raw, 0)[1], np.mean(min_raw, 0)[1]])\n # fitting a k * x + m function to the peaks might be better\n #offset_func = lambda x, k, m: k * x + m\n \n # calculate an approximate frequenzy of the signal\n Hz = []\n for raw in [max_raw, min_raw]:\n if len(raw) > 1:\n peak_pos = [x_axis[index] for index in zip(*raw)[0]]\n Hz.append(np.mean(np.diff(peak_pos)))\n Hz = 1 / np.mean(Hz)\n \n # model function\n # if cosine is used then tau could equal the x position of the peak\n # if sine were to be used then tau would be the first zero crossing\n if lock_frequency:\n func = lambda x, A, tau: A * np.sin(2 * pi * Hz * (x - tau) + pi / 2)\n else:\n func = lambda x, A, Hz, tau: A * np.sin(2 * pi * Hz * (x - tau) + \n pi / 2)\n #func = lambda x, A, Hz, tau: A * np.cos(2 * pi * Hz * (x - tau))\n \n \n #get peaks\n fitted_peaks = []\n for raw_peaks in [max_raw, min_raw]:\n peak_data = []\n for peak in raw_peaks:\n index = peak[0]\n x_data = x_axis[index - points // 2: index + points // 2 + 1]\n y_data = y_axis[index - points // 2: index + points // 2 + 1]\n # get a first approximation of tau (peak position in time)\n tau = x_axis[index]\n # get a first approximation of peak amplitude\n A = peak[1]\n \n # build list of approximations\n if lock_frequency:\n p0 = (A, tau)\n else:\n p0 = (A, Hz, tau)\n \n # subtract offset from waveshape\n y_data -= offset\n popt, pcov = curve_fit(func, x_data, y_data, p0)\n # retrieve tau and A i.e x and y value of peak\n x = popt[-1]\n y = popt[0]\n \n # create a high resolution data set for the fitted waveform\n x2 = np.linspace(x_data[0], x_data[-1], points * 10)\n y2 = func(x2, *popt)\n \n # add the offset to the results\n y += offset\n y2 += offset\n y_data += 
offset\n \n peak_data.append([x, y, [x2, y2]])\n \n fitted_peaks.append(peak_data)\n \n # structure date for output\n max_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[0])\n max_fitted = map(lambda x: x[-1], fitted_peaks[0])\n min_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[1])\n min_fitted = map(lambda x: x[-1], fitted_peaks[1])\n \n \n #pylab.plot(x_axis, y_axis)\n #pylab.hold(True)\n #for max_p, max_f in zip(max_peaks, max_fitted):\n # pylab.plot(max_p[0], max_p[1], 'x')\n # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)\n #for min_p, min_f in zip(min_peaks, min_fitted):\n # pylab.plot(min_p[0], min_p[1], 'x')\n # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)\n #pylab.show()\n \n return [max_peaks, min_peaks]", "def test_find_peaks_exact(self):\n sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]\n num_points = 500\n test_data, act_locs = _gen_gaussians_even(sigmas, num_points)\n widths = np.arange(0.1, max(sigmas))\n found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,\n min_length=None)\n np.testing.assert_array_equal(found_locs, act_locs,\n \"Found maximum locations did not equal those expected\")", "def _unique_beams(self):\n bmap, mask = self.single_pointing_telescope._unique_beams()\n block_bmap = linalg.block_diag(*[bmap+i*self.single_pointing_telescope.nfeed for i, _ in enumerate(self.pointings)])\n block_mask = linalg.block_diag(*[mask for _ in self.pointings])\n\n return block_bmap, block_mask", "def detect_min_max(arr):\n\n max_value = max(np.absolute(np.reshape(arr, -1)))\n peaks_max = []\n peaks_min = []\n x_max = []\n y_max = []\n z_max = []\n x_min = []\n y_min = []\n z_min = []\n\n for j1 in range(10, arr.shape[0]-10):\n for j2 in range(10, arr.shape[1]-10):\n for j3 in range(10, arr.shape[2]-10):\n if (np.absolute(arr[j1, j2, j3]) > 0.3*max_value):\n\n aaaa = [\n arr[j1, j2, j3 + 1], arr[j1, j2 + 1, j3],\n arr[j1 + 1, j2, j3], arr[j1, j2, j3 - 1],\n arr[j1, j2 - 1, j3], arr[j1 - 1, j2, j3],\n arr[j1 + 1, j2 + 1, j3 + 1],\n arr[j1 - 1, j2 - 1, j3 - 1],\n arr[j1 - 1, j2 + 1, j3 + 1], arr[j1, j2 + 1, j3 + 1],\n arr[j1, j2 - 1, j3 - 1], arr[j1, j2 - 1, j3 + 1],\n arr[j1, j2 + 1, j3 - 1], arr[j1 + 1, j2, j3 + 1],\n arr[j1 - 1, j2, j3 - 1], arr[j1 - 1, j2, j3 + 1],\n arr[j1 + 1, j2, j3 - 1], arr[j1 + 1, j2 + 1, j3],\n arr[j1 - 1, j2 - 1, j3], arr[j1 + 1, j2 - 1, j3],\n arr[j1 - 1, j2 + 1, j3], arr\n [j1 + 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 + 1, j3 - 1], arr\n [j1 - 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 - 1, j3 - 1], arr\n [j1 - 1, j2 + 1, j3 - 1]]\n bbbb = [\n arr[j1, j2, j3 + 9], arr[j1, j2 + 9, j3],\n arr[j1 + 9, j2, j3], arr[j1, j2, j3 - 9],\n arr[j1, j2 - 9, j3], arr[j1 - 9, j2, j3]]\n\n if ((arr[j1, j2, j3] > max(aaaa)) and (max(aaaa) > max(bbbb))):\n peaks_max = np.append(peaks_max, arr[j1, j2, j3])\n x_max = np.append(x_max, j1)\n y_max = np.append(y_max, j2)\n z_max = np.append(z_max, j3)\n\n if ((arr[j1, j2, j3] < min(aaaa)) and (min(aaaa) < min(bbbb))):\n peaks_min = np.append(peaks_min, arr[j1, j2, j3])\n x_min = np.append(x_min, j1)\n y_min = np.append(y_min, j2)\n z_min = np.append(z_min, j3)\n\n return peaks_min, np.vstack(\n (x_min, y_min, z_min)), peaks_max, np.vstack(\n (x_max, y_max, z_max))", "def detectBorders(self, points):\n lane1 = []; lane2 = []\n self.leftLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n self.rightLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n\n pointMap = np.zeros((points.shape[0], 20))\n prePoint = 
np.zeros((points.shape[0], 20))\n postPoint = np.zeros((points.shape[0], 20))\n\n dis = 10\n max1 = -1; max2 = -1\n\n ##\n ## /!\\ UNSAFE LOOP, TODO: FIX\n ##\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n pointMap[i][j] = 1\n prePoint[i][j] = -1\n postPoint[i][j] = -1\n\n for i in reversed(range(points.shape[0] - 2)):\n\n for j in range(len(points[i])):\n\n err = 320\n for m in range(1, min(points.shape[0] - 1 - i, 5)):\n check = False ## TODO: why unused ?\n\n for k in range(len(points[i + 1])):\n\n (x_m, y_m) = points[i + m][k].pt\n (x, y) = points[i][j].pt\n\n if (abs(x_m - x) < dis and abs(y_m - y) < err):\n err = abs(x_m - x)\n\n pointMap[i][j] = pointMap[i + m][k] + 1\n prePoint[i][j] = k\n postPoint[i + m][k] = j\n check = True\n\n break ## breaks out of the m loop. Why is it not conditioned by check ? TODO: ???\n\n if (pointMap[i][j] > max1):\n max1 = pointMap[i][j]\n posMax = cv2.KeyPoint(i, j, _size=0)\n \n else:\n posMax = None\n\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n if posMax:\n if (pointMap[i][j] > max2 and (i != posMax.pt[0] or j != posMax.pt[1]) and postPoint[i][j] == -1): #FIXME \"local variable 'posMax' referenced before assignment\" possible\n max2 = pointMap[i][j]\n posMax2 = cv2.KeyPoint(i, j, _size=0)\n\n\n\n if max1 == -1:\n return\n\n # DEFINES LANE 1 POINTS\n while (max1 >= 1):\n (x,y) = points[int(posMax.pt[0])][int(posMax.pt[1])].pt\n lane1.append(\n [x,y]\n )\n if (max1 == 1):\n break\n\n posMax = cv2.KeyPoint(\n posMax.pt[0]+1,\n prePoint[int(posMax.pt[0])][int(posMax.pt[1])],\n _size=0\n )\n\n max1 -= 1\n\n # DEFINES LANE 2 POINTS\n while (max2 >= 1):\n (x,y) = points[int(posMax2.pt[0])][int(posMax2.pt[1])].pt\n lane2.append(\n [x, y]\n )\n if (max2 == 1):\n break\n\n posMax2 = cv2.KeyPoint(\n posMax2.pt[0]+1,\n prePoint[int(posMax2.pt[0])][int(posMax2.pt[1])],\n _size=0\n )\n\n max2-= 1\n\n subLane1 = np.array(lane1[0:5])\n subLane2 = np.array(lane2[0:5])\n\n # checking if sublane has an empty value\n\n line1 = cv2.fitLine(subLane1, 2, 0, 0.01, 0.01)\n line2 = cv2.fitLine(subLane2, 2, 0, 0.01, 0.01)\n\n try:\n lane1X = (self.BIRDVIEW_WIDTH - line1[3]) * line1[0] / line1[1] + line1[2]\n except:\n lane1X = 0\n\n try:\n lane2X = (self.BIRDVIEW_WIDTH - line2[3]) * line2[0] / line2[1] + line2[2]\n except:\n lane2X = 0\n \n if (lane1X < lane2X):\n for i in range(len(lane1)):\n self.leftLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.rightLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]\n\n else:\n\n for i in range(len(lane1)):\n self.rightLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.leftLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]", "def final_detection(spikes):\n # Initialize the detection result\n shape = spikes.shape\n final = np.zeros(shape=(shape[0] + 1, shape[1]), dtype=np.int)\n\n # Detect patterns of interest\n final[0] = detect_pattern(spikes[0: 2], [1, 0])[0]\n final[2: shape[0] - 1] += detect_pattern(spikes, [0, 1, 1, 0])\n final[-1] += detect_pattern(spikes[-3:], [0, 1, 1])[0]\n final[-1] += detect_pattern(spikes[-2:], [0, 1])[0]\n\n # Information message\n logger.info(\"The final spike detection matrix is '%s' when looking for \"\n \"global pattern [0, 1, 1, 0], begining pattern [1, 0] and \"\n \"final patterns [0, 1, 1] and [0, 1].\", final)\n\n return final\n\n # First compute where there should be some spikes, ie. 
where we have 2\n # consecutive ones in the spikes array\n #final[1:-1, :] = spikes[:-1, :] + spikes[1:, :]\n\n # Special case: deal with the first time point\n # > if there is a 2 at time zero, this means that there's\n # also a spike detected at time 1: it has a neighbor. Put those point at 0.\n # > if there is a 1, this means there is no neighbor therefore\n # the first time should be bad. Put those point at 2.\n #final[0, :] = final[1, :]\n #final[0, np.where(final[0, :] == 2)] = 0\n #final[0, np.where(final[0, :] == 1)] = 2\n\n # Special case: deal with the laste time point\n # > same use cases as for the first time point\n #final[-1, :] = final[-2, :]\n #final[-1, np.where(final[-1, :] == 2)] = 0\n #final[-1, np.where(final[-1, :] == 1)] = 2\n\n # Finally returns the spikes, ie. points at 2\n #return (final == 2).astype(int)", "def peaks(self, start_seek, end_seek):\n \n # larger blocksizes are faster but take more mem...\n # Aha, Watson, a clue, a tradeof!\n block_size = 4096\n \n max_index = -1\n max_value = -1\n min_index = -1\n min_value = 1\n \n if end_seek > self.frames:\n end_seek = self.frames\n \n if block_size > end_seek - start_seek:\n block_size = end_seek - start_seek\n \n if block_size <= 1:\n samples = self.read(start_seek, 1)\n return samples[0], samples[0]\n elif block_size == 2:\n samples = self.read(start_seek, True)\n return samples[0], samples[1]\n \n for i in range(start_seek, end_seek, block_size):\n samples = self.read(i, block_size)\n \n local_max_index = numpy.argmax(samples)\n local_max_value = samples[local_max_index]\n \n if local_max_value > max_value:\n max_value = local_max_value\n max_index = local_max_index\n \n local_min_index = numpy.argmin(samples)\n local_min_value = samples[local_min_index]\n \n if local_min_value < min_value:\n min_value = local_min_value\n min_index = local_min_index\n \n return (min_value, max_value) if min_index < max_index else (max_value, min_value)", "def findpeaks(project_name, treatment_id, control_id, index_file_parameters, tool_parameters_dict, temp_dir, macs_cnv_region_identifiers, output_dir):\n treatment_bamfile=getcodetofilename(index_file_parameters,treatment_id)\n control_bamfile=getcodetofilename(index_file_parameters,control_id)\n \n cmd_dict=genPeakToolRunCommands(project_name,treatment_id,treatment_bamfile,control_bamfile, tool_parameters_dict, temp_dir )\n MACSpeakfile='%s/MACS/%s_peaks.bed'%(temp_dir,treatment_id)\n HMCanpeakfile='%s/HMCan/%s_regions.bed'%(temp_dir,treatment_id)\n \n if not os.path.exists(MACSpeakfile): \n flog.write('%s: Running %s\\n'%(time.asctime(),cmd_dict['MACS']))\n os.system(cmd_dict['MACS'])\n else:\n flog.write('%s: No need to run %s\\nMACS peaks already there\\n'%(time.asctime(),cmd_dict['MACS']))\n \n if not os.path.exists(HMCanpeakfile): \n flog.write('%s: Running %s\\n'%(time.asctime(),cmd_dict['HMCan'])) \n os.system(cmd_dict['HMCan'])\n else:\n flog.write('%s: No need to run %s\\nHMCan peaks already there'%(time.asctime(),cmd_dict['HMCan'])) \n \n min_size,min_coverage_gain_over_average,window_size=macs_cnv_region_identifiers\n \n MACSpeaklist=[]\n for lntxt in open(MACSpeakfile):\n ln=lntxt.rstrip('\\n').split('\\t')\n MACSpeaklist.append([ln[0],int(ln[1]),int(ln[2])]) \n flog.write('%s: Info: number of MACS peaks %d\\n'%(time.asctime(),len(MACSpeaklist)))\n missedoutregionslist=getmissedoutregions(MACSpeakfile,treatment_bamfile, min_size, min_coverage_gain_over_average,window_size)\n \n \n HMCanpeaklist=[]\n for lntxt in open(HMCanpeakfile):\n 
ln=lntxt.rstrip('\\n').split('\\t')\n HMCanpeaklist.append([ln[0],int(ln[1]),int(ln[2])])\n flog.write('%s: Info: number of HMCan peaks %d\\n'%(time.asctime(),len(HMCanpeaklist)))\n \n HMCanadditions=common.interval_join(HMCanpeaklist, missedoutregionslist,3)\n flog.write('%s: Info: number of HMCan added peaks %d\\n'%(time.asctime(),len(HMCanadditions)))\n \n all_peaklist=[]\n for peak in MACSpeaklist:\n all_peaklist.append(peak+['MACS'])\n for peak in HMCanadditions:\n all_peaklist.append(peak+['HMCan']) \n all_peaklist.sort()\n \n outcsv='%s/peaks/%s__%s__peaks.bed'%(output_dir,project_name,treatment_id)\n outjson='%s/peaks/%s__%s__peaks.json'%(output_dir,project_name,treatment_id)\n \n fout=open(outcsv,'w')\n jsondict={}\n \n for peak in all_peaklist:\n fout.write('%s\\t%d\\t%d\\t%s\\n'%tuple(peak))\n jsondict['%s:%d-%d'%tuple(peak[0:3])]={}\n jsondict['%s:%d-%d'%tuple(peak[0:3])]['called_by']=peak[3]\n \n fout.close()\n json.dump(jsondict, open(outjson,'w'),indent=4,sort_keys=True)", "def search_base_angles(self, motor_positions):\n\n # Errore nelle soluzioni\n err = [0, 0, 0]\n\n # Angolo di inizio ricerca\n # Se gia' eseguita una conversione suo i valori precedenti\n if self.isLastAnglesValid:\n self.alpha_start = list(self.alpha)\n else:\n # Angolo minimo di partenza ( -alpha_limit )\n # TO_CHECK: perche' parto da una configurazione sicuramente errata? -10?\n self.alpha_start = [self.alpha_limit_r, self.alpha_limit_r, self.alpha_limit_r]\n\n # Angoli presunti\n # self.temp = list(self.alpha_start)\n alpha = list(self.alpha_start)\n\n # Altezze reali degli attuatori\n height = [self.real_height + motor_positions[0],\n self.real_height + motor_positions[1],\n self.real_height + motor_positions[2]]\n\n # Trovo il mediano tra 0-1-2\n '''\n if any([all([motor_positions[0] > motor_positions[1], motor_positions[0] < motor_positions[2]]),\n all([motor_positions[0] < motor_positions[1], motor_positions[0] > motor_positions[2]])]):\n is_0_median = True\n is_1_median = False\n is_2_median = False\n else:\n is_0_median = False\n if any([all([motor_positions[1] > motor_positions[2], motor_positions[1] < motor_positions[0]]),\n all([motor_positions[1] < motor_positions[2], motor_positions[1] > motor_positions[0]])]):\n is_0_median = False\n is_1_median = True\n is_2_median = False\n else:\n is_1_median = False\n if any([all([motor_positions[2] > motor_positions[1], motor_positions[2] < motor_positions[0]]),\n all([motor_positions[2] < motor_positions[1], motor_positions[2] > motor_positions[0]])]):\n is_0_median = False\n is_1_median = False\n is_2_median = True\n else:\n is_2_median = False\n '''\n\n # Incrementi degli angoli\n # TO_CHECK: perche' quattro? 
L'ultimo e' di backup?\n step_alpha_base = 0.1 * Kinematic.M_TO_RAD\n step_alpha = [step_alpha_base, step_alpha_base, step_alpha_base]\n\n # Numero di cicli eseguiti\n self.cycles = 0\n\n # Calcolo la condizione iniziale\n d1 = self.distance_12(alpha, height)\n err[0] = d1 - self.base_length\n step_alpha[1] = err[0] * self.ke * step_alpha_base\n\n d2 = self.distance_23(alpha, height)\n err[1] = d2 - self.base_length\n step_alpha[2] = err[1] * self.ke * step_alpha_base\n\n d3 = self.distance_13(alpha, height)\n err[2] = d3 - self.base_length\n step_alpha[0] = err[2] * self.ke * step_alpha_base\n\n i = 0\n while i < self.cycle_limit:\n\n i += 1\n\n # Incremento alfa1 ed azzero alfa2\n alpha[0] += step_alpha[0]\n alpha[1] = self.alpha_start[1]\n\n j = 0\n\n while j < self.cycle_limit:\n\n j += 1\n\n # self.next_iteration(alpha, step_alpha, i, j, n, err)\n self.cycles += 1\n\n if self.cycles > self.cycle_limit:\n logging.error(\"Maximum number of cycles executed, no solution found!\")\n return False\n\n # Incremento alfa1 ed azzero alfa2\n alpha[1] += step_alpha[1]\n\n # Se supero l'angolo limite\n # Partendo da -10 ( -0.17 ), non devo superare 10 ( 0.17 )\n if alpha[1] > -self.alpha_limit_r:\n\n # Angolo non trovato\n step_alpha[1] = step_alpha_base\n step_alpha[0] = err[0] * step_alpha_base * self.ke\n self.alpha_start[1] = -self.alpha_limit_r - 2 * step_alpha[1]\n break\n\n d1 = self.distance_12(alpha, height)\n err[0] = d1 - self.base_length\n step_alpha[1] = err[0] * self.ke * step_alpha_base\n\n if abs(err[0]) < self.err_limit:\n\n # Trovato il minimo\n self.alpha_start[1] = alpha[1]\n step_alpha[1] = step_alpha_base\n\n n = 0\n while n < self.cycle_limit:\n\n n += 1\n\n # self.next_iteration(alpha, step_alpha, i, j, n, err)\n self.cycles += 1\n\n if self.cycles > self.cycle_limit:\n logging.error(\"Maximum number of cycles executed, no solution found!\")\n return False\n\n alpha[2] += step_alpha[2]\n d2 = self.distance_23(alpha, height)\n err[1] = d2 - self.base_length\n step_alpha[2] = err[1] * self.ke * step_alpha_base\n\n if abs(err[1]) < self.err_limit:\n\n step_alpha[2] = step_alpha_base\n d3 = self.distance_13(alpha, height)\n err[2] = d3 - self.base_length\n step_alpha[0] = err[2] * self.ke * step_alpha_base\n\n if abs(err[2]) < self.err_limit:\n\n # Trovatas la soluzione\n self.alpha = list(alpha)\n return True\n\n # NEXT j!!!\n break\n\n # Next i!!!\n break", "def scatteringPeakInfoLG(self):\t\t\r\n\t\tself.scatteringBaselineLG = (np.mean(self.lowGainScatData[0:10]))\r\n\t\tself.scatteringBaselineNoiseThreshLG = 3*np.std(self.lowGainScatData[0:10])\r\n\r\n\t\traw_max = np.amax(self.lowGainScatData)\r\n\t\tmax = raw_max - self.scatteringBaselineLG\r\n\t\t\r\n\t\tself.scatteringMaxPos_LG = np.argmax(self.lowGainScatData)\r\n\t\tself.scatteringMax_LG = max", "def addPeakResonancesToSpinSystem(peaks):\n \n # TBD check experiment type of the peak\n \n if not peaks:\n return\n \n resonances = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n resonances.append(contrib.resonance)\n \n spinSystems = []\n for resonance in resonances:\n resonanceGroup = resonance.resonanceGroup\n if resonanceGroup and (resonanceGroup not in spinSystems):\n spinSystems.append(resonanceGroup)\n\n spinSystem = None\n if len(spinSystems) == 1:\n spinSystem = spinSystems[0]\n elif len(spinSystems) > 1:\n msg = 'There are multiple spin systems for these peaks.\\n'\n msg += 'Continue and merge spin systems together?'\n if 
showOkCancel('Confirm',msg):\n spinSystem = spinSystems[0]\n for spinSystem2 in spinSystems[1:]:\n mergeSpinSystems(spinSystem2,spinSystem)\n else:\n return\n \n if spinSystem is None:\n spinSystem = peaks[0].topObject.newResonanceGroup()\n\n for resonance in resonances:\n addSpinSystemResonance(spinSystem,resonance)\n\n return spinSystem", "def createMaks(self):\n mask = np.zeros((self.height, self.width)) # (H, W)\n center = self.width // 2\n\n for lat in range(self.height):\n count = int(self.counts[lat])\n # print(lat, count)\n # print(center - count, center, center + count)\n mask[lat][center: center + count] = 1\n mask[lat][center - count: center] = 1\n\n return mask # (H, W)", "def is_in_hotspot(self):\r\n in_hotspot = False\r\n hotspots = parser.parse_hotspot_bed()\r\n \r\n if hotspots.get(self.chrom): \r\n chrom_hotspots = hotspots[self.chrom]\r\n \r\n for interval in chrom_hotspots: \r\n if interval[0] <= self.pos <= interval[1]:\r\n in_hotspot = True\r\n break\r\n \r\n return in_hotspot", "def unique_peaks(self):\n return(None)", "def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)", "def wait(self):\n self.set_vals(spin=.2)\n nearest_deg = 0\n nearest_deg_dist = self.perim_dist + 1\n for i, x in enumerate(self.ranges):\n if (x != 0) and (x < nearest_deg_dist):\n nearest_deg = i\n nearest_deg_dist = x\n if nearest_deg_dist < self.perim_dist:\n nearest_deg = ((nearest_deg + 180) % 360) - 180\n self.center(degree=nearest_deg)\n self.current_state = \"follow\"", "def find_pins_and_blockages(self, pin_list):\n # This finds the pin shapes and sorts them into \"groups\" that are connected\n # This must come before the blockages, so we can not count the pins themselves\n # as blockages.\n start_time = datetime.now()\n for pin_name in pin_list:\n self.retrieve_pins(pin_name)\n print_time(\"Retrieving pins\",datetime.now(), start_time, 4)\n \n start_time = datetime.now()\n for pin_name in pin_list:\n self.analyze_pins(pin_name)\n print_time(\"Analyzing pins\",datetime.now(), start_time, 4)\n\n # This will get all shapes as blockages and convert to grid units\n # This ignores shapes that were pins\n start_time = datetime.now()\n self.find_blockages()\n print_time(\"Finding blockages\",datetime.now(), start_time, 4)\n\n # Convert the blockages to grid units\n start_time = datetime.now()\n self.convert_blockages()\n print_time(\"Converting blockages\",datetime.now(), start_time, 4)\n \n # This will convert the pins to grid units\n # It must be done after blockages to ensure no DRCs between expanded pins and blocked grids\n start_time = datetime.now()\n for pin in pin_list:\n self.convert_pins(pin)\n print_time(\"Converting pins\",datetime.now(), start_time, 4)\n\n # Combine adjacent pins into pin groups to reduce run-time\n # by reducing the number of maze routes.\n # This algorithm is > O(n^2) so remove it for now\n # start_time = datetime.now()\n # for pin in pin_list:\n # self.combine_adjacent_pins(pin)\n # print_time(\"Combining adjacent pins\",datetime.now(), start_time, 4)\n\n\n # Separate any adjacent grids of differing net names that overlap\n # Must be done before enclosing pins\n start_time = datetime.now()\n 
self.separate_adjacent_pins(0)\n print_time(\"Separating adjacent pins\",datetime.now(), start_time, 4)\n \n # Enclose the continguous grid units in a metal rectangle to fix some DRCs\n start_time = datetime.now()\n self.enclose_pins()\n print_time(\"Enclosing pins\",datetime.now(), start_time, 4)", "def foundPeak(self, peak):\n\n self.sequence.append({\"type\": \"foundPeak\", \"coord\": peak})", "def find_reference_radials(azimuth, velocity):\n\n def find_min_quadrant(azi, vel, nvalid_gate_qd, nsum_moy):\n return azi[nvalid_gate_qd >= nsum_moy][np.argmin(np.nanmean(np.abs(vel), axis=1)[nvalid_gate_qd >= nsum_moy])]\n\n nvalid_gate = np.sum(~np.isnan(velocity), axis=1)\n nvalid_gate[nvalid_gate < 10] = 0\n nsum_tot = np.sum(~np.isnan(velocity[nvalid_gate > 0, :]))\n nvalid_beam = len(azimuth[nvalid_gate > 0])\n\n nsum_moy = nsum_tot / nvalid_beam\n if nsum_moy > 0.7 * velocity.shape[1]:\n nsum_moy = 0.7 * velocity.shape[1]\n\n try:\n start_beam = find_min_quadrant(azimuth, velocity, nvalid_gate, nsum_moy)\n except ValueError:\n start_beam = azimuth[np.argmin(np.nanmean(np.abs(velocity), axis=1))]\n\n nb = np.zeros((4,))\n for i in range(4):\n pos = (azimuth >= i * 90) & (azimuth < (i + 1) * 90)\n try:\n nb[i] = find_min_quadrant(azimuth[pos], velocity[pos, :], nvalid_gate[pos], nsum_moy)\n except ValueError:\n nb[i] = 9999\n\n opposition = start_beam + 180\n if opposition >= 360:\n opposition -= 360\n\n end_beam = nb[np.argmin(np.abs(nb - opposition))]\n\n return start_beam, end_beam", "def activatePinReading(self):\n\n for pin in self.pinsToMeasure:\n arduino.samplePinDuringCapture(self.f, self.pinMap[pin], self.wallClock)", "def incandPeakInfo(self):\t\r\n\t\tself.incandBaseline = (np.mean(self.wideBandIncandData[0:10]))\r\n\t\t\t\t\r\n\t\traw_incand_max = np.amax(self.wideBandIncandData)\r\n\t\tincand_max = raw_incand_max - self.incandBaseline\r\n\t\tincand_max_index = np.argmax(self.wideBandIncandData)\r\n\t\t\r\n\t\tself.incandMax =incand_max\r\n\t\tself.incandMaxPos = incand_max_index", "def scatteringPeakInfo(self):\r\n\t\tself.scatteringBaseline = (np.mean(self.scatData[0:10]))\r\n\t\tself.scatteringBaselineNoiseThresh = 3*np.std(self.scatData[0:10])\r\n\r\n\t\traw_max = np.amax(self.scatData)\r\n\t\tmax = raw_max - self.scatteringBaseline\r\n\t\t\r\n\t\tself.scatteringMaxPos = np.argmax(self.scatData)\r\n\t\tself.scatteringMax = max", "def peak_indices(self, **kwargs):\n kwarg_defaults = {\n 'width': 5, # ensure small spikes are ignored\n }\n kwarg_defaults.update(kwargs)\n return signal.find_peaks(self.ys, **kwarg_defaults)", "def get_following_peak_multi_channel(ind_spike, sigs, sign, method = 'biggest_amplitude'):\n \n multi_peaks =[ ]\n amplitudes = [ ]\n for c, sig in enumerate(sigs):\n multi_peaks.append(get_following_peak(ind_spike, sig, sign))\n multi_peaks = np.array(multi_peaks)\n \n ind_peaks = -np.ones(ind_spike.size, dtype = 'i')\n for i, ind in enumerate(ind_spike):\n if method == 'closer':\n ind_peaks = multi_peak[:,i].min()\n elif method == 'biggest_amplitude':\n if np.all(multi_peaks[:,i] == -1):\n ind_peaks[i] = -1\n continue\n \n peak_values = [ ]\n for c, sig in enumerate(sigs):\n if multi_peaks[c,i] != -1:\n peak_values.append(sig[multi_peaks[c,i]])\n else:\n peak_values.append(0)\n \n if sign == '+':\n biggest = np.argmax(peak_values)\n elif sign == '-':\n biggest = np.argmin(peak_values)\n ind_peaks[i] = multi_peaks[biggest,i]\n \n \n return ind_peaks+1", "def _find_peaks_heuristic(phnorm):\n median_scale = np.median(phnorm)\n\n # First make histogram with 
bins = 0.2% of median PH\n hist, bins = np.histogram(phnorm, 1000, [0, 2*median_scale])\n binctr = bins[1:] - 0.5 * (bins[1] - bins[0])\n\n # Scipy continuous wavelet transform\n pk1 = np.array(sp.signal.find_peaks_cwt(hist, np.array([2, 4, 8, 12])))\n\n # A peak must contain 0.5% of the data or 500 events, whichever is more,\n # but the requirement is not more than 5% of data (for meager data sets)\n Ntotal = len(phnorm)\n MinCountsInPeak = min(max(500, Ntotal//200), Ntotal//20)\n pk2 = pk1[hist[pk1] > MinCountsInPeak]\n\n # Now take peaks from highest to lowest, provided they are at least 40 bins from any neighbor\n ordering = hist[pk2].argsort()\n pk2 = pk2[ordering]\n peaks = [pk2[0]]\n\n for pk in pk2[1:]:\n if (np.abs(peaks-pk) > 10).all():\n peaks.append(pk)\n peaks.sort()\n return np.array(binctr[peaks])", "def get_all_offgrid_pin(self, pin, insufficient_list):\n #print(\"INSUFFICIENT LIST\",insufficient_list)\n # Find the coordinate with the most overlap\n any_overlap = set()\n for coord in insufficient_list:\n full_pin = self.convert_track_to_pin(coord)\n # Compute the overlap with that rectangle\n overlap_rect=pin.compute_overlap(full_pin)\n # Determine the max x or y overlap\n max_overlap = max(overlap_rect)\n if max_overlap>0:\n any_overlap.update([coord])\n \n return any_overlap", "def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.", "def get_following_peak(ind_spike, sig, sign):\n sig1 = sig[:-2]\n sig2 = sig[1:-1]\n sig3 = sig[2:]\n if sign == '+':\n all_peaks, = np.where(numexpr.evaluate( '(sig1<=sig2) & ( sig2>sig3)'))\n elif sign == '-':\n all_peaks, = np.where(numexpr.evaluate( '(sig1>=sig2) & ( sig2<sig3)'))\n \n ind_peaks = -np.ones(ind_spike.size, dtype = 'i')\n for i, ind in enumerate(ind_spike):\n possible = all_peaks[all_peaks>ind]\n if possible.size>0:\n ind_peaks[i] = possible[0]\n \n return ind_peaks", "def startEndPoints(mazz):\n for i in range (len(mazz)):\n for j in range (len(mazz[i])):\n if mazz[i][j] == 6:\n startx = i\n starty = j\n elif mazz[i][j] == 7:\n endx = i\n endy = j\n return startx, starty, endx, endy", "def count_single_spikes_and_bursts(info):\r\n\r\n singles = 0\r\n bursts = 0\r\n inBurst = False\r\n\r\n spikesperbursts = []\r\n spb = 2\r\n\r\n prev_start = -1000\r\n prev_end = -1000\r\n for spike in info:\r\n if spike[0] - prev_end > 80:\r\n #print('single:', spike[0])\r\n singles += 1\r\n if inBurst:\r\n spikesperbursts.append(spb)\r\n inBurst = False\r\n else:\r\n #print('burst:',inBurst, spike[0])\r\n if not inBurst:\r\n spb = 2\r\n singles -=1\r\n bursts += 1\r\n else:\r\n spb += 1\r\n inBurst = True\r\n prev_start = spike[0]\r\n prev_end = spike[1]\r\n return singles, bursts, spikesperbursts, sum(spikesperbursts)/len(spikesperbursts)", "def findAxialSegmentationLimitFromMarker(self):\n #productive \n profprint()\n asl=0\n try:\n nodes = slicer.util.getNodes('template slice position*')\n found=False\n for node in nodes.values():\n coord = [0,0,0]\n node.GetFiducialCoordinates(coord)\n asl=int(round(self.ras2ijk(coord)[2]))\n print \"limit marker found in scene, z-limit [ras]: \",coord[2]\n if found:\n print \"/!\\ there should be only one limit marker!\"\n found = True\n except:\n print \"/!\\ no 
z-limit marker in scene (required)!\"\n msgbox(\"/!\\ no z-limit marker in scene (required)!\")\n return asl", "def bls_peakfinder(results):\n maxima = find_peaks(results.power, distance=100)[0]\n\n top_power_inds = maxima[np.argsort(results.power[maxima])[::-1]]\n\n highest_peak = results.power[top_power_inds[0]]\n next_highest_peak = results.power[top_power_inds[1]]\n\n significance = highest_peak / next_highest_peak\n\n return top_power_inds, significance", "def findLowerNeedles(self, pt):\r\n nodes = slicer.util.getNodes('manual-seg_*')\r\n candidates = []\r\n validNeedles = self.findNeedles()\r\n for node in nodes.values():\r\n name = node.GetName()\r\n nb = int(name.split('_')[1]) # get needle number\r\n if nb in validNeedles:\r\n hp = self.getNeedleHighestPoint(nb)\r\n if hp[2] < pt[2]:\r\n theta = self.angle(self.getNeedleOrientation(nb),-self.getOrientationVect(pt, hp))\r\n candidates.append([name, min(theta,abs(theta-np.pi))])\r\n\r\n return candidates", "def punches(self):\n #:TODO Need to parameterize n\n # Initialize smoothing function\n # Also because I can't take the second derivitive\n\n n = 3\n assert (len(self.averages)==len(self.timestamps))\n size = len(self.averages)\n slopes = []\n for t in [0,size-n]:\n averages = np.asarray(self.averages[t:size])\n timestamps = np.asarray(self.timestamps[t:size])\n \"\"\"\n slope = np.absolute((np.corrcoef(averages,\n timestamps))*np.std(averages)/np.std(timestamps))\n \"\"\"\n slope = np.absolute(np.polyfit(timestamps, averages, 1)[0])*1000000\n #plt.scatter(timestamps, averages)\n slopes.append(slope)\n # If you were punching you are likely still punching need to set a weighting factor to this somehow\n # print(slopes[1])\n self.smoothing_queue.pop(0)\n if self.SIG_DELTA_AVERAGE < slopes[1]:\n self.smoothing_queue.append(1)\n else:\n self.smoothing_queue.append(0)\n if self.smoothing_queue.count(1) > len(self.smoothing_queue)/2:\n punching = True\n else: punching = False\n # print(self.smoothing_queue)\n\n return punching\n #self.counter +=1\n \"\"\"\n if self.counter==self.timing:\n self.counter == 0\n else:\n \"\"\"", "def stitchpeaklist(inpeak_list,mergethreshold):\n peak_list=[]\n prev_peak=['chr0',0,1]\n inpeak_list.sort()\n for curr_peak in inpeak_list:\n if curr_peak[0]==prev_peak[0] and prev_peak[2]+mergethreshold>=curr_peak[1]:\n curr_peak[1]=min(prev_peak[1],curr_peak[1])\n curr_peak[2]=max(prev_peak[2],curr_peak[2])\n else:\n if prev_peak!=['chr0',0,1]:\n peak_list.append(prev_peak)\n prev_peak=curr_peak[:]\n peak_list.append(prev_peak)\n return peak_list", "def get_arrivals(self):\n arr = rnd.poisson(self.average)\n while arr > self.maximum:\n arr = rnd.poisson(self.average)\n return arr", "def minpeaks(sig):\n diff_sig = np.diff(sig)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd]<0 and diff_sig[nd + 1]>0)])", "def _get_minimal_lanes(self):\n return np.argwhere(self.end_of_lanes == np.min(self.end_of_lanes)).flatten()", "def _findKeyframeSegments(self,array, classifications):\n keyframes=[]\n while True:\n max_idx = np.argmax(array)\n max_value = array[max_idx]\n if max_value < PEAK_THRESHOLD:\n return keyframes\n\n area_sum = peak_sum(array, max_idx, MIN_SPACING)\n low_idx = max(max_idx-MIN_SPACING,0)\n limit = min(max_idx+MIN_SPACING+1,len(array))\n if area_sum > AREA_THRESHOLD:\n max_clear = 0.0\n clear_idx = None\n for area_idx in range(low_idx,limit):\n # The actual frame is KEYFRAME_OFFSET away from the\n # result vector due to padding\n class_idx = area_idx - KEYFRAME_OFFSET\n\n # If 
there are no detections don't attemt to extract\n # then don't attempt to extract cover info\n if len(classifications[class_idx]) == 0:\n continue\n element_cover = classifications[class_idx][0].cover[2]\n if element_cover > max_clear:\n max_clear = element_cover\n clear_idx = area_idx\n\n if clear_idx is not None:\n keyframes.append(clear_idx)\n keyframes.sort()\n\n # Zero out the area identified\n for clear_idx in range(low_idx, limit):\n array[clear_idx] = 0.0", "def identify_outliers_consecutive(self, max_vel):\r\n if 'Dry' in self.outlier_methods:\r\n flagged = self.rec_track['Dry'].copy()\r\n niterations = 1\r\n for nit in range(0,niterations):\r\n not_flagged = np.where(flagged==0)[0]\r\n valid_tr = self.rec_track[not_flagged]\r\n # compute segments associated with valid detects\r\n nvalid = len(valid_tr)\r\n if nvalid < 3:\r\n self.valid = False\r\n return flagged\r\n valid_seg = self.make_segments(input_rec_track = valid_tr)\r\n for ndv in range(1,nvalid-1):\r\n sp1 = valid_seg.speed[ndv-1]\r\n sp2 = valid_seg.speed[ndv]\r\n if (sp1 > max_vel) and (sp2 > max_vel):\r\n nd_orig = valid_tr[ndv].nd\r\n flagged[nd_orig] = 1\r\n\r\n ncol = len(self.df_track.columns)\r\n self.df_track.insert(ncol, 'Consecutive', flagged)\r\n ncol = len(self.df_track.columns)\r\n\r\n return flagged", "def init(self, target):\n # Finds positive and negative peaks\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n positive_peak_ixs, peak_props = find_peaks(np.clip(target, 0, None), width=0, prominence=0, height=0)\n negative_peak_ixs, dip_props = find_peaks(np.clip(-target, 0, None), width=0, prominence=0, height=0)\n\n # Indexes for minimum and maximum center frequency\n min_fc_ix = np.argmin(np.abs(self.f - self.min_fc))\n max_fc_ix = np.argmin(np.abs(self.f - self.max_fc))\n\n # All peak indexes together\n peak_ixs = np.concatenate([positive_peak_ixs, negative_peak_ixs])\n # Exclude peak indexes which are outside of minimum and maximum center frequency\n mask = np.logical_and(peak_ixs >= min_fc_ix, peak_ixs <= max_fc_ix)\n\n if (len(positive_peak_ixs) == 0 and len(negative_peak_ixs) == 0) or np.sum(mask) == 0:\n # No peaks found\n params = []\n if self.optimize_fc:\n self.fc = self.f[(min_fc_ix + max_fc_ix) // 2]\n params.append(np.log10(self.fc))\n if self.optimize_q:\n self.q = np.sqrt(2)\n params.append(self.q)\n if self.optimize_gain:\n self.gain = 0.0\n params.append(self.gain)\n return params\n\n peak_ixs = peak_ixs[mask]\n # Properties of included peaks together\n widths = np.concatenate([peak_props['widths'], dip_props['widths']])[mask]\n heights = np.concatenate([peak_props['peak_heights'], dip_props['peak_heights']])[mask]\n # Find the biggest peak, by height AND width\n sizes = widths * heights # Size of each peak for ranking\n ixs_ix = np.argmax(sizes) # Index to indexes array which point to the biggest peak\n ix = peak_ixs[ixs_ix] # Index to f and target\n\n params = []\n if self.optimize_fc:\n self.fc = np.clip(self.f[ix], self.min_fc, self.max_fc)\n params.append(np.log10(self.fc)) # Convert to logarithmic scale for optimizer\n if self.optimize_q:\n width = widths[ixs_ix]\n # Find bandwidth which matches the peak width\n f_step = np.log2(self.f[1] / self.f[0])\n bw = np.log2((2 ** f_step) ** width)\n # Calculate quality with bandwidth\n self.q = np.sqrt(2 ** bw) / (2 ** bw - 1)\n self.q = np.clip(self.q, self.min_q, self.max_q)\n params.append(self.q)\n if self.optimize_gain:\n # Target value at center frequency\n self.gain = heights[ixs_ix] if target[ix] > 0 else 
-heights[ixs_ix]\n self.gain = np.clip(self.gain, self.min_gain, self.max_gain)\n params.append(self.gain)\n return params", "def beam_plot(self, t):\n e_positions = np.array(self.straight.step(t)[0])[:, 0].tolist()\n # Remove duplicates in data.\n for i in range(len(self.straight.data.get_elements('drift'))):\n if e_positions[i] == e_positions[i+1]:\n e_positions.pop(i+1)\n\n p_positions = np.array(self.straight.step(t)[1])[:, [0, 2]]\n\n return e_positions, p_positions", "def peakdetect_zero_crossing(y_axis, x_axis=None, window=49):\n\n if x_axis is None:\n x_axis = range(len(y_axis))\n\n length = len(y_axis)\n# if length != len(x_axis):\n# raise ValueError, 'Input vectors y_axis and x_axis must have same length'\n\n # needs to be a numpy array\n y_axis = np.asarray(y_axis)\n\n zero_indices = zero_crossings(y_axis, window=window)\n period_lengths = np.diff(zero_indices)\n\n bins = [y_axis[indice:indice + diff] for indice, diff in\n zip(zero_indices, period_lengths)]\n\n even_bins = bins[::2]\n odd_bins = bins[1::2]\n # check if even bin contains maxima\n if even_bins[0].max() > abs(even_bins[0].min()):\n hi_peaks = [bin.max() for bin in even_bins]\n lo_peaks = [bin.min() for bin in odd_bins]\n else:\n hi_peaks = [bin.max() for bin in odd_bins]\n lo_peaks = [bin.min() for bin in even_bins]\n\n\n hi_peaks_x = [x_axis[np.where(y_axis == peak)[0]] for peak in hi_peaks]\n lo_peaks_x = [x_axis[np.where(y_axis == peak)[0]] for peak in lo_peaks]\n\n maxtab = [(x, y) for x, y in zip(hi_peaks, hi_peaks_x)]\n mintab = [(x, y) for x, y in zip(lo_peaks, lo_peaks_x)]\n\n return maxtab, mintab", "def search_peaks(wavelength, flux, smooth_points=20, lmin=0, lmax=0, fmin=0.5, fmax=3., \n emission_line_file=\"lineas_c89_python.dat\", brightest_line=\"Ha\", cut=1.2, \n check_redshift = 0.0003, only_id_lines=True, plot=True, verbose=True, fig_size=12): \n # Setup wavelength limits\n if lmin == 0 :\n lmin = np.nanmin(wavelength)\n if lmax == 0 :\n lmax = np.nanmax(wavelength)\n \n # Fit a smooth continuum\n #smooth_points = 20 # Points in the interval\n step = np.int(len(wavelength)/smooth_points) # step\n w_cont_smooth = np.zeros(smooth_points) \n f_cont_smooth = np.zeros(smooth_points) \n\n for j in range(smooth_points):\n w_cont_smooth[j] = np.nanmedian([wavelength[i] for i in range(len(wavelength)) if (i > step*j and i<step*(j+1))])\n f_cont_smooth[j] = np.nanmedian([flux[i] for i in range(len(wavelength)) if (i > step*j and i<step*(j+1))]) # / np.nanmedian(spectrum)\n #print j,w_cont_smooth[j], f_cont_smooth[j]\n\n interpolated_continuum_smooth = interpolate.splrep(w_cont_smooth, f_cont_smooth, s=0)\n interpolated_continuum = interpolate.splev(wavelength, interpolated_continuum_smooth, der=0)\n\n\n funcion = flux/interpolated_continuum\n \n # Searching for peaks using cut = 1.2 by default\n peaks = []\n index_low = 0\n for i in range(len(wavelength)):\n if funcion[i] > cut and funcion[i-1] < cut :\n index_low = i\n if funcion[i] < cut and funcion[i-1] > cut :\n index_high = i\n if index_high != 0 :\n pfun = np.nanmax([funcion[j] for j in range(len(wavelength)) if (j > index_low and j<index_high+1 )])\n peak = wavelength[funcion.tolist().index(pfun)]\n if (index_high - index_low) > 1 :\n peaks.append(peak)\n \n # Identify lines\n # Read file with data of emission lines: \n # 6300.30 [OI] -0.263 15 5 5 15\n # el_center el_name el_fnl lowlow lowhigh highlow highigh \n # Only el_center and el_name are needed\n el_center,el_name,el_fnl,el_lowlow,el_lowhigh,el_highlow,el_highhigh = 
read_table(emission_line_file, [\"f\", \"s\", \"f\", \"f\", \"f\", \"f\", \"f\"] )\n #for i in range(len(el_name)):\n # print \" %8.2f %9s %6.3f %4.1f %4.1f %4.1f %4.1f\" % (el_center[i],el_name[i],el_fnl[i],el_lowlow[i], el_lowhigh[i], el_highlow[i], el_highhigh[i])\n #el_center,el_name = read_table(\"lineas_c89_python.dat\", [\"f\", \"s\"] )\n\n # In case this is needed in the future...\n# el_center = [6300.30, 6312.10, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15, 6716.47, 6730.85, 7065.28, 7135.78, 7318.39, 7329.66]\n# el_fnl = [-0.263, -0.264, -0.271, -0.296, -0.298, -0.300, -0.313, -0.318, -0.320, -0.364, -0.374, -0.398, -0.400 ]\n# el_name = [\"[OI]\", \"[SIII]\", \"[OI]\", \"[NII]\", \"Ha\", \"[NII]\", \"HeI\", \"[SII]\", \"[SII]\", \"HeI\", \"[ArIII]\", \"[OII]\", \"[OII]\" ]\n\n # Search for the brightest line in given spectrum (\"Ha\" by default)\n peaks_flux = np.zeros(len(peaks))\n for i in range(len(peaks)):\n peaks_flux[i] = flux[wavelength.tolist().index(peaks[i])]\n Ha_w_obs = peaks[peaks_flux.tolist().index(np.nanmax(peaks_flux))] \n \n # Estimate redshift of the brightest line ( Halpha line by default)\n Ha_index_list = el_name.tolist().index(brightest_line)\n Ha_w_rest = el_center[Ha_index_list]\n Ha_redshift = (Ha_w_obs-Ha_w_rest)/Ha_w_rest\n if verbose: print(\"\\n> Detected %i emission lines using %8s at %8.2f A as brightest line!!\\n\" % (len(peaks),brightest_line, Ha_w_rest)) \n# if verbose: print \" Using %8s at %8.2f A as brightest line --> Found in %8.2f with a redshift %.6f \" % (brightest_line, Ha_w_rest, Ha_w_obs, Ha_redshift)\n \n # Identify lines using brightest line (Halpha by default) as reference. \n # If abs(wavelength) > 2.5 we don't consider it identified.\n peaks_name = [None] * len(peaks)\n peaks_rest = np.zeros(len(peaks))\n peaks_redshift = np.zeros(len(peaks))\n peaks_lowlow = np.zeros(len(peaks)) \n peaks_lowhigh = np.zeros(len(peaks))\n peaks_highlow = np.zeros(len(peaks))\n peaks_highhigh = np.zeros(len(peaks))\n\n for i in range(len(peaks)):\n minimo_w = np.abs(peaks[i]/(1+Ha_redshift)-el_center)\n if np.nanmin(minimo_w) < 2.5:\n indice = minimo_w.tolist().index(np.nanmin(minimo_w))\n peaks_name[i]=el_name[indice]\n peaks_rest[i]=el_center[indice]\n peaks_redshift[i] = (peaks[i]-el_center[indice])/el_center[indice]\n peaks_lowlow[i] = el_lowlow[indice]\n peaks_lowhigh[i] = el_lowhigh[indice]\n peaks_highlow[i] = el_highlow[indice]\n peaks_highhigh[i] = el_highhigh[indice]\n if verbose: print(\"%9s %8.2f found in %8.2f at z=%.6f |z-zref| = %.6f\" % (peaks_name[i], peaks_rest[i],peaks[i], peaks_redshift[i],np.abs(peaks_redshift[i]- Ha_redshift) ))\n #print peaks_lowlow[i],peaks_lowhigh[i],peaks_highlow[i],peaks_highhigh[i]\n # Check if all redshifts are similar, assuming check_redshift = 0.0003 by default\n # If OK, add id_peaks[i]=1, if not, id_peaks[i]=0 \n id_peaks=[]\n for i in range(len(peaks_redshift)):\n if np.abs(peaks_redshift[i]-Ha_redshift) > check_redshift:\n if verbose: print(\" WARNING!!! 
Line %8s in w = %.2f has redshift z=%.6f, different than zref=%.6f\" %(peaks_name[i],peaks[i],peaks_redshift[i], Ha_redshift))\n id_peaks.append(0)\n else:\n id_peaks.append(1)\n\n if plot:\n plt.figure(figsize=(fig_size, fig_size/2.5)) \n plt.plot(wavelength, funcion, \"r\", lw=1, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"Flux / continuum\")\n \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.axhline(y=cut, color='k', linestyle=':', alpha=0.5) \n for i in range(len(peaks)):\n plt.axvline(x=peaks[i], color='k', linestyle=':', alpha=0.5)\n label=peaks_name[i]\n plt.text(peaks[i], 1.8, label) \n plt.show() \n \n continuum_limits = [peaks_lowlow, peaks_lowhigh, peaks_highlow, peaks_highhigh]\n \n if only_id_lines:\n peaks_r=[]\n peaks_name_r=[]\n peaks_rest_r=[]\n peaks_lowlow_r=[]\n peaks_lowhigh_r=[]\n peaks_highlow_r=[]\n peaks_highhigh_r=[]\n \n for i in range(len(peaks)): \n if id_peaks[i] == 1:\n peaks_r.append(peaks[i])\n peaks_name_r.append(peaks_name[i])\n peaks_rest_r.append(peaks_rest[i])\n peaks_lowlow_r.append(peaks_lowlow[i])\n peaks_lowhigh_r.append(peaks_lowhigh[i])\n peaks_highlow_r.append(peaks_highlow[i])\n peaks_highhigh_r.append(peaks_highhigh[i])\n continuum_limits_r=[peaks_lowlow_r,peaks_lowhigh_r,peaks_highlow_r,peaks_highhigh_r] \n\n return peaks_r, peaks_name_r , peaks_rest_r, continuum_limits_r \n else: \n return peaks, peaks_name , peaks_rest, continuum_limits", "def _identify_initial_pivot(X, up_thresh, down_thresh):\n x_0 = X[0]\n max_x = x_0\n max_t = 0\n min_x = x_0\n min_t = 0\n up_thresh += 1\n down_thresh += 1\n\n for t in range(1, len(X)):\n x_t = X[t]\n\n if x_t / min_x >= up_thresh:\n return VALLEY if min_t == 0 else PEAK\n\n if x_t / max_x <= down_thresh:\n return PEAK if max_t == 0 else VALLEY\n\n if x_t > max_x:\n max_x = x_t\n max_t = t\n\n if x_t < min_x:\n min_x = x_t\n min_t = t\n\n t_n = len(X)-1\n return VALLEY if x_0 < X[t_n] else PEAK", "def movePeaks(hist, peaks, dist=20):\n peakList = []\n smooth_hist = smooth(hist)\n for pk in peaks:\n p = int(round(pk))\n while True:\n start = int(round(max(0, p - dist)))\n end = int(round(min(len(hist), p + dist)))\n if end < start:\n new_peak = p\n break\n new_peak = start + np.argmax(hist[int(start):int(end)])\n\n # if the local maximum is not far from initital peak, break\n if abs(p - new_peak) <= 5: #\n break\n else:\n left = min(p, new_peak)\n right = max(p, new_peak)\n\n # Check if between initial peak and local maximum has valley\n if all(smooth_hist[left + 1:right] > smooth_hist[p]):\n break\n dist = dist / 2\n peakList.append(new_peak)\n return list(peakList)", "def is_inside_home_ring(self, detections):\n YAW_THRESHOLD = 1.3 # radians\n see_home_tag = False\n good_orientations = 0\n bad_orientations = 0\n\n for detection in detections:\n if detection.id == 256:\n see_home_tag = True\n home_detection = self._transform_to_base_link(detection)\n\n quat = [home_detection.pose.orientation.x,\n home_detection.pose.orientation.y,\n home_detection.pose.orientation.z,\n home_detection.pose.orientation.w]\n _r, _p, y = tf.transformations.euler_from_quaternion(quat)\n y -= math.pi / 2\n y = angles.normalize_angle(y)\n\n if abs(y) < YAW_THRESHOLD:\n bad_orientations += 1\n else:\n good_orientations += 1\n\n if not see_home_tag:\n return False\n\n return bad_orientations >= good_orientations", "def peakdetect_zero_crossing(y_axis, x_axis = None, window = 11):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n \n 
zero_indices = zero_crossings(y_axis, window = window)\n period_lengths = np.diff(zero_indices)\n \n bins_y = [y_axis[index:index + diff] for index, diff in \n zip(zero_indices, period_lengths)]\n bins_x = [x_axis[index:index + diff] for index, diff in \n zip(zero_indices, period_lengths)]\n \n even_bins_y = bins_y[::2]\n odd_bins_y = bins_y[1::2]\n even_bins_x = bins_x[::2]\n odd_bins_x = bins_x[1::2]\n hi_peaks_x = []\n lo_peaks_x = []\n \n #check if even bin contains maxima\n if abs(even_bins_y[0].max()) > abs(even_bins_y[0].min()):\n hi_peaks = [bin.max() for bin in even_bins_y]\n lo_peaks = [bin.min() for bin in odd_bins_y]\n # get x values for peak\n for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, hi_peaks):\n hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, lo_peaks):\n lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n else:\n hi_peaks = [bin.max() for bin in odd_bins_y]\n lo_peaks = [bin.min() for bin in even_bins_y]\n # get x values for peak\n for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, hi_peaks):\n hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, lo_peaks):\n lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n \n max_peaks = [[x, y] for x,y in zip(hi_peaks_x, hi_peaks)]\n min_peaks = [[x, y] for x,y in zip(lo_peaks_x, lo_peaks)]\n \n return [max_peaks, min_peaks]", "def _closest_peak(peak_points, prev_step, dot_limit):\n peak_dots = dot(peak_points, prev_step)\n closest_peak = abs(peak_dots).argmax()\n dot_closest_peak = peak_dots[closest_peak]\n if abs(dot_closest_peak) < dot_limit:\n raise StopIteration(\"angle between peaks too large\")\n if dot_closest_peak > 0:\n return peak_points[closest_peak]\n else:\n return -peak_points[closest_peak]", "def eeg_peaks(array,tim,onset,plot='false'):\n\tp1_i,n1_i,p2_i = onset+56,onset+104,onset+176\n\twin_p1,win_n1,win_p2 = 15,20,40\n\t# determine P1,N1 and P2 values on the basis of the maximum in GFP in a window around the expected values\n\tidx_p1 = np.logical_and(tim>p1_i-win_p1, tim<p1_i+win_p1)\n\tidx_n1 = np.logical_and(tim>n1_i-win_n1, tim<n1_i+win_n1)\n\tidx_p2 = np.logical_and(tim>p2_i-win_p2, tim<p2_i+win_p2)\n\tp1 = np.max(array[idx_p1])\n\ttp1 = tim[idx_p1][array[idx_p1].argmax()]\n\tn1 = np.min(array[idx_n1])\n\ttn1 = tim[idx_n1][array[idx_n1].argmin()]\n\tp2 = np.max(array[idx_p2])\n\ttp2 = tim[idx_p2][array[idx_p2].argmax()]\n\n\tlineax = dict(linewidth=1, color='black', linestyle='--')\n\tlinep1 = dict(linewidth=1, color='red', linestyle='--')\n\tlinen1 = dict(linewidth=1, color='green', linestyle='--')\n\tlinep2 = dict(linewidth=1, color='blue', linestyle='--')\n\n\tif plot == 'true':\t\t\n\t\tfig = plt.figure(19,figsize=[7,5])\n\t\tax = fig.add_subplot(111, autoscale_on=False, xlim=[onset-100,tp2+200], ylim=[1.25*np.min([p1,n1,p2]),1.25*np.max([p1,n1,p2])])\n\t\tplt.plot(tim,array,'k-',lw=3)\n\t\tplt.plot(tp1,p1,'ro')\n\t\tplt.plot(tn1,n1,'go')\n\t\tplt.plot(tp2,p2,'bo')\n\t\tax.axvline(p1_i-win_p1,**linep1)\n\t\tax.axvline(p1_i+win_p1,**linep1)\n\t\tax.axvline(n1_i-win_n1,**linen1)\n\t\tax.axvline(n1_i+win_n1,**linen1)\n\t\tax.axvline(p2_i-win_p2,**linep2)\n\t\tax.axvline(p2_i+win_p2,**linep2)\n\t\tax.axhline(**lineax)\n\t\tplt.text(tp1-120,1.25*p1,'P1 = %.2f muV at %.0f ms' %(p1,tp1),fontsize=10)\n\t\tplt.text(tn1-40,1.1*n1,'N1 = %.2f muV at %.0f ms' %(n1,tn1),fontsize=10)\n\t\tplt.text(tn1+40,1.1*p2,'P2 = %.2f muV at %.0f ms' %(p2,tp2),fontsize=10)\n\t\tplt.xlabel('time 
(ms)',fontsize = 13)\n\t\tplt.ylabel('Amplitude',fontsize = 13)\n\treturn [p1,n1,p2,tp1,tn1,tp2]", "def calc_nearest_ind(self, robot_pose):\n pass", "def find_chimes(self):\n while True:\n self.recursion += 1\n if self.recursion > 10:\n self.exit_status = \"Recursion limit reached\"\n return None\n\n peaks, peaks_meta_data = find_peaks(self.amplitude, height=self.height,\n distance=int(self.fs / 2),\n prominence=[self.prominence_min, self.prominence_max])\n # If correct number of peaks are present, go with that.\n correct_profile_peaks = self.search_range_for_fit(peaks, mean_peak_distance=1.5)\n if len(correct_profile_peaks) == 1:\n return correct_profile_peaks[0] # Correct peaks in there.\n\n elif len(correct_profile_peaks) == 0:\n # not captured any peaks so broaden the search, drop height, drop prom min and increase prom max\n # Height is too high\n self.max_height = self.height\n self.height = (self.max_height - self.min_height) // 2\n\n # Prominence\n self.prominence_min -= self.prominence_min // 4\n self.prominence_max += self.prominence_max // 4\n\n elif len(correct_profile_peaks) > 1:\n # Captured too many peaks so narrow the criteria.\n self.min_height = self.height\n self.height = (self.max_height - self.min_height) * 2\n\n self.prominence_min += self.prominence_min // 4\n self.prominence_max -= self.prominence_max // 4", "def bout_detect(raw_motion_data):\n import numpy as np\n window_length = 50 # Most bouts last much lesser than 20 frames,\n # so this is a really safe window to search for bouts.\n nonzero_indices = np.flatnonzero(raw_motion_data)\n all_bout_indices = [] # A list of all the indices that belong to all bouts.\n bout_indices = [] # A list of tuples of bout indices.\n bouts = [] # A list of bouts, stored as tuples.\n # Run through the list of nonzero indices and look for the first zero value\n # after it.\n do_not_append = False\n for i in nonzero_indices:\n if i not in all_bout_indices:\n bout_start = i # Bout starting point\n try:\n first_zero_after_start = np.flatnonzero(\n raw_motion_data[bout_start:bout_start+window_length+1] == 0)[0]\n except IndexError:\n try:\n first_zero_after_start = np.flatnonzero(\n raw_motion_data[bout_start:len(raw_motion_data)] == 0)[0]\n except IndexError:\n first_zero_after_start = len(raw_motion_data)\n do_not_append = True\n if not do_not_append:\n # Adjusted to start and end at 0\n bout = tuple(raw_motion_data[bout_start-1:bout_start+first_zero_after_start+1])\n bout_inds = tuple(range(bout_start-1, bout_start+first_zero_after_start+1))\n bout_indices.append(bout_inds)\n bouts.append(bout)\n for j in range(bout_start, bout_start+first_zero_after_start):\n all_bout_indices.append(j)\n\n # Filter out the bouts list to remove tuples which are smaller than 5 entries long.\n \"\"\"This is not really needed, because the fish can make some tiny movements\n that need to be detected and counted as bouts. In any case, the decision for this\n can be made much later. It is not really important to figure this out right now.\"\"\"\n # bouts[:] = [tup for tup in bouts if len(tup) > 5]\n # bout_indices[:] = [x for x in bout_indices if len(x) > 5]\n return bouts, bout_indices" ]
[ "0.595715", "0.59206724", "0.57927513", "0.57917154", "0.5787008", "0.5730863", "0.57080287", "0.5705941", "0.569645", "0.5671896", "0.56714284", "0.5667955", "0.5589922", "0.55761975", "0.55648017", "0.5513475", "0.55061954", "0.5487037", "0.547641", "0.54698986", "0.54554987", "0.54476935", "0.544658", "0.5445142", "0.5430903", "0.54248047", "0.54216874", "0.54058045", "0.5405608", "0.537928", "0.5369088", "0.5343059", "0.5310281", "0.5303721", "0.528704", "0.52804714", "0.52645135", "0.52624154", "0.5262288", "0.52617896", "0.5257836", "0.52423096", "0.52336276", "0.5219033", "0.52142745", "0.5210219", "0.52038103", "0.5194689", "0.5186317", "0.5184581", "0.51750135", "0.51690376", "0.5163268", "0.51542735", "0.51491815", "0.5138426", "0.5136298", "0.51350933", "0.51232296", "0.51201236", "0.5117204", "0.51118875", "0.5098719", "0.5090719", "0.5089365", "0.5084823", "0.50789946", "0.5077931", "0.5075623", "0.50744796", "0.5069959", "0.50625116", "0.50517553", "0.5050587", "0.5046495", "0.5023238", "0.5020699", "0.50165844", "0.50155336", "0.50081617", "0.5002268", "0.4998926", "0.4998868", "0.4992641", "0.49895033", "0.49887764", "0.49870312", "0.49851802", "0.49850422", "0.49822035", "0.4980546", "0.49781823", "0.4976759", "0.49647778", "0.49647245", "0.49511358", "0.49496794", "0.49468186", "0.49405345", "0.49390823" ]
0.5789159
4
Calibrate the chemical shifts of each spin in the peak list.
def calibrate_peaklist(peaklist, calibration, attr='shift'):
    if len(calibration) != peaklist.dims:
        raise ValueError('incorrect calibration list length')
    for peak in peaklist:
        for spin, cal in zip(peak, calibration):
            shift = getattr(spin, attr)
            shift -= cal
            setattr(spin, attr, shift)
    return peaklist
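A minimal usage sketch for the function above, assuming it is defined or imported in scope. The ToyPeakList container and SimpleNamespace spins are hypothetical stand-ins for the library's real peak-list and spin classes; they only illustrate the assumed contract: the peak list exposes dims, iterates over peaks, each peak iterates over spins carrying the attribute named by attr (here shift), and one calibration offset is subtracted per dimension.

# Illustrative stand-ins only; not the library's actual classes.
from types import SimpleNamespace


class ToyPeakList(list):
    """Hypothetical peak-list container exposing the interface used above."""

    def __init__(self, peaks, dims):
        super().__init__(peaks)
        self.dims = dims


# Two 2D peaks; each peak is a sequence of spins with a `shift` attribute.
peaklist = ToyPeakList(
    [
        [SimpleNamespace(shift=8.25), SimpleNamespace(shift=120.4)],
        [SimpleNamespace(shift=7.91), SimpleNamespace(shift=118.9)],
    ],
    dims=2,
)

# One offset per dimension (e.g. 1H and 15N referencing errors), subtracted in place.
calibrate_peaklist(peaklist, calibration=[0.05, -0.3])

print([[round(spin.shift, 2) for spin in peak] for peak in peaklist])
# [[8.2, 120.7], [7.86, 119.2]]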
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updatePeakShifts(peak):\n\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)", "def apply_calibration(self, cal):\n\n n_edges = len(self.channels) + 1\n channel_edges = np.linspace(-0.5, self.channels[-1] + 0.5, num=n_edges)\n self.bin_edges_kev = cal.ch2kev(channel_edges)", "def calibration_wheel(self):\n self.spectrum = self.spectrum", "def calibrate(self):\n\t\tLTOGRIGHT = []\n\t\tLTOGUP = []\n\t\tRTOGRIGHT = []\n\t\tRTOGUP = []\n\t\tstart = time.time()\n\t\tcalibration_time = 5.0\n\t\twhile time.time() - start < calibration_time:\n\t\t\tevents = pygame.event.get()\n\t\t\tfor event in events:\n\t\t\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t\t\tLTOGRIGHT.append(self.joystick.get_axis(self.LTOGRIGHT))\n\t\t\t\t\tLTOGUP.append(-self.joystick.get_axis(self.LTOGUP))\n\t\t\t\t\tRTOGRIGHT.append(self.joystick.get_axis(self.RTOGRIGHT))\n\t\t\t\t\tRTOGUP.append(-self.joystick.get_axis(self.RTOGUP))\n\n\t\t# calibration sets highest value equal to 1.0\n\t\tself.calibration[0] = 1.0/max(LTOGRIGHT)\n\t\tself.calibration[1] = -1.0/min(LTOGRIGHT)\n\t\tself.calibration[2] = -1.0/min(LTOGUP)\n\t\tself.calibration[3] = 1.0/max(LTOGUP)\n\t\tself.calibration[4] = 1.0/max(RTOGRIGHT)\n\t\tself.calibration[5] = -1.0/min(RTOGRIGHT)\n\t\tself.calibration[6] = -1.0/min(RTOGUP)\n\t\tself.calibration[7] = 1.0/max(RTOGUP)", "def calibrate(self):\n\t\twl = BLi.getWavelength()\n\t\tif abs(self.stokes()) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset2(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset4(yyy)\n\t\t\tself.offset9(self.dettrans())\n\t\telif abs(self.stokes()-90.) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset3(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset8(yyy)\n\t\t\tself.offset10(self.dettrans())\n\t\telse:\n\t\t\tprint \"Can't calibrate at stokes=\",self.stokes()\n\t\treturn [self.sign(),self.offset2(), self.offset3(),self.offset4(),self.offset5(),self.offset8(),self.offset9(),self.offset10()]", "def set_correction(self, matrix=[[1, 0], [0, 1]], shift=[0, 0], meta=None,\n **kwargs):\n # compute the matrix for the scale and rotation correction\n shift = (np.asarray(shift) - np.dot(self._wcslin.wcs.crpix, matrix) +\n self._wcslin.wcs.crpix)\n\n matrix = inv(matrix).T\n\n cwcs = self._wcs.deepcopy()\n\n # estimate step for numerical differentiation. 
We need a step\n # large enough to avoid rounding errors and small enough to get a\n # better precision for numerical differentiation.\n # TODO: The logic below should be revised at a later time so that it\n # better takes into account the two competing requirements.\n crpix1, crpix2 = self._wcs.wcs.crpix\n hx = max(1.0, min(20.0, (crpix1 - 1.0) / 100.0,\n (self._wcs.pixel_shape[0] - crpix1) / 100.0))\n hy = max(1.0, min(20.0, (crpix2 - 1.0) / 100.0,\n (self._wcs.pixel_shape[1] - crpix2) / 100.0))\n\n # compute new CRVAL for the image WCS:\n crpixinref = self._wcslin.wcs_world2pix(\n self._wcs.wcs_pix2world([self._wcs.wcs.crpix], 1), 1)\n crpixinref = np.dot(crpixinref - shift, matrix.T).astype(np.float64)\n self._wcs.wcs.crval = self._wcslin.wcs_pix2world(crpixinref, 1)[0]\n self._wcs.wcs.set()\n\n # approximation for CD matrix of the image WCS:\n (U, u) = _linearize(cwcs, self._wcs, self._wcslin, self._wcs.wcs.crpix,\n matrix, shift, hx=hx, hy=hy)\n self._wcs.wcs.cd = np.dot(self._wcs.wcs.cd.astype(np.longdouble),\n U).astype(np.float64)\n self._wcs.wcs.set()\n\n # save linear transformation info to the meta attribute:\n super().set_correction(matrix=matrix, shift=shift, meta=meta, **kwargs)", "def calibrate_file(self, calibration_slope, calibration_offset):\n\n self.data.average_spectrum = (calibration_slope * self.data.average_spectrum \n + calibration_offset)\n\n individual_wavelength = np.zeros(2048)\n individual_slope = np.zeros(2048)\n individual_offset = np.zeros(2048)\n\n for i_wavelength in range(2048):\n individual_wavelength[i_wavelength] = self.data.wavelength[\n i_wavelength * self.header.zero_fill]\n individual_slope[i_wavelength] = calibration_slope[\n i_wavelength * self.header.zero_fill]\n individual_offset[i_wavelength] = calibration_offset[\n i_wavelength * self.header.zero_fill]\n\n index = np.argsort(individual_wavelength)\n individual_wavelength = individual_wavelength[index]\n self.data.individual_wavelength = individual_wavelength\n average_spectrum = self.data.average_spectrum[index]\n\n i_min = np.argmin(abs(individual_wavelength - 8.0))\n i_max = np.argmin(abs(individual_wavelength - 14.0))\n\n for i in range(self.header.number_of_coadds):\n i_center_burst = np.argmax(np.absolute(self.data.interferogram[i]))\n\n size = self.header.interferogram_size\n interferogram_shift = size/2 - i_center_burst\n\n self.data.interferogram[i] = np.roll(self.data.interferogram[i], \n interferogram_shift)\n self.data.interferogram[i] = self.data.interferogram[i][\n size/2-2048:size/2+2048]\n\n window_fn = np.hanning(4096)\n \n spectrum = np.fft.fft(self.data.interferogram[i] * window_fn)\n spectrum = spectrum/3300\n spectrum = individual_slope * np.absolute(spectrum[0:2048]\n ) + individual_offset\n spectrum = spectrum[index]\n\n self.data.spectrum.append(spectrum)", "def calibration(self, cal: int, /) -> None:", "def initialise_calibration(self):\n for i in range(0, self.NUM_SENSORS):\n self.calibratedMax[i] = 0\n self.calibratedMin[i] = self.READING_TIMEOUT", "def updateAllShifts(shiftList):\n \n for shift in shiftList.measurements:\n averageShiftValue(shift)", "def expandcal(self):\n ind=np.zeros(self.spec.shape[0]).astype(int)\n for k in range(self.nscan):\n ind[self.getscanind(k)]=k\n ind[self.getcalind(k)]=k\n return ind", "def calibrate(self, cal=1.0, pol_eff=1.0):\n \n if self.ncomp == 1:\n self.data *= cal\n else:\n self.data[0] *= cal\n self.data[1] *= cal * pol_eff\n self.data[2] *= cal * pol_eff\n\n return self", "def updatePeakDimShifts(peakDim):\n\n for contrib in 
peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)", "def calibrate(): \n \n # Calibrate of the run using beam data. Creates a folder cal-files/caltag \n # containing all calibration data. \n CalObj = Calibration(steerfiles=steerfiles, name=localcaltag + '-cal') \n\n # Set Beam energy\n CalObj.set_beam_momentum(beamenergy)\n\n # Get gearfile and set air as DUT material\n localgearfile = CalObj.get_filename('gear.xml')\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='radLength', value=304000.0)\n \n # Create list of calibration steps \n calpath = create_calibration_path(CalObj)\n \n # Run the calibration steps \n CalObj.calibrate(path=calpath,ifile=rawfile_air,caltag=localcaltag)", "def calibrate(self):\n if self.iCAL_required:\n logger.info(\"iCAL-sensitive registers were modified, performing calibration...\")\n return self._run_ical()\n else:\n logger.info(\"iCAL-sensitive registers were not modified, skipping calibration...\")\n return 0 # Still success", "def calibrate(self):\n import time\n\n CALIBRATE_SLEEP = 0.75\n\n self.change_power(-self._calpow)\n encprev, encnow = 0, None\n while encprev != encnow:\n encprev = encnow\n time.sleep(CALIBRATE_SLEEP)\n encnow = self._bp.get_motor_encoder(self._port)\n self._pmin = encnow\n self.change_power(0)\n\n self.change_power(self._calpow)\n encprev, encnow = 0, None\n while encprev != encnow:\n encprev = encnow\n time.sleep(CALIBRATE_SLEEP)\n encnow = self._bp.get_motor_encoder(self._port)\n self._pmax = encnow\n self.change_power(0)\n\n if self._pmax == self._pmin:\n raise Exception('motor {} does not move'.format(self._port))\n\n self._pinit = (self._pmax + self._pmin) * 0.5\n time.sleep(0.5)\n self.to_init_position()", "def read_calibrated(self):\n\n self.read_sensors()\n\n print(\"uncalibrated readings\")\n self.print_sensor_values(self.sensorValues)\n\n for i in range(0, self.NUM_SENSORS):\n denominator = self.calibratedMax[i] - self.calibratedMin[i]\n val = 0\n if denominator != 0:\n val = (self.sensorValues[i] - self.calibratedMin[i]) * 1000 / denominator\n if val < 0:\n val = 0\n elif val > 1000:\n val = 1000\n self.sensorValues[i] = val\n\n print(\"calibrated readings\")\n self.print_sensor_values(self.sensorValues)", "def calibrate_sensors(self):\n for j in range(0, 10):\n self.read_sensors()\n for i in range(0, self.NUM_SENSORS):\n if self.calibratedMax[i] < self.sensorValues[i]:\n self.calibratedMax[i] = self.sensorValues[i]\n if self.calibratedMin[i] > self.sensorValues[i] and self.sensorValues[i] > 30:\n self.calibratedMin[i] = self.sensorValues[i]", "def check_calcs_v2(list_mols, data_file=\"\", max_restart=False, depth='medium', frc=False):\n import warnings\n from gausspy.gaussian_job_manager import server_data_unequal\n from gausspy.data_extract_utils import latest_restarts, import_moldata, load_from_server\n from gausspy.data_extract_utils import oniom_components_on_server, export_moldata, clean_local_files\n from gausspy import oniom_utils\n\n if max_restart:\n list_mols = latest_restarts(list_mols)\n\n #if we are forcing we ignore previously saved data\n if not frc:\n current_data = import_moldata(data_file)\n else:\n current_data = []\n\n #check saved data against current list of molecules\n current_labels = [m.calc.label for m in current_data]\n mol_labels = [m.calc.label for m in list_mols]\n mismatched_labels = [label for label in current_labels 
if label not in mol_labels]\n\n if mismatched_labels:\n warnings.warn(\n RuntimeWarning(\n \"Calculations: {m} in data_file do not match molecules passed to check_calcs\".format(\n m=\" \".join(mismatched_labels))\n )\n )\n\n #extract calculation data from the datafile (note at the moment we are not extracting the non calculation part\n # which means that if the calculation modifies the original ase object those changes will be lost\n for saved_mol in current_data:\n try:\n ind = mol_labels.index(saved_mol.calc.label)\n #loading the entire object = list_mols[ind] = saved_mod\n #but because of python's reference passing behaviour this would mean check_calcs_v2 would not act like check_calcs (think restarted incomplete calculations)\n list_mols[ind].calc = saved_mol.calc\n except ValueError:\n pass\n\n if frc:\n update_mask = [True for i in range(len(list_mols))]\n else:\n update_mask =server_data_unequal(list_mols)\n\n mols_to_update = [list_mols[i] for i in range(len(list_mols)) if update_mask[i]]\n for mol in mols_to_update:\n #if log files on home directory and server are different we copy those files to the home directory\n mol = load_from_server(mol, depth)\n\n #if we have an oniom calculation we check to see if the components of the calculation have been run and if so we retrieve them and attach them to the calculation object\n if 'oniom' in mol.calc.method and oniom_components_on_server(mol):\n init_mol = copy.deepcopy(mol)\n init_mol.calc.label += '_init'\n init_mol = load_from_server(init_mol, depth)\n mol.calc.components = oniom_utils.oniom_comp_calcs(init_mol)\n mol.calc.components = check_calcs_v2(mol.calc.components, depth=depth, max_restart=True, frc=frc)\n\n if data_file and any(update_mask):\n export_moldata(data_file, list_mols)\n clean_local_files(mols_to_update)\n\n return list_mols", "def calibrate(msname, parset, skymodel, logname_root, use_timecorr=False,\n time_block=None, ionfactor=0.5, outdir='.', instrument='instrument',\n solint=None, flag_filler=False, ncores=1):\n log = logging.getLogger(\"Calib\")\n\n if not use_timecorr:\n subprocess.call(\"calibrate-stand-alone -f {0} {1} {2} > {3}/logs/\"\n \"{4}_peeling_calibrate.log 2>&1\".format(msname, parset, skymodel,\n outdir, logname_root), shell=True)\n subprocess.call(\"cp -r {0}/instrument {0}/instrument_out\".format(msname),\n shell=True)\n else:\n # Perform a time-correlated solve\n dataset = msname\n blockl = time_block\n anttab = pt.table(dataset + '/ANTENNA', ack=False)\n antlist = anttab.getcol('NAME')\n instrument_orig = msname+'/instrument'\n instrument_out = msname+'/instrument_out'\n if solint < 1:\n solint = 1\n\n\n # Get time per sample and number of times\n t = pt.table(dataset, readonly=True, ack=False)\n for t2 in t.iter([\"ANTENNA1\",\"ANTENNA2\"]):\n if (t2.getcell('ANTENNA1',0)) < (t2.getcell('ANTENNA2',0)):\n timepersample = t2[1]['TIME']-t2[0]['TIME'] # sec\n trows = t2.nrows()\n t.close()\n\n # Calculate various intervals\n fwhm_min, fwhm_max = modify_weights(msname, ionfactor, dryrun=True) # s\n if time_block is None:\n # Set blockl to enclose the max FWHM and be divisible by 2 and by solint\n blockl = int(np.ceil(fwhm_max / timepersample / 2.0 / solint) * 2 * solint)\n tdiff = solint * timepersample / 3600. # difference between solutions in hours\n tlen = timepersample * np.float(blockl) / 3600. 
# length of block in hours\n nsols = int(np.ceil(trows / solint)) # number of solutions\n\n log.info('Performing time-correlated peeling for {0}...\\n'\n ' Time per sample: {1} (s)\\n'\n ' Samples in total: {2}\\n'\n ' Block size: {3} (samples)\\n'\n ' {4} (s)\\n'\n ' Number of solutions: {5}\\n'\n ' Ionfactor: {6}\\n'\n ' FWHM range: {7} - {8} (s)'.format(msname, timepersample,\n trows, blockl, tlen*3600.0, nsols, ionfactor, fwhm_min, fwhm_max))\n\n # Make a copy of the master parmdb to store time-correlated solutions\n # in, resetting and flagging as needed\n os.system('rm -rf ' +instrument_out)\n clean_and_copy_parmdb(instrument_orig, instrument_out, blockl,\n flag_filler=flag_filler, msname=msname, timepersample=timepersample)\n\n # Calibrate the chunks\n chunk_list = []\n tlen_mod = tlen / 2.0 # hours\n chunk_mid_start = blockl / 2 / solint\n chunk_mid_end = nsols - blockl / 2 / solint\n for c in range(nsols):\n chunk_obj = Chunk(dataset)\n chunk_obj.chunk = c\n chunk_obj.outdir = outdir\n if c < chunk_mid_start:\n chunk_obj.trim_start = True\n chunk_obj.t0 = 0.0 # hours\n chunk_obj.t1 = np.float(chunk_obj.t0) + tlen_mod # hours\n tlen_mod += tdiff # add one solution interval (in hours)\n elif c > chunk_mid_end:\n tlen_mod -= tdiff # subtract one solution interval (in hours)\n chunk_obj.trim_start = False\n chunk_obj.t0 = tdiff*float(chunk_obj.chunk - chunk_mid_start) # hours\n chunk_obj.t1 = np.float(chunk_obj.t0) + tlen_mod # hours\n else:\n chunk_obj.trim_start = False\n chunk_obj.t0 = tdiff*float(chunk_obj.chunk - chunk_mid_start) # hours\n chunk_obj.t1 = np.float(chunk_obj.t0) + tlen # hours\n chunk_obj.ionfactor = ionfactor\n chunk_obj.parset = parset\n chunk_obj.skymodel = skymodel\n chunk_obj.logname_root = logname_root + '_part' + str(c)\n chunk_obj.solnum = chunk_obj.chunk\n chunk_obj.output = chunk_obj.outdir + '/part' + str(chunk_obj.chunk) + os.path.basename(chunk_obj.dataset)\n chunk_obj.ntot = nsols\n chunk_list.append(chunk_obj)\n\n # Split the dataset into parts\n for chunk_obj in chunk_list:\n split_ms(chunk_obj.dataset, chunk_obj.output, chunk_obj.t0, chunk_obj.t1)\n\n # Calibrate in parallel\n pool = multiprocessing.Pool(ncores)\n pool.map(calibrate_chunk, chunk_list)\n pool.close()\n pool.join()\n\n # Copy over the solutions to the final parmdb\n pdb = lofar.parmdb.parmdb(instrument_out)\n parms = pdb.getValuesGrid(\"*\")\n for chunk_obj in chunk_list:\n instrument_input = chunk_obj.output + '/instrument'\n pdb_part = lofar.parmdb.parmdb(instrument_input)\n parms_part = pdb_part.getValuesGrid(\"*\")\n keynames = parms_part.keys()\n for key in keynames:\n if 'Phase' in key:\n tmp1=np.copy(parms[key]['values'][:,0])\n tmp1[chunk_obj.solnum] = np.copy(parms_part[key]['values'][0,0])\n parms[key]['values'][:,0] = tmp1\n os.system('rm -rf ' + instrument_out)\n lofar.expion.parmdbmain.store_parms(instrument_out, parms, create_new=True)\n\n # Clean up\n for chunk_obj in chunk_list:\n os.system('rm -rf {0}*'.format(chunk_obj.output))\n os.system('rm calibrate-stand-alone*.log')\n\n # Move the solutions to original parmdb\n subprocess.call('cp -r {0} {1}'.format(instrument_out, instrument_orig),\n shell=True)", "def steering_calibration(self):\n\n # Minimum change of rotary encoder per 100 ms to detect stall\n min_difference = 5\n # Calibration motor power in percentage (absolute)\n calibration_power = 30\n\n print('Calibration of steering axis started')\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n last_pos = 99999999 # any super big number\n # Left 
turn calibration\n while abs(current_pos - last_pos) > min_difference:\n last_pos = current_pos\n self.brick_pi.set_motor_power(self.motor_steer, calibration_power)\n time.sleep(0.1)\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n\n self.brick_pi.set_motor_power(self.motor_steer, 0)\n time.sleep(0.1)\n print('Reset motor encoder after left turn: 0')\n self.brick_pi.reset_motor_encoder(self.motor_steer)\n\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n last_pos = 99999999 # any super big number\n # Right turn calibration\n while abs(current_pos - last_pos) > min_difference:\n last_pos = current_pos\n self.brick_pi.set_motor_power(self.motor_steer, -calibration_power)\n time.sleep(0.1)\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n\n self.brick_pi.set_motor_power(self.motor_steer, 0)\n time.sleep(0.1)\n self.brick_pi.offset_motor_encoder(self.motor_steer, current_pos / 2)\n self.steering_limit = abs(\n self.brick_pi.get_motor_encoder(self.motor_steer))\n print('Offset motor encoder after right turn: ' +\n str(self.brick_pi.get_motor_encoder(self.motor_steer)))\n\n self.brick_pi.set_motor_position(self.motor_steer, 0)\n print('Calibration of steering axis completed')", "def vec_rolling_rms(decays: jnp.ndarray) -> _InitUpdate:\n return _vmap_accumulator(rolling_rms, decays)", "def applyCal(beam, row, freqs, freqs_cal, cf, T_d_x, T_d_y):\n \n P_sys_xx = beam.cols.xx[row].astype('float')\n xx_on = beam.cols.xx_cal_on[row].astype('float')\n xx_off = beam.cols.xx_cal_off[row].astype('float')\n P_on_xx = np.average(extractMid(xx_on))\n P_off_xx = np.average(extractMid(xx_off))\n \n #P_on_xx = fitLine(freqs_cal, xx_on, len(freqs))\n #P_off_xx = fitLine(freqs_cal, xx_off, len(freqs))\n\n P_sys_yy = beam.cols.yy[row].astype('float')\n yy_on = beam.cols.yy_cal_on[row].astype('float')\n yy_off = beam.cols.yy_cal_off[row].astype('float')\n P_on_yy = np.average(extractMid(yy_on))\n P_off_yy = np.average(extractMid(yy_off))\n \n #P_on_yy = fitLine(freqs_cal, yy_on, len(freqs))\n #P_off_yy = fitLine(freqs_cal, yy_off, len(freqs))\n\n \n T_sys_xx = P_sys_xx / (cf*P_on_xx - cf*P_off_xx) * T_d_x\n T_sys_yy = P_sys_yy / (cf*P_on_yy - cf*P_off_yy) * T_d_y\n \n return T_sys_xx, T_sys_yy", "def update_continuum_mask(self, refresh=False):\n\n ymin, ymax = (-1e8, 1e8)\n kwds = {\n \"xmin\": np.nan,\n \"xmax\": np.nan,\n \"ymin\": ymin,\n \"ymax\": ymax,\n \"facecolor\": \"r\",\n \"edgecolor\": \"none\",\n \"alpha\": 0.25,\n \"zorder\": -1\n }\n\n transform = lambda start, end, v=0: np.array([\n [start * (1 - v/c), ymin],\n [start * (1 - v/c), ymax],\n [end * (1 - v/c), ymax],\n [end * (1 - v/c), ymin],\n [start * (1 - v/c), ymin]\n ])\n\n mask = self._cache[\"masks\"][self.continuum_mask.currentText()]\n\n # Any added regions to mask out? v-stack these\n try:\n self._masked_wavelengths\n except AttributeError:\n self._masked_wavelengths = []\n self._masked_wavelengths_norm = []\n\n # Different kind of masks: rest_wavelength, obs_wavelength, pixels\n # rest_wavelength\n # The obsered spectrum is shifted to be at rest, so the continuum masks\n # will also be in the rest frame. 
So we don't need to shift the\n # 'rest_wavelength' mask, but we do need to shift the 'obs_wavelength'\n # mask\n\n # Get the applied velocity to shift some masks.\n try:\n rv_applied = self.parent.session.metadata[\"rv\"][\"rv_applied\"]\n except (AttributeError, KeyError):\n rv_applied = 0\n\n _ =self.parent.session.metadata[\"normalization\"][\"normalization_kwargs\"]\n \n masked_regions = [\n np.array(mask.get(\"rest_wavelength\", [])),\n np.array(mask.get(\"obs_wavelength\", [])) * (1 - rv_applied/c),\n np.array(_[self.current_order_index].get(\"exclude\", []))\n ]\n if \"pixel\" in mask:\n masked_regions.append(\n # MAGIC HACK\n self.current_order.dispersion[np.array(mask[\"pixel\"])] + 1e-3\n )\n\n for each in masked_regions:\n each.shape = (-1, 2)\n\n masked_regions = np.vstack(masked_regions)\n\n # Remove duplicate masked regions.\n _ = np.ascontiguousarray(masked_regions).view(\n np.dtype((\n np.void, \n masked_regions.dtype.itemsize * masked_regions.shape[1])))\n __, idx = np.unique(_, return_index=True)\n masked_regions = masked_regions[idx]\n\n i = 0\n for start, end in masked_regions:\n if i >= len(self._masked_wavelengths):\n # Create a polygon in the main axis.\n self._masked_wavelengths.append(\n self.ax_order.axvspan(**kwds))\n\n # And for the normalization axis.\n self._masked_wavelengths_norm.append(\n self.ax_order_norm.axvspan(**kwds))\n\n polygons = (\n self._masked_wavelengths[i],\n self._masked_wavelengths_norm[i]\n )\n for polygon in polygons:\n polygon.set_xy(transform(start, end))\n\n i += 1\n\n # Any leftover polygons?\n for polygon in self._masked_wavelengths[i:]:\n polygon.set_xy(transform(np.nan, np.nan))\n\n for polygon in self._masked_wavelengths_norm[i:]:\n polygon.set_xy(transform(np.nan, np.nan))\n\n\n if refresh:\n self.norm_plot.draw()\n return True", "def calEachCrossflow2peak():\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n peakCross = crossFlow['Node2']\n crossFlowPeakFactor = peakCross/0.8\n \n peakCross2 = crossFlow['Node6']\n crossFlowPeakFactor2 = peakCross2/0.8\n #original_factor = peakCross/0.8\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for index in gapsToFlipIndex:\n crossFlowPeakFactor[index] = -crossFlowPeakFactor[index] \n crossFlowPeakFactor2[index] = -crossFlowPeakFactor2[index]\n \n return crossFlowPeakFactor, crossFlowPeakFactor2", "def __setSpectrum__(self):\n \n self.Ck = []\n TempCk = []\n TempOneCk = OneCk()\n \n # Process 1st frequency\n Tb = self.freqs[1].totalJ\n \n for b in range(-Tb, Tb, 1):\n TempOneCk.freq = b*self.freqs[1].Vph\n TempOneCk.Amp = self.freqs[1].Cjk(b)\n self.Ck.append(TempOneCk)\n \n # Process additional frequencies\n CkSize = len(self.Ck)\n Added = FALSE\n \n for f in range(2, len(self.freqs), 1):\n # Reset temporary variables\n Tb = self.freqs[f].totalJ\n TempCk = []\n \n # Calculate each Ck coefficient\n for b in range(-Tb, Tb, 1):\n for k in range(CkSize):\n TempOneCk.Amp = Ck[k].Amp * self.freq[f].Cjk(b)\n \n # Check to see if Amp is big enough to keep\n if( abs(TempOneCk.Amp) > self.min_Ck ):\n Added = FALSE\n TempOneCk.freq = self.Ck[k].freq + b*self.freqs.Vph\n \n # If freq is already in Ck, add new value to old,\n # if not, add new value and freq to spectrum\n for c in TempCk:\n if abs(c.freq-TempOneCk.freq < DOUBLE_PRECISION):\n c.Amp += TempOneCk.Amp\n Added = TRUE\n break\n \n if (not Added):\n TempCk.append(TempOneCk)\n \n 
self.Ck = TempCk\n CkSize = len(self.Ck)", "def _doCalibration(self):\n self._cmdCalibration(2)", "def calibration(self, pulse_min: int, pulse_max: int, pulse_centre: int, /) -> None:", "def find_calibrators(master_skymodel, beamMS, flux_cut_Jy=15.0,\n maj_cut_arcsec=None, plot=False):\n log.info('Checking {0}:'.format(beamMS))\n s = lsmtool.load(master_skymodel, beamMS=beamMS)\n if maj_cut_arcsec is not None:\n log.info('Filtering out sources larger than {0} arcsec:'.format(maj_cut_arcsec))\n if s.hasPatches:\n sizes = s.getPatchSizes(units='arcsec', weight=True, applyBeam=True)\n else:\n sizes = s.getColValues('MajorAxis', units='arcsec')\n indices = np.where(sizes <= maj_cut_arcsec)[0]\n s.select(indices, force=True, aggregate=True)\n if len(s) == 0:\n return [], [], []\n\n # Make sure all fluxes are at 60 MHz\n reffreqs = s.getColValues('ReferenceFrequency')\n fluxes = s.getColValues('I')\n alphas = s.getColValues('SpectralIndex')[:, 0] # just use slope\n fluxes_60 = fluxes*(60e6/reffreqs)**alphas\n s.setColValues('I', fluxes_60)\n s.setColValues('ReferenceFrequency', np.array([60e6]*len(reffreqs)))\n\n # Now select only those sources above the given apparent flux cut\n log.info('Filtering out sources with fluxes below {0} Jy:'.format(flux_cut_Jy))\n s.select(['I', '>', flux_cut_Jy, 'Jy'], applyBeam=True, aggregate='sum',\n force=True)\n\n if len(s) > 0:\n if plot:\n print('Showing potential calibrators. Close the plot window to continue.')\n s.plot()\n cal_fluxes = s.getColValues('I', aggregate='sum', applyBeam=True).tolist()\n if s.hasPatches:\n cal_names = s.getPatchNames().tolist()\n cal_sizes = s.getPatchSizes(units='arcsec', weight=True,\n applyBeam=True).tolist()\n else:\n cal_names = s.getColValues('Name').tolist()\n cal_sizes = s.getColValues('MajorAxis', units='arcsec').tolist()\n return cal_names, cal_fluxes, cal_sizes\n else:\n return [], [], []", "def addPeakResonancesToSeqSpinSystems(peak, seqOffsets):\n \n assert len(peak.peakDims) == len(seqOffsets)\n assert None in seqOffsets # otherwise no reference point\n\n spinSystems = []\n resonanceList = []\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n spinSystem = None\n resonances = []\n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n resonances.append(resonance)\n \n if resonance.resonanceGroup:\n if not spinSystem:\n spinSystem = resonance.resonanceGroup\n\n elif spinSystem is not resonance.resonanceGroup:\n msg = 'There are multiple spin systems for peak dimension %d.\\n' % (i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(resonance.resonanceGroup,spinSystem)\n else:\n return\n\n resonanceList.append(resonances)\n spinSystems.append( spinSystem )\n\n ref = None\n I = 0\n for i, spinSystem in enumerate(spinSystems):\n if spinSystem is not None:\n if seqOffsets[i] is None:\n if ref is None:\n ref = spinSystem\n I = i\n \n else:\n if spinSystem is not ref:\n msg = 'Dimensions %d and %d have different spin systems.\\n' % (I+1,i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(spinSystem, ref)\n else:\n return\n \n if ref is not None:\n for i, seqOffset in enumerate(seqOffsets):\n \n if seqOffset:\n spinSystem = findConnectedSpinSystem(ref, seqOffset)\n if spinSystems[i] is ref:\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n showWarning('Failure','Spin system cannot be both i and i%s (dimension %d)' % (deltaText,i+1))\n 
continue\n \n \n if spinSystem and spinSystems[i]:\n if spinSystem is not spinSystems[i]:\n if (not spinSystem.residue) or (not spinSystems[i].residue):\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n \n msg = 'There is an i%s spin system already present (dimension %d).\\n' % (deltaText, i+1)\n msg += 'Merge spin systems together?'\n if showOkCancel('Confirm', msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n elif spinSystem.residue is spinSystems[i].residue:\n name = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n msg = 'There are multiple spin systems for residue %s.\\n?' % name\n msg += 'Merge spin systems together?'\n \n if showOkCancel('Confirm',msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n else:\n txt1 = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n txt2 = '%d%s' % (spinSystems[i].residue.seqCode,spinSystems[i].residue.ccpCode)\n msg = 'Cannot set spin system for F%d dim' % (i+1)\n msg += 'Offset %d causes conflict between %s and %s' % (seqOffset, txt1, txt2)\n showWarning('Failure',msg)\n return\n \n if resonanceList[i]:\n nmrProject = resonanceList[i][0].nmrProject\n if not spinSystem:\n if spinSystems[i]:\n spinSystem = spinSystems[i]\n else:\n spinSystem = nmrProject.newResonanceGroup()\n \n makeSeqSpinSystemLink(ref, spinSystem, seqOffsets[i])\n \n for resonance in resonanceList[i]:\n if resonance.resonanceGroup is not spinSystem:\n addSpinSystemResonance(spinSystem,resonance)", "def calibrate(self):\n with completion_context(\"Estimating sensor offset\", color=\"c\", bright=True):\n # Reset calibration\n self._sens_offset = np.zeros(4) # last two entries are never calibrated but useful for broadcasting\n self._wait_for_pole_at_rest()\n\n # Create parts of the calibration controller\n go_right = QQubeGoToLimCtrl(positive=True, cnt_done=int(1.5 / self._dt))\n go_left = QQubeGoToLimCtrl(positive=False, cnt_done=int(1.5 / self._dt))\n go_center = QQubePDCtrl(self.spec)\n\n # Estimate alpha offset. 
Go to both limits for theta calibration.\n meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))\n while not go_right.done:\n meas = self._qsoc.snd_rcv(go_right(to.from_numpy(meas)))\n while not go_left.done:\n meas = self._qsoc.snd_rcv(go_left(to.from_numpy(meas)))\n self._sens_offset[0] = (go_right.th_lim + go_left.th_lim) / 2\n\n # Estimate alpha offset\n self._wait_for_pole_at_rest()\n meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))\n self._sens_offset[1] = meas[1]\n\n print_cbt(\n f\"Sensor offset: \"\n f\"theta = {self._sens_offset[0]*180/np.pi:.3f} deg, \"\n f\"alpha = {self._sens_offset[1]*180/np.pi:.3f} deg\",\n \"g\",\n )\n\n with completion_context(\"Centering cube\", color=\"c\", bright=True):\n meas = self._qsoc.snd_rcv(np.zeros(self.act_space.shape))\n while not go_center.done:\n meas = self._qsoc.snd_rcv(go_center(to.from_numpy(meas - self._sens_offset)))", "def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")", "def _calibrate_measurement(self):\n\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15,\n self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15,\n self.wbb.data.wavelength)\n\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n\n calibration_slope = ((warm_blackbody - cold_blackbody) /\n (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum))\n calibration_offset = warm_blackbody - (self.wbb.data.average_spectrum * \n calibration_slope)\n\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n\n plate_temperature = self.dwr.header.spare_f[0]\n if (self.plate == -1) :\n plate_emissivity = self.dwr.header.spare_f[1]\n\n plate_blackbody = bb_radiance(plate_temperature + 273.15,\n self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n\n self.dwr.data.average_spectrum = ((self.dwr.data.average_spectrum - \n plate_emission) / (1 - plate_emissivity))", "def calibrateDistance(self, initval, clusterList, coalescenceList):\n intServ = InteractionService()\n coalescenceList.sortList()\n \n #calibrate for each particleList in coalescenceList \n intServ.calibrateIntern(initval, clusterList, coalescenceList)\n #calibrate for all clusters\n intServ.calibrateCoalescence(initval, clusterList, coalescenceList)", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = 
self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def findShifts(alternatives):\n shifted = []\n samples = {0: \"Acute\", 1:\"Challenge\", 2:\"Chronic\"}\n for item in alternatives:\n isShifted = False\n maxShift = 0.0\n percent_expr = 0.0\n what = \"\"\n 
p_value = 0\n transcript = 0\n m_samples = item.getSamples()\n for row in range(m_samples.shape[0]):\n if row in item.getNonAnnotated():\n continue\n meanAcute = np.mean(m_samples[row][:SAMPLES_PARTS[0]])\n meanChallenge = np.mean(m_samples[row][SAMPLES_PARTS[0]:SAMPLES_PARTS[1]])\n meanChronic = np.mean(m_samples[row][SAMPLES_PARTS[1]:])\n p = stats.kruskal(m_samples[row][:SAMPLES_PARTS[0]], m_samples[row][SAMPLES_PARTS[0]:SAMPLES_PARTS[1]],\n m_samples[row][SAMPLES_PARTS[1]:])[1]\n means = [meanAcute, meanChronic, meanChallenge]\n for i in range(len(means)):\n mean1 = means[i]\n for j in range(len(means)):\n mean2 = means[j]\n if mean2 > 0 and (mean1 / mean2 >= PERCENT_OF_SHIFT) and \\\n (mean1 >= RATIO_TRESHOLD or mean2 >= RATIO_TRESHOLD)\\\n and (abs(int(item.getCoordinates()[row][1]) - int(item.getCoordinates()[row][0])) <= 500):\n # and counter not in item.getNonAnnotated() \\\n if (mean1 / mean2) > maxShift:\n percent_expr = np.max([mean1, mean2])\n maxShift = (mean1 / mean2)\n transcript = row\n p_value = p\n what = (i, j)\n isShifted = True\n # counter += 1\n if isShifted:\n shifted.append(item)\n item.setMaxShift(maxShift)\n item.setNumTranscript(transcript + 1)\n item.setWhatDiffers(what)\n item.setPValue(p_value)\n item.setPercentOfExpression(percent_expr)\n return shifted", "def updateResonShift(resonance, peakDim):\n \n peak = peakDim.peak\n peakList = peak.peakList\n shiftList = peakList.dataSource.experiment.shiftList\n \n if not shiftList:\n return\n \n dataDimRef = peakDim.dataDimRef\n if not dataDimRef:\n return\n \n if peakDim.isDeleted:\n shift = resonance.findFirstShift(parentList=shiftList)\n if shift:\n averageShiftValue(shift)\n return \n \n value = peakDim.realValue # OK, even if split\n if value is None:\n return\n \n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift is None:\n unit = dataDimRef.expDimRef.unit or 'ppm' # 1D shapes fix default\n unit2 = shiftList.unit\n \n if unit2 != unit:\n shiftValue = unit_converter[(unit,unit2)](value,dataDimRef)\n else:\n shiftValue = value\n \n shift = shiftList.newShift(value=shiftValue, resonance=resonance)\n \n averageShiftValue(shift)", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop()\n ev3.Sound.beep().wait()\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(\n position_sp=-arm_revolutions_for_full_range,\n speed_sp=self.MAX_SPEED,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this\n # line is correct as is).", "def calEachCrossflow():\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n peakCross = crossFlow['Node2']\n crossFlowPeakFactor = peakCross/0.8\n\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for index in gapsToFlipIndex:\n crossFlowPeakFactor[index] = -crossFlowPeakFactor[index] \n \n return crossFlowPeakFactor", "def atmgaincal(self,flim=[10000,10500]):\n sind=self.getscanind()\n ind=self.expandcal()\n amm=np.tile(self.am,(self.nf,1)).T\n x=(self.spec-self.b[ind])/self.g[ind];x=x[sind]\n x=x/amm[sind]\n y=x[ np.where((self.za[sind]>=25) & (self.za[sind]<=35))[0] ]\n \n #for k in range(y.shape[0]):\n # y[k]=y[k]-np.nanmedian(y[k])\n\n f=self.f+9500\n 
find=np.where((f>=flim[0]) & (f<=flim[1]))[0]\n \n for k in range(y.shape[0]):\n x=self.f[find];\n yy=y[k,find];\n p=np.polyfit(x,yy,deg=1);\n y[k]=y[k]-np.poly1d(p)(self.f)\n\n return y", "def reduc(self,zarange=[20,50]):\n \n # First, take out a secular gain drift for each constant elevation\n # stare. Fit P(t) to each channel in a contiguous elevation stare,\n # normalize fit to mean=1, and normalize each chan to this.\n #deg=10\n #self.removedrift(deg)\n\n # Convert P-> T RJ\n #self.P2T()\n\n # Now fit a line to P(am) in each scan and store the results.\n self.fitam(zarange)", "def __update(self):\n\n # Make sure loads have been assigned to group\n if type(self.appliedLoad) == Load:\n self.appliedLoad = LoadSet(self.appliedLoad)\n elif type(self.appliedLoad) != LoadSet:\n raise TypeError(\"Applied load must be a Load or LoadSet\")\n\n # Begin Calculations\n _cg = self.cg # calculate the cg once to save computation time\n _appLoad = self.appliedLoad.totalForce\n _appMoment = self.appliedLoad.totalMoment\n\n coef_mat = np.zeros((len(self) * 3, len(self) * 3)) # coeff matrix\n soln_mat = np.zeros(len(self) * 3) # solution matrix\n\n cSet = [[i, i+1, i+2] for i in range(0, 3 * len(self), 3)]\n rSet = [[i+6, i+7, i+8] for i in range(0, 3 * (len(self) - 2), 3)]\n\n for i, j in enumerate(cSet):\n # i = column fastener ID\n # j = column fastener set\n # Mx = yFz - zFy\n # My = zFx - xFz\n # Mz = xFy - yFx\n\n Fx = j[0]\n Fy = j[1]\n Fz = j[2]\n\n # fill in first three rows\n coef_mat[0][Fx] = 1 # sum of Fx\n coef_mat[1][Fy] = 1 # sum of Fy\n coef_mat[2][Fz] = 1 # sum of Fz\n\n # fill in fourth row (sum of Mx at CG)\n coef_mat[3][Fy] = -(F[i].xyz[2] - _cg[2]) # -zFy\n coef_mat[3][Fz] = +(F[i].xyz[1] - _cg[1]) # +yFz\n\n # fill in fifth row (sum of My at CG)\n coef_mat[4][Fx] = +(F[i].xyz[2] - _cg[2]) # +zFx\n coef_mat[4][Fz] = -(F[i].xyz[0] - _cg[0]) # -xFz\n\n # fill in sixth row (sum of Mz at CG)\n coef_mat[5][Fx] = -(F[i].xyz[1] - _cg[1]) # -yFx\n coef_mat[5][Fy] = +(F[i].xyz[0] - _cg[0]) # +xFy\n\n for u, w in enumerate(rSet):\n # u = row fastener ID\n # w = row fastener set\n\n rX = w[0]\n rY = w[1]\n rZ = w[2]\n\n coef_mat[rX][Fy] = -(F[i].xyz[2] - F[u].xyz[2]) # -zFy\n coef_mat[rX][Fz] = +(F[i].xyz[1] - F[u].xyz[1]) # +yFz\n\n coef_mat[rY][Fx] = +(F[i].xyz[2] - F[u].xyz[2]) # +zFx\n coef_mat[rY][Fz] = -(F[i].xyz[0] - F[u].xyz[0]) # -xFz\n\n coef_mat[rZ][Fx] = -(F[i].xyz[1] - F[u].xyz[1]) # -yFx\n coef_mat[rZ][Fy] = +(F[i].xyz[0] - F[u].xyz[0]) # +xFy\n\n # fill in the solution matrix (soln_mat)\n for i in range(3):\n soln_mat[i] = -_netLoad.force[i]\n soln_mat[i+3] = -_netLoad.moment[i]\n\n # fill in the remaining rows\n for i, j in enumerate(rSet):\n # i = fastener\n # j = row\n\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n # Mx = (y_cg - y_i)F_znet - (z_cg - z_i)F_ynet + M_xnet\n soln_mat[rX] = - ((_cg[1] - F[i].xyz[1]) * _netLoad.force[2]\n - (_cg[2] - F[i].xyz[2]) * _netLoad.force[1]\n + _netLoad.moment[0])\n\n # My = (z_cg - z_i)F_xnet - (x_cg - x_i)F_znet + M_ynet\n soln_mat[rY] = -((_cg[2] - F[i].xyz[2]) * _netLoad.force[0]\n - (_cg[0] - F[i].xyz[0]) * _netLoad.force[2]\n + _netLoad.moment[1])\n\n # Mz = (x_cg - x_i)F_ynet - (y_cg - y_i)F_xnet + M_znet\n soln_mat[rZ] = -((_cg[0] - F[i].xyz[0]) * _netLoad.force[1]\n - (_cg[1] - F[i].xyz[1]) * _netLoad.force[0]\n + _netLoad.moment[2])\n\n # Solve system of equations\n matSol = np.linalg.lstsq(coef_mat, soln_mat)[0]\n\n # Add resulting fastener loads to fastener objects\n for i, j in enumerate(cSet):\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n 
F[i].force[0] = matSol[rX]\n F[i].force[1] = matSol[rY]\n F[i].force[2] = matSol[rZ]", "def contmap_coadd(scans, e_kidpar=\"e_kidpar_median.fits\", cm_func=\"kidsdata.common_mode.pca_filtering\", **kwargs):\n\n # Define a common wcs/shape\n cdelt = kwargs.get(\"cdelt\", 0.01)\n wcs = WCS(naxis=2)\n wcs.wcs.ctype = (\"OLON-SFL\", \"OLAT-SFL\")\n wcs.wcs.cdelt = (cdelt, cdelt)\n wcs.wcs.cunit = [\"deg\", \"deg\"]\n wcs.wcs.crpix = (100, 100)\n shape = (200, 200)\n\n results = []\n for scan in scans:\n kd = read_scan(scan, extra_data=[\"I\", \"Q\"])\n kd._KissRawData__check_attributes([\"R0\", \"P0\", \"calfact\", \"F_sky_Az\", \"F_sky_El\", \"A_hours\", \"A_time_pps\"])\n kd._extended_kidpar = Table.read(Path(CALIB_DIR) / e_kidpar)\n\n # kids selection\n kid_mask = kd._kids_selection(std_dev=0.3)\n ikid_KA = np.where(kid_mask & np.char.startswith(kd.list_detector, \"KA\"))[0]\n ikid_KB = np.where(kid_mask & np.char.startswith(kd.list_detector, \"KB\"))[0]\n ikid_KAB = np.concatenate([ikid_KA, ikid_KB])\n\n data, weight, hit = kd.continuum_map(\n ikid=ikid_KAB, coord=\"pdiff\", wcs=wcs, shape=shape, flatfield=\"amplitude\", cm_func=cm_func, **kwargs\n )\n del kd\n results.append((data, weight))\n\n data = [np.ma.array(result[0].data, mask=np.isnan(result[0].data)) for result in results]\n weights = [np.ma.array(result[1].data) for result in results]\n\n combined_map, combined_weights = np.ma.average(data, axis=0, weights=weights, returned=True)\n\n header = wcs.to_header()\n header[\"SCANS\"] = str(scans)\n\n combined_map = fits.ImageHDU(combined_map.filled(np.nan), header, name=\"data\")\n combined_weights = fits.ImageHDU(combined_weights, header, name=\"weight\")\n\n FakeKissRawData = namedtuple(\"FakeKissRawData\", [\"source\", \"filename\"])\n fake_kd = FakeKissRawData(source=\"dummy\", filename=\"Coadd {}\".format(scans))\n\n fig = show_contmap([combined_map], [combined_weights], None)\n fig.suptitle(fake_kd.filename)\n\n return fake_kd, fig, (combined_map, combined_weights)", "def calibration(self) -> int:", "def _recalculate_centroids(self):\n\n self._prev_centroids = dict(self.centroids)\n for cluster in self.clusters:\n self.centroids[cluster] = np.average(self.clusters[cluster], axis=0)", "def calibrate(self):\n super().calibrate()\n dataH1 = self._bus.read_i2c_block_data(self.addr,\n self.CALIBRATION_H1, 1)\n dataHX = self._bus.read_i2c_block_data(self.addr,\n self.CALIBRATION_HX, 7)\n\n self.dig_H1 = float(c_ubyte(dataH1[0]).value)\n self.dig_H2 = float(c_short((dataHX[1] << 8) + dataHX[0]).value)\n self.dig_H3 = float(c_ubyte(dataHX[2]).value)\n self.dig_H4 = float(c_short(\n (dataHX[3] << 4) + (dataHX[4] & 0xf)).value)\n self.dig_H5 = float(c_short(\n (dataHX[5] << 4) + ((dataHX[4] & 0xf0) >> 4)).value)\n self.dig_H6 = float(c_byte(dataHX[6]).value)", "def updateCentroid(self, vectors):\n previous_centroid = self.centroid\n self.vectors = vectors\n self.centroid = self.calcCentroid()\n shift = distance(previous_centroid, self.centroid)\n \n return shift", "def generatelookup(num_probes = 16):\n calibration_lookup= [[0] * 3 for i in range(num_probes)]\n # print(calibration_lookup)\n day = '050119r'\n date = '050119'\n\n def _run_calib(shots, dir):\n \"\"\" Helper function \"\"\"\n for shot in shots:\n shot = day+str(shot)\n ratios = ratio_4_doc(shot, dir)\n for i in range(num_probes):\n calibration_lookup[i][dir] = calibration_lookup[i][dir] + ratios[i][dir]\n if shot == shots[-1]: #on the last shot, divide by the number of shots\n calibration_lookup[i][dir] = 
calibration_lookup[i][dir]/ len(shots)\n\n # shot = day+str(13)\n # ratios = ratio_4_doc(shot, 2)\n # time,eastcurrent,westcurrent = loadcurrent(shot)#u\n # plt.plot(time, eastcurrent, label = 'current')\n #\n # plt.show()\n # print(\"And the max current is %f\" %polyPeak_noPlot(time,eastcurrent))\n # #check if cm or m\n # r = get_probeLocs_calib_setup(shot)\n # print(\"r is:\", r[1])\n # print(ratios)\n #\n #\n # X - direction\n print(\"Generating X - dir\")\n shots = np.arange(17,21)\n _run_calib(shots, 0)\n\n\n # Y - direction\n print(\"Generating Y - dir\")\n shots = np.arange(21,26)\n _run_calib(shots, 1)\n\n print(\"Generating Z - dir\")\n # Z - direction\n shots = np.arange(11,16)\n _run_calib(shots, 2)\n\n\n pth = os.getcwd()\n print(\"Finished! File saved as calib-%s-4x4_lookup_5.txt in cwd\" %(date))\n savetxt(os.path.join(pth, 'calib-%s-4x4_lookup_5.txt' % (date)) , calibration_lookup)", "def adjust_sff_cycles(sff_data, num_cycles):\r\n # TODO: Move to PyCogent\r\n num_flows = num_cycles * 4\r\n header, reads = sff_data\r\n\r\n h = header.copy()\r\n h['number_of_flows_per_read'] = num_flows\r\n h['header_length'] = num_flows + 40\r\n h['index_offset'] = 0\r\n h['index_length'] = 0\r\n h['flow_chars'] = 'TACG' * num_cycles\r\n\r\n read_clip_keys = [\r\n 'clip_qual_left', 'clip_qual_right', 'clip_adapter_left',\r\n 'clip_adapter_right',\r\n ]\r\n\r\n def adjust_read(read):\r\n r = read.copy()\r\n r['flowgram_values'] = read['flowgram_values'][:num_flows]\r\n enumerated_flow_indices = list(enumerate(\r\n _cumulative_sum(read['flow_index_per_base'])))\r\n\r\n # Brain teaser: find the largest base index having a flow\r\n # index less than num_flows\r\n num_bases = 6789\r\n for base_idx, flow_idx in reversed(enumerated_flow_indices):\r\n num_bases = base_idx + 1\r\n if flow_idx <= num_flows:\r\n break\r\n\r\n r['number_of_bases'] = num_bases\r\n r['flow_index_per_base'] = read['flow_index_per_base'][:num_bases]\r\n r['Bases'] = read['Bases'][:num_bases]\r\n r['quality_scores'] = read['quality_scores'][:num_bases]\r\n\r\n for key in read_clip_keys:\r\n if r[key] > num_bases:\r\n r[key] = num_bases\r\n\r\n return r\r\n\r\n return (h, imap(adjust_read, reads))", "def make_instrument_calibration(wvs,dir_list,serkowski_array,names,filter_name = 'J', p_order=2,\n plot_residuals=True,plot_starting_position=False,plot_best_fit=True,\n plot_best_fit_on_sky=True,plot_residuals_on_sky=True,\n plot_mueller_matrix=True,binsize=1,output_dir=None):\n\n assert serkowski_array.shape[0] == len(dir_list), \"The number of data directories doesn't match the length of your serkowski array\"\n\n NUM_COLORS = 20\n cm = plt.get_cmap('tab20')\n colors = [cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)]\n\n # assert p_order > 2, \"You really want p_order > 2\"\n\n if filter_name == 'J':\n wlMin = 1.18\n wlMax = 1.31\n wlMin_plot = 1.175\n wlMax_plot = 1.325\n elif filter_name == 'H':\n wlMin = 1.55\n wlMax = 1.75\n wlMin_plot = 1.5\n wlMax_plot = 1.8\n\n #\n wv_snip = wvs.shape[0] % binsize\n if wv_snip > 0:\n binned_wvs = np.mean(wvs[:-wv_snip].reshape(-1,binsize),axis=1)\n else:\n binned_wvs = np.mean(wvs.reshape(-1,binsize),axis=1)\n\n good_inds = np.where((binned_wvs>wlMin) & (binned_wvs<wlMax))\n good_wvs = binned_wvs[good_inds]\n\n ###Cycle through the serkowski array and generate the expected polarization\n serkowski_q = np.zeros([binned_wvs.shape[0],serkowski_array.shape[0]]) #The expected q shape [n_waves,n_datasets]\n serkowski_u = np.zeros([binned_wvs.shape[0],serkowski_array.shape[0]]) #The expected 
u shape [n_waves,n_datasets]\n\n for i in range(serkowski_q.shape[1]):\n _,q,u = serkowski_polarization(binned_wvs,serkowski_array[i,1],\n serkowski_array[i,0],serkowski_array[i,2],serkowski_array[i,3])\n serkowski_q[:,i] = q\n serkowski_u[:,i] = u\n \n ###Now let's read in all the data\n data_q_pair1 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data q shape [n_waves,n_datasets]\n data_u_pair1 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data u\n data_qerrs_pair1 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data q errors\n data_uerrs_pair1 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data u errors\n\n data_q_pair2 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data q\n data_u_pair2 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data u\n data_qerrs_pair2 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data q errors\n data_uerrs_pair2 = np.zeros([binned_wvs.shape[0],len(dir_list)]) #The data u errors\n\n for i in range(len(dir_list)):\n directory = dir_list[i]\n\n qu_data = np.load(directory+\"/qu_data.npy\")\n qu_inds = np.load(directory+\"/qu_ind.npy\")\n \n #Expand the data back out\n qs = qu_data[0]\n us = qu_data[1]\n q_errs = qu_data[2]\n u_errs = qu_data[3]\n #Expand the indices\n qind = qu_inds[0]\n uind = qu_inds[1]\n\n # print(qs[qind==0][:,:-wv_snip].reshape(qs[qind==0].shape[0],binsize,-1).shape)\n # return\n\n if wv_snip > 0:\n\n #Get q and u - Pair 1\n data_q_pair1[:,i] = np.nanmedian(qs[qind==0][:,:-wv_snip].reshape(qs[qind==0].shape[0],-1,binsize),axis=(0,2))\n data_u_pair1[:,i] = np.nanmedian(us[uind==0][:,:-wv_snip].reshape(us[uind==0].shape[0],-1,binsize),axis=(0,2))\n \n #Take the standard error on the mean as the errors\n nqs = qs[qind==0].shape[0]+binsize\n nus = us[uind==0].shape[0]+binsize\n\n data_qerrs_pair1[:,i] = np.nanstd(qs[qind==0][:,:-wv_snip].reshape(qs[qind==0].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nqs)\n data_uerrs_pair1[:,i] = np.nanstd(us[uind==0][:,:-wv_snip].reshape(us[uind==0].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nus)\n \n #Take the photometric errors - probably not as reliable\n # data_qerrs_pair1[:,i] = np.sqrt(np.nanmean(q_errs[qind==0][:,:-wv_snip].reshape(qs[qind==0].shape[0],-1,binsize)**2, axis = (0,2)))\n # data_uerrs_pair1[:,i] = np.sqrt(np.nanmean(u_errs[uind==0][:,:-wv_snip].reshape(us[uind==0].shape[0],-1,binsize)**2, axis = (0,2)))\n\n\n #Get q and u - Pair 2\n data_q_pair2[:,i] = np.nanmedian(qs[qind==1][:,:-wv_snip].reshape(qs[qind==1].shape[0],-1,binsize),axis=(0,2))\n data_u_pair2[:,i] = np.nanmedian(us[uind==1][:,:-wv_snip].reshape(us[uind==1].shape[0],-1,binsize),axis=(0,2))\n\n #Take the standard error on the mean as the errors\n nqs = qs[qind==1].shape[0]+binsize\n nus = us[uind==1].shape[0]+binsize\n\n data_qerrs_pair2[:,i] = np.nanstd(qs[qind==1][:,:-wv_snip].reshape(qs[qind==1].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nqs)\n data_uerrs_pair2[:,i] = np.nanstd(us[uind==1][:,:-wv_snip].reshape(us[uind==1].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nus)\n\n #Take the photometric errors - probably not as reliable\n # data_qerrs_pair2[:,i] = np.sqrt(np.nanmean(q_errs[qind==1][:,:-wv_snip].reshape(qs[qind==1].shape[0],-1,binsize)**2, axis = (0,2)))\n # data_uerrs_pair2[:,i] = np.sqrt(np.nanmean(u_errs[uind==1][:,:-wv_snip].reshape(us[uind==1].shape[0],-1,binsize)**2, axis = (0,2)))\n\n else:\n\n data_q_pair1[:,i] = np.nanmedian(qs[qind==0].reshape(qs[qind==0].shape[0],-1,binsize),axis=(0,2))\n data_u_pair1[:,i] = 
np.nanmedian(us[uind==0].reshape(us[uind==0].shape[0],-1,binsize),axis=(0,2))\n #Take the standard error on the mean as the errors\n nqs = qs[qind==0].shape[0]*binsize\n nus = us[uind==0].shape[0]*binsize\n data_qerrs_pair1[:,i] = np.nanstd(qs[qind==0].reshape(qs[qind==0].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nqs)\n data_uerrs_pair1[:,i] = np.nanstd(us[uind==0].reshape(us[uind==0].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nus)\n \n #Take the photometric errors - probably not as reliable\n # data_qerrs_pair1[:,i] = np.sqrt(np.nanmean(q_errs[qind==0].reshape(qs[qind==0].shape[0],-1,binsize)**2, axis = (0,1)))\n # data_uerrs_pair1[:,i] = np.sqrt(np.nanmean(u_errs[uind==0].reshape(us[uind==0].shape[0],-1,binsize)**2, axis = (0,1)))\n\n data_q_pair2[:,i] = np.nanmedian(qs[qind==1].reshape(qs[qind==1].shape[0],-1,binsize),axis=(0,2))\n data_u_pair2[:,i] = np.nanmedian(us[uind==1].reshape(us[uind==1].shape[0],-1,binsize),axis=(0,2))\n \n #Take the standard error on the mean as the errors\n nqs = qs[qind==1].shape[0]*binsize\n nus = us[uind==1].shape[0]*binsize\n data_qerrs_pair2[:,i] = np.nanstd(qs[qind==1].reshape(qs[qind==1].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nqs)\n data_uerrs_pair2[:,i] = np.nanstd(us[uind==1].reshape(us[uind==1].shape[0],-1,binsize),axis=(0,2))/np.sqrt(nus)\n\n #Take the photometric errors - probably not as reliable\n # data_qerrs_pair2[:,i] = np.sqrt(np.nanmean(q_errs[qind==1].reshape(qs[qind==1].shape[0],-1,binsize)**2, axis = (0,1)))\n # data_uerrs_pair2[:,i] = np.sqrt(np.nanmean(u_errs[uind==1].reshape(us[uind==1].shape[0],-1,binsize)**2, axis = (0,1)))\n \n \n # data_q_pair1[:,i] = np.mean(qs[qind==0],axis=0)\n # data_u_pair1[:,i] = np.mean(us[uind==0],axis=0)\n # data_qerrs_pair1[:,i] = np.sqrt(np.nanmean(q_errs[qind==0]**2, axis = 0))\n # data_uerrs_pair1[:,i] = np.sqrt(np.nanmean(u_errs[uind==0]**2, axis = 0))\n\n # data_q_pair2[:,i] = np.nanmean(qs[qind==1],axis=0)\n # data_u_pair2[:,i] = np.nanmean(us[qind==1],axis=0)\n # data_qerrs_pair2[:,i] = np.sqrt(np.nanmean(q_errs[qind==1]**2, axis = 0))\n # data_uerrs_pair2[:,i] = np.sqrt(np.nanmean(u_errs[uind==1]**2, axis = 0))\n\n #Ok Data Loaded. 
\n\n ## Define a bunch of functions\n def forward_model_detector_qu(p,wvs,serkowski_qu):\n '''\n Calculates the detector qu for a given trace pair, give the polynomial \n coefficients, p, the wavelengths and input serkowski q and u\n\n Input:\n p - An array of length 4*(p_order+1) that contains the coefficients\n for the q and u crosstalks and efficiencies\n '''\n\n #Detector q = serkowski_q*q_efficiency + serkowski_u*u->q crosstalk\n detector_q = serkowski_qu[0]*np.poly1d(p[:p_order+1])(wvs) + serkowski_qu[1]*np.poly1d(p[p_order+1:2*(p_order+1)])(wvs)\n\n #Detector u = serkowski_q*q-u crosstalk + serkowski_u*u_efficiency\n detector_u = serkowski_qu[0]*np.poly1d(p[2*(p_order+1):3*(p_order+1)])(wvs) + serkowski_qu[1]*np.poly1d(p[3*(p_order+1):])(wvs)\n\n return detector_q,detector_u\n\n def residuals(p,wvs,serkowski_q,serkowski_u,data_q,data_u):\n '''\n Calculate the residuals \n '''\n good_wvs = np.where((wvs > wlMin) & (wvs < wlMax))\n q_residuals = []\n u_residuals = [] \n\n for i in range(serkowski_array.shape[0]):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n detector_q,detector_u = forward_model_detector_qu(p,wvs,serkowski_qu)\n q_residuals.append((data_q[:,i]-detector_q)[good_wvs])\n u_residuals.append((data_u[:,i]-detector_u)[good_wvs])\n \n q_residuals = np.swapaxes(np.array(q_residuals),0,1)\n u_residuals = np.swapaxes(np.array(u_residuals),0,1)\n\n return q_residuals,u_residuals\n \n def to_minimize(p,wvs,serkowski_q,serkowski_u,data_q,data_u,data_qerrs,data_uerrs):\n good_wvs = np.where((wvs > wlMin) & (wvs < wlMax))\n\n #We'll weight the residuals by the signal divided by the errors\n q_weights = serkowski_q[good_wvs]/data_qerrs[good_wvs]\n u_weights = serkowski_u[good_wvs]/data_uerrs[good_wvs]\n \n q_residuals,u_residuals = residuals(p,wvs,serkowski_q,serkowski_u,data_q,data_u)\n \n # return np.sqrt(np.nansum(((q_residuals/data_qerrs[good_wvs])**2))+np.sum(((u_residuals/data_uerrs[good_wvs])**2)))\n return np.sqrt(np.nansum(((q_residuals*q_weights)**2)+np.sum(((u_residuals*u_weights)**2))))\n\n ### We'll use a previous fit as a starting point. \n p0_tracepair1 =[1.0661088786177235, -1.7766660078904966, -0.9104631784841992, 2.64385024405191, 1.5556061450161307, \n -2.2859691699493196, -1.3236229169762028, 1.1997075732954565, 1.5948731326729078, -2.7313632656048603, -1.336150812645815, \n 2.4183673050376795, 0.7061071621209151, -1.3258240570135162, -0.8934473062537349, 1.278423011227312]\n p0_tracepair2 = [2.0007323143773106, -3.0369726347949255, -1.6047715137239809, 3.520169106554172, 1.0160466675010955, \n -1.506184150325796, -0.8435650594122575, 0.5977833961415223, 0.970243805370234, -1.844184514283156, -0.8705549135207251, \n 1.729607916941225, -1.4177309303796624, 1.7492484823597478, 0.9317500283173318, -1.6170943302985021]\n\n if p_order == 3:\n pstart_tracepair1 = p0_tracepair1\n pstart_tracepair2 = p0_tracepair2\n\n #If we want higher orders will still start at the same place, but fill in zeros elsewhere. 
\n elif p_order ==2:\n if filter_name == \"J\":\n pstart_tracepair1 = [ 5.05514516, -13.06881182, 9.18231296, 3.25136349,\n -9.09851162, 5.90894795, 2.97123352, -8.42829543,\n 5.41801771, -4.06984794, 10.22431952, -7.16947187]\n\n pstart_tracepair2 = [ -1.18368653, 2.73526593, -0.86543283, 9.75453874,\n -25.30567682, 15.90264321, 4.821971 , -12.84814372,\n 8.06299775, 1.37203403, -3.28463334, 1.0456316 ]\n\n else:\n pstart_tracepair1 = [ 0.5075295 , -1.37078725, 1.70441459, 1.52251149, -7.11868788,\n 6.57810932, -0.66553329, 2.33925357, -2.57755498, -4.13331747,\n 11.05530426, -8.0378703 ]\n pstart_tracepair2 = [ 8.57548499, -27.50437317, 22.7989317 , 9.88783885,\n -28.34238484, 19.17963517, 5.53634598, -18.20028437,\n 14.38411802, 1.81207485, -3.49035845, 0.65215973]\n\n\n # pstart_tracepair1 = np.zeros([4*(p_order+1)])\n # pstart_tracepair2 = np.zeros([4*(p_order+1)])\n\n # pstart_tracepair1[0] = 1\n # pstart_tracepair1[8] = 1\n \n else:\n pstart_tracepair1 = np.zeros([4*(p_order+1)])\n pstart_tracepair2 = np.zeros([4*(p_order+1)])\n\n for i in range(4):\n pstart_tracepair1[i*(p_order+1):i*(p_order+1)+4] = p0_tracepair1[i*4:(i+1)*4]\n pstart_tracepair2[i*(p_order+1):i*(p_order+1)+4] = p0_tracepair2[i*4:(i+1)*4]\n\n pstart_tracepair1\n\n\n starting_residuals_p1 = residuals(pstart_tracepair1,binned_wvs,serkowski_q,\n serkowski_u,data_q_pair1,data_u_pair1)\n\n ## Debugging things\n # print(np.shape(starting_residuals_p1))\n # fig,axes = plt.subplots(1,2)\n\n # for i in range(np.shape(starting_residuals_p1)[-1]):\n # axes[0].plot(wvs[good_inds],starting_residuals_p1[0][:,i],color=colors[i])\n # axes[1].plot(wvs[good_inds],starting_residuals_p1[1][:,i],color=colors[i])\n\n\n\n\n # print(pstart_tracepair1)\n ### Start the fitting! \n results1 = minimize(to_minimize,pstart_tracepair1,\n args=(binned_wvs,serkowski_q,serkowski_u,data_q_pair1,data_u_pair1,\n data_qerrs_pair1,data_uerrs_pair1))\n # data_qerrs_pair1,data_uerrs_pair1),method=\"Nelder-Mead\")\n # print(results1)\n\n results2 = minimize(to_minimize,pstart_tracepair2,\n args=(binned_wvs,serkowski_q,serkowski_u,data_q_pair2,data_u_pair2,\n data_qerrs_pair2,data_uerrs_pair2))\n # data_qerrs_pair2,data_uerrs_pair2),method=\"Nelder-Mead\")\n\n '''\n For each trace_pair we'll make 6 plots: \n - The forward modelled serkowski curves with the starting position and data overlaid\n - The forward modelled serkowski curves with the best fit position and data overlaid\n - The residuals of the new fits. 
\n '''\n\n # plot=True\n # if plot:\n # fig,axes = plt.subplots(2,2)\n\n # axes[0,0].plot(wvs,np.poly1d(results1.x[:(p_order+1)])(wvs))\n # axes[0,1].plot(wvs,np.poly1d(results1.x[1*(p_order+1):2*(p_order+1)])(wvs))\n # axes[1,0].plot(wvs,np.poly1d(results1.x[1*(p_order+1):2*(p_order+1)])(wvs))\n # axes[1,1].plot(wvs,np.poly1d(results1.x[3*(p_order+1):])(wvs))\n \n # axes[0,0].set_xlim(1.165,1.325)\n # axes[0,1].set_xlim(1.165,1.325)\n # axes[1,0].set_xlim(1.165,1.325)\n # axes[1,1].set_xlim(1.165,1.325)\n \n \n\n if plot_starting_position:\n\n ###################################\n ####### FIRST TRACE PAIR 1 ########\n ###################################\n \n\n fig1,big_axes1 = plt.subplots(1,2,figsize=(20,5))\n big_axes1[0].set_prop_cycle(cycler('color',colors))\n big_axes1[1].set_prop_cycle(cycler('color',colors))\n\n fig1.suptitle(\"Trace Pair 1 Starting Positions\")\n # fig1.suptitle(\"Trace Pair 1\")\n\n ### The start positions\n start_ax_q = big_axes1[0]\n start_ax_u = big_axes1[1]\n #Generate the starting positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n # print(serkowski_qu)\n starting_detector_q,starting_detector_u = forward_model_detector_qu(pstart_tracepair1,binned_wvs,serkowski_qu)\n # start_ax_q.plot(wvs,100*starting_detector_q,color='C{:d}'.format(i))\n # start_ax_u.plot(wvs,100*starting_detector_u,color='C{:d}'.format(i))\n start_ax_q.plot(binned_wvs,100*starting_detector_q,color=colors[i])\n start_ax_u.plot(binned_wvs,100*starting_detector_u,color=colors[i])\n \n #Add the data\n # start_ax_q.plot(wvs,100*data_q_pair1[:,i],'o',color='C{:d}'.format(i))\n # start_ax_u.plot(wvs,100*data_u_pair1[:,i],'o',color='C{:d}'.format(i),label=names[i])\n start_ax_q.plot(binned_wvs,100*data_q_pair1[:,i],'o',color=colors[i])\n start_ax_u.plot(binned_wvs,100*data_u_pair1[:,i],'o',color=colors[i],label=names[i])\n \n start_ax_u.legend()\n\n start_ax_q.set_xlim(wlMin_plot, wlMax_plot)\n start_ax_q.set_ylim(-4,7)\n start_ax_u.set_xlim(wlMin_plot, wlMax_plot)\n start_ax_u.set_ylim(-7,2)\n\n start_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n start_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n start_ax_q.set_ylabel(r\"q (%)\")\n start_ax_u.set_ylabel(r\"u (%)\")\n\n\n #############################\n ####### TRACE PAIR 2 ########\n #############################\n\n fig1,big_axes1 = plt.subplots(1,2,figsize=(20,5))\n big_axes1[0].set_prop_cycle(cycler('color',colors))\n big_axes1[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 2 Starting Positions\")\n # fig1.suptitle(\"Trace Pair 1\")\n\n ### The start positions\n start_ax_q = big_axes1[0]\n start_ax_u = big_axes1[1]\n #Generate the starting positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n starting_detector_q,starting_detector_u = forward_model_detector_qu(pstart_tracepair2,binned_wvs,serkowski_qu)\n # start_ax_q.plot(wvs,100*starting_detector_q,color='C{:d}'.format(i))\n # start_ax_u.plot(wvs,100*starting_detector_u,color='C{:d}'.format(i))\n start_ax_q.plot(binned_wvs,100*starting_detector_q,color=colors[i])\n start_ax_u.plot(binned_wvs,100*starting_detector_u,color=colors[i])\n \n #Add the data\n # start_ax_q.plot(wvs,100*data_q_pair1[:,i],'o',color='C{:d}'.format(i))\n # start_ax_u.plot(wvs,100*data_u_pair1[:,i],'o',color='C{:d}'.format(i),label=names[i])\n start_ax_q.plot(binned_wvs,100*data_q_pair2[:,i],'o',color=colors[i])\n start_ax_u.plot(binned_wvs,100*data_u_pair2[:,i],'o',color=colors[i],label=names[i])\n \n 
start_ax_u.legend()\n\n start_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n start_ax_q.set_ylim(-4,7)\n start_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n start_ax_u.set_ylim(-7,2)\n\n start_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n start_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n start_ax_q.set_ylabel(r\"q (%)\")\n start_ax_u.set_ylabel(r\"u (%)\")\n\n if plot_best_fit:\n\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n axes[0].set_prop_cycle(cycler('color',colors))\n axes[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 1 Best Fit Forward Model\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n # fit_ax_q = fig1.add_subplot(1,2,1)\n # fit_ax_u = fig1.add_subplot(1,2,2)\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n fit_detector_q,fit_detector_u = forward_model_detector_qu(results1.x,binned_wvs,serkowski_qu)\n fit_ax_q.plot(binned_wvs,100*fit_detector_q,color=colors[i])\n fit_ax_u.plot(binned_wvs,100*fit_detector_u,color=colors[i])\n\n #Add the data\n # fit_ax_q.plot(wvs,100*data_q_pair1[:,i],'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(wvs,100*data_u_pair1[:,i],'o',color='C{:d}'.format(i))\n fit_ax_q.plot(binned_wvs,100*data_q_pair1[:,i],'x',color=colors[i])\n fit_ax_u.plot(binned_wvs,100*data_u_pair1[:,i],'x',color=colors[i])\n\n fit_ax_q.plot(binned_wvs[good_inds],100*data_q_pair1[good_inds,i].flatten(),'o',color=colors[i])\n fit_ax_u.plot(binned_wvs[good_inds],100*data_u_pair1[good_inds,i].flatten(),'o',color=colors[i])\n \n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_q.set_ylim(-4,7)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_u.set_ylim(-7,2)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"q (%)\")\n fit_ax_u.set_ylabel(r\"u (%)\")\n\n\n #############################\n ####### TRACE PAIR 2 ########\n #############################\n\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n fig1.suptitle(\"Trace Pair 2 Best Fit Forward Model\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n fit_detector_q,fit_detector_u = forward_model_detector_qu(results2.x,binned_wvs,serkowski_qu)\n fit_ax_q.plot(binned_wvs,100*fit_detector_q,color=colors[i])\n fit_ax_u.plot(binned_wvs,100*fit_detector_u,color=colors[i])\n\n #Add the data\n # fit_ax_q.plot(wvs,100*data_q_pair2[:,i],'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(wvs,100*data_u_pair2[:,i],'o',color='C{:d}'.format(i))\n fit_ax_q.plot(binned_wvs,100*data_q_pair2[:,i],'x',color=colors[i])\n fit_ax_u.plot(binned_wvs,100*data_u_pair2[:,i],'x',color=colors[i])\n\n fit_ax_q.plot(binned_wvs[good_inds],100*data_q_pair2[good_inds,i].flatten(),'o',color=colors[i])\n fit_ax_u.plot(binned_wvs[good_inds],100*data_u_pair2[good_inds,i].flatten(),'o',color=colors[i])\n \n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_q.set_ylim(-4,7)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_u.set_ylim(-7,2)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"q (%)\")\n fit_ax_u.set_ylabel(r\"u (%)\")\n\n if plot_best_fit_on_sky:\n\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n axes[0].set_prop_cycle(cycler('color',colors))\n axes[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 1 Best Fit On-Sky\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n # fit_ax_q = 
fig1.add_subplot(1,2,1)\n # fit_ax_u = fig1.add_subplot(1,2,2)\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n fit_detector_q,fit_detector_u = forward_model_detector_qu(results1.x,binned_wvs,serkowski_qu)\n fit_ax_q.plot(binned_wvs,100*serkowski_q[:,i])\n fit_ax_u.plot(binned_wvs,100*serkowski_u[:,i])\n\n cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,data_q_pair1[:,i][good_inds],data_u_pair1[:,i][good_inds],\n data_qerrs_pair1[:,i][good_inds],data_uerrs_pair1[:,i][good_inds],polynomial_coefficients=results1.x)\n\n # cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,fit_detector_q[good_inds],fit_detector_u[good_inds],\n # data_qerrs_pair1[:,i][good_inds],data_uerrs_pair1[:,i][good_inds],polynomial_coefficients=results1.x)\n\n #Add the data \n # fit_ax_q.plot(good_wvs,100*cal_q,'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(good_wvs,100*cal_u,'o',color='C{:d}'.format(i))\n fit_ax_q.plot(good_wvs,100*cal_q,'o',color=colors[i])\n fit_ax_u.plot(good_wvs,100*cal_u,'o',color=colors[i])\n \n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_q.set_ylim(-4,7)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_u.set_ylim(-2,7)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"q (%)\")\n fit_ax_u.set_ylabel(r\"u (%)\")\n\n\n #############################\n ####### TRACE PAIR 2 ########\n #############################\n\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n axes[0].set_prop_cycle(cycler('color',colors))\n axes[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 2 Best Fit On-Sky\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n # fit_ax_q = fig1.add_subplot(1,2,1)\n # fit_ax_u = fig1.add_subplot(1,2,2)\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n # fit_detector_q,fit_detector_u = forward_model_detector_qu(results2.x,wvs,serkowski_qu)\n fit_ax_q.plot(binned_wvs,100*serkowski_q[:,i])\n fit_ax_u.plot(binned_wvs,100*serkowski_u[:,i])\n\n cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,data_q_pair2[:,i][good_inds],data_u_pair2[:,i][good_inds],\n data_qerrs_pair2[:,i][good_inds],data_uerrs_pair2[:,i][good_inds],polynomial_coefficients=results2.x)\n\n # cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,fit_detector_q[good_inds],fit_detector_u[good_inds],\n # data_qerrs_pair1[:,i][good_inds],data_uerrs_pair1[:,i][good_inds],polynomial_coefficients=results1.x)\n\n #Add the data \n # fit_ax_q.plot(good_wvs,100*cal_q,'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(good_wvs,100*cal_u,'o',color='C{:d}'.format(i))\n fit_ax_q.plot(good_wvs,100*cal_q,'o',color=colors[i])\n fit_ax_u.plot(good_wvs,100*cal_u,'o',color=colors[i])\n \n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_q.set_ylim(-4,7)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_u.set_ylim(-2,7)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"q (%)\")\n fit_ax_u.set_ylabel(r\"u (%)\")\n\n if plot_residuals:\n\n #############################\n ####### TRACE PAIR 1 ########\n #############################\n\n fig1,big_axes1 = plt.subplots(1,2,figsize=(20,5))\n big_axes1[0].set_prop_cycle(cycler('color',colors))\n big_axes1[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 1 Residuals\")\n\n #Residuals\n residuals_ax_q = big_axes1[0]\n residuals_ax_u = big_axes1[1]\n #Generate 
the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n fit_detector_q,fit_detector_u = forward_model_detector_qu(results1.x,binned_wvs,serkowski_qu)\n # residuals_ax_q.plot(wvs,100*(data_q_pair1[:,i]-fit_detector_q),color='C{:d}'.format(i))\n # residuals_ax_u.plot(wvs,100*(data_u_pair1[:,i]-fit_detector_u),color='C{:d}'.format(i))\n residuals_ax_q.plot(binned_wvs,100*(data_q_pair1[:,i]-fit_detector_q),color=colors[i])\n residuals_ax_u.plot(binned_wvs,100*(data_u_pair1[:,i]-fit_detector_u),color=colors[i])\n\n residuals_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n residuals_ax_q.set_ylim(-1,1)\n residuals_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n residuals_ax_u.set_ylim(-1,1)\n\n residuals_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n residuals_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n residuals_ax_q.set_ylabel(r\"q (%)\")\n residuals_ax_u.set_ylabel(r\"u (%)\")\n\n #############################\n ####### TRACE PAIR 1 ########\n #############################\n fig1,big_axes1 = plt.subplots(1,2,figsize=(20,5))\n big_axes1[0].set_prop_cycle(cycler('color',colors))\n big_axes1[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 2 Residuals\")\n\n #Residuals\n residuals_ax_q = big_axes1[0]\n residuals_ax_u = big_axes1[1]\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n fit_detector_q,fit_detector_u = forward_model_detector_qu(results2.x,binned_wvs,serkowski_qu)\n # residuals_ax_q.plot(wvs,100*(data_q_pair2[:,i]-fit_detector_q),color='C{:d}'.format(i))\n # residuals_ax_u.plot(wvs,100*(data_u_pair2[:,i]-fit_detector_u),color='C{:d}'.format(i))\n residuals_ax_q.plot(binned_wvs,100*(data_q_pair2[:,i]-fit_detector_q),color=colors[i])\n residuals_ax_u.plot(binned_wvs,100*(data_u_pair2[:,i]-fit_detector_u),color=colors[i])\n\n residuals_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n residuals_ax_q.set_ylim(-1,1)\n residuals_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n residuals_ax_u.set_ylim(-1,1)\n\n residuals_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n residuals_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n residuals_ax_q.set_ylabel(r\"q (%)\")\n residuals_ax_u.set_ylabel(r\"u (%)\")\n \n plt.tight_layout()\n plt.show()\n\n if plot_residuals_on_sky: \n\n ###########################################################\n ############ Trace Pair 1 On-Sky P and Theta ##############\n ###########################################################\n\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n axes[0].set_prop_cycle(cycler('color',colors))\n axes[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 1 P and Theta On-Sky\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n # fit_ax_q = fig1.add_subplot(1,2,1)\n # fit_ax_u = fig1.add_subplot(1,2,2)\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n # fit_detector_q,fit_detector_u = forward_model_detector_qu(results1.x,wvs,serkowski_qu)\n p = 100*np.sqrt(serkowski_q[:,i]**2+serkowski_u[:,i]**2)\n theta = 0.5*np.degrees(np.arctan2(serkowski_u[:,i],serkowski_q[:,i]))\n fit_ax_q.plot(binned_wvs,p)\n fit_ax_u.plot(binned_wvs,theta,label=names[i])\n\n cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,data_q_pair1[:,i][good_inds],data_u_pair1[:,i][good_inds],\n data_qerrs_pair1[:,i][good_inds],data_uerrs_pair1[:,i][good_inds],polynomial_coefficients=results1.x)\n \n\n cal_p = 100*np.sqrt(cal_q**2+cal_u**2-cal_qerr**2-cal_uerr**2)\n cal_p_errs = 
100*np.sqrt(cal_qerr**2+cal_uerr**2)\n cal_theta = 0.5*np.degrees(np.arctan2(cal_u,cal_q))\n cal_theta[cal_theta < -50] +=180\n cal_theta_err = 0.5*np.degrees(np.sqrt(cal_q**2*cal_uerr**2+cal_u**2*cal_qerr**2)/(cal_u**2+cal_q**2))\n\n #Add the data \n # fit_ax_q.plot(good_wvs,cal_p,'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(good_wvs,cal_theta,'o',color='C{:d}'.format(i))\n # fit_ax_q.plot(good_wvs,cal_p,'o',color=colors[i])\n fit_ax_q.errorbar(good_wvs,cal_p,yerr=cal_p_errs,marker='o',color=colors[i],linestyle=\"None\")\n fit_ax_u.errorbar(good_wvs,cal_theta,yerr=cal_theta_err,marker='o',color=colors[i],linestyle=\"None\")\n # fit_ax_u.plot(good_wvs,cal_theta,'o',color=colors[i])\n \n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n # fit_ax_q.set_ylim(-6,6)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_u.legend()\n # fit_ax_u.set_ylim(-6,6)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"p (%)\")\n fit_ax_u.set_ylabel(r\"$\\theta$ ($^\\circ$)\")\n\n if output_dir is not None:\n plt.savefig(\"Trace1_P_Theta_Onsky.png\",dpi=200,bbox_inches=\"tight\")\n\n # outputdir\n\n ####################################################################\n ############ Trace Pair 1 On-Sky Relative Residuals ################\n ####################################################################\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n axes[0].set_prop_cycle(cycler('color',colors))\n axes[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 1 Relative Residuals On-Sky\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n # fit_ax_q = fig1.add_subplot(1,2,1)\n # fit_ax_u = fig1.add_subplot(1,2,2)\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n # fit_detector_q,fit_detector_u = forward_model_detector_qu(results1.x,wvs,serkowski_qu)\n p = 100*np.sqrt(serkowski_q[:,i]**2+serkowski_u[:,i]**2)\n theta = 0.5*np.degrees(np.arctan2(serkowski_u[:,i],serkowski_q[:,i]))\n # fit_ax_q.plot(wvs,p)\n # fit_ax_u.plot(wvs,theta)\n\n cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,data_q_pair1[:,i][good_inds],data_u_pair1[:,i][good_inds],\n data_qerrs_pair1[:,i][good_inds],data_uerrs_pair1[:,i][good_inds],polynomial_coefficients=results1.x)\n\n cal_p = 100*np.sqrt(cal_q**2+cal_u**2)\n cal_theta = 0.5*np.degrees(np.arctan2(cal_u,cal_q))\n cal_theta_err = 0.5*np.degrees(np.sqrt(cal_q**2*cal_uerr**2+cal_u**2*cal_qerr**2)/(cal_u**2+cal_q**2))\n\n delta_theta = (theta[good_inds]-cal_theta)%180\n delta_theta[delta_theta > 90] -= 180\n relative_delta_p = (p[good_inds]-cal_p)/p[good_inds]\n # cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,fit_detector_q[good_inds],fit_detector_u[good_inds],\n # data_qerrs_pair1[:,i][good_inds],data_uerrs_pair1[:,i][good_inds],polynomial_coefficients=results1.x)\n\n #Add the data \n # fit_ax_q.plot(good_wvs,relative_delta_p,'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(good_wvs,delta_theta,'o',color='C{:d}'.format(i))\n # fit_ax_q.plot(good_wvs,relative_delta_p,'o',color=colors[i])\n fit_ax_q.errorbar(good_wvs+0.0004*i,100*relative_delta_p,yerr=100*cal_p_errs/p[good_inds],marker='o',linestyle=\"None\",color=colors[i],label=\"{:.1f}%\".format(np.mean(100*relative_delta_p)))\n # fit_ax_u.plot(good_wvs,delta_theta,'o',color=colors[i])\n fit_ax_u.errorbar(good_wvs,delta_theta,yerr=cal_theta_err,marker='o',color=colors[i],linestyle=\"None\",label=\"{:.1f}$^\\circ$\".format(np.mean(delta_theta)))\n \n 
fit_ax_q.legend(title=r\"Mean $\\Delta p/p$\",ncol=2)\n fit_ax_u.legend(title=r\"Mean $\\Delta \\theta$\",ncol=2)\n\n\n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n # fit_ax_q.set_ylim(-6,6)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n # fit_ax_u.set_ylim(-6,6)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"$\\Delta$ p / p (%)\")\n fit_ax_u.set_ylabel(r\"$\\Delta\\theta$ ($^\\circ$)\")\n\n if output_dir is not None:\n plt.savefig(\"Trace1_P_Theta_Onsky_residuals.png\",dpi=200,bbox_inches=\"tight\")\n\n ###########################################################\n ############ Trace Pair 2 On-Sky P and Theta ##############\n ###########################################################\n\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n axes[0].set_prop_cycle(cycler('color',colors))\n axes[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 2 P and Theta On-Sky\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n # fit_ax_q = fig1.add_subplot(1,2,1)\n # fit_ax_u = fig1.add_subplot(1,2,2)\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n # fit_detector_q,fit_detector_u = forward_model_detector_qu(results1.x,wvs,serkowski_qu)\n p = 100*np.sqrt(serkowski_q[:,i]**2+serkowski_u[:,i]**2)\n theta = 0.5*np.degrees(np.arctan2(serkowski_u[:,i],serkowski_q[:,i]))\n fit_ax_q.plot(binned_wvs,p)\n fit_ax_u.plot(binned_wvs,theta,label=names[i])\n\n cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,data_q_pair2[:,i][good_inds],data_u_pair2[:,i][good_inds],\n data_qerrs_pair2[:,i][good_inds],data_uerrs_pair2[:,i][good_inds],polynomial_coefficients=results2.x)\n\n cal_p = 100*np.sqrt(cal_q**2+cal_u**2)\n cal_theta = 0.5*np.degrees(np.arctan2(cal_u,cal_q))\n cal_theta[cal_theta < -50] +=180\n cal_theta_err = 0.5*np.degrees(np.sqrt(cal_q**2*cal_uerr**2+cal_u**2*cal_qerr**2)/(cal_u**2+cal_q**2))\n\n #Add the data \n # fit_ax_q.plot(good_wvs,cal_p,'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(good_wvs,cal_theta,'o',color='C{:d}'.format(i))\n fit_ax_q.errorbar(good_wvs,cal_p,yerr=cal_p_errs,marker='o',color=colors[i],linestyle=\"None\")\n fit_ax_u.errorbar(good_wvs,cal_theta,yerr=cal_theta_err,marker='o',color=colors[i],linestyle=\"None\")\n \n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n # fit_ax_q.set_ylim(-6,6)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n fit_ax_u.legend()\n # fit_ax_u.set_ylim(-6,6)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"p (%)\")\n fit_ax_u.set_ylabel(r\"$\\theta$ ($^\\circ$)\")\n\n if output_dir is not None:\n plt.savefig(\"Trace2_P_Theta_Onsky.png\",dpi=200,bbox_inches=\"tight\")\n\n ####################################################################\n ############ Trace Pair 2 On-Sky Relative Residuals ################\n ####################################################################\n fig1,axes = plt.subplots(1,2,figsize=(20,5))\n axes[0].set_prop_cycle(cycler('color',colors))\n axes[1].set_prop_cycle(cycler('color',colors))\n fig1.suptitle(\"Trace Pair 2 Relative Residuals On-Sky\")\n\n fit_ax_q = axes[0]\n fit_ax_u = axes[1]\n # fit_ax_q = fig1.add_subplot(1,2,1)\n # fit_ax_u = fig1.add_subplot(1,2,2)\n #Generate the best fit positions\n for i in range(len(dir_list)):\n serkowski_qu = [serkowski_q[:,i],serkowski_u[:,i]]\n # fit_detector_q,fit_detector_u = forward_model_detector_qu(results1.x,wvs,serkowski_qu)\n p = 
100*np.sqrt(serkowski_q[:,i]**2+serkowski_u[:,i]**2)\n theta = 0.5*np.degrees(np.arctan2(serkowski_u[:,i],serkowski_q[:,i]))\n # fit_ax_q.plot(wvs,p)\n # fit_ax_u.plot(wvs,theta)\n\n cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,data_q_pair2[:,i][good_inds],data_u_pair2[:,i][good_inds],\n data_qerrs_pair2[:,i][good_inds],data_uerrs_pair2[:,i][good_inds],polynomial_coefficients=results2.x)\n\n cal_p = 100*np.sqrt(cal_q**2+cal_u**2)\n cal_theta = 0.5*np.degrees(np.arctan2(cal_u,cal_q))\n cal_theta_err = 0.5*np.degrees(np.sqrt(cal_q**2*cal_uerr**2+cal_u**2*cal_qerr**2)/(cal_u**2+cal_q**2))\n\n delta_theta = (theta[good_inds]-cal_theta)%180\n delta_theta[delta_theta > 90] -= 180\n relative_delta_p = (p[good_inds]-cal_p)/p[good_inds]\n # cal_q,cal_u,cal_qerr,cal_uerr = calibrate_qu(good_wvs,fit_detector_q[good_inds],fit_detector_u[good_inds],\n # data_qerrs_pair1[:,i][good_inds],data_uerrs_pair1[:,i][good_inds],polynomial_coefficients=results1.x)\n\n #Add the data \n # fit_ax_q.plot(good_wvs,relative_delta_p,'o',color='C{:d}'.format(i))\n # fit_ax_u.plot(good_wvs,delta_theta,'o',color='C{:d}'.format(i))\n # fit_ax_q.plot(good_wvs,relative_delta_p,'o',color=colors[i])\n fit_ax_q.errorbar(good_wvs+0.0002*i,100*relative_delta_p,yerr=100*cal_p_errs/p[good_inds],marker='o',linestyle=\"None\",color=colors[i],label=\"{:.1f}%\".format(np.mean(100*relative_delta_p)))\n fit_ax_u.errorbar(good_wvs,delta_theta,yerr=cal_theta_err,marker='o',color=colors[i],linestyle=\"None\",label=\"{:.1f}$^\\circ$\".format(np.mean(delta_theta)))\n # fit_ax_u.plot(good_wvs,delta_theta,'o',color=colors[i])\n \n fit_ax_q.legend(title=r\"Mean $\\Delta p/p$\",ncol=2)\n fit_ax_u.legend(title=r\"Mean $\\Delta \\theta$\",ncol=2)\n\n fit_ax_q.set_xlim(wlMin_plot,wlMax_plot)\n # fit_ax_q.set_ylim(-6,6)\n fit_ax_u.set_xlim(wlMin_plot,wlMax_plot)\n # fit_ax_u.set_ylim(-6,6)\n\n fit_ax_q.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_u.set_xlabel(r\"Wavelength ($\\mu m$)\")\n fit_ax_q.set_ylabel(r\"$\\Delta$ p / p (%)\")\n fit_ax_u.set_ylabel(r\"$\\Delta\\theta$ ($^\\circ$)\")\n\n if output_dir is not None:\n plt.savefig(\"Trace2_P_Theta_Onsky_residuals.png\",dpi=200,bbox_inches=\"tight\")\n \n if plot_mueller_matrix: \n\n\n fig,axes = plt.subplots(2,2,figsize=(10,10))\n # axes[0,0].set_prop_cycle(cycler('color',colors))\n # axes[0,1].set_prop_cycle(cycler('color',colors))\n # axes[1,0].set_prop_cycle(cycler('color',colors))\n # axes[1,1].set_prop_cycle(cycler('color',colors))\n fig.suptitle(\"Best-Fit Efficiencies and Crosstalks\")\n\n axes[0,0].plot(wvs,np.poly1d(results1.x[:(p_order+1)])(wvs),label=\"Trace Pair 1\")\n axes[0,1].plot(wvs,np.poly1d(results1.x[1*(p_order+1):2*(p_order+1)])(wvs))\n axes[1,0].plot(wvs,np.poly1d(results1.x[2*(p_order+1):3*(p_order+1)])(wvs))\n axes[1,1].plot(wvs,np.poly1d(results1.x[3*(p_order+1):])(wvs))\n \n axes[0,0].plot(wvs,np.poly1d(results2.x[:(p_order+1)])(wvs),label=\"Trace Pair 2\")\n axes[0,1].plot(wvs,np.poly1d(results2.x[1*(p_order+1):2*(p_order+1)])(wvs))\n axes[1,0].plot(wvs,np.poly1d(results2.x[2*(p_order+1):3*(p_order+1)])(wvs))\n axes[1,1].plot(wvs,np.poly1d(results2.x[3*(p_order+1):])(wvs))\n\n axes[0,0].set_xlim(wlMin,wlMax)\n axes[0,1].set_xlim(wlMin,wlMax)\n axes[1,0].set_xlim(wlMin,wlMax)\n axes[1,1].set_xlim(wlMin,wlMax)\n\n #15 degree offset\n axes[0,0].axhline(np.cos(2*np.radians(15)),color='r',label=\"15$^\\circ$ offset\",linestyle=\"--\",alpha=0.7)\n axes[0,1].axhline(-np.sin(2*np.radians(15)),color='r',label=\"15$^\\circ$ offset\",linestyle=\"--\",alpha=0.7)\n 
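# --- Editor's illustrative sketch (not part of the original snippet) ---
# In the Mueller-matrix panel above, the best-fit parameter vector packs four
# wavelength-dependent terms (eta_Q, chi_{U->Q}, chi_{Q->U}, eta_U) back to
# back, each as p_order+1 polynomial coefficients, and evaluates them with
# np.poly1d. A compact helper for that unpacking step (the name and argument
# order are assumptions, not from the snippet):
import numpy as np

def evaluate_packed_polynomials(flat_coeffs, wavelengths, p_order, n_terms=4):
    """Split a flat coefficient vector into n_terms polynomials and evaluate each."""
    n = p_order + 1
    if len(flat_coeffs) != n_terms * n:
        raise ValueError("expected n_terms*(p_order+1) coefficients")
    return [np.poly1d(flat_coeffs[k * n:(k + 1) * n])(wavelengths)
            for k in range(n_terms)]
# --- end sketch ---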
axes[1,0].axhline(-np.sin(2*np.radians(15)),color='r',label=\"15$^\\circ$ offset\",linestyle=\"--\",alpha=0.7)\n axes[1,1].axhline(-np.cos(2*np.radians(15)),color='r',label=\"15$^\\circ$ offset\",linestyle=\"--\",alpha=0.7)\n\n axes[0,0].legend()\n\n axes[0,0].set_ylim(0.,1)\n axes[0,1].set_ylim(-1.,0)\n axes[1,0].set_ylim(-1.,0)\n axes[1,1].set_ylim(-1.,0)\n \n axes[0,0].set_ylabel(r\"$\\eta_Q$\")\n axes[0,1].set_ylabel(r\"$\\chi_{U\\rightarrow Q}$\")\n axes[1,0].set_ylabel(r\"$\\chi_{Q\\rightarrow U}$\")\n axes[1,1].set_ylabel(r\"$\\eta_U$\")\n\n axes[0,0].set_xlabel(r\"Wavelength ($\\mu m$)\")\n axes[0,1].set_xlabel(r\"Wavelength ($\\mu m$)\")\n axes[1,0].set_xlabel(r\"Wavelength ($\\mu m$)\")\n axes[1,1].set_xlabel(r\"Wavelength ($\\mu m$)\")\n\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n if output_dir is not None:\n plt.savefig(\"BestFit_Mueller_Matrix.png\",dpi=200,bbox_inches=\"tight\")\n\n return (results1.x,results2.x),", "def arm_calibration(self):\n # DONE: 3. Implement the arm calibration movement by fixing the code below (it has many bugs). It should to this:\n # Command the arm_motor to run forever in the positive direction at max speed.\n # Create an infinite while loop that will block code execution until the touch sensor's is_pressed value is True.\n # Within that loop sleep for 0.01 to avoid running code too fast.\n # Once past the loop the touch sensor must be pressed. So stop the arm motor quickly using the brake stop action.\n # Make a beep sound\n # Now move the arm_motor 14.2 revolutions in the negative direction relative to the current location\n # Note the stop action and speed are already set correctly so we don't need to specify them again\n # Block code execution by waiting for the arm to finish running\n # Make a beep sound\n # Set the arm encoder position to 0 (the last line below is correct to do that, it's new so no bug there)\n\n # Code that attempts to do this task but has MANY bugs (nearly 1 on every line). 
Fix them!\n self.arm_motor.run_forever(speed_sp=900)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action='brake')\n ev3.Sound.beep().wait()\n # time.sleep(2)\n # arm_motor.stop(stop_action='brake')\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range, speed_sp=900)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this line is correct as is).", "def getcalind(self,blk=None):\n if blk!=None:\n blk=arr(blk)\n cblk=np.array([]).astype(int) # Initialize cal stare indices\n cs=self.ind['cs']\n ce=self.ind['ce']\n for k,val in enumerate(blk):\n ss=self.ind['ss'][val] # Scan start\n se=self.ind['se'][val] # Scan stop\n # Find leading cal stare \n ind=np.where(ce<=ss)[0]\n if ind.size>0:\n # If it exists, append it\n cblk=np.append(cblk,ind[-1])\n\n # Find trailing cal stare \n ind=np.where(cs>=se)[0]\n if ind.size>0:\n # If it exists, append it\n cblk=np.append(cblk,ind[0])\n else:\n cblk=None\n\n return np.unique(self.getind('cs','ce',blk=cblk))", "def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, 
[]):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)", "def calcSeries(self, I, V, meanIsc, Imax):\n # make sure all inputs are numpy arrays, but don't make extra copies\n I = np.asarray(I) # currents [A]\n V = np.asarray(V) # voltages [V]\n meanIsc = np.asarray(meanIsc) # mean Isc [A]\n Imax = np.asarray(Imax) # max current [A]\n # create array of currents optimally spaced from mean Isc to max VRBD\n Ireverse = (Imax - meanIsc) * self.Imod_pts_sq + meanIsc\n # range of currents in forward bias from 0 to mean Isc\n Iforward = meanIsc * self.pts\n Imin = np.minimum(I.min(), 0.) # minimum cell current, at most zero\n # range of negative currents in the 4th quadrant from min current to 0\n Iquad4 = Imin * self.Imod_negpts\n # create range for interpolation from forward to reverse bias\n Itot = np.concatenate((Iquad4, Iforward, Ireverse), axis=0).flatten()\n Vtot = np.zeros((3 * self.npts,))\n # add up all series cell voltages\n for i, v in zip(I, V):\n # interp requires x, y to be sorted by x in increasing order\n Vtot += npinterpx(Itot, np.flipud(i), np.flipud(v))\n return np.flipud(Itot), np.flipud(Vtot)", "def run_macs(in_files, out_peaks, max_fdr):\n in_treat, in_control = in_files[0]\n matches = re.search(r'(.*\\.treat)(.*)\\.mapped_reads', in_treat).groups()\n name = matches[0] + matches[1] + '.macs.peaks'\n max_fdr = cfg.getfloat('peaks', 'max_FDR')\n cmd = 'macs -t %s -c %s --name=%s %s' % (in_treat, in_control, name,\n cfg.get('peaks', 'macs_params'))\n sys_call(cmd)\n \n # convert to proper bedfile- ints for score and + for strand\n with open(out_peaks, 'w') as outfile:\n with open(name + '_peaks.xls') as infile:\n for index, line in enumerate(itertools.ifilter(\n bedCommentFilter, infile)):\n fields = line.split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n start = str(max(0, int(fields[1])))\n score = str(max(0, min(1000, int(float(fields[6])))))\n fdr = float(fields[8])\n if fdr <= max_fdr:\n outfile.write('\\t'.join([fields[0], start, fields[2],\n 'MACS_peak_%s' % (index + 1), score]) +\n '\\t+\\n')\n # take region surrounding the peak center as the summit\n summit_size = cfg.getint('peaks', 'peak_summit_size')\n with open(out_peaks + '_summits.%s_around' % \\\n cfg.get('peaks', 'peak_summit_size'), 'w') as outfile:\n with open(name + '_peaks.xls') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.strip().split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n score = str(max(0, min(1000, int(float(fields[6])))))\n p_start, p_stop = max(0, int(fields[1])), int(fields[2])\n p_center = p_start + (p_stop - p_start) / 2\n s_start = p_center - summit_size / 2\n s_stop = p_center + summit_size / 2\n fdr = float(fields[8])\n if fdr <= max_fdr:\n outfile.write('\\t'.join([fields[0], str(s_start),\n str(s_stop),\n 'MACS_peak_%s' % (index + 1), score])\n + '\\t+\\n')", "def calibrate_power_ADC(self):\n self.send_packet('\\x61')", "def reconstruct(self, \n \t\t\t\tresonance_file, \n\t\t 
\t\tenergies # np.array with energies, or list of such arrays\n\t\t \t\t):\n # Local variables, not prone to change #\n mt_order_in_lst = [1,2,102]\n #\n # Error checks #\n if self.temperature > 0.: \n raise ValueError('Doppler broadening not available yet.')\n try: energies = np.array(energies) \n except: raise ValueError('Invalid energies.')\n #\n # Set temporary file names #\n tempinp = temp_file_gen('Sammy_reconstruct','inp')\n temppar = temp_file_gen('Sammy_reconstruct','par')\n templst = temp_file_gen('Sammy_reconstruct','lst')\n #\n # Generate input files from ENDF\n self.endf2inp_par_ndf(resonance_file, [tempinp, temppar])\n #\n # Prepare actual reconstruction run by modifying input #\n newkeylines = ['do not solve bayes equations',\n 'reconstruct cross sections',\n 'ignore input binary covariance file',\n ]\n self.modify_inp(tempinp, newkeylines)\n #\n # Run SAMMY to reconstruct cross sections #\n self.run([tempinp,temppar,'dummy.dat'],['SAMMY.LST'],[templst])\n #\n # Read output, generate list of CrossSection instances #\n cross_section_array = self.read_data(templst)\n cross_sections = []\n E = energies\n for k in range(len(mt_order_in_lst)):\n if type(energies) == list and len(energies) == len(mt_order_in_lst):\n \t\tE = energies[k]\n cross_sections.append(CrossSection(mt_order_in_lst[k], E, np.interp(\n \tE, cross_section_array[:,0], cross_section_array[:,k+1])))\n #\n # Clean up #\n if self.cleanup: \n for p in [tempinp, temppar, templst]: os.remove(p)\n # \n return cross_sections", "def addPeakResonancesToSpinSystem(peaks):\n \n # TBD check experiment type of the peak\n \n if not peaks:\n return\n \n resonances = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n resonances.append(contrib.resonance)\n \n spinSystems = []\n for resonance in resonances:\n resonanceGroup = resonance.resonanceGroup\n if resonanceGroup and (resonanceGroup not in spinSystems):\n spinSystems.append(resonanceGroup)\n\n spinSystem = None\n if len(spinSystems) == 1:\n spinSystem = spinSystems[0]\n elif len(spinSystems) > 1:\n msg = 'There are multiple spin systems for these peaks.\\n'\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm',msg):\n spinSystem = spinSystems[0]\n for spinSystem2 in spinSystems[1:]:\n mergeSpinSystems(spinSystem2,spinSystem)\n else:\n return\n \n if spinSystem is None:\n spinSystem = peaks[0].topObject.newResonanceGroup()\n\n for resonance in resonances:\n addSpinSystemResonance(spinSystem,resonance)\n\n return spinSystem", "def total_causal_shift(self):\n frames = self.causal_shift[0]\n next_dilation = self.filter_widths[0]\n for i in range(1, len(self.filter_widths)):\n frames += self.causal_shift[i] * next_dilation\n next_dilation *= self.filter_widths[i]\n return frames", "def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n 
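# --- Editor's illustrative sketch (not part of the original function) ---
# The per-frame reduction applied further below boils down to
# (science - scaled_dark) / flat, where the dark is rescaled by the ratio of
# exposure times when they differ. As a minimal standalone function
# (names are illustrative only):
import numpy as np

def basic_dark_flat_reduce(science, master_dark, master_flat,
                           science_exptime, dark_exptime):
    """Dark-subtract (with exposure-time scaling) and flat-field one frame."""
    factor = 1.0 if science_exptime == dark_exptime else science_exptime / dark_exptime
    return (np.asarray(science, dtype=float) - factor * master_dark) / master_flat
# --- end sketch ---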
master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the 
calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)", "def calibrateData(data, cal, antennas, sourceInfo, file=True, niter=None):\n # Loop over data\n for iant, dant in data.items():\n # Write results to a file\n writeOutputFile = False\n if file != False and file <> None:\n # Set file name\n writeOutputFile = True\n\n # date the output file to avoid having to parse huge files later on\n today = dt.date.today()\n dateStr = \"%i%02i%02i\" % (today.timetuple()[0], today.timetuple()[1], today.timetuple()[2]) \n \n if file == True:\n outputFileRoot = '%s_%.2d_%s.dat' % (RPNT_RESULTS, antennas[iant], dateStr)\n else:\n outputFileRoot = \"%s_%.2d_%s.dat\" % (file, antennas[iant], dateStr)\n\n # Open file\n fout = open(outputFileRoot, \"a\")\n fout.write(\"# Pointing data for antenna %d : %s\\n\" % (antennas[iant], time.asctime()))\n f=commands.freqSetup()\n fout.write(\"# Rest Frequency : %d\\n\" % f[0])\n fout.write(\"# UT : %s\\n\" % utils.getUT(timestamp=True))\n fout.write(\"# Source %s\\n\" % sourceInfo['name'])\n fout.write(\"#\\n\");\n fout.write(\"# Iter offset(az) offset(el) Amp sigma Az El\\n\");\n fout.write(\"# (arcmin) (arcmin) (Jy) (Jy) (deg) (deg)\\n\");\n\n # Get az/el\n mpAz = utils.getAntennaMp(antennas[iant]) + \".AntennaCommon.Drive.Track.actualAzimuth\"\n mpEl = utils.getAntennaMp(antennas[iant]) + \".AntennaCommon.Drive.Track.actualElevation\"\n antaz = commands.queryDouble(mpAz)\n antel = commands.queryDouble(mpEl)\n\n # Initialize\n cal[iant] = list()\n\n # Compute mean amplitude\n for d in dant:\n # Initialize\n sum = 0.0\n sumw = 0.0\n nwindows = len(d['use'])\n weights = np.zeros(nwindows)\n\n # Compute weighted average\n x = []\n for i in range(nwindows):\n if d['use'][i]:\n sum += d['amp'][i] * d['wt'][i]\n sumw += d['wt'][i]\n x.append(d['amp'][i])\n\n # Save data\n result = dict()\n if sumw > 0.0:\n # result['amp'] = sum / sumw\n x = sorted(x)\n n1 = len(x) / 2\n n2 = (len(x)-1)/ 2\n result['amp'] = 0.5 * (x[n1] + x[n2])\n result['fwhm'] = getFWHM(antennas[iant], sourceInfo['lofreq'])\n result['offaz'] = d['offaz']\n result['offel'] = d['offel']\n result['sigma'] = 1.0 / math.sqrt(sumw)\n cal[iant].append(result)\n\n # Write data\n if writeOutputFile and (niter == None or niter == d['niter']):\n fout.write(\"%6s %10.3f %10.3f %10.3f %10.3f %10.3f %10.3f\\n\" % \\\n (str(d['niter']), result['offaz'], result['offel'], result['amp'], result['sigma'], antaz, antel))\n\n # Close file\n fout.close()", "def adaptCovarianceMatrix(self, evalcount):\n\n cc, cs, c_1, c_mu, n = self.c_c, self.c_sigma, self.c_1, self.c_mu, self.n\n wcm, wcm_old, mueff, invsqrt_C = self.wcm, self.wcm_old, self.mu_eff, self.sqrt_C\n lambda_ = self.lambda_\n\n self.p_sigma = (1-cs) * self.p_sigma + \\\n sqrt(cs*(2-cs)*mueff) * dot(invsqrt_C, (wcm - wcm_old) / self.sigma)\n power = (2*evalcount/lambda_)\n if power < 1000: #TODO: Solve more neatly\n hsig = sum(self.p_sigma**2)/(1-(1-cs)**power)/n < 2 + 4/(n+1)\n else:\n #Prevent underflow error,\n hsig = sum(self.p_sigma**2)/n < 2 + 4/(n+1)\n self.p_c = (1-cc) * self.p_c + hsig * sqrt(cc*(2-cc)*mueff) * (wcm - wcm_old) / self.sigma\n offset = self.offset[:, :self.mu_int]\n\n # Regular update of C\n self.C = (1 - c_1 - c_mu) * self.C \\\n + c_1 * (outer(self.p_c, self.p_c) + (1-hsig) * cc * (2-cc) * self.C) \\\n + c_mu * dot(offset, self.weights*offset.T)\n if self.active and len(self.all_offspring) >= 2*self.mu_int: # Active update of C\n offset_bad = self.offset[:, -self.mu_int:]\n 
self.C -= c_mu * dot(offset_bad, self.weights*offset_bad.T)\n\n # Adapt step size sigma\n if self.tpa:\n alpha_act = self.tpa_result * self.alpha\n alpha_act += self.beta_tpa if self.tpa_result > 1 else 0\n self.alpha_s += self.c_alpha * (alpha_act - self.alpha_s)\n self.sigma *= exp(self.alpha_s)\n else:\n exponent = (norm(self.p_sigma) / self.chiN - 1) * self.c_sigma / self.damps\n if exponent < 1000: #TODO: Solve more neatly\n self.sigma = self.sigma * exp(exponent)\n else:\n self.sigma = self.sigma_mean\n self.sigma_mean = self.sigma\n\n ### Update BD ###\n C = self.C # lastest setting for\n C = triu(C) + triu(C, 1).T # eigen decomposition\n\n degenerated = False\n if any(isinf(C)) > 1: # interval\n degenerated = True\n # raise Exception(\"Values in C are infinite\")\n elif not 1e-16 < self.sigma_mean < 1e6:\n degenerated = True\n else:\n try:\n w, e_vector = eigh(C)\n e_value = sqrt(list(map(complex, w))).reshape(-1, 1)\n if any(~isreal(e_value)):\n degenerated = True\n # raise Exception(\"Eigenvalues of C are not real\")\n elif any(isinf(e_value)):\n degenerated = True\n # raise Exception(\"Eigenvalues of C are infinite\")\n else:\n self.D = real(e_value)\n self.B = e_vector\n self.sqrt_C = dot(e_vector, e_value**-1 * e_vector.T)\n except LinAlgError as e:\n # raise Exception(e)\n print(\"Restarting, degeneration detected: {}\".format(e))\n degenerated = True\n\n if degenerated:\n self.restart()", "def autocorrelate(x):\n\n global i1, i2\n # used for transposes\n t = roll(range(x.ndim), 1)\n\n # pairs of indexes\n # the first is for the autocorrelation array\n # the second is the shift\n ii = [list(enumerate(range(1, s - 1))) for s in x.shape]\n\n # initialize the resulting autocorrelation array\n acor = empty(shape=[len(s0) for s0 in ii])\n\n # iterate over all combinations of directional shifts\n for i in product(*ii):\n # extract the indexes for\n # the autocorrelation array\n # and original array respectively\n i1, i2 = asarray(i).T\n\n x1 = x.copy()\n x2 = x.copy()\n\n for i0 in i2:\n # clip the unshifted array at the end\n x1 = x1[:-i0]\n # and the shifted array at the beginning\n x2 = x2[i0:]\n\n # prepare to do the same for\n # the next axis\n x1 = x1.transpose(t)\n x2 = x2.transpose(t)\n\n # normalize shifted and unshifted arrays\n x1 -= x1.mean()\n x1 /= x1.std()\n x2 -= x2.mean()\n x2 /= x2.std()\n\n # compute the autocorrelation directly\n # from the definition\n acor[tuple(i1)] = (x1 * x2).mean()\n\n return acor", "def gather_pk_ss(self):\n # only works if peak is bigger than ss (otherwise will go to 1)\n \n _t_step = numpy.arange (1, 9, .2)\n _log_t_step = 1e-7 * 10 ** (_t_step) \n \n # take max 10mM relax mech 1\n r1 = Relaxation(self.m1.Qhi, self.m1.P_init_lo)\n r1.assemble(_log_t_step, self.m1.open_states)\n \n #take max of hi-conc jump\n self.peak1 = numpy.max(r1.relax_sum)\n # calc Iss mech 1\n self.eqbm1 = r1.relax_sum[-1] #steady state at 100 sec.\n \n # take max 10mM relax mech 2\n r2 = Relaxation(self.m2.Qhi, self.m2.P_init_lo)\n r2.assemble(_log_t_step, self.m2.open_states)\n \n #take max of hi-conc jump\n self.peak2 = numpy.max(r2.relax_sum)\n # calc Iss mech 2\n self.eqbm2 = r2.relax_sum[-1]\n \n self.fold_change = (self.eqbm1 * self.peak2) / (self.eqbm2 * self.peak1)", "def msc(spectra):\n\n spectra = scale(spectra, with_std=False, axis=0) # Demean\n reference = np.mean(spectra, axis=1)\n\n for col in range(spectra.shape[1]):\n a, b = np.polyfit(reference, spectra[:, col], deg=1)\n spectra[:, col] = (spectra[:, col] - b) / a\n\n return spectra", "def 
calibrate(self, master):\n if master.referencetype == \"Block\":\n self.caldat = self.mw \n else:\n print(\"Kein gültiger Referenzsensortyp\")", "def sensordaten_einlesen(self):\n self.caldata = []\n self.caldata_raw = np.genfromtxt(self.sensorfile, usecols = np.asarray(self.sensorspalte), skip_header = 1)\n for ele in self.caldata_raw:\n self.caldata.append(int(ele))\n self.Sensordata = Channel()", "def cyclic_merit_lag(x,*args):\n CS = args[0]\n print \"rindex\",CS.rindex\n ht = get_ht(x,CS.rindex)\n hf = time2freq(ht)\n CS.hf = hf\n CS.ht = ht\n cs_model,csplus,csminus,phases = make_model_cs(hf,CS.s0,CS.bw,CS.ref_freq)\n merit = 2*(np.abs(cs_model[:,1:] - CS.cs[:,1:])**2).sum() #ignore zeroth harmonic (dc term)\n \n # the objval list keeps track of how the convergence is going\n CS.objval.append(merit)\n \n #gradient_lag\n diff = cs_model - CS.cs #model - data\n cc1 = cs2cc(diff * csminus)\n \n# original c code for reference:\n# for (ilag=0; ilag<cc1.nlag; ilag++) {\n# gradient->data[ilag] = 0.0 + I * 0.0;\n# int lag = (ilag<=cc1.nlag/2) ? ilag : ilag-cc1.nlag;\n# tau = (double)lag * (double)cs->nchan /\n# ( (double)cc1.nlag * cc1.bw*1.e6 );\n# for (ih=1; ih<cc1.nharm; ih++) {\n# phs = M_PI * tau * (double)ih * cc1.ref_freq;\n# phasor = cos(phs)+I*sin(phs);\n# fftwf_complex *ccval = get_cc(&cc1,ih,ip,ilag);\n# gradient->data[ilag] += 4.0 * (*ccval) * phasor\n# * conj(s0->data[ih]) / (float)cs->nchan;\n# }\n# }\n\n #we reuse phases and csminus, csplus from the make_model_cs call\n\n phasors = np.exp(1j*phases)\n cs0 = np.repeat(CS.s0[np.newaxis,:],CS.nlag,axis=0) #filter2cs\n grad = 4.0 * cc1 * phasors * np.conj(cs0) / CS.nchan\n grad = grad[:,1:].sum(1) # sum over all harmonics to get function of lag\n \n #conjugate(res)\n #calc positive shear\n #multiply\n #cs2cc\n cc2 = cs2cc(np.conj(diff) * csplus)\n grad2 = 4.0 * cc2 * np.conj(phasors) * cs0 / CS.nchan\n \n grad = grad + grad2[:,1:].sum(1)\n CS.grad = grad[:]\n CS.model = cs_model[:]\n\n if CS.iprint:\n print \"merit= %.7e grad= %.7e\" % (merit,(np.abs(grad)**2).sum())\n \n if CS.make_plots:\n if CS.niter % CS.plot_every == 0:\n CS.plotCurrentSolution()\n \n \n \n grad = get_params(grad, CS.rindex)\n CS.niter += 1\n \n return merit,grad", "def calc(self,index,counter_values):\n gr = self.grSign * self.grPitch['Value'].value\n m = self.mSign * self.mPitch['Value'].value\n \n offsetG,offsetM = self.checkOffset()\n beta = self.toRadians(gr) - (math.pi/2.0) - offsetG\n theta = (math.pi/2.0) - (self.toRadians(m)) - offsetM\n alpha = (2.0*theta) + beta\n numerator = (math.sin(alpha) + math.sin(beta))\n denominator = (self.DiffrOrder * self.look_at_grx())\n wavelength = numerator / denominator\n \n if wavelength == 0.0:\n energy_physicalmot = 0.0\n else:\n energy_physicalmot = self.hc / wavelength\n #if self.FixedM2Pit: \n Cff = math.cos(beta)/math.cos(alpha)\n if energy_physicalmot < 0 :\n #warning: wavelength se vuelve negativo ... 
??????\n energy_physicalmot = energy_physicalmot *(-1) \n \n # Real Energy is equal to the energy calculated by the encoders\n # minus an offset that depends on the same energy calculated by the \n # encoders:\n # E_physicalmot = Ereal + offset\n # with offset = a*Ereal + b\n # This implies that: Ereal = (Ephysicalmot - b)/(1+a) \n a_coeff = self.EnergyDP.a_offset_coeff\n b_coeff = self.EnergyDP.b_offset_coeff\n numerator = energy_physicalmot - b_coeff\n denominator = 1 + a_coeff\n energy = numerator / denominator\n \n if index == 1:\n return energy\n elif index == 2:\n return Cff", "def make_calcurve_order(self,oi):\n ccfs = np.full((self.n_vsini, self.n_velocities), np.nan)\n widths = np.full((self.n_vsini),np.nan)\n for i,v in enumerate(self.vsinis):\n # Make resampled/broadened spectra\n rotated = self.hpfspec.resample_and_broaden_order(oi, vsini=v, diag=True, upsample_factor=self.upsample_factor)\n # Make CCF and fit the output\n fit_output = self.hpfspec.ccfwidth_order(oi,w=rotated['w_resampled'], fl=rotated['fl_broadened'], debug=True, fitwidth=self.fitwidth, M=self.M, velocities=self.velocities)\n # Store results\n ccfs[i,:] = fit_output['ccf1']\n widths[i] = fit_output['fit']['sigma']\n self.calibration_widths[oi] = widths\n self.calibration_ccfs[oi] = ccfs", "def causDspectra(uxmax, uymax, ax, ay, dso, dsl, dm, m, n, N):\n \n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n \n dlo = dso - dsl\n coeff = dsl*dlo*re*dm/(2*pi*dso)\n \n rx = np.linspace(xmin - 5., xmax + 5., 500)\n ry = np.linspace(ymin - 5., ymax + 5., 500)\n uvec = np.meshgrid(rx, ry)\n A, B, C, D, E = causticFreqHelp(uvec, ax, ay, m, n)\n upxvec = np.linspace(xmin, xmax, N)\n freqcaus = []\n for upx in upxvec:\n eq1 = A*upx**2 + B*upx + C\n eq2 = D*upx + E\n evcaus = np.array([eq1, eq2])\n roots = polishedRootsBulk(evcaus, causEqFreq, rx, ry, args = (upx, ax, ay, m, n))\n for root in roots:\n ux, uy = root\n arg = coeff*lensg(ux, uy)[0]/(ux - upx)\n # print(arg)\n if arg > 0:\n freq = c*np.sqrt(arg)/(ax*GHz)\n if freq > 0.01:\n freqcaus.append([upx, freq])\n # print(freqcaus)\n freqcaus = np.asarray(freqcaus).T\n # plt.scatter(freqcaus[0], freqcaus[1], marker = '.', color = 'black', s = 3.)\n # plt.xlim(xmin, xmax)\n # plt.ylim(0., max(freqcaus[1]) + 0.5)\n # plt.xlabel(r\"$u'_x$\", fontsize = 16)\n # plt.ylabel(r'$\\nu$ (GHz)', fontsize = 16)\n # plt.grid()\n # plt.show()\n return freqcaus", "def _applyCalibration(self, ws, detPos):\n alg = self.createChildAlgorithm('ApplyCalibration')\n alg.setProperty('Workspace', ws)\n alg.setProperty('PositionTable', detPos)\n alg.execute()", "def changePeaks(self):\n # Change the number of peaks\n if self.minpeaks is not None and self.maxpeaks is not None:\n npeaks = len(self.peaks_function)\n u = self.random.random()\n r = self.maxpeaks - self.minpeaks\n if u < 0.5:\n # Remove n peaks or less depending on the minimum number of peaks\n u = self.random.random()\n n = min(npeaks - self.minpeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n idx = self.random.randrange(len(self.peaks_function))\n self.peaks_function.pop(idx)\n self.peaks_position.pop(idx)\n self.peaks_height.pop(idx)\n self.peaks_width.pop(idx)\n self.last_change_vector.pop(idx)\n else:\n # Add n peaks or less depending on the maximum number of peaks\n u = self.random.random()\n n = min(self.maxpeaks - npeaks, int(round(r * u * 
self.number_severity)))\n for i in range(n):\n self.peaks_function.append(self.random.choice(self.pfunc_pool))\n self.peaks_position.append([self.random.uniform(self.min_coord, self.max_coord) for _ in range(self.dim)])\n self.peaks_height.append(self.random.uniform(self.min_height, self.max_height))\n self.peaks_width.append(self.random.uniform(self.min_width, self.max_width))\n self.last_change_vector.append([self.random.random() - 0.5 for _ in range(self.dim)])\n\n for i in range(len(self.peaks_function)):\n # Change peak position\n shift = [self.random.random() - 0.5 for _ in range(len(self.peaks_position[i]))]\n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n \n shift = [shift_length * (1.0 - self.lambda_) * s \\\n + self.lambda_ * c for s, c in zip(shift, self.last_change_vector[i])]\n \n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n\n shift = [s*shift_length for s in shift]\n \n new_position = []\n final_shift = []\n for pp, s in zip(self.peaks_position[i], shift):\n new_coord = pp + s\n if new_coord < self.min_coord:\n new_position.append(2.0 * self.min_coord - pp - s)\n final_shift.append(-1.0 * s)\n elif new_coord > self.max_coord:\n new_position.append(2.0 * self.max_coord - pp - s)\n final_shift.append(-1.0 * s)\n else:\n new_position.append(new_coord)\n final_shift.append(s)\n\n self.peaks_position[i] = new_position\n self.last_change_vector[i] = final_shift\n\n # Change peak height\n change = self.random.gauss(0, 1) * self.height_severity\n new_value = change + self.peaks_height[i]\n if new_value < self.min_height:\n self.peaks_height[i] = 2.0 * self.min_height - self.peaks_height[i] - change\n elif new_value > self.max_height:\n self.peaks_height[i] = 2.0 * self.max_height - self.peaks_height[i] - change\n else:\n self.peaks_height[i] = new_value\n\n # Change peak width\n change = self.random.gauss(0, 1) * self.width_severity\n new_value = change + self.peaks_width[i]\n if new_value < self.min_width:\n self.peaks_width[i] = 2.0 * self.min_width - self.peaks_width[i] - change\n elif new_value > self.max_width:\n self.peaks_width[i] = 2.0 * self.max_width - self.peaks_width[i] - change\n else:\n self.peaks_width[i] = new_value\n\n self._optimum = None", "def calibrate(self):\n self.mode = Mode.calibrating\n yaw_sensor = yaw_button()\n while not yaw_sensor.is_pressed():\n self.move_left()\n for _ in range(75):\n self.move_right()\n\n pitch_sensor = pitch_button()\n while not pitch_sensor.is_pressed():\n self.move_up()\n for _ in range(21):\n self.move_down()\n\n self.pitch = 0.0\n self.yaw = 0.0\n self.mode = Mode.waiting", "def _calc_corrections(self): \n searchIter= self._niter-1\n while searchIter > 0:\n trySavefilename= self._createSavefilename(searchIter)\n if os.path.exists(trySavefilename):\n trySavefile= open(trySavefilename,'rb')\n corrections= sc.array(pickle.load(trySavefile))\n trySavefile.close()\n break\n else:\n searchIter-= 1\n if searchIter == 0:\n corrections= sc.ones((self._npoints,2))\n for ii in range(searchIter,self._niter):\n if ii == 0:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta)\n else:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta,\n corrections=corrections,\n npoints=self._npoints,\n rmax=self._rmax,\n savedir=self._savedir,\n interp_k=self._interp_k)\n newcorrections= sc.zeros((self._npoints,2))\n for jj in 
range(self._npoints):\n thisSurface= currentDF.surfacemass(self._rs[jj],\n use_physical=False)\n newcorrections[jj,0]= currentDF.targetSurfacemass(self._rs[jj],use_physical=False)/thisSurface\n newcorrections[jj,1]= currentDF.targetSigma2(self._rs[jj],use_physical=False)*thisSurface\\\n /currentDF.sigma2surfacemass(self._rs[jj],\n use_physical=False)\n #print(jj, newcorrections[jj,:])\n corrections*= newcorrections\n #Save\n picklethis= []\n for arr in list(corrections):\n picklethis.append([float(a) for a in arr])\n save_pickles(self._savefilename,picklethis) #We pickle a list for platform-independence)\n return corrections", "def apply_radcal(self, input_radcal=None):\n if input_radcal is None:\n # Preflight radcal from HDF5 header file\n new_radcal = self.meta['radcal']\n else:\n # User-inputted radcal curve\n new_radcal = np.array(input_radcal)\n if len(new_radcal) != self.data.shape[-1]:\n print('Error: input_radcal must have the same number of elements'\n +' as the last dimension in the data array.')\n return self\n\n output_radcal = new_radcal\n if self.unit != u.photon:\n if str(self.radcal) == 'unknown':\n print('Error: Data currently has an unknown radcal applied.'\n +' Unable to apply new calibration.')\n return self\n elif np.all(self.radcal == new_radcal):\n print('Error: input_radcal is identical to current radcal.'\n +' No calculation is required.')\n return self\n else:\n print('Warning: Data currently has a different radcal applied.'\n +' Old calibration curve will be removed.')\n new_radcal = new_radcal/self.radcal\n\n new_data = self.data.copy()*new_radcal\n new_errs = self.uncertainty.array.copy()*new_radcal\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Applied radcal to convert photon counts to intensity')\n wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()\n\n output_cube = EISCube(new_data, wcs=self.wcs, uncertainty=new_errs,\n wavelength=self.wavelength, radcal=output_radcal,\n meta=new_meta, unit='erg / (cm2 s sr)',\n mask=self.mask, missing_axes=wcs_mask)\n return output_cube", "def optimize(self, cycles = 1, waveplates = ['half', 'quarter'],\n counter = 0):\n \n if counter in range(0,4):\n self.counter = counter\n else:\n raise ValueError('Argument specified for counter is not understood')\n\n for c in range(cycles):\n print '* Optimizing cycle %d of %d...'%(c+1, cycles)\n \n for w in waveplates:\n #measure position before optimizing\n self.rotator.set_zero_position(getattr(self,'_'+w+'_channel'))\n pos_before = getattr(self.rotator, 'get_noof_steps_ch'+\\\n str(getattr(self,'_'+w+'_channel')) )()\n \n #turn waveplat2es\n data, qtdata, dataplot, premature_quit = self.run(w, self.get_opt_red_power())\n if not premature_quit:\n qtdata, fitres = self.fit(w, data, qtdata, dataplot)\n qtdata.close_file()\n\n if not premature_quit:\n \n #set optimal position\n if type(fitres) != type(False):\n if np.sign(fitres['a2']) != -1:\n optim_pos = -np.int(fitres['a1']/(2*fitres['a2']))\n else:\n print '\\tFitting a maximum instead of a minimum.'\n optim_pos = 0\n \n else:\n print '\\tGuessing optimal waveplate position...'\n optim_pos = data['wp_steps'](self.find_nearest(data['counts'],\n min(data['counts'])))\n\n if self.get_plot_degrees():\n print '\\tOptimal waveplate position determined at %.3f degrees.'%(optim_pos*self.get_conversion_factor(w))\n else:\n print '\\tOptimal waveplate position determined at %d steps.'%optim_pos\n \n #BEWARE: never ask the current position in noof_steps\n curr_pos = 
data['wp_step'][len(data['wp_step'])-1]\n\n #check that the optimum position is somewhat reasonable\n if abs(optim_pos) < self.check_noof_steps:\n #set the position to the optimal position\n self.rotator.quick_scan(optim_pos-curr_pos, \n getattr(self,'_'+w+'_channel'))\n else:\n print '\\tWARNING: Optimal position differs %s steps\\\n from initial position'%optim_pos\n check = raw_input('\\tPress \"ENTER\" to continue, \"q\" to quit\\n')\n \n if check == '':\n #set the position to the optimal position\n self.rotator.quick_scan(optim_pos-curr_pos, \n getattr(self,'_'+w+'_channel'))\n \n elif check == 'q':\n print 'Process aborted by user'\n pass\n else:\n raise ValueError('Response to question is not \\\n understood. Not taking any action.')\n else:\n #what to do if there was a premature quit during optimization?\n pos_quit = data['wp_step'][len(data['wp_step'])-1]\n\n print '\\tReturning to initial position...'\n #set the position to the optimal position\n self.rotator.quick_scan(pos_before-pos_quit, getattr(self,'_'+w+'_channel'))\n\n #measure position after optimizing\n pos_after = getattr(self.rotator, 'get_noof_steps_ch'+\\\n str(getattr(self,'_'+w+'_channel')))()\n\n #print \"\\tPosition of %s waveplate changed %d steps\"\\\n # %(w, pos_after-pos_before)\n \n if msvcrt.kbhit():\n kb_char=msvcrt.getch()\n if kb_char == \"q\" : break\n \n qt.msleep(0.5)\n \n if premature_quit:\n break", "def _calibrate(self, Otrain, Ftrain, Feval):\n raise NotImplementedError()", "def get_simple_ccf(flux1, flux2, shift_lst):\n\n n = flux1.size\n ccf_lst = []\n for shift in shift_lst:\n segment1 = flux1[max(0,shift):min(n,n+shift)]\n segment2 = flux2[max(0,-shift):min(n,n-shift)]\n c1 = math.sqrt((segment1**2).sum())\n c2 = math.sqrt((segment2**2).sum())\n corr = np.correlate(segment1, segment2)/c1/c2\n ccf_lst.append(corr)\n return np.array(ccf_lst)", "def rm_calibration(self):\n\n self.bin_edges_kev = None", "def step_autocorrelation(self):\n\n max_hops = max([len(x) for x in self.steps])\n\n self.acf = np.zeros([len(self.steps), max_hops])\n\n keep = [] # list to hold indices of trajectories with a non-zero amount of hops\n for i in range(len(self.steps)):\n hops = self.steps[i]\n if len(hops) > 1:\n self.acf[i, :len(self.steps[i])] = timeseries.acf(self.steps[i])\n keep.append(i)\n\n self.acf = self.acf[keep, :]\n\n self.acf = np.array([self.acf[np.nonzero(self.acf[:, i]), i].mean() for i in range(max_hops)])\n\n #self.acf = timeseries.step_autocorrelation(self.z_interpolated.T[..., np.newaxis])", "def do_transform(self):\n # TODO: after unit tests are added switch to astropy fftconvolve here.\n from scipy.signal import fftconvolve\n total_background = self.model + self.background + self.approx\n excess = self.image - total_background\n for key, kern in self.kernbase.items():\n self.transform[key] = fftconvolve(excess, kern, mode='same')\n self.error[key] = np.sqrt(fftconvolve(total_background, kern ** 2, mode='same'))\n\n self.approx = fftconvolve(self.image - self.model - self.bkg,\n self.kern_approx, mode='same')\n self.approx_bkg = fftconvolve(self.bkg, self.kern_approx, mode='same')", "def calculate_decay_metric(discont_lst, city_data_lst, roots_lst):\n \n\n discont_indices = get_discont_indices(city_lst) #indices of discontinuities in the city data\n \n real_disconts = []\n for i in range(len(discont_indices) - 1): #for each discontinuity except for the last one (we'll take care of it below)\n upper = discont_indices[i + 1] \n lower = discont_indices[i]\n real_disconts += [upper - lower] # add 
to real disconts length between the i-th and the i+1-th discontinuity\n \n \n real_disconts += [len(city_data_lst) - 1 - discont_indices[len(discont_indices) - 1]] # the last discont len\n \n\n \"\"\"(2) Creatingthe ideal disconts based on the ideal decay coefficients\"\"\"\n \n ideal_disconts = [] \n decay_coeff = roots_lst[len(discont_indices) + 1] #decay coefficient that generates our ideal geometric discontinuity distribution\n \n for k in range(1, len(discont_indices) + 2): #for each number in the list of \n ideal_disconts += [len(discont_lst) * decay_coeff**k] #add ideal discont to list\n \n\n \"\"\"(3) calculates the final average of the pairwise differences between the ideal distribution\n of discontinuities and the real distribution of discontinuities\"\"\"\n \n pairwise_diffs = 0\n for j in range(len(ideal_disconts)): #for each j indexing the number of ideal discontinuities \n pairwise_diffs += abs(real_disconts[j] - ideal_disconts[j]) #calculates difference between the indexes of ideal and real discontinuities\n\n return pairwise_diffs / (len(discont_indices) + 1) #returns pairwise differences normalized by number of discontinuities\n \n \n \"\"\"Calculates the decay metric over each city dataset in the sample.\n \n inputs:\n - discont_data array of discontinuity data for the cities in question\n - city_data: array of raw patch data for cities in question\n - root_lst: sufficiently large list of roots for the number of discontinuities in question \n \n \"\"\"\n \n def decay_metric(discont_lst, city_data, root_lst):\n outer = [] #outer layer indexed by city\n i = 0\n while i < len(city_data): \n inner = []\n j = 0\n while j < len(nice_lst[i]): #inner layer indexed by time\n inner += [calculate_decay_metric(discont_data[i][j], city_data[i][j], root_lst)] #calculate decay metric\n j += 1\n i += 1\n outer += [inner]\n return np.array(final) #convert to np array and return", "def auto_cali_bias_currents(self):\n msg =json.dumps(dict(command=\"AutoCaliBiasCurrents\", value=True))\n self.talk.send(msg)", "def do_mcp_nonlinearity_calibration():\n no_sample_data_path = ''.join([DATA_PATH, 'run108allevts.h5'])\n f = h5py.File(no_sample_data_path)\n phot = _get_photon_energy(f, 108)\n mcp = np.array(f['Acqiris2']['acq'])\n andor = np.array(f['Andor']['signal']-f['Andor']['reference'])\n # Subtract dark signals:\n dark_calibration = _get_dark_calibration()\n mcp = mcp-dark_calibration['mcp']\n andor = andor-dark_calibration['andor']\n # Take data within (relatively) narrow photon energy range:\n phot_in_range = (phot > 781) & (phot < 782)\n mcp = mcp[phot_in_range]\n andor = andor[phot_in_range]\n # make sure to only take data for which andor doesn't saturate\n mcp_percentile_cutoff = min([percentileofscore(andor, 4000), 99.9])\n mcp_cutoff_value = scoreatpercentile(mcp, mcp_percentile_cutoff)\n mcp_in_range = mcp < mcp_cutoff_value\n mcp = mcp[mcp_in_range]\n andor = andor[mcp_in_range]\n correction_polynomial = np.polyfit(\n mcp, \n andor*(np.mean(mcp)/np.mean(andor)),\n deg=3)\n plt.figure()\n plt.scatter(mcp, andor)\n plt.scatter(np.polyval(correction_polynomial, mcp), andor)\n pickle_on = open(MCP_CALIBRATION_FILE, 'wb')\n pickle.dump(correction_polynomial, pickle_on)\n pickle_on.close()", "def calibrate(raw_data, white_reference, dark_reference):\n # Auto-increment device\n params.device += 1\n\n # Collect the number of wavelengths present\n num_bands = len(white_reference.wavelength_dict)\n den = white_reference.array_data - dark_reference.array_data\n\n # Calibrate using reflectance 
= (raw data - dark reference) / (white reference - dark reference)\n output_num = []\n for i in range(0, raw_data.lines):\n ans = raw_data.array_data[i,].astype(np.float16) - dark_reference.array_data\n output_num.append(ans)\n num = np.stack(output_num, axis=2)\n output_calibrated = []\n for i in range(0, raw_data.lines):\n ans1 = raw_data.array_data[i,] / den\n output_calibrated.append(ans1)\n\n # Reshape into hyperspectral datacube\n scalibrated = np.stack(output_calibrated, axis=2)\n calibrated_array = np.transpose(scalibrated[0], (1, 0, 2))\n calibrated_array[np.where(calibrated_array < 0)] = 0\n\n # Find array min and max values\n max_pixel = float(np.amax(calibrated_array))\n min_pixel = float(np.amin(calibrated_array))\n\n # Make a new class instance with the calibrated hyperspectral image\n calibrated = Spectral_data(array_data=calibrated_array, max_wavelength=raw_data.max_wavelength,\n min_wavelength=raw_data.min_wavelength, max_value=max_pixel, min_value=min_pixel,\n d_type=raw_data.d_type,\n wavelength_dict=raw_data.wavelength_dict, samples=raw_data.samples,\n lines=raw_data.lines, interleave=raw_data.interleave,\n wavelength_units=raw_data.wavelength_units, array_type=raw_data.array_type,\n pseudo_rgb=None, filename=None, default_bands=raw_data.default_bands)\n\n # Make pseudo-rgb image for the calibrated image\n calibrated.pseudo_rgb = _make_pseudo_rgb(spectral_array=calibrated)\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(calibrated.pseudo_rgb)\n elif params.debug == \"print\":\n print_image(calibrated.pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_calibrated_rgb.png\"))\n\n return calibrated", "def accelsift_old(filenm):\n # Set a bunch of parameters from our params.py file\n min_num_DMs = params.min_num_DMs\n low_DM_cutoff = params.low_DM_cutoff\n sifting.sigma_threshold = params.sigma_threshold\n sifting.c_pow_threshold = params.c_pow_threshold\n sifting.known_birds_p = params.known_birds_p\n sifting.known_birds_f = params.known_birds_f\n sifting.r_err = params.r_err\n sifting.short_period = params.short_period\n sifting.long_period = params.long_period\n sifting.harm_pow_cutoff = params.harm_pow_cutoff\n \n # Try to read the .inf files first, as _if_ they are present, all of \n # them should be there. (if no candidates are found by accelsearch \n # we get no ACCEL files... 
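# --- Editor's illustrative sketch (not part of the original snippets) ---
# The hyperspectral calibration above implements, line by line,
#   reflectance = (raw data - dark reference) / (white reference - dark reference)
# and clips negative values to zero. The same correction for a whole datacube,
# assuming plain numpy arrays that broadcast against each other, can be written as:
import numpy as np

def reflectance_calibrate(raw, white_reference, dark_reference):
    """Convert raw counts to reflectance and clip negative values to zero."""
    reflectance = (raw - dark_reference) / (white_reference - dark_reference)
    return np.clip(reflectance, 0.0, None)
# --- end sketch ---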
\n inffiles = glob(\"*.inf\")\n candfiles = glob(\"*ACCEL_\" + str(params.zmax))\n # Check to see if this is from a short search \n if len(re.findall(\"_[0-9][0-9][0-9]M_\" , inffiles[0])):\n dmstrs = [x.split(\"DM\")[-1].split(\"_\")[0] for x in candfiles]\n else:\n dmstrs = [x.split(\"DM\")[-1].split(\".inf\")[0] for x in inffiles]\n dms = map(float, dmstrs)\n dms.sort()\n dmstrs = [\"%.2f\"%x for x in dms]\n\n # Read in all the candidates\n cands = sifting.read_candidates(candfiles)\n # Remove candidates that are duplicated in other ACCEL files\n if len(cands):\n cands = sifting.remove_duplicate_candidates(cands)\n # Remove candidates with DM problems\n if len(cands):\n cands = sifting.remove_DM_problems(cands, min_num_DMs, dmstrs, low_DM_cutoff)\n # Remove candidates that are harmonically related to each other\n # Note: this includes only a small set of harmonics\n if len(cands):\n cands = sifting.remove_harmonics(cands)\n # Write candidates to STDOUT\n if len(cands):\n cands.sort(sifting.cmp_snr)\n sifting.write_candlist(cands, candfilenm=filenm)", "def CalibrateDifficulty(self):\n thinkplot.Clf()\n thinkplot.PrePlot(num=2)\n\n cdf = thinkbayes2.Cdf(self.raw, label='data')\n thinkplot.Cdf(cdf)\n\n efficacies = thinkbayes2.MakeNormalPmf(0, 1.5, 3)\n pmf = self.MakeRawScoreDist(efficacies)\n cdf = thinkbayes2.Cdf(pmf, label='model')\n thinkplot.Cdf(cdf)\n \n thinkplot.Save(root='sat_calibrate',\n xlabel='raw score',\n ylabel='CDF',\n formats=['pdf', 'eps'])", "def Cardelli89(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def subtract_median_bias_residue(DataCube,no_channels=32,time=None,array_size=2048):\n\n hsize = int(DataCube.shape[1]/2)\n if time is None:\n time = np.arange(DataCube.shape[0])\n\n CorrectedCubes = []\n for ChannelCube in np.split(DataCube,np.arange(1,no_channels)*int(array_size/no_channels),axis=2):\n # We have to do the corection independently for each channel. 
\n # Since the median flux levels in each channel will be different.\n\n # Calculate top bias values (Odd and even seperately concatenated)\n TopOddEvenBiases = [robust_median_from_percentiles(tile) for tile in ChannelCube[:,0:hsize,1::2]] +\\\n [robust_median_from_percentiles(tile) for tile in ChannelCube[:,0:hsize,0::2]]\n # Calculate bottom bias values\n BottomOddEvenBiases = [robust_median_from_percentiles(tile) for tile in ChannelCube[:,hsize:,1::2]] +\\\n [robust_median_from_percentiles(tile) for tile in ChannelCube[:,hsize:,0::2]]\n\n # Fit a straight line and calcuate the residue shifts due to bias fluctuations\n TopResidue = fit_slope_1d_residue(np.tile(time,2), TopOddEvenBiases)\n BottomResidue = fit_slope_1d_residue(np.tile(time,2), BottomOddEvenBiases)\n\n TopOddResidues, TopEvenResidues = np.split(TopResidue,2)\n BottomOddResidues, BottomEvenResidues = np.split(BottomResidue,2)\n\n # Apply the residue shift correction to odd and even columns of each channel\n\n CorrChannelCube = ChannelCube.copy()\n x = np.arange(ChannelCube.shape[1])\n\n OddResidueCorrectionSlopes = (TopOddResidues - BottomOddResidues)/(hsize/2 - (hsize + hsize/2))\n OddResidueCorrection = BottomOddResidues[:,np.newaxis] + OddResidueCorrectionSlopes[:,np.newaxis] * (x - (hsize + hsize/2))[np.newaxis,:]\n CorrChannelCube[:,:,1::2] = ChannelCube[:,:,1::2] + OddResidueCorrection[:,:,np.newaxis]\n\n\n EvenResidueCorrectionSlopes = (TopEvenResidues - BottomEvenResidues)/(hsize/2 - (hsize + hsize/2))\n EvenResidueCorrection = BottomEvenResidues[:,np.newaxis] + EvenResidueCorrectionSlopes[:,np.newaxis] * (x - (hsize + hsize/2))[np.newaxis,:]\n CorrChannelCube[:,:,0::2] = ChannelCube[:,:,0::2] + EvenResidueCorrection[:,:,np.newaxis]\n \n CorrectedCubes.append(CorrChannelCube)\n\n return np.dstack(CorrectedCubes)", "def doCalScans(calsrcs,lst_hours,cal_dwell_sec,cal_gap_secs,timeres,freqres):\n print >> fpcmd, \"# *** Dedicated calibration scans for LST: %.2f hrs\" % (lst_hours)\n cals = mwasources.FindCal(calsrcs,lst_hours*15.0) # this returns a list of tuples where each tuple is (mwasource,za)\n if len(cals) == 0:\n print >> sys.stderr, \"WARNING: Unable to find calibrator for start of scan\"\n else:\n # send the first item in the cals list, without the zenith angle arg\n track_srcs.trackSources([x[0] for x in cals], lst_hours, thetime, 1, cal_gap_secs, proj_id, fpcmd=fpcmd, freqs_override=cent_freqs, name_override=creator, dwell_override_secs=cal_dwell,timeres=timeres,freqres=freqres)", "def chain_corrections():\n \n #read the files\n sample_4m=read_sample(map_files('sample_4m'))\n empty_cell_4m=read_sample(map_files('empty_cell_4m'))\n empty_4m=read_sample(map_files('empty_4m'))\n transmission_sample_cell_4m=read_sample(map_files('trans_sample_4m'))\n transmission_empty_cell_4m=read_sample(map_files('trans_empty_cell_4m'))\n blocked_beam_4m=read_sample(map_files('blocked_4m'))\n sensitivity=read_div(map_files('div'))\n #mask=read_sample(map_files('mask'))\n \n #normalize the monitors\n \n sample_4m_norm=monitor_normalize(sample_4m)\n empty_cell_4m_norm=monitor_normalize(empty_cell_4m)\n transmission_sample_cell_4m_norm=monitor_normalize(transmission_sample_cell_4m)\n transmission_empty_cell_4m_norm=monitor_normalize(transmission_empty_cell_4m)\n empty_4m_norm=monitor_normalize(empty_4m)\n blocked_beam_4m_norm=monitor_normalize(blocked_beam_4m)\n \n #calculate q\n sample_4m_norm_q=convert_q(sample_4m_norm)\n empty_cell_4m_norm_q=convert_q(empty_cell_4m)\n blocked_beam_4m_norm_q=convert_q(blocked_beam_4m_norm)\n 
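# --- Editor's illustrative sketch (not part of the original snippets) ---
# The bias-residue correction a little above works channel by channel: the
# detector cube is cut into equal column stripes with np.split and each stripe
# is corrected independently, since the median flux level differs per channel.
# The splitting step on its own looks like this (array shapes are assumptions):
import numpy as np

def split_into_channels(data_cube, no_channels=32):
    """Split a (frames, rows, columns) cube into equal column stripes, one per channel."""
    array_size = data_cube.shape[2]
    edges = np.arange(1, no_channels) * (array_size // no_channels)
    return np.split(data_cube, edges, axis=2)

# Example: a (10, 2048, 2048) cube yields 32 stripes of shape (10, 2048, 64).
# --- end sketch ---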
transmission_sample_cell_4m_norm_q=convert_q(transmission_sample_cell_4m_norm)\n transmission_empty_cell_4m_norm_q=convert_q(transmission_empty_cell_4m_norm)\n empty_4m_norm_q=convert_q(empty_4m_norm)\n \n \n print 'converted'\n #convert flatness\n sample_4m_solid=correct_solid_angle(sample_4m_norm_q)\n empty_cell_4m_solid=correct_solid_angle(empty_cell_4m_norm_q)\n blocked_beam_4m_solid=correct_solid_angle(blocked_beam_4m_norm_q)\n transmission_sample_cell_4m_solid=correct_solid_angle(transmission_sample_cell_4m_norm_q)\n transmission_empty_cell_4m_solid=correct_solid_angle(transmission_empty_cell_4m_norm_q)\n empty_4m_solid=correct_solid_angle(empty_4m_norm_q)\n \n \n #calculate transmission\n coord_left=(60,60)\n coord_right=(70,70)\n transmission_sample_cell_4m_rat=generate_transmission(transmission_sample_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n transmission_empty_cell_4m_rat=generate_transmission(transmission_empty_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n print 'Sample transmission= {} (IGOR Value = 0.724)'.format(transmission_sample_cell_4m_rat)\n print 'Empty Cell transmission= {} (IGOR Value = 0.929)'.format(transmission_empty_cell_4m_rat)\n print 'hi'\n \n #Initial Correction -- Not with the sub/mult tools,\n #SAM = sample_4m_solid.data\n #print SAM.x\n #EMP = empty_4m_solid.data\n #print \"EMP: \"\n #print EMP.x\n #BGD = blocked_beam_4m_solid.data\n #print \"BGD\"\n #print BGD.x\n #Tsam = transmission_sample_cell_4m_rat\n #Temp = transmission_empty_cell_4m_rat\n #COR1 = SAM.__sub__(BGD)\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n \n SAM = sample_4m_solid\n print SAM.data.x\n EMP = empty_4m_solid\n print \"EMP: \"\n print EMP.data.x\n BGD = blocked_beam_4m_solid\n print \"BGD:\"\n print BGD.data.x\n Tsam = transmission_sample_cell_4m_rat\n Temp = transmission_empty_cell_4m_rat\n print \"COR1:\"\n COR1 = SAM.__sub1__(BGD)\n print COR1.data.x #check=works\n #-----Problems Here-------\n print \"COR2:\"\n COR2 = (EMP.__sub1__(BGD)) #check=works\n print COR2.data.x\n print \"COR3:\"\n #AJJ - __mul__ not working because Tsam and Temp are Measurement instances and not simply floats. 
See above.\n COR3 = COR2.__mul__(Tsam/Temp) #mul not working\n print COR3.data.x\n #COR = COR1.__sub1__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.data.x", "def shiftDetector(frame):\n \n norm = frame/np.max(frame)#(2**16)\n anchorCol = norm[:,int((frame.shape[1])/2)]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame.shape[1])]\n \n return shifts", "def change_referential_spectrum(\n freqs,\n wavenumbers_circ,\n rotation_speed,\n spectrum,\n atol=1e-9,\n freqs_new=np.array([]),\n I1=np.array([]),\n Irf_un=np.array([]),\n is_double_f0=False,\n atol_freq=1e-6,\n):\n Nf = freqs.size\n Nr = wavenumbers_circ.size\n # Get number of slices depending on input spectrum shape\n if spectrum.ndim > 2:\n Nslice = spectrum.shape[2]\n is_squeeze = False\n else:\n Nslice = 1\n is_squeeze = True\n spectrum = spectrum[:, :, None]\n\n if freqs_new.size == 0:\n # Calculate new frequency values by shifting frequencies\n Xwavenb, Xfreqs = np.meshgrid(wavenumbers_circ, freqs)\n Xfreqs_new = Xfreqs + Xwavenb * rotation_speed / 60\n\n # Get unique frequencies\n freqs_new, If0 = unique_tol(\n Xfreqs_new.ravel(\"C\"),\n return_inverse=True,\n axis=0,\n tol=atol_freq,\n is_abs_tol=True,\n )\n\n # Get frequency/wavenumber_circ position in new matrix [Nf_new, Nr]\n Ir0 = np.tile(np.arange(Nr, dtype=int), Nf)\n Irf = np.concatenate((If0[:, None], Ir0[:, None]), axis=1)\n\n # Get unique couples of frequency/wavenumber to sum on same harmonics\n Irf_un, I1 = np.unique(Irf, return_inverse=True, axis=0)\n\n # Number of frequencies in new referential\n Nf_new = freqs_new.size\n\n if is_double_f0:\n # Multiply by two spectrum components which have f=0, r!=0\n jf0 = np.abs(freqs) < 1e-4\n jr = wavenumbers_circ != 0\n if any(jf0) and any(jr):\n spectrum[jf0, jr, :] = 2 * spectrum[jf0, jr, :]\n\n # Calculate spectrum amplitude in new referential by summing all contributions\n # which have the same orders and wavenumber for each slice\n spectrum_new = np.zeros((Nf_new, Nr, Nslice), dtype=spectrum.dtype)\n for k in range(Nslice):\n # Reshape values for kth slice columnwise\n amp_k = spectrum[:, :, k].ravel(\"C\")\n # Sum all contributions which have the same orders and wavenumber as given by I1\n if spectrum.dtype == complex:\n # bincount fails on complex numbers, real and imaginary parts must be treated separately\n amp_new_k = np.bincount(I1, weights=amp_k.real) + 1j * np.bincount(\n I1, weights=amp_k.imag\n )\n else:\n amp_new_k = np.bincount(I1, weights=amp_k)\n # Store amplitudes at new frequency/wavenumber positions\n spectrum_new[Irf_un[:, 0], Irf_un[:, 1], k] = amp_new_k\n\n if is_double_f0 and any(jf0) and any(jr):\n # Divide by two spectrum components which have f=0, r!=0\n spectrum[jf0, jr, :] = spectrum[jf0, jr, :] / 2\n\n if atol > 0:\n # Filter harmonics that are below input absolute tolerance\n Imask = (\n np.sum(np.sum(np.abs(spectrum_new), axis=2), axis=1)\n > np.max(np.abs(spectrum_new)) * atol\n )\n spectrum_new = spectrum_new[Imask, ...]\n freqs_new = freqs_new[Imask]\n\n if is_squeeze:\n # Squeeze spectrum back to 2D\n spectrum_new = spectrum_new[:, :, 0]\n\n return spectrum_new, freqs_new, I1, Irf_un", "def calibrer(self):\n self._angle_courant = self._angle_initial\n self.angle(self._angle_initial)", "def runCalFlat(lst, hband=False, darkLst=None, rootFolder='', nlCoef=None, satCounts=None, 
BPM=None, distMapLimitsFile='', plot=True, nChannel=32, nRowsAvg=0,rowSplit=1,nlSplit=32, combSplit=32,bpmCorRng=100, crReject=False, skipObsinfo=False,winRng=51, polyFitDegree=3, imgSmth=5,nlFile='',bpmFile='', satFile='',darkFile='',flatCutOff=0.1,flatSmooth=0, logfile=None, gain=1., ron=None, dispAxis=0,limSmth=20, ask=True, obsCoords=None,satSplit=32, centGuess=None, flatCor=False, flatCorFile=''):\n\n colorama.init()\n \n plt.ioff()\n\n t0 = time.time()\n \n #create processed directory, in case it doesn't exist\n wifisIO.createDir('processed')\n wifisIO.createDir('quality_control')\n\n if hband:\n print('*** WORKING ON H-BAND DATA ***')\n \n #create processed directory, in case it doesn't exist\n wifisIO.createDir('processed')\n\n if (plot):\n wifisIO.createDir('quality_control')\n\n procFlux = []\n procSigma = []\n procSatFrame = []\n\n #go through list and process each file individually\n #************\n #eventually need to add capability to create master flat from groups\n #************\n\n for lstNum in range(len(lst)):\n if (lst.ndim>1):\n folder = lst[lstNum,0]\n else:\n folder = lst[lstNum]\n\n t1 = time.time()\n\n savename = 'processed/'+folder\n\n #first check master flat and limits exists\n \n if(os.path.exists(savename+'_flat.fits') and os.path.exists(savename+'_flat_limits.fits') and os.path.exists(savename+'_flat_slices.fits') and os.path.exists(savename+'_flat_slices_norm.fits')):\n cont = 'n'\n if ask:\n cont = wifisIO.userInput('All processed flat field files already exists for ' +folder+', do you want to continue processing (y/n)?')\n else:\n cont = 'y'\n \n if (cont.lower() == 'y'):\n print('*** Working on folder ' + folder + ' ***')\n\n if (os.path.exists(savename+'_flat.fits')):\n cont = 'n'\n cont = wifisIO.userInput('Processed flat field file already exists for ' +folder+', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n print('Reading image '+savename+'_flat.fits instead')\n flatImgs, hdr= wifisIO.readImgsFromFile(savename+'_flat.fits')\n flatImg, sigmaImg, satFrame = flatImgs\n if (type(hdr) is list):\n hdr = hdr[0]\n contProc2=False\n else:\n contProc2=True\n else:\n contProc2=True\n \n if contProc2:\n flatImg, sigmaImg, satFrame, hdr = processRamp.auto(folder, rootFolder,savename+'_flat.fits', satCounts, nlCoef, BPM, nChannel=nChannel, rowSplit=rowSplit, nlSplit=nlSplit, combSplit=combSplit, crReject=crReject, bpmCorRng=bpmCorRng, skipObsinfo=skipObsinfo, nRows=nRowsAvg, rampNum=None, nlFile=nlFile, satFile=satFile, bpmFile=bpmFile, gain=gain, ron=ron, logfile=logfile, obsCoords=obsCoords, avgAll=True, satSplit=satSplit)\n \n #carry out dark subtraction\n if darkLst is not None and darkLst[0] is not None:\n print('Subtracting dark ramp')\n if len(darkLst)>1:\n dark, darkSig = darkLst[:2]\n sigmaImg = np.sqrt(sigmaImg**2 + darkSig**2)\n else:\n dark = darkLst[0]\n logfile.write('*** Warning: No uncertainty associated with dark image ***\\n')\n print(colorama.Fore.RED+'*** WARNING: No uncertainty associated with dark image ***'+colorama.Style.RESET_ALL)\n\n flatImg -= dark\n hdr.add_history('Dark image subtracted using file:')\n hdr.add_history(darkFile)\n if logfile is not None:\n logfile.write('Subtracted dark image using file:\\n')\n logfile.write(darkFile+'\\n')\n else:\n print(colorama.Fore.RED+'*** WARNING: No dark image provided, or file does not exist ***'+colorama.Style.RESET_ALL)\n if logfile is not None:\n logfile.write('*** WARNING: No dark image provided, or file ' + str(darkFile)+' does not exist ***')\n\n if 
os.path.exists(savename+'_flat_limits.fits'):\n cont = wifisIO.userInput('Limits file already exists for ' +folder+ ', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n print('Reading limits '+savename+'_flat_limits.fits instead')\n finalLimits, limitsHdr= wifisIO.readImgsFromFile(savename+'_flat_limits.fits')\n shft = limitsHdr['LIMSHIFT']\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Finding slice limits and extracting slices')\n\n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n #find limits of each slice with the reference pixels, but the returned limits exclude them\n limits = slices.findLimits(flatImg, dispAxis=dispAxis, winRng=winRng, imgSmth=imgSmth, limSmth=limSmth, rmRef=True,centGuess=centGuess)\n\n if logfile is not None:\n logfile.write('Identified slice limits using the following parameters:\\n')\n logfile.write('dispAxis: '+str(dispAxis)+'\\n')\n logfile.write('winRng: ' + str(winRng)+'\\n')\n logfile.write('imgSmth: ' + str(imgSmth)+'\\n')\n logfile.write('limSmth: ' + str(limSmth)+'\\n')\n \n if hband:\n print('Using suitable region of detector to determine flat limits')\n if logfile is not None:\n logfile.write('Using suitable region of detector to determine flat limits:\\n')\n\n #only use region with suitable flux\n if dispAxis == 0:\n flatImgMed = np.nanmedian(flatImg[4:-4,4:-4], axis=1)\n else:\n flatImgMed = np.nanmedian(flatImg[4:-4,4:-4], axis=0)\n \n flatImgMedGrad = np.gradient(flatImgMed)\n medMax = np.nanargmax(flatImgMed)\n lim1 = np.nanargmax(flatImgMedGrad[:medMax])\n lim2 = np.nanargmin(flatImgMedGrad[medMax:])+medMax\n\n if logfile is not None:\n logfile.write('Using following detector limits to set slice limits:\\n')\n logfile.write(str(lim1)+ ' ' + str(lim2)+'\\n')\n \n polyLimits = slices.polyFitLimits(limits, degree=2, sigmaClipRounds=2, constRegion=[lim1,lim2])\n else:\n #get smoother limits, if desired, using polynomial fitting\n polyLimits = slices.polyFitLimits(limits, degree=polyFitDegree, sigmaClipRounds=2)\n\n if logfile is not None:\n logfile.write('Fit polynomial to slice edge traces using:\\n')\n logfile.write('Polynomial degree: ' + str(polyFitDegree)+'\\n')\n logfile.write('sigmaClipRounds: ' + str(2)+'\\n')\n\n if hband:\n logfile.write('Only used pixels between ' + str(lim1) +' and ' + str(lim2)+'\\n')\n \n if os.path.exists(distMapLimitsFile):\n print('Finding slice limits relative to distortion map file')\n hdr.add_history('Slice limits are relative to the following file:')\n hdr.add_history(distMapLimitsFile)\n distMapLimits = wifisIO.readImgsFromFile(distMapLimitsFile)[0]\n if logfile is not None:\n logfile.write('Finding slice limits relative to distortion map file:\\n')\n logfile.write(distMapLimitsFile+'\\n')\n\n if hband:\n shft = int(np.nanmedian(polyLimits[1:-1,lim1:lim2+1] - distMapLimits[1:-1,lim1:lim2+1]))\n else:\n shft = int(np.nanmedian(polyLimits[1:-1,:] - distMapLimits[1:-1,:]))\n \n if logfile is not None:\n logfile.write('Median pixel shift using all inner edge limits is ' + str(shft)+'\\n')\n finalLimits = distMapLimits\n else:\n finalLimits = polyLimits\n shft = 0\n\n if logfile is not None:\n logfile.write('*** WARNING:No slice limits provided for distortion map. 
Finding independent slice limits ***\\n')\n logfile.write(distMapLimitsFile+'\\n')\n \n \n #write distMapLimits + shft to file\n hdr.set('LIMSHIFT',shft, 'Limits shift relative to Ronchi slices')\n hdr.add_comment('File contains the edge limits for each slice')\n\n wifisIO.writeFits(finalLimits.astype('float32'),savename+'_flat_limits.fits', hdr=hdr, ask=False)\n\n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n #save figures of tracing results for quality control purposes\n if (plot):\n print('Plotting results')\n plt.ioff()\n wifisIO.createDir('quality_control')\n \n pdfName = 'quality_control/'+folder+'_flat_slices_traces.pdf'\n with PdfPages(pdfName) as pdf:\n fig = plt.figure()\n #med1= np.nanmedian(flatImg)\n interval = ZScaleInterval()\n lims = interval.get_limits(flatImg[4:-4,4:-4])\n #plt.imshow(flatImg[4:-4,4:-4], aspect='auto', cmap='jet', clim=[0,2.*med1], origin='lower')\n plt.imshow(flatImg[4:-4,4:-4], aspect='auto', cmap='jet', clim=lims, origin='lower')\n \n plt.xlim=(0,2040)\n plt.colorbar()\n for l in range(limits.shape[0]):\n if dispAxis==0:\n plt.plot(limits[l], np.arange(limits.shape[1]),'k', linewidth=1) #drawn limits\n plt.plot(np.clip(finalLimits[l]+shft,0, flatImg[4:-4,4:-4].shape[0]-1), np.arange(limits.shape[1]),'r--', linewidth=1) #shifted ronchi limits, if provided, or polynomial fit\n else:\n plt.plot(np.arange(limits.shape[1]),limits[l],'k', linewidth=1) #drawn limits\n plt.plot(np.arange(limits.shape[1]),np.clip(finalLimits[l]+shft,0, flatImg[4:-4,4:-4].shape[0]-1),'r--', linewidth=1) #shifted ronchi limits\n\n if hband:\n if dispAxis==0:\n plt.plot([0,flatImg[4:-4,4:-4].shape[1]-1],[lim1,lim1],'b:',linewidth=1)\n plt.plot([0,flatImg[4:-4,4:-4].shape[1]-1],[lim2,lim2],'b:',linewidth=1)\n else:\n plt.plot([lim1,lim1],[0,flatImg[4:-4,4:-4].shape[1]-1],'b:',linewidth=1)\n plt.plot([lim2,lim2],[0,flatImg[4:-4,4:-4].shape[1]-1],'b:',linewidth=1)\n\n plt.tight_layout()\n pdf.savefig()\n plt.close(fig)\n\n #get rid of reference pixels\n flatImg = flatImg[4:-4, 4:-4]\n sigmaImg = sigmaImg[4:-4, 4:-4]\n satFrame = satFrame[4:-4,4:-4]\n\n if logfile is not None:\n logfile.write('Removing reference pixels\\n')\n \n if os.path.exists(savename+'_flat_slices.fits'):\n cont='n'\n cont = wifisIO.userInput('Flat slices file already exists for ' +folder+ ', do you want to continue processing (y/n)?')\n\n if (not cont.lower() == 'y'):\n print('Reading slices file '+savename+'_flat_slices.fits instead')\n flatSlices = wifisIO.readImgsFromFile(savename+'_flat_slices.fits')[0]\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Extracting slices') \n #now extract the individual slices\n flatSlices = slices.extSlices(flatImg, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in flatSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted flat slices\\n')\n \n #extract uncertainty slices\n sigmaSlices = slices.extSlices(sigmaImg, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in sigmaSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted uncertainty slices\\n')\n \n #extract saturation slices\n satSlices = slices.extSlices(satFrame, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in satSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted saturation info slices\\n')\n \n #write slices to file\n hdr.add_comment('File contains each slice 
image as separate extension')\n wifisIO.writeFits(flatSlices+sigmaSlices+satSlices,savename+'_flat_slices.fits',hdr=hdr, ask=False)\n \n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n if os.path.exists(savename+'_flat_slices_norm.fits'):\n cont = 'n'\n cont = wifisIO.userInput('Normalized flat slices file already exists for ' +folder+', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Getting normalized flat field')\n #now get smoothed and normalized response function\n flatNorm = slices.getResponseAll(flatSlices, flatSmooth, flatCutOff)\n for slc in flatNorm:\n slc = slc.astype('float32')\n \n hdr.add_comment('File contains the normalized flat-field response function')\n hdr.add_history('Smoothed using Gaussian with 1-sigma width of ' + str(flatSmooth) + ' pixels')\n hdr.add_history('Normalized cutoff threshold is ' + str(flatCutOff))\n\n if logfile is not None:\n logfile.write('Computed normalized response function from flat slices using the following parameters:\\n')\n logfile.write('flatSmooth: ' + str(flatSmooth)+'\\n')\n logfile.write('flatCutoff: ' + str(flatCutOff)+'\\n')\n \n sigmaNorm = slices.ffCorrectAll(sigmaSlices, flatNorm)\n for slc in sigmaNorm:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Computed uncertainties for normalized response function for each slice\\n')\n\n if flatCor:\n print('Correcting flat field response function')\n logfile.write('Correcting flat field response function using file:\\n')\n logfile.write(flatCorFile+'\\n')\n \n flatCorSlices = wifisIO.readImgsFromFile(flatCorFile)[0]\n flatNorm = slices.ffCorrectAll(flatNorm, flatCorSlices)\n hdr.add_history('Corrected flat field response function using file:')\n hdr.add_history(flatCorFile)\n\n if len(flatCorSlices)>nSlices:\n hdr.add_history('Uncertainties include correction')\n sigmaNorm = wifisUncertainties.multiplySlices(flatNorm,sigmaNorm,flatCorSlices[:nSlices],flatCorSlices[nSlices:2*nSlices])\n\n else:\n hdr.add_history('Uncertainties do not include correction')\n logfile.write('*** WARNING: Response correction does not include uncertainties***\\n')\n\n else:\n #print(colorama.Fore.RED+'*** WARNING: Flat field correction file does not exist, skipping ***'+colorama.Style.RESET_ALL)\n \n #logfile.write('*** WARNING: Flat field correction file does not exist, skipping ***\\n')\n print('Flat field correction file off...skipping')\n \n logfile.write('Flat field correction file off...skipping\\n')\n \n #write normalized images to file\n wifisIO.writeFits(flatNorm + sigmaNorm + satSlices,savename+'_flat_slices_norm.fits',hdr=hdr, ask=False)\n print('*** Finished processing ' + folder + ' in ' + str(time.time()-t1) + ' seconds ***')\n \n return", "def spec_flex_shift(obj_skyspec, arx_skyspec, mxshft=20):\n\n # TODO None of these routines should have dependencies on XSpectrum1d!\n\n # Determine the brightest emission lines\n msgs.warn(\"If we use Paranal, cut down on wavelength early on\")\n arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig \\\n = arc.detect_lines(arx_skyspec.flux.value)\n obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj \\\n = arc.detect_lines(obj_skyspec.flux.value)\n\n # Keep only 5 brightest amplitude lines (xxx_keep is array of\n # indices within arx_w of the 5 brightest)\n arx_keep = np.argsort(arx_amp[arx_w])[-5:]\n 
obj_keep = np.argsort(obj_amp[obj_w])[-5:]\n\n # Calculate wavelength (Angstrom per pixel)\n arx_disp = np.append(arx_skyspec.wavelength.value[1]-arx_skyspec.wavelength.value[0],\n arx_skyspec.wavelength.value[1:]-arx_skyspec.wavelength.value[:-1])\n obj_disp = np.append(obj_skyspec.wavelength.value[1]-obj_skyspec.wavelength.value[0],\n obj_skyspec.wavelength.value[1:]-obj_skyspec.wavelength.value[:-1])\n\n # Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need\n # this? can just use sigmas\n arx_idx = (arx_cent+0.5).astype(np.int)[arx_w][arx_keep] # The +0.5 is for rounding\n arx_res = arx_skyspec.wavelength.value[arx_idx]/\\\n (arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep])\n obj_idx = (obj_cent+0.5).astype(np.int)[obj_w][obj_keep] # The +0.5 is for rounding\n obj_res = obj_skyspec.wavelength.value[obj_idx]/ \\\n (obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])\n\n if not np.all(np.isfinite(obj_res)):\n msgs.warn('Failed to measure the resolution of the object spectrum, likely due to error '\n 'in the wavelength image.')\n return None\n msgs.info(\"Resolution of Archive={0} and Observation={1}\".format(np.median(arx_res),\n np.median(obj_res)))\n\n # Determine sigma of gaussian for smoothing\n arx_sig2 = np.power(arx_disp[arx_idx]*arx_wid[arx_w][arx_keep], 2)\n obj_sig2 = np.power(obj_disp[obj_idx]*obj_wid[obj_w][obj_keep], 2)\n\n arx_med_sig2 = np.median(arx_sig2)\n obj_med_sig2 = np.median(obj_sig2)\n\n if obj_med_sig2 >= arx_med_sig2:\n smooth_sig = np.sqrt(obj_med_sig2-arx_med_sig2) # Ang\n smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])\n arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))\n else:\n msgs.warn(\"Prefer archival sky spectrum to have higher resolution\")\n smooth_sig_pix = 0.\n msgs.warn(\"New Sky has higher resolution than Archive. Not smoothing\")\n #smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2)\n\n #Determine region of wavelength overlap\n min_wave = max(np.amin(arx_skyspec.wavelength.value), np.amin(obj_skyspec.wavelength.value))\n max_wave = min(np.amax(arx_skyspec.wavelength.value), np.amax(obj_skyspec.wavelength.value))\n\n #Smooth higher resolution spectrum by smooth_sig (flux is conserved!)\n# if np.median(obj_res) >= np.median(arx_res):\n# msgs.warn(\"New Sky has higher resolution than Archive. 
Not smoothing\")\n #obj_sky_newflux = ndimage.gaussian_filter(obj_sky.flux, smooth_sig)\n# else:\n #tmp = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)\n# arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))\n #arx_sky.flux = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)\n\n # Define wavelengths of overlapping spectra\n keep_idx = np.where((obj_skyspec.wavelength.value>=min_wave) &\n (obj_skyspec.wavelength.value<=max_wave))[0]\n #keep_wave = [i for i in obj_sky.wavelength.value if i>=min_wave if i<=max_wave]\n\n #Rebin both spectra onto overlapped wavelength range\n if len(keep_idx) <= 50:\n msgs.warn(\"Not enough overlap between sky spectra\")\n return None\n\n # rebin onto object ALWAYS\n keep_wave = obj_skyspec.wavelength[keep_idx]\n arx_skyspec = arx_skyspec.rebin(keep_wave)\n obj_skyspec = obj_skyspec.rebin(keep_wave)\n # Trim edges (rebinning is junk there)\n arx_skyspec.data['flux'][0,:2] = 0.\n arx_skyspec.data['flux'][0,-2:] = 0.\n obj_skyspec.data['flux'][0,:2] = 0.\n obj_skyspec.data['flux'][0,-2:] = 0.\n\n # Normalize spectra to unit average sky count\n norm = np.sum(obj_skyspec.flux.value)/obj_skyspec.npix\n obj_skyspec.flux = obj_skyspec.flux / norm\n norm2 = np.sum(arx_skyspec.flux.value)/arx_skyspec.npix\n arx_skyspec.flux = arx_skyspec.flux / norm2\n if norm < 0:\n msgs.warn(\"Bad normalization of object in flexure algorithm\")\n msgs.warn(\"Will try the median\")\n norm = np.median(obj_skyspec.flux.value)\n if norm < 0:\n msgs.warn(\"Improper sky spectrum for flexure. Is it too faint??\")\n return None\n if norm2 < 0:\n msgs.warn('Bad normalization of archive in flexure. You are probably using wavelengths '\n 'well beyond the archive.')\n return None\n\n # Deal with bad pixels\n msgs.work(\"Need to mask bad pixels\")\n\n # Deal with underlying continuum\n msgs.work(\"Consider taking median first [5 pixel]\")\n everyn = obj_skyspec.npix // 20\n bspline_par = dict(everyn=everyn)\n mask, ct = utils.robust_polyfit(obj_skyspec.wavelength.value, obj_skyspec.flux.value, 3,\n function='bspline', sigma=3., bspline_par=bspline_par)\n obj_sky_cont = utils.func_val(ct, obj_skyspec.wavelength.value, 'bspline')\n obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont\n mask, ct_arx = utils.robust_polyfit(arx_skyspec.wavelength.value, arx_skyspec.flux.value, 3,\n function='bspline', sigma=3., bspline_par=bspline_par)\n arx_sky_cont = utils.func_val(ct_arx, arx_skyspec.wavelength.value, 'bspline')\n arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont\n\n # Consider sharpness filtering (e.g. LowRedux)\n msgs.work(\"Consider taking median first [5 pixel]\")\n\n #Cross correlation of spectra\n #corr = np.correlate(arx_skyspec.flux, obj_skyspec.flux, \"same\")\n corr = np.correlate(arx_sky_flux, obj_sky_flux, \"same\")\n\n #Create array around the max of the correlation function for fitting for subpixel max\n # Restrict to pixels within maxshift of zero lag\n lag0 = corr.size//2\n #mxshft = settings.argflag['reduce']['flexure']['maxshift']\n max_corr = np.argmax(corr[lag0-mxshft:lag0+mxshft]) + lag0-mxshft\n subpix_grid = np.linspace(max_corr-3., max_corr+3., 7)\n\n #Fit a 2-degree polynomial to peak of correlation function. 
JFH added this if/else to not crash for bad slits\n if np.any(np.isfinite(corr[subpix_grid.astype(np.int)])):\n fit = utils.func_fit(subpix_grid, corr[subpix_grid.astype(np.int)], 'polynomial', 2)\n success = True\n max_fit = -0.5 * fit[1] / fit[2]\n else:\n fit = utils.func_fit(subpix_grid, 0.0*subpix_grid, 'polynomial', 2)\n success = False\n max_fit = 0.0\n msgs.warn('Flexure compensation failed for one of your objects')\n\n #Calculate and apply shift in wavelength\n shift = float(max_fit)-lag0\n msgs.info(\"Flexure correction of {:g} pixels\".format(shift))\n #model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0]\n\n return dict(polyfit=fit, shift=shift, subpix=subpix_grid,\n corr=corr[subpix_grid.astype(np.int)], sky_spec=obj_skyspec, arx_spec=arx_skyspec,\n corr_cen=corr.size/2, smooth=smooth_sig_pix, success=success)", "def photometric_calibration():\n pass", "def imu_calibrate(self):\n self.imu.calibrate_accelerometer()\n self.imu.calibrate_gyroscope()\n self.imu.calibrate_magnetometer()", "def calibrate(self):\n\n self.update_message('Calibrating platform...')\n\n self.calibrate_platform()\n\n self.update_message('Platform Calibrated.')\n\n # calibrate arm x axis\n self.update_message('Calibrating x axis...')\n calibrated = self.calibrate_arm(0)\n\n if not calibrated:\n self.update_message('ERROR: Could not calibrate x axis.')\n return 0\n else:\n self.update_message('x axis calibrated.')\n\n # calibrate arm y axis\n self.update_message('Calibrating y axis...')\n calibrated = self.calibrate_arm(1)\n\n if not calibrated:\n self.update_message('ERROR: Could not calibrate y axis.')\n return 0\n else:\n self.update_message('y axis calibrated.')\n\n # calibrate arm z axis\n self.update_message('Calibrating z axis...')\n calibrated = self.calibrate_arm(2)\n\n if not calibrated:\n self.update_message('ERROR: Could not calibrate z axis.')\n return 0\n else:\n self.update_message('z axis calibrated.\\n'\n 'Autocalibration done.\\n'\n 'Accuracy: {}'.format(self.matrix_accuracy()))\n\n # Getting the direction to withdraw pipette along x axis\n self.get_withdraw_sign()\n self.inv_mat = np.linalg.inv(self.mat)\n self.calibrated = 1\n self.cam.click_on_window = True\n self.save_calibration()\n return 1" ]
[ "0.61754125", "0.56453633", "0.5504699", "0.5399412", "0.5393503", "0.5272751", "0.5264761", "0.5158035", "0.5157666", "0.51486784", "0.51470107", "0.5123972", "0.51041424", "0.50907195", "0.5079858", "0.5070017", "0.5054352", "0.50522673", "0.5047045", "0.50407064", "0.50199366", "0.5008871", "0.50031376", "0.49525067", "0.49328467", "0.49030977", "0.48967198", "0.48689806", "0.48678187", "0.48554978", "0.48424947", "0.48356003", "0.48306268", "0.48162463", "0.4814167", "0.48080385", "0.4788236", "0.47763303", "0.47502136", "0.47498015", "0.47392693", "0.4737643", "0.47253782", "0.47225344", "0.4715541", "0.47057146", "0.47009188", "0.46974757", "0.46908513", "0.46840987", "0.46665546", "0.46615463", "0.46469462", "0.4639904", "0.4633269", "0.46320447", "0.46272954", "0.46221057", "0.46208435", "0.46145582", "0.4609191", "0.46000594", "0.45935228", "0.4586281", "0.4578419", "0.4555459", "0.45553294", "0.45502758", "0.45500466", "0.45491177", "0.45465744", "0.45447707", "0.45351386", "0.45336217", "0.45330477", "0.45324272", "0.45212656", "0.4519654", "0.4518157", "0.45098394", "0.45064667", "0.45034695", "0.44982448", "0.44954705", "0.4492937", "0.44854733", "0.4484588", "0.44834188", "0.44819427", "0.44742665", "0.4473452", "0.44728452", "0.44681883", "0.44671872", "0.4466575", "0.4464117", "0.4461446", "0.4453391", "0.44527262", "0.444891" ]
0.6538406
0
Map each unique spin link to all of its corresponding peaks.

NOESY peak lists represent spin links between Hydrogen atoms. Whether 2D, 3D or 4D, each peak in a NOESY peak list has exactly two Hydrogen spins. Here, a spin link is represented by a frozenset containing the spin.assignment tuples for each Hydrogen atom. This function returns a dictionary mapping each unique spin link to a list of the Peaks in the PeakList that contain those two Hydrogen atoms.

Examples
>>> spin_link_dict = peaklist.spin_link_dict()
>>> spin_link, peaks = spin_link_dict.popitem()
>>> spin_link
frozenset([Assignment(res_type='Q', res_num=21, atom='HN'), Assignment(res_type='G', res_num=17, atom='HN')])
>>> print(peaks[0])
Peak(spins=[
    Spin(res_type=G, res_num=17, atom=HN),
    Spin(res_type=G, res_num=17, atom=N),
    Spin(res_type=Q, res_num=21, atom=HN)])
>>> print(peaks[1])
Peak(spins=[
    Spin(res_type=Q, res_num=21, atom=HN),
    Spin(res_type=Q, res_num=21, atom=N),
    Spin(res_type=G, res_num=17, atom=HN)])

Returns
def get_spin_link_dict(peaklist):
    spin_link_dict = {}
    for peak in peaklist:
        spins = [spin for spin in peak
                 if spin.atom is not None and spin.atom[0] == 'H']
        if len(spins) != 2:
            err = ('expected 2 Hydrogens in each peak, '
                   'found %d' % len(spins))
            raise ValueError(err)
        link = frozenset(spin.assignment for spin in spins)
        spin_link_dict.setdefault(link, []).append(peak)
    return spin_link_dict
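A minimal usage sketch for the function above, assuming stand-in objects: the Assignment, Spin and Peak names below are illustrative placeholders, not the peak-list library's own types. Each spin only needs an atom name and an assignment tuple, and a peak is modelled as an iterable of spins.

from collections import namedtuple

# Illustrative stand-ins (assumed for this sketch, not from the original library).
Assignment = namedtuple('Assignment', ['res_type', 'res_num', 'atom'])

class Spin(object):
    def __init__(self, res_type, res_num, atom):
        self.atom = atom
        self.assignment = Assignment(res_type, res_num, atom)

class Peak(list):
    """A peak is modelled here as a plain list of Spin objects."""

peaklist = [
    Peak([Spin('G', 17, 'HN'), Spin('G', 17, 'N'), Spin('Q', 21, 'HN')]),
    Peak([Spin('Q', 21, 'HN'), Spin('Q', 21, 'N'), Spin('G', 17, 'HN')]),
]

links = get_spin_link_dict(peaklist)
# Both peaks contain the same two Hydrogen assignments, so they share one spin link.
assert len(links) == 1
link, peaks = links.popitem()
print(link)        # frozenset of the two HN Assignment tuples
print(len(peaks))  # 2

With real peak-list objects the call is the same; only the construction of peaklist differs.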
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mod_map(mods, plinkmap):\n modmap = {}\n for chrom in plinkmap:\n if chrom not in modmap:\n modmap[chrom] = []\n markers = plinkmap[chrom]\n modif = mods[chrom]\n for i, m in enumerate(modif):\n if m == 'I':\n p2 = float(markers[i+1][3])\n p1 = float(markers[i-1][3])\n pk = float(markers[i][3])\n g2 = float(markers[i+1][2])\n g1 = float(markers[i-1][2])\n d = (p2 - pk) / (p2 - p1)\n gu = g2 - d*(g2 - g1)\n if g2 == gu:\n gi = str(round((g2 + g1)/2, ndigits=2))\n else:\n gi = str(round(gu, ndigits=2))\n modmar = [markers[i][0], markers[i][1], gi, markers[i][3]]\n elif m == 'J':\n jgpos = marker[i][2] + '1'\n modmar = [markers[i][0], markers[i][1], jgpos, markers[i][3]]\n else:\n modmar = markers[i]\n modmap[chrom].append(modmar)\n return modmap", "def make_links_dict(pairs_dict):\n links_dict = {}\n for end1 in pairs_dict:\n \n if (end1 in pairs_dict) and (len(pairs_dict[end1])) > 0:\n best_pair = max(pairs_dict[end1], key = pairs_dict[end1].get)\n \n if best_pair in pairs_dict and len(pairs_dict[best_pair]) > 0:\n \n if max(pairs_dict[best_pair], key = pairs_dict[best_pair].get) == end1:\n links_dict[end1] = best_pair\n links_dict[best_pair] = end1\n return links_dict", "def anchors(self):\n dims = self.dims\n anchors = []\n for peak in self:\n possible_anchors = []\n for combination in combinations(range(dims), 2):\n spins = [peak[i] for i in combination]\n if any(s.res_num is None or s.atom is None for s in spins):\n continue\n res_nums = [spin.res_num for spin in spins]\n atoms = [spin.atom for spin in spins]\n elements = [atom[0] for atom in atoms]\n positions = [atom[1:] for atom in atoms]\n same_res_num = res_nums[0] == res_nums[1]\n valid_pairs = [set(('H', 'N')), set(('H', 'C'))]\n is_proton_heavy_pair = set(elements) in valid_pairs\n same_position = all(c[0] == c[1] for c in zip(*positions))\n if same_res_num and is_proton_heavy_pair and same_position:\n if '' in positions and set(elements) != set(('H', 'N')):\n # One of the atom names must have been 'H', 'N' or 'C'\n # Of these, only the amide proton anchor is valid\n continue\n if elements[0] == 'H':\n possible_anchors.append(combination)\n else:\n possible_anchors.append(combination[::-1])\n if len(possible_anchors) > 1:\n pa_sets = [set(pa) for pa in possible_anchors]\n overlap = set.intersection(*pa_sets)\n if overlap:\n # Ambiguous, overlapping anchors\n continue\n for poss_anc in possible_anchors:\n if poss_anc not in anchors:\n anchors.append(poss_anc)\n anchors = tuple(anchors)\n return anchors", "def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. 
\\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict", "def getSeqSpinSystemLinks(spinSystem, delta=None):\n\n seqLinks = {}\n for link in spinSystem.findAllResonanceGroupProbs(linkType='sequential',isSelected=True):\n if delta is None:\n seqLinks[link] = None\n \n elif link.sequenceOffset == delta:\n seqLinks[link] = None\n\n for link in spinSystem.findAllFromResonanceGroups(linkType='sequential',isSelected=True):\n if delta is None:\n seqLinks[link] = None\n \n elif link.sequenceOffset == -delta:\n seqLinks[link] = None\n\n return seqLinks.keys()", "def get_link_inr(network_name: str, rx_pair_inr: Dict) -> Dict:\n results: DefaultDict = defaultdict(list)\n for (rx_node, rx_from_node), inr_power in rx_pair_inr.items():\n link_name = Topology.mac_to_link_name.get(network_name, {}).get(\n (rx_node, rx_from_node)\n )\n if link_name is None:\n continue\n\n inr_db = 10 * np.log10(inr_power)\n if inr_db < HardwareConfig.MINIMUM_SNR_DB:\n continue\n\n results[link_name].append(\n {\"rx_node\": rx_node, \"rx_from_node\": rx_from_node, \"inr_curr_power\": inr_db}\n )\n return results", "def parse_map(plinkmap):\n plink = {}\n with open(plinkmap, 'r') as f:\n for line in f:\n tmp = line.strip().split()\n chrom = tmp[0]\n if chrom not in plink:\n plink[chrom] = []\n plink[chrom].append(tmp)\n # Then sort on physical position\n for c in plink:\n plink[c] = sorted(plink[c], key=lambda x: int(x[3]))\n return plink", "def create_links_dict(all_pages):\n links_dict = dict()\n\n n_link = 0\n for j in range(N_PROCESSES):\n for n_site, site in enumerate(all_pages[j]):\n link = site[\"link\"]\n link = reduce_to_domain(link)\n\n if len(link) >= MIN_LINK_LEN and links_dict.get(link, -1) == -1:\n links_dict[link] = n_link\n n_link += 1\n\n if site[\"hyperlinks\"] is None:\n continue\n\n for child_link in site[\"hyperlinks\"]:\n child_link = reduce_to_domain(child_link)\n\n if len(child_link) >= MIN_LINK_LEN and links_dict.get(child_link, -1) == -1:\n links_dict[child_link] = n_link\n n_link += 1\n\n with open(os.path.join(\"..\", \"files\", \"all_links.json\"), \"w\", encoding=\"utf-8\") as f:\n json.dump(links_dict, f, indent=4, ensure_ascii=False)", "def get_all_backup_links(\n network_name: str,\n node_mac_map: DefaultDict,\n link_name_map: Dict[str, Dict],\n conn_list: List,\n) -> DefaultDict:\n backup_links: DefaultDict = defaultdict(dict)\n for conn_list_item in conn_list:\n tx_node_mac = conn_list_item[\"tx_node\"]\n rx_node_mac = conn_list_item[\"rx_node\"]\n backup_link_candidate = {\n \"link_type\": 1,\n \"linkup_attempts\": 0,\n \"is_alive\": False,\n \"name\": \"\",\n \"is_backup_cn_link\": True,\n }\n\n if tx_node_mac not in node_mac_map or rx_node_mac not in node_mac_map:\n logging.debug(f\"One of the mac addresses is not in {network_name}.\")\n continue\n\n # TODO: This part will be used in the later version.\n # No CNs can be tested at this point in the live network.\n # Will come back 
to complete the logic later on.\n tx_node_type = node_mac_map[tx_node_mac][\"type\"]\n rx_node_type = node_mac_map[rx_node_mac][\"type\"]\n if tx_node_type == NodeType.CN or rx_node_type == NodeType.CN:\n backup_link_candidate[\"is_backup_cn_link\"] = True\n\n if node_mac_map[tx_node_mac][\"name\"] < node_mac_map[rx_node_mac][\"name\"]:\n backup_link_candidate[\"a_node_mac\"] = tx_node_mac\n backup_link_candidate[\"z_node_mac\"] = rx_node_mac\n backup_link_candidate[\"a_node_name\"] = node_mac_map[tx_node_mac][\"name\"]\n backup_link_candidate[\"z_node_name\"] = node_mac_map[rx_node_mac][\"name\"]\n else:\n backup_link_candidate[\"a_node_mac\"] = rx_node_mac\n backup_link_candidate[\"z_node_mac\"] = tx_node_mac\n backup_link_candidate[\"a_node_name\"] = node_mac_map[rx_node_mac][\"name\"]\n backup_link_candidate[\"z_node_name\"] = node_mac_map[tx_node_mac][\"name\"]\n\n backup_link_candidate_name = (\n f\"link-{backup_link_candidate['a_node_name']}\"\n f\"-{backup_link_candidate['z_node_name']}\"\n )\n backup_link_candidate[\"name\"] = backup_link_candidate_name\n # Do not process any active links in the topology file\n # TODO: check whether this part is necessary.\n # If it is the case, we need to check node macs instead of link name only.\n if backup_link_candidate_name not in link_name_map:\n backup_links[backup_link_candidate_name][\"link\"] = backup_link_candidate\n if len(conn_list_item[\"routes\"]) != 0:\n (_tx_beam_idx, _rx_beam_idx, snr) = conn_list_item[\"routes\"][0]\n backup_links[backup_link_candidate_name][\"snr\"] = snr\n\n return backup_links", "def compute_pagerank(urls, inlinks, outlinks, b=.85, iters=20):\n ###TODO\n pagerank = defaultdict(lambda: 1.0)\n N = len(urls)\n for url in urls:\n pagerank[url]\n for i in range(0, iters):\n for url in urls:\n result_sum = 0.0\n for link in inlinks[url]:\n if len(outlinks[link]) is not 0:\n result_sum += (pagerank[link] / len(outlinks[link]))\n pagerank[url] = (1/N) * (1-b) + (b * result_sum)\n return pagerank\n pass", "def backlinks(self) -> Dict[str, List[str]]:\n bk_links: Dict[str, List[str]] = {}\n for note in filter(lambda n: n.links_to is not None, self.by_id.values()):\n for fwd in note.links_to:\n if fwd not in bk_links:\n bk_links[fwd] = [note.id]\n else:\n bk_links[fwd].append(note.id)\n\n return bk_links", "def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = 
dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)", "def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. 
Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)", "def _make_links(self,\n links: Mapping[str, Union[str, Dict[str, Any]]],\n relationship: Optional[str] = None):\n evaluated_links = {}\n for name, link_payload in links.items():\n evaluated_links[name] = link_payload\n for param, arg in link_payload.items():\n evaluated_links[name][param] = (\n arg(self) if callable(arg) else arg)\n links_factories = self.__links_factories__\n return {\n name: links_factories[self._qualname(name, relationship)](**evaluated_links[name])\n if links_factories.get(self._qualname(name, relationship)) is not None\n else evaluated_links[name]\n for name in evaluated_links\n }", "def get_dictionary_of_peptides_and_isomeric_peak_areas(self, fout_peptides_isomeric_peak_areas):\n \n try:\n fin_handle = open(fout_peptides_isomeric_peak_areas)\n\n except IOError:\n raise(\"Provide a file containing percentage peak_area of isomeric peptides\")\n\n # local list; this appends all lines within a block and is emptied at the end of the block\n L_peptide_isomeric_peak_area = []\n\n block_start = False; pep_NC = \"\"\n \n for line in fin_handle:\n \n line = line.strip()\n \n # skipping the blank line\n if not line.strip():\n continue\n \n # skipping the comment line\n if line[0]==\"#\":\n continue\n \n \n if line==\"PEPSTART\": block_start=True\n\n elif line==\"PEPEND\" :\n block_start=False\n \n #end elif\n \n if block_start and line!=\"PEPSTART\":\n L = line.split(\":\")\n if L[0].strip() == \"peptide\":\n pep_NC = L[1].strip() #e.g, '15-25'\n\n elif L[0].strip()==\"IsomericPeptidesPeakArea\":\n right_side = L[1].strip()\n\n L_modtypes_freq_peak_area = [m.strip() for m in right_side.split(\" \")]\n percentage_peak_area = L_modtypes_freq_peak_area[-1] # last column\n D_modtype_freq = {}\n\n # running the loop so as to skip the last element\n for i, m in enumerate(L_modtypes_freq_peak_area[:-1]):\n mtype = (m.split('=')[0]).strip()\n freq = (m.split('=')[1]).strip()\n D_modtype_freq[mtype] = freq\n\n #end for\n\n L_peptide_isomeric_peak_area.append((D_modtype_freq, percentage_peak_area))\n \n # end if block_start and line!=\"PEPSTART\" \n\n # pushing into the dictionary after end of each block\n\n if line==\"PEPEND\":\n\n # sorting the list based on total frequency of isomeric peptides\n L_sorted = sorted(L_peptide_isomeric_peak_area, key=lambda x: sum([int(f) for f in x[0].values()]))\n \n self._D_peptide_isomeric_peak_areas[pep_NC] = L_sorted\n \n #emptying the list for next block\n L_peptide_isomeric_peak_area = []\n \n # emptying the peptide N_loc, C_loc string at the end of the block\n pep_NC = \"\"", "def get_linked_neighbors(self, directions=None):\n if not directions:\n directions = REVERSE_DIRECTIONS.keys()\n\n xygrid = self.xymap.xygrid\n links = {}\n for direction in directions:\n dx, dy = MAPSCAN[direction]\n end_x, end_y = self.x + dx, self.y + dy\n if end_x in xygrid and end_y in xygrid[end_x]:\n # there is is something there, we need to check if it is either\n # a map node or a link connecting in our direction\n node_or_link = xygrid[end_x][end_y]\n if node_or_link.multilink or node_or_link.get_direction(direction):\n links[direction] = node_or_link\n return links", "def _partition_pairs_by_slot(\n self, mapping: Mapping[AnyKeyT, EncodableT]\n ) -> Dict[int, 
List[EncodableT]]:\n\n slots_to_pairs = {}\n for pair in mapping.items():\n slot = key_slot(self.encoder.encode(pair[0]))\n slots_to_pairs.setdefault(slot, []).extend(pair)\n\n return slots_to_pairs", "def getSlotMap(self):\n slotMap = dict()\n for entry in self.slots:\n slotMap[entry] = self.__getattribute__(\"on_\" + entry)\n return slotMap", "def get_links(user):\n # secure_filename('some.file') strips hacker attempts away from input. \n linksfile = secure_filename('%s.links'%(user))\n\n # Here we should check if file exists with -> os.path.isfile(path)\n\n try:\n with codecs.open(linksfile, 'rb') as userfile: \n links = pickle.loads(userfile.read())\n except IOError:\n links = {}\n return links", "def get_primers(header,\r\n mapping_data):\r\n\r\n if \"LinkerPrimerSequence\" in header:\r\n primer_ix = header.index(\"LinkerPrimerSequence\")\r\n else:\r\n raise IndexError(\r\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\r\n if \"ReversePrimer\" in header:\r\n rev_primer_ix = header.index(\"ReversePrimer\")\r\n else:\r\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\r\n\r\n iupac = {'A': 'A', 'T': 'T', 'G': 'G', 'C': 'C', 'R': '[AG]', 'Y': '[CT]',\r\n 'S': '[GC]', 'W': '[AT]', 'K': '[GT]', 'M': '[AC]', 'B': '[CGT]',\r\n 'D': '[AGT]', 'H': '[ACT]', 'V': '[ACG]', 'N': '[ACGT]'}\r\n\r\n raw_forward_primers = set([])\r\n raw_forward_rc_primers = set([])\r\n raw_reverse_primers = set([])\r\n raw_reverse_rc_primers = set([])\r\n\r\n for line in mapping_data:\r\n # Split on commas to handle pool of primers\r\n raw_forward_primers.update([upper(primer).strip() for\r\n primer in line[primer_ix].split(',')])\r\n raw_forward_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_forward_primers])\r\n raw_reverse_primers.update([upper(primer).strip() for\r\n primer in line[rev_primer_ix].split(',')])\r\n raw_reverse_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_reverse_primers])\r\n\r\n if not raw_forward_primers:\r\n raise ValueError((\"No forward primers detected in mapping file.\"))\r\n if not raw_reverse_primers:\r\n raise ValueError((\"No reverse primers detected in mapping file.\"))\r\n\r\n # Finding the forward primers, or rc of reverse primers indicates forward\r\n # read. 
Finding the reverse primer, or rc of the forward primers, indicates\r\n # the reverse read, so these sets are merged.\r\n raw_forward_primers.update(raw_reverse_rc_primers)\r\n raw_reverse_primers.update(raw_forward_rc_primers)\r\n\r\n forward_primers = []\r\n reverse_primers = []\r\n for curr_primer in raw_forward_primers:\r\n forward_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n for curr_primer in raw_reverse_primers:\r\n reverse_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n\r\n return forward_primers, reverse_primers", "def testDereferenceLinks(self):\n ddict = {\"ext_group\": {\"dataset\": 10}}\n dictdump.dicttonx(ddict, self.h5_ext_fname)\n ddict = {\"links\": {\"group\": {\"dataset\": 10, \">relative_softlink\": \"dataset\"},\n \">relative_softlink\": \"group/dataset\",\n \">absolute_softlink\": \"/links/group/dataset\",\n \">external_link\": \"nx_ext.h5::/ext_group/dataset\"}}\n dictdump.dicttonx(ddict, self.h5_fname)\n\n ddict = dictdump.h5todict(self.h5_fname, dereference_links=True)\n self.assertTrue(ddict[\"links\"][\"absolute_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"relative_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"external_link\"], 10)\n self.assertTrue(ddict[\"links\"][\"group\"][\"relative_softlink\"], 10)", "def dict() -> Dict[str, Pin]:", "def link_residues(self) -> None:\n ...", "def addPeakResonancesToSeqSpinSystems(peak, seqOffsets):\n \n assert len(peak.peakDims) == len(seqOffsets)\n assert None in seqOffsets # otherwise no reference point\n\n spinSystems = []\n resonanceList = []\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n spinSystem = None\n resonances = []\n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n resonances.append(resonance)\n \n if resonance.resonanceGroup:\n if not spinSystem:\n spinSystem = resonance.resonanceGroup\n\n elif spinSystem is not resonance.resonanceGroup:\n msg = 'There are multiple spin systems for peak dimension %d.\\n' % (i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(resonance.resonanceGroup,spinSystem)\n else:\n return\n\n resonanceList.append(resonances)\n spinSystems.append( spinSystem )\n\n ref = None\n I = 0\n for i, spinSystem in enumerate(spinSystems):\n if spinSystem is not None:\n if seqOffsets[i] is None:\n if ref is None:\n ref = spinSystem\n I = i\n \n else:\n if spinSystem is not ref:\n msg = 'Dimensions %d and %d have different spin systems.\\n' % (I+1,i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(spinSystem, ref)\n else:\n return\n \n if ref is not None:\n for i, seqOffset in enumerate(seqOffsets):\n \n if seqOffset:\n spinSystem = findConnectedSpinSystem(ref, seqOffset)\n if spinSystems[i] is ref:\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n showWarning('Failure','Spin system cannot be both i and i%s (dimension %d)' % (deltaText,i+1))\n continue\n \n \n if spinSystem and spinSystems[i]:\n if spinSystem is not spinSystems[i]:\n if (not spinSystem.residue) or (not spinSystems[i].residue):\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n \n msg = 'There is an i%s spin system already present (dimension %d).\\n' % (deltaText, i+1)\n msg += 'Merge spin systems together?'\n if showOkCancel('Confirm', msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n 
else:\n spinSystem = None\n\n elif spinSystem.residue is spinSystems[i].residue:\n name = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n msg = 'There are multiple spin systems for residue %s.\\n?' % name\n msg += 'Merge spin systems together?'\n \n if showOkCancel('Confirm',msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n else:\n txt1 = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n txt2 = '%d%s' % (spinSystems[i].residue.seqCode,spinSystems[i].residue.ccpCode)\n msg = 'Cannot set spin system for F%d dim' % (i+1)\n msg += 'Offset %d causes conflict between %s and %s' % (seqOffset, txt1, txt2)\n showWarning('Failure',msg)\n return\n \n if resonanceList[i]:\n nmrProject = resonanceList[i][0].nmrProject\n if not spinSystem:\n if spinSystems[i]:\n spinSystem = spinSystems[i]\n else:\n spinSystem = nmrProject.newResonanceGroup()\n \n makeSeqSpinSystemLink(ref, spinSystem, seqOffsets[i])\n \n for resonance in resonanceList[i]:\n if resonance.resonanceGroup is not spinSystem:\n addSpinSystemResonance(spinSystem,resonance)", "def relabel(peak_ids, oldparams, mask):\n spot_data = {}\n peak_num = 1\n for peak in peak_ids:\n #coords = np.where(mask == peak)\n paramsnew = oldparams[peak-1,:] # object 1 will be fitparams row 0\n # Rearrange params from fit function so coordinates lead.\n spot_data[peak_num] = paramsnew[[1,2,3,0,4,5,6]]\n peak_num = peak_num + 1\n return spot_data", "def getLinkEnds(self):\n dataDict = self.__dict__\n result = set(ca.boundLinkEnd for ca in self.chemAtoms if isinstance(ca,LinkAtom))\n if None in result:\n result.remove(None)\n result = frozenset(result)\n return result", "def build_graph(link_data, links):\n graph = {}\n\n # add all data for links\n for l in links:\n #print(\"Adding \"+l)\n #print(link_data.get(l))\n graph[l] = list(link_data.get(l))\n\n # add all links that point to links\n for slink in link_data:\n for l in links:\n # the links is already in graph, skip\n if graph.has_key(slink):\n continue\n\n try:\n dest_links = list(link_data.get(slink))\n # if slink points to l\n _ = dest_links.index(l)\n # add the slink to graph\n graph[slink] = dest_links\n #print(\"Adding \"+slink)\n except Exception as e:\n pass\n\n #print(len(graph))\n #print(graph)\n\n return graph", "def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matter if they are functional\n for i in range(0, len(self._partner_indices)):\n graph_dict_incomplete[i] = set(self._partner_indices[i])\n if self._variant[0] == \"V0_instant\":\n self.graph_dict = graph_dict_incomplete\n else:\n # helper\n link_list = []\n link_list2 = []\n for vertex in graph_dict_incomplete:\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=set())\n for neighbour in graph_dict_incomplete[vertex]:\n # Iterate through all plants and the neighbours\n # If a new pair occurs it will be appended in link_list2\n # If the pair occurs again it wll be appended in link_list\n # This means that the link (or rgf process) is finished\n # for both plants\n if {neighbour, vertex} not in link_list2:\n link_list2.append({vertex, neighbour})\n else:\n # plants are only put in the dict. if they occur more\n # than once, i.e. 
both partners have finished rgf\n link_list.append({vertex, neighbour})\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=neighbour)", "def download_burstlink_mapping(fpath='burstlink.json'):\n\n try:\n url = 'https://raw.githubusercontent.com/soruly/burstlink/master/burstlink.json'\n # downloading\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) \\\n AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15'\n }\n resp = requests.get(url, headers=headers)\n mapping = resp.json()\n for i in range(len(mapping)):\n item = mapping[i]\n item['mal'] = None if 'mal' not in item else item['mal']\n item['anidb'] = None if 'anidb' not in item else item['anidb']\n item['anilist'] = None if 'anilist' not in item else item['anilist']\n mapping[i] = item\n with open(fpath, 'w', encoding='utf-8') as f:\n json.dump(mapping, f, indent=2, ensure_ascii=False)\n return True\n except Exception:\n traceback.print_exc()\n return False", "def addPeakResonancesToSpinSystem(peaks):\n \n # TBD check experiment type of the peak\n \n if not peaks:\n return\n \n resonances = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n resonances.append(contrib.resonance)\n \n spinSystems = []\n for resonance in resonances:\n resonanceGroup = resonance.resonanceGroup\n if resonanceGroup and (resonanceGroup not in spinSystems):\n spinSystems.append(resonanceGroup)\n\n spinSystem = None\n if len(spinSystems) == 1:\n spinSystem = spinSystems[0]\n elif len(spinSystems) > 1:\n msg = 'There are multiple spin systems for these peaks.\\n'\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm',msg):\n spinSystem = spinSystems[0]\n for spinSystem2 in spinSystems[1:]:\n mergeSpinSystems(spinSystem2,spinSystem)\n else:\n return\n \n if spinSystem is None:\n spinSystem = peaks[0].topObject.newResonanceGroup()\n\n for resonance in resonances:\n addSpinSystemResonance(spinSystem,resonance)\n\n return spinSystem", "def __load_url_mapping():\n try:\n return pickle.load(open(\"short_to_url.p\", \"rb\"))\n except IOError:\n return {}", "def build_links(self):\n super().build_links()\n if len(self.links) > 1:\n raise MapParserError(\"may have at most one link connecting to it.\", self)", "def linkage(self):\n self.tree = {}\n un_linked = []\n for i in range(len(self.leaves)):\n leaf = self.leaves[i]\n un_linked.append({\n 'id': i,\n 'x': 0,\n 'y': 0,\n 'value': 0,\n 'set': leaf,\n 'children': []\n })\n pass\n while len(un_linked) > 1:\n # for i in tqdm(range(len(un_linked))):\n # print(\"Linking... 
{} nodes left\".format(len(un_linked)))\n for node in un_linked:\n for d in node['set']:\n node['x'] += d['x']\n node['y'] += d['y']\n node['value'] += d['value']\n pass\n node['x'] /= len(node['set'])\n node['y'] /= len(node['set'])\n node['value'] /= len(node['set'])\n pass\n # min_dif = ((un_linked[1]['x'] - un_linked[0]['x']) ** 2 + (un_linked[1]['y'] - un_linked[0]['y']) ** 2) \\\n # * self._alpha + (un_linked[1]['value'] - un_linked[0]['value']) * (1 - self._alpha)\n min_dif = ((un_linked[1]['x'] - un_linked[0]['x']) ** 2 + (un_linked[1]['y'] - un_linked[0]['y']) ** 2)\n min_cp = [0, 1]\n for i in range(len(un_linked) - 1):\n for j in range(i + 1, len(un_linked)):\n # dif = self._alpha * ((un_linked[j]['x'] - un_linked[i]['x']) ** 2\n # + (un_linked[j]['x'] - un_linked[i]['x']) ** 2) \\\n # + (1 - self._alpha) * (un_linked[j]['value'] - un_linked[i]['value'])\n dif = ((un_linked[j]['x'] - un_linked[i]['x']) ** 2\n + (un_linked[j]['x'] - un_linked[i]['x']) ** 2)\n if dif < min_dif:\n min_dif = dif\n min_cp = [i, j]\n pass\n pass\n pass\n set_a = []\n for each in un_linked[min_cp[0]]['set']:\n set_a.append(each)\n pass\n for each in un_linked[min_cp[1]]['set']:\n set_a.append(each)\n pass\n next_un_linked = []\n new_children = []\n if len(un_linked[min_cp[0]]['children']) != 0:\n new_children.append({'children': un_linked[min_cp[0]]['children'],\n 'value': len(un_linked[min_cp[0]]['set'])})\n pass\n else:\n new_children.append({'id': un_linked[min_cp[0]]['id'],\n 'value': len(un_linked[min_cp[0]]['set'])})\n if len(un_linked[min_cp[1]]['children']) != 0:\n new_children.append({'children': un_linked[min_cp[1]]['children'],\n 'value': len(un_linked[min_cp[1]]['set'])})\n pass\n else:\n new_children.append({'id': un_linked[min_cp[1]]['id'],\n 'value': len(un_linked[min_cp[1]]['set'])})\n pass\n next_un_linked.append({\n 'x': 0,\n 'y': 0,\n 'value': 0,\n 'set': set_a,\n 'children': new_children\n })\n del un_linked[min_cp[0]]['set']\n del un_linked[min_cp[0]]['x']\n del un_linked[min_cp[0]]['y']\n # del un_linked[min_cp[0]]['value']\n del un_linked[min_cp[1]]['set']\n del un_linked[min_cp[1]]['x']\n del un_linked[min_cp[1]]['y']\n # del un_linked[min_cp[1]]['value']\n for s in range(len(un_linked)):\n if s not in min_cp:\n next_un_linked.append(un_linked[s])\n pass\n pass\n un_linked = next_un_linked\n pass\n del un_linked[0]['set']\n del un_linked[0]['x']\n del un_linked[0]['y']\n # del un_linked[0]['value']\n self.tree = un_linked[0]\n self._count = 0\n\n self.tree = self._resolve(self.tree)\n return", "def links(self):\n links = {}\n data = self.data['links']\n for key in data:\n links[key] = data[key]['url']\n return links", "def buildPeakgroupMap(multipeptides, peakgroup_map):\n\n for m in multipeptides:\n pg = m.find_best_peptide_pg()\n peptide_name = pg.get_value(\"FullPeptideName\")\n peptide_name = peptide_name.split(\"_run0\")[0]\n charge_state = pg.get_value(\"Charge\")\n if charge_state == \"NA\" or charge_state == \"\":\n charge_state = \"0\"\n\n key = peptide_name + \"/\" + charge_state\n prkey = peptide_name + \"/\" + charge_state + \"_pr\"\n\n # identifier for precursor, see mapRow\n peakgroup_map[ key ] = m\n peakgroup_map[ prkey ] = m", "def __init_freeze_map(self):\n sequences = self._pattern.extract_flat_sequences()\n for freezer_event_name in self._pattern.consumption_policy.freeze_names:\n current_event_name_set = set()\n for sequence in sequences:\n if freezer_event_name not in sequence:\n continue\n for name in sequence:\n current_event_name_set.add(name)\n if name 
== freezer_event_name:\n break\n if len(current_event_name_set) > 0:\n self.__freeze_map[freezer_event_name] = current_event_name_set", "def _get_linkable_scripts(self, scripts):\n avail_scripts = {}\n for script in scripts:\n script_name = script.__class__.__name__.upper()\n if script_name not in avail_scripts and hasattr(script, 'components'):\n l_comp = self._get_linkable_component(script)\n if l_comp:\n avail_scripts[script_name] = l_comp\n\n return avail_scripts", "def cache_links(self):\n for source_location in self.gen_locations():\n for vi, delta in vi_delta_pairs:\n drow, dcol = delta\n for command, magnitude in ((vi, 1), (vi.upper(), 8)):\n target_location = source_location\n for i in range(magnitude):\n trow, tcol = target_location\n next_target_location = (trow + drow, tcol + dcol)\n if self.is_inbounds(next_target_location):\n target_location = next_target_location\n else:\n break\n triple = (source_location, target_location, command)\n self.cached_links.append(triple)", "def upsert_link_set(self, link_type, link_set):\n if link_set is None:\n self.links.pop(link_type, None)\n return\n links = copy.deepcopy(self.links)\n links.update({link_type: link_set})\n self.links = links", "def keep_and_peer_link_member(duthosts, collect, mg_facts):\n res = defaultdict(dict)\n for dut in duthosts:\n port_indices = {\n mg_facts[dut.hostname]['minigraph_port_indices'][k]: k\n for k in mg_facts[dut.hostname]['minigraph_port_indices']}\n keep_alive_interface = port_indices[int(collect[dut.hostname]['devices_interconnect_interfaces'][0])]\n peer_link_member = port_indices[int(collect[dut.hostname]['devices_interconnect_interfaces'][-1])]\n res[dut.hostname]['keepalive'] = keep_alive_interface\n res[dut.hostname]['peerlink'] = peer_link_member\n return res", "def entryPair(self):\n exitPair = {}\n deadEnd = self.searchDeadEnd()\n deadPoints = []\n deadRoads = deadEnd.copy()\n for (x,y) in deadEnd:\n # if (x,y) not in deadPoints:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n BFS = util.Queue()\n visited = []\n BFS.push((x,y))\n while not BFS.isEmpty():\n (i,j) = BFS.pop()\n visited.append((i, j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in deadRoads:\n exitPoint = position\n if position in deadRoads and not position in visited:\n BFS.push(position)\n if exitPoint in exitPair.keys():\n exitPair[exitPoint] = list(set(visited).union(set(exitPair[exitPoint])))\n else:\n exitPair[exitPoint] = visited\n deadPoints = list(set(deadEnd).difference(set(visited)))\n\n return exitPair", "def convert_slots(slots: typing.List[Slot]) -> {}:\n resolved = {}\n\n for slot in slots:\n slot_name = slot.slot_name\n slot_value = slot.value\n\n slot_entity = slot.entity\n if slot_entity.startswith('snips/'):\n resolved[slot_name] = slot.value\n resolved[slot_name + '_raw'] = slot.raw_value\n else:\n # assuming Rasa NLU slot\n slot_extractor = slot_value['extractor']\n if not slot_extractor:\n slot_extractor = 'Unknown'\n else:\n del slot_value['extractor']\n\n if slot_name not in resolved:\n resolved[slot_name] = {}\n if slot_extractor not in resolved[slot_name]:\n resolved[slot_name][slot_extractor] = []\n\n # take the text entity extractor as the raw value\n if slot_extractor == 'CRFEntityExtractor':\n resolved[slot_name + '_raw'] = slot.raw_value\n\n resolved[slot_name][slot_extractor].append(slot_value)\n\n return resolved", "def build_link_graph(passages):\n def is_link(item):\n if isinstance(item, HarloweLink):\n return True\n if not 
isinstance(item, HarloweMacro):\n return False\n return item.canonical_name in ['linkgoto', 'goto']\n\n missing_links = []\n\n for passage_name, passage in passages.items():\n for link in passage.find_matches(is_link):\n if isinstance(link, HarloweLink):\n try:\n linked_passage_name = ''.join([str(item) for item in link.passage_name])\n except TypeError: # If there is no passage name, then passage_name is None\n linked_passage_name = ''.join([str(item) for item in link.link_text])\n else: # Handle the macro\n # TODO this doesn't actually cover all cases, since the macro's code can include\n # Javascript and the like. Instead, what we need is a JS-aware tokenizer\n code_str = ''.join([str(item) for item in link.code])\n if link.canonical_name == 'linkgoto':\n # TODO If there are commas other than the one separating arguments, this will fail\n linked_passage_name = code_str.split(',', maxsplit=1)[-1]\n else:\n linked_passage_name = code_str\n # Strip whitespace and then the quote marks\n linked_passage_name = linked_passage_name.strip()\n linked_passage_name = linked_passage_name.strip(linked_passage_name[0])\n\n try:\n linked_passage = passages[linked_passage_name]\n passage.destinations.add(linked_passage)\n linked_passage.parents.add(passage)\n except KeyError:\n missing_links.append((passage, str(link)))\n\n return missing_links", "def __init__(self):\n self.head = Block()\n self.tail = Block()\n self.head.next = self.tail\n self.tail.prev = self.head\n self.mapping = {}", "def get_external_links(parsed_drug_doc):\n\n external_link_info = list(parsed_drug_doc.find(id='external-links').next_sibling.dl.children)\n external_links = {}\n for i in range(0, len(external_link_info), 2):\n source = external_link_info[i].text\n value = external_link_info[i+1].text\n # Ignoring a few sources for this MVP that don't give obvious alternate IDs.\n if source not in [\"RxList\", \"Drugs.com\", \"PDRhealth\"]:\n external_links[source] = value\n\n return external_links", "def buildLinksDict(self):\n \n arcpy.env.workspace = PublicTransit.RTD_PATH\n # Check if feature layer already exists; if so, delete it.\n if arcpy.Exists(PublicTransit.ROUTE_EDGES_FEATURE_LYR):\n arcpy.Delete_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR)\n # Create a feature layer based on bus route traversal edges, and join to\n # the Roadways feature class.\n arcpy.MakeFeatureLayer_management(PublicTransit.BUS_ROUTE_TRAVERSAL_EDGES,\n PublicTransit.ROUTE_EDGES_FEATURE_LYR)\n routeTraversalEdgesJoinField = \"SourceOID\"\n roadwaysJoinField = \"LinkId\"\n arcpy.AddJoin_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR,\n routeTraversalEdgesJoinField,\n PublicTransit.ROADWAYS_FC,\n roadwaysJoinField,\n \"KEEP_COMMON\")\n self.linksDict = dict()\n \n linkIdField = \"Roadways.LinkId\"\n fromNodeField = \"Roadways.F_JNCTID\"\n toNodeField = \"Roadways.T_JNCTID\"\n onewayField = \"Roadways.ONEWAY\"\n \n links = arcpy.SearchCursor(PublicTransit.ROUTE_EDGES_FEATURE_LYR, \"\", \"\",\n linkIdField + \";\" + fromNodeField + \";\" +\n toNodeField + \";\" + onewayField, \"\") \n print \"Found %d links\" % \\\n int(arcpy.GetCount_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR).getOutput(0))\n \n linkIter = 0\n # Add link to dictionary if both the from and to node are in the nodes dictionary.\n for l in links:\n linkId = l.getValue(linkIdField)\n fromNode = self.__getIdHash(l.getValue(fromNodeField))\n toNode = self.__getIdHash(l.getValue(toNodeField))\n oneWay = l.getValue(onewayField)\n if (linkId not in self.linksDict):\n if (fromNode in 
self.nodesDict and toNode in self.nodesDict):\n self.linksDict[linkId] = Link(linkId, self.nodesDict[fromNode],\n self.nodesDict[toNode], oneWay)\n linkIter += 1\n if (linkIter % 10000 == 0):\n print \"processed %d links\" % (linkIter)\n del l\n del links\n arcpy.Delete_management(PublicTransit.ROUTE_EDGES_FEATURE_LYR)", "def filter_to_candidate(self):\n filtered = { k: [] for k in self.annsets }\n for key, annset in self.annsets.items():\n for a in annset:\n if a.overlaps(self.candidate):\n filtered[key].append(a)\n self.annsets = filtered", "def link_ids(self):\n return self._link_ids", "def _link_items(self):\n pass", "def peaks(n, binCenters, method=\"JI\", window=100, peakAmpThresh=0.00005, valleyThresh=0.00003):\n data = zip(binCenters, n)\n binCenters = np.array(binCenters)\n firstCenter = (min(binCenters)+1.5*window)/window*window\n lastCenter = (max(binCenters)-window)/window*window\n if firstCenter < -1200: firstCenter = -1200\n if lastCenter > 3600: lastCenter = 3600\n\n\n if method == \"slope\" or method == \"hybrid\":\n peaks = {}\n peakInfo = peaksBySlope(n, binCenters, lookahead=20, delta=valleyThresh, averageHist=True)\n\n #find correspondences between peaks and valleys, and set valleys are left and right Indices\n #see the other method(s) for clarity!\n\n peakData = peakInfo[\"peaks\"]\n valleyData = peakInfo[\"valleys\"]\n\n #print len(peakData[0]), len(peakData[1])\n for i in xrange(len(peakData[0])):\n nearestIndex = findNearestIndex(valleyData[0], peakData[0][i])\n if valleyData[0][nearestIndex] < peakData[0][i]:\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][nearestIndex+1:]) == 0):\n rightIndex = findNearestIndex(binCenters, peakData[0][i]+window/2.0)\n else:\n offset = nearestIndex+1\n nearestIndex = offset+findNearestIndex(valleyData[0][offset:], peakData[0][i])\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n else:\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][:nearestIndex]) == 0):\n leftIndex = findNearestIndex(binCenters, peakData[0][i]-window/2.0)\n else:\n nearestIndex = findNearestIndex(valleyData[0][:nearestIndex], peakData[0][i])\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n\n pos = findNearestIndex(binCenters, peakData[0][i])\n #print binCenters[pos], peakData[1][i], binCenters[leftIndex], binCenters[rightIndex]\n peaks[pos] = [peakData[1][i], leftIndex, rightIndex]\n\n if method == \"hybrid\": slopePeaks = peaks\n \n if method == \"JI\" or method == \"ET\" or method == \"hybrid\":\n peaks = {}\n #Obtain max value per interval\n if method == \"JI\" or method == \"hybrid\":\n firstCenter = nearestJI(firstCenter)\n lastCenter = nearestJI(lastCenter)\n\n interval = firstCenter\n prevInterval = firstCenter-window\n #NOTE: All *intervals are in cents. 
*indices are of binCenters/n\n while interval < lastCenter:\n if method == \"ET\":\n leftIndex = findNearestIndex(binCenters, interval-window/2)\n rightIndex = findNearestIndex(binCenters, interval+window/2)\n interval += window\n elif method == \"JI\" or method == \"hybrid\":\n leftIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n prevInterval = interval\n interval = nextJI(interval)\n rightIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n peakPos = np.argmax(n[leftIndex:rightIndex])\n peakAmp = n[leftIndex+peakPos]\n peaks[leftIndex+peakPos] = [peakAmp, leftIndex, rightIndex]\n \n #print binCenters[leftIndex], binCenters[rightIndex], binCenters[leftIndex+peakPos], peakAmp\n #NOTE: All the indices (left/rightIndex, peakPos) are to be changed to represent respective cent \n #value corresponding to the bin. Right now, they are indices of respective binCenters in the array.\n \n if method == \"hybrid\":\n #Mix peaks from slope method and JI method.\n p1 = slopePeaks.keys()\n p2 = peaks.keys()\n allPeaks = {} #overwriting peaks dict\n for p in p1:\n nearIndex = findNearestIndex(p2, p)\n if abs(p-p2[nearIndex]) < window/2.0: p2.pop(nearIndex)\n \n for p in p1: allPeaks[p] = slopePeaks[p]\n for p in p2: allPeaks[p] = peaks[p]\n peaks = allPeaks\n\n #Filter the peaks and retain eligible peaks, also get their valley points.\n\n # ----> peakAmpThresh <---- : remove the peaks which are below that\n\n for pos in peaks.keys():\n #pos is an index in binCenters/n. DOES NOT refer to a cent value.\n if peaks[pos][0] < peakAmpThresh:\n #print \"peakAmp: \", binCenters[pos]\n peaks.pop(pos)\n\n #Check if either left or right valley is deeper than ----> valleyThresh <----.\n valleys = {}\n for pos in peaks.keys():\n leftLobe = n[peaks[pos][1]:pos]\n rightLobe = n[pos:peaks[pos][2]]\n #Sanity check: Is it a genuine peak? 
Size of distributions on either side of the peak should be comparable.\n if len(leftLobe) == 0 or len(rightLobe) == 0:\n continue\n if 1.0*len(leftLobe)/len(rightLobe) < 0.15 or 1.0*len(leftLobe)/len(rightLobe) > 6.67:\n #print \"size: \", binCenters[pos]\n #peaks.pop(pos)\n continue\n\n leftValleyPos = np.argmin(leftLobe)\n rightValleyPos = np.argmin(rightLobe)\n if (abs(leftLobe[leftValleyPos]-n[pos]) < valleyThresh and abs(rightLobe[rightValleyPos]-n[pos]) < valleyThresh):\n #print \"valley: \", binCenters[pos]\n peaks.pop(pos)\n else:\n valleys[peaks[pos][1]+leftValleyPos] = leftLobe[leftValleyPos]\n valleys[pos+rightValleyPos] = rightLobe[rightValleyPos]\n \n if len(peaks) > 0:\n temp1 = np.array(peaks.values())\n temp1 = temp1[:, 0]\n\n return {'peaks':[binCenters[peaks.keys()], temp1], 'valleys':[binCenters[valleys.keys()], valleys.values()]}\n else:\n return {'peaks':[[], []], 'valleys':[[], []]}", "def links(self):\n return self._link_reg", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def build_links_capacity(self):\n\n links_capacity = {}\n # Iterates all the edges in the topology formed by switches\n for src, dst in self.topo.keep_only_p4switches().edges:\n bw = self.topo.edges[(src, dst)]['bw']\n # add both directions\n links_capacity[(src, dst)] = bw\n links_capacity[(dst, src)] = bw\n\n return links_capacity", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n return {\n \"topic\": [\n self.from_text(),\n ],\n }", "def find_bad_order(plinkmap):\n modif = {}\n for chrom in plinkmap:\n # For each pair of adjacent markers, identify the type of modification\n # that needs to happen\n if chrom not in modif:\n modif[chrom] = []\n # Iterate from 1 to the number of markers on the chromosome.\n for i in range(1, len(plinkmap[chrom])):\n g2 = float(plinkmap[chrom][i][2])\n g1 = float(plinkmap[chrom][i-1][2])\n if g2 - g1 < 0:\n modif[chrom].append('I')\n elif g2 - g1 == 0:\n modif[chrom].append('J')\n else:\n modif[chrom].append(None)\n return modif", "def sort_links(links):\n\n temp_dict={}\n temp_list=[]\n sorted_list=[]\n ctr=0\n # Open the file where the results is saved and copy the tuple values into an empty list\n h=open('prresults.txt','r')\n for line in h:\n temp_list.append(line)\n #find the comma seperator between the key and the value, and\n #split them, in order to put in dic\n for x in temp_list:\n index=x.find(',')\n key=int(x[0:index])\n val=float(x[index+1:len(x)])\n for y in links:\n if y!= key and ctr==len(links):\n pass\n if y==key:\n temp_dict[key]=val\n break\n #Take dictionary, put it into a list of tuples, \n #then sort based on the pagerank value, rather then key\n sorted_list= temp_dict.items()\n sorted_list.sort(key=lambda x: x[1],reverse=True)\n \n h.close()\n return sorted_list", "def link_name_list(self):\n return list(self._link_reg.keys())", "def _assign_reads( medians, centroids ):\n log.info(\"Assigning subreads reads to the closet amplicon cluster\")\n assignments = {'5p':set(), '3p':set()}\n five_prime, three_prime = centroids\n for read, median in medians.iteritems():\n five_prime_diff = abs(median - five_prime)\n three_prime_diff = abs(median - three_prime)\n if five_prime_diff 
< three_prime_diff:\n assignments['5p'].add( read )\n else:\n assignments['3p'].add( read )\n return assignments", "def copy_raw_links(in_stream, out_stream, allowed_shas, copied_shas):\n count = 0\n while True:\n hdr = checked_read(in_stream, LINK_HEADER_LEN, True)\n if hdr == '':\n return count # Clean EOF\n length, age, parent = struct.unpack(LINK_HEADER_FMT, hdr)\n sha_value = sha1(str(age))\n sha_value.update(parent)\n rest = checked_read(in_stream, length - LINK_HEADER_LEN)\n sha_value.update(rest)\n value = sha_value.digest()\n if value in copied_shas:\n continue # Only copy once.\n\n if allowed_shas is None or value in allowed_shas:\n out_stream.write(hdr)\n out_stream.write(rest)\n count += 1\n copied_shas.add(value)", "def readLinking(goldStdFile):\n linking = dict()\n for line in open(goldStdFile):\n d = re.split(\"\\s+\", line.strip())\n mention = d[0].upper()\n kb_id = d[1].upper()\n\n if kb_id in linking.keys():\n linking[kb_id].add(mention)\n else:\n linking[kb_id] = set([mention])\n return linking", "def get_chunks_cache(chunks):\n bin_chunks = [binascii.unhexlify(chunk.encode('utf-8'))\n for chunk in chunks]\n links = LinkageEntity.query.filter(\n LinkageEntity.linkage_hash.in_(bin_chunks)).all()\n links_cache = {link.friendly_hash(): link for link in links}\n\n result = {}\n for chunk in chunks:\n result[chunk] = links_cache.get(chunk, None)\n\n return result", "def get_ngbrs_lnk_addrs(self):\n return self._ngbrs.keys()", "def get_citations_ids_map(id_list):\n create_unverified_context()\n logging.debug('============== IN get_citations_ids_map: ================')\n logging.debug('============== ID LIST: ================')\n logging.debug(id_list)\n linked = {}\n for i in range(0, len(id_list)):\n handle = Entrez.elink(\n dbfrom=\"pubmed\", id=id_list[i], linkname=\"pubmed_pubmed_refs\")\n results = Entrez.read(handle)\n logging.debug('============== RESULTS: ================')\n logging.debug(results)\n handle.close()\n if len(results[0][\"LinkSetDb\"]) != 0:\n linked[id_list[i]] = [\n link[\"Id\"] for link in results[0][\"LinkSetDb\"][0][\"Link\"]\n ]\n logging.debug('============== LINKED ARTICLES: ================')\n logging.debug(linked)\n logging.debug('============== ARTICLE ID: ================')\n logging.debug(id_list[i])\n return linked", "def link_binding_sites(self, binding_sites):\n # do a double-check to make sure we don't add duplicate binding sites\n for binding_site in binding_sites:\n if binding_site.id not in [bs.id for bs in self.tf_binding_sites]:\n self.tf_binding_sites.append(binding_site)\n binding_site.link_promoter(self)", "def spinnaker_link_id(self):\n return self._spinnaker_link_id", "def link_removed_zones(oldzones, newzones, links):\n aliases = {}\n for key in oldzones:\n if key not in newzones and key in links:\n sys.stderr.write(\"Linking %s to %s\\n\" % (key, links[key]))\n aliases[key] = {\"aliasTo\": links[key]}\n return aliases", "def get_reverse_primers(id_map):\r\n\r\n rev_primers = {}\r\n for n in id_map.items():\r\n # Generate a dictionary with Barcode:reverse primer\r\n # Convert to reverse complement of the primer so its in the\r\n # proper orientation with the input fasta sequences\r\n rev_primers[n[1]['BarcodeSequence']] =\\\r\n [str(DNASequence(curr_rev_primer).rc()) for curr_rev_primer in\r\n (n[1]['ReversePrimer']).split(',')]\r\n\r\n return rev_primers", "def card_linkmarker(linkmarker):\n if linkmarker not in constants.LINK_MARKERS:\n raise exceptions.LinkMarkerInvalid()", "def _decode_link(self, link):\n\n if 
link.HasField(\"bucket\"):\n bucket = link.bucket\n else:\n bucket = None\n if link.HasField(\"key\"):\n key = link.key\n else:\n key = None\n if link.HasField(\"tag\"):\n tag = link.tag\n else:\n tag = None\n\n return (bucket, key, tag)", "def _link_single_worker(self):\n self.det_link_map = OrderedDict()\n self.id_link_map = OrderedDict()\n\n # Single-process implementation\n # For each UID, cur_anno and cur_decl are relevant\n for uid in np.unique(self.annotations_table.uid.values):\n cur_anno = self.annotations_table[\n self.annotations_table.uid.values == uid\n ]\n cur_decl = self.declarations_table[\n self.declarations_table.uid.values == uid\n ]\n\n anno_frames = cur_anno.frame.values\n decl_frames = cur_decl.frame.values\n\n # For each frame, frame_anno and frame_decl are relevant. We don't\n # have to loop over decl_frames, since any decl with decl_frame not\n # present in anno_frames can't be linked to any annots\n for frame in np.unique(anno_frames):\n frame_anno = cur_anno[anno_frames == frame]\n frame_decl = cur_decl[decl_frames == frame]\n\n # Note that due to PANDAS index magic, the resulting\n # links and id_links map key values are the global indices, and\n # not simply [1 ... len(frame_anno)]\n links, id_links = self._link_frame(frame_anno, frame_decl)\n\n self.det_link_map.update(links)\n self.id_link_map.update(id_links)", "def directed(self):\n seen = {}\n for up, down in self:\n if (up is None) or (down is None):\n continue #omit unpaired bases\n if up > down:\n up, down = down, up\n seen[(up, down)] = True\n result = seen.keys()\n return Pairs(result)", "def _map_invarioms(self):\n self.map = {}\n for invariom in self.invariom_list:\n kill = False\n for molecule in self.sorted_molecules:\n for atom in molecule.atoms:\n if invariom in atom.invarioms:\n self.map[invariom] = molecule.name\n kill = True\n break\n if kill:\n break", "def block_seen(self):\n self.blocklist.update(self.mapping.values())\n self.mapping = dict()", "def _parse_committee_links(self, links):\n committees = {}\n\n for link in links:\n url = link.attrs[\"href\"]\n id_ = re.sub(\"/index.shtml$\", \"\", url)\n id_ = id_[id_.rfind(\"/\") + 1 :]\n name, date = link.text.rsplit(\"(\", maxsplit=1)\n if not date.endswith(\"–)\"):\n raise StopIteration()\n date = date.rstrip(\"–)\")\n committees[id_] = {\"url\": url, \"name\": name.strip(), \"since\": date.strip()}\n\n return committees", "def updateAtomSetMapping(atomSetMapping, atomSets=None):\n\n if not atomSets:\n atomSets = list(atomSetMapping.atomSets)\n else:\n atomSets = list(atomSets)\n\n if not atomSets:\n # nothing to be done\n return\n \n resSerials = []\n if atomSetMapping.mappingType == 'ambiguous':\n for atomSet in atomSets:\n for resonanceSet in atomSet.resonanceSets:\n if len(resonanceSet.atomSets) == 1:\n # must be stereo or simple\n resSerials.append(resonanceSet.findFirstResonance().serial)\n\n if len(resSerials) < 2:\n resSerials = []\n \n elif atomSetMapping.mappingType == 'nonstereo':\n for resonanceSet in atomSets[0].resonanceSets:\n if len(resonanceSet.atomSets) > 1:\n # must be non-stereo\n for resonance in resonanceSet.resonances:\n letter1 = getAmbigProchiralLabel(resonance)\n letter2 = atomSetMapping.name[-1]\n if letter2 == '*':\n if letter1 == atomSetMapping.name[-2]:\n resSerials.append(resonance.serial)\n break\n \n elif letter1 == atomSetMapping.name[-1]:\n resSerials.append(resonance.serial)\n break\n\n else:\n for resonanceSet in atomSets[0].resonanceSets:\n if len(resonanceSet.atomSets) == 1:\n # must be stereo or 
simple\n resSerials.append(resonanceSet.findFirstResonance().serial)\n\n atomSetMapping.setResonanceSerials( resSerials )", "def _extract_intra_links_from_multiplex(pajek):\n string_links = re.findall(r\"\\d+ \\d+ \\d+ \\d+.*\", pajek.split(\"*multiplex\")[1])\n value_links = [list(map(eval, link.split())) for link in string_links]\n G_arr_intra = defaultdict(lambda: nx.Graph)\n for l in value_links:\n if l[0] == l[2]:\n G_arr_intra[l[0]].add_edge(l[1], l[3])\n return G_arr_intra", "def check_map(infile, disable_primer_check, barcode_type=\"golay_12\",\r\n added_demultiplex_field=None, has_barcodes=True):\r\n\r\n if barcode_type == \"variable_length\":\r\n var_len_barcodes = True\r\n else:\r\n var_len_barcodes = False\r\n\r\n if barcode_type == \"0\":\r\n has_barcodes = False\r\n\r\n # hds, id_map, dsp, run_description, errors, warnings\r\n hds, mapping_data, run_description, errors, warnings = \\\r\n process_id_map(infile, has_barcodes=has_barcodes,\r\n disable_primer_check=disable_primer_check,\r\n added_demultiplex_field=added_demultiplex_field,\r\n variable_len_barcodes=var_len_barcodes)\r\n\r\n if errors:\r\n raise ValueError('Errors were found with mapping file, ' +\r\n 'please run validate_mapping_file.py to ' +\r\n 'identify problems.')\r\n\r\n id_map = {}\r\n\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]] = {}\r\n\r\n for header in range(len(hds)):\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]][hds[header]] = curr_data[header]\r\n\r\n barcode_to_sample_id = {}\r\n\r\n primer_seqs_lens = {}\r\n all_primers = {}\r\n\r\n for sample_id, sample in id_map.items():\r\n if added_demultiplex_field:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper() + \",\" +\r\n sample[added_demultiplex_field]] = sample_id\r\n else:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper()] = sample_id\r\n if not disable_primer_check:\r\n raw_primers = sample['LinkerPrimerSequence'].upper().split(',')\r\n\r\n if len(raw_primers[0].strip()) == 0:\r\n raise ValueError('No primers detected, please use the ' +\r\n '-p parameter to disable primer detection.')\r\n expanded_primers = expand_degeneracies(raw_primers)\r\n curr_bc_primers = {}\r\n for primer in expanded_primers:\r\n curr_bc_primers[primer] = len(primer)\r\n all_primers[primer] = len(primer)\r\n primer_seqs_lens[sample['BarcodeSequence']] = curr_bc_primers\r\n\r\n return hds, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers", "def get_bam_pairs(bams):\n ### TODO Use the read group to merge the reads, far smarter!\n bam_list = {}\n for bam in bams:\n sam = pysam.AlignmentFile(bam,'rb')\n sample_id = (sam.header['RG'][0]['SM'])\n try:\n bam_list[sample_id].append(bam)\n except KeyError:\n bam_list[sample_id] = [bam]\n return bam_list", "def find_linked_buds(self, linked_clusters, cluster):\n\n # Find any buds\n linked_buds = [check_cluster for check_cluster in linked_clusters \\\n if check_cluster.leaf_cluster and check_cluster.antecedent is None\n and not independent_leaf_cluster(self, check_cluster, \\\n linked_clusters, cluster)]\n\n return linked_buds", "def links(self, links):\n self._links = links", "def getMyLinks(self, link_list, plant):\n my_links = []\n for links in link_list:\n if plant in links:\n my_links.append(links)\n return my_links", "def _prepare_links_table(self):\n\n links_tbl = OrderedDict()\n for colname in itertools.islice(self._pinfos, 1, None):\n links_tbl[colname] = {}\n links_tbl[colname][\"name\"] = f\"{colname}\"\n fname = colname.replace(\"%\", 
\"_pcnt\") + \".html\"\n links_tbl[colname][\"fname\"] = fname\n links_tbl[colname][\"hlink\"] = f\"<a href=\\\"{fname}\\\">{colname}</a>\"\n\n return links_tbl", "def map_uses(self):\n out = {}\n for node in self.nodes.values():\n baddies = set()#track incomplete connections and relegate to attributes\n for rtype, dest in node.outgoing_relations:\n try:\n self.nodes[dest].add_predecessor(rtype, node.name)\n out.setdefault(rtype, set()).add((node.name, dest))\n except KeyError:\n baddies.add((rtype, dest))\n for rtype, dest in baddies:\n node.remove_relation(rtype, dest)\n node.add_attribute(rtype, dest)\n\n atc = node.attributes.copy()\n #check if any attributes have corresponding nodes\n for atype, attrib in atc:\n if attrib in self.nodes:\n node.remove_attribute(atype, attrib)\n node.add_relation(atype, attrib)\n self.nodes[attrib].add_predecessor(atype, node.name)\n out.setdefault(atype, set()).add((node.name, attrib))\n \n return out", "def get_all_links(soup_list):\n companies_dict_all_pages = {}\n\n for i in soup_list:\n companies_dict_all_pages.update(get_link_from_main_table(i))\n return companies_dict_all_pages", "def getLinkList(self, graph_dict):\n link_list = []\n for vertex in graph_dict:\n for neighbour in graph_dict[vertex]:\n if {neighbour, vertex} not in link_list:\n link_list.append({vertex, neighbour})\n link_list = [x for x in link_list if len(x) > 1]\n return link_list", "def __init__(self, links: Sequence[Link]) -> None:\n self._links = [] # type: Sequence[Link]\n self._optimization_mask = [False] * len(links)\n\n self.links = links", "def convert_LinkTimeMap_to_dict(timemap_text, skipErrors=False):\n\n def process_local_dict(local_dict, working_dict):\n\n first = False\n last = False\n\n for uri in local_dict:\n\n relation = local_dict[uri][\"rel\"]\n\n if relation == \"original\":\n working_dict[\"original_uri\"] = uri\n\n elif relation == \"timegate\":\n working_dict[\"timegate_uri\"] = uri\n\n elif relation == \"self\":\n working_dict[\"timemap_uri\"] = {}\n working_dict[\"timemap_uri\"][\"link_format\"] = uri\n\n elif \"memento\" in relation:\n working_dict.setdefault(\"mementos\", {})\n\n if \"first\" in relation:\n working_dict[\"mementos\"][\"first\"] = {}\n working_dict[\"mementos\"][\"first\"][\"uri\"] = uri\n first = True\n\n if \"last\" in relation:\n working_dict[\"mementos\"][\"last\"] = {}\n working_dict[\"mementos\"][\"last\"][\"uri\"] = uri\n last = True\n\n working_dict[\"mementos\"].setdefault(\"list\", [])\n\n local_memento_dict = {\n \"datetime\": None,\n \"uri\": uri\n }\n\n if \"datetime\" in local_dict[uri]:\n\n mdt = datetime.strptime(local_dict[uri][\"datetime\"],\n \"%a, %d %b %Y %H:%M:%S GMT\")\n\n local_memento_dict[\"datetime\"] = mdt\n\n working_dict[\"mementos\"][\"list\"].append(local_memento_dict)\n\n if first:\n working_dict[\"mementos\"][\"first\"][\"datetime\"] = mdt\n\n if last:\n working_dict[\"mementos\"][\"last\"][\"datetime\"] = mdt\n \n return working_dict\n\n\n dict_timemap = {}\n\n # current_char = \"\"\n uri = \"\"\n key = \"\"\n value = \"\"\n local_dict = {}\n state = 0\n charcount = 0\n\n for character in timemap_text:\n charcount += 1\n\n if state == 0:\n\n local_dict = {}\n uri = \"\"\n\n if character == '<':\n state = 1\n elif character.isspace():\n pass\n else:\n if not skipErrors:\n raise MalformedLinkFormatTimeMap(\n \"issue at character {} while looking for next URI\"\n .format(charcount))\n\n elif state == 1:\n\n if character == '>':\n # URI should be saved by this point\n state = 2\n uri = uri.strip()\n 
local_dict[uri] = {}\n else:\n uri += character\n\n elif state == 2:\n\n if character == ';':\n state = 3\n\n elif character.isspace():\n pass\n\n else:\n if not skipErrors:\n raise MalformedLinkFormatTimeMap(\n \"issue at character {} while looking for relation\"\n .format(charcount))\n\n elif state == 3:\n\n if character == '=':\n state = 4\n else:\n key += character\n\n elif state == 4:\n\n if character == ';':\n state = 3\n elif character == ',':\n state = 0\n\n process_local_dict(local_dict, dict_timemap)\n\n elif character == '\"':\n state = 5\n elif character.isspace():\n pass\n else:\n if not skipErrors:\n raise MalformedLinkFormatTimeMap(\n \"issue at character {} while looking for value\"\n .format(charcount))\n\n elif state == 5:\n\n if character == '\"':\n state = 4\n\n key = key.strip()\n value = value.strip()\n local_dict[uri][key] = value\n key = \"\"\n value = \"\"\n\n else:\n value += character\n\n else:\n \n if not skipErrors:\n raise MalformedLinkFormatTimeMap(\n \"discovered unknown state while processing TimeMap\")\n\n process_local_dict(local_dict, dict_timemap)\n\n return dict_timemap", "def addInLink(source, target):\n if inlinkGraph.has_key(source):\n # if target not in inlinkGraph[source]:# uncomment to remove repetitives\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = inlinkGraphDegree[source] + 1\n else:\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = 1", "def addresses(self):\n addresses = collections.OrderedDict()\n for key in self.keyring:\n address = pubtoaddr(key[\"public\"])\n addresses[address] = key\n return addresses", "def simplify_links(proj,exp,links):\n simple_links =[] \n\n for key in links:\n (node_name,x,y) = key.rpartition(':')\n node_name = node_name+\".\"+exp+\".\"+proj+\".emulab.net\"\n simple_links.append((node_name,links[key]['ipaddr']))\n\n return simple_links", "def get_links(self, area=None, lat=None):\n self.anomaly = {}\n self.links = {}\n self.strength = {}\n self.strengthmap = np.zeros((self.dimX,self.dimY))*np.nan\n if lat is not None:\n scale = np.sqrt(np.cos(np.radians(lat)))\n elif area is not None:\n scale = np.sqrt(area)\n else:\n scale = np.ones((self.dimX,self.dimY))\n \n for A in self.nodes:\n temp_array = np.zeros(self.data.shape)*np.nan\n for cell in self.nodes[A]:\n temp_array[cell[0],cell[1],:] = np.multiply(self.data[cell[0],cell[1],:],scale[cell[0],cell[1]])\n self.anomaly[A] = np.nansum(temp_array, axis=(0,1))\n \n for A in self.anomaly:\n sdA = np.std(self.anomaly[A])\n for A2 in self.anomaly:\n sdA2 = np.std(self.anomaly[A2])\n if A2 != A:\n self.links.setdefault(A, []).append(stats.pearsonr(self.anomaly[A],self.anomaly[A2])[0]*(sdA*sdA2))\n elif A2 == A:\n self.links.setdefault(A, []).append(0)\n \n for A in self.links:\n absolute_links = [] \n for link in self.links[A]:\n absolute_links.append(abs(link))\n self.strength[A] = np.nansum(absolute_links)\n for cell in self.nodes[A]:\n self.strengthmap[cell[0],cell[1]] = self.strength[A]", "def buildCircularRingDictionary(self, ringPitch=1.0):\n runLog.extra(\n \"Building a circular ring dictionary with ring pitch {}\".format(ringPitch)\n )\n referenceAssembly = self.childrenByLocator[self.spatialGrid[0, 0, 0]]\n refLocation = referenceAssembly.spatialLocator\n pitchFactor = ringPitch / self.spatialGrid.pitch\n\n circularRingDict = collections.defaultdict(set)\n\n for a in self:\n dist = a.spatialLocator.distanceTo(refLocation)\n # To reduce numerical sensitivity, round distance to 6 decimal places\n # before truncating.\n index = 
int(round(dist * pitchFactor, 6)) or 1 # 1 is the smallest ring.\n circularRingDict[index].add(a.getLocation())\n\n return circularRingDict", "def _parse_link_date_map(self, response):\n link_date_map = defaultdict(list)\n for link in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16\"\n )[:1].css(\"a\"):\n link_str = link.xpath(\"./text()\").extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append(\n {\n \"title\": re.sub(r\"\\s+\", \" \", link_str.split(\" – \")[-1]).strip(),\n \"href\": link.attrib[\"href\"],\n }\n )\n for section in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel\"\n ):\n year_str = section.css(\".vc_tta-title-text::text\").extract_first().strip()\n for section_link in section.css(\"p > a\"):\n link_str = section_link.xpath(\"./text()\").extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append(\n {\n \"title\": re.sub(\n r\"\\s+\", \" \", link_str.split(\" – \")[-1]\n ).strip(),\n \"href\": section_link.xpath(\"@href\").extract_first(),\n }\n )\n return link_date_map", "def parse_psl_file(psl_file, head):\n\tsnps = {}\n\t\n\t#Throw away first 5 lines: Not needed when using -noHead with BLAT\n\tif not head:\n\t\tfor i in range(0, 5):\n\t\t\tpsl_file.readline()\n\t\n\tidentity = 0\n\tscore = 0\n\tbest_align = False\n\t#Parses the psl file and adds the entries to the dictionary called snps\t\t\n\tfor line in psl_file:\n\t\telements = line.split()\n\t\tscore = calc_score(int(elements[0]), int(elements[2]), int(elements[1]), int(elements[4]), int(elements[6]))\n\t\tidentity = 100.0 - calc_millibad(int(elements[12]), int(elements[11]), int(elements[16]), int(elements[15]), int(elements[0]), int(elements[2]), int(elements[1]), int(elements[4])) * 0.1\n\t\telements.append(score)\n\t\telements.append(identity)\n\t\telements.append(best_align)\n\t\t#a bit of a hack, but I want to sort on the alignment against the longest target seq\n\t\t#and need that as an int. 
Not sure how else I'd do it.\n\t\telements[14]= int(elements[14])\n\t\t#elements.append(int(elements[14]))\n\t\t#print (elements[0], elements[21])\n\t\tif elements[9] in snps:\n\t\t\tsnps[elements[9]].append(elements)\n\t\telse:\n\t\t\tsnps[elements[9]] = [elements]\n\n\t\t\t\n\treturn snps", "def assign_rings(self):\n rings = self.make_rings()\n ring_angles = [rings[r][0] for r in rings]\n self.rp = np.zeros((self.npks), dtype=int)\n for i in range(self.npks):\n self.rp[i] = (np.abs(self.polar_angle[i] - ring_angles)).argmin()", "def retrieve_instance_links(self):\n instance_links = copy.copy(LINKS)\n self.log.debug('Default instance links: %s', instance_links)\n instance_links.update(self.pipeline_config['instance_links'])\n self.log.debug('Updated instance links: %s', instance_links)\n\n return instance_links", "def makeSeqSpinSystemLink(spinSystemA, spinSystemB, delta=1):\n\n if spinSystemA is spinSystemB:\n showWarning('Failure','Attempt to link spin system to itself.')\n return\n\n residueA = spinSystemA.residue\n residueB = spinSystemB.residue\n\n if residueA:\n idA = '%d %s' % (residueA.seqCode, residueA.ccpCode)\n residueC = residueA.chain.findFirstResidue(seqId=residueA.seqId + delta)\n if not residueC:\n if delta >= 0:\n d = '+%d' % delta\n else:\n d = '%d' % delta \n showWarning('Failure','Impossible spin system link attempted: %s to i%s' % (idA,d))\n return\n\n #idC = '%d %s' % (residueC.seqCode, residueC.ccpCode)\n \n if residueC is not residueB:\n assignSpinSystemResidue(spinSystemB, residueC, warnMerge=False) \n \n elif residueB:\n idB = '%d %s' % (residueB.seqCode, residueB.ccpCode)\n residueC = residueB.chain.findFirstResidue(seqId = residueB.seqId - delta)\n if not residueC:\n print 'Impossible spin system link attempted: %s to i - %d' % (idB,delta)\n else:\n assignSpinSystemResidue(spinSystemA, residueC, warnMerge=False) \n \n \n clearSeqSpinSystemLinks(spinSystemA, delta=delta)\n clearSeqSpinSystemLinks(spinSystemB, delta=-delta)\n \n link = spinSystemA.findFirstResonanceGroupProb(linkType='sequential',\n possibility=spinSystemB)\n \n if link:\n link.sequenceOffset = delta\n link.isSelected = True\n \n else:\n link = spinSystemA.newResonanceGroupProb(linkType='sequential', isSelected=True,\n possibility=spinSystemB, sequenceOffset=delta)\n \n return link", "def extract_linked_items(pages):\n for page in pages:\n for iterate in iterate_on_items(page):\n yield((iterate[1:])[:-1])", "def reverse_map(coarse_grained, mapping_moieties, target=None, solvent_name=None, sol_per_bead=4, sol_cutoff=2, scaling_factor=5, parallel=True):\n\n aa_system = Compound()\n\n not_solvent = [mol for mol in coarse_grained.children if mol.name != solvent_name]\n is_solvent = [mol for mol in coarse_grained.children if mol.name == solvent_name]\n\n print(\"There are {} non-solvent molecules and {} solvent molecules.\".format(len(not_solvent), len(is_solvent)))\n\n # For each bead, replace it with the appropriate mb compound\n # Iterate through each molecule (set of particles that are bonded together)\n if parallel:\n pool = mp.Pool(processes=mp.cpu_count())\n\n # get the solvent molecules mapped in parallel\n inp = zip(is_solvent,\n [target[solvent_name]]*len(is_solvent),\n [sol_per_bead]*len(is_solvent),\n [sol_cutoff]*len(is_solvent))\n chunksize = int(len(is_solvent) / mp.cpu_count()) + 1\n solvent_list = pool.starmap(reverse_map_solvent, inp, chunksize)\n # name the solvents\n\n # get the non_solvent molecules mapped in parallel\n inp = zip(not_solvent,\n [target]*len(not_solvent),\n 
[mapping_moieties]*len(not_solvent))\n chunksize = int(len(not_solvent) / mp.cpu_count()) + 1\n molecule_list = pool.starmap(reverse_map_molecule, inp, chunksize)\n\n\n # put put solvents in one list\n solvent_molecule_list = []\n for i in solvent_list:\n solvent_molecule_list += i\n\n # put lipids in a box and get the box size\n for molecule in molecule_list:\n aa_system.add(molecule)\n\n print(aa_system.boundingbox)\n\n # put everything in a box\n for molecule in solvent_molecule_list:\n aa_system.add(molecule)\n\n else:\n [aa_system.add(reverse_map_molecule(molecule, target, mapping_moieties)) for molecule in not_solvent]\n solvent_compound = reverse_map_solvent(is_solvent, target[solvent_name], sol_per_bead, sol_cutoff)\n [aa_system.add(molecule) for molecule in solvent_compound.children]\n\n\n return aa_system" ]
[ "0.4860738", "0.4834604", "0.48159462", "0.4767646", "0.47199568", "0.46659744", "0.46064976", "0.4587481", "0.4538893", "0.45380697", "0.45124298", "0.44684395", "0.44490376", "0.4414482", "0.4384815", "0.43777874", "0.4372807", "0.43616307", "0.43376932", "0.43176925", "0.43066147", "0.42994502", "0.42880207", "0.42873344", "0.42709255", "0.4265717", "0.42531103", "0.42473865", "0.42406118", "0.42374805", "0.42303294", "0.42134368", "0.41842908", "0.41781056", "0.41708124", "0.4168597", "0.4159307", "0.41556615", "0.41455677", "0.41276124", "0.41008124", "0.40947893", "0.40869057", "0.40771404", "0.4075371", "0.4054451", "0.4052049", "0.4051446", "0.40463495", "0.40406364", "0.40329424", "0.40307993", "0.40307993", "0.40307495", "0.4025913", "0.4021104", "0.4019564", "0.40178126", "0.40153772", "0.4012949", "0.40087658", "0.4008564", "0.40051103", "0.39987206", "0.39882374", "0.39816883", "0.39720047", "0.39698437", "0.3958899", "0.3956337", "0.39506587", "0.3936305", "0.39342138", "0.39265528", "0.39265516", "0.39232162", "0.39198926", "0.3910417", "0.39099222", "0.39092165", "0.39090717", "0.39080605", "0.3907024", "0.39035058", "0.39031178", "0.39028668", "0.3902379", "0.39003286", "0.38946226", "0.38922268", "0.38899976", "0.38832897", "0.38829026", "0.3877603", "0.3875937", "0.38759345", "0.38720268", "0.38714615", "0.38660166", "0.38658598" ]
0.7973843
0
Sort peaks by the assignments of their constituent spins. Sort the peaks by the assignments of spins in particular dimensions. The default order sorts the peaks by the dimensions associated with spin anchors first, then by the remaining dimensions in the order they appear in each peak. Optionally place all commented peaks at the end of the peak list.
def sort_by_assignments(peaklist, order=None, commented_at_end=False):
    anchors = peaklist.anchors
    anchored = tuple(i for anchor in anchors for i in anchor)
    unanchored = set(range(peaklist.dims)) - set(anchored)
    default_order = anchored + tuple(sorted(unanchored))
    order = order if order is not None else default_order
    peaklist.sort(key=lambda peak: tuple(peak[i] for i in order))
    if commented_at_end:
        peaklist.sort(key=lambda peak: peak.commented)
    return peaklist
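For illustration only (not part of the dataset record): a minimal runnable sketch of how sort_by_assignments might be exercised. The Peak and PeakList stand-ins below are assumptions invented for the sketch — the real peak-list API (anchors, dims, commented) is only implied by the function above — and the function itself is repeated verbatim so the sketch is self-contained.

# Hypothetical stand-ins for the peak-list API assumed by sort_by_assignments;
# names and shapes here are illustrative, not taken from the dataset.
class Peak(tuple):
    def __new__(cls, shifts, commented=False):
        peak = super().__new__(cls, shifts)
        peak.commented = commented
        return peak

class PeakList(list):
    def __init__(self, peaks, anchors, dims):
        super().__init__(peaks)
        self.anchors = anchors  # e.g. [(0, 1)]: dimension indices joined by a spin anchor
        self.dims = dims        # total number of dimensions per peak

# Copied verbatim from the document above so the sketch runs on its own.
def sort_by_assignments(peaklist, order=None, commented_at_end=False):
    anchors = peaklist.anchors
    anchored = tuple(i for anchor in anchors for i in anchor)
    unanchored = set(range(peaklist.dims)) - set(anchored)
    default_order = anchored + tuple(sorted(unanchored))
    order = order if order is not None else default_order
    peaklist.sort(key=lambda peak: tuple(peak[i] for i in order))
    if commented_at_end:
        peaklist.sort(key=lambda peak: peak.commented)
    return peaklist

peaks = PeakList(
    [Peak((8.2, 120.5, 55.1)),
     Peak((7.9, 118.3, 54.0), commented=True),
     Peak((8.2, 119.0, 56.2))],
    anchors=[(0, 1)],
    dims=3,
)
sort_by_assignments(peaks, commented_at_end=True)
for peak in peaks:
    print(tuple(peak), peak.commented)
# (8.2, 119.0, 56.2) False
# (8.2, 120.5, 55.1) False
# (7.9, 118.3, 54.0) True

Because Python's sort is stable, the optional second pass keyed on peak.commented moves commented peaks to the end while preserving the assignment order established by the first pass.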
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sort_cubelist(self, cubelist):\n sorted_cubelist = []\n realization_num = 1\n cubelist = cubelist.merge(unique=False)\n for cube in cubelist:\n # If time is a scalar coordinate, promote it to a dimension \n # coordinate, this is because all cubes must have the same number \n # of dimensions to be compared.\n if len(cube.coord(self.time_coord).points) == 1:\n cube = iris.util.new_axis(cube, scalar_coord=self.time_coord)\n \n # Chop cubes into individual realizations for relabelling.\n member_slices = get_coordinate_slice_dimensions(\n cube, [self.realization,self.forecast_ref_time],\n ignore_missing_coords=True)\n for member_slice in cube.slices(member_slices):\n \n if self.realization in [coord.name() \n for coord in member_slice.coords()]:\n member_slice.coord(\n self.realization).points = [realization_num]\n else:\n realization_coord = iris.coords.AuxCoord([realization_num],\n self.realization)\n member_slice.add_aux_coord(realization_coord)\n \n member_slice.cell_methods = None\n sorted_cubelist.append(member_slice)\n realization_num += 1\n \n sorted_cubelist = iris.cube.CubeList(sorted_cubelist)\n # Mask missing time steps so merging can be done.\n sorted_cubelist = pad_coords(sorted_cubelist, self.time_coord)\n cube = sorted_cubelist.merge_cube()\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n \n if cube.coord_dims(cube.coord(self.realization)) == \\\n cube.coord_dims(cube.coord(self.forecast_ref_time)):\n # Re order realizations in initialisation date order.\n ordered_inits = sorted(cube.coord('forecast_reference_time').points)\n ordered_mems = range(1, len(cube.coord('realization').points)+1)\n ordered_cubes = []\n for member_slice in cube.slices(member_slices):\n mem_index = ordered_inits.index(\n member_slice.coord(self.forecast_ref_time).points[0])\n member_slice.coord('realization').points = ordered_mems[mem_index]\n del ordered_inits[mem_index]\n del ordered_mems[mem_index]\n ordered_cubes.append(member_slice)\n cube = iris.cube.CubeList(ordered_cubes).merge_cube()\n \n return cube", "def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n 
mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def preference_ordering(self) -> None:\n for i in self._destinations:\n self._destinations[i] = sorted(self._destinations[i])", "def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j].fitness < self.genepool[0][j-1].fitness:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j].fitness < self.genepool[1][j-1].fitness:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break", "def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in 
resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)", "def findpeakg(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = GaussianModel(prefix = 
'g1_')\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n for j in range(1,fnum[i]):\n mod = mod + GaussianModel(prefix = 'g%i_'%(j + 1))\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][-1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Gaussian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][-1] - bottom)/width)\n for k in range(fnum[i]):\n amplitude = pars['g%i_height'%(k + 1)].value\n sigma = pars['g%i_sigma'%(k + 1)].value\n miu = pars['g%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude*math.exp( - (bottom + width*p - miu)*(bottom + width*p - miu)/(2*sigma*sigma)))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([sigma,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n for cube in year_cubelist.merge():\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, \n self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n sorted_cubelist.append(cube)\n return iris.cube.CubeList(sorted_cubelist)", "def order_pseudotime(self):\n # within segs_tips, order tips according to pseudotime\n if self.iroot is not None:\n for itips, tips in enumerate(self.segs_tips):\n if tips[0] != -1:\n indices = np.argsort(self.pseudotime[tips])\n self.segs_tips[itips] = self.segs_tips[itips][indices]\n else:\n logg.debug(f' group {itips} is very small')\n # sort indices according to segments\n indices = np.argsort(self.segs_names)\n segs_names = self.segs_names[indices]\n # find changepoints of segments\n changepoints = np.arange(indices.size - 1)[np.diff(segs_names) == 1] + 1\n if self.iroot is not None:\n pseudotime = self.pseudotime[indices]\n for iseg, seg in enumerate(self.segs):\n # only consider one segment, it's already ordered by segment\n seg_sorted = seg[indices]\n # consider the pseudotime on this segment and sort them\n seg_indices = np.argsort(pseudotime[seg_sorted])\n # within the segment, order indices according to increasing pseudotime\n indices[seg_sorted] = 
indices[seg_sorted][seg_indices]\n # define class members\n self.indices = indices\n self.changepoints = changepoints", "def changePeaks(self):\n # Change the number of peaks\n if self.minpeaks is not None and self.maxpeaks is not None:\n npeaks = len(self.peaks_function)\n u = self.random.random()\n r = self.maxpeaks - self.minpeaks\n if u < 0.5:\n # Remove n peaks or less depending on the minimum number of peaks\n u = self.random.random()\n n = min(npeaks - self.minpeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n idx = self.random.randrange(len(self.peaks_function))\n self.peaks_function.pop(idx)\n self.peaks_position.pop(idx)\n self.peaks_height.pop(idx)\n self.peaks_width.pop(idx)\n self.last_change_vector.pop(idx)\n else:\n # Add n peaks or less depending on the maximum number of peaks\n u = self.random.random()\n n = min(self.maxpeaks - npeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n self.peaks_function.append(self.random.choice(self.pfunc_pool))\n self.peaks_position.append([self.random.uniform(self.min_coord, self.max_coord) for _ in range(self.dim)])\n self.peaks_height.append(self.random.uniform(self.min_height, self.max_height))\n self.peaks_width.append(self.random.uniform(self.min_width, self.max_width))\n self.last_change_vector.append([self.random.random() - 0.5 for _ in range(self.dim)])\n\n for i in range(len(self.peaks_function)):\n # Change peak position\n shift = [self.random.random() - 0.5 for _ in range(len(self.peaks_position[i]))]\n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n \n shift = [shift_length * (1.0 - self.lambda_) * s \\\n + self.lambda_ * c for s, c in zip(shift, self.last_change_vector[i])]\n \n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n\n shift = [s*shift_length for s in shift]\n \n new_position = []\n final_shift = []\n for pp, s in zip(self.peaks_position[i], shift):\n new_coord = pp + s\n if new_coord < self.min_coord:\n new_position.append(2.0 * self.min_coord - pp - s)\n final_shift.append(-1.0 * s)\n elif new_coord > self.max_coord:\n new_position.append(2.0 * self.max_coord - pp - s)\n final_shift.append(-1.0 * s)\n else:\n new_position.append(new_coord)\n final_shift.append(s)\n\n self.peaks_position[i] = new_position\n self.last_change_vector[i] = final_shift\n\n # Change peak height\n change = self.random.gauss(0, 1) * self.height_severity\n new_value = change + self.peaks_height[i]\n if new_value < self.min_height:\n self.peaks_height[i] = 2.0 * self.min_height - self.peaks_height[i] - change\n elif new_value > self.max_height:\n self.peaks_height[i] = 2.0 * self.max_height - self.peaks_height[i] - change\n else:\n self.peaks_height[i] = new_value\n\n # Change peak width\n change = self.random.gauss(0, 1) * self.width_severity\n new_value = change + self.peaks_width[i]\n if new_value < self.min_width:\n self.peaks_width[i] = 2.0 * self.min_width - self.peaks_width[i] - change\n elif new_value > self.max_width:\n self.peaks_width[i] = 2.0 * self.max_width - self.peaks_width[i] - change\n else:\n self.peaks_width[i] = new_value\n\n self._optimum = None", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, 
xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def panPeakDetect(detection, fs):\n\n min_distance = int(0.25 * fs)\n\n signal_peaks = [0]\n noise_peaks = []\n\n SPKI = 0.0\n NPKI = 0.0\n\n threshold_I1 = 0.0\n threshold_I2 = 0.0\n\n RR_missed = 0\n index = 0\n indexes = []\n\n missed_peaks = []\n peaks = []\n\n for i in range(len(detection)):\n\n if 0 < i < len(detection) - 1:\n if detection[i - 1] < detection[i] and detection[i + 1] < detection[i]:\n peak = i\n peaks.append(i)\n\n if detection[peak] > threshold_I1 and (peak - signal_peaks[-1]) > 0.25 * fs:\n\n signal_peaks.append(peak)\n indexes.append(index)\n SPKI = 0.125 * detection[signal_peaks[-1]] + 0.875 * SPKI\n if RR_missed != 0:\n if signal_peaks[-1] - signal_peaks[-2] > RR_missed:\n missed_section_peaks = peaks[indexes[-2] + 1:indexes[-1]]\n missed_section_peaks2 = []\n for missed_peak in missed_section_peaks:\n if missed_peak - signal_peaks[-2] > min_distance and signal_peaks[\n -1] - missed_peak > min_distance and detection[missed_peak] > threshold_I2:\n missed_section_peaks2.append(missed_peak)\n\n if len(missed_section_peaks2) > 0:\n missed_peak = missed_section_peaks2[np.argmax(detection[missed_section_peaks2])]\n missed_peaks.append(missed_peak)\n signal_peaks.append(signal_peaks[-1])\n signal_peaks[-2] = missed_peak\n\n else:\n noise_peaks.append(peak)\n NPKI = 0.125 * detection[noise_peaks[-1]] + 0.875 * NPKI\n\n threshold_I1 = NPKI + 0.25 * (SPKI - NPKI)\n threshold_I2 = 0.5 * threshold_I1\n\n if len(signal_peaks) > 8:\n RR = np.diff(signal_peaks[-9:])\n RR_ave = int(np.mean(RR))\n RR_missed = int(1.66 * RR_ave)\n\n index = index + 1\n # First possible peak detection\n first_possible_peak = np.argmax(detection[0:int(0.25 * fs)])\n if detection[first_possible_peak] > SPKI:\n signal_peaks[0] = first_possible_peak\n else:\n signal_peaks.pop(0)\n signal_peaks = np.array(signal_peaks)\n return signal_peaks", "def updatePeakShifts(peak):\n\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)", "def parks(self):\n point_array = [0, 2, 8, 12, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14]\n park_coords = []\n parks_sorted = []\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'p':\n park_coords.append(tuple([i, j]))\n while len(park_coords) > 0:\n x, y = park_coords.pop(0)\n if len(parks_sorted) == 0:\n parks_sorted.append([(x, y)])\n else:\n borders_bool = []\n for block_no, park_block in enumerate(parks_sorted):\n borders_bool.append(False)\n for i, j in park_block:\n if abs(x - i) + abs(y - j) == 1:\n borders_bool[block_no] = True\n if (num_true := borders_bool.count(True)) == 1:\n parks_sorted[borders_bool.index(True)].append((x, y))\n elif num_true > 1:\n new_parks_sorted = []\n i_mega_park = None\n for block_no, park_block in enumerate(parks_sorted):\n if borders_bool[block_no]: # If it is bordering\n if i_mega_park is None:\n i_mega_park = block_no\n new_parks_sorted.append(park_block)\n else:\n new_parks_sorted[i_mega_park] += park_block\n new_parks_sorted[i_mega_park] += [(x, y)]\n parks_sorted = new_parks_sorted\n else:\n new_parks_sorted.append(park_block)\n parks_sorted = new_parks_sorted\n else:\n parks_sorted.append([(x, y)])\n\n return sum([point_array[len(block)] for block in 
parks_sorted])", "def sortNondominated(fitness, k=None, first_front_only=False):\r\n if k is None:\r\n k = len(fitness)\r\n\r\n # Use objectives as keys to make python dictionary\r\n map_fit_ind = defaultdict(list)\r\n for i, f_value in enumerate(fitness): # fitness = [(1, 2), (2, 2), (3, 1), (1, 4), (1, 1)...]\r\n map_fit_ind[f_value].append(i)\r\n fits = list(map_fit_ind.keys()) # fitness values\r\n\r\n current_front = []\r\n next_front = []\r\n dominating_fits = defaultdict(int) # n (The number of people dominate you)\r\n dominated_fits = defaultdict(list) # Sp (The people you dominate)\r\n\r\n # Rank first Pareto front\r\n # *fits* is a iterable list of chromosomes. Each has multiple objectives.\r\n for i, fit_i in enumerate(fits):\r\n for fit_j in fits[i + 1:]:\r\n # Eventhougn equals or empty list, n & Sp won't be affected\r\n if dominates(fit_i, fit_j):\r\n dominating_fits[fit_j] += 1\r\n dominated_fits[fit_i].append(fit_j)\r\n elif dominates(fit_j, fit_i):\r\n dominating_fits[fit_i] += 1\r\n dominated_fits[fit_j].append(fit_i)\r\n if dominating_fits[fit_i] == 0:\r\n current_front.append(fit_i)\r\n\r\n fronts = [[]] # The first front\r\n for fit in current_front:\r\n fronts[-1].extend(map_fit_ind[fit])\r\n pareto_sorted = len(fronts[-1])\r\n\r\n # Rank the next front until all individuals are sorted or\r\n # the given number of individual are sorted.\r\n # If Sn=0 then the set of objectives belongs to the next front\r\n if not first_front_only: # first front only\r\n N = min(len(fitness), k)\r\n while pareto_sorted < N:\r\n fronts.append([])\r\n for fit_p in current_front:\r\n # Iterate Sn in current fronts\r\n for fit_d in dominated_fits[fit_p]:\r\n dominating_fits[fit_d] -= 1 # Next front -> Sn - 1\r\n if dominating_fits[fit_d] == 0: # Sn=0 -> next front\r\n next_front.append(fit_d)\r\n # Count and append chromosomes with same objectives\r\n pareto_sorted += len(map_fit_ind[fit_d])\r\n fronts[-1].extend(map_fit_ind[fit_d])\r\n current_front = next_front\r\n next_front = []\r\n\r\n return fronts", "def find_peak_locations(data, tol=prominence_tolerance, ranked=False):\n\n prominences = [(i, calculate_peak_prominence(data, i)) for i in range(len(data))]\n\n # normalize to interval [0,1]\n prom_max = max([x[1] for x in prominences])\n if prom_max == 0 or len(prominences) == 0:\n # failure to find any peaks; probably monotonically increasing / decreasing\n return []\n\n prominences[:] = [(x[0], x[1] / prom_max) for x in prominences]\n\n # take only the tallest peaks above given tolerance\n peak_locs = [x for x in prominences if x[1] > tol]\n\n # if a peak has a flat top, then both 'corners' of that peak will have high prominence; this\n # is rather unavoidable. 
just check for adjacent peaks with exactly the same prominence and\n # remove the lower one\n to_remove = [\n peak_locs[i]\n for i in range(len(peak_locs) - 2)\n if peak_locs[i][1] == peak_locs[i + 1][1]\n ]\n for r in to_remove:\n peak_locs.remove(r)\n\n if ranked:\n peak_locs.sort(key=lambda x: x[1] * -1)\n else:\n peak_locs[:] = [x[0] for x in peak_locs]\n\n return peak_locs", "def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j] < self.genepool[0][j-1]:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j] < self.genepool[1][j-1]:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break", "def addPeakResonances(peaks):\n \n contribs = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) < 1:\n contrib = assignResToDim(peakDim)\n contribs.append(contrib)\n \n resonances = [c.resonance for c in contribs]\n \n return resonances", "def _sort_measurements(self):\n if self._unsorted:\n sorted_ndxs = np.argsort(self._angles)\n self._distances = self._distances[sorted_ndxs]\n self._angles = self._angles[sorted_ndxs]\n self._intensities = self._intensities[sorted_ndxs]\n self._error_codes = self._error_codes[sorted_ndxs]\n self._unsorted = False", "def movePeaks(hist, peaks, dist=20):\n peakList = []\n smooth_hist = smooth(hist)\n for pk in peaks:\n p = int(round(pk))\n while True:\n start = int(round(max(0, p - dist)))\n end = int(round(min(len(hist), p + dist)))\n if end < start:\n new_peak = p\n break\n new_peak = start + np.argmax(hist[int(start):int(end)])\n\n # if the local maximum is not far from initital peak, break\n if abs(p - new_peak) <= 5: #\n break\n else:\n left = min(p, new_peak)\n right = max(p, new_peak)\n\n # Check if between initial peak and local maximum has valley\n if all(smooth_hist[left + 1:right] > smooth_hist[p]):\n break\n dist = dist / 2\n peakList.append(new_peak)\n return list(peakList)", "def quick_sort(data, head, tail, draw_data, time_tick):\n if head < tail:\n partition_index = partition(data, head, tail, draw_data, time_tick)\n\n # Left partition\n quick_sort(data, head, partition_index-1, draw_data, time_tick)\n\n # Right partition\n quick_sort(data, partition_index+1, tail, draw_data, time_tick)", "def stitchpeaklist(inpeak_list,mergethreshold):\n peak_list=[]\n prev_peak=['chr0',0,1]\n inpeak_list.sort()\n for curr_peak in inpeak_list:\n if curr_peak[0]==prev_peak[0] and prev_peak[2]+mergethreshold>=curr_peak[1]:\n curr_peak[1]=min(prev_peak[1],curr_peak[1])\n curr_peak[2]=max(prev_peak[2],curr_peak[2])\n else:\n if prev_peak!=['chr0',0,1]:\n peak_list.append(prev_peak)\n prev_peak=curr_peak[:]\n peak_list.append(prev_peak)\n return peak_list", "def standard_sorting(cls, zmat):\n if zmat is None:\n return None\n nats = len(zmat)\n ncoords = 3*nats - 6\n if nats < 4:\n return None\n else:\n r_coords = [0, 1, 3]\n a_coords = [2, 4]\n t_coords = [5]\n if nats > 4:\n extra = np.arange(6, ncoords+1)\n r_coords += extra[::4].tolist()\n a_coords += extra[1::4].tolist()\n t_coords += extra[2::4].tolist()\n return np.argsort(np.concatenate([r_coords, a_coords, t_coords]))", "def parse_peaks(self):\n peaks = []\n if self._mir_root.tag == 'method':\n for peak in self._mir_root[0].findall(\"peak\"):\n p = dict(peak.items())\n peaks.append(Peak(float(p['m_z']), float(p['tolerance'])))\n return sorted(peaks, 
key=lambda x: x[0])", "def expression_peaks(cluster, magnitude, group1 = [ \"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\" ], group2 = [ \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ]):\n if cluster.averaged == False:\n cluster.average_matrix(group1 + group2)\n verbalise(\"G\", cluster.sample_header)\n peaklist = {}\n\n for gene in range(cluster.genenumber):\n # for group 1:\n datalist = list(cluster.data_matrix[:,gene])\n maxexpression = max(datalist[:len(group1)])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient:\n if maxexpression >= magnitude + datalist[0]:\n # check adjacent peaks are not too big:\n # difference of 5.64 corresponds to 2% of the untransformed fpkm value\n # difference of 1.00 corresponds to 50% of the untransformed fpkm value\n if maxposn == len(group1) - 1:\n if (maxexpression - 5.64 < datalist[maxposn - 1] < maxexpression - 1):\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n # for group 2:\n maxexpression = max(datalist[len(group1):])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient for reciprocal swap:\n if maxexpression >= magnitude * datalist[len(group1)]:\n # check adjacent peaks are not too big:\n try:\n if maxposn == len(group1+group2) - 1:\n if (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5):\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n except IndexError as inst:\n verbalise(\"R\", inst)\n verbalise(\"R\", datalist)\n verbalise(\"R\", \"Max is %.3f at position %d\" % (maxexpression, maxposn))\n\n verbalise(\"G\", len(peaklist), \"significant peaks found.\")\n return peaklist", "def get_top_spec(mz_list,intensity_list,min_perc=False,windowed_mode=False,top=10,window_size=100,add_dummy_peak=True):\n\tgr_intensity_list = []\n\tgr_mz_list = []\n\t\n\t#In the case of minimal percentage... calculate perc intensity and filter\n\tif min_perc:\n\t\tfor i,mz in zip(intensity_list,mz_list):\n\t\t\tif i > min_perc:\n\t\t\t\tgr_intensity_list.append(i)\n\t\t\t\tgr_mz_list.append(mz)\n\t\n\t#In the case of windowed mode... 
iterate over the possible windows and intensity values; take the top per window\n\tif windowed_mode:\n\t\tstart_index = 0\n\t\tfor w in range(window_size,int(max(mz_list)),window_size):\n\t\t\ttemp_mz = []\n\t\t\ttemp_intens = []\n\t\t\ttemp_start_index = 0\n\t\t\t\n\t\t\t#Iterate over all m/z values and see if they fall within the window\n\t\t\tfor mz,intens in zip(mz_list[start_index:],intensity_list[start_index:]):\n\t\t\t\tif mz > w and mz <= w+window_size:\n\t\t\t\t\ttemp_start_index += 1\n\t\t\t\t\ttemp_mz.append(mz)\n\t\t\t\t\ttemp_intens.append(intens)\n\t\t\t\tif mz > w+window_size:\n\t\t\t\t\tbreak\n\t\t\t#Next window ignore all these lower values\n\t\t\tstart_index = copy.deepcopy(temp_start_index)\n\t\t\t\n\t\t\t#Use all if there are less peaks than the top number of peaks it should select\n\t\t\tif len(temp_mz) <= top:\n\t\t\t\tgr_mz_list.extend(temp_mz)\n\t\t\t\tgr_intensity_list.extend(temp_intens)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t#Get the indexes of the top peaks\n\t\t\tidxs = np.sort(np.argpartition(np.array(temp_intens), -top)[-top:])\n\t\t\tgr_mz_list.extend([temp_mz[idx] for idx in idxs])\n\t\t\tgr_intensity_list.extend([temp_intens[idx] for idx in idxs])\n\t\n\t#If not windowed or min perc use a simple top peaks\n\tif not windowed_mode and not min_perc:\n\t\tif len(intensity_list) > top:\n\t\t\t#Get the indexes of the top peaks\n\t\t\tidxs = np.sort(np.argpartition(np.array(intensity_list), -top)[-top:])\n\t\t\tgr_mz_list = [mz_list[idx] for idx in idxs]\n\t\t\tgr_intensity_list = [intensity_list[idx] for idx in idxs]\n\t\telse:\n\t\t\t#If there are less peaks than top peaks; return all\n\t\t\tgr_mz_list = mz_list\n\t\t\tgr_intensity_list = intensity_list\n\t\n\t#If needed add a dummy peak; this is important later since I want to take into account immonium ions and small fragments\n\tif add_dummy_peak:\n\t\tgr_mz_list.insert(0,0.0)\n\t\tgr_intensity_list.insert(0,1.0)\n\t\n\treturn(gr_mz_list,gr_intensity_list)", "def sort_holes(self, wall, holes):\n center = wall.matrix_world @ self.center\n holes = [(o, (o.matrix_world.translation - center).length) for o in holes]\n self.quicksort(holes)\n return [o[0] for o in holes]", "def detect_min_max(arr):\n\n max_value = max(np.absolute(np.reshape(arr, -1)))\n peaks_max = []\n peaks_min = []\n x_max = []\n y_max = []\n z_max = []\n x_min = []\n y_min = []\n z_min = []\n\n for j1 in range(10, arr.shape[0]-10):\n for j2 in range(10, arr.shape[1]-10):\n for j3 in range(10, arr.shape[2]-10):\n if (np.absolute(arr[j1, j2, j3]) > 0.3*max_value):\n\n aaaa = [\n arr[j1, j2, j3 + 1], arr[j1, j2 + 1, j3],\n arr[j1 + 1, j2, j3], arr[j1, j2, j3 - 1],\n arr[j1, j2 - 1, j3], arr[j1 - 1, j2, j3],\n arr[j1 + 1, j2 + 1, j3 + 1],\n arr[j1 - 1, j2 - 1, j3 - 1],\n arr[j1 - 1, j2 + 1, j3 + 1], arr[j1, j2 + 1, j3 + 1],\n arr[j1, j2 - 1, j3 - 1], arr[j1, j2 - 1, j3 + 1],\n arr[j1, j2 + 1, j3 - 1], arr[j1 + 1, j2, j3 + 1],\n arr[j1 - 1, j2, j3 - 1], arr[j1 - 1, j2, j3 + 1],\n arr[j1 + 1, j2, j3 - 1], arr[j1 + 1, j2 + 1, j3],\n arr[j1 - 1, j2 - 1, j3], arr[j1 + 1, j2 - 1, j3],\n arr[j1 - 1, j2 + 1, j3], arr\n [j1 + 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 + 1, j3 - 1], arr\n [j1 - 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 - 1, j3 - 1], arr\n [j1 - 1, j2 + 1, j3 - 1]]\n bbbb = [\n arr[j1, j2, j3 + 9], arr[j1, j2 + 9, j3],\n arr[j1 + 9, j2, j3], arr[j1, j2, j3 - 9],\n arr[j1, j2 - 9, j3], arr[j1 - 9, j2, j3]]\n\n if ((arr[j1, j2, j3] > max(aaaa)) and (max(aaaa) > max(bbbb))):\n peaks_max = np.append(peaks_max, arr[j1, j2, j3])\n x_max = np.append(x_max, j1)\n 
y_max = np.append(y_max, j2)\n z_max = np.append(z_max, j3)\n\n if ((arr[j1, j2, j3] < min(aaaa)) and (min(aaaa) < min(bbbb))):\n peaks_min = np.append(peaks_min, arr[j1, j2, j3])\n x_min = np.append(x_min, j1)\n y_min = np.append(y_min, j2)\n z_min = np.append(z_min, j3)\n\n return peaks_min, np.vstack(\n (x_min, y_min, z_min)), peaks_max, np.vstack(\n (x_max, y_max, z_max))", "def sort_segment_points(Aps, Bps):\n mid = []\n j = 0\n mid.append(Aps[0])\n for i in range(len(Aps)-1):\n dist = distance_tt_point(Aps[i], Aps[i+1])\n for m in range(j, len(Bps)):\n distm = distance_tt_point(Aps[i], Bps[m])\n if dist > distm:\n direction = dot(normalize(line(Aps[i].gen2arr(), Aps[i+1].gen2arr())), normalize(Bps[m].gen2arr()))\n if direction > 0:\n j = m + 1\n mid.append(Bps[m])\n break\n\n mid.append(Aps[i+1])\n for m in range(j, len(Bps)):\n mid.append(Bps[m])\n return mid", "def getAndSortFiducialPoints(self, center):\r\n # self.__registrationStatus.setText('Registration processing...')\r\n # pNode = self.parameterNode()\r\n # fixedAnnotationList = slicer.mrmlScene.GetNodeByID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if fixedAnnotationList != None:\r\n # fixedAnnotationList.RemoveAllChildrenNodes()\r\n markerCenters = center\r\n nbCenter = len(center)\r\n for k in range(nbCenter):\r\n point = [0]\r\n for i in range(nbCenter):\r\n U,V,W = 0,0,0\r\n for j in range(nbCenter):\r\n d = 0\r\n if i != j and markerCenters[i]!=(0,0,0):\r\n d2 = (markerCenters[i][0]-markerCenters[j][0])**2+(markerCenters[i][1]-markerCenters[j][1])**2+(markerCenters[i][2]-markerCenters[j][2])**2\r\n d = d2**0.5\r\n # print markerCenters[i],markerCenters[j]\r\n #print d\r\n if d >=45 and d<=53:\r\n U += 1\r\n elif d >53 and d<60:\r\n V +=1\r\n elif d >=70 and d<80:\r\n W +=1\r\n #print U,V,W\r\n if U+V+W>=3:\r\n #print markerCenters[i]\r\n point.extend([i])\r\n point.remove(0)\r\n minX = [999,999,999,999]\r\n maxX = [-999,-999,-999,-999]\r\n sorted = [[0,0,0] for l in range(4)]\r\n sortedConverted = [[0,0,0] for l in range(4)]\r\n for i in range(2):\r\n for k in point:\r\n if markerCenters[k][0]<= minX[0]:\r\n minX[0] = markerCenters[k][0]\r\n minX[1] = k\r\n elif markerCenters[k][0]<= minX[2]:\r\n minX[2] = markerCenters[k][0]\r\n minX[3] = k\r\n if markerCenters[k][0]>= maxX[0]:\r\n maxX[0] = markerCenters[k][0]\r\n maxX[1] = k\r\n elif markerCenters[k][0]>= maxX[2]:\r\n maxX[2] = markerCenters[k][0]\r\n maxX[3] = k\r\n if markerCenters[minX[1]][1] < markerCenters[minX[3]][1]:\r\n sorted[0] = minX[1]\r\n sorted[1] = minX[3]\r\n else:\r\n sorted[0] = minX[3]\r\n sorted[1] = minX[1]\r\n if markerCenters[maxX[1]][1]>markerCenters[maxX[3]][1]:\r\n sorted[2] = maxX[1]\r\n sorted[3] = maxX[3]\r\n else:\r\n sorted[2] = maxX[3]\r\n sorted[3] = maxX[1]\r\n sorted2 = [0,0,0,0]\r\n if 1:#self.horizontalTemplate.isChecked():\r\n sorted2[0]=sorted[2]\r\n sorted2[2]=sorted[0]\r\n sorted2[1]=sorted[3]\r\n sorted2[3]=sorted[1]\r\n else:\r\n sorted2[0]=sorted[3]\r\n sorted2[2]=sorted[1]\r\n sorted2[1]=sorted[0]\r\n sorted2[3]=sorted[2]\r\n # logic = slicer.modules.annotations.logic()\r\n # logic.SetActiveHierarchyNodeID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if pNode.GetParameter(\"Template\")=='4points':\r\n # nbPoints=4\r\n # elif pNode.GetParameter(\"Template\")=='3pointsCorners':\r\n # nbPoints=3\r\n l = slicer.modules.annotations.logic()\r\n l.SetActiveHierarchyNodeID(slicer.util.getNode('Fiducial List_fixed').GetID())\r\n for k in range(4) :\r\n fiducial = 
slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\r\n fiducial.SetReferenceCount(fiducial.GetReferenceCount()-1)\r\n fiducial.SetFiducialCoordinates(markerCenters[sorted2[k]])\r\n fiducial.SetName(str(k))\r\n fiducial.Initialize(slicer.mrmlScene)\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed ==None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n # sRed.SetSliceVisible(1)\r\n m= sRed.GetSliceToRAS()\r\n m.SetElement(0,3,sortedConverted[3][0])\r\n m.SetElement(1,3,sortedConverted[3][1])\r\n m.SetElement(2,3,sortedConverted[3][2])\r\n sRed.Modified()\r\n return sorted2", "def sort_slices(slices):\n result = []\n for x in slices:\n sorted = []\n semi_sorted = []\n # Sort by stations\n x.sort(key=lambda y: y[2])\n\n # Sort by channels\n found_channels = []\n current_station = x[0][2]\n for y in x:\n if current_station != y[2]:\n current_station = y[2]\n found_channels = []\n if y[3][-1] in found_channels:\n continue\n if y[3][-1] in config.archive_channels_order:\n found_channels.append(y[3][-1])\n semi_sorted.append(y)\n\n current_station = \"\"\n index = 0\n for y in semi_sorted:\n if y[2] != current_station:\n current_station = y[2]\n for channel in config.archive_channels_order:\n sorting_index = index\n while sorting_index < len(semi_sorted) and semi_sorted[sorting_index][2] == current_station:\n if semi_sorted[sorting_index][3][-1] == channel:\n sorted.append(semi_sorted[sorting_index])\n break\n sorting_index += 1\n index += 1\n\n result.append(sorted)\n\n return result", "def kwiksort(dict_prefs, list_els, runs=10, random_seed=None):\n best_score=float(\"-infinity\")\n if random_seed is not None:\n np.random.seed(random_seed)\n for run in range(runs):\n ordering=_kwiksort(list_els,dict_prefs)\n score=eval_ordering(ordering,dict_prefs)\n if score>best_score:\n best_score=score\n best_order=ordering\n return best_order", "def sort(self):\n self.fragment_list.sort()", "def sorted_checkpoints(self):\n reg = re.compile(r\"{}.*\\{}\".format(self.prefix, Checkpointer.EXTENSION))\n if not os.path.exists(self.root):\n all_checkpoints = []\n else:\n all_checkpoints = [f for f in os.listdir(\n self.root) if reg.match(f)]\n mtimes = []\n for f in all_checkpoints:\n mtimes.append(os.path.getmtime(os.path.join(self.root, f)))\n\n mf = sorted(zip(mtimes, all_checkpoints))\n chkpts = [m[1] for m in reversed(mf)]\n log.debug(\"Sorted checkpoints {}\".format(chkpts))\n return chkpts", "def sort_with_beam(self, pattern_arr):\n\n def _cmp(x, y):\n \"\"\"Cmp function to sort pattern.\"\"\"\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL\n\n pattern_arr = sorted(pattern_arr.items(), key=functools.cmp_to_key(_cmp),\n reverse=True)\n if len(pattern_arr) > self.beam_width:\n pattern_arr = pattern_arr[:self.beam_width]\n return OrderedDict(pattern_arr)", "def addPeakResonancesToSeqSpinSystems(peak, seqOffsets):\n \n assert len(peak.peakDims) == len(seqOffsets)\n assert None in seqOffsets # otherwise no reference point\n\n spinSystems = []\n resonanceList = []\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n spinSystem = None\n resonances = []\n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n resonances.append(resonance)\n \n if resonance.resonanceGroup:\n if not spinSystem:\n 
spinSystem = resonance.resonanceGroup\n\n elif spinSystem is not resonance.resonanceGroup:\n msg = 'There are multiple spin systems for peak dimension %d.\\n' % (i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(resonance.resonanceGroup,spinSystem)\n else:\n return\n\n resonanceList.append(resonances)\n spinSystems.append( spinSystem )\n\n ref = None\n I = 0\n for i, spinSystem in enumerate(spinSystems):\n if spinSystem is not None:\n if seqOffsets[i] is None:\n if ref is None:\n ref = spinSystem\n I = i\n \n else:\n if spinSystem is not ref:\n msg = 'Dimensions %d and %d have different spin systems.\\n' % (I+1,i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(spinSystem, ref)\n else:\n return\n \n if ref is not None:\n for i, seqOffset in enumerate(seqOffsets):\n \n if seqOffset:\n spinSystem = findConnectedSpinSystem(ref, seqOffset)\n if spinSystems[i] is ref:\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n showWarning('Failure','Spin system cannot be both i and i%s (dimension %d)' % (deltaText,i+1))\n continue\n \n \n if spinSystem and spinSystems[i]:\n if spinSystem is not spinSystems[i]:\n if (not spinSystem.residue) or (not spinSystems[i].residue):\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n \n msg = 'There is an i%s spin system already present (dimension %d).\\n' % (deltaText, i+1)\n msg += 'Merge spin systems together?'\n if showOkCancel('Confirm', msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n elif spinSystem.residue is spinSystems[i].residue:\n name = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n msg = 'There are multiple spin systems for residue %s.\\n?' 
% name\n msg += 'Merge spin systems together?'\n \n if showOkCancel('Confirm',msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n else:\n txt1 = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n txt2 = '%d%s' % (spinSystems[i].residue.seqCode,spinSystems[i].residue.ccpCode)\n msg = 'Cannot set spin system for F%d dim' % (i+1)\n msg += 'Offset %d causes conflict between %s and %s' % (seqOffset, txt1, txt2)\n showWarning('Failure',msg)\n return\n \n if resonanceList[i]:\n nmrProject = resonanceList[i][0].nmrProject\n if not spinSystem:\n if spinSystems[i]:\n spinSystem = spinSystems[i]\n else:\n spinSystem = nmrProject.newResonanceGroup()\n \n makeSeqSpinSystemLink(ref, spinSystem, seqOffsets[i])\n \n for resonance in resonanceList[i]:\n if resonance.resonanceGroup is not spinSystem:\n addSpinSystemResonance(spinSystem,resonance)", "def FindPeaks_graph(self):\n import string\n \n maxima = self['FP_LOC'].copy()\n maxima = num.where(maxima)\n maxima = (maxima[1],maxima[0])\n detectimg = self['FP_DETECT'].copy()\n \n id = self._getGraphId()\n root = 'FindPeaks_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n\n doStamp(detectimg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawCross(maxima,length=7,color='green')\n \n strpeaks = string.strip('%i'% (self['M_NPEAKS']))\n text = 'NP=%s' % strpeaks \n \n # Painted.Graffiti(text,commtextpos)\n \n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['FindPeaks'] = epsname\n self['figcomms']['FindPeaks'] = text", "def determine_peaks_and_limits(\n data, smoothed, limits,\n peak_prom, peak_height,\n valley_prom, valley_height,\n debug, smooth_window_size, outfile,\n skip_smooth,\n):\n mm = max(smoothed)\n peaks, props = find_peaks(smoothed, height=peak_height, prominence=peak_prom) # maxima (peaks positions)\n rpeaks, rprops = find_peaks([-i+mm for i in smoothed], height=valley_height, prominence=valley_prom) # minima (peaks limits)\n\n if len(peaks) > 3 :\n print(\"WARNING: More than 3 peaks detected.\\nPossible erroneous detection:\\n\\t-Restart setting the -ll parameter.\\n\\t-check histogram and modify peak height and prominence arguments accordingly.\\n\\t-Contaminant peaks may also break detection, remove them with tools such as blobtools or by hard-filtering low coverage contigs.\")\n print(\"NOTE: Assuming the last 2 peaks are diploid and haploid...\")\n\n if debug :\n debug_plot_peak_errors(data, smoothed, peaks, limits.values(), rpeaks, smooth_window_size, outfile, skip_smooth)\n\n if len(peaks) > 0 :\n print(\"Peaks found: \" + \"x, \".join(str(p) for p in peaks) + \"x\")\n else :\n raise Exception(\"No peaks found! 
Try changing the input parameters or setting thresholds manually!\")\n if len(rpeaks) > 0 :\n print(\"Valleys found: \" + \"x, \".join(str(p) for p in rpeaks) + \"x\")\n else :\n print(\"No valleys found!\")\n\n valleys = [0] + list(rpeaks) + [len(smoothed)]\n thresholds = get_threshold_between_peaks(smoothed, peaks, valleys)\n\n relevant_peaks = peaks[-3:]\n #valleys = rpeaks[-3:]\n print(\"Relevant peaks: \" + \"x, \".join(str(p) for p in relevant_peaks) + \"x\")\n print(\"Thresholds:\\n\\t- \" + \"\\t- \".join(\"{}: {}x\\n\".format(k,p) for k,p in thresholds.items()))\n\n return relevant_peaks, thresholds", "def stichAnchors(chrom, loops, margin=1):\n cov = set()\n for i, loop in enumerate(loops):\n cov.update(range(loop.x_start, loop.x_end + 1))\n cov.update(range(loop.y_start, loop.y_end + 1))\n cov = list(cov)\n cov.sort()\n npeaks = []\n i = 0\n while i < len(cov) - 1:\n j = i + 1\n while j < len(cov):\n if cov[j] - cov[j - 1] > margin:\n break\n else:\n j += 1\n peak = Peak()\n peak.chrom = chrom\n peak.start = cov[i]\n peak.end = cov[j - 1]\n peak.length = cov[j - 1] - cov[i] + 1\n npeaks.append(peak)\n i = j #update search start\n return npeaks", "def quicksort(self, points):\n if len(points) <= 1:\n return points\n smaller, equal, larger = [], [], []\n pivot_angle = self.polar_angle(\n points[randint(0, len(points) - 1)]\n ) # select random pivot\n for p in points:\n angle = self.polar_angle(p) # calculate current angle\n if angle < pivot_angle:\n smaller.append(p)\n elif angle == pivot_angle:\n equal.append(p)\n else:\n larger.append(p)\n return (\n self.quicksort(smaller)\n + sorted(equal, key=self.distance)\n + self.quicksort(larger)\n )", "def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)", "def clean(data, N_peaks, f_interval=None, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- clean')\n \n # Avoid overwritting data:\n data0 = data.copy()\n\n # Standard frequency resolution:\n T = data0[-1,0]-data[0,0]\n if f_resolution==None:\n f_resolution = 1/T\n \n # Avoid 0 as input as not peaks are found:\n if f_interval[0]==0:\n f_interval = [f_resolution, f_interval[1]]\n \n # Constants:\n SAMPLING = 1\n f_RES = 0.1*f_resolution # Standard frequency resolution\n picon = 2*np.pi*data0[:,0] # Optimization constant\n f_peaks = np.zeros(N_peaks)\n A_peaks = np.zeros(N_peaks)\n \n for i in range(N_peaks):\n k = i+1\n print '%s. Peak' %k\n\n # 1. Iteration - start finding largest peak:\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1]) # Smaller f_int (Tuple instead of array for optimization)\n\n # Testing that the frequency resolution > sigma_f to continue:\n A_peak = P[j]\n A_av = np.mean(np.sqrt(P))\n sigma_a = 0.8*A_av\n sigma_phi = sigma_a/A_peak\n sigma_f = np.sqrt(3)*sigma_phi/(np.pi*T)\n if f_RES>sigma_f: \n \n # 2. Iteration: uses now f_res and so on..\n Pf_power, _, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1])\n \n # 3. 
Iteration: last\n Pf_power, P_comp, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n fpicon = picon*f[j] # Optimization constant\n alpha = P_comp[:,0]; beta = P_comp[:,1]\n alpha0 = alpha[j]*np.sin(fpicon)\n beta0 = beta[j]* np.cos(fpicon)\n data0[:,1] = data0[:,1] - alpha0 - beta0\n f_peaks[i] = f[j]\n A_peaks[i] = np.sqrt(P[j])\n\n # Output:\n St_clean = data0\n print f_peaks, A_peaks\n return St_clean, f_peaks, A_peaks", "def get_peaks(self):\n peaks = np.array([i for i in range(self.npks)\n if self.polar_angle[i] < self.polar_max])\n x, y, z = (np.rint(self.xp[peaks]).astype(np.int16),\n np.rint(self.yp[peaks]).astype(np.int16),\n np.rint(self.zp[peaks]).astype(np.int16))\n polar, azi = self.polar_angle[peaks], self.azimuthal_angle[peaks]\n intensity = self.intensity[peaks]\n if self.Umat is not None:\n H, K, L = self.get_hkls()\n H = np.array(H)[peaks]\n K = np.array(K)[peaks]\n L = np.array(L)[peaks]\n diffs = np.array([self.diff(i) for i in peaks])\n else:\n H = K = L = diffs = np.zeros(peaks.shape, dtype=float)\n return list(zip(peaks, x, y, z, polar, azi, intensity, H, K, L, diffs))", "def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)", "def pigeon_sort(array):\n if len(array) == 0:\n return array\n\n # Manually finds the minimum and maximum of the array.\n min = array[0]\n max = array[0]\n\n for i in range(len(array)):\n if array[i] < min:\n min = array[i]\n elif array[i] > max:\n max = array[i]\n\n # Compute the variables\n holes_range = max - min + 1\n holes = [0 for _ in range(holes_range)]\n holes_repeat = [0 for _ in range(holes_range)]\n\n # Make the sorting.\n for i in range(len(array)):\n index = array[i] - min\n if holes[index] != array[i]:\n holes[index] = array[i]\n holes_repeat[index] += 1\n else:\n holes_repeat[index] += 1\n\n # Makes the array back by replacing the numbers.\n index = 0\n for i in range(holes_range):\n while holes_repeat[i] > 0:\n array[index] = holes[i]\n index += 1\n holes_repeat[i] -= 1\n\n # Returns the sorted array.\n return array", "def lineshape_from_peaklist(peaklist, w=0.5, points=800, limits=None):\n peaklist.sort()\n if limits:\n try:\n l_limit, r_limit = limits\n l_limit = float(l_limit)\n r_limit = float(r_limit)\n except Exception as e:\n print(e)\n print('limits must be a tuple of two numbers')\n raise\n if l_limit > r_limit:\n l_limit, r_limit = r_limit, l_limit\n else:\n l_limit = peaklist[0][0] - 50\n r_limit = peaklist[-1][0] + 50\n x = np.linspace(l_limit, r_limit, points)\n y = add_lorentzians(x, peaklist, w)\n return x, y", "def summarize(self, data, order=11, verbose=False):\n self.intervals = np.diff(self.timebase[self.onsets]) # event intervals\n i_decay_pts = int(2*self.taus[1]/self.dt) # decay window time (points)\n self.peaks = []\n self.smpkindex = []\n self.smoothed_peaks = []\n self.amplitudes = []\n self.Qtotal = []\n self.averaged = False # set flags in case of no events found\n self.individual_events = False\n self.fitted = False\n self.fitted_tau1 = np.nan\n self.fitted_tau2 = np.nan\n self.Amplitude = np.nan\n self.avg_fiterr = np.nan\n ndata = len(data)\n avgwin = 5 # int(1.0/self.dt) # 5 point moving average window for peak detection\n# print('dt: ', self.dt)\n mwin = int((0.050)/self.dt)\n# print('mwin: ', mwin)\n #order = int(0.0004/self.dt)\n # print('onsets: ', self.onsets)\n if self.sign > 0:\n 
nparg = np.greater\n else:\n nparg = np.less\n if len(self.onsets) > 0: # original events\n# print('no: ', len(self.onsets))\n acceptlist = []\n for j in range(len(data[self.onsets])):\n if self.sign > 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] < self.eventstartthr:\n continue\n if self.sign < 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] > -self.eventstartthr:\n continue\n svwinlen = data[self.onsets[j]:(self.onsets[j]+mwin)].shape[0]\n if svwinlen > 11:\n svn = 11\n else:\n svn = svwinlen\n if svn % 2 == 0: # if even, decrease by 1 point to meet ood requirement for savgol_filter\n svn -=1\n \n if svn > 3: # go ahead and filter\n p = scipy.signal.argrelextrema(scipy.signal.savgol_filter(data[self.onsets[j]:(self.onsets[j]+mwin)], svn, 2), nparg, order=order)[0]\n else: # skip filtering\n p = scipy.signal.argrelextrema(data[self.onsets[j]:(self.onsets[j]+mwin)], nparg, order=order)[0]\n if len(p) > 0:\n self.peaks.extend([int(p[0]+self.onsets[j])])\n amp = self.sign*(self.data[self.peaks[-1]] - data[self.onsets[j]])\n\n self.amplitudes.extend([amp])\n i_end = i_decay_pts + self.onsets[j] # distance from peak to end\n i_end = min(ndata, i_end) # keep within the array limits\n if j < len(self.onsets)-1:\n if i_end > self.onsets[j+1]:\n i_end = self.onsets[j+1]-1 # only go to next event start\n move_avg, n = moving_average(data[self.onsets[j]:i_end], n=min(avgwin, len(data[self.onsets[j]:i_end])))\n if self.sign > 0:\n pk = np.argmax(move_avg) # find peak of smoothed data\n else:\n pk = np.argmin(move_avg)\n self.smoothed_peaks.extend([move_avg[pk]]) # smoothed peak\n self.smpkindex.extend([self.onsets[j]+pk])\n acceptlist.append(j)\n if len(acceptlist) < len(self.onsets):\n if verbose:\n print('Trimmed %d events' % (len(self.onsets)-len(acceptlist)))\n self.onsets = self.onsets[acceptlist] # trim to only the accepted values\n # print(self.onsets)\n self.avgevent, self.avgeventtb, self.allevents = self.average_events(self.onsets) \n if self.averaged:\n self.fit_average_event(self.avgeventtb, self.avgevent, debug=False)\n \n else:\n if verbose:\n print('No events found')\n return", "def _sort_locations(self,locations):\n i = np.lexsort(np.transpose(locations*np.array((1,-1))))\n return locations[i]", "def sortAssemsByRing(self):\n sortKey = lambda a: a.spatialLocator.getRingPos()\n self._children = sorted(self._children, key=sortKey)", "def calc_scores(x, y, peaks, score_measure, n_peaks_influence):\n\n scores = []\n n_peaks_all = []\n\n for i, row in enumerate(peaks):\n n_peaks = len(row)\n if n_peaks == 0:\n score = 0\n elif score_measure == 0:\n score = 1\n elif score_measure == 1: # median height\n heights = [y[i, k] for k in row]\n score = np.median(heights)\n elif score_measure == 2: # mean height\n heights = [y[i, k] for k in row]\n score = np.mean(heights)\n elif score_measure == 3: # mean area\n score = simpson(y[i], x[i]) / n_peaks\n elif score_measure == 4: # mean area\n score = simpson(y[i], x[i])\n\n scores.append(score)\n n_peaks_all.append(n_peaks)\n\n if n_peaks == 0:\n scores_peaks = 0\n elif n_peaks_influence == 0:\n scores_peaks = scores\n elif n_peaks_influence == 1:\n scores_peaks = [n*score for n, score in zip(n_peaks_all, scores)]\n elif n_peaks_influence == 2:\n scores_peaks = [score**(n/50)\n for n, score in zip(n_peaks_all, scores)]\n\n bar4.update(bar4.value + 1)\n\n n_peaks_all = [n_peaks for scores_peaks, n_peaks in sorted(zip(scores_peaks, n_peaks_all))]\n n_peaks_all.reverse()\n\n return scores_peaks, scores, 
n_peaks_all", "def optimal_points(segments):\n points = []\n segments.sort(key=lambda x: x.end)\n\n while len(segments) != 0:\n s = segments[0]\n points.append(s.end)\n j = 0\n while j < len(segments):\n temp = segments[j]\n if temp.start <= s.end and temp.end >= s.end:\n segments.remove(temp)\n else:\n j += 1\n return points", "def plot_peaks(wavelists: list, title: str, save: bool, plot_path: str):\n\n # if a single WaveList is passed, package it in a list so the method works\n if isinstance(wavelists, WaveList):\n wavelists = [wavelists]\n\n columns = [{'desc': ' Before Algorithm', 'source': 'peaks_initial'},\n {'desc': ' After Sub Algorithm A', 'source': 'peaks_sub_a'},\n {'desc': ' After Sub Algorithm B', 'source': 'peaks_sub_b'},\n {'desc': ' After Sub Algorithm C&D', 'source': 'peaks_sub_c'}]\n\n fig, axs = plt.subplots(nrows=len(wavelists), ncols=len(columns), sharex=True, figsize=(14, 7))\n plt.suptitle(title)\n\n for i, wavelist in enumerate(wavelists):\n for j, column in enumerate(columns):\n peaks = getattr(wavelist, column['source'])['location'].values\n axs[i, j].set_title(wavelist.series_name + column['desc'])\n axs[i, j].plot(wavelist.raw_data.values)\n axs[i, j].scatter(peaks, wavelist.raw_data.values[peaks.astype(int)], color='red', marker='o')\n axs[i, j].get_xaxis().set_visible(False)\n axs[i, j].get_yaxis().set_visible(False)\n\n fig.tight_layout()\n\n if save:\n plt.savefig(os.path.join(plot_path, title + '.png'))\n plt.close('all')", "def sort(data,start,end):\n if start < end:\n partition_index=partition(data,start,end)\n sort(data,start,partition_index-1)\n sort(data,partition_index+1,end)", "def quick_sort(partition_list, low, high):\n if low >= high:\n return\n part_point = get_partition(partition_list, low, high)\n quick_sort(partition_list, low, part_point - 1)\n quick_sort(partition_list, part_point + 1, high)", "def peakdetect_parabole(y_axis, x_axis, points = 9):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n # make the points argument odd\n points += 1 - points % 2\n #points += 1 - int(points) & 1 slower when int conversion needed\n \n # get raw peaks\n max_raw, min_raw = peakdetect_zero_crossing(y_axis)\n \n # define output variable\n max_peaks = []\n min_peaks = []\n \n max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points)\n min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points)\n \n max_peaks = map(lambda x: [x[0], x[1]], max_)\n max_fitted = map(lambda x: x[-1], max_)\n min_peaks = map(lambda x: [x[0], x[1]], min_)\n min_fitted = map(lambda x: x[-1], min_)\n \n \n #pylab.plot(x_axis, y_axis)\n #pylab.hold(True)\n #for max_p, max_f in zip(max_peaks, max_fitted):\n # pylab.plot(max_p[0], max_p[1], 'x')\n # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)\n #for min_p, min_f in zip(min_peaks, min_fitted):\n # pylab.plot(min_p[0], min_p[1], 'x')\n # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)\n #pylab.show()\n \n return [max_peaks, min_peaks]", "def sort_values(self):\r\n for loopindex in range(0, self.population_size):\r\n index = self.cost_populations.index(min(self.cost_populations))\r\n \r\n if loopindex < int(self.population_size / 2):\r\n self.best_districts.append(self.district_population[index])\r\n self.best_costs.append(self.cost_populations[index])\r\n else:\r\n self.worst_districts.append(self.district_population[index])\r\n \r\n del self.cost_populations[index]\r\n del self.district_population[index]", "def distort(signals, labels, amps_p, amps_t):\n assert 
len(signals.shape) == 3\n assert len(labels.shape) == 3\n segments0 = []\n labels0 = []\n label_len = labels.shape[2]\n assert label_len == 8 or label_len == 9\n for l in range(signals.shape[0]):\n segments1 = []\n labels1 = []\n for k in range(signals.shape[1]):\n signal = signals[l, k, :]\n label = labels[l, k, :]\n p = label[1]\n if label_len == 8:\n t = label[6]\n else:\n t = label[7]\n index_array = np.arange(0, len(signal))\n\n width_p_l = 2\n width_p_r = 0.5\n a_l_p = index_array[0:p]\n a_r_p = index_array[p:]\n center_l_p = max((label[1] + label[0]) / 2 - 5, 0)\n center_r_p = min((label[2] + label[1]) / 2 + 5, len(signal) - 1)\n a_l_p = 1 / (1 + np.exp(-(a_l_p - center_l_p) / width_p_l))\n a_r_p = 1 - 1 / (1 + np.exp(-(a_r_p - center_r_p) / width_p_r))\n\n width_t_l = 10\n width_t_r = 10\n a_l_t = index_array[0: t]\n a_r_t = index_array[t:]\n if label_len == 8:\n center_r_t = min((label[6] + label[7]) / 2 + 5, len(signal) - 1)\n else:\n center_r_t = min((label[7] + label[8]) / 2 + 5, len(signal) - 1)\n center_l_t = 2 * t - center_r_t\n a_l_t = 1 / (1 + np.exp(-(a_l_t - center_l_t) / width_t_l))\n a_r_t = 1 - 1 / (1 + np.exp(-(a_r_t - center_r_t) / width_t_r))\n\n for amp_p in amps_p:\n for amp_t in amps_t:\n a_p = np.concatenate([a_l_p * amp_p, a_r_p * amp_p])\n raw_segment_p = np.multiply(signal, a_p)\n a_t = np.concatenate([a_l_t * amp_t, a_r_t * amp_t])\n raw_segment_t = np.multiply(signal, a_t)\n raw_segment = raw_segment_p + raw_segment_t + signal\n segments1.append(np.expand_dims(raw_segment, axis=0))\n labels1.append(np.expand_dims(label, axis=0))\n\n segments0.append(np.concatenate(segments1, axis=0))\n labels0.append(np.concatenate(labels1, axis=0))\n\n return np.array(segments0), np.array(labels0)", "def sort_by_pval(lines, ind):\r\n def _nan_safe_sort(line):\r\n \"\"\"Sort lines based on pvals and force nans to have inf pval.\"\"\"\r\n val = float(line.split('\\t')[ind])\r\n if not isnan(val):\r\n return val\r\n else:\r\n return inf\r\n return (\r\n # header+sorted lines\r\n [lines[0]] + sorted(lines[1:], key=_nan_safe_sort)\r\n )", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def sort_by_dominance(spart_list):\n score = [\n sum(\n [\n 1 for x in spart_list if x >= y\n ]) for y in spart_list\n ]\n what_would_sort = numpy.argsort(score)\n sorted_sparts = [spart_list[x] for x in what_would_sort]\n return sorted_sparts", "def organizeAndUpdate(self): \r\n for point in self.points:\r\n point.organize()\r\n point.update()", "def _quick_sort(self, start, end):\n if start >= end: # Length of 1 or less\n return\n\n pivot = self._arr[end] # Select pivot as last value in array\n left = start\n right = end - 1 # Begin one before the pivot\n\n while left <= right: # Continue until all values are ordered\n while left <= right and self._arr[left] < pivot: # Find first value greater than pivot\n left += 1\n while left <= right and pivot < self._arr[right]: # Find first value less than pivot\n right -= 1\n\n if left <= right: # If unordered, then swap two found values\n self._arr[left], self._arr[right] = self._arr[right], self._arr[left]\n left, right = left + 1, right - 1 # Increment for next iteration\n\n self._arr[left], self._arr[end] = self._arr[end], self._arr[left] # Move pivot to middle\n self._quick_sort(start, left - 1) # Sort left portion\n self._quick_sort(left + 1, end) # Sort right portion", "def intro_sort(data):\n recurssion_depth=2*math.log(len(data))\n if len(data) < 15:\n insertion_sort(data)\n elif recurssion_depth==0:\n 
merge_sort(data)\n else:\n quick_sort(data)", "def getTopPeakOverlap(a, b, percA=100, percB=100):\n import subprocess\n import math\n\n broadFactors = [\n \"H3K27ME1\", \"H3K27ME2\", \"H3K27ME3\",\n \"H3K36ME1\", \"H3K36ME2\", \"H3K36ME3\",\n \"H3K9ME1\", \"H3K9ME2\", \"H3K9ME3\",\n \"H3K72ME1\", \"H3K72ME2\", \"H3K72ME3\"\n ]\n\n peakA = a['peakFile'].reset_index(drop=True)[0]\n peakB = b['peakFile'].reset_index(drop=True)[0]\n\n peakA = re.sub(\"/fhgfs/groups/lab_bock/shared/projects/\", \"/media/afr/cemm-backup/\", peakA)\n peakB = re.sub(\"/fhgfs/groups/lab_bock/shared/projects/\", \"/media/afr/cemm-backup/\", peakB)\n\n if a.ip.values[0] not in broadFactors:\n topPeakA = re.sub(\"narrowPeak\", \"sorted.narrowPeak\", peakA)\n else:\n topPeakA = re.sub(\"broadPeak\", \"sorted.broadPeak\", peakA)\n if b.ip.values[0] not in broadFactors:\n topPeakB = re.sub(\"narrowPeak\", \"sorted.narrowPeak\", peakB)\n else:\n topPeakB = re.sub(\"broadPeak\", \"sorted.broadPeak\", peakB)\n\n # get total (to get top 1%)\n proc = subprocess.Popen([\"wc\", \"-l\", peakA], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n totalA = re.sub(\"\\D.*\", \"\", out)\n\n proc = subprocess.Popen([\"wc\", \"-l\", peakB], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n totalB = re.sub(\"\\D.*\", \"\", out)\n\n fracA = 100 / percA\n fracB = 100 / percB\n\n topA = str(math.trunc(int(totalA) / fracA))\n topB = str(math.trunc(int(totalB) / fracB))\n\n # sort files by score and get top X%\n ps = subprocess.Popen(('sort', '-k9rn', peakA), stdout=subprocess.PIPE)\n output = subprocess.check_output(('head', '-n', topA), stdin=ps.stdout)\n\n with open(topPeakA, 'w') as handle:\n handle.write(output)\n\n ps = subprocess.Popen(('sort', '-k9rn', peakB), stdout=subprocess.PIPE)\n output = subprocess.check_output(('head', '-n', topB), stdin=ps.stdout)\n\n with open(topPeakB, 'w') as handle:\n handle.write(output)\n\n # intersect top peaks\n proc = subprocess.Popen(\n [\"bedtools\", \"intersect\", \"-u\", \"-a\", topPeakA, \"-b\", topPeakB],\n stdout=subprocess.PIPE\n )\n out, err = proc.communicate()\n\n # return count\n try:\n print(a.sampleName, b.sampleName)\n print(len(out.split(\"\\n\")) / float(topA))\n return len(out.split(\"\\n\")) / float(topA)\n except ZeroDivisionError:\n return 0", "def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))", "def sort_values(\n self,\n ascending=True,\n inplace=False,\n kind=\"quicksort\",\n na_position=\"last\",\n ignore_index=False,\n ):\n return self.d_series.map_partitions(\n lambda s: s.list.sort_values(\n ascending, inplace, kind, na_position, ignore_index\n ),\n meta=self.d_series._meta,\n )", "def fix_metadata(self, cubes):\n cube = self.get_cube_from_list(cubes)\n\n # Add auxiliary coordinate from list of cubes\n coords_to_add = {\n 'ap': 1,\n 'b': 1,\n 'ps': (0, 2, 3),\n }\n add_aux_coords_from_cubes(cube, cubes, coords_to_add)\n cube.coord(var_name='ap').units = 'Pa'\n\n # Fix vertical coordinate bounds\n for coord_name in ('ap', 'b'):\n bounds_cube = get_bounds_cube(cubes, coord_name)\n bounds = bounds_cube.data.reshape(-1, 2)\n new_bounds_cube = iris.cube.Cube(bounds,\n **bounds_cube.metadata._asdict())\n cubes.remove(bounds_cube)\n cubes.append(new_bounds_cube)\n\n # Fix hybrid sigma pressure coordinate\n cubes = super().fix_metadata(cubes)\n\n # Fix horizontal coordinates bounds\n for coord_name in ('latitude', 'longitude'):\n cube.coord(coord_name).guess_bounds()\n return cubes", "def 
_peakdetect_parabole_fitter(raw_peaks, x_axis, y_axis, points):\n func = lambda x, k, tau, m: k * ((x - tau) ** 2) + m\n fitted_peaks = []\n for peak in raw_peaks:\n index = peak[0]\n x_data = x_axis[index - points // 2: index + points // 2 + 1]\n y_data = y_axis[index - points // 2: index + points // 2 + 1]\n # get a first approximation of tau (peak position in time)\n tau = x_axis[index]\n # get a first approximation of peak amplitude\n m = peak[1]\n \n # build list of approximations\n # k = -m as first approximation?\n p0 = (-m, tau, m)\n popt, pcov = curve_fit(func, x_data, y_data, p0)\n # retrieve tau and m i.e x and y value of peak\n x, y = popt[1:3]\n \n # create a high resolution data set for the fitted waveform\n x2 = np.linspace(x_data[0], x_data[-1], points * 10)\n y2 = func(x2, *popt)\n \n fitted_peaks.append([x, y, [x2, y2]])\n \n return fitted_peaks", "def _sort_phot(self, verbose=False):\n if hasattr(self, \"data\") and hasattr(self, \"data_filters\"):\n ## This looks fugly.\n newkeys = np.array([i for i in self.data_filters.keys()])[np.argsort([self.data_filters[i].lambda_effective.value for i in self.data_filters])]\n\n sorted_data = OrderedDict()\n sorted_data_filters = OrderedDict()\n\n for newkey in newkeys:\n\n if verbose: print(newkey)\n\n sorted_data[newkey] = self.data[newkey]\n sorted_data_filters[newkey] = self.data_filters[newkey]\n\n self.data = sorted_data\n self.data_filters = sorted_data_filters\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass", "def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n sorted_cubelist.append(self._sort_cubelist(year_cubelist))\n return iris.cube.CubeList(sorted_cubelist)", "def anchors(self):\n dims = self.dims\n anchors = []\n for peak in self:\n possible_anchors = []\n for combination in combinations(range(dims), 2):\n spins = [peak[i] for i in combination]\n if any(s.res_num is None or s.atom is None for s in spins):\n continue\n res_nums = [spin.res_num for spin in spins]\n atoms = [spin.atom for spin in spins]\n elements = [atom[0] for atom in atoms]\n positions = [atom[1:] for atom in atoms]\n same_res_num = res_nums[0] == res_nums[1]\n valid_pairs = [set(('H', 'N')), set(('H', 'C'))]\n is_proton_heavy_pair = set(elements) in valid_pairs\n same_position = all(c[0] == c[1] for c in zip(*positions))\n if same_res_num and is_proton_heavy_pair and same_position:\n if '' in positions and set(elements) != set(('H', 'N')):\n # One of the atom names must have been 'H', 'N' or 'C'\n # Of these, only the amide proton anchor is valid\n continue\n if elements[0] == 'H':\n possible_anchors.append(combination)\n else:\n possible_anchors.append(combination[::-1])\n if len(possible_anchors) > 1:\n pa_sets = [set(pa) for pa in possible_anchors]\n overlap = set.intersection(*pa_sets)\n if overlap:\n # Ambiguous, overlapping anchors\n continue\n for poss_anc in possible_anchors:\n if poss_anc not in anchors:\n anchors.append(poss_anc)\n anchors = tuple(anchors)\n return anchors", "def _inplace_quick_sort(mylist, a, b):\n if a >= b:\n return # range is trivially sorted\n pivot_index = _choose_pivot(mylist, a, b) # select pivot index\n pivot = mylist[pivot_index] # get pivot value\n _swap(mylist, pivot_index, b) # move pivot to edge\n left = a # scans rightward\n right = b-1 # scans leftward\n while left <= right:\n # scan until reaching value equal or larger than pivot (or right marker)\n while left <= right and 
mylist[left] < pivot:\n left += 1\n # scan until reaching value equal or smaller than pivot (or left marker)\n while left <= right and pivot < mylist[right]:\n right -= 1\n if left <= right: # scans did not cross\n _swap(mylist, left, right) # swap\n left += 1\n right -= 1 # shrink the range we're looking at\n\n # put pivot into its final place (currently marked by left index)\n _swap(mylist, left, b)\n # recurse\n _inplace_quick_sort(mylist, a, left-1)\n _inplace_quick_sort(mylist, left+1, b)", "def sort_points(point, cloud):\n minsq = [distance_point_point_sqrd(p, point) for p in cloud]\n return sorted(zip(minsq, cloud, range(len(cloud))), key=lambda x: x[0])", "def sort(self):\n def get_fval(res):\n return res.fval if not np.isnan(res.fval) else np.inf\n\n self.list = sorted(self.list, key=get_fval)", "def stacking_peaks(dir, args, tool_list, score_dict, chr=None):\n\n peaks = []\n scores = []\n ref_char = \".REF_\"\n\n #if tool is None:\n # print \"combine error :: tool name is none.\"\n # exit()\n\n chr = str(chr)\n\n for tool in tool_list:\n name_tag = args.input.rsplit(\".\", 1)[0] + ref_char + chr\n print name_tag\n\n if \"homer\" in tool:\n input_name = dir + \"/HOMER/\" + name_tag + \".bam_peaks.bed\"\n waiting_result_file(input_name)\n score = score_dict['homer'][chr]['max_val']\n\n if \"macs\" in tool:\n input_name = dir + \"/MACS/\" + name_tag + \".bam_peaks.broadPeak\"\n waiting_result_file(input_name)\n score = score_dict['macs'][chr]['max_val']\n\n if \"spp\" in tool:\n input_name = dir + \"/SPP/\" + name_tag + \".bam_peaks.bed\"\n waiting_result_file(input_name)\n score = score_dict['spp'][chr]['max_val']\n\n if \"sicer\" in tool:\n input_name = dir + \"/SICER/\" + name_tag + \".bam_peaks.bed\"\n waiting_result_file(input_name)\n score = score_dict['sicer'][chr]['max_val']\n\n peaks.append(loadpeak(input_name))\n scores.append(score)\n\n return peaks, scores", "def sortNeedlePoints(self, needleNumber):\r\n # sort needle points\r\n nodes = slicer.util.getNodes('.%d-*' % needleNumber)\r\n controlPointsUnsorted = []\r\n for node in nodes.values():\r\n p = [0,0,0]\r\n node.GetFiducialCoordinates(p)\r\n controlPointsUnsorted.append(p)\r\n controlPoints = self.sortTable(controlPointsUnsorted, (2, 1, 0))\r\n return controlPoints", "def find_peaks(self, high=None, low=None, beta_std=None, **kwargs):\n peaks, left, right = {}, {}, {}\n for ch in range(self.nch):\n if beta_std is not None:\n self.high_threshold = np.mean(self.data[:,ch]) + beta_std * np.std(self.data[:,ch])\n self.low_threshold = np.mean(self.data[:,ch]) - beta_std * np.std(self.data[:,ch])\n _peaks_idx, _ = signal.find_peaks(self.data[:,ch], height=self.high_threshold, **kwargs)\n _left_idx, _right_idx = self.find_left_right_nearest(\n np.where(self.data[:, ch] < self.low_threshold)[0], _peaks_idx)\n elif high is not None and low is not None:\n _peaks_idx, _ = signal.find_peaks(self.data[:, ch], height=high, **kwargs)\n _left_idx, _right_idx = self.find_left_right_nearest(\n np.where(self.data[:, ch] < low)[0], _peaks_idx)\n peaks[ch] = TimeSeries(self.t[_peaks_idx], self.data[_peaks_idx], self.name+'_peaks_'+str(ch))\n left[ch] = TimeSeries(self.t[_left_idx], self.data[_left_idx], self.name+'_left_'+str(ch))\n right[ch] = TimeSeries(self.t[_right_idx], self.data[_right_idx], self.name+'_right_'+str(ch))\n return peaks, left, right", "def argsortxaxis(\n xaxis: List[str],\n xsort: List[bool],\n stats: Dict[str, np.ndarray],\n nbmatch = re.compile(r\"^\\d\\+-.*\")\n):\n axes = pd.DataFrame(dict(\n {\n str(2*i+1): 
(\n stats['x']\n if len(xaxis) == 1 else\n [stats['x'][k][i] for k in range(len(stats['x']))]\n )\n for i in range(len(xaxis))\n },\n value = -stats['boxcenter']\n ))\n\n for isort in xsort:\n axes.set_index(str(2*isort+1), inplace = True)\n axes[str(2*isort)] = axes.groupby(str(2*isort+1)).value.median()\n axes.reset_index(inplace = True)\n\n def _cnt(itm):\n return itm.count(INVISIBLE)\n\n for i in range(1, 2*len(xaxis)+1, 2):\n col = axes[str(i)]\n if any(np.issubdtype(col.dtype, j) for j in (np.number, np.bool_)):\n if str(i-1) in axes:\n # reverse orders: first the label, second the median value\n axes.rename(columns = {str(i): str(i-1), str(i-1): str(i)}, inplace = True)\n continue\n\n vals = col.unique()\n if all(nbmatch.match(j) for j in vals):\n # the column is of type; [\"1-track1\", \"2-track2\", ...]\n # we keep only the track index\n axes[str(i)] = [int(j.split('-')) for j in col]\n\n elif any(j.startswith(INVISIBLE) for j in vals):\n # the column has labels sorted according to the invisible character.\n # count those and set them as the main order\n col = col.apply(_cnt)\n if str(i-1) in axes:\n # reverse orders: first the label, second the median value\n axes[str(i)] = axes[str(i-1)]\n axes[str(i-1)] = col\n else:\n axes[str(i)] = col\n\n axes.sort_values(\n [*(str(i) for i in range(2*len(xaxis)+1) if str(i) in axes), 'value'],\n inplace = True\n )\n return axes.index.values", "def add(self, peak):\n self.peaks.append(peak)\n self.end = max(self.end, peak.end)\n self.len = self.start - self.end\n self.count += 1\n self.pops.append(peak.pop)\n # Get average fold change and log10p\n self.fold_change = (self.fold_change+peak.fold_change)/2\n self.log10p = (self.log10p+peak.log10p)/2", "def matchProfilePeakPair(pkp, cpp): #{\n vrbMsg(5, 'matchProfilePeakPair() pkp = ' + str(pkp) + ', cpp = ' + str(cpp))\n npp = [None, None]\n for i in range(0, len(pkp)): #{\n if(npp[0] is None): #{\n npp[0] = i\n elif(npp[1] is None): #}{\n npp[1] = i\n else: #{\n vrbMsg(5, 'matchProfilePeakPair() i = ' + str(i) + ', npp = ' + str(npp))\n d0 = abs(pkp[i] - cpp)\n d1 = abs(pkp[npp[0]] - cpp)\n if(d0 < d1): #{\n j = npp[0]\n npp[0] = i\n d0 = abs(pkp[j] - cpp)\n d1 = abs(pkp[npp[1]] - cpp)\n if(d0 < d1): #{\n npp[1] = j\n #}\n #}\n #}\n #}\n if((not (npp[1] is None)) and (npp[1] < npp[0])): #{\n i = npp[0]\n npp[0] = npp[1]\n npp[1] = i\n #}\n vrbMsg(5, 'matchProfilePeakPair() npp = ' + str(npp))\n return npp", "def splitDetectorPeakInfo(self):\r\n\t\tsplit_raw_min = np.amin(self.splitData)\r\n\t\tsplit_min = split_raw_min - self.splitBaseline\r\n\t\t\t\t\r\n\t\tsplit_raw_max = np.amax(self.splitData)\r\n\t\tsplit_max = split_raw_max - self.splitBaseline\r\n\t\r\n\t\tself.splitMax = split_max\r\n\t\tself.splitMin = split_min", "def calc_ply_order(constraints, targets):\r\n if constraints.sym:\r\n ply_order = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n return ply_order\r\n\r\n order_before_sorting = np.arange(targets.n_plies)\r\n ply_order = np.zeros((targets.n_plies,), int)\r\n ply_order[0::2] = order_before_sorting[\r\n :targets.n_plies // 2 + targets.n_plies % 2]\r\n ply_order[1::2] = order_before_sorting[\r\n targets.n_plies // 2 + targets.n_plies % 2:][::-1]\r\n return ply_order", "def getAliasedPeakDimPositions(peakDim, shiftRanges=None, ppm=None):\n\n if not shiftRanges:\n shiftRanges = [getPeakDimFullShiftRange(peakDim), ]\n\n positions = []\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n sw = dataDimRef.dataDim.numPointsOrig\n \n if ppm is None:\n ppm = 
peakDim.realValue\n \n points = points0 = ppm2pnt(ppm, dataDimRef)\n while isShiftInRange(ppm, shiftRanges):\n positions.append( points )\n points -= sw\n ppm = pnt2ppm(points, dataDimRef)\n\n points = points0+sw\n while isShiftInRange(ppm, shiftRanges):\n positions.append( points )\n points += sw\n ppm = pnt2ppm(points, dataDimRef)\n \n else:\n positions.append(peakDim.position)\n \n\n return positions", "def run_arem(in_files, out_peaks, max_fdr):\n in_treat, in_control = in_files[0]\n matches = re.search(r'(.*\\.treat)(.*)\\.mapped_reads', in_treat).groups()\n name = matches[0] + matches[1] + '.arem.peaks'\n cmd = 'arem -t %s -c %s --name=%s %s' % (in_treat, in_control, name,\n cfg.get('peaks', 'arem_params'))\n sys_call(cmd)\n # convert to proper bedfile- ints for score and + for strand\n peaks_to_keep = set()\n with open(out_peaks, 'w') as outfile:\n with open(name + '_peaks.xls') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n start = str(max(0, int(fields[1])))\n score = str(max(0, min(1000, int(float(fields[6])))))\n fdr = float(fields[8])\n if fdr <= max_fdr:\n outfile.write('\\t'.join([fields[0], start, fields[2],\n 'AREM_peak_%s' % (index + 1), score])\n + '\\t+\\n')\n peaks_to_keep.add(index)\n # take region surrounding the peak summit\n summit_size = cfg.getint('peaks', 'peak_summit_size')\n with open(out_peaks + '_summits.%s_around' % \\\n cfg.get('peaks', 'peak_summit_size'), 'w') as outfile:\n with open(name + '_summits.bed') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.strip().split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n score = str(max(0, min(1000, int(float(fields[-1])))))\n start = str(max(0, int(fields[1]) - summit_size / 2))\n stop = str(int(fields[2]) + summit_size / 2)\n if index in peaks_to_keep:\n outfile.write('\\t'.join([fields[0], start, stop,\n 'AREM_peak_%s' % (index + 1), score])\n + '\\t+\\n')", "def _set_top_preps(self) -> None :\n prep_dict = self._system.getPReps(1, 20)\n prep_address_list = prep_dict['preps']\n for each_prep in prep_address_list:\n self._top_preps.put(each_prep['address'])", "def getPeakLocations(peak_filename, margin, pixel_size, sigma):\n if os.path.exists(peak_filename):\n print(\"Using peak starting locations specified in\", peak_filename)\n elif os.path.exists(os.path.basename(peak_filename)):\n peak_filename = os.path.basename(peak_filename)\n print(\"Using peak starting locations specified in\", peak_filename)\n\n # Check if the file is a storm-analysis HDF5 file.\n #\n if saH5Py.isSAHDF5(peak_filename):\n peak_locations_type = \"hdf5\"\n peak_locations = saH5Py.loadLocalizations(peak_filename)\n if not \"ysigma\" in peak_locations:\n if not \"xsigma\" in peak_locations:\n peak_locations[\"xsigma\"] = numpy.ones(peak_locations[\"x\"].size) * sigma\n peak_locations[\"ysigma\"] = peak_locations[\"xsigma\"].copy()\n \n else:\n peak_locations_type = \"text\"\n\n # Load peak x,y locations.\n peak_locs = numpy.loadtxt(peak_filename, ndmin = 2)\n\n # Create peak dictionary.\n peak_locations = {\"background\" : peak_locs[:,3],\n \"height\" : peak_locs[:,2],\n \"x\" : peak_locs[:,0],\n \"y\" : peak_locs[:,1]}\n\n peak_locations[\"xsigma\"] = numpy.ones(peak_locations[\"x\"].size) * sigma\n peak_locations[\"ysigma\"] = numpy.ones(peak_locations[\"x\"].size) * sigma\n peak_locations[\"z\"] = numpy.zeros(peak_locations[\"x\"].size)\n\n # 
Adjust positions for finding/fitting margin.\n peak_locations[\"x\"] += margin\n peak_locations[\"y\"] += margin\n\n print(\"Loaded\", peak_locations[\"x\"].size, \"peak locations\")\n #\n # We return is_text as the caller might want to do different things if\n # the file is text, like initialize the Z value.\n #\n return [peak_locations, peak_locations_type]", "def quipSort(lst):\n i = 0\n first = lst[0]\n last = lst[len(lst)-1]\n middle = lst[len(lst)//2]\n if first < last:\n if first < middle:\n pivot = first\n else:\n pivot = middle\n elif last < middle:\n pivot = last\n else:\n pivot = middle\n less, same, more = list(), list(), list()\n N = len(lst)\n limit = int(math.log(N,2))\n quipSortRec(lst, limit, less, same, more, pivot, i)", "def acf_prot(self, pmin=0.1, pmax=100, delta=0.01, lookahead=30,\n peak_to_trough=True, maxpeaks=1, plot=False, ax=None,\n fig_kwargs=None, savefig_filename=None):\n lags, ac = self.acf(pmin=pmin, pmax=pmax, smooth=pmax/10)\n\n # make sure lookahead isn't too long if pmax is small\n lookahead = min(lookahead, pmax)\n\n n_maxes = 0\n while n_maxes == 0:\n maxes, mins = peakdetect(ac, lags, delta=delta, lookahead=lookahead)\n\n # First max only counts if it's after a min.\n try:\n if mins[0][0] > maxes[0][0]:\n maxes.pop(0)\n except IndexError:\n pass\n\n n_maxes = len(maxes)\n if n_maxes == 0:\n delta /= 2\n\n maxheight = -np.inf\n pbest = np.nan\n for i, ((xhi, yhi), (xlo, ylo)) in enumerate(zip(maxes, mins)):\n if peak_to_trough:\n # Calculate mean(peak-to-trough) height\n h1 = yhi - ylo\n try:\n h2 = yhi - mins[i+1][1]\n height = (h1+h2)/2.\n except IndexError:\n height = h1\n else:\n height = yhi\n print(i, height)\n\n if height > maxheight:\n pbest = xhi\n maxheight = height\n if i == maxpeaks-1:\n break\n\n # Evaluate quality by fitting exp*sin\n x, y = lags, ac\n\n def fn(x, A, tau, T):\n return A*np.exp(-x/tau)*np.cos(2*np.pi*x/T)\n\n def chisq(p):\n A, tau = p\n\n mod = fn(x, A, tau, pbest)\n return ((mod - y)**2).sum()\n\n fit = minimize(chisq, [1., pbest*2])\n\n # Prevent tau from being unreasonably large\n tau = min(fit.x[1], pmax/pbest * 20)\n\n\n # Bigger is better. 
len(lags) is basically proportional to pmax\n quality = 1./ (fit.fun / len(lags) / maxheight)\n quality *= tau/pbest # enhance quality for long decay timescales.\n\n if ax is not None:\n plot = True\n if plot:\n if ax is None:\n fig, ax = plt.subplots(1,1)\n else:\n fig = ax.get_figure()\n\n if fig_kwargs is None:\n fig_kwargs = dict(color='k')\n\n ax.plot(lags, ac, **fig_kwargs)\n if np.isfinite(pbest):\n ax.axvline(pbest, ls=':', color='r')\n\n ax.plot(lags, fn(lags, fit.x[0], fit.x[1], pbest))\n\n ax.annotate('P={:.2f}\\ntau={:.2f}\\nQ={:.1f}'.format(pbest, tau, quality),\n xy=(0.95,0.95), xycoords='axes fraction', ha='right', va='top')\n\n if plot:\n if savefig_filename:\n fig.savefig(savefig_filename)\n return pbest, maxheight, tau, quality\n else:\n return pbest, maxheight, tau, quality, fig\n else:\n return pbest, maxheight, tau, quality", "def addPeakResonancesToSpinSystem(peaks):\n \n # TBD check experiment type of the peak\n \n if not peaks:\n return\n \n resonances = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n resonances.append(contrib.resonance)\n \n spinSystems = []\n for resonance in resonances:\n resonanceGroup = resonance.resonanceGroup\n if resonanceGroup and (resonanceGroup not in spinSystems):\n spinSystems.append(resonanceGroup)\n\n spinSystem = None\n if len(spinSystems) == 1:\n spinSystem = spinSystems[0]\n elif len(spinSystems) > 1:\n msg = 'There are multiple spin systems for these peaks.\\n'\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm',msg):\n spinSystem = spinSystems[0]\n for spinSystem2 in spinSystems[1:]:\n mergeSpinSystems(spinSystem2,spinSystem)\n else:\n return\n \n if spinSystem is None:\n spinSystem = peaks[0].topObject.newResonanceGroup()\n\n for resonance in resonances:\n addSpinSystemResonance(spinSystem,resonance)\n\n return spinSystem", "def sort_scores(self, data: List[int]) -> List[int]:\n # Base Case\n if len(data) < 2:\n return data\n\n pivot = data[0]\n l = self.sort_scores([x for x in data[1:] if x < pivot])\n u = self.sort_scores([x for x in data[1:] if x >= pivot])\n return l + [pivot] + u", "def posort(l, *cmps):\r\n comes_before = dict((a, set()) for a in l)\r\n comes_after = dict((a, set()) for a in l)\r\n\r\n def add_links(a, b): # b depends on a\r\n comes_after[a].add(b)\r\n comes_after[a].update(comes_after[b])\r\n for c in comes_before[a]:\r\n comes_after[c].update(comes_after[a])\r\n comes_before[b].add(a)\r\n comes_before[b].update(comes_before[a])\r\n for c in comes_after[b]:\r\n comes_before[c].update(comes_before[b])\r\n\r\n def check():\r\n \"\"\" Tests for cycles in manufactured edges \"\"\"\r\n for a in l:\r\n for b in l:\r\n assert not(b in comes_after[a] and a in comes_after[b])\r\n\r\n for cmp in cmps:\r\n for a in l:\r\n for b in l:\r\n if cmp(a, b) < 0: # a wants to come before b\r\n # if this wouldn't cause a cycle and isn't already known\r\n if not b in comes_before[a] and not b in comes_after[a]:\r\n add_links(a, b)\r\n # check() # debug code\r\n\r\n return _toposort(comes_after)", "def get_dictionary_of_peptides_and_isomeric_peak_areas(self, fout_peptides_isomeric_peak_areas):\n \n try:\n fin_handle = open(fout_peptides_isomeric_peak_areas)\n\n except IOError:\n raise(\"Provide a file containing percentage peak_area of isomeric peptides\")\n\n # local list; this appends all lines within a block and is emptied at the end of the block\n L_peptide_isomeric_peak_area = []\n\n block_start = 
False; pep_NC = \"\"\n \n for line in fin_handle:\n \n line = line.strip()\n \n # skipping the blank line\n if not line.strip():\n continue\n \n # skipping the comment line\n if line[0]==\"#\":\n continue\n \n \n if line==\"PEPSTART\": block_start=True\n\n elif line==\"PEPEND\" :\n block_start=False\n \n #end elif\n \n if block_start and line!=\"PEPSTART\":\n L = line.split(\":\")\n if L[0].strip() == \"peptide\":\n pep_NC = L[1].strip() #e.g, '15-25'\n\n elif L[0].strip()==\"IsomericPeptidesPeakArea\":\n right_side = L[1].strip()\n\n L_modtypes_freq_peak_area = [m.strip() for m in right_side.split(\" \")]\n percentage_peak_area = L_modtypes_freq_peak_area[-1] # last column\n D_modtype_freq = {}\n\n # running the loop so as to skip the last element\n for i, m in enumerate(L_modtypes_freq_peak_area[:-1]):\n mtype = (m.split('=')[0]).strip()\n freq = (m.split('=')[1]).strip()\n D_modtype_freq[mtype] = freq\n\n #end for\n\n L_peptide_isomeric_peak_area.append((D_modtype_freq, percentage_peak_area))\n \n # end if block_start and line!=\"PEPSTART\" \n\n # pushing into the dictionary after end of each block\n\n if line==\"PEPEND\":\n\n # sorting the list based on total frequency of isomeric peptides\n L_sorted = sorted(L_peptide_isomeric_peak_area, key=lambda x: sum([int(f) for f in x[0].values()]))\n \n self._D_peptide_isomeric_peak_areas[pep_NC] = L_sorted\n \n #emptying the list for next block\n L_peptide_isomeric_peak_area = []\n \n # emptying the peptide N_loc, C_loc string at the end of the block\n pep_NC = \"\"", "def sort(self, *pargs, **kwargs):\n if self._can_sort(*pargs, **kwargs):\n list.sort(self, *pargs, **kwargs)", "def sort_by_default(self):\n self.data.sort()", "def sort_results(metric_results):\n\n means, stds, params_list = metric_results\n dtype = [('index', int), ('params_list', object), ('std', float), ('mean', float)]\n\n #Sort will fail when attempting to rank based on the\n #dictionary 'params_list' when encountering identical mean and\n #standard deviations. To avoid this, use a list of distinct\n #integers to break the tie.\n values = zip(range(len(means)), params_list, stds, means)\n\n a = np.sort(np.array(list(values), dtype=dtype),\n kind='mergesort', order=['mean', 'std', 'index'])\n return np.flip(a, axis=-1)", "def pileup_as_peaks(in_pileup, out_peaks):\n with open(out_peaks, 'w') as outfile:\n for line in open(in_pileup):\n chrom, start, stop, count = line.strip().split('\\t')\n strand = '-' if 'minus' in in_pileup else '+'\n outfile.write('\\t'.join([chrom, start, stop, '.', count, strand]) + '\\n')", "def miniquicksort(first,last,cmp,swp):\r\n if last-first>1: #wenn die Laenge der Liste >=3\r\n if (cmp(first+1,first)==True and cmp(first,first+2)==True)or(cmp(first+2,first)==True and cmp(first,first+1)==True):\r\n index_pivot=first\r\n if (cmp(first,first+1)==True and cmp(first+1,first+2)==True)or(cmp(first+2,first+1)==True and cmp(first+1,first)==True):\r\n index_pivot=first+1\r\n if (cmp(first,first+2)==True and cmp(first+2,first+1)==True)or(cmp(first+1,first+2)==True and cmp(first+2,first)==True):\r\n index_pivot=first+2\r\n swp(first,index_pivot) #Pivot wird im 0. 
Position gelegt\r\n left,right,x=first+1,last,10 #Zeiger\r\n while x<100:\r\n while left<=right and cmp(left,first)==True: #Bewegung von linkem Zeiger\r\n left+=1\r\n while right>=left and cmp(first,right)==True: #Bewegung von rechtem Zeiger\r\n right-=1\r\n if right<left: #Schleife terminiert\r\n x=1000 \r\n else: #Schleife setzt fort, wiederhole oben\r\n swp(right,left)\r\n swp(first,right)\r\n miniquicksort(first,right-1,cmp,swp) #Rekursiv linke Seite der Liste\r\n miniquicksort(right+1,last,cmp,swp) #Rekurdiv rechte Seite der Liste\r\n else:\r\n #wenn die Laenge der Liste <=2. Rekursionsanker\r\n if not cmp(first,last)==True:\r\n swp(first,last)", "def quick_sort(lst, first, last):\r\n if first < last:\r\n split_marker = split_list(lst, first, last)\r\n\r\n quick_sort(lst, split_marker + 1, last)\r\n quick_sort(lst, first, split_marker - 1)", "def peak_vs_neighbors(pmap, k=4, median_dist=True, use_primary=False):\n # Get field centroids and peak rates from the spatial map \n if use_primary:\n udata = pmap.get_unit_data()\n x, y, peaks = udata['max_x'], udata['max_y'], udata['max_r']\n nfields = len(udata)\n else:\n fdata = pmap.get_field_data()\n x, y, peaks = fdata['x'], fdata['y'], fdata['peak']\n nfields = len(fdata)\n \n # Main loop through place fields\n neighbor_dists = numpy.empty(nfields, 'd')\n for f in xrange(nfields):\n d = numpy.sqrt((x - x[f])**2 + (y - y[f])**2)\n nearest_k = numpy.argsort(d)[1:k+1]\n if median_dist:\n neighbor_dists[f] = numpy.median(d[nearest_k])\n else:\n neighbor_dists[f] = d[nearest_k[-1]]\n \n return numpy.c_[peaks, neighbor_dists].T", "def test_find_peaks_nopeak(self):\n noise_amp = 1.0\n num_points = 100\n np.random.seed(181819141)\n test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)\n widths = np.arange(10, 50)\n found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)\n np.testing.assert_equal(len(found_locs), 0)", "def get_list_of_isomeric_modforms_peak_areas(self, fout_isomeric_modforms_peak_areas):\n \n L_isomeric_modforms_peak_areas = []\n \n try:\n fin_handle = open(fout_isomeric_modforms_peak_areas)\n\n except IOError:\n raise(\"Provide a file containing percentage peak areas of isomeric modforms\")\n\n block_start = False\n \n for line in fin_handle:\n \n line = line.strip()\n \n # skipping the blank line\n if not line.strip():\n continue\n \n # skipping the comment line\n if line[0]==\"#\":\n continue\n \n \n if line==\"START\": block_start=True\n elif line==\"END\" : block_start=False\n \n if block_start and line != \"START\":\n \n right_side = line.split(\":\")[1]\n right_side = right_side.strip()\n \n L_modtypes_freq_peak_area = [m.strip() for m in right_side.split(\" \")]\n percentage_peak_area = L_modtypes_freq_peak_area[-1] # last column\n \n D_modtype_freq = {}\n \n # running the loop so as to skip the last element\n for i, m in enumerate(L_modtypes_freq_peak_area[:-1]):\n mtype = (m.split('=')[0]).strip()\n freq = (m.split('=')[1]).strip()\n D_modtype_freq[mtype] = freq\n\n #end for\n\n L_isomeric_modforms_peak_areas.append((D_modtype_freq, percentage_peak_area))\n\n else: #if block_start and line != \"START\" and line[0]!=\"#\"\n continue\n \n #end for line in fin_handle \n \n # sorting the local list and storing it in a member attribute\n # sorting is based on sum of frequencies for isobaric modforms\n \n self._L_isomeric_modforms_peak_areas = sorted(L_isomeric_modforms_peak_areas, key=lambda x: sum([int(f) for f in x[0].values()]))" ]
[ "0.5235512", "0.51029795", "0.5057203", "0.50256133", "0.4997207", "0.49645668", "0.49587327", "0.49490315", "0.4915132", "0.4911893", "0.48918042", "0.4875538", "0.4869072", "0.48368287", "0.4831977", "0.48246452", "0.48140344", "0.47824505", "0.47735858", "0.4757741", "0.47366777", "0.47363457", "0.47206417", "0.47083598", "0.469233", "0.46840373", "0.46707654", "0.46692488", "0.46684074", "0.46657324", "0.46653312", "0.4661624", "0.46059492", "0.46008852", "0.45980492", "0.45859927", "0.45797893", "0.4572614", "0.45627174", "0.45510814", "0.45122865", "0.45071283", "0.4505228", "0.45047775", "0.44893524", "0.44857332", "0.44853413", "0.44827", "0.44778928", "0.4471205", "0.4469711", "0.4469178", "0.44639647", "0.4462726", "0.44612944", "0.44388825", "0.44303393", "0.44295877", "0.44253996", "0.44165552", "0.44113988", "0.44100922", "0.44089675", "0.44060025", "0.44019878", "0.4399606", "0.43978968", "0.4396497", "0.4396025", "0.43906128", "0.43857604", "0.43759528", "0.43716022", "0.4370152", "0.43699625", "0.43560588", "0.43316522", "0.43248695", "0.43137214", "0.43071175", "0.43034425", "0.43020645", "0.4301492", "0.43003696", "0.4300369", "0.42991945", "0.42991307", "0.42991203", "0.4295558", "0.4294763", "0.4289308", "0.42865592", "0.4285276", "0.4283115", "0.42829293", "0.4276271", "0.42724726", "0.42646548", "0.42604086", "0.42409316" ]
0.73416525
0
Return a shell-escaped version of the string using single quotes. Reliably quote a string which may contain unsafe characters (e.g. space, quote, or other special characters such as '$'). The returned value can be used in a shell command line as one token that gets interpreted literally.
def SingleQuote(s):
    return pipes.quote(s)
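A minimal usage sketch of this single-quote escaping (illustrative only; `pipes.quote` is assumed to behave like Python 3's `shlex.quote`, which shares the same implementation on versions that still ship `pipes`):

import pipes  # shlex.quote offers the same behavior on Python 3

def SingleQuote(s):
    return pipes.quote(s)

# Unsafe tokens are wrapped in single quotes so the shell takes them literally;
# embedded single quotes are closed, escaped, and reopened.
print(SingleQuote("it's a $HOME test"))  # 'it'"'"'s a $HOME test'
print(SingleQuote("safe-token.txt"))     # safe-token.txt (no quoting needed)

Single quotes give literal treatment: nothing inside them, including '$', is expanded by the shell.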
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shlex_quote(s):\n if not s:\n return \"''\"\n # PKGW: builtin not available in Python 2\n ###if _find_unsafe(s) is None:\n ### return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shlex_quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def shellquote(s):\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'", "def quote(s):\n # Based on shlex.quote. Bun unlike shlex, it quotes every string and\n # not just the ones that contain unsafe characters.\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shellformat(string):\n return \"'\" + string.replace(\"'\", \"'\\\\''\") + \"'\"", "def _sh_quote(s):\n if not s:\n return b\"\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return b\"'\" + s.replace(b\"'\", b\"'\\\"'\\\"'\") + b\"'\"", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def DoubleQuote(s):\n if not s:\n return '\"\"'\n elif all(c in _SafeShellChars for c in s):\n return s\n else:\n return '\"' + s.replace('\"', '\\\\\"') + '\"'", "def sh_quote_safe(arg):\n return (\"'\" + str(arg).replace(\"'\", r\"'\\''\") + \"'\")", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return \"'\" + s + \"'\"\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def sh_quote_unsafe(arg):\n return ('\"' + _DQUOTE_RE.sub(r'\\1\\1\\\"', str(arg)) + '\"' )", "def shQuote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")", "def quoteString(s):\n if s is None:\n return None\n quoted = str(s).replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n return \"'{}'\".format(quoted)", "def shquote(arg):\n for c in '\"', \"'\", \"\\\\\", \"#\":\n if c in arg:\n return repr(arg)\n if arg.split() != [arg]:\n return repr(arg)\n return arg", "def quot(string):\r\n return string.replace('\"', \"'\")", "def shellify(val):\n\n if val==None:\n s=''\n elif not isinstance(val,str):\n s=str(val)\n else:\n return shlex.quote(val)\n return shlex.quote(s)", "def quote(value):\n return DoubleQuotedScalarString(value)", "def c_stringify(cls, st):\n return '\"{0}\"'.format(st.value.replace('\"', '\\\\\"'))", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def shell_escape(s):\n from tempfile import mkstemp\n fd, path = mkstemp()\n try:\n with os.fdopen(fd, 'w') as f:\n f.write(s)\n cmd = r\"\"\"cat %s | sed -e \"s/'/'\\\\\\\\''/g; 1s/^/'/; \\$s/\\$/'/\" \"\"\" % path\n escaped_str = check_output(cmd, shell=True)\n finally:\n os.remove(path)\n\n return escaped_str", "def quote(s):\n\n\ts = \"'\" + s.replace(\"'\", \"\"\"'\"'\"'\"\"\") + \"'\"\n\n\t#get rid of gratuitous leading and trailing empty strings\n\tif 
s.startswith(\"''\"): s = s[2:]\n\tif s.endswith(\"''\"): s = s[:-2]\n\n\treturn s", "def escape_single_quote(unescaped):\n\t# requirements = re\n\treturn re.sub(r'(\\'|\\\\)', r'\\\\\\1', unescaped)", "def unquote(s, *a, **kw):\n return quote(s, *a, **kw)", "def lisp_string(python_string):\n return '\"%s\"' % python_string.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')", "def qstring(self, s):\n\n if '\"' in s or ' ' in s or '\\\\' in s:\n return '\"' + s.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"') + '\"'\n else:\n return s", "def shell_escape(s):\n import io\n\n fp = io.StringIO()\n sq = \"'\" + '\"' + \"'\" + '\"' + \"'\"\n dollar = \"'\" + \"'\" + \"$\" + \"'\" + \"'\"\n print(\"'\", end=\"\", file=fp)\n for c in s:\n if c == \"'\":\n print(sq, end=\"\", file=fp)\n elif c == \"$\":\n print(dollar, end=\"\", file=fp)\n else:\n print(c, end=\"\", file=fp)\n print(\"'\", end=\"\", file=fp)\n return fp.getvalue()", "def QuotedEscaped (s):\n return repr(s)", "def quote(value):\n single = value.find(\"'\")\n double = value.find('\"')\n multiline = value.find('\\n') != -1\n if multiline or ((single != -1) and (double != -1)):\n if value.find('\"\"\"') == -1 and value[0] != '\"' and value[-1] != '\"':\n s = '\"\"\"%s\"\"\"' % value\n else:\n s = \"'''%s'''\" % value\n elif (single != -1) and (double == -1):\n s = '\"%s\"' % value\n else:\n s = \"'%s'\" % value\n return s", "def safe_quoted_string(value):\n validate_safe_string(value)\n return u'\\'{}\\''.format(value)", "def quote(s):\n return unescape(quoteattr(s))", "def quote(s):\n if isinstance(s, str):\n if \" \" in s or len(s.split()) > 1:\n start, end = s[0], s[-1]\n if start != end or start not in ('\"', \"'\"):\n q1s, q1d, q3s, q3d = \"'\", '\"', 3 * \"'\", 3 * '\"'\n if q1d not in s:\n s = q1d + s + q1d\n elif q1s not in s:\n s = q1s + s + q1s\n elif q3d not in s:\n s = q3d + s + q3d\n elif q3s not in s:\n s = q3s + s + q3s\n return s", "def str_wrap_double(s):\n s = str(s)\n return '\"' + s + '\"'", "def safestr(s):\n return quote(str(s), '')", "def shellquote(arg):\n if re.match('^[-_.:/=a-zA-Z0-9]*$', arg):\n return arg\n else:\n return \"'%s'\" % arg.replace(\"'\", r\"'\\''\")", "def Quote(s):\n if not nonnormal_char_re.search(s):\n return s # no quoting necessary\n slist = []\n for char in s:\n if nonnormal_char_re.search(char):\n slist.append(\"\\\\x%02x\" % ord(char))\n else:\n slist.append(char)\n return '\"%s\"' % \"\".join(slist)", "def special_character(raw_string, force_quote = False):\n if raw_string == \"\":\n return '\"\"'\n\n # Pass through other values, such as None:\n if type(raw_string) not in types.StringTypes:\n return raw_string\n\n # quick bypass if there are no characters to force escapeaping:\n if not force_quote and not _needs_escapeaping_re.search(raw_string):\n return raw_string\n \n if '\"' not in raw_string:\n return '\"%s\"' % (_avert_unallowable(raw_string),)\n\n if \"'\" not in raw_string:\n return \"'%s'\" % (_avert_unallowable(raw_string),)\n\n # If there are both single and double special_characters in the string, we\n # enclose the whole thing in double special_characters and escape double quotes\n # in the original string.\n return '\"%s\"' % (_avert_unallowable(raw_string, True),)", "def _StringQuote(s, quote='\"', escape='\\\\'):\n entity = {'\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t'}\n chars = []\n if quote:\n chars.append(quote)\n for c in s:\n if c in (escape, quote):\n chars.append(escape)\n elif c in entity:\n c = entity[c]\n chars.append(c)\n if quote:\n 
chars.append(quote)\n return ''.join(chars)", "def _quote(self, arg):\n arg = arg.replace('\\\\', '\\\\\\\\')\n arg = arg.replace('\"', '\\\\\"')\n return '\"%s\"' % arg", "def sh_quote_unsafe_cmdline(args):\n return str.join(' ', (sh_quote_unsafe(arg) for arg in args))", "def _escape_string(s, surrounding_quote='\"'):\n s = s.replace('\\\\', '\\\\\\\\')\n if surrounding_quote == '\"':\n s = s.replace('\"', r'\\\"')\n if surrounding_quote == \"'\":\n s = s.replace(\"'\", r\"\\'\")\n return s", "def _quote(v):\n return '\"' + v + '\"' if ' ' in v else v", "def embeded_triple_quotes():\n pass", "def dequote(s):\n if len(s) < 2:\n return s\n elif (s[0] == s[-1]) and s.startswith(('\"', \"'\")):\n return s[1: -1]\n else:\n return s", "def test_quote_str():\n assert pmisc.quote_str(5) == 5\n assert pmisc.quote_str(\"Hello!\") == '\"Hello!\"'\n assert pmisc.quote_str('He said \"hello!\"') == \"'He said \\\"hello!\\\"'\"", "def quoted(val: str) -> str:\n return f'\"{val}\"' if ' ' in val else val", "def _escapeString(self, value):\n if '\"' in value and \"'\" in value:\n substrings = value.split(\"\\\"\")\n result = [\"concat(\"]\n for substring in substrings:\n result.append(\"\\\"%s\\\"\" % substring)\n result.append(\", '\\\"', \")\n result = result[0:-1]\n if value.endswith('\"'):\n result.append(\", '\\\"'\")\n return \"\".join(result) + \")\"\n\n if '\"' in value:\n return \"'%s'\" % value\n return \"\\\"%s\\\"\" % value", "def sh_quote_safe_cmdline(args):\n return str.join(' ', (sh_quote_safe(arg) for arg in args))", "def serialize_string(s):\n return f'\"{s}\"'", "def _Unquote(s):\n if not hasattr(s, 'strip'):\n return s\n # Repeated to handle both \"'foo'\" and '\"foo\"'\n return s.strip(\"'\").strip('\"').strip(\"'\")", "def _Unquote(s):\n if not hasattr(s, 'strip'):\n return s\n # Repeated to handle both \"'foo'\" and '\"foo\"'\n return s.strip(\"\\\"'\")", "def encodeLiteral(self, string):\r\n return string.replace(\"'\",\"''\")", "def quote(*a, **kw):\n return quote(*a, **kw)", "def quote(s, safe='/', encoding=None, errors=None):\n # fastpath\n if not s:\n return s\n\n if encoding is not None or isinstance(s, unicode):\n if encoding is None:\n encoding = 'utf-8'\n if errors is None:\n errors = 'strict'\n s = s.encode(encoding, errors)\n if isinstance(safe, unicode):\n # Normalize 'safe' by converting to str and removing non-ASCII chars\n safe = safe.encode('ascii', 'ignore')\n\n cachekey = (safe, always_safe)\n try:\n (quoter, safe) = _safe_quoters[cachekey]\n except KeyError:\n safe_map = _safe_map.copy()\n safe_map.update([(c, c) for c in safe])\n quoter = safe_map.__getitem__\n safe = always_safe + safe\n _safe_quoters[cachekey] = (quoter, safe)\n if not s.rstrip(safe):\n return s\n return ''.join(map(quoter, s))", "def escape(x):\n if '\\'' not in x:\n return '\\'' + x + '\\''\n s = '\"'\n for c in x:\n if c in '\\\\$\"`':\n s = s + '\\\\'\n s = s + c\n s = s + '\"'\n return s", "def safe_str(self, string):\n return self.db.escape_string(string)", "def argument_quote(argument):\n argument = argument.replace('\"', '\"\"')\n if ' ' in argument:\n argument = argument.replace(\"'\", \"''\")\n argument = \"'\" + argument + \"'\"\n return argument", "def quote(value, *args, **kwargs):\n return parse.quote(encode(value, *args, **kwargs))", "def smart_str(s, strings_only=False, errors='strict'):\n return django.utils.encoding.smart_str(\n s, get_site_encoding(), strings_only, errors)", "def cmdstr(cmd):\n if isinstance(cmd, str):\n return cmd\n\n quoted = []\n for arg in cmd:\n if 
isinstance(arg, Path):\n arg = str(arg)\n if ' ' in arg:\n arg = '\"%s\"' % (arg,)\n quoted.append(arg)\n return ' '.join(quoted)", "def cleanquotes(string):\n return string.replace(\"\\\"\", \"\\\\\\\"\")", "def unquote(s):\n if isinstance(s, str) and len(s) > 1:\n if s[0] in ('\"', \"'\") and s[-1] == s[0]:\n q = s[0]\n if len(s) >= 6 and s[0:3].count(q) == 3 and s[-3:].count(q) == 3:\n count = 3\n else:\n count = 1\n s = s[count:-count]\n return s", "def simple_format_string(self, value, force_raw=False):\n\n m = re.search(r'((\\n)|\\r|\\n|\\t|\\\\)', value)\n newlines = False\n raw = ''\n if m:\n if m.group(2):\n newlines = True\n raw = 'r'\n if force_raw and not raw:\n raw = 'r'\n\n single = self.needs_escape(value, '\\'')\n double = self.needs_escape(value, '\"')\n tsingle = self.needs_escape(value, '\\'', quote_count=3)\n tdouble = self.needs_escape(value, '\"', quote_count=3)\n if not double and not newlines:\n string = '%s\"%s\"' % (raw, value)\n elif not single and not newlines:\n string = '%s\\'%s\\'' % (raw, value)\n elif not tdouble:\n string = '%s\"\"\"%s\"\"\"' % (raw, value)\n elif not tsingle:\n string = '%s\\'\\'\\'%s\\'\\'\\'' % (raw, value)\n elif not newlines:\n string = '%s\"%s\"' % (raw, self.fix_escape(value, '\"'))\n else:\n string = '%s\"\"\"%s\"\"\"' % (raw, self.fix_escape(value, '\"', quote_count=3))\n return string", "def exptostr(value):\n return re.sub(r'`(.*)`', r'\\g<1>', value)", "def typeify(s):\n try:\n return literal_eval(s)\n except:\n return s", "def unescape_single_quote(escaped):\n\tescaped = escaped.replace('\\\\\\\\', '\\\\')\n\tescaped = escaped.replace('\\\\\\'', '\\'')\n\treturn escaped", "def shlex_join(argv):\n def quote(arg):\n if arg.find(\" \") >= 0:\n return '\"%s\"' % arg\n else:\n return arg\n return \" \".join([quote(arg) for arg in argv])", "def _dequote(value: str) -> str:\n if value[0] == '\"' and value[-1] == '\"':\n return value[1:-1]\n return value", "def __str__(self):\n\n if self._s == '':\n return ''\n\n if len(self.quote) == 1:\n s = self.to_short()\n else:\n s = self.to_long()\n\n try:\n eval(self.quote + s + self.quote)\n except UnicodeDecodeError:\n if self._safe_mode:\n raise\n\n self._safe_mode = True\n\n assert eval(self.quote + s + self.quote) == self._s\n\n return s", "def escapeSMString(item):\n if type(item) is dict:\n # return \"key = value\"\n return str(list(item.keys())[0]) + ' = ' + escapeSMString(str(list(item.values())[0]))\n elif type(item) is str:\n if item.startswith(\"`sm \") and item.endswith(\"`\"): # strip `sm ... 
`\n return item[4:-1]\n return \"'\" + item + \"'\" # return item as quoted string\n return ''", "def _escape(s):\n assert isinstance(s, str), \\\n \"expected %s but got %s; value=%s\" % (type(str), type(s), s)\n s = s.replace(\"\\\\\", \"\\\\\\\\\")\n s = s.replace(\"\\n\", \"\\\\n\")\n s = s.replace(\"\\t\", \"\\\\t\")\n s = s.replace(\",\", \"\\t\")\n return s", "def quote_spaces(arg):\n if ' ' in arg or '\\t' in arg:\n return '\"%s\"' % arg\n else:\n return str(arg)", "def cleaned_string(val):\r\n return urllib.quote_plus(smart_str(val))", "def dequote(self, in_str):\n in_str = in_str.replace(\"'\", \"\")\n in_str = in_str.replace('\"', \"\")\n return in_str", "def string_from_interwebs(input_value):\n \n return escape(unquote(input_value))", "def removeEscapingDoubleQuoteInSQLString(string, forceDoubleQuote=True):\n if string is None:\n return string\n\n string = string.replace('\"\"', '\"')\n\n if forceDoubleQuote:\n string = '\"' + string + '\"'\n return string", "def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True):\n string = string.replace('\"', '\"\"')\n\n if forceDoubleQuote:\n string = '\"' + string + '\"'\n return string", "def _safe(text):\n return text.replace(\"'\", \"''\").replace(\"\\\\\", \"\\\\\\\\\")", "def quoting_sane(i):\n if '\"\"' in i:\n debug('found \"\"')\n pass\n if '\"' in i:\n debug('found \"')\n\treturn i.replace('\"','')\n debug('found \"\"')\n if \"''\" in i:\n debug(\"found ''\")\n pass\n if \"'\" in i:\n debug(\"found '\")\n pass\n return i", "def test_symlit_escape():\n return \"\\\"=\\\"\"", "def dequote(x): \n if x[0] == '\"' and x[len(x)-1] == '\"':\n return x[1:len(x)-1]\n return x", "def _unwrap_command_line(s: str) -> str:\n if not _command_escape_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[(len(indent) + 8):-len(_command_escape_comment)]\n return indent + cmd", "def json_escape(s):\n \n return s.replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")", "def escape(orig):\n return '\"{}\"'.format(orig.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"'))", "def unicode_quote(s):\n if isinstance(s, unicode):\n return quote(s.encode(\"utf-8\"))\n else:\n return quote(str(s))", "def test_spelling_mistake_in_single_quote_string(self, style):\n with ExpectedException(LinterFailure):\n self._spellcheck_lint(\"{s}{e}\\ncall('''splelling mistake''')\\n\",\n style)", "def sh_escape(command):\n command = command.replace(\"\\\\\", \"\\\\\\\\\")\n command = command.replace(\"$\", r'\\$')\n command = command.replace('\"', r'\\\"')\n command = command.replace('`', r'\\`')\n return command", "def key(name):\n return (\n Literal(name) ^\n (sep('\\'') + Literal(name) + sep('\\'')) ^\n (sep('\"') + Literal(name) + sep('\"')))", "def escape_triple_quotes(text):\n return text.replace(u'\"\"\"', u'\\\\\"\\\\\"\\\\\"')", "def quoted_string(content: str) -> str:\n if not (QCONTENT > set(content)):\n raise ValueError(f\"bad content for quoted-string {content!r}\")\n return not_qtext_re.sub(lambda x: \"\\\\\" + x.group(0), content)", "def Unquote(quoted_string):\n if not quoted_string[0] == '\"' or quoted_string[0] == \"'\":\n return quoted_string\n assert quoted_string[0] == quoted_string[-1]\n return_list = []\n i = 1 # skip initial char\n while i < len(quoted_string) - 1:\n char = quoted_string[i]\n if char == \"\\\\\":\n # quoted section\n assert quoted_string[i + 1] == \"x\"\n return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))\n i += 4\n else:\n return_list.append(char)\n i += 1\n 
return \"\".join(return_list)", "def standardise_quotes(self, val):\n if val.startswith(self.altquote) and val.endswith(self.altquote):\n middle = val[1:-1]\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n val = self.escape_quotes(val)\n\n return val", "def celstr(token: lark.Token) -> celpy.celtypes.StringType:\n def expand(match_iter: Iterable[Match[str]]) -> Iterator[str]:\n for match in (m.group() for m in match_iter):\n if len(match) == 1:\n expanded = match\n elif match[:2] == r'\\x':\n expanded = chr(int(match[2:], 16))\n elif match[:2] in {r'\\u', r'\\U'}:\n expanded = chr(int(match[2:], 16))\n elif match[:1] == '\\\\' and len(match) == 4:\n expanded = chr(int(match[1:], 8))\n else:\n expanded = CEL_ESCAPES.get(match, match)\n yield expanded\n\n text = token.value\n if text[:1] in (\"R\", \"r\"):\n # Raw; ignore ``\\`` escapes\n if text[1:4] == '\"\"\"' or text[1:4] == \"'''\":\n # Long\n expanded = text[4:-3]\n else:\n # Short\n expanded = text[2:-1]\n else:\n # Cooked; expand ``\\`` escapes\n if text[0:3] == '\"\"\"' or text[0:3] == \"'''\":\n # Long\n match_iter = CEL_ESCAPES_PAT.finditer(text[3:-3])\n else:\n # Short\n match_iter = CEL_ESCAPES_PAT.finditer(text[1:-1])\n expanded = ''.join(expand(match_iter))\n return celpy.celtypes.StringType(expanded)", "def __getXpathLiteral(self, string):\r\n\r\n if not '\"' in string:\r\n return '\"' + string + '\"'\r\n if not \"'\" in string:\r\n return \"'\" + string + \"'\"\r\n\r\n\r\n\r\n #if the value contains both single and double quotes, construct an\r\n #expression that concatenates all non-double-quote substrings with\r\n #the quotes, e.g.:\r\n #concat(\"foo\", '\"', \"bar\")\r\n\r\n sb = \"concat(\"\r\n substrings = string.split('\"')\r\n for i in range(len(substrings)):\r\n needComma = (i>0);\r\n if substrings[i] != \"\":\r\n if i > 0:\r\n sb += \", \"\r\n\r\n sb += \"\\\"\"\r\n sb += substrings[i]\r\n sb += '\"'\r\n needComma = True\r\n if i < (len(substrings) - 1):\r\n if needComma:\r\n sb += \", \"\r\n sb += \"'\\\"'\"\r\n\r\n sb += \")\"\r\n return sb", "def safe(e):\n if PY2 and isinstance(e, unicode):\n return quote(e.encode('utf-8'), safe='')\n else:\n return quote(str(e), safe='')", "def _wrap_command_line(s: str) -> str:\n if not _command_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[len(indent):]\n return f\"{indent}pass # {cmd}{_command_escape_comment}\"", "def encode_string(self, value, double=False):\n\n # Replace special characters in string using the %xx escape\n encoded_str = quote(value, '')\n if double: # double encode\n encoded_str = quote(encoded_str, '')\n\n return encoded_str", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def jsquote(prim) :\n\tif isinstance(prim, URIRef) :\n\t\treturn 'u' + str(prim)\n\telif isinstance(prim, float) :\n\t\treturn 'f' + str(prim)\n\telif isinstance(prim, int) :\n\t\treturn 'i' + str(prim)\n\telif isinstance(prim, basestring) :\n\t\treturn 's' + prim.replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\").replace(\"\\n\", \"\\\\n\")\n\telse :\n\t\treturn None" ]
[ "0.7403372", "0.74031615", "0.71584487", "0.71584487", "0.7133629", "0.71020097", "0.7097107", "0.7079384", "0.6891607", "0.683838", "0.6683751", "0.66462994", "0.6627006", "0.6565741", "0.6501744", "0.64522266", "0.63975513", "0.6358231", "0.63522434", "0.6346998", "0.63205904", "0.63012606", "0.6288624", "0.62825716", "0.62817687", "0.6279321", "0.6278117", "0.6267552", "0.6233549", "0.62321997", "0.6192882", "0.61472994", "0.6144251", "0.611346", "0.61042136", "0.6066018", "0.60332006", "0.5968032", "0.5943156", "0.5908041", "0.585687", "0.58558774", "0.58348125", "0.57888407", "0.5759621", "0.5742179", "0.5737059", "0.5724058", "0.567849", "0.5673036", "0.5664554", "0.5646585", "0.56345296", "0.5615689", "0.56144875", "0.5613376", "0.56062233", "0.5601628", "0.55675733", "0.5547627", "0.55228454", "0.54430276", "0.5440372", "0.54366875", "0.5434324", "0.5426846", "0.54260075", "0.5424309", "0.54222006", "0.54216975", "0.5409791", "0.5408959", "0.5407301", "0.54015076", "0.53891844", "0.5377219", "0.53721553", "0.5369321", "0.5345322", "0.5342473", "0.5322063", "0.5319201", "0.5318104", "0.53126013", "0.5309714", "0.53075546", "0.5304332", "0.5297821", "0.5297564", "0.52889943", "0.52857935", "0.5282753", "0.5281678", "0.52803373", "0.5275005", "0.5273634", "0.5270327", "0.5264131", "0.52625", "0.52501404" ]
0.7261007
2
Return a shell-escaped version of the string using double quotes. Reliably quote a string which may contain unsafe characters (e.g. space or quote characters), while retaining some shell features such as variable interpolation. The returned value can be used in a shell command line as one token that gets further interpreted by the shell. The set of characters that retain their special meaning may depend on the shell implementation.
def DoubleQuote(s):
    if not s:
        return '""'
    elif all(c in _SafeShellChars for c in s):
        return s
    else:
        return '"' + s.replace('"', '\\"') + '"'
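A minimal, self-contained sketch of the double-quote path (the set bound to `_SafeShellChars` below is an assumption standing in for whatever set of no-quoting-needed characters the surrounding module defines):

import string

# Assumed stand-in for the module-level constant referenced above.
_SafeShellChars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')

def DoubleQuote(s):
    if not s:
        return '""'
    elif all(c in _SafeShellChars for c in s):
        return s
    else:
        return '"' + s.replace('"', '\\"') + '"'

print(DoubleQuote('echo $HOME'))  # "echo $HOME"   ($HOME stays live for the shell)
print(DoubleQuote('say "hi"'))    # "say \"hi\""   (embedded quotes are escaped)
print(DoubleQuote('plain.txt'))   # plain.txt      (already safe, returned as-is)

Unlike the single-quote form, '$' and backticks keep their special meaning inside double quotes, which is exactly the variable-interpolation behavior the description above calls out.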
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shlex_quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shlex_quote(s):\n if not s:\n return \"''\"\n # PKGW: builtin not available in Python 2\n ###if _find_unsafe(s) is None:\n ### return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shellquote(s):\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'", "def quote(s):\n # Based on shlex.quote. Bun unlike shlex, it quotes every string and\n # not just the ones that contain unsafe characters.\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shellformat(string):\n return \"'\" + string.replace(\"'\", \"'\\\\''\") + \"'\"", "def _sh_quote(s):\n if not s:\n return b\"\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return b\"'\" + s.replace(b\"'\", b\"'\\\"'\\\"'\") + b\"'\"", "def SingleQuote(s):\n return pipes.quote(s)", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def sh_quote_unsafe(arg):\n return ('\"' + _DQUOTE_RE.sub(r'\\1\\1\\\"', str(arg)) + '\"' )", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return \"'\" + s + \"'\"\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def sh_quote_safe(arg):\n return (\"'\" + str(arg).replace(\"'\", r\"'\\''\") + \"'\")", "def shQuote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")", "def quot(string):\r\n return string.replace('\"', \"'\")", "def quote(s):\n return unescape(quoteattr(s))", "def QuotedEscaped (s):\n return repr(s)", "def unquote(s, *a, **kw):\n return quote(s, *a, **kw)", "def lisp_string(python_string):\n return '\"%s\"' % python_string.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')", "def shquote(arg):\n for c in '\"', \"'\", \"\\\\\", \"#\":\n if c in arg:\n return repr(arg)\n if arg.split() != [arg]:\n return repr(arg)\n return arg", "def str_wrap_double(s):\n s = str(s)\n return '\"' + s + '\"'", "def shell_escape(s):\n from tempfile import mkstemp\n fd, path = mkstemp()\n try:\n with os.fdopen(fd, 'w') as f:\n f.write(s)\n cmd = r\"\"\"cat %s | sed -e \"s/'/'\\\\\\\\''/g; 1s/^/'/; \\$s/\\$/'/\" \"\"\" % path\n escaped_str = check_output(cmd, shell=True)\n finally:\n os.remove(path)\n\n return escaped_str", "def Quote(s):\n if not nonnormal_char_re.search(s):\n return s # no quoting necessary\n slist = []\n for char in s:\n if nonnormal_char_re.search(char):\n slist.append(\"\\\\x%02x\" % ord(char))\n else:\n slist.append(char)\n return '\"%s\"' % \"\".join(slist)", "def quoteString(s):\n if s is None:\n return None\n quoted = str(s).replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n return 
\"'{}'\".format(quoted)", "def quote(s):\n\n\ts = \"'\" + s.replace(\"'\", \"\"\"'\"'\"'\"\"\") + \"'\"\n\n\t#get rid of gratuitous leading and trailing empty strings\n\tif s.startswith(\"''\"): s = s[2:]\n\tif s.endswith(\"''\"): s = s[:-2]\n\n\treturn s", "def c_stringify(cls, st):\n return '\"{0}\"'.format(st.value.replace('\"', '\\\\\"'))", "def special_character(raw_string, force_quote = False):\n if raw_string == \"\":\n return '\"\"'\n\n # Pass through other values, such as None:\n if type(raw_string) not in types.StringTypes:\n return raw_string\n\n # quick bypass if there are no characters to force escapeaping:\n if not force_quote and not _needs_escapeaping_re.search(raw_string):\n return raw_string\n \n if '\"' not in raw_string:\n return '\"%s\"' % (_avert_unallowable(raw_string),)\n\n if \"'\" not in raw_string:\n return \"'%s'\" % (_avert_unallowable(raw_string),)\n\n # If there are both single and double special_characters in the string, we\n # enclose the whole thing in double special_characters and escape double quotes\n # in the original string.\n return '\"%s\"' % (_avert_unallowable(raw_string, True),)", "def quote(value):\n return DoubleQuotedScalarString(value)", "def safe_quoted_string(value):\n validate_safe_string(value)\n return u'\\'{}\\''.format(value)", "def shell_escape(s):\n import io\n\n fp = io.StringIO()\n sq = \"'\" + '\"' + \"'\" + '\"' + \"'\"\n dollar = \"'\" + \"'\" + \"$\" + \"'\" + \"'\"\n print(\"'\", end=\"\", file=fp)\n for c in s:\n if c == \"'\":\n print(sq, end=\"\", file=fp)\n elif c == \"$\":\n print(dollar, end=\"\", file=fp)\n else:\n print(c, end=\"\", file=fp)\n print(\"'\", end=\"\", file=fp)\n return fp.getvalue()", "def _StringQuote(s, quote='\"', escape='\\\\'):\n entity = {'\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t'}\n chars = []\n if quote:\n chars.append(quote)\n for c in s:\n if c in (escape, quote):\n chars.append(escape)\n elif c in entity:\n c = entity[c]\n chars.append(c)\n if quote:\n chars.append(quote)\n return ''.join(chars)", "def embeded_triple_quotes():\n pass", "def qstring(self, s):\n\n if '\"' in s or ' ' in s or '\\\\' in s:\n return '\"' + s.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"') + '\"'\n else:\n return s", "def shellquote(arg):\n if re.match('^[-_.:/=a-zA-Z0-9]*$', arg):\n return arg\n else:\n return \"'%s'\" % arg.replace(\"'\", r\"'\\''\")", "def shellify(val):\n\n if val==None:\n s=''\n elif not isinstance(val,str):\n s=str(val)\n else:\n return shlex.quote(val)\n return shlex.quote(s)", "def quote(s):\n if isinstance(s, str):\n if \" \" in s or len(s.split()) > 1:\n start, end = s[0], s[-1]\n if start != end or start not in ('\"', \"'\"):\n q1s, q1d, q3s, q3d = \"'\", '\"', 3 * \"'\", 3 * '\"'\n if q1d not in s:\n s = q1d + s + q1d\n elif q1s not in s:\n s = q1s + s + q1s\n elif q3d not in s:\n s = q3d + s + q3d\n elif q3s not in s:\n s = q3s + s + q3s\n return s", "def _escape_string(s, surrounding_quote='\"'):\n s = s.replace('\\\\', '\\\\\\\\')\n if surrounding_quote == '\"':\n s = s.replace('\"', r'\\\"')\n if surrounding_quote == \"'\":\n s = s.replace(\"'\", r\"\\'\")\n return s", "def dequote(s):\n if len(s) < 2:\n return s\n elif (s[0] == s[-1]) and s.startswith(('\"', \"'\")):\n return s[1: -1]\n else:\n return s", "def escape_single_quote(unescaped):\n\t# requirements = re\n\treturn re.sub(r'(\\'|\\\\)', r'\\\\\\1', unescaped)", "def safestr(s):\n return quote(str(s), '')", "def _quote(self, arg):\n arg = arg.replace('\\\\', '\\\\\\\\')\n arg = arg.replace('\"', 
'\\\\\"')\n return '\"%s\"' % arg", "def quote(value):\n single = value.find(\"'\")\n double = value.find('\"')\n multiline = value.find('\\n') != -1\n if multiline or ((single != -1) and (double != -1)):\n if value.find('\"\"\"') == -1 and value[0] != '\"' and value[-1] != '\"':\n s = '\"\"\"%s\"\"\"' % value\n else:\n s = \"'''%s'''\" % value\n elif (single != -1) and (double == -1):\n s = '\"%s\"' % value\n else:\n s = \"'%s'\" % value\n return s", "def _quote(v):\n return '\"' + v + '\"' if ' ' in v else v", "def sh_quote_unsafe_cmdline(args):\n return str.join(' ', (sh_quote_unsafe(arg) for arg in args))", "def _escapeString(self, value):\n if '\"' in value and \"'\" in value:\n substrings = value.split(\"\\\"\")\n result = [\"concat(\"]\n for substring in substrings:\n result.append(\"\\\"%s\\\"\" % substring)\n result.append(\", '\\\"', \")\n result = result[0:-1]\n if value.endswith('\"'):\n result.append(\", '\\\"'\")\n return \"\".join(result) + \")\"\n\n if '\"' in value:\n return \"'%s'\" % value\n return \"\\\"%s\\\"\" % value", "def _Unquote(s):\n if not hasattr(s, 'strip'):\n return s\n # Repeated to handle both \"'foo'\" and '\"foo\"'\n return s.strip(\"'\").strip('\"').strip(\"'\")", "def _Unquote(s):\n if not hasattr(s, 'strip'):\n return s\n # Repeated to handle both \"'foo'\" and '\"foo\"'\n return s.strip(\"\\\"'\")", "def test_quote_str():\n assert pmisc.quote_str(5) == 5\n assert pmisc.quote_str(\"Hello!\") == '\"Hello!\"'\n assert pmisc.quote_str('He said \"hello!\"') == \"'He said \\\"hello!\\\"'\"", "def _dequote(value: str) -> str:\n if value[0] == '\"' and value[-1] == '\"':\n return value[1:-1]\n return value", "def quoted(val: str) -> str:\n return f'\"{val}\"' if ' ' in val else val", "def dequote(self, in_str):\n in_str = in_str.replace(\"'\", \"\")\n in_str = in_str.replace('\"', \"\")\n return in_str", "def escape(x):\n if '\\'' not in x:\n return '\\'' + x + '\\''\n s = '\"'\n for c in x:\n if c in '\\\\$\"`':\n s = s + '\\\\'\n s = s + c\n s = s + '\"'\n return s", "def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)", "def escape(raw_string): \n return ''.join(\n [_caret_escapes_for_unprintables.get(c, c) for c in raw_string])", "def sh_quote_safe_cmdline(args):\n return str.join(' ', (sh_quote_safe(arg) for arg in args))", "def escape(orig):\n return '\"{}\"'.format(orig.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"'))", "def _escape(s):\n assert isinstance(s, str), \\\n \"expected %s but got %s; value=%s\" % (type(str), type(s), s)\n s = s.replace(\"\\\\\", \"\\\\\\\\\")\n s = s.replace(\"\\n\", \"\\\\n\")\n s = s.replace(\"\\t\", \"\\\\t\")\n s = s.replace(\",\", \"\\t\")\n return s", "def standardise_quotes(self, val):\n if val.startswith(self.altquote) and val.endswith(self.altquote):\n middle = val[1:-1]\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n val = self.escape_quotes(val)\n\n return val", "def quote(s, safe='/', encoding=None, errors=None):\n # fastpath\n if not s:\n return s\n\n if encoding is not None or isinstance(s, unicode):\n if encoding is None:\n encoding = 'utf-8'\n if errors is None:\n errors = 'strict'\n s = s.encode(encoding, errors)\n if isinstance(safe, unicode):\n # Normalize 'safe' by converting to str and removing non-ASCII chars\n safe = safe.encode('ascii', 
'ignore')\n\n cachekey = (safe, always_safe)\n try:\n (quoter, safe) = _safe_quoters[cachekey]\n except KeyError:\n safe_map = _safe_map.copy()\n safe_map.update([(c, c) for c in safe])\n quoter = safe_map.__getitem__\n safe = always_safe + safe\n _safe_quoters[cachekey] = (quoter, safe)\n if not s.rstrip(safe):\n return s\n return ''.join(map(quoter, s))", "def test_symlit_escape():\n return \"\\\"=\\\"\"", "def argument_quote(argument):\n argument = argument.replace('\"', '\"\"')\n if ' ' in argument:\n argument = argument.replace(\"'\", \"''\")\n argument = \"'\" + argument + \"'\"\n return argument", "def unquote(s):\n if isinstance(s, str) and len(s) > 1:\n if s[0] in ('\"', \"'\") and s[-1] == s[0]:\n q = s[0]\n if len(s) >= 6 and s[0:3].count(q) == 3 and s[-3:].count(q) == 3:\n count = 3\n else:\n count = 1\n s = s[count:-count]\n return s", "def quote(*a, **kw):\n return quote(*a, **kw)", "def man_escape(string):\n result = string.replace(\"\\\\\",\"\\\\\\\\\")\n result = result.replace(\"`\",\"\\\\`\")\n result = result.replace(\"-\",\"\\\\-\")\n return result", "def _safe(text):\n return text.replace(\"'\", \"''\").replace(\"\\\\\", \"\\\\\\\\\")", "def encodeLiteral(self, string):\r\n return string.replace(\"'\",\"''\")", "def dequote(x): \n if x[0] == '\"' and x[len(x)-1] == '\"':\n return x[1:len(x)-1]\n return x", "def escape_triple_quotes(text):\n return text.replace(u'\"\"\"', u'\\\\\"\\\\\"\\\\\"')", "def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True):\n string = string.replace('\"', '\"\"')\n\n if forceDoubleQuote:\n string = '\"' + string + '\"'\n return string", "def unescape_single_quote(escaped):\n\tescaped = escaped.replace('\\\\\\\\', '\\\\')\n\tescaped = escaped.replace('\\\\\\'', '\\'')\n\treturn escaped", "def Unquote(quoted_string):\n if not quoted_string[0] == '\"' or quoted_string[0] == \"'\":\n return quoted_string\n assert quoted_string[0] == quoted_string[-1]\n return_list = []\n i = 1 # skip initial char\n while i < len(quoted_string) - 1:\n char = quoted_string[i]\n if char == \"\\\\\":\n # quoted section\n assert quoted_string[i + 1] == \"x\"\n return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))\n i += 4\n else:\n return_list.append(char)\n i += 1\n return \"\".join(return_list)", "def cleanquotes(string):\n return string.replace(\"\\\"\", \"\\\\\\\"\")", "def quote(value, *args, **kwargs):\n return parse.quote(encode(value, *args, **kwargs))", "def serialize_string(s):\n return f'\"{s}\"'", "def safe_str(self, string):\n return self.db.escape_string(string)", "def unspecial_character(quoted_string):\n\n tokens = tokenize(special_characterd_string, just_one_token=True)\n if len(tokens) == 0:\n raise lib.errorhandler.StringUnspecial_characterException(\n special_characterd_string, \"invalid quoted string\")\n elif len(tokens) == 1:\n return tokens[0]\n else:\n raise lib.errorhandler.StringUnspecial_characterException(\n special_characterd_string, \"unquoted whitespace\")", "def string_from_interwebs(input_value):\n \n return escape(unquote(input_value))", "def elimenate_quote(string):\n\n for i, c in enumerate(string):\n if i==0:\n begin = c\n end = c \n \n if begin == '\"' and end == '\"':\n return string[1:-1]\n if begin == \"'\" and end == \"'\":\n return string[1:-1] \n \n else:\n return string", "def tidy_string(s: str\n ) -> str:\n s = s.encode('ascii', errors='ignore').decode(FORMAT)\n s = s.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace('\\n', '') \n return s", "def celstr(token: lark.Token) -> 
celpy.celtypes.StringType:\n def expand(match_iter: Iterable[Match[str]]) -> Iterator[str]:\n for match in (m.group() for m in match_iter):\n if len(match) == 1:\n expanded = match\n elif match[:2] == r'\\x':\n expanded = chr(int(match[2:], 16))\n elif match[:2] in {r'\\u', r'\\U'}:\n expanded = chr(int(match[2:], 16))\n elif match[:1] == '\\\\' and len(match) == 4:\n expanded = chr(int(match[1:], 8))\n else:\n expanded = CEL_ESCAPES.get(match, match)\n yield expanded\n\n text = token.value\n if text[:1] in (\"R\", \"r\"):\n # Raw; ignore ``\\`` escapes\n if text[1:4] == '\"\"\"' or text[1:4] == \"'''\":\n # Long\n expanded = text[4:-3]\n else:\n # Short\n expanded = text[2:-1]\n else:\n # Cooked; expand ``\\`` escapes\n if text[0:3] == '\"\"\"' or text[0:3] == \"'''\":\n # Long\n match_iter = CEL_ESCAPES_PAT.finditer(text[3:-3])\n else:\n # Short\n match_iter = CEL_ESCAPES_PAT.finditer(text[1:-1])\n expanded = ''.join(expand(match_iter))\n return celpy.celtypes.StringType(expanded)", "def removeEscapingDoubleQuoteInSQLString(string, forceDoubleQuote=True):\n if string is None:\n return string\n\n string = string.replace('\"\"', '\"')\n\n if forceDoubleQuote:\n string = '\"' + string + '\"'\n return string", "def json_escape(s):\n \n return s.replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")", "def cmdstr(cmd):\n if isinstance(cmd, str):\n return cmd\n\n quoted = []\n for arg in cmd:\n if isinstance(arg, Path):\n arg = str(arg)\n if ' ' in arg:\n arg = '\"%s\"' % (arg,)\n quoted.append(arg)\n return ' '.join(quoted)", "def _escapeArg(arg):\n #XXX There is a *lot* more that we should escape here.\n return arg.replace('\"', r'\\\"')", "def exptostr(value):\n return re.sub(r'`(.*)`', r'\\g<1>', value)", "def parse_string(token: Token) -> str:\n chars: List[str] = []\n\n index = 1\n end = len(token.value) - 1\n line, column = token.line, token.column + 1\n\n while index < end:\n char = token.value[index]\n\n if char != '\\\\':\n chars.append(char)\n index += 1\n if char == '\\n':\n line += 1\n column = 1\n else:\n column += 1\n continue\n\n next_char = token.value[index+1]\n if next_char == 'u':\n hex_string = token.value[index+2:index+6]\n try:\n unicode_char = literal_eval(f'\"\\\\u{hex_string}\"')\n except SyntaxError as err:\n raise ParseError(\n f\"Invalid unicode escape: \\\\u{hex_string} \"\n f\"(line {line} column {column})\") from err\n\n chars.append(unicode_char)\n index += 6\n column += 6\n continue\n\n if next_char in ('\"', '/', '\\\\'):\n chars.append(next_char)\n elif next_char == 'b':\n chars.append('\\b')\n elif next_char == 'f':\n chars.append('\\f')\n elif next_char == 'n':\n chars.append('\\n')\n elif next_char == 'r':\n chars.append('\\r')\n elif next_char == 't':\n chars.append('\\t')\n else:\n raise ParseError(\n f\"Unknown escape sequence: {token.value} \"\n f\"(line {line} column {column})\")\n\n index += 2\n column += 2\n\n string = ''.join(chars)\n return string", "def sh_escape(command):\n command = command.replace(\"\\\\\", \"\\\\\\\\\")\n command = command.replace(\"$\", r'\\$')\n command = command.replace('\"', r'\\\"')\n command = command.replace('`', r'\\`')\n return command", "def shlex_join(argv):\n def quote(arg):\n if arg.find(\" \") >= 0:\n return '\"%s\"' % arg\n else:\n return arg\n return \" \".join([quote(arg) for arg in argv])", "def quote_spaces(arg):\n if ' ' in arg or '\\t' in arg:\n return '\"%s\"' % arg\n else:\n return str(arg)", "def unquote(s):\n res = s.split('%')\n # fastpath\n if len(res) == 1:\n return s\n s = res[0]\n for item in 
res[1:]:\n try:\n s += _hextochr[item[:2]] + item[2:]\n except KeyError:\n s += '%' + item\n except UnicodeDecodeError:\n s += unichr(int(item[:2], 16)) + item[2:]\n return s", "def smart_str(s, strings_only=False, errors='strict'):\n return django.utils.encoding.smart_str(\n s, get_site_encoding(), strings_only, errors)", "def safe(e):\n if PY2 and isinstance(e, unicode):\n return quote(e.encode('utf-8'), safe='')\n else:\n return quote(str(e), safe='')", "def LaTeX_verbatimString(self, string, tab_spaces):\n\n lookup_table = {\n '[' : \"{[}\",\n ']' : \"{]}\",\n '{' : \"\\\\{{}\",\n '}' : \"\\\\}{}\",\n '#' : \"\\\\#{}\",\n '&' : \"\\\\&{}\",\n '_' : \"\\\\_{}\",\n '%' : \"\\\\%{}\",\n '$' : \"\\\\${}\",\n '^' : \"\\\\^{}\",\n '\\\\' : \"\\\\textbackslash{}\",\n '~' : \"\\\\textasciitilde{}\",\n '-' : \"{}{-}{}\",\n ':' : \"{}{:}{}\",\n ';' : \"{}{;}{}\",\n '!' : \"{}{!}{}\",\n '?' : \"{}{?}{}\",\n '\"' : \"{}{\\\"}{}\",\n '`' : \"{}{`}{}\",\n '\\'' : \"{}{'}{}\",\n '=' : \"{}{=}{}\",\n '\\t' : \"\\t\"\n }\n\n frame_start = 0\n frame_end = 0\n parts = []\n length = len(string)\n\n while frame_end < length:\n ch = string[frame_end]\n # look for translation\n try:\n escape_sequence = lookup_table[ch]\n except KeyError:\n escape_sequence = \"\"\n #end try\n if escape_sequence:\n if frame_end > frame_start:\n parts.append(string[frame_start:frame_end])\n #end if\n if ch == '\\t':\n parts.append(\" \" * tab_spaces)\n else:\n parts.append(escape_sequence)\n #end if\n frame_end += 1\n frame_start = frame_end\n else:\n frame_end += 1\n #end if\n #end while\n\n if frame_end > frame_start:\n parts.append(string[frame_start:frame_end])\n\n return \"\".join(parts)", "def quote(m):\n return '\"' + m + '\"'", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def __str__(self):\n\n if self._s == '':\n return ''\n\n if len(self.quote) == 1:\n s = self.to_short()\n else:\n s = self.to_long()\n\n try:\n eval(self.quote + s + self.quote)\n except UnicodeDecodeError:\n if self._safe_mode:\n raise\n\n self._safe_mode = True\n\n assert eval(self.quote + s + self.quote) == self._s\n\n return s", "def escape_like(string, escape_char=\"\\\\\"):\n return (\n string.replace(escape_char, escape_char * 2)\n .replace(\"%\", escape_char + \"%\")\n .replace(\"_\", escape_char + \"_\")\n )", "def low_dequote(s):\n s = iter(s)\n d_s = ''\n for c in s:\n if c == M_QUOTE:\n n = s.next()\n c = M_DEQUOTE_TABLE.get(c+n, n) # maybe raise an error\n d_s += c\n return d_s", "def escape_string(value: str) -> str:\n\n def replace(match: Match) -> str:\n return ESCAPE_DCT[match.group(0)]\n\n return ESCAPE.sub(replace, value)" ]
[ "0.7271046", "0.7253869", "0.72336555", "0.7229503", "0.70458126", "0.6944718", "0.6929941", "0.69232047", "0.69232047", "0.6779495", "0.67524135", "0.6597111", "0.65538913", "0.6536574", "0.65330154", "0.65087634", "0.6507119", "0.64975196", "0.64822", "0.646473", "0.644913", "0.63986534", "0.63792753", "0.6364152", "0.63629556", "0.63309103", "0.6299909", "0.62974316", "0.62783724", "0.6259792", "0.6257619", "0.6243498", "0.62306654", "0.62298685", "0.61893445", "0.6179503", "0.61681306", "0.61274266", "0.6110733", "0.6100157", "0.6093866", "0.6009455", "0.6001345", "0.5919022", "0.5910378", "0.58476704", "0.58269227", "0.5819352", "0.57967573", "0.5759096", "0.573995", "0.57358867", "0.572178", "0.57209057", "0.56963396", "0.5689566", "0.56843126", "0.5673905", "0.5671793", "0.5662471", "0.56520563", "0.56303334", "0.56071365", "0.5605475", "0.5594444", "0.55693835", "0.5559797", "0.5557864", "0.5556724", "0.55492973", "0.5542145", "0.553907", "0.551678", "0.55140156", "0.54949903", "0.5484688", "0.54837584", "0.54587865", "0.5456366", "0.54544985", "0.544232", "0.5441337", "0.54365087", "0.5435477", "0.5433787", "0.54310095", "0.5417291", "0.54105496", "0.5408733", "0.53880596", "0.53825104", "0.53789914", "0.53767735", "0.5370465", "0.5369664", "0.5368741", "0.53642726", "0.5362792", "0.5362274", "0.5358069" ]
0.7298411
0
Constructs a shell snippet for a command using a variable to shrink it. Takes into account all quoting that needs to happen.
def ShrinkToSnippet(cmd_parts, var_name, var_value):
    def shrink(value):
        parts = (x and SingleQuote(x) for x in value.split(var_value))
        with_substitutions = ('"$%s"' % var_name).join(parts)
        return with_substitutions or "''"

    return ' '.join(shrink(part) for part in cmd_parts)
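A quick illustrative trace of the function above, assuming SingleQuote() shell-escapes its argument like shlex.quote does (that helper is not shown here):

    # Hypothetical call, derived from the code above:
    ShrinkToSnippet(['echo', 'hello world'], 'path', 'hello')
    # returns the shell snippet: echo "$path"' world'
    # (the occurrence of 'hello' is replaced by "$path"; the remainder ' world'
    #  is re-quoted so the whole snippet stays safe to paste into a shell)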
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substshell(command, path=None, output=os.devnull, mode='w'):\n _compile = SubstCommandCompiler(path)\n _compile.init_command(command)\n return functools.partial(_compile, output, mode)", "def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shell_banner, local=locals, exitmsg='Returning to command shell...')\n\n\t\treturn cmd_shell", "def shellify(val):\n\n if val==None:\n s=''\n elif not isinstance(val,str):\n s=str(val)\n else:\n return shlex.quote(val)\n return shlex.quote(s)", "def _wrap_command_line(s: str) -> str:\n if not _command_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[len(indent):]\n return f\"{indent}pass # {cmd}{_command_escape_comment}\"", "def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE", "def _build_direct_command(self, cmd, arg):\n return \"%s%s\" % (arg, self._newline)", "def wrap_command(command: str) -> str: \n\n wrapper = \"\"\"\n sub callback {\n {{COMMAND}};\n }\n\n import java.io.*; \n import java.util.*; \n $baos = [new ByteArrayOutputStream]; \n $oos = [new ObjectOutputStream: $baos]; \n [$oos writeObject: callback()]; \n [$oos close]; \n $encoder = [Base64 getEncoder]; \n println([$encoder encodeToString: [$baos toByteArray]]);\n \"\"\"\n\n # Replace command in wrapper\n wrapper = wrapper.replace(r\"{{COMMAND}}\", command)\n return convert_to_oneline(wrapper)", "def do_shell(self, line):\n eval(line)", "def _instantiateSecrets(cmd, secrets, hide):\n if secrets:\n for (i, secret) in enumerate(secrets):\n if hide:\n secret = '<hidden>'\n cmd = cmd.replace(f':{i}:', secret)\n return cmd", "def build_sh_cmd(cmd, cwd=None):\n args = cmd.split()\n return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])", "def _preprocess(command):\n for shell_command in DockerProxy.DockerProxy.shell_commands:\n if shell_command in command:\n replace_string = \"/bin/bash -c \\\"\" + shell_command\n command = command.replace(shell_command, replace_string)\n command += \"\\\"\"\n return command", "def _build_simple_command(self, cmd):\n return cmd+SBE37_NEWLINE", "def do_shell(self, line):\n os.system(line)", "def win32_command(command, *args, **kwargs):\n # pylint: disable = redefined-outer-name\n return ' '.join([metasub(\n '\"%s\"' % (slashsub(token).replace('\"', '\\\\\"'),)\n if needq(token) else token\n ) for token in map(_make_formatter(*args, **kwargs),\n split_command(command))])", "def sh_quote_safe(arg):\n return (\"'\" + str(arg).replace(\"'\", r\"'\\''\") + \"'\")", "def sh_quote_unsafe(arg):\n return ('\"' + _DQUOTE_RE.sub(r'\\1\\1\\\"', str(arg)) + '\"' )", "def StringifyCommand(cmd):\n ret = ''\n grouping = 0\n for a in cmd:\n if grouping == 0 and len(ret) > 0:\n ret += \" \\\\\\n \"\n elif grouping > 0:\n ret += \" \"\n if grouping == 0:\n grouping = 1\n if a.startswith('-') and len(a) == 2:\n grouping = 2\n ret += a\n grouping -= 1\n return ret", "def shell_command_strings(self, command):\n return (None, \"$(shell \" + command + \")\", None)", "def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n 
return '\\n'.join(command)", "def shellcommand(command):\n\n subprocess.call(str(command))", "def _make_posix_command():\n qsearch = _re.compile(r'[^a-zA-Z\\d_./-]').search\n needq = lambda x: not x or qsearch(x)\n\n def posix_command(command, *args, **kwargs):\n \"\"\"\n Return a POSIX shell suitable commandline\n\n Either args or kwargs or neither of them can be set. There cannot be\n set both of them.\n\n :Parameters:\n `command` : ``str``\n Generic commandline, possibly containing substitutions, filled by\n args or kwargs. See `split_command` for generic commandline\n syntax.\n\n `args` : ``tuple``\n Substitution tuple\n\n `kwargs` : ``dict``\n Substitution dict\n\n :Return: Strictly quoted shell commandline for POSIX shells\n :Rtype: ``str``\n \"\"\"\n # pylint: disable = redefined-outer-name\n return ' '.join([\n \"'%s'\" % (token.replace(\"'\", \"'\\\\''\")) if needq(token) else token\n for token in map(_make_formatter(*args, **kwargs),\n split_command(command))\n ])\n return posix_command", "def shell_command(self):\n # TODO: fix this naive version by adding quotes where appropriate\n return \" \".join(self.args)", "def do_shell(self, command):\n os.system(command)", "def shellquote(arg):\n if re.match('^[-_.:/=a-zA-Z0-9]*$', arg):\n return arg\n else:\n return \"'%s'\" % arg.replace(\"'\", r\"'\\''\")", "def expand_var(self, key, val=None):\n if val is None:\n return 'unset {0};'.format(key)\n else:\n return '{0}=\"{1}\";export {0};'.format(key, val)", "def _unwrap_command_line(s: str) -> str:\n if not _command_escape_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[(len(indent) + 8):-len(_command_escape_comment)]\n return indent + cmd", "def rebuild_command(args):\n return \"%s\\n\" % (\" \".join(args)).replace(\"\\\\\", \"\\\\\\\\\")", "async def shell(*command: Strings, prefix: Optional[Strings] = None, **resources: int) -> None:\n current = Invocation.current\n if prefix is None:\n global default_shell_prefix # pylint: disable=invalid-name\n prefix = default_shell_prefix.value\n\n def _run_shell(parts: List[str]) -> Awaitable:\n assert prefix is not None\n global shell_executable # pylint: disable=invalid-name\n return asyncio.create_subprocess_shell(\n \" \".join(flatten(prefix, parts)),\n executable=shell_executable.value,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n await current.done(current.run_action(\"shell\", _run_shell, *command, **resources))", "def get_shell_cmd(self, action, wildcards):\n assert action == \"run\", \"Unsupported action\"\n ins = expand(\n self.base_path_in.format(wildcards=wildcards),\n postproc=[self._get_postproc_token()],\n ext=self.extensions,\n )\n outs = [s.format(**wildcards) for s in expand(self.base_path_out, ext=self.extensions)]\n assert len(ins) == len(outs)\n return \"\\n\".join(\n (\n \"test -L {out} || ln -sr {in_} {out}\".format(in_=in_, out=out)\n for in_, out in zip(ins, outs)\n )\n )", "def get_shell(self, shell):", "def make_cmd_fn(cmd):\n\n def do_cmd(rest_of_line, cmd=cmd):\n global_dict, local_dict = namespaces.get_twill_glocals()\n\n args = []\n if rest_of_line.strip() != \"\":\n try:\n args = parse.arguments.parseString(rest_of_line)[0]\n args = parse.process_args(args, global_dict,local_dict)\n except Exception as e:\n print('\\n INPUT ERROR: {} \\n'.format(e))\n return\n\n try:\n parse.execute_command(cmd, args, global_dict, local_dict,\n \"<shell>\")\n except SystemExit:\n raise\n except Exception as e:\n print('\\nERROR: %s\\n' % 
(str(e),))\n\n return do_cmd", "def posix_command(command, *args, **kwargs):\n # pylint: disable = redefined-outer-name\n return ' '.join([\n \"'%s'\" % (token.replace(\"'\", \"'\\\\''\")) if needq(token) else token\n for token in map(_make_formatter(*args, **kwargs),\n split_command(command))\n ])", "def __build_cmd(self, infname, outdir):\n self._outdirname = os.path.join(outdir, \"trimmomatic_output\")\n cmd = [\"trimmomatic\",\n infname,\n \"-o\", self._outdirname]\n self._cmd = ' '.join(cmd)", "def memory_limit_script_command():\n command = 'random_string() { ' \\\n ' base64 /dev/urandom | tr -d \\'/+\\' | dd bs=1048576 count=1024 2>/dev/null; ' \\\n '}; ' \\\n 'R=\"$(random_string)\"; ' \\\n 'V=\"\"; ' \\\n 'echo \"Length of R is ${#R}\" ; ' \\\n 'for p in `seq 0 99`; do ' \\\n ' for i in `seq 1 10`; do ' \\\n ' V=\"${V}.${R}\"; ' \\\n ' echo \"progress: ${p} ${p}-percent iter-${i}\" ; ' \\\n ' done ; ' \\\n 'done'\n return command", "def _make_cmdline(self, line):\n if isinstance(line, list):\n parts = line\n else:\n parts = line.split(\" \", 1)\n cmd = parts[0]\n exe = os.path.join(BINDIR, cmd)\n\n python_cmds = [\"samba-tool\",\n \"samba_dnsupdate\",\n \"samba_upgradedns\",\n \"script/traffic_replay\",\n \"script/traffic_learner\"]\n\n if os.path.exists(exe):\n parts[0] = exe\n if cmd in python_cmds and os.getenv(\"PYTHON\", None):\n parts.insert(0, os.environ[\"PYTHON\"])\n\n if not isinstance(line, list):\n line = \" \".join(parts)\n\n return line", "def make_help_cmd(cmd, docstring):\n def help_cmd(message=docstring, cmd=cmd):\n print('=' * 15)\n print('\\nHelp for command %s:\\n' % (cmd,))\n print(message.strip())\n print('')\n print('=' * 15)\n print('')\n\n return help_cmd", "def sh_escape(command):\n command = command.replace(\"\\\\\", \"\\\\\\\\\")\n command = command.replace(\"$\", r'\\$')\n command = command.replace('\"', r'\\\"')\n command = command.replace('`', r'\\`')\n return command", "def shell(cmd):\n return G.DEVICE.shell(cmd)", "def sh(cmd, quiet=False, wd=None, wrap=True, maxbuflen=1000000000, ignore_errors=False, no_venv=False, pyenv_version=None):\n if wd is None: wd = os.getcwd()\n\n try:\n return _rsh(\"127.0.0.1\", cmd, quiet, wd, wrap, maxbuflen, -1, ignore_errors, no_venv, pyenv_version)\n except ShellException as e: # this makes the stacktrace easier to read\n raise ShellException(e.returncode) from None", "def _build_solo_command(self, cmd):\n return COMMAND_CHAR[cmd]", "def repl_command(fxn):\n\n @functools.wraps(fxn)\n def wrapper(self, arglist):\n \"\"\"Wraps the command method\"\"\"\n args = []\n kwargs = {}\n if arglist:\n for arg in shlex.split(arglist):\n if \"=\" in arg:\n split = arg.split(\"=\", 1)\n kwargs[split[0]] = split[1]\n else:\n args.append(arg)\n return fxn(self, *args, **kwargs)\n\n return wrapper", "def shell(console):\n return create_shell(\n MANAGE_DICT.get(\"shell\", {}).get(\"console\", console), MANAGE_DICT\n )", "def _cleanup_command(self, crawl_id):\n cmd_line = (\"qsub -V -b y -cwd python {}/spidercleaner.py -r host:{},port:{} -c {}\"\n ).format(_spdr_engine_location(), self.engine_redis_host,\n self.engine_redis_port, crawl_id)\n if self.psuedo_dist:\n cmd_line += \" -d\"\n return cmd_line", "def _format_command(command: List[str], shell: bool = False) -> Union[Sequence[str], str]:\n return command if not shell else \" \".join(command)", "def generate_subshell_file_contents(cmd, skip_module_loading, skip_module_unloading):\r\n\r\n\r\n out_lines = []\r\n\r\n # Invoke commands to produce their output command string(s)\r\n 
cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n\r\n # Write current command to stdout\r\n out_lines.append('echo ' + '-'*80)\r\n out_lines.append('echo Executing the following command:')\r\n for c in cmd_list:\r\n out_lines.append('echo \"' + c + '\"')\r\n out_lines.append('date')\r\n\r\n # Write current command to errout\r\n out_lines.append('echo ' + '-'*80 + ' >&2')\r\n out_lines.append('echo Executing the following command: >&2')\r\n for c in cmd_list:\r\n out_lines.append('echo \"' + c + '\" >&2')\r\n out_lines.append('date >&2')\r\n\r\n # Write module load commands required for current command to\r\n # the output shell script\r\n if not skip_module_loading:\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n # Write command lines to the output shell script\r\n out_lines += cmd_list\r\n out_lines += ['#']*5\r\n\r\n # Write module unload commands required for current command\r\n # to the output shell script\r\n if not skip_module_unloading:\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n\r\n #Write to stdout\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n #Write to errout\r\n out_lines.append('echo Finished at: >&2')\r\n out_lines.append('date >&2')\r\n\r\n return out_lines", "def do_shell(self, line):\n subprocess.call(line, shell=True)", "def make_create_cmd(opts, vmdk_path):\n if not \"size\" in opts:\n size = DEFAULT_DISK_SIZE\n else:\n size = str(opts[\"size\"])\n logging.debug(\"SETTING VMDK SIZE to %s for %s\", size, vmdk_path)\n\n if kv.VSAN_POLICY_NAME in opts:\n # Note that the --policyFile option gets ignored if the\n # datastore is not VSAN\n policy_file = vsan_policy.policy_path(opts[kv.VSAN_POLICY_NAME])\n return \"{0} {1} --policyFile {2} {3}\".format(VMDK_CREATE_CMD, size,\n policy_file, vmdk_path)\n else:\n return \"{0} {1} {2}\".format(VMDK_CREATE_CMD, size, vmdk_path)", "def formatCommand(command):\n cmdstr=\"\"\n logging.debug(repr(command))\n for arg in command:\n if \" \" in arg:\n cmdstr=cmdstr+\" \\\"\"+arg+\"\\\"\"\n else:\n cmdstr=cmdstr+\" \"+arg\n return cmdstr", "def sh_quote_unsafe_cmdline(args):\n return str.join(' ', (sh_quote_unsafe(arg) for arg in args))", "def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))", "def _shiftSecrets(cmd, secrets, shift):\n if secrets:\n for i in range(len(secrets), 0, -1):\n cmd = cmd.replace(f':{i+shift-2}:', f':{i+shift-1}:')\n return cmd", "def make_command(remit=str(),\n source=str(),\n writer=str(),\n extension=str(),\n pandoc_options=list()):\n # if no extension specified, infer it from writer\n if not extension and writer:\n if writer in spec.DEFAULT_EXTENSION:\n extension = spec.DEFAULT_EXTENSION[writer]\n else:\n print('WARNING: No known extension for writer \"%s\", '\n 'using \".UNKNOWN\"' % writer)\n extension = '.UNKNOWN'\n # start building the command...\n # remit\n command = [remit]\n # input file\n # - if remit_WRITER.md exists, then use it!\n writer_specific_source = remit + '_' + writer + '.md'\n if os.path.exists(writer_specific_source):\n command += [writer_specific_source]\n else:\n command += [remit + '.md']\n # writer\n if writer:\n command += ['-t']\n command += [writer]\n # other options\n command.extend(pandoc_options)\n # output\n # - first build output filename...\n if writer == '':\n pretty_writer_string = 'default'\n else:\n 
pretty_writer_string = writer\n target = os.path.join(os.getcwd(), '..', '..', 'output-'+remit,\n source,\n pretty_writer_string\n + ''.join(pandoc_options)\n + extension)\n target = os.path.normpath(target)\n target = os.path.relpath(target)\n command += ['-o']\n command += [target]\n # panzer-specific options\n if remit == 'panzer':\n command += ['---quiet']\n # support directory\n command += ['---panzer-support']\n target = os.path.join(os.getcwd(), '..', '..', 'dot-panzer')\n target = os.path.normpath(target)\n target = os.path.relpath(target)\n command += [target]\n # debug outputs\n # command += ['---debug']\n # target = os.path.join(os.getcwd(), '..', '..', 'output-'+remit,\n # source, 'debug', 'debug_' + source)\n # target = os.path.normpath(target)\n # command += [target\n # + \"_\"\n # + pretty_writer_string\n # + ''.join(pandoc_options)]\n # done!\n return command", "def get_command(self, command):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif command in ('md5sum','sed','head'):\n\t\t\tif self.get_current_shutit_pexpect_session_environment().distro == 'osx':\n\t\t\t\treturn 'g' + command\n\t\treturn command", "def do_shell(command, context=None, **kwargs):\n logging.info(\"%s: executing %s\" % (context, command))\n\n child_env = {'CRANKD_CONTEXT': context}\n\n # We'll pull a subset of the available information in for shell scripts.\n # Anyone who needs more will probably want to write a Python handler\n # instead so they can reuse things like our logger & config info and avoid\n # ordeals like associative arrays in Bash\n for k in [ 'info', 'key' ]:\n if k in kwargs and kwargs[k]:\n child_env['CRANKD_%s' % k.upper()] = str(kwargs[k])\n\n if 'user_info' in kwargs:\n for k, v in kwargs['user_info'].items():\n child_env[create_env_name(k)] = str(v)\n\n try:\n rc = call(command, shell=True, env=child_env)\n if rc == 0:\n logging.debug(\"`%s` returned %d\" % (command, rc))\n elif rc < 0:\n logging.error(\"`%s` was terminated by signal %d\" % (command, -rc))\n else:\n logging.error(\"`%s` returned %d\" % (command, rc))\n except OSError, exc:\n logging.error(\"Got an exception when executing %s:\" % (command, exc))", "def MakeCommand(base_cmd, outpath, filename):\n basename = os.path.basename(filename)\n out = os.path.join(outpath, os.path.splitext(basename)[0] + '.obj')\n return (base_cmd + ['-c', filename, '-o', out], basename)", "def command(c):\n if len(c.strip().split()) < 1:\n return c\n\n cmd = c.strip().split()[0]\n if cmd in HELP:\n env['ht'].infos.append(HELP_TEXT)\n\n\n if cmd in STATS:\n env['ht'].infos.append(\"The current table faced %d conflicts, hosts %d distincts cards and it's size is %d slots'\" % \\\n (env['ht'].conflicts, len(env['ht'].slots), len(env['ht'].cards)))\n return \"\"\n\n if len(c.strip().split()) < 2:\n return c\n\n arg = c.strip().split()[1]\n if cmd in ADD_CARD:\n env['ht'].insert(Card(arg))\n return \"id %s\" % arg.split(';')[0]\n\n if cmd in DELETE_CARD:\n env['ht'].delete(arg)\n return \"\"\n\n if c in TOGGLE_DUPLICATE_SILENCE:\n if env['duplicate_silence']:\n set_val = False\n str = \" no more\"\n else:\n set_val = True\n str = \"\"\n env.permanent(('duplicate_silence', set_val))\n env['ht'].duplicate_silence = set_val\n env['ht'].infos.append(\"Duplicates are silent%s\" % str)\n return \"\"\n\n return c", "def _define_script_command(command_name,\n parent_shell,\n bootstrap_script,\n container_path,\n scripts_path,\n script):\n script_fragment = \"\\\"{}\\\"\".format(script) if script else \"\"\n 
parent_shell.define_command(command_name,\n \"python \\\"{bootstrap}\\\" \"\n \"-d \\\"{container}\\\" \"\n \"-r \\\"{scripts}\\\" \"\n \"-s {script}\"\n \"\".format(bootstrap=bootstrap_script,\n container=container_path,\n scripts=scripts_path,\n script=script_fragment))", "def make_shirt(size='L', message='Look Away'):\n print(\"I need a shirt, size \" + size + \", that says \" + \"'\" + message + \"'.\")", "def parse_glue_cmd(self, line):\n line, any_vars = self.find_vars_in_str(line)\n words = line.split()\n words = self.fix_words(words)\n gluers = [gen_parse.rm_quotation_marks(word) for word in words[1:-2]]\n new_str = ''.join(gluers)\n\n self.set_var(words[-1], new_str)", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def wrap_command(self, prog_name, command, preserve_files):\n raise NotImplementedError(\"Pure virtual method wrap_command() called\")", "def cmdstr(cmd):\n if isinstance(cmd, str):\n return cmd\n\n quoted = []\n for arg in cmd:\n if isinstance(arg, Path):\n arg = str(arg)\n if ' ' in arg:\n arg = '\"%s\"' % (arg,)\n quoted.append(arg)\n return ' '.join(quoted)", "def cmdShell(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, clear: bool=True, command: Union[AnyStr, bool]=\"\",\n defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dragCallback:\n Script=None, dropCallback: Script=None, enable: bool=True, enableBackground:\n bool=True, enableKeyboardFocus: bool=True, exists: bool=True, fullPathName:\n bool=True, height: Union[int, bool]=0, highlightColor: Union[List[float, float,\n float], bool]=None, isObscured: bool=True, manage: bool=True, noBackground:\n bool=True, numberOfHistoryLines: Union[int, bool]=0, numberOfPopupMenus: bool=True,\n numberOfSavedLines: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, preventOverride: bool=True, prompt: Union[AnyStr,\n bool]=\"\", statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\", visible: bool=True,\n visibleChangeCommand: Union[Script, bool]=None, width: Union[int, bool]=0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def extracted(self, variable, timestep):\n try:\n process = cmd_run(cmd_make(variable, self.uda, timestep))\n except subprocess.CalledProcessError:\n print(\"The timestep {timestep} was not found in {uda}\"\n \" and was therefore ignored.\".format(timestep=timestep, uda=self.uda))\n return None\n return StringIO(process)", "def shell(self, cmd):\n raise NotImplementedError", "def cmd(self, shellcmd, *args, **kwargs):\n _cmd = shellcmd.format(*args, **kwargs)\n os.system(_cmd)", "def shell_cmd(ctx, extra_flags):\n ctx.load_plugins(extra_flags=extra_flags)\n import code\n from lektor.db import F, Tree\n from lektor.builder import Builder\n\n banner = \"Python %s on %s\\nLektor Project: %s\" % (\n sys.version,\n sys.platform,\n ctx.get_env().root_path,\n )\n ns = {}\n startup = os.environ.get(\"PYTHONSTARTUP\")\n if startup and os.path.isfile(startup):\n with open(startup, \"r\", encoding=\"utf-8\") as f:\n eval(compile(f.read(), startup, \"exec\"), ns) # pylint: disable=eval-used\n pad = ctx.get_env().new_pad()\n ns.update(\n project=ctx.get_project(),\n env=ctx.get_env(),\n pad=pad,\n tree=Tree(pad),\n config=ctx.get_env().load_config(),\n make_builder=lambda: Builder(\n ctx.get_env().new_pad(), ctx.get_default_output_path()\n ),\n F=F,\n )\n try:\n c = Config()\n c.TerminalInteractiveShell.banner2 = banner\n 
embed(config=c, user_ns=ns)\n except NameError: # No IPython\n code.interact(banner=banner, local=ns)", "def simplify(expression, auto_identify_var=False, variable=\"x\"):\n \n if not auto_identify_var:\n var = variable\n else:\n var = identify_var(expression)\n \n return Expression(expression, var=var).eval()", "def memory_limit_python_command():\n command = 'python3 -c ' \\\n '\"import resource; ' \\\n ' import sys; ' \\\n ' sys.stdout.write(\\'Starting...\\\\n\\'); ' \\\n ' one_mb = 1024 * 1024; ' \\\n ' [sys.stdout.write(' \\\n ' \\'progress: {} iter-{}.{}-mem-{}mB-{}\\\\n\\'.format(' \\\n ' i, ' \\\n ' i, ' \\\n ' len(\\' \\' * (i * 50 * one_mb)), ' \\\n ' int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / one_mb), ' \\\n ' sys.stdout.flush())) ' \\\n ' for i in range(100)]; ' \\\n ' sys.stdout.write(\\'Done.\\\\n\\'); \"'\n return command", "def make_shirt(size='large', message='I love Python!'):\n print(\"\\nI'm going to make a \" + size + \" t-shirt.\")\n print('It will say, \"' + message + '\"')", "def make_shirt(size='large', message='I love Python!'):\n print(\"\\nI'm going to make a \" + size + \" t-shirt.\")\n print('It will say, \"' + message + '\"')", "def _make_split_command():\n argre = r'[^\"\\s]\\S*|\"[^\\\\\"]*(?:\\\\[\\\\\"][^\\\\\"]*)*\"'\n check = _re.compile(\n r'\\s*(?:%(arg)s)(?:\\s+(?:%(arg)s))*\\s*$' % dict(arg=argre)\n ).match\n split = _re.compile(argre).findall\n strip = _ft.partial(_re.compile(r'\\\\([\\\\\"])').sub, r'\\1')\n\n def split_command(command): # pylint: disable = redefined-outer-name\n \"\"\"\n Split generic commandline into single arguments\n\n The command splitter splits between tokens. Tokens are non-whitespace\n sequences or double quoted strings. Inside those double quotes can be\n escaped with a backslash. So have to be backslashes.\n\n Stolen from <http://opensource.perlig.de/svnmailer/>.\n\n :Return: Parser for generic commandlines\n :Rtype: callable\n \"\"\"\n if not check(command):\n raise ValueError(\"Invalid command string %r\" % (command,))\n\n return [\n strip(arg[1:-1]) if arg.startswith('\"') else arg\n for arg in split(command)\n ]\n\n return split_command", "def MakeVmCommand(image, memory, snapshot):\n\n cmd = ['qemu-system-x86_64',\n '-hda', image,\n '--enable-kvm',\n '-m', str(memory)]\n # Concatenate any additional options.\n if snapshot:\n cmd += ['-snapshot']\n return cmd", "def make_shirt(size, message):\n print(\"\\nI'm going to make a \" + size + \" t-shirt.\")\n print('It will say, \"' + message + '\"')", "def make_shirt(size, message):\n print(\"\\nI'm going to make a \" + size + \" t-shirt.\")\n print('It will say, \"' + message + '\"')", "def djshell():\n if '@' in env.host_string:\n env.shell_host_string = env.host_string\n else:\n env.shell_host_string = '%(user)s@%(host_string)s' % env\n env.shell_default_dir = env.shell_default_dir_template % env\n env.shell_interactive_djshell_str = env.shell_interactive_djshell % env\n if env.is_local:\n cmd = '%(shell_interactive_djshell_str)s' % env\n else:\n cmd = 'ssh -t -i %(key_filename)s %(shell_host_string)s \"%(shell_interactive_djshell_str)s\"' % env\n #print cmd\n os.system(cmd)", "def sh(s, log_command=True, **kwargs):\n def actually_sh(task=None):\n logger = logging.getLogger(__name__)\n if log_command:\n logger.info(SHELL_COMMAND+s)\n kwargs['shell'] = True\n ret = _sh(s, **kwargs)\n logger.info(\"Execution complete. 
Stdout: %s\\nStderr: %s\",\n ret[0] or '',\n ret[1] or '')\n return actually_sh", "def cmd_make(var, uda, timestep=None):\n cmdargs = [\"-partvar\", var]\n if timestep:\n if not isinstance(timestep, list):\n timestep = [timestep]\n cmdargs.extend([\n \"-timesteplow\",\n str(min(timestep)), \"-timestephigh\",\n str(max(timestep))\n ])\n return [PUDA, *cmdargs, uda]", "def do_shell(self, command):\n proc = subprocess.Popen(command, stdout=self.stdout, shell=True)\n proc.communicate()", "def get_make_var(self, name: str) -> str:\n return self.soong_ui([\"--dumpvar-mode\", name], capture_output=True).rstrip(\"\\n\")", "def build_command_string(self):\n if self._regex_helper.search_compiled(W._re_h, self.options):\n if self._regex_helper.group(\"SOLO\"):\n self.options = self.options.replace('-h', '')\n else:\n self.options = self.options.replace('h', '')\n\n cmd = \"{} {}\".format(\"w\", self.options)\n else:\n cmd = \"{}\".format(\"w\")\n return cmd", "def _sh_quote(s):\n if not s:\n return b\"\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return b\"'\" + s.replace(b\"'\", b\"'\\\"'\\\"'\") + b\"'\"", "def _command_template(self, switches):\n\n command = [\"java\", \"-jar\", self.file_jar, \"-eUTF-8\"]\n\n if self.memory_allocation:\n command.append(\"-Xmx{}\".format(self.memory_allocation))\n\n command.extend(switches)\n\n if six.PY2:\n with open(os.devnull, \"w\") as devnull:\n out = subprocess.Popen(\n command, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=devnull)\n elif six.PY3:\n out = subprocess.Popen(\n command, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\n stdoutdata, _ = out.communicate()\n\n return stdoutdata.decode(\"utf-8\").strip()", "def create_ipmi_ext_command_string(command, **options):\n\n new_options = collections.OrderedDict()\n for option in ipmi_required_options:\n # This is to prevent boot table \"-N 10\" vs user input timeout.\n if \" -N \" in command and option == \"N\":\n continue\n if option in options:\n # If the caller has specified this particular option, use it in\n # preference to the default value.\n new_options[option] = options[option]\n # Delete the value from the caller's options.\n del options[option]\n else:\n # The caller hasn't specified this required option so specify it\n # for them using the global value.\n var_name = 'ipmi_' + ipmi_option_name_map[option]\n value = eval(var_name)\n new_options[option] = value\n # Include the remainder of the caller's options in the new options\n # dictionary.\n for key, value in options.items():\n new_options[key] = value\n\n return gc.create_command_string('ipmitool', command, new_options)", "def factory(cmd, **default_kwargs):\n cmd = resolve_command(cmd)\n return Command(cmd)", "def make_command(self):\n # self.add_root_bucket()\n\n stringa = \"tc qdisc add dev \" + self.__interface + \" root netem \"\n stringa += \"delay \" + self.latency['latency'] + \"ms \" + self.latency['jitter'] + \"ms \" + self.latency[\n 'correlation'] + \"% distribution \" + self.latency['distribution']\n stringa += \" loss \" + self.drop['probability'].__str__() + \"% \" + self.drop['correlation'].__str__() + \"%\"\n stringa += \" corrupt \" + self.corrupt['probability'].__str__() + \"% duplicate \" + \\\n self.duplicate['probability'].__str__() + \"%\"\n\n cmd = shlex.split(stringa)\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n o, e = 
proc.communicate(timeout=1)\n except subprocess.TimeoutExpired:\n proc.kill()\n raise RuntimeWarning(\"Old configuration not eliminated\")\n\n if e.decode('ascii') != \"\":\n if proc.returncode == 2:\n raise RuntimeWarning(e.decode('ascii') + \"\\nUsing stale configuration, wipe the old settings\")\n return str(proc.returncode)", "def gen_command(process):\n cmd = \"{} \".format(process.name)\n for o in process.options.opt_list:\n i = 0\n opt = \"\"\n for el in o: \n if el and el != \"input\" and el != \"output\" and i != 3:\n opt += str(el)\n if opt[-1] != \"=\" and opt[-1] != \"'\": # command without space\n opt += \" \" # space\n i += 1\n cmd += opt\n return cmd", "def command(wrapped=None, synonyms=(), helphint=\"\", hidden=False,\n chat_only=False, muc_only=False):\n def decorator(fn):\n fn.is_command = True\n fn.synonyms = synonyms\n fn.helphint = helphint\n fn.hidden = hidden\n fn.chat_only = chat_only\n fn.muc_only = muc_only\n return fn\n if wrapped is None:\n return decorator\n else:\n return decorator(wrapped)", "def shlex_join(split_command) -> str:\n return \" \".join(shlex.quote(str(arg)) for arg in split_command)", "def shellformat(string):\n return \"'\" + string.replace(\"'\", \"'\\\\''\") + \"'\"", "def make_shirt(size, message):\n print(\"I need a shirt, size \" + size + \", that says \" + message + \".\")", "def cmdify(self):\n return \" \".join(\n itertools.chain(\n [_quote_if_contains(self.command, r\"[\\s^()]\")],\n (_quote_if_contains(arg, r\"[\\s^]\") for arg in self.args),\n )\n )", "def command(func):\n classname = inspect.getouterframes(inspect.currentframe())[1][3]\n name = func.__name__\n help_name = name.replace(\"do_\", \"help_\")\n doc = textwrap.dedent(func.__doc__)\n\n def new(instance, args):\n # instance.new.__doc__ = doc\n try:\n argv = shlex.split(args)\n arguments = docopt(doc, help=True, argv=argv)\n func(instance, args, arguments)\n except SystemExit as e:\n if args not in ('-h', '--help'):\n print(\"Could not execute the command.\")\n print(e)\n print(doc)\n\n new.__doc__ = doc\n return new", "def dunc(self, arg):\n return \"A{0}{1}\".format(arg, self.opts)", "def quote(*a, **kw):\n return quote(*a, **kw)", "def shellquote(s):\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'", "def _build_command(tables, fixtures_path, fixture_name):\n command = \"python manage.py dumpdata{0} --indent=4 > {1}/{2}\".format(\n tables, fixtures_path, fixture_name\n )\n return command", "def generate_command_string(self, operation, *args, **kwargs):\n cmd = [self.terraform_binary_path, operation]\n\n for key, value in kwargs.items():\n if key == \"var\":\n for varkey, varval in value.items():\n option = \"-var=\"\n option += \"'%s=%s'\" % (varkey, varval)\n cmd.append(option)\n else:\n option = \"\"\n if \"_\" in key:\n key = key.replace(\"_\", \"-\")\n\n if value == \"IsFlag\":\n option = \"-%s\" % key\n else:\n option = \"-%s=%s\" % (key, value)\n cmd.append(option)\n\n if len(args) > 0:\n for arg in args:\n cmd.append(arg)\n\n return \" \".join(cmd)", "def shell(self,\n command,\n command_name=\"shell\",\n timeout=None,\n port=0,\n include_return_code=False):\n timeout = timeout or self.timeouts[\"SHELL\"]\n response = \"\"\n if include_return_code:\n return_code = 0\n try:\n response = self._command(command)\n response = self._list_to_str(response)\n except errors.DeviceError:\n return_code = -1\n return response, return_code\n response = self._command(command)\n return self._list_to_str(response)", "def shell_init_instructions(cmd, equivalent):\n\n 
shell_specific = \"{sh_arg}\" in equivalent\n\n msg = [\n \"`%s` requires Spack's shell support.\" % cmd,\n \"\",\n \"To set up shell support, run the command below for your shell.\",\n \"\",\n color.colorize(\"@*c{For bash/zsh/sh:}\"),\n \" . %s/setup-env.sh\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For csh/tcsh:}\"),\n \" source %s/setup-env.csh\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For fish:}\"),\n \" source %s/setup-env.fish\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For Windows batch:}\"),\n \" source %s/spack_cmd.bat\" % spack.paths.share_path,\n \"\",\n \"Or, if you do not want to use shell support, run \"\n + (\"one of these\" if shell_specific else \"this\")\n + \" instead:\",\n \"\",\n ]\n\n if shell_specific:\n msg += [\n equivalent.format(sh_arg=\"--sh \") + \" # bash/zsh/sh\",\n equivalent.format(sh_arg=\"--csh \") + \" # csh/tcsh\",\n equivalent.format(sh_arg=\"--fish\") + \" # fish\",\n equivalent.format(sh_arg=\"--bat \") + \" # batch\",\n ]\n else:\n msg += [\" \" + equivalent]\n\n msg += [\n \"\",\n \"If you have already set up Spack's shell support but still receive\",\n \"this message, please make sure to call Spack via the `spack` command\",\n \"without any path components (such as `bin/spack`).\",\n ]\n\n msg += [\"\"]\n tty.error(*msg)" ]
[ "0.5757063", "0.53830165", "0.53762203", "0.5325956", "0.53113496", "0.5278688", "0.518739", "0.5154804", "0.51409817", "0.5108147", "0.50834435", "0.50418144", "0.5036694", "0.49932173", "0.49862692", "0.4973778", "0.49597052", "0.49114555", "0.49044985", "0.4899245", "0.48928267", "0.48887584", "0.4858884", "0.48539594", "0.4846609", "0.48303756", "0.48161605", "0.4791082", "0.4765783", "0.4755334", "0.47485548", "0.47179595", "0.47140798", "0.47103828", "0.4691153", "0.46717402", "0.46475855", "0.46291232", "0.46277297", "0.46215034", "0.4612225", "0.46079412", "0.45893493", "0.45789316", "0.45779952", "0.45695677", "0.45688635", "0.45660564", "0.45582426", "0.45475304", "0.4544211", "0.4534335", "0.4518972", "0.45149383", "0.45125452", "0.45113784", "0.45105085", "0.45021906", "0.45016775", "0.44974002", "0.44956383", "0.44859317", "0.44792563", "0.44743383", "0.44700304", "0.4468968", "0.44669458", "0.4456755", "0.44539034", "0.44504344", "0.44504344", "0.4422477", "0.44203815", "0.44133183", "0.44133183", "0.44130385", "0.44105858", "0.44001094", "0.43969294", "0.43897858", "0.43895534", "0.43806762", "0.4379904", "0.43784848", "0.43765035", "0.43749386", "0.43724802", "0.43701684", "0.43618965", "0.43604705", "0.43584713", "0.4357683", "0.4342958", "0.43271804", "0.4326599", "0.43258402", "0.43255103", "0.4323636", "0.43164417", "0.4301022" ]
0.7213392
0
Opens a subprocess to execute a program and returns its return value.
def RunCmd(args, cwd=None):
    logger.debug(str(args) + ' ' + (cwd or ''))
    return Call(args, cwd=cwd)
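For illustration, Call() is assumed here to be a sibling helper that spawns the subprocess, waits for it, and returns its exit code:

    # Hypothetical usage; 0 indicates the program exited successfully:
    return_code = RunCmd(['ls', '-la'], cwd='/tmp')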
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runProgram(cmd):\n try:\n p=subprocess.Popen(cmd,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n (stdout, stderr) = p.communicate()\n except Exception as e:\n print(\"Error running program because: {0}\".format(e), file=errorOutput)\n return None\n else:\n if stderr:\n print(\"Error running program because: {0} \".format(stderr), file=errorOutput)\n return None\n else:\n trace(\"runProgram() => \" + str(stdout), minLevel=2)\n return stdout.decode('utf-8').rstrip('\\n')", "def shell_cmd(*args):\n proc = subprocess.run(args)\n returncode = proc.returncode\n if returncode != 0:\n raise RuntimeError(\n f\"Command {args} failed with return code {returncode}\")\n return proc", "def exec_process(cmdline, silent, input=None, **kwargs):\n try:\n sub = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n stdout, stderr = sub.communicate(input=input)\n returncode = sub.returncode\n if not silent:\n sys.stdout.write(stdout)\n sys.stderr.write(stderr)\n except OSError, e:\n if e.errno == 2:\n raise RuntimeError('\"%s\" is not present on this system' % cmdline[0])\n else:\n raise\n if returncode != 0:\n raise RuntimeError('Got return value %d while executing \"%s\", stderr output was:\\n%s' % (returncode, \" \".join(cmdline), stderr.rstrip(\"\\n\")))\n return stdout", "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output", "def run_subprocess(command, environment=None, shell=False, raise_on_error=True):\n proc = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell,\n env=environment)\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n if raise_on_error:\n raise RuntimeError('{}\\n{}'.format(stderr, stdout))\n return stdout, stderr, proc.returncode", "def subprocess_run(cmd, ignore_failure=False, shell=True):\n try:\n proc = subprocess.Popen(\n cmd,\n shell=shell,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rc = proc.returncode\n\n except OSError as exc:\n raise RuntimeError('Failed to run ' + cmd + ': [Errno: %d] ' %\n exc.errno + exc.strerror + ' [Exception: ' +\n type(exc).__name__ + ']')\n if (not ignore_failure) and (rc != 0):\n raise RuntimeError('(%s) failed with rc=%s: %s' %\n (cmd, rc, err))\n return out", "def call_subprocess_Popen(command, **params):\r\n if 'stdout' in params or 'stderr' in params:\r\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\r\n null = open(os.devnull, 'wb')\r\n # stdin to devnull is a workaround for a crash in a weird Windows\r\n # environement where sys.stdin was None\r\n params.setdefault('stdin', null)\r\n params['stdout'] = null\r\n params['stderr'] = null\r\n p = subprocess_Popen(command, **params)\r\n p.wait()\r\n return p.returncode", "def run_subprocess(cmd):\n subprocess.Popen(cmd, stdin =subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,)", "def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()", "def _run_cmd(*args):\n proc = Popen(\n args, stdin=PIPE, stdout=PIPE, stderr=PIPE,\n cwd=os.path.dirname(__file__))\n output, _ = proc.communicate()\n code = proc.returncode\n return code, output", "def execute(cmd):\n\n process = 
subprocess.Popen(cmd,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out = ''\n err = ''\n exitcode = 0\n\n result = process.communicate()\n (out, err) = result\n exitcode = process.returncode\n\n return exitcode, out.decode(), err.decode()", "def subprocess_Popen(command, **params):\r\n startupinfo = None\r\n if os.name == 'nt':\r\n startupinfo = subprocess.STARTUPINFO()\r\n try:\r\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n except AttributeError:\r\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\r\n\r\n # Anaconda for Windows does not always provide .exe files\r\n # in the PATH, they also have .bat files that call the corresponding\r\n # executable. For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\r\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\r\n # execute \"g++\" without extensions.\r\n # (Executing \"g++.bat\" explicitly would also work.)\r\n params['shell'] = True\r\n\r\n # Using the dummy file descriptors below is a workaround for a\r\n # crash experienced in an unusual Python 2.4.4 Windows environment\r\n # with the default None values.\r\n stdin = None\r\n if \"stdin\" not in params:\r\n stdin = open(os.devnull)\r\n params['stdin'] = stdin.fileno()\r\n\r\n try:\r\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\r\n finally:\r\n if stdin is not None:\r\n del stdin\r\n return proc", "def run(cmd, dieOnError=True):\n\n\tps = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\texitcode = ps.returncode\n\tstdout,stderr = ps.communicate()\n\treturn exitcode, stdout, stderr", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def Subprocess(self, cmd):\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def run_command(cmd):\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n return proc.returncode, stdout, stderr", "def call(self):\n\n process = subprocess.Popen(self._cmd, stdout=self._stdout, stderr=self._stderr,\n shell=isinstance(self._cmd, basestring), env=self._env, cwd=self._cwd)\n returnData = process.communicate()\n\n return ProcessResult(process.returncode, returnData[0], returnData[1])", "def run(cmd, shell=False, cwd=None):\n try:\n out = check_output(cmd, shell=shell, cwd=cwd, stderr=STDOUT)\n except CalledProcessError as ex:\n return ex.returncode, ex.output\n else:\n return 0, out", "def runCommand(command):\n process = subprocess.Popen(command, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n return process.communicate()", "def run_program(program, args=None, **subprocess_kwargs):\n if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:\n raise ProgramError(\n \"This function is only for non-shell programs, \"\n \"use run_shell_command() instead.\")\n fullcmd = find_program(program)\n if not fullcmd:\n raise ProgramError(\"Program %s was not found\" % program)\n # As per subprocess, we make a complete list of prog+args\n fullcmd = [fullcmd] + (args or [])\n for stream in ['stdin', 'stdout', 'stderr']:\n subprocess_kwargs.setdefault(stream, subprocess.PIPE)\n subprocess_kwargs = alter_subprocess_kwargs_by_platform(\n **subprocess_kwargs)\n return subprocess.Popen(fullcmd, **subprocess_kwargs)", "def RunCommand(command):\n proc = subprocess.Popen(command, 
stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, shell=False)\n return proc.communicate()[0]", "def LaunchFile(*params):\n\n file = subprocess.Popen(params)\n file.communicate()\n return file.returncode", "def run_with_subprocess(cmd):\n new_env = dict(os.environ, LC_ALL='C')\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=new_env)\n output, error = proc.communicate()\n returncode = proc.returncode\n except OSError, (errno, strerror):\n output, error = \"\", \"Could not execute %s: %s\" % (cmd[0], strerror)\n returncode = 1\n\n return (output, error, returncode)", "def _subprocess(cmd):\n\n log.debug('Running: \"%s\"', \" \".join(cmd))\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()\n retcode = proc.wait()\n\n if ret:\n return ret\n elif retcode != 1:\n return True\n else:\n return False\n except OSError as err:\n log.error(err)\n return False", "def exec_and_return(execargs):\n return subprocess.call(execargs)", "def create_subprocess(command, args):\n\n proc = subprocess.Popen([command] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, _ = proc.communicate()\n\n return proc.returncode, output", "def exec_command(cmd):\n with subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True) as p:\n stdout, _ = p.communicate()\n if p.returncode != 0:\n logger.error(stdout)\n return None\n\n return stdout", "def _proc_exec_wait(command_line, silent=False):\n result = (-1, None, None)\n command = None\n proc = None\n\n if _platform_windows:\n command_line = command_line.replace(\"\\\\\", \"/\")\n\n try:\n command = shlex.split(command_line)\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Unable to parse the given command line: {0}\\n\"\n \"Error: {1}.\".format(command_line, e)\n )\n return result\n\n try:\n sp_kwargs = {\n \"stdout\": subprocess.PIPE,\n \"stderr\": subprocess.PIPE,\n \"startupinfo\": None,\n \"env\": os.environ,\n }\n\n if _platform_windows:\n sp_kwargs[\"startupinfo\"] = subprocess.STARTUPINFO()\n sp_kwargs[\"startupinfo\"].dwFlags = (\n subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW\n )\n sp_kwargs[\"startupinfo\"].wShowWindow = subprocess.SW_HIDE\n\n proc = subprocess.Popen(command, **sp_kwargs)\n stdoutdata, stderrdata = proc.communicate()\n status = proc.returncode\n result = (status, stdoutdata.decode(\"utf8\"), stderrdata.decode(\"utf8\"))\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Could not open the process: '{0}'\\n\"\n \"Error: {1}.\".format(command[0], e)\n )\n finally:\n if proc:\n if proc.stdout:\n proc.stdout.close()\n\n if proc.stderr:\n proc.stderr.close()\n\n return result", "def run(cmd, cmd_input=None, cwd=None):\n\n with Popen(\n \" \".join(cmd) if cwd else cmd,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE,\n cwd=cwd,\n shell=True,\n env={\"PATH\": cwd} if cwd else None,\n ) as proc:\n out, err = proc.communicate(\n input=cmd_input.encode(\"utf-8\") if cmd_input else None\n )\n rcode = proc.returncode\n\n return out.decode(\"utf-8\"), err.decode(\"utf-8\"), rcode", "def _exec_command(command):\n\n log(\"Run command for '%s'\" % command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n p_status = p.wait()\n return p_status, output", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n 
print output", "def popen(command, cwd=None, check=False, detach=False):\n\tif detach:\n\t\treturn spawn(command, cwd)\n\telse:\n\t\tcmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)\n\t\tstatus = cmd.wait()\n\t\tres, err = cmd.communicate()\n\t\tif status == 0:\n\t\t\treturn res.decode(\"utf8\")\n\t\telse:\n\t\t\treturn (status, err.decode(\"utf8\"))", "def _exec_cmd(cmd, stdout=None, stderr=None):\n rc = 0\n kwargs = {}\n if stdout is not None:\n kwargs[\"stdout\"] = stdout\n if stderr is not None:\n kwargs[\"stderr\"] = stderr\n try:\n subprocess.check_call(cmd, **kwargs)\n except CalledProcessError as e:\n LOG.error(\"[return code: %s] %s\", e.returncode, e)\n rc = e.returncode\n return rc", "def run(cmd):\n print('running', cmd)\n proc = sp.Popen([cmd], shell=True)\n proc.wait()\n assert proc.poll() == 0", "def system_call(command):\n p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)\n return p.stdout.read()", "def run_command(cmd):\n return subprocess.call(cmd, shell=True)", "def subprocess(command):\n from sys import executable as python\n from subprocess import Popen,PIPE\n from sys import stderr\n command = \"from %s import *; %s\" % (modulename(),command)\n for attempt in range(0,3):\n try:\n process = Popen([python,\"-c\",command],stdout=PIPE,stderr=PIPE,\n universal_newlines=True)\n break\n except OSError,msg: # [Errno 513] Unknown error 513\n log(\"subprocess: %s\" % msg)\n sleep(1)\n output,error = process.communicate()\n if \"Traceback\" in error: raise RuntimeError(repr(command)+\"\\n\"+error)\n if error: stderr.write(error)\n return output", "def execute(args):\n print '################################'\n print 'args: ', args\n p = subprocess.Popen(args, shell=True, executable='/bin/bash')\n # p = subprocess.call(args, shell=True, executable='/bin/bash')\n p.wait()\n return p\n print '################################'", "def run_subprocess(self, input_value):\n try:\n proc = Popen([\"python\", self.SCRIPT_NAME],\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE)\n out_value, err_value = proc.communicate(\n input_value.encode(self.ENCODING),\n timeout=self.PROCESS_TIMEOUT)\n except TimeoutExpired:\n proc.kill()\n out_value, err_value = proc.communicate()\n return out_value.decode(self.ENCODING), err_value.decode(self.ENCODING)", "def main(argv: List[str]) -> int:\n with open(argv[1]) as f:\n popen_description = json.load(f)\n commands = popen_description[\"commands\"]\n cwd = popen_description[\"cwd\"]\n env = popen_description[\"env\"]\n env[\"PATH\"] = os.environ.get(\"PATH\")\n stdin_path = popen_description[\"stdin_path\"]\n stdout_path = popen_description[\"stdout_path\"]\n stderr_path = popen_description[\"stderr_path\"]\n if stdin_path is not None:\n stdin: Union[BinaryIO, int] = open(stdin_path, \"rb\")\n else:\n stdin = subprocess.PIPE\n if stdout_path is not None:\n stdout: Union[BinaryIO, TextIO] = open(stdout_path, \"wb\")\n else:\n stdout = sys.stderr\n if stderr_path is not None:\n stderr: Union[BinaryIO, TextIO] = open(stderr_path, \"wb\")\n else:\n stderr = sys.stderr\n\n try:\n env_script: Optional[str] = argv[2]\n except IndexError:\n env_script = None\n if env_script is not None:\n env = handle_software_environment(env, env_script)\n\n sp = subprocess.Popen( # nosec\n commands,\n shell=False,\n close_fds=True,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n env=env,\n cwd=cwd,\n )\n if sp.stdin:\n sp.stdin.close()\n rcode = sp.wait()\n if not isinstance(stdin, int):\n stdin.close()\n if 
stdout is not sys.stderr:\n stdout.close()\n if stderr is not sys.stderr:\n stderr.close()\n return rcode", "def run_shell_command(program: str, args: list, separator = None):\n cmd = [program]\n\n for arg in args:\n cmd.append(arg)\n\n return subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode(\"utf-8\")", "def execute(cmd, output_file, env={}):\n return subprocess.Popen(shlex.split(cmd),\n stderr=subprocess.STDOUT,\n stdout=open(output_file, \"w\"),\n env = dict(os.environ, **env))", "def subprocess_cmd(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n\n return(proc_stdout)", "def call(command):\n cmd = join_and_sanitize(command)\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True)\n result, _err = proc.communicate()\n return result", "def system(command):\n print('[system] {}'.format(command))\n p = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n rc = p.returncode\n if PY3:\n output = output.decode(\"ascii\")\n err = err.decode(\"ascii\")\n return rc, output, err", "def execute(parent, cmd, *args, **kwargs):\n\n with xtrace(parent, flatten(cmd)) as h:\n try:\n code = subprocess.call(cmd, *args, **kwargs)\n except:\n sys.exit(\n DiagnosticReporter.fatal(EXCEPTION_EXECUTING_PROCESS, cmd[0]))\n finally:\n h.report(code)\n return code", "def RunProcess(self, command, env=None, stdin=None, stdout=None, stderr=None):\n\n # merge specified env with OS env\n myenv = os.environ.copy()\n if env is not None:\n myenv.update(env)\n\n try:\n process = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr, env=myenv, bufsize=0)\n return process\n except:\n print(\"Unexpected error when launching process:\")\n print(\" \", command)\n print(\" \", env)\n raise", "def run_subprocess(args, work_dir):\n process = subprocess.Popen(args, cwd=work_dir)\n process.communicate()\n assert process.returncode == 0", "def _Shell(*cmd, **kw):\n _LOGGER.info('Executing %s.', cmd)\n prog = subprocess.Popen(cmd, shell=True, **kw)\n\n stdout, stderr = prog.communicate()\n if prog.returncode != 0:\n raise RuntimeError('Command \"%s\" returned %d.' % (cmd, prog.returncode))\n return (stdout, stderr)", "def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):\n return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "def run_program ( name, command ):\n\n # Create temporary file to write stdout, stderr of the program\n tmpfile,tmpname = tempfile.mkstemp()\n ret_code = subprocess.call ( command, shell = True , stdout=tmpfile, stderr=tmpfile)\n # Catch some error codes\n if ret_code == 127:\n msg = \"Unable to find %s executable, please make sure this is present in your PATH.\"%name\n logger.error(msg) \n raise Exception ( msg )\n if ret_code == 1:\n # Get the error message from the temporary file\n errmsg = \"\\n\".join(line for line in open(tmpname).readlines())\n os.remove(tmpname)\n msg = \"%s was not able to run successfully. Please check output. 
Error message was:\\n%s\"%(name,errmsg)\n raise Exception ( msg )\n\n os.remove(tmpname)", "def run(cmd, fail=True, capture_stdout=False, capture_stderr=False, verbose=False):\n stdout, stderr = None, None\n if capture_stderr:\n stderr = subprocess.PIPE\n if capture_stdout:\n stdout = subprocess.PIPE\n\n if verbose:\n print(cmd)\n\n p = subprocess.Popen(['bash', '-c', cmd], stderr=stderr, stdout=stdout)\n if p.returncode and fail:\n sys.exit(1)\n\n return p", "def subprocess_run(cmd):\n print(shlex.join(cmd))\n try:\n ret = subprocess.run(cmd, capture_output=True,\n text=True, env=os.environ.copy(), check=True)\n if (ret.stdout):\n print(ret.stdout)\n return ret\n except subprocess.CalledProcessError as e:\n if (e.stderr):\n print(e.stderr)\n raise e", "def externalCommand(command, communicate=True):\n command = shlex.split(str(command))\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if communicate:\n return proc.communicate()\n return proc", "def run_script(command):\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n output = process.communicate()[0]\n success = process.poll() == 0\n return (success, output)", "def run(cmd):\n \n proc = subprocess.Popen (cmd, \n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True\n )\n stdout_value, stderr_value = proc.communicate()\n print stdout_value\n print stderr_value\n\n if proc.poll() > 0:\n sys.stderr.write ( \"\\nError\\n\" )\n print '\\tstderr:', repr(stderr_value.rstrip())\n return False\n else:\n return True", "def system(self,cmd):\n code = 'import os;f=os.popen(\"%s\");res = f.read(-1);f.close();' % cmd\n return self.exec_code(code,returns=['res'])", "def run(*args, **kwargs):\n _patch_popen(Popen)\n assert len(args) > 0\n\n arguments = []\n\n input = kwargs.pop('input', None)\n fail_on_error = kwargs.pop('fail_on_error', False)\n encoding = kwargs.pop('encoding', None) \n\n if len(args) == 1:\n if isinstance(args[0], string):\n arguments = args[0].split()\n else:\n for i in args:\n if isinstance(i, (list, tuple)):\n for j in i:\n arguments.append(j)\n else:\n arguments.append(i)\n\n def set_default_kwarg(key, default): \n kwargs[key] = kwargs.get(key, default)\n\n set_default_kwarg('stdin', PIPE)\n set_default_kwarg('stdout', PIPE)\n set_default_kwarg('stderr', PIPE)\n\n\n proc = Popen(arguments, **kwargs)\n stdout, stderr = proc.communicate(input)\n \n if encoding is not None:\n stdout = stdout.decode(encoding=encoding)\n stderr = stderr.decode(encoding=encoding)\n\n result = RunResult(proc.returncode, stdout, stderr)\n \n if fail_on_error and proc.returncode != 0:\n raise ProcessError(' '.join(arguments), result)\n\n return result", "def run(cmd, proc_stdout = sys.stdout, proc_stderr = sys.stderr,\n check = True):\n print cmd\n proc = subprocess.Popen(cmd, shell=True, bufsize=-1,\n stdout=proc_stdout, stderr=proc_stderr)\n output, errors = proc.communicate()\n sts = proc.wait()\n if check is True and sts != 0:\n raise RuntimeError(\"Command: %s exited with non-zero status %i\" % (cmd, sts))\n return output, errors", "def sys_exec(command):\n print('Running: {}'.format(command))\n return os.popen(command).read().rstrip()", "def run_subprocess(self, *cmd_and_args):\n\n command_line = \" \".join(cmd_and_args)\n self.logger.debug(\"Running: %s\", command_line)\n\n return subprocess.Popen(command_line, shell=True, close_fds=True)", "def call(command, working_directory=BASE_DIR):\r\n LOG.info(command)\r\n p = 
subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_directory, shell=True)\r\n out, err = p.communicate()\r\n return (out, err)", "def run_command(shell_command, get_output):\n command_ran = subprocess.run(shell_command, capture_output=get_output)\n return command_ran", "async def _run_subprocess(\n cmd: str,\n allow_params: bool,\n params: Dict[str, ParamValueT],\n) -> Dict[str, Any]:\n cmd_str = cmd\n if allow_params:\n if params[\"shell_params\"] == []:\n cmd_str = cmd.format([''])\n else:\n cmd_str = cmd.format(*params.get('shell_params', ['']))\n\n logging.info(\"Running command: %s\", cmd_str)\n\n cmd_list = shlex.split(cmd_str)\n\n process = await asyncio.create_subprocess_exec(\n *cmd_list,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n stdout, stderr = await process.communicate()\n\n return {\n \"returncode\": process.returncode,\n \"stdout\": stdout.decode(),\n \"stderr\": stderr.decode(),\n }", "def execCommand(command: str):\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n \n return stdout.decode(\"utf-8\")", "def spawn(self, arguments=None, environment=None):\n return subprocess.Popen(\n args=[self.executable] + ([] or arguments),\n # do not redirect std streams\n # this fakes the impression of having just one program running\n stdin=None,\n stdout=None,\n stderr=None,\n env=environment,\n )", "def call(cmd, shell=False):\n logger.debug(' '.join(cmd))\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=shell)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise IOError(stderr)\n if stderr:\n logger.error(stderr.decode('utf-8'))\n return stdout", "def runin(cmd, stdin):\n result = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n result.wait()\n return result.returncode", "def run_subprocess_cmd(command, print_cmd=True, print_stdout_stderr=True, get_returncode=False):\n if print_cmd:\n print\n print 'Running command:\\n%s' % command\n print \n\n sp = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n shell=True)\n out, error = sp.communicate() \n if print_stdout_stderr:\n print\n print out\n print\n print error\n print\n\n if get_returncode:\n return out, error, sp.returncode\n else:\n return out, error", "def run_command(self, cmd, expects=0, shell=False, stdout=PIPE, stderr=PIPE):\n \n # If the command argument is a string\n if isinstance(cmd, str):\n cmd = cmd.split(' ')\n \n # Open the process\n try:\n proc = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)\n out, err = proc.communicate()\n \n # Make sure the expected return code is found\n if not proc.returncode == expects:\n self.die('Failed to run command \\'{0}\\', ERROR={1}'.format(str(cmd), err))\n \n # Return exit code / stdout / stderr\n return proc.returncode, out, err\n except Exception as e:\n self.die('Failed to run command \\'{0}\\': ERROR={1}'.format(str(cmd), str(e)))", "def shell(args, wait=True, msg=None):\n\n # Fix Windows error if passed a string\n if isinstance(args, str):\n args = shlex.split(args, posix=(os.name != \"nt\"))\n if os.name == \"nt\":\n args = [arg.replace('/', '\\\\') for arg in args]\n\n if wait:\n proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate(input=msg)\n exitcode = proc.returncode\n if exitcode != 0:\n debug('<<<< shell call failed; error message below >>>>')\n debug(err.decode('utf-8'))\n 
debug('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n raise IOError()\n return out\n else:\n DETACHED_PROCESS = 0x00000008\n proc = Popen(args, creationflags=DETACHED_PROCESS)", "def spawn(*args):\n # Adapted from ranger.ext.spawn\n process = Popen(args, stdout=PIPE, shell=True)\n stdout, stderr = process.communicate()\n return stdout.decode('utf-8')", "def process_run(cmd_string, stdin=None):\n process_object=subprocess.Popen(shlex.split(cmd_string),\n stdin=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return process_object", "def exec_test_command(cmd):\n process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)\n result = process.communicate()\n return (\n process.returncode,\n bytes(result[0]).decode(\"utf-8\"),\n bytes(result[1]).decode(\"utf-8\"),\n )", "def run_command(*args):\n cmd = sp.Popen(args, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT, encoding='utf-8')\n stdout, _ = cmd.communicate()\n\n if cmd.returncode != 0:\n raise ValueError(f\"Running `{args[0]}` failed with return code {cmd.returncode}, output: \\n {stdout}\")\n else:\n return stdout.strip('\\n')", "def execute(cmd) :\n return os.system( cmd )", "def run(command, params={}):\n pass_params = {'stdout': PIPE, 'stderr': PIPE}\n pass_params.update(params)\n process = Popen(command, **pass_params)\n System.log_subprocess_output(process)", "def checked_subprocess_run(command):\n args = shlex.split(command)\n completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out = completed.stdout.decode()\n err = completed.stderr.decode()\n\n # Print the subprocess output to include in the test output\n print(out, file=sys.stdout)\n print(err, file=sys.stderr)\n\n # After printing the output, raise an exception on a non-zero exit status.\n completed.check_returncode()\n\n return out, err", "def run(cmd, directory, fail_ok=False, verbose=False):\n if verbose:\n print(cmd)\n p = subprocess.Popen(cmd,\n cwd=directory,\n stdout=subprocess.PIPE)\n (stdout, _) = p.communicate()\n if p.returncode != 0 and not fail_ok:\n raise RuntimeError('Failed to run {} in {}'.format(cmd, directory))\n return stdout", "def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False,\n universal_newlines=False, close_fds=True, env=None):\n LOG.info(\"Running cmd '%s'\" % \" \".join(cmdline))\n kwargs = {}\n kwargs['stdout'] = stdout and subprocess.PIPE or None\n kwargs['stderr'] = stderr and subprocess.PIPE or None\n kwargs['stdin'] = stdin and subprocess.PIPE or None\n kwargs['universal_newlines'] = universal_newlines\n kwargs['close_fds'] = close_fds\n kwargs['env'] = env\n try:\n proc = subprocess.Popen(cmdline, **kwargs)\n except OSError, e: # noqa\n if e.errno == errno.ENOENT:\n raise CommandNotFound\n else:\n raise\n return proc", "def run(cmd):\n print ' '.join(cmd)\n try:\n check_call(cmd)\n except CalledProcessError as cpe:\n print \"Error: return code: \" + str(cpe.returncode)\n sys.exit(cpe.returncode)", "def call(seq):\n return subprocess.Popen(seq,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).communicate()[0]", "def Run(cmd, include_stderr=False, return_pipe=False):\n cmd = to_unicode(str(cmd))\n if include_stderr:\n err = STDOUT\n fds = True\n else:\n err = None\n fds = False\n tmpenv = os.environ.copy()\n tmpenv[\"LC_ALL\"] = \"C\"\n tmpenv[\"LANG\"] = \"C\"\n f = Popen(cmd, shell=True, stdout=PIPE, stderr=err, close_fds=fds,\n env=tmpenv)\n if return_pipe:\n return f.stdout\n else:\n return f.communicate()[0]", "def execute_command(command):\n proc = 
subprocess.Popen(\n [\"/bin/bash\"], shell=True, cwd=os.environ['PWD'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n )\n proc.stdin.write(command)\n stdout, stderr = proc.communicate()\n rc = proc.returncode\n\n return stdout, stderr, rc", "def run(self,command):\n #--------------------------------------------------------------------------\n res = subprocess.run(command,stdout=subprocess.DEVNULL,stderr=subprocess.STDOUT).returncode\n return res", "def run_cmd(cmd, args, path=None, raise_error=True):\n\n if path is not None:\n # Transparently support py.path objects\n path = str(path)\n\n p = sp.Popen([cmd] + list(args), stdout=sp.PIPE, stderr=sp.PIPE,\n cwd=path)\n streams = tuple(s.decode('latin1').strip() for s in p.communicate())\n return_code = p.returncode\n\n if raise_error and return_code != 0:\n raise RuntimeError(\n \"The command `{0}` with args {1!r} exited with code {2}.\\n\"\n \"Stdout:\\n\\n{3}\\n\\nStderr:\\n\\n{4}\".format(\n cmd, list(args), return_code, streams[0], streams[1]))\n\n return streams + (return_code,)", "def run_external(cmd):\n args = shlex.split(cmd, posix=(os.name == 'posix'))\n p = run(args, capture_output=True)\n return p.returncode, p.stdout.decode(), p.stderr.decode()", "def run_command(command):\n process = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n response, error = process.communicate()\n return response.decode().rstrip('\\n'), error.decode().rstrip('\\n')", "def shell_call(cmd):\n try:\n x = subprocess.run(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True\n )\n ret = (x.returncode, str(x.stdout, \"utf-8\"), str(x.stderr, \"utf-8\"))\n return ret\n except subprocess.SubprocessError as e:\n logger.error(\"System error running command: \" + str(cmd))\n logger.error(str(e.output))\n raise RuntimeError()", "def run_command(cmd):\n if env.PY2 and isinstance(cmd, unicode):\n cmd = cmd.encode(sys.getfilesystemencoding())\n\n # In some strange cases (PyPy3 in a virtualenv!?) the stdout encoding of\n # the subprocess is set incorrectly to ascii. 
Use an environment variable\n # to force the encoding to be the same as ours.\n sub_env = dict(os.environ)\n encoding = output_encoding()\n if encoding:\n sub_env['PYTHONIOENCODING'] = encoding\n\n proc = subprocess.Popen(\n cmd,\n shell=True,\n env=sub_env,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output, _ = proc.communicate()\n status = proc.returncode\n\n # Get the output, and canonicalize it to strings with newlines.\n if not isinstance(output, str):\n output = output.decode(output_encoding())\n output = output.replace('\\r', '')\n\n return status, output", "def run_this(command_to_run, cwd=os.getcwd()):\n slab_logger.debug('Running shell command \"%s\"' % command_to_run)\n try:\n output = subprocess.Popen(command_to_run,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n close_fds=True,\n cwd=cwd)\n\n myinfo = output.communicate()[0]\n myinfo.strip()\n return(output.returncode, myinfo)\n except OSError, ex:\n slab_logger.error(ex)\n return (1, str(ex))", "def run_process(self, inp=\"\"):\n return subprocess.run(self.binary,\n input=inp,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def exec_proc(self):\n\n\t\ttry:\n\t\t\tp = os.system(self.cmd)\n\t\texcept:\n\t\t\t# Lock stderr so that we can write safely\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_EX)\n\t\t\tsys.stderr.flush()\n\t\t\t# print error message\n\t\t\tsys.stderr.write(\"%s running %s\\nraised %s: %s\\n\" % \\\n\t\t\t\t(self.name, self.cmd, \\\n\t\t\t\tsys.exc_info()[0], sys.exc_info()[1]))\n\t\t\tsys.stderr.flush()\n\t\t\t# unlock stderr\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_UN)\n\t\t\t# Always return. Otherwise, semaphore counter\n\t\t\t# will not be incremented. 
\n\t\t\treturn\n\t\t# If the command errors out\n\t\tif p != 0:\n\t\t\t# Lock stderr so that we can write safely\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_EX)\n\t\t\tsys.stderr.flush()\n\t\t\t# print error code\n\t\t\tsys.stderr.write(\"Error running: %s\\n\" % self.cmd)\n\t\t\tsys.stderr.write(\"%s returned error code %d\\n\" \\\n\t\t\t\t% (self.host, p))\n\t\t\tsys.stderr.flush()\n\t\t\t# unlock stderr\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_UN)", "def run(cmd: str) -> None:\n subprocess.run(cmd, shell=True, check=True)", "def subprocess_call(command):\n try:\n return_out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)\n if return_out.strip():\n print return_out\n except subprocess.CalledProcessError, err:\n msg = \"Subprocess call failed!\"\\\n \"\\n command : {0}\"\\\n \"\\n console output: \\n\\n{1}\"\\\n \"\\n error message : {2}\"\\\n \"\\n arguments : {3}\"\\\n \"\\n return-code : {4}\\n\"\\\n .format(err.cmd, err.output, err.message, err.args, err.returncode)\n raise Exception(msg)\n\n return return_out", "def execute_cmd(cmd, verb=False):\n if verb:\n print(\"Executing: {}\".format(cmd))\n\n p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n code = p.returncode\n if code:\n sys.exit(\"Error {}: {}\".format(code, err))\n return out, err", "def system_call(command):\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n return process.communicate()[0]", "def run_command(cmd, cmd_input=None, ok_exit_codes=None):\n proc = make_subprocess(cmd, stdout=True, stderr=True, stdin=True,\n close_fds=True)\n return finish_subprocess(proc, cmd, cmd_input=cmd_input,\n ok_exit_codes=ok_exit_codes)", "def start_process(cmd, supress_output=False):\n logging.debug(cmd)\n logging.error(\"[tony]cmd:%r\" % (cmd))\n proc = subprocess.Popen(cmd, stdout=None, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rtn_code = proc.returncode\n\n if supress_output is False:\n if out:\n logging.info(out)\n if err:\n logging.error(err)\n\n if rtn_code == 0 or rtn_code is None:\n logging.info('Success: Process return code %s', str(rtn_code))\n else:\n logging.error('Error: Process return code %s', str(rtn_code))\n sys.exit(1)", "def runSubProcess(command, verbose=0):\n if verbose:\n print(command)\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = process.communicate()\n status = process.poll()\n \n return output.decode('utf-8'), stderr.decode('utf-8'), status", "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None" ]
[ "0.7069853", "0.70137537", "0.6977988", "0.69574434", "0.69218326", "0.6837311", "0.68356645", "0.678155", "0.6761188", "0.6706355", "0.6686737", "0.66564906", "0.6626361", "0.66172713", "0.65875185", "0.65428317", "0.6520404", "0.6509777", "0.6488528", "0.6487521", "0.6481163", "0.6472606", "0.64667803", "0.64627975", "0.6438445", "0.64353824", "0.643385", "0.6382585", "0.6382223", "0.6374723", "0.63567185", "0.63533795", "0.633184", "0.6329263", "0.63234794", "0.6318239", "0.63103855", "0.63031816", "0.6299233", "0.62665325", "0.6264467", "0.6258197", "0.6251683", "0.6248416", "0.62416506", "0.62383026", "0.6232939", "0.6230599", "0.62183684", "0.62085176", "0.6192522", "0.61915666", "0.61833245", "0.6179844", "0.61605126", "0.6158111", "0.61567783", "0.61440414", "0.614288", "0.6141531", "0.61394", "0.6138065", "0.6128499", "0.6111747", "0.608678", "0.6084408", "0.6084138", "0.6076014", "0.6074681", "0.6071332", "0.60669225", "0.6065395", "0.6064472", "0.60604846", "0.6059634", "0.6058985", "0.60555136", "0.6054046", "0.60517114", "0.6049817", "0.6044365", "0.6041424", "0.60396844", "0.60394037", "0.60310537", "0.6027108", "0.60227686", "0.6021454", "0.6018446", "0.601618", "0.60107803", "0.60035235", "0.6002959", "0.5999412", "0.59846765", "0.59825844", "0.5977997", "0.5965834", "0.59635967", "0.596009", "0.5945995" ]
0.0
-1
Opens a subprocess to execute a program and returns its output.
def GetCmdOutput(args, cwd=None, shell=False, env=None):
  (_, output) = GetCmdStatusAndOutput(args, cwd, shell, env)
  return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output", "def runProgram(cmd):\n try:\n p=subprocess.Popen(cmd,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n (stdout, stderr) = p.communicate()\n except Exception as e:\n print(\"Error running program because: {0}\".format(e), file=errorOutput)\n return None\n else:\n if stderr:\n print(\"Error running program because: {0} \".format(stderr), file=errorOutput)\n return None\n else:\n trace(\"runProgram() => \" + str(stdout), minLevel=2)\n return stdout.decode('utf-8').rstrip('\\n')", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def exec_process(cmdline, silent, input=None, **kwargs):\n try:\n sub = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n stdout, stderr = sub.communicate(input=input)\n returncode = sub.returncode\n if not silent:\n sys.stdout.write(stdout)\n sys.stderr.write(stderr)\n except OSError, e:\n if e.errno == 2:\n raise RuntimeError('\"%s\" is not present on this system' % cmdline[0])\n else:\n raise\n if returncode != 0:\n raise RuntimeError('Got return value %d while executing \"%s\", stderr output was:\\n%s' % (returncode, \" \".join(cmdline), stderr.rstrip(\"\\n\")))\n return stdout", "def run_shell_command(program: str, args: list, separator = None):\n cmd = [program]\n\n for arg in args:\n cmd.append(arg)\n\n return subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode(\"utf-8\")", "def runCommand(command):\n process = subprocess.Popen(command, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n return process.communicate()", "def run_subprocess(cmd):\n subprocess.Popen(cmd, stdin =subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,)", "def run_subprocess(command, environment=None, shell=False, raise_on_error=True):\n proc = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell,\n env=environment)\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n if raise_on_error:\n raise RuntimeError('{}\\n{}'.format(stderr, stdout))\n return stdout, stderr, proc.returncode", "def system_call(command):\n p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)\n return p.stdout.read()", "def subprocess_run(cmd, ignore_failure=False, shell=True):\n try:\n proc = subprocess.Popen(\n cmd,\n shell=shell,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rc = proc.returncode\n\n except OSError as exc:\n raise RuntimeError('Failed to run ' + cmd + ': [Errno: %d] ' %\n exc.errno + exc.strerror + ' [Exception: ' +\n type(exc).__name__ + ']')\n if (not ignore_failure) and (rc != 0):\n raise RuntimeError('(%s) failed with rc=%s: %s' %\n (cmd, rc, err))\n return out", "def subprocess_cmd(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n\n return(proc_stdout)", "def shell_cmd(*args):\n proc = subprocess.run(args)\n 
returncode = proc.returncode\n if returncode != 0:\n raise RuntimeError(\n f\"Command {args} failed with return code {returncode}\")\n return proc", "def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()", "def RunCommand(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, shell=False)\n return proc.communicate()[0]", "def run_program(program, args=None, **subprocess_kwargs):\n if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:\n raise ProgramError(\n \"This function is only for non-shell programs, \"\n \"use run_shell_command() instead.\")\n fullcmd = find_program(program)\n if not fullcmd:\n raise ProgramError(\"Program %s was not found\" % program)\n # As per subprocess, we make a complete list of prog+args\n fullcmd = [fullcmd] + (args or [])\n for stream in ['stdin', 'stdout', 'stderr']:\n subprocess_kwargs.setdefault(stream, subprocess.PIPE)\n subprocess_kwargs = alter_subprocess_kwargs_by_platform(\n **subprocess_kwargs)\n return subprocess.Popen(fullcmd, **subprocess_kwargs)", "def execute(cmd, output_file, env={}):\n return subprocess.Popen(shlex.split(cmd),\n stderr=subprocess.STDOUT,\n stdout=open(output_file, \"w\"),\n env = dict(os.environ, **env))", "def call(command):\n cmd = join_and_sanitize(command)\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True)\n result, _err = proc.communicate()\n return result", "def exec_command(cmd):\n with subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True) as p:\n stdout, _ = p.communicate()\n if p.returncode != 0:\n logger.error(stdout)\n return None\n\n return stdout", "def _run_cmd(*args):\n proc = Popen(\n args, stdin=PIPE, stdout=PIPE, stderr=PIPE,\n cwd=os.path.dirname(__file__))\n output, _ = proc.communicate()\n code = proc.returncode\n return code, output", "def call(self):\n\n process = subprocess.Popen(self._cmd, stdout=self._stdout, stderr=self._stderr,\n shell=isinstance(self._cmd, basestring), env=self._env, cwd=self._cwd)\n returnData = process.communicate()\n\n return ProcessResult(process.returncode, returnData[0], returnData[1])", "def execute(cmd):\n\n process = subprocess.Popen(cmd,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out = ''\n err = ''\n exitcode = 0\n\n result = process.communicate()\n (out, err) = result\n exitcode = process.returncode\n\n return exitcode, out.decode(), err.decode()", "def Subprocess(self, cmd):\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def subprocess_run(cmd):\n print(shlex.join(cmd))\n try:\n ret = subprocess.run(cmd, capture_output=True,\n text=True, env=os.environ.copy(), check=True)\n if (ret.stdout):\n print(ret.stdout)\n return ret\n except subprocess.CalledProcessError as e:\n if (e.stderr):\n print(e.stderr)\n raise e", "def run(command, params={}):\n pass_params = {'stdout': PIPE, 'stderr': PIPE}\n pass_params.update(params)\n process = Popen(command, **pass_params)\n System.log_subprocess_output(process)", "def run_command(shell_command, get_output):\n command_ran = subprocess.run(shell_command, capture_output=get_output)\n return command_ran", "def run_command(cmd):\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n return proc.returncode, stdout, stderr", "def system_call(command):\n 
print(\"\\n### {}\".format(command))\n stderr = subprocess.STDOUT\n pipe = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n stdout, stderr = pipe.communicate()\n print(stdout)", "def run(cmd, cmd_input=None, cwd=None):\n\n with Popen(\n \" \".join(cmd) if cwd else cmd,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE,\n cwd=cwd,\n shell=True,\n env={\"PATH\": cwd} if cwd else None,\n ) as proc:\n out, err = proc.communicate(\n input=cmd_input.encode(\"utf-8\") if cmd_input else None\n )\n rcode = proc.returncode\n\n return out.decode(\"utf-8\"), err.decode(\"utf-8\"), rcode", "def run(cmd, shell=False, cwd=None):\n try:\n out = check_output(cmd, shell=shell, cwd=cwd, stderr=STDOUT)\n except CalledProcessError as ex:\n return ex.returncode, ex.output\n else:\n return 0, out", "def sys_exec(command):\n print('Running: {}'.format(command))\n return os.popen(command).read().rstrip()", "def execCommand(command: str):\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n \n return stdout.decode(\"utf-8\")", "def subprocess(command):\n from sys import executable as python\n from subprocess import Popen,PIPE\n from sys import stderr\n command = \"from %s import *; %s\" % (modulename(),command)\n for attempt in range(0,3):\n try:\n process = Popen([python,\"-c\",command],stdout=PIPE,stderr=PIPE,\n universal_newlines=True)\n break\n except OSError,msg: # [Errno 513] Unknown error 513\n log(\"subprocess: %s\" % msg)\n sleep(1)\n output,error = process.communicate()\n if \"Traceback\" in error: raise RuntimeError(repr(command)+\"\\n\"+error)\n if error: stderr.write(error)\n return output", "def call_subprocess_Popen(command, **params):\r\n if 'stdout' in params or 'stderr' in params:\r\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\r\n null = open(os.devnull, 'wb')\r\n # stdin to devnull is a workaround for a crash in a weird Windows\r\n # environement where sys.stdin was None\r\n params.setdefault('stdin', null)\r\n params['stdout'] = null\r\n params['stderr'] = null\r\n p = subprocess_Popen(command, **params)\r\n p.wait()\r\n return p.returncode", "def _exec_command(command):\n\n log(\"Run command for '%s'\" % command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n p_status = p.wait()\n return p_status, output", "def run(cmd, dieOnError=True):\n\n\tps = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\texitcode = ps.returncode\n\tstdout,stderr = ps.communicate()\n\treturn exitcode, stdout, stderr", "def externalCommand(command, communicate=True):\n command = shlex.split(str(command))\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if communicate:\n return proc.communicate()\n return proc", "def call(command, working_directory=BASE_DIR):\r\n LOG.info(command)\r\n p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_directory, shell=True)\r\n out, err = p.communicate()\r\n return (out, err)", "def run_process(self, inp=\"\"):\n return subprocess.run(self.binary,\n input=inp,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def run_program ( name, command ):\n\n # Create temporary file to write stdout, stderr of the program\n tmpfile,tmpname = tempfile.mkstemp()\n ret_code = subprocess.call ( command, shell = True , stdout=tmpfile, stderr=tmpfile)\n # Catch some error 
codes\n if ret_code == 127:\n msg = \"Unable to find %s executable, please make sure this is present in your PATH.\"%name\n logger.error(msg) \n raise Exception ( msg )\n if ret_code == 1:\n # Get the error message from the temporary file\n errmsg = \"\\n\".join(line for line in open(tmpname).readlines())\n os.remove(tmpname)\n msg = \"%s was not able to run successfully. Please check output. Error message was:\\n%s\"%(name,errmsg)\n raise Exception ( msg )\n\n os.remove(tmpname)", "def run_command(*args):\n cmd = sp.Popen(args, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT, encoding='utf-8')\n stdout, _ = cmd.communicate()\n\n if cmd.returncode != 0:\n raise ValueError(f\"Running `{args[0]}` failed with return code {cmd.returncode}, output: \\n {stdout}\")\n else:\n return stdout.strip('\\n')", "def Run(cmd):\n return os.popen(cmd).read()", "def subprocess_Popen(command, **params):\r\n startupinfo = None\r\n if os.name == 'nt':\r\n startupinfo = subprocess.STARTUPINFO()\r\n try:\r\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n except AttributeError:\r\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\r\n\r\n # Anaconda for Windows does not always provide .exe files\r\n # in the PATH, they also have .bat files that call the corresponding\r\n # executable. For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\r\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\r\n # execute \"g++\" without extensions.\r\n # (Executing \"g++.bat\" explicitly would also work.)\r\n params['shell'] = True\r\n\r\n # Using the dummy file descriptors below is a workaround for a\r\n # crash experienced in an unusual Python 2.4.4 Windows environment\r\n # with the default None values.\r\n stdin = None\r\n if \"stdin\" not in params:\r\n stdin = open(os.devnull)\r\n params['stdin'] = stdin.fileno()\r\n\r\n try:\r\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\r\n finally:\r\n if stdin is not None:\r\n del stdin\r\n return proc", "def run_command(cmd):\n return subprocess.call(cmd, shell=True)", "def run_subprocess(self, input_value):\n try:\n proc = Popen([\"python\", self.SCRIPT_NAME],\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE)\n out_value, err_value = proc.communicate(\n input_value.encode(self.ENCODING),\n timeout=self.PROCESS_TIMEOUT)\n except TimeoutExpired:\n proc.kill()\n out_value, err_value = proc.communicate()\n return out_value.decode(self.ENCODING), err_value.decode(self.ENCODING)", "def run_subprocess(text, args):\n proc = subprocess.run(\n args,\n input=text,\n encoding='utf-8',\n stdout=subprocess.PIPE)\n return proc.stdout.strip()", "def call(cmd, shell=False):\n logger.debug(' '.join(cmd))\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=shell)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise IOError(stderr)\n if stderr:\n logger.error(stderr.decode('utf-8'))\n return stdout", "def run(cmd, directory, fail_ok=False, verbose=False):\n if verbose:\n print(cmd)\n p = subprocess.Popen(cmd,\n cwd=directory,\n stdout=subprocess.PIPE)\n (stdout, _) = p.communicate()\n if p.returncode != 0 and not fail_ok:\n raise RuntimeError('Failed to run {} in {}'.format(cmd, directory))\n return stdout", "def spawn(*args):\n # Adapted from ranger.ext.spawn\n process = Popen(args, stdout=PIPE, shell=True)\n stdout, stderr = process.communicate()\n return stdout.decode('utf-8')", "def run_command(command):\n process = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE)\n response, error = process.communicate()\n return response.decode().rstrip('\\n'), error.decode().rstrip('\\n')", "def run_subprocess(self, *cmd_and_args):\n\n command_line = \" \".join(cmd_and_args)\n self.logger.debug(\"Running: %s\", command_line)\n\n return subprocess.Popen(command_line, shell=True, close_fds=True)", "def run(*args, **kwargs):\n _patch_popen(Popen)\n assert len(args) > 0\n\n arguments = []\n\n input = kwargs.pop('input', None)\n fail_on_error = kwargs.pop('fail_on_error', False)\n encoding = kwargs.pop('encoding', None) \n\n if len(args) == 1:\n if isinstance(args[0], string):\n arguments = args[0].split()\n else:\n for i in args:\n if isinstance(i, (list, tuple)):\n for j in i:\n arguments.append(j)\n else:\n arguments.append(i)\n\n def set_default_kwarg(key, default): \n kwargs[key] = kwargs.get(key, default)\n\n set_default_kwarg('stdin', PIPE)\n set_default_kwarg('stdout', PIPE)\n set_default_kwarg('stderr', PIPE)\n\n\n proc = Popen(arguments, **kwargs)\n stdout, stderr = proc.communicate(input)\n \n if encoding is not None:\n stdout = stdout.decode(encoding=encoding)\n stderr = stderr.decode(encoding=encoding)\n\n result = RunResult(proc.returncode, stdout, stderr)\n \n if fail_on_error and proc.returncode != 0:\n raise ProcessError(' '.join(arguments), result)\n\n return result", "def run_command(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print output.strip()\n\n rc = process.poll()\n return rc", "def create_subprocess(command, args):\n\n proc = subprocess.Popen([command] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, _ = proc.communicate()\n\n return proc.returncode, output", "def run_subprocess(args, work_dir):\n process = subprocess.Popen(args, cwd=work_dir)\n process.communicate()\n assert process.returncode == 0", "def subprocess_call(command):\n try:\n return_out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)\n if return_out.strip():\n print return_out\n except subprocess.CalledProcessError, err:\n msg = \"Subprocess call failed!\"\\\n \"\\n command : {0}\"\\\n \"\\n console output: \\n\\n{1}\"\\\n \"\\n error message : {2}\"\\\n \"\\n arguments : {3}\"\\\n \"\\n return-code : {4}\\n\"\\\n .format(err.cmd, err.output, err.message, err.args, err.returncode)\n raise Exception(msg)\n\n return return_out", "def popen(command, cwd=None, check=False, detach=False):\n\tif detach:\n\t\treturn spawn(command, cwd)\n\telse:\n\t\tcmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)\n\t\tstatus = cmd.wait()\n\t\tres, err = cmd.communicate()\n\t\tif status == 0:\n\t\t\treturn res.decode(\"utf8\")\n\t\telse:\n\t\t\treturn (status, err.decode(\"utf8\"))", "def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):\n return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "def run_command(cmd):\n if env.PY2 and isinstance(cmd, unicode):\n cmd = cmd.encode(sys.getfilesystemencoding())\n\n # In some strange cases (PyPy3 in a virtualenv!?) the stdout encoding of\n # the subprocess is set incorrectly to ascii. 
Use an environment variable\n # to force the encoding to be the same as ours.\n sub_env = dict(os.environ)\n encoding = output_encoding()\n if encoding:\n sub_env['PYTHONIOENCODING'] = encoding\n\n proc = subprocess.Popen(\n cmd,\n shell=True,\n env=sub_env,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output, _ = proc.communicate()\n status = proc.returncode\n\n # Get the output, and canonicalize it to strings with newlines.\n if not isinstance(output, str):\n output = output.decode(output_encoding())\n output = output.replace('\\r', '')\n\n return status, output", "def execute(args):\n print '################################'\n print 'args: ', args\n p = subprocess.Popen(args, shell=True, executable='/bin/bash')\n # p = subprocess.call(args, shell=True, executable='/bin/bash')\n p.wait()\n return p\n print '################################'", "def system(self,cmd):\n code = 'import os;f=os.popen(\"%s\");res = f.read(-1);f.close();' % cmd\n return self.exec_code(code,returns=['res'])", "def run(cmd):\n print('running', cmd)\n proc = sp.Popen([cmd], shell=True)\n proc.wait()\n assert proc.poll() == 0", "def shell(commandline, verbose=SHELL_VERBOSE):\n if verbose:\n sys.stderr.write(\"[Executing: \" + commandline + \"]\\n\")\n return sp.check_output(commandline, shell=True)", "def runSubProcess(command, verbose=0):\n if verbose:\n print(command)\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = process.communicate()\n status = process.poll()\n \n return output.decode('utf-8'), stderr.decode('utf-8'), status", "def subprocess_run(args, **kwargs_in):\n kwargs = kwargs_in.copy()\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.PIPE\n logger.debug(f'running a subprocess {args} {kwargs}')\n output = subprocess.run(args, **kwargs)\n logger.debug(f' returned: {output.stdout}')\n return output", "def _subexec(command):\n lcwd = fabric.state.env.get('lcwd', None) or None #sets lcwd to None if it bools to false as well\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=lcwd)\n out, err = process.communicate()\n print \"command : %s \" % command\n print \"out: %s\" % out\n print \"err: %s\" % err", "def system(command):\n print('[system] {}'.format(command))\n p = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n rc = p.returncode\n if PY3:\n output = output.decode(\"ascii\")\n err = err.decode(\"ascii\")\n return rc, output, err", "def call(seq):\n return subprocess.Popen(seq,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).communicate()[0]", "def run(cmd, proc_stdout = sys.stdout, proc_stderr = sys.stderr,\n check = True):\n print cmd\n proc = subprocess.Popen(cmd, shell=True, bufsize=-1,\n stdout=proc_stdout, stderr=proc_stderr)\n output, errors = proc.communicate()\n sts = proc.wait()\n if check is True and sts != 0:\n raise RuntimeError(\"Command: %s exited with non-zero status %i\" % (cmd, sts))\n return output, errors", "def exec_and_return(execargs):\n return subprocess.call(execargs)", "def call_subprocess(poutput, data=None):\n try:\n output = poutput.communicate(input=data)\n LOG.debug(\"Exit status: \" + str(poutput.returncode))\n if poutput.returncode != 0:\n LOG.warning(\"Process returned non-zero exit code: \" + str(poutput.returncode))\n LOG.warning(\"Process STDOUT: \" + output[0])\n LOG.warning(\"Process STDERR: \" + output[1])\n 
return output[0].strip(), output[1].strip()\n except Exception as e:\n LOG.exception(\"Command failed!\")\n raise e", "def run(cmd):\n cmd = str(cmd)\n\n if env['verbose']:\n sys.stdout.write('--> %s\\n' % cmd)\n\n cmd_list = shlex.split(cmd)\n\n p = subprocess.Popen(\n cmd_list,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n return p.communicate()", "def Run(command_line):\n print >> sys.stderr, command_line\n return subprocess.check_output(command_line, shell=True)", "def system_call(command):\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n return process.communicate()[0]", "def run_command(command, cwd=None):\n return subprocess.run(command, shell=True, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)", "def LaunchFile(*params):\n\n file = subprocess.Popen(params)\n file.communicate()\n return file.returncode", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "def execute_command(command):\n p = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n rc = p.wait()\n stdout = []\n stderr = []\n for line in p.stdout.read().decode().splitlines():\n stdout.append(line)\n for line in p.stderr.read().decode().splitlines():\n stderr.append(line)\n p.stdout.close()\n p.stderr.close()\n return (rc, stdout, stderr)", "def run_this(command_to_run, cwd=os.getcwd()):\n slab_logger.debug('Running shell command \"%s\"' % command_to_run)\n try:\n output = subprocess.Popen(command_to_run,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n close_fds=True,\n cwd=cwd)\n\n myinfo = output.communicate()[0]\n myinfo.strip()\n return(output.returncode, myinfo)\n except OSError, ex:\n slab_logger.error(ex)\n return (1, str(ex))", "def exec_cmd(cmd):\n print(' '.join(str(e) for e in cmd))\n try:\n res = subprocess.run(cmd, capture_output=True, check=True)\n print(res.stdout.decode(\"utf8\"))\n return res\n except subprocess.CalledProcessError as err:\n logging.error(err.stderr)\n raise err", "def spawn(self, arguments=None, environment=None):\n return subprocess.Popen(\n args=[self.executable] + ([] or arguments),\n # do not redirect std streams\n # this fakes the impression of having just one program running\n stdin=None,\n stdout=None,\n stderr=None,\n env=environment,\n )", "def run_script(command):\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n output = process.communicate()[0]\n success = process.poll() == 0\n return (success, output)", "def subprocess_run(*args, **kwargs):\n if __version__.py_version_lt(3, 7):\n if kwargs.pop('capture_output', None):\n kwargs.setdefault('stdout', subprocess.PIPE)\n kwargs.setdefault('stderr', subprocess.PIPE)\n try:\n output = subprocess.run(*args, **kwargs)\n except subprocess.CalledProcessError as cpe:\n emess = '\\n'.join([\n 'Subprocess error:',\n 'stderr:',\n f'{cpe.stderr}',\n 'stdout:',\n f'{cpe.stdout}',\n f'{cpe}'\n ])\n raise RuntimeError(emess) from cpe\n return output", "def output_subprocess_Popen(command, **params):\r\n if 'stdout' in params or 'stderr' in params:\r\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\r\n # stdin to devnull is a workaround for a crash in a weird Windows\r\n # environement where sys.stdin was None\r\n if not hasattr(params, 'stdin'):\r\n null = open(os.devnull, 'wb')\r\n 
params['stdin'] = null\r\n params['stdout'] = subprocess.PIPE\r\n params['stderr'] = subprocess.PIPE\r\n p = subprocess_Popen(command, **params)\r\n # we need to use communicate to make sure we don't deadlock around\r\n # the stdour/stderr pipe.\r\n out = p.communicate()\r\n return out + (p.returncode,)", "def _launch_command(args, out_cb, err_cb, done=None, **kwargs):\n\n def pump_stream(callback, stream):\n \"\"\"Pump the stream\"\"\"\n for line in stream:\n callback(line)\n callback(None)\n\n def joiner():\n \"\"\"Wait for streams to finish, then call done callback\"\"\"\n for th in threads:\n th.join()\n done(process)\n\n kwargs = kwargs.copy()\n in_data = kwargs.get(\"input\")\n if \"input\" in kwargs:\n del kwargs[\"input\"]\n assert kwargs.get(\"stdin\") is None, kwargs[\"stdin\"]\n kwargs[\"stdin\"] = PIPE\n elif \"stdin\" not in kwargs:\n kwargs[\"stdin\"] = DEVNULL\n kwargs.setdefault(\"stdout\", PIPE)\n kwargs.setdefault(\"stderr\", PIPE)\n kwargs[\"universal_newlines\"] = True # Text streams, not byte streams\n process = Popen(args, **kwargs)\n threads = []\n if process.stdout:\n thread = Thread(\n target=pump_stream, args=(out_cb, process.stdout), daemon=True\n )\n thread.start()\n threads.append(thread)\n if process.stderr:\n thread = Thread(\n target=pump_stream, args=(err_cb, process.stderr), daemon=True\n )\n thread.start()\n threads.append(thread)\n if done and threads:\n Thread(target=joiner, daemon=True).start()\n if in_data:\n process.stdin.write(str(in_data, \"utf-8\"))\n process.stdin.close()\n return process", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. 
\n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "async def _run_subprocess(\n cmd: str,\n allow_params: bool,\n params: Dict[str, ParamValueT],\n) -> Dict[str, Any]:\n cmd_str = cmd\n if allow_params:\n if params[\"shell_params\"] == []:\n cmd_str = cmd.format([''])\n else:\n cmd_str = cmd.format(*params.get('shell_params', ['']))\n\n logging.info(\"Running command: %s\", cmd_str)\n\n cmd_list = shlex.split(cmd_str)\n\n process = await asyncio.create_subprocess_exec(\n *cmd_list,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n stdout, stderr = await process.communicate()\n\n return {\n \"returncode\": process.returncode,\n \"stdout\": stdout.decode(),\n \"stderr\": stderr.decode(),\n }", "def run_with_subprocess(cmd):\n new_env = dict(os.environ, LC_ALL='C')\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=new_env)\n output, error = proc.communicate()\n returncode = proc.returncode\n except OSError, (errno, strerror):\n output, error = \"\", \"Could not execute %s: %s\" % (cmd[0], strerror)\n returncode = 1\n\n return (output, error, returncode)", "def Run(cmd, include_stderr=False, return_pipe=False):\n cmd = to_unicode(str(cmd))\n if include_stderr:\n err = STDOUT\n fds = True\n else:\n err = None\n fds = False\n tmpenv = os.environ.copy()\n tmpenv[\"LC_ALL\"] = \"C\"\n tmpenv[\"LANG\"] = \"C\"\n f = Popen(cmd, shell=True, stdout=PIPE, stderr=err, close_fds=fds,\n env=tmpenv)\n if return_pipe:\n return f.stdout\n else:\n return f.communicate()[0]", "def run_external(cmd):\n args = shlex.split(cmd, posix=(os.name == 'posix'))\n p = run(args, capture_output=True)\n return p.returncode, p.stdout.decode(), p.stderr.decode()", "def process_run(cmd_string, stdin=None):\n process_object=subprocess.Popen(shlex.split(cmd_string),\n stdin=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return process_object", "def checked_subprocess_run(command):\n args = shlex.split(command)\n completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out = completed.stdout.decode()\n err = completed.stderr.decode()\n\n # Print the subprocess output to include in the test output\n print(out, file=sys.stdout)\n print(err, file=sys.stderr)\n\n # After printing the output, raise an exception on a non-zero exit status.\n completed.check_returncode()\n\n return out, err", "def run_cmd(cmd, args, path=None, raise_error=True):\n\n if path is not None:\n # Transparently support py.path objects\n path = str(path)\n\n p = sp.Popen([cmd] + list(args), stdout=sp.PIPE, stderr=sp.PIPE,\n cwd=path)\n streams = tuple(s.decode('latin1').strip() for s in p.communicate())\n return_code = p.returncode\n\n if raise_error and return_code != 0:\n raise RuntimeError(\n \"The command `{0}` with args {1!r} exited with code {2}.\\n\"\n \"Stdout:\\n\\n{3}\\n\\nStderr:\\n\\n{4}\".format(\n cmd, list(args), return_code, streams[0], streams[1]))\n\n return streams + (return_code,)", "def run_command(command):\n\n return subprocess.run(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def getCommandOutput(command):\n#####################################################################\n\n child = os.popen(command)\n data = child.read()\n err = child.close()\n if err:\n print('%s failed w/ exit code %d' % (command, err))\n return data", "def run_subprocess_cmd(command, print_cmd=True, print_stdout_stderr=True, get_returncode=False):\n if 
print_cmd:\n print\n print 'Running command:\\n%s' % command\n print \n\n sp = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n shell=True)\n out, error = sp.communicate() \n if print_stdout_stderr:\n print\n print out\n print\n print error\n print\n\n if get_returncode:\n return out, error, sp.returncode\n else:\n return out, error", "def execute(cmd) :\n return os.system( cmd )", "def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )", "def RunProcess(self, command, env=None, stdin=None, stdout=None, stderr=None):\n\n # merge specified env with OS env\n myenv = os.environ.copy()\n if env is not None:\n myenv.update(env)\n\n try:\n process = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr, env=myenv, bufsize=0)\n return process\n except:\n print(\"Unexpected error when launching process:\")\n print(\" \", command)\n print(\" \", env)\n raise" ]
[ "0.73115253", "0.7231892", "0.7162937", "0.7034998", "0.69283974", "0.6902304", "0.6880463", "0.68566847", "0.67947245", "0.6765262", "0.6710852", "0.66885346", "0.66741914", "0.6659031", "0.66580534", "0.6635002", "0.66338694", "0.6633255", "0.65943277", "0.6593683", "0.65877634", "0.65817463", "0.6579395", "0.65717864", "0.65495396", "0.6538355", "0.6527337", "0.65268177", "0.65086895", "0.65077925", "0.6503225", "0.64960617", "0.64923275", "0.64837885", "0.6478596", "0.6471047", "0.6470727", "0.6468416", "0.64139783", "0.6411994", "0.64074564", "0.6392422", "0.6381966", "0.63766503", "0.6364108", "0.63518953", "0.6348531", "0.63464475", "0.6345693", "0.63431096", "0.6321485", "0.63201535", "0.6319218", "0.6318661", "0.6308603", "0.6308594", "0.6295036", "0.6290725", "0.6289838", "0.62815124", "0.626866", "0.6263901", "0.6255227", "0.62506646", "0.62466514", "0.6245309", "0.6244645", "0.62385845", "0.6227816", "0.6221998", "0.62204885", "0.621386", "0.62128395", "0.6211141", "0.62068504", "0.62050265", "0.6196859", "0.6195311", "0.6190142", "0.618644", "0.6186133", "0.6180706", "0.61764544", "0.6176051", "0.61750484", "0.61735666", "0.61735666", "0.61735666", "0.6170679", "0.616747", "0.6163475", "0.6157126", "0.61555594", "0.6152442", "0.61514807", "0.61425346", "0.61421496", "0.6126696", "0.61252934", "0.6110545", "0.61075056" ]
0.0
-1
Executes a subprocess and returns its exit code and output.
def GetCmdStatusAndOutput(args, cwd=None, shell=False, env=None,
                          merge_stderr=False):
  status, stdout, stderr = GetCmdStatusOutputAndError(
      args, cwd=cwd, shell=shell, env=env, merge_stderr=merge_stderr)
  if stderr:
    logger.critical('STDERR: %s', stderr)
  logger.debug('STDOUT: %s%s', stdout[:4096].rstrip(),
               '<truncated>' if len(stdout) > 4096 else '')
  return (status, stdout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(cmd):\n\n process = subprocess.Popen(cmd,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out = ''\n err = ''\n exitcode = 0\n\n result = process.communicate()\n (out, err) = result\n exitcode = process.returncode\n\n return exitcode, out.decode(), err.decode()", "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output", "def run(cmd, shell=False, cwd=None):\n try:\n out = check_output(cmd, shell=shell, cwd=cwd, stderr=STDOUT)\n except CalledProcessError as ex:\n return ex.returncode, ex.output\n else:\n return 0, out", "def _exec_cmd(cmd, stdout=None, stderr=None):\n rc = 0\n kwargs = {}\n if stdout is not None:\n kwargs[\"stdout\"] = stdout\n if stderr is not None:\n kwargs[\"stderr\"] = stderr\n try:\n subprocess.check_call(cmd, **kwargs)\n except CalledProcessError as e:\n LOG.error(\"[return code: %s] %s\", e.returncode, e)\n rc = e.returncode\n return rc", "def subprocess_run(cmd, ignore_failure=False, shell=True):\n try:\n proc = subprocess.Popen(\n cmd,\n shell=shell,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rc = proc.returncode\n\n except OSError as exc:\n raise RuntimeError('Failed to run ' + cmd + ': [Errno: %d] ' %\n exc.errno + exc.strerror + ' [Exception: ' +\n type(exc).__name__ + ']')\n if (not ignore_failure) and (rc != 0):\n raise RuntimeError('(%s) failed with rc=%s: %s' %\n (cmd, rc, err))\n return out", "def _run_cmd(*args):\n proc = Popen(\n args, stdin=PIPE, stdout=PIPE, stderr=PIPE,\n cwd=os.path.dirname(__file__))\n output, _ = proc.communicate()\n code = proc.returncode\n return code, output", "def run(cmd, dieOnError=True):\n\n\tps = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\texitcode = ps.returncode\n\tstdout,stderr = ps.communicate()\n\treturn exitcode, stdout, stderr", "def run_subprocess(command, environment=None, shell=False, raise_on_error=True):\n proc = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell,\n env=environment)\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n if raise_on_error:\n raise RuntimeError('{}\\n{}'.format(stderr, stdout))\n return stdout, stderr, proc.returncode", "def checked_subprocess_run(command):\n args = shlex.split(command)\n completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out = completed.stdout.decode()\n err = completed.stderr.decode()\n\n # Print the subprocess output to include in the test output\n print(out, file=sys.stdout)\n print(err, file=sys.stderr)\n\n # After printing the output, raise an exception on a non-zero exit status.\n completed.check_returncode()\n\n return out, err", "def run_command(cmd):\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n return proc.returncode, stdout, stderr", "def _exec_command(command):\n\n log(\"Run command for '%s'\" % command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n p_status = p.wait()\n return p_status, output", "def execute(parent, cmd, *args, **kwargs):\n\n with xtrace(parent, flatten(cmd)) as h:\n try:\n code = subprocess.call(cmd, 
*args, **kwargs)\n except:\n sys.exit(\n DiagnosticReporter.fatal(EXCEPTION_EXECUTING_PROCESS, cmd[0]))\n finally:\n h.report(code)\n return code", "def shell_cmd(*args):\n proc = subprocess.run(args)\n returncode = proc.returncode\n if returncode != 0:\n raise RuntimeError(\n f\"Command {args} failed with return code {returncode}\")\n return proc", "def subprocess_run(*args, **kwargs):\n if __version__.py_version_lt(3, 7):\n if kwargs.pop('capture_output', None):\n kwargs.setdefault('stdout', subprocess.PIPE)\n kwargs.setdefault('stderr', subprocess.PIPE)\n try:\n output = subprocess.run(*args, **kwargs)\n except subprocess.CalledProcessError as cpe:\n emess = '\\n'.join([\n 'Subprocess error:',\n 'stderr:',\n f'{cpe.stderr}',\n 'stdout:',\n f'{cpe.stdout}',\n f'{cpe}'\n ])\n raise RuntimeError(emess) from cpe\n return output", "def Subprocess(self, cmd):\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def execute_shell_command(command: str) -> int:\n cwd: str = os.getcwd()\n\n path_env_var: str = os.pathsep.join([os.environ.get(\"PATH\", os.defpath), cwd])\n env: dict = dict(os.environ, PATH=path_env_var)\n\n status_code: int = 0\n try:\n res: CompletedProcess = run(\n args=[\"bash\", \"-c\", command],\n stdin=None,\n input=None,\n # stdout=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4\n # stderr=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4\n capture_output=True,\n shell=False,\n cwd=cwd,\n timeout=None,\n check=True,\n encoding=None,\n errors=None,\n text=None,\n env=env,\n universal_newlines=True,\n )\n sh_out: str = res.stdout.strip()\n logger.info(sh_out)\n except CalledProcessError as cpe:\n status_code = cpe.returncode\n sys.stderr.write(cpe.output)\n sys.stderr.flush()\n exception_message: str = \"A Sub-Process call Exception occurred.\\n\"\n exception_traceback: str = traceback.format_exc()\n exception_message += (\n f'{type(cpe).__name__}: \"{str(cpe)}\". 
Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n\n return status_code", "def execute(args, cwd=None):\n completed_process = subprocess.run(\n args=args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=cwd\n )\n return utils.make_process_result(completed_process)", "def execute(self, cmd, cwd=None, capture_output=False, env=None, raise_errors=True):\n logging.info('Executing command: {cmd}'.format(cmd=str(cmd)))\n stdout = subprocess.PIPE if capture_output else None\n process = subprocess.Popen(cmd, cwd=cwd, env=env, stdout=stdout)\n output = process.communicate()[0]\n returncode = process.returncode\n if returncode:\n # Error\n if raise_errors:\n raise subprocess.CalledProcessError(returncode, cmd)\n else:\n logging.info('Command returned error status %s', returncode)\n if output:\n logging.info(output)\n return returncode, output", "def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):\n if ok_exit_codes is None:\n ok_exit_codes = [0]\n out, err = proc.communicate(cmd_input)\n\n ret = proc.returncode\n if ret not in ok_exit_codes:\n LOG.error(\"Command '%(cmdline)s' with process id '%(pid)s' expected \"\n \"return code in '%(ok)s' but got '%(rc)s': %(err)s\" %\n {'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,\n 'rc': ret, 'err': err})\n raise SubprocessException(' '.join(cmdline), ret, out, err)\n return out", "def run(cmd: List[str]) -> int:\n logger.debug('cmd: %s', ' '.join(cmd))\n child = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdoutdata, stderrdata = child.communicate()\n\n if stdoutdata.strip():\n log_std('stdout', stdoutdata.decode(),\n logging.DEBUG if child.returncode == 0 else logging.ERROR)\n\n if stderrdata.strip():\n log_std('stderr', stderrdata.decode(), logging.ERROR)\n\n logger.debug(\"returncode %s\", child.returncode)\n return child.returncode", "def _execute_command(\n args: Union[List[str], str],\n print_output: bool,\n capture_stderr: bool,\n print_command: bool,\n *pargs,\n **kwargs\n) -> Tuple[int, List[str]]:\n stdout_write, stdout_path = tempfile.mkstemp()\n with open(stdout_path, \"rb\") as stdout_read, open('/dev/null', 'w') as dev_null:\n\n if print_command:\n print(\"Executing: %s\" % \" \".join(args))\n\n kwargs['stdout'] = stdout_write\n kwargs['stderr'] = stdout_write if capture_stderr else dev_null\n\n # pylint: disable=consider-using-with\n process = subprocess.Popen(\n args,\n *pargs,\n **kwargs\n )\n\n while True:\n output = stdout_read.read(1).decode(errors=\"replace\")\n\n if output == '' and process.poll() is not None:\n break\n\n if print_output and output:\n print(output, end=\"\", flush=True)\n\n exit_code = process.poll()\n\n stdout_read.seek(0)\n stdout = [line.decode(errors=\"replace\") for line in stdout_read.readlines()]\n\n # ignoring mypy error below because it thinks exit_code can sometimes be None\n # we know that will never be the case because the above While loop will keep looping forever\n # until exit_code is not None\n return exit_code, stdout # type: ignore", "def execute_stdout(command):\n try:\n output = subprocess.check_output([command], stderr=subprocess.STDOUT,\n shell=True)\n return 0, output\n except subprocess.CalledProcessError as excp:\n return excp.returncode, excp.output", "def execute_command(command):\n p = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n rc = p.wait()\n stdout = []\n stderr = []\n for line in p.stdout.read().decode().splitlines():\n stdout.append(line)\n for line in 
p.stderr.read().decode().splitlines():\n stderr.append(line)\n p.stdout.close()\n p.stderr.close()\n return (rc, stdout, stderr)", "def run_with_subprocess(cmd):\n new_env = dict(os.environ, LC_ALL='C')\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=new_env)\n output, error = proc.communicate()\n returncode = proc.returncode\n except OSError, (errno, strerror):\n output, error = \"\", \"Could not execute %s: %s\" % (cmd[0], strerror)\n returncode = 1\n\n return (output, error, returncode)", "def execute_command(cmd):\n\n env = environ.copy()\n proc = subprocess.Popen(\n [cmd],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n env=env)\n\n stdout, stderr = proc.communicate()\n\n if stdout:\n logging.info(stdout.decode())\n if stderr:\n logging.info(stderr.decode())\n\n if proc.returncode != 0:\n logging.error(u\" Command execution failed.\")\n return proc.returncode, stdout.decode(), stderr.decode()", "def runprocess(self, argv, check_stdout=None, check_stderr=None,\n check_returncode=0, stdin_string='', fail_message=None,\n timeout=5, verbosity=None, env=None):\n if env is None:\n env = os.environ\n env.setdefault('GIT_COMMITTER_DATE', self.isodate_now)\n argv_repr = ' '.join(shellquote(a) for a in argv)\n if verbosity is None:\n verbosity = self.verbosity\n if verbosity:\n print(self.term.blue(argv_repr))\n if verbosity > 2:\n print(self.term.yellow(stdin_string.rstrip()))\n PIPE = subprocess.PIPE\n proc = subprocess.Popen(argv, stdout=PIPE, stderr=PIPE, stdin=PIPE,\n env=env)\n try:\n stdout, stderr = proc.communicate(stdin_string.encode('utf-8'),\n timeout=timeout)\n timeout_expired = False\n except subprocess.TimeoutExpired:\n proc.kill()\n stdout = stderr = b''\n timeout_expired = True\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n returncode = proc.returncode\n failed = any([\n timeout_expired,\n (check_stdout is not None and check_stdout != stdout),\n (check_stderr is not None and check_stderr != stderr),\n (check_returncode is not None and check_returncode != returncode),\n ])\n if failed and not verbosity:\n print(self.term.blue(argv_repr))\n if failed or verbosity >= 2:\n if stdout:\n print(stdout.rstrip())\n if stderr:\n print(self.term.yellow(stderr.rstrip()))\n print('→ %s' % self.term.blue(str(proc.returncode)))\n if failed:\n if timeout_expired:\n self.die('Command timeout expired')\n elif fail_message:\n self.die(fail_message)\n else:\n self.die('Command failed')\n return SubprocessResult(stdout, stderr, returncode)", "def execute_cmd(args: Sequence[str],\n verbose: bool = False,\n **kwargs) -> subprocess.CompletedProcess:\n cmd = \" \".join(args)\n if verbose:\n print(f\"cmd: {cmd}\")\n try:\n return subprocess.run(args, check=True, text=True, **kwargs)\n except subprocess.CalledProcessError as exc:\n print((f\"\\n\\nThe following command failed:\\n\\n{cmd}\"\n f\"\\n\\nReturn code: {exc.returncode}\\n\\n\"))\n if exc.stdout:\n print(f\"Stdout:\\n\\n{exc.stdout}\\n\\n\")\n if exc.stderr:\n print(f\"Stderr:\\n\\n{exc.stderr}\\n\\n\")\n raise exc", "def run_command(cmd):\n if env.PY2 and isinstance(cmd, unicode):\n cmd = cmd.encode(sys.getfilesystemencoding())\n\n # In some strange cases (PyPy3 in a virtualenv!?) the stdout encoding of\n # the subprocess is set incorrectly to ascii. 
Use an environment variable\n # to force the encoding to be the same as ours.\n sub_env = dict(os.environ)\n encoding = output_encoding()\n if encoding:\n sub_env['PYTHONIOENCODING'] = encoding\n\n proc = subprocess.Popen(\n cmd,\n shell=True,\n env=sub_env,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output, _ = proc.communicate()\n status = proc.returncode\n\n # Get the output, and canonicalize it to strings with newlines.\n if not isinstance(output, str):\n output = output.decode(output_encoding())\n output = output.replace('\\r', '')\n\n return status, output", "def run_subprocess_cmd(command, print_cmd=True, print_stdout_stderr=True, get_returncode=False):\n if print_cmd:\n print\n print 'Running command:\\n%s' % command\n print \n\n sp = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n shell=True)\n out, error = sp.communicate() \n if print_stdout_stderr:\n print\n print out\n print\n print error\n print\n\n if get_returncode:\n return out, error, sp.returncode\n else:\n return out, error", "def run(cmd, proc_stdout = sys.stdout, proc_stderr = sys.stderr,\n check = True):\n print cmd\n proc = subprocess.Popen(cmd, shell=True, bufsize=-1,\n stdout=proc_stdout, stderr=proc_stderr)\n output, errors = proc.communicate()\n sts = proc.wait()\n if check is True and sts != 0:\n raise RuntimeError(\"Command: %s exited with non-zero status %i\" % (cmd, sts))\n return output, errors", "def call_command(command, env=None, cwd=None):\n\n try:\n LOG.debug('Run %s', ' '.join(command))\n out = subprocess.check_output(command,\n bufsize=-1,\n env=env,\n stderr=subprocess.STDOUT,\n cwd=cwd)\n LOG.debug(out)\n return out, 0\n except subprocess.CalledProcessError as ex:\n LOG.debug('Running command \"%s\" Failed.', ' '.join(command))\n LOG.debug(str(ex.returncode))\n LOG.debug(ex.output)\n return ex.output, ex.returncode\n except OSError as oerr:\n LOG.warning(oerr.strerror)\n return oerr.strerror, oerr.errno", "def call_subprocess(poutput, data=None):\n try:\n output = poutput.communicate(input=data)\n LOG.debug(\"Exit status: \" + str(poutput.returncode))\n if poutput.returncode != 0:\n LOG.warning(\"Process returned non-zero exit code: \" + str(poutput.returncode))\n LOG.warning(\"Process STDOUT: \" + output[0])\n LOG.warning(\"Process STDERR: \" + output[1])\n return output[0].strip(), output[1].strip()\n except Exception as e:\n LOG.exception(\"Command failed!\")\n raise e", "def exec_test_command(cmd):\n process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)\n result = process.communicate()\n return (\n process.returncode,\n bytes(result[0]).decode(\"utf-8\"),\n bytes(result[1]).decode(\"utf-8\"),\n )", "def run(cmd):\n print ' '.join(cmd)\n try:\n check_call(cmd)\n except CalledProcessError as cpe:\n print \"Error: return code: \" + str(cpe.returncode)\n sys.exit(cpe.returncode)", "def exec_command(cmd):\n with subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True) as p:\n stdout, _ = p.communicate()\n if p.returncode != 0:\n logger.error(stdout)\n return None\n\n return stdout", "def subprocess_run(args, **kwargs_in):\n kwargs = kwargs_in.copy()\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.PIPE\n logger.debug(f'running a subprocess {args} {kwargs}')\n output = subprocess.run(args, **kwargs)\n logger.debug(f' returned: {output.stdout}')\n return output", "def run_and_handle_returncode(cmd, print_cmd=True):\n stdout, stderr, returncode = 
run_subprocess_cmd(cmd, print_cmd=print_cmd, \n print_stdout_stderr=False, get_returncode=True)\n if returncode != 0:\n import sys; sys.exit('STDOUT:\\n%s\\nSTDERR:\\n%s\\n' % (stdout, stderr))\n return stdout, stderr, returncode", "def shell(args, **kwargs):\n import subprocess\n\n output, returncode = '', 0\n logger.debug('running %s', ' '.join(args))\n try:\n if 'cwd' in kwargs:\n # convert cwd to str in case it's a Path\n kwargs['cwd'] = str(kwargs['cwd'])\n output = subprocess.check_output(\n args, stderr=subprocess.STDOUT, **kwargs)\n except subprocess.CalledProcessError as e:\n returncode = e.returncode\n output = e.output\n\n return output.decode('utf-8'), returncode", "def sh(*args, return_bytes=False, encoding='utf-8', **kw):\n options = dict(\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n options.update(kw)\n\n process = subprocess.Popen(args, **options)\n process.wait()\n\n if process.returncode != 0:\n raise CommandFailure(process.stderr.read().decode(encoding))\n\n if return_bytes:\n return process.stdout.read()\n\n return process.stdout.read().decode(encoding)", "def _run(args: List[str], check: bool = False) -> Tuple[int, str]:\n result = subprocess.run(args=args, stdout=subprocess.PIPE)\n if check and result.returncode != 0:\n raise subprocess.CalledProcessError(result.returncode, args)\n return result.returncode, result.stdout.decode('utf-8', 'strict')", "def subprocess_run(cmd):\n print(shlex.join(cmd))\n try:\n ret = subprocess.run(cmd, capture_output=True,\n text=True, env=os.environ.copy(), check=True)\n if (ret.stdout):\n print(ret.stdout)\n return ret\n except subprocess.CalledProcessError as e:\n if (e.stderr):\n print(e.stderr)\n raise e", "def runSubProcess(command, verbose=0):\n if verbose:\n print(command)\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = process.communicate()\n status = process.poll()\n \n return output.decode('utf-8'), stderr.decode('utf-8'), status", "def run(cmd, fail=True, capture_stdout=False, capture_stderr=False, verbose=False):\n stdout, stderr = None, None\n if capture_stderr:\n stderr = subprocess.PIPE\n if capture_stdout:\n stdout = subprocess.PIPE\n\n if verbose:\n print(cmd)\n\n p = subprocess.Popen(['bash', '-c', cmd], stderr=stderr, stdout=stdout)\n if p.returncode and fail:\n sys.exit(1)\n\n return p", "def execute(cmd, path):\n oldPath = os.getcwd()\n os.chdir(path)\n\n exitcode, output = subprocess.getstatusoutput(cmd)\n\n os.chdir(oldPath)\n\n ok = not exitcode\n\n return ok, output", "def _run(self, script, args):\n proc = subprocess.Popen([script] + args,\n stdout=subprocess.PIPE\n )\n\n stdout = proc.communicate()[0]\n retcode = proc.returncode\n\n return stdout, retcode", "def exec_process(cmdline, silent, input=None, **kwargs):\n try:\n sub = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n stdout, stderr = sub.communicate(input=input)\n returncode = sub.returncode\n if not silent:\n sys.stdout.write(stdout)\n sys.stderr.write(stderr)\n except OSError, e:\n if e.errno == 2:\n raise RuntimeError('\"%s\" is not present on this system' % cmdline[0])\n else:\n raise\n if returncode != 0:\n raise RuntimeError('Got return value %d while executing \"%s\", stderr output was:\\n%s' % (returncode, \" \".join(cmdline), stderr.rstrip(\"\\n\")))\n return stdout", "def call(self):\n\n process = subprocess.Popen(self._cmd, stdout=self._stdout, stderr=self._stderr,\n shell=isinstance(self._cmd, 
basestring), env=self._env, cwd=self._cwd)\n returnData = process.communicate()\n\n return ProcessResult(process.returncode, returnData[0], returnData[1])", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def subprocess_with_output(\n cmd, shell=False, cwd=None, env=None, suppress_output=False):\n # type: (str, bool, str, dict, bool) -> int\n _devnull = None\n try:\n if suppress_output:\n _devnull = open(os.devnull, 'w')\n proc = subprocess.Popen(\n cmd, shell=shell, cwd=cwd, env=env, stdout=_devnull,\n stderr=subprocess.STDOUT)\n else:\n proc = subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)\n proc.wait()\n finally:\n if _devnull is not None:\n _devnull.close()\n return proc.returncode", "def exec_cmd(cmd):\n\targs = shlex.split(cmd)\n\tverbose = True\n\n\ttry:\n\t\tif verbose == True:\n\t\t\tsubprocess.check_call(args)\n\t\telse:\n\t\t\tsubprocess.check_call(args,\n\t\t\t\t\t\t\t\t stdout=subprocess.STDOUT,\n\t\t\t\t\t\t\t\t stderr=subprocess.STDOUT)\n\t# Exception\n\texcept subprocess.CalledProcessError as e:\n\t\tprint \"Command\t :: \", e.cmd\n\t\tprint \"Return Code :: \", e.returncode\n\t\tprint \"Output\t :: \", e.output", "def run_command(*args):\n cmd = sp.Popen(args, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT, encoding='utf-8')\n stdout, _ = cmd.communicate()\n\n if cmd.returncode != 0:\n raise ValueError(f\"Running `{args[0]}` failed with return code {cmd.returncode}, output: \\n {stdout}\")\n else:\n return stdout.strip('\\n')", "def getstatusoutput(*args, **kwargs):\n p = subprocess.Popen(*args, **kwargs)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def _subprocess(cmd):\n\n log.debug('Running: \"%s\"', \" \".join(cmd))\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()\n retcode = proc.wait()\n\n if ret:\n return ret\n elif retcode != 1:\n return True\n else:\n return False\n except OSError as err:\n log.error(err)\n return False", "def _proc_exec_wait(command_line, silent=False):\n result = (-1, None, None)\n command = None\n proc = None\n\n if _platform_windows:\n command_line = command_line.replace(\"\\\\\", \"/\")\n\n try:\n command = shlex.split(command_line)\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Unable to parse the given command line: {0}\\n\"\n \"Error: {1}.\".format(command_line, e)\n )\n return result\n\n try:\n sp_kwargs = {\n \"stdout\": subprocess.PIPE,\n \"stderr\": subprocess.PIPE,\n \"startupinfo\": None,\n \"env\": os.environ,\n }\n\n if _platform_windows:\n sp_kwargs[\"startupinfo\"] = subprocess.STARTUPINFO()\n sp_kwargs[\"startupinfo\"].dwFlags = (\n subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW\n )\n sp_kwargs[\"startupinfo\"].wShowWindow = subprocess.SW_HIDE\n\n proc = subprocess.Popen(command, **sp_kwargs)\n stdoutdata, stderrdata = proc.communicate()\n status = proc.returncode\n result = (status, stdoutdata.decode(\"utf8\"), stderrdata.decode(\"utf8\"))\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Could not open the process: '{0}'\\n\"\n \"Error: {1}.\".format(command[0], e)\n )\n finally:\n if proc:\n if proc.stdout:\n proc.stdout.close()\n\n if proc.stderr:\n proc.stderr.close()\n\n return result", "def subprocess_call(command):\n try:\n return_out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)\n if return_out.strip():\n print 
return_out\n except subprocess.CalledProcessError, err:\n msg = \"Subprocess call failed!\"\\\n \"\\n command : {0}\"\\\n \"\\n console output: \\n\\n{1}\"\\\n \"\\n error message : {2}\"\\\n \"\\n arguments : {3}\"\\\n \"\\n return-code : {4}\\n\"\\\n .format(err.cmd, err.output, err.message, err.args, err.returncode)\n raise Exception(msg)\n\n return return_out", "async def _run_subprocess(\n cmd: str,\n allow_params: bool,\n params: Dict[str, ParamValueT],\n) -> Dict[str, Any]:\n cmd_str = cmd\n if allow_params:\n if params[\"shell_params\"] == []:\n cmd_str = cmd.format([''])\n else:\n cmd_str = cmd.format(*params.get('shell_params', ['']))\n\n logging.info(\"Running command: %s\", cmd_str)\n\n cmd_list = shlex.split(cmd_str)\n\n process = await asyncio.create_subprocess_exec(\n *cmd_list,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n stdout, stderr = await process.communicate()\n\n return {\n \"returncode\": process.returncode,\n \"stdout\": stdout.decode(),\n \"stderr\": stderr.decode(),\n }", "def execute_command(command):\n proc = subprocess.Popen(\n [\"/bin/bash\"], shell=True, cwd=os.environ['PWD'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n )\n proc.stdin.write(command)\n stdout, stderr = proc.communicate()\n rc = proc.returncode\n\n return stdout, stderr, rc", "def run_check(cmd: typing.Sequence[typing.Union[os.PathLike, str]],\n *, input_lines: typing.Optional[BytesOrStrIterator] = None,\n capture_output: bool = False,\n quiet: bool = False, **kwargs) -> subprocess.CompletedProcess:\n log.debug('run %r', cmd)\n\n if not kwargs.pop('check', True):\n raise NotImplementedError('check must be True or omited')\n\n if capture_output: # Python 3.6 compat\n kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE\n\n kwargs.setdefault('startupinfo', get_startupinfo())\n\n try:\n if input_lines is not None:\n assert kwargs.get('input') is None\n assert iter(input_lines) is input_lines\n popen = subprocess.Popen(cmd, stdin=subprocess.PIPE, **kwargs)\n stdin_write = popen.stdin.write\n for line in input_lines:\n stdin_write(line)\n stdout, stderr = popen.communicate()\n proc = subprocess.CompletedProcess(popen.args, popen.returncode,\n stdout=stdout, stderr=stderr)\n else:\n proc = subprocess.run(cmd, **kwargs)\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise ExecutableNotFound(cmd) from e\n else:\n raise\n\n if not quiet and proc.stderr:\n stderr = proc.stderr\n if isinstance(stderr, bytes):\n stderr_encoding = (getattr(sys.stderr, 'encoding', None)\n or sys.getdefaultencoding())\n stderr = stderr.decode(stderr_encoding)\n sys.stderr.write(stderr)\n sys.stderr.flush()\n\n try:\n proc.check_returncode()\n except subprocess.CalledProcessError as e:\n raise CalledProcessError(*e.args)\n\n return proc", "def run_command(command):\n process = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n response, error = process.communicate()\n return response.decode().rstrip('\\n'), error.decode().rstrip('\\n')", "def execute(self, args, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr, dryrun=False):\n if isinstance(args, str):\n args = args.split()\n\n if not isinstance(args, list):\n raise ValueError('Execute arguments must be a list')\n\n if dryrun:\n self.log.debug('would execute: {}'.format(' '.join(args)))\n return 0\n\n p = Popen(args, stdin=stdin, stdout=stdout, stderr=stderr)\n p.wait()\n return p.returncode", "def execute(args):\n print '################################'\n 
print 'args: ', args\n p = subprocess.Popen(args, shell=True, executable='/bin/bash')\n # p = subprocess.call(args, shell=True, executable='/bin/bash')\n p.wait()\n return p\n print '################################'", "def mock_run_cmd(cmd: str, *args: str, **kwargs: Any) -> int:\n completed_process: subprocess.CompletedProcess[bytes] = subprocess.run(\n [cmd] + list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n exit_code: int = completed_process.returncode\n\n if exit_code != 0:\n print(dim(f\"<exit code: {exit_code}>\\n\"), file=sys.stderr)\n return completed_process.returncode", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def run(*args, **kwargs):\n _patch_popen(Popen)\n assert len(args) > 0\n\n arguments = []\n\n input = kwargs.pop('input', None)\n fail_on_error = kwargs.pop('fail_on_error', False)\n encoding = kwargs.pop('encoding', None) \n\n if len(args) == 1:\n if isinstance(args[0], string):\n arguments = args[0].split()\n else:\n for i in args:\n if isinstance(i, (list, tuple)):\n for j in i:\n arguments.append(j)\n else:\n arguments.append(i)\n\n def set_default_kwarg(key, default): \n kwargs[key] = kwargs.get(key, default)\n\n set_default_kwarg('stdin', PIPE)\n set_default_kwarg('stdout', PIPE)\n set_default_kwarg('stderr', PIPE)\n\n\n proc = Popen(arguments, **kwargs)\n stdout, stderr = proc.communicate(input)\n \n if encoding is not None:\n stdout = stdout.decode(encoding=encoding)\n stderr = stderr.decode(encoding=encoding)\n\n result = RunResult(proc.returncode, stdout, stderr)\n \n if fail_on_error and proc.returncode != 0:\n raise ProcessError(' '.join(arguments), result)\n\n return result", "def exec_and_return(execargs):\n return subprocess.call(execargs)", "def run_subprocess(args, work_dir):\n process = subprocess.Popen(args, cwd=work_dir)\n process.communicate()\n assert process.returncode == 0", "def run_command(self, cmd, expects=0, shell=False, stdout=PIPE, stderr=PIPE):\n \n # If the command argument is a string\n if isinstance(cmd, str):\n cmd = cmd.split(' ')\n \n # Open the process\n try:\n proc = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)\n out, err = proc.communicate()\n \n # Make sure the expected return code is found\n if not proc.returncode == expects:\n self.die('Failed to run command \\'{0}\\', ERROR={1}'.format(str(cmd), err))\n \n # Return exit code / stdout / stderr\n return proc.returncode, out, err\n except Exception as e:\n self.die('Failed to run command \\'{0}\\': ERROR={1}'.format(str(cmd), str(e)))", "def run_command(cmd, redirect_output=True, check_exit_code=True):\n # subprocess模块用于产生子进程\n if redirect_output:\n stdout = subprocess.PIPE\n else:\n stdout = None\n # cwd 参数指定子进程的执行目录为ROOT,执行cwd 函数\n proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)\n # 使用communicate() 返回值为 (stdoutdata , stderrdata )\n output = proc.communicate()[0]\n if check_exit_code and proc.returncode != 0:\n # 程序不返回0,则失败\n raise Exception('Command \"%s\" failed.\\n%s' % (' '.join(cmd), output))\n return output", "def exec_cmd(cmd):\n print(' '.join(str(e) for e in cmd))\n try:\n res = subprocess.run(cmd, capture_output=True, check=True)\n print(res.stdout.decode(\"utf8\"))\n return res\n except subprocess.CalledProcessError as err:\n logging.error(err.stderr)\n raise err", "def run_subprocess(self, input_value):\n try:\n proc = Popen([\"python\", self.SCRIPT_NAME],\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE)\n out_value, err_value = proc.communicate(\n 
input_value.encode(self.ENCODING),\n timeout=self.PROCESS_TIMEOUT)\n except TimeoutExpired:\n proc.kill()\n out_value, err_value = proc.communicate()\n return out_value.decode(self.ENCODING), err_value.decode(self.ENCODING)", "def execute_command(self, args, daemon=False):\n code, out, err = 0, {}, {}\n\n try:\n logger.info(f\"Running local process {args}\")\n result = subprocess.run(\n args,\n check=True,\n shell=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n logger.info(f\"Process Status {result}\")\n code = result.returncode\n out = result.stdout\n err = result.stderr\n\n except Exception as e:\n logger.info(f\"Process exception {repr(e)}\")\n code = -1\n err = repr(e)\n finally:\n if code == 0:\n out_str = str(out) if out else \"ok\"\n logger.info(f\"Process stdout {out_str}\")\n return True, out_str\n else:\n err_str = str(err) if err else \"error\"\n logger.info(f\"Process stderr {err_str}\")\n return False, err_str", "def execute(self):\n\n (output, error) = self.process.communicate()\n\n if self.process.returncode != 0:\n decoded = self.decode_output(error)\n\n if not decoded:\n return \"Unkown error. for %s\" % (self.command)\n\n print(decoded)\n exit(1)\n return self.decode_output(output)", "def call_subprocess_Popen(command, **params):\r\n if 'stdout' in params or 'stderr' in params:\r\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\r\n null = open(os.devnull, 'wb')\r\n # stdin to devnull is a workaround for a crash in a weird Windows\r\n # environement where sys.stdin was None\r\n params.setdefault('stdin', null)\r\n params['stdout'] = null\r\n params['stderr'] = null\r\n p = subprocess_Popen(command, **params)\r\n p.wait()\r\n return p.returncode", "def exec_command(command):\n exit_code = 1\n stdo = ''\n stde = ''\n from subprocess import Popen, PIPE\n try:\n pobj = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)\n #pobj.wait()\n stdo, stde = pobj.communicate()\n exit_code = pobj.returncode\n except:\n print \"Unexpected error at exec_command:\", sys.exc_info()\n import platform\n s = traceback.format_exc()\n logStr = \" exec command error : error\\n> stderr:\\n%s\\n\" %s\n error = platform.node()+\"-\"+logStr\n return (1,error,\"\")\n return (exit_code, stdo, stde)", "def run(args, **kwargs):\n p = subprocess.Popen(list(map(str, args)), **kwargs)\n\n try:\n p.wait()\n except KeyboardInterrupt as err:\n p.kill()\n raise err\n\n return p.returncode", "def run_shell_command_regular(args):\n try:\n output = subprocess.check_output(args,\n shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n logger.warning(\"Failed in shell command: %s, output: %s\",\n args, ex.output)\n return ex.returncode, native_string(ex.output)\n\n return 0, native_string(output)", "def execute(cmd, fail_ok=False, merge_stderr=False):\n cmdlist = shlex.split(cmd)\n result = ''\n result_err = ''\n stdout = subprocess.PIPE\n stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE\n proc = subprocess.Popen(cmdlist, stdout=stdout, stderr=stderr)\n result, result_err = proc.communicate()\n result = result.decode('utf-8')\n if not fail_ok and proc.returncode != 0:\n raise exceptions.CommandFailed(proc.returncode, cmd, result,\n result_err)\n return result", "def execute(*cmd, **kwargs):\n process_input = kwargs.pop('process_input', None)\n check_exit_code = kwargs.pop('check_exit_code', [0])\n ignore_exit_code = False\n if isinstance(check_exit_code, bool):\n ignore_exit_code = not check_exit_code\n check_exit_code 
= [0]\n elif isinstance(check_exit_code, int):\n check_exit_code = [check_exit_code]\n delay_on_retry = kwargs.pop('delay_on_retry', True)\n attempts = kwargs.pop('attempts', 1)\n run_as_root = kwargs.pop('run_as_root', False)\n shell = kwargs.pop('shell', False)\n\n if len(kwargs):\n raise exception.SysinvException(_('Got unknown keyword args '\n 'to utils.execute: %r') % kwargs)\n\n if run_as_root and os.geteuid() != 0:\n cmd = ['sudo', 'sysinv-rootwrap', CONF.rootwrap_config] + list(cmd)\n\n cmd = [str(c) for c in cmd]\n\n while attempts > 0:\n attempts -= 1\n try:\n LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))\n _PIPE = subprocess.PIPE # pylint: disable=E1101\n\n if os.name == 'nt':\n preexec_fn = None\n close_fds = False\n else:\n preexec_fn = _subprocess_setup\n close_fds = True\n\n obj = subprocess.Popen(cmd,\n stdin=_PIPE,\n stdout=_PIPE,\n stderr=_PIPE,\n close_fds=close_fds,\n preexec_fn=preexec_fn,\n shell=shell)\n result = None\n if process_input is not None:\n result = obj.communicate(process_input)\n else:\n result = obj.communicate()\n obj.stdin.close() # pylint: disable=E1101\n _returncode = obj.returncode # pylint: disable=E1101\n LOG.debug(_('Result was %s') % _returncode)\n if result is not None and six.PY3:\n (stdout, stderr) = result\n # Decode from the locale using using the surrogateescape error\n # handler (decoding cannot fail)\n stdout = os.fsdecode(stdout)\n stderr = os.fsdecode(stderr)\n result = (stdout, stderr)\n if not ignore_exit_code and _returncode not in check_exit_code:\n (stdout, stderr) = result\n raise exception.ProcessExecutionError(\n exit_code=_returncode,\n stdout=stdout,\n stderr=stderr,\n cmd=' '.join(cmd))\n return result\n except exception.ProcessExecutionError:\n if not attempts:\n raise\n else:\n LOG.debug(_('%r failed. 
Retrying.'), cmd)\n if delay_on_retry:\n greenthread.sleep(random.randint(20, 200) / 100.0)\n finally:\n # NOTE(termie): this appears to be necessary to let the subprocess\n # call clean something up in between calls, without\n # it two execute calls in a row hangs the second one\n greenthread.sleep(0)", "def RunShellWithReturnCode(command, print_output=False,\n universal_newlines=True,\n env=os.environ):\n logging.info(\"Running %s\", command)\n p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=use_shell, universal_newlines=universal_newlines,\n env=env)\n if print_output:\n output_array = []\n while True:\n line = p.stdout.readline()\n if not line:\n break\n print line.strip(\"\\n\")\n output_array.append(line)\n output = \"\".join(output_array)\n else:\n output = p.stdout.read()\n p.wait()\n errout = p.stderr.read()\n if print_output and errout:\n print >>sys.stderr, errout\n p.stdout.close()\n p.stderr.close()\n return output, p.returncode", "def GetStatusOutput(command, **kwargs):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, bufsize=1,\n **kwargs)\n output = proc.communicate()[0]\n result = proc.returncode\n\n return (result, output)", "def _execute_cmd(args, silent = False):\n import subprocess\n\n sys.stdout.flush()\n\n # For Windows we need to use the shell so the path is searched (Python/Windows bug)\n # For Android, using the shell complicates things\n p = subprocess.Popen(args, shell=sys.platform.startswith('win'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (std_out_str, std_err_str) = p.communicate()\n returncode = p.returncode\n\n clean_std_out_str = std_out_str.translate(None,'\\r')\n clean_std_err_str = std_err_str.translate(None,'\\r')\n\n if (returncode != 0):\n raise RuntimeError(\"Error (%d) executing command: %s\" % (returncode, \" \".join(args)))\n\n return clean_std_out_str", "def execute(command, *args, **kwargs):\r\n wait = kwargs.pop('wait', True)\r\n process = Process(command, args, env=kwargs.pop('env', None))\r\n process.start()\r\n if not wait:\r\n return process\r\n process.wait()\r\n return process.exit_code, process.read(), process.eread()", "def system(command):\n print('[system] {}'.format(command))\n p = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n rc = p.returncode\n if PY3:\n output = output.decode(\"ascii\")\n err = err.decode(\"ascii\")\n return rc, output, err", "def execute_cmd(cmd, verb=False):\n if verb:\n print(\"Executing: {}\".format(cmd))\n\n p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n code = p.returncode\n if code:\n sys.exit(\"Error {}: {}\".format(code, err))\n return out, err", "def run_commands(*commands: str, **kwargs) -> Tuple[Optional[str], Optional[str], int]:\n command = ' ; '.join(commands)\n # Indirectly waits for a return code.\n process = subprocess.run(command, **kwargs)\n stdout = process.stdout\n stderr = process.stderr\n # Decode stdout and stderr to strings if needed.\n if isinstance(stdout, bytes):\n stdout = str(stdout, 'utf-8').strip()\n if isinstance(stderr, bytes):\n stderr = str(stderr, 'utf-8').strip()\n return stdout, stderr, process.returncode", "def get_exitcode_stdout_stderr(cmd):\n args = shlex.split(cmd)\n\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n out = out.decode('utf-8')\n exitcode = proc.returncode\n #\n return exitcode, out, err", "async def checked_run(cmd, 
env=None):\n\n # Start the subprocess.\n logging.info('Running: %s', await expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, env=env,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n lines = []\n while True:\n line = await p.stdout.readline()\n if not line:\n break\n line = line.decode()[:-1]\n lines.append(line)\n logging.info(line)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n output = '\\n'.join(lines)[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, await expand_cmd_str(cmd), output))\n\n return output", "def run_command(shell_command, get_output):\n command_ran = subprocess.run(shell_command, capture_output=get_output)\n return command_ran", "def create_subprocess(command, args):\n\n proc = subprocess.Popen([command] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, _ = proc.communicate()\n\n return proc.returncode, output", "def get_exitcode_stdout_stderr(cmd):\n args = shlex.split(cmd)\n\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n exitcode = proc.returncode\n #\n # return exitcode, out.decode(\"utf-8\"), err\n return out.decode(\"utf-8\")", "def run_this(command_to_run, cwd=os.getcwd()):\n slab_logger.debug('Running shell command \"%s\"' % command_to_run)\n try:\n output = subprocess.Popen(command_to_run,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n close_fds=True,\n cwd=cwd)\n\n myinfo = output.communicate()[0]\n myinfo.strip()\n return(output.returncode, myinfo)\n except OSError, ex:\n slab_logger.error(ex)\n return (1, str(ex))", "def _check_output(*args, **kwargs):\n kwargs['stdout'] = subprocess.PIPE\n p = subprocess.Popen(*args, **kwargs)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise ValueError(\n 'subprocess exited with return code %s' % p.returncode\n )\n return stdout", "def run_shell_command(command, checkReturnValue=True, verbose=False):\n process = subprocess.Popen(\n command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n bufsize=1)\n outText = \"\"\n\n for line in iter(process.stdout.readline, ''):\n if verbose:\n sys.stdout.write(line)\n outText += line\n\n process.communicate()[0]\n \"\"\"\n returnValue = process.returncode\n if checkReturnValue and (returnValue != 0):\n raise Exception(outText)\n \"\"\"\n return outText", "def _execute(self, command):\n _, stdout, stderr = self.ssh_client.exec_command(command)\n exit_code = stdout.channel.recv_exit_status()\n stdout = stdout.read().decode().strip()\n stderr = stderr.read().decode().strip()\n\n return exit_code, stdout, stderr", "def get_output(cmd, err=False, returncode=0):\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n stderr = STDOUT if err else sys.stderr\n try:\n output = check_output(cmd, stderr=stderr).decode('utf8', 'replace')\n except CalledProcessError as e:\n if e.returncode != returncode:\n raise\n return e.output.decode('utf8', 'replace')\n else:\n if returncode != 0:\n raise CalledProcessError(0, cmd, output.encode('utf8'), stderr)\n return output", "def _process_command(self, command, stdout=None, supress_dry_run=False):\n logging.debug('Executing shell command: %s', command)\n if (self._dry_run and supress_dry_run) or not self._dry_run:\n prc = Popen(command, 
shell=True, stdout=stdout)\n std = list(prc.communicate())\n if std[0] is not None:\n std[0] = std[0].decode('utf-8')\n return prc.returncode, std\n return 0, ('', '')", "def exec_proc(self):\n\n\t\ttry:\n\t\t\tp = os.system(self.cmd)\n\t\texcept:\n\t\t\t# Lock stderr so that we can write safely\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_EX)\n\t\t\tsys.stderr.flush()\n\t\t\t# print error message\n\t\t\tsys.stderr.write(\"%s running %s\\nraised %s: %s\\n\" % \\\n\t\t\t\t(self.name, self.cmd, \\\n\t\t\t\tsys.exc_info()[0], sys.exc_info()[1]))\n\t\t\tsys.stderr.flush()\n\t\t\t# unlock stderr\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_UN)\n\t\t\t# Always return. Otherwise, semaphore counter\n\t\t\t# will not be incremented. \n\t\t\treturn\n\t\t# If the command errors out\n\t\tif p != 0:\n\t\t\t# Lock stderr so that we can write safely\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_EX)\n\t\t\tsys.stderr.flush()\n\t\t\t# print error code\n\t\t\tsys.stderr.write(\"Error running: %s\\n\" % self.cmd)\n\t\t\tsys.stderr.write(\"%s returned error code %d\\n\" \\\n\t\t\t\t% (self.host, p))\n\t\t\tsys.stderr.flush()\n\t\t\t# unlock stderr\n\t\t\tfcntl.lockf(sys.stderr, fcntl.LOCK_UN)", "def run_cmd(cmd, args, path=None, raise_error=True):\n\n if path is not None:\n # Transparently support py.path objects\n path = str(path)\n\n p = sp.Popen([cmd] + list(args), stdout=sp.PIPE, stderr=sp.PIPE,\n cwd=path)\n streams = tuple(s.decode('latin1').strip() for s in p.communicate())\n return_code = p.returncode\n\n if raise_error and return_code != 0:\n raise RuntimeError(\n \"The command `{0}` with args {1!r} exited with code {2}.\\n\"\n \"Stdout:\\n\\n{3}\\n\\nStderr:\\n\\n{4}\".format(\n cmd, list(args), return_code, streams[0], streams[1]))\n\n return streams + (return_code,)", "def executeCommand(commandtoexecute):\n try:\n _output = commands.getstatusoutput(commandtoexecute)\n except Exception as er:\n print \"not able to execute command\"\n return False\n return _output", "def command(arguments, **kwargs):\n arguments = _get_command_executable(arguments)\n LOG.info('Running command: %s' % ' '.join(arguments))\n process = subprocess.Popen(\n arguments,\n stdout=subprocess.PIPE,\n **kwargs)\n out, _ = process.communicate()\n return out, process.returncode", "def get_exitcode_stdout_stderr(cmd):\n args = shlex.split(cmd)\n\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n exitcode = proc.returncode\n #\n return exitcode, out, err" ]
[ "0.75895786", "0.7547545", "0.7526934", "0.7474151", "0.7464935", "0.73460895", "0.72808135", "0.723191", "0.7178856", "0.71782106", "0.71392924", "0.71354", "0.7107621", "0.70249635", "0.6990342", "0.6971521", "0.6964739", "0.69470686", "0.6945275", "0.69408506", "0.69314843", "0.6905013", "0.6887141", "0.68841237", "0.6879223", "0.6873651", "0.6866964", "0.6859755", "0.6856754", "0.6832878", "0.68223256", "0.6820726", "0.68124706", "0.68102354", "0.6799682", "0.6796297", "0.6785285", "0.6783278", "0.67701954", "0.67683107", "0.676073", "0.675345", "0.67431647", "0.6737114", "0.67090356", "0.67043203", "0.6696629", "0.66907215", "0.66872096", "0.667977", "0.6664529", "0.6659298", "0.66219866", "0.66155964", "0.66145545", "0.65912396", "0.6589902", "0.6578979", "0.65782523", "0.65758187", "0.65734", "0.6569463", "0.6567333", "0.6551537", "0.6543827", "0.65349025", "0.65338933", "0.652009", "0.6511379", "0.6511244", "0.65025556", "0.64944863", "0.6487473", "0.64858484", "0.6485003", "0.6484484", "0.6478077", "0.64699864", "0.64672065", "0.64577836", "0.6457469", "0.64500517", "0.64455384", "0.6445209", "0.6433773", "0.64240676", "0.64226705", "0.64223295", "0.6420948", "0.64131135", "0.64104366", "0.6408726", "0.63965124", "0.63953716", "0.63841105", "0.6380839", "0.6370471", "0.6369848", "0.6367101", "0.63582504", "0.6358108" ]
0.0
-1
Starts a subprocess and returns a handle to the process.
def StartCmd(args, cwd=None, shell=False, env=None):
  _ValidateAndLogCommand(args, cwd, shell)
  return Popen(
      args,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      shell=shell,
      cwd=cwd,
      env=env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)", "def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)", "def start(self):\r\n return self.start_subprocess()", "def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )", "def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()", "def start_process(self, args):\n try:\n with open(os.devnull, 'w') as devnull:\n popenObj = subprocess.Popen(\n args, stdout=devnull, stderr=subprocess.PIPE, cwd=\"/tmp/\")\n popenObj.name = args\n return popenObj\n except Exception as e:\n self.logger.error(\n \"Cannot start process %s due to reason:%s\", args, e)\n raise e", "def run_subprocess(self, *cmd_and_args):\n\n command_line = \" \".join(cmd_and_args)\n self.logger.debug(\"Running: %s\", command_line)\n\n return subprocess.Popen(command_line, shell=True, close_fds=True)", "def open_subprocess(self, args_, subprocess_key=None):\n\n if subprocess_key in self.subprocess and self.subprocess[subprocess_key].poll is not None:\n # TODO better error class\n\n raise AssertionError(\"process '%s'(pid:%s) already exist and still running\" % (\n subprocess_key, self.subprocess[subprocess_key].pid))\n\n child_process = subprocess.Popen(args_)\n if subprocess_key is None:\n subprocess_key = str(child_process.pid)\n self.subprocess[subprocess_key] = child_process\n str_args = \" \".join(map(str, args_))\n self.log(\"open subprocess pid:%s, cmd='%s'\" % (child_process.pid, str_args))\n\n return child_process.pid", "def RunProcess(self, command, env=None, stdin=None, stdout=None, stderr=None):\n\n # merge specified env with OS env\n myenv = os.environ.copy()\n if env is not None:\n myenv.update(env)\n\n try:\n process = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr, env=myenv, bufsize=0)\n return process\n except:\n print(\"Unexpected error when launching process:\")\n print(\" \", command)\n print(\" \", env)\n raise", "def subprocess_Popen(command, **params):\r\n startupinfo = None\r\n if os.name == 'nt':\r\n startupinfo = subprocess.STARTUPINFO()\r\n try:\r\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n except AttributeError:\r\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\r\n\r\n # Anaconda for Windows does not always provide .exe files\r\n # in the PATH, they also have .bat files that call the corresponding\r\n # executable. 
For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\r\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\r\n # execute \"g++\" without extensions.\r\n # (Executing \"g++.bat\" explicitly would also work.)\r\n params['shell'] = True\r\n\r\n # Using the dummy file descriptors below is a workaround for a\r\n # crash experienced in an unusual Python 2.4.4 Windows environment\r\n # with the default None values.\r\n stdin = None\r\n if \"stdin\" not in params:\r\n stdin = open(os.devnull)\r\n params['stdin'] = stdin.fileno()\r\n\r\n try:\r\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\r\n finally:\r\n if stdin is not None:\r\n del stdin\r\n return proc", "def Popen(self, *unargs, **kwargs):\r\n cmdline = None\r\n if 'args' in kwargs:\r\n cmdline = kwargs['args']\r\n else:\r\n cmdline = unargs[0]\r\n return PopenWrapper.WaitWrapper(subprocess_.Popen(*unargs, **kwargs), self, cmdline)", "def subprocess_nowait(cmd, shell=False, cwd=None, env=None):\n # type: (str, bool, str, dict) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)", "def Spawn(proc):\n proc.start()\n return proc", "def start_subprocess(self):\r\n errmsg = ('\\n\\nPlease install GNU Octave and put it in your path\\n')\r\n ON_POSIX = 'posix' in sys.builtin_module_names\r\n if self.use_pty:\r\n master, slave = pty.openpty()\r\n self.wfid, self.rfid = master, master\r\n rpipe, wpipe = slave, slave\r\n else:\r\n self.rfid, wpipe = os.pipe()\r\n rpipe, self.wfid = os.pipe()\r\n kwargs = dict(close_fds=ON_POSIX, bufsize=0, stdin=rpipe,\r\n stderr=wpipe, stdout=wpipe)\r\n if os.name == 'nt':\r\n startupinfo = subprocess.STARTUPINFO()\r\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n kwargs['startupinfo'] = startupinfo\r\n try:\r\n proc = subprocess.Popen(['octave', '-q', '--braindead'],\r\n **kwargs)\r\n except OSError: # pragma: no cover\r\n raise Oct2PyError(errmsg)\r\n else:\r\n self.reader = _Reader(self.rfid, self.read_queue)\r\n return proc", "def run_subprocess(cmd):\n subprocess.Popen(cmd, stdin =subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,)", "def call_subprocess_Popen(command, **params):\r\n if 'stdout' in params or 'stderr' in params:\r\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\r\n null = open(os.devnull, 'wb')\r\n # stdin to devnull is a workaround for a crash in a weird Windows\r\n # environement where sys.stdin was None\r\n params.setdefault('stdin', null)\r\n params['stdout'] = null\r\n params['stderr'] = null\r\n p = subprocess_Popen(command, **params)\r\n p.wait()\r\n return p.returncode", "def spawn(self, arguments=None, environment=None):\n return subprocess.Popen(\n args=[self.executable] + ([] or arguments),\n # do not redirect std streams\n # this fakes the impression of having just one program running\n stdin=None,\n stdout=None,\n stderr=None,\n env=environment,\n )", "def process_run(cmd_string, stdin=None):\n process_object=subprocess.Popen(shlex.split(cmd_string),\n stdin=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return process_object", "def start_process(cmd, supress_output=False):\n logging.debug(cmd)\n logging.error(\"[tony]cmd:%r\" % (cmd))\n proc = subprocess.Popen(cmd, stdout=None, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rtn_code = proc.returncode\n\n if supress_output is False:\n if out:\n logging.info(out)\n if err:\n logging.error(err)\n\n if rtn_code == 0 or rtn_code is None:\n logging.info('Success: 
Process return code %s', str(rtn_code))\n else:\n logging.error('Error: Process return code %s', str(rtn_code))\n sys.exit(1)", "def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False,\n universal_newlines=False, close_fds=True, env=None):\n LOG.info(\"Running cmd '%s'\" % \" \".join(cmdline))\n kwargs = {}\n kwargs['stdout'] = stdout and subprocess.PIPE or None\n kwargs['stderr'] = stderr and subprocess.PIPE or None\n kwargs['stdin'] = stdin and subprocess.PIPE or None\n kwargs['universal_newlines'] = universal_newlines\n kwargs['close_fds'] = close_fds\n kwargs['env'] = env\n try:\n proc = subprocess.Popen(cmdline, **kwargs)\n except OSError, e: # noqa\n if e.errno == errno.ENOENT:\n raise CommandNotFound\n else:\n raise\n return proc", "def start(self):\n if self._is_launched.is_set():\n self._log(\"warning\", \"try to start an already started process\")\n return False\n\n self._popen = Popen(shlex.split(self.command), bufsize=0, executable=None, stdin=PIPE, stdout=PIPE,\n stderr=self.stderr, close_fds=False, shell=False, cwd=None, env=None,\n universal_newlines=True, startupinfo=None, creationflags=0,\n preexec_fn=lambda: os.nice(self._priority))\n\n self._defunctdog_thread.start()\n self._stdin_thread.start()\n self._stdout_thread.start()\n register_thread(self)\n self._is_launched.set()\n self._is_running.set()", "def run(self, args=(), with_chroot=False, blocking=True, setsid=False, **kw):\n self.clean_environment()\n\n cmdline = self.cmdline(args)\n TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))\n process = subprocess.Popen(\n cmdline,\n cwd=self._pex if with_chroot else os.getcwd(),\n preexec_fn=os.setsid if setsid else None,\n **kw)\n return process.wait() if blocking else process", "def spawn_subprocess(args, loop=None):\n if not _IS_XOS_ASYNC:\n return spawn_subprocess_not_xos(args, loop=loop)\n else:\n return spawn_subprocess_xos(args, loop=loop)", "def run(self, args=(), with_chroot=False, blocking=True, setsid=False,\r\n stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):\r\n import subprocess\r\n self.clean_environment(forking=True)\r\n\r\n cmdline = self.cmdline(args)\r\n TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))\r\n process = subprocess.Popen(cmdline, cwd=self._pex if with_chroot else os.getcwd(),\r\n preexec_fn=os.setsid if setsid else None,\r\n stdin=stdin, stdout=stdout, stderr=stderr)\r\n return process.wait() if blocking else process", "def shell(args, wait=True, msg=None):\n\n # Fix Windows error if passed a string\n if isinstance(args, str):\n args = shlex.split(args, posix=(os.name != \"nt\"))\n if os.name == \"nt\":\n args = [arg.replace('/', '\\\\') for arg in args]\n\n if wait:\n proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate(input=msg)\n exitcode = proc.returncode\n if exitcode != 0:\n debug('<<<< shell call failed; error message below >>>>')\n debug(err.decode('utf-8'))\n debug('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n raise IOError()\n return out\n else:\n DETACHED_PROCESS = 0x00000008\n proc = Popen(args, creationflags=DETACHED_PROCESS)", "def popen(self, args, **kwargs):\n self.log.debug(\"popen %s\", ' '.join(args))\n return vaping.io.subprocess.Popen(args, **kwargs)", "def run_subprocess(command, environment=None, shell=False, raise_on_error=True):\n proc = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell,\n env=environment)\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n if raise_on_error:\n raise 
RuntimeError('{}\\n{}'.format(stderr, stdout))\n return stdout, stderr, proc.returncode", "def _spawn(self, protocol, args, env=None):\n return reactor.spawnProcess(protocol, self.cmd, args, env=env)", "def run_process(self, inp=\"\"):\n return subprocess.run(self.binary,\n input=inp,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def popenAndCall(onExit, *popenArgs, **popenKWArgs):\n def runInThread(onExit, popenArgs, popenKWArgs):\n global proc\n proc = subprocess.Popen(*popenArgs, **popenKWArgs)\n print(type(proc))\n proc.wait()\n onExit()\n return\n\n thread = threading.Thread(target=runInThread,\n args=(onExit, popenArgs, popenKWArgs))\n thread.start()\n\n return thread # returns immediately after the thread starts", "def create_process(self, args=[], *popenargs, **kwargs):\n try:\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs.setdefault('startupinfo', startupinfo)\n except:\n pass\n kwargs.setdefault('universal_newlines', True)\n kwargs.setdefault('stdin', sys.stdin)\n return subprocess.Popen(self.build_args(args), *popenargs, **kwargs)", "def call(self):\n\n process = subprocess.Popen(self._cmd, stdout=self._stdout, stderr=self._stderr,\n shell=isinstance(self._cmd, basestring), env=self._env, cwd=self._cwd)\n returnData = process.communicate()\n\n return ProcessResult(process.returncode, returnData[0], returnData[1])", "def _launchAgentProcess( self ):\n return subprocess.Popen( [ sys.executable, os.path.join( sys.path[0], 'agentProcess.py' ), str( _processPid ) ], stdin=subprocess.PIPE, stdout=subprocess.PIPE )", "def _proc_exec_wait(command_line, silent=False):\n result = (-1, None, None)\n command = None\n proc = None\n\n if _platform_windows:\n command_line = command_line.replace(\"\\\\\", \"/\")\n\n try:\n command = shlex.split(command_line)\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Unable to parse the given command line: {0}\\n\"\n \"Error: {1}.\".format(command_line, e)\n )\n return result\n\n try:\n sp_kwargs = {\n \"stdout\": subprocess.PIPE,\n \"stderr\": subprocess.PIPE,\n \"startupinfo\": None,\n \"env\": os.environ,\n }\n\n if _platform_windows:\n sp_kwargs[\"startupinfo\"] = subprocess.STARTUPINFO()\n sp_kwargs[\"startupinfo\"].dwFlags = (\n subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW\n )\n sp_kwargs[\"startupinfo\"].wShowWindow = subprocess.SW_HIDE\n\n proc = subprocess.Popen(command, **sp_kwargs)\n stdoutdata, stderrdata = proc.communicate()\n status = proc.returncode\n result = (status, stdoutdata.decode(\"utf8\"), stderrdata.decode(\"utf8\"))\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Could not open the process: '{0}'\\n\"\n \"Error: {1}.\".format(command[0], e)\n )\n finally:\n if proc:\n if proc.stdout:\n proc.stdout.close()\n\n if proc.stderr:\n proc.stderr.close()\n\n return result", "def subprocess_run(args, **kwargs_in):\n kwargs = kwargs_in.copy()\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.PIPE\n logger.debug(f'running a subprocess {args} {kwargs}')\n output = subprocess.run(args, **kwargs)\n logger.debug(f' returned: {output.stdout}')\n return output", "def spawn(stdout, command, **options):\n # grab arguments that we care about\n stderr = options.pop('stderr', None)\n daemon = options.pop('daemon', True)\n\n # empty out the first generator result if a coroutine is passed\n if hasattr(stdout, 'send'):\n res = six.next(stdout)\n res and P.write(res)\n if 
hasattr(stderr, 'send'):\n res = six.next(stderr)\n res and P.write(res)\n\n # spawn the sub-process\n return process(command, stdout=stdout, stderr=stderr, **options)", "async def _run_subprocess(\n cmd: str,\n allow_params: bool,\n params: Dict[str, ParamValueT],\n) -> Dict[str, Any]:\n cmd_str = cmd\n if allow_params:\n if params[\"shell_params\"] == []:\n cmd_str = cmd.format([''])\n else:\n cmd_str = cmd.format(*params.get('shell_params', ['']))\n\n logging.info(\"Running command: %s\", cmd_str)\n\n cmd_list = shlex.split(cmd_str)\n\n process = await asyncio.create_subprocess_exec(\n *cmd_list,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n stdout, stderr = await process.communicate()\n\n return {\n \"returncode\": process.returncode,\n \"stdout\": stdout.decode(),\n \"stderr\": stderr.decode(),\n }", "def shell_cmd(*args):\n proc = subprocess.run(args)\n returncode = proc.returncode\n if returncode != 0:\n raise RuntimeError(\n f\"Command {args} failed with return code {returncode}\")\n return proc", "def run_subprocess(self, input_value):\n try:\n proc = Popen([\"python\", self.SCRIPT_NAME],\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE)\n out_value, err_value = proc.communicate(\n input_value.encode(self.ENCODING),\n timeout=self.PROCESS_TIMEOUT)\n except TimeoutExpired:\n proc.kill()\n out_value, err_value = proc.communicate()\n return out_value.decode(self.ENCODING), err_value.decode(self.ENCODING)", "def spawn(*args):\n # Adapted from ranger.ext.spawn\n process = Popen(args, stdout=PIPE, shell=True)\n stdout, stderr = process.communicate()\n return stdout.decode('utf-8')", "def _launch_command(args, out_cb, err_cb, done=None, **kwargs):\n\n def pump_stream(callback, stream):\n \"\"\"Pump the stream\"\"\"\n for line in stream:\n callback(line)\n callback(None)\n\n def joiner():\n \"\"\"Wait for streams to finish, then call done callback\"\"\"\n for th in threads:\n th.join()\n done(process)\n\n kwargs = kwargs.copy()\n in_data = kwargs.get(\"input\")\n if \"input\" in kwargs:\n del kwargs[\"input\"]\n assert kwargs.get(\"stdin\") is None, kwargs[\"stdin\"]\n kwargs[\"stdin\"] = PIPE\n elif \"stdin\" not in kwargs:\n kwargs[\"stdin\"] = DEVNULL\n kwargs.setdefault(\"stdout\", PIPE)\n kwargs.setdefault(\"stderr\", PIPE)\n kwargs[\"universal_newlines\"] = True # Text streams, not byte streams\n process = Popen(args, **kwargs)\n threads = []\n if process.stdout:\n thread = Thread(\n target=pump_stream, args=(out_cb, process.stdout), daemon=True\n )\n thread.start()\n threads.append(thread)\n if process.stderr:\n thread = Thread(\n target=pump_stream, args=(err_cb, process.stderr), daemon=True\n )\n thread.start()\n threads.append(thread)\n if done and threads:\n Thread(target=joiner, daemon=True).start()\n if in_data:\n process.stdin.write(str(in_data, \"utf-8\"))\n process.stdin.close()\n return process", "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output", "def run(cmd):\n print('running', cmd)\n proc = sp.Popen([cmd], shell=True)\n proc.wait()\n assert proc.poll() == 0", "def start(self, stdin=None, stdout=None, stderr=None):\n logging.debug(\"Starting '%s'\", \" \".join(self.cmd_line))\n self.proc = subprocess.Popen(self.cmd_line,\n stdin=stdin,\n 
stdout=stdout if stdout\n else subprocess.PIPE,\n stderr=stderr,\n env=self.env)\n self.thread = threading.Thread(target=self.tail)\n self.thread.daemon = True\n self.thread.start()\n self.running = True", "def subprocess_run(cmd, ignore_failure=False, shell=True):\n try:\n proc = subprocess.Popen(\n cmd,\n shell=shell,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rc = proc.returncode\n\n except OSError as exc:\n raise RuntimeError('Failed to run ' + cmd + ': [Errno: %d] ' %\n exc.errno + exc.strerror + ' [Exception: ' +\n type(exc).__name__ + ']')\n if (not ignore_failure) and (rc != 0):\n raise RuntimeError('(%s) failed with rc=%s: %s' %\n (cmd, rc, err))\n return out", "def exec_process(cmdline, silent, input=None, **kwargs):\n try:\n sub = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n stdout, stderr = sub.communicate(input=input)\n returncode = sub.returncode\n if not silent:\n sys.stdout.write(stdout)\n sys.stderr.write(stderr)\n except OSError, e:\n if e.errno == 2:\n raise RuntimeError('\"%s\" is not present on this system' % cmdline[0])\n else:\n raise\n if returncode != 0:\n raise RuntimeError('Got return value %d while executing \"%s\", stderr output was:\\n%s' % (returncode, \" \".join(cmdline), stderr.rstrip(\"\\n\")))\n return stdout", "def _staf_start_proc(self,\n command,\n working_dir,\n wait,\n params=[],\n env_vars={},\n location='local'):\n\n staf_request = ('START SHELL COMMAND \"{0}\" WORKDIR \"{1}\" WAIT '\n '{2}s STDERRTOSTDOUT RETURNSTDOUT'.format(unix_style_path(command),\n unix_style_path(working_dir),\n str(wait)))\n if len(params) != 0:\n staf_request += ' PARMS {0}'.format(\" \".join(params))\n\n if len(env_vars) != 0:\n for key in env_vars:\n staf_request += ' ENV {0}={1}'.format(key, env_vars[key])\n\n result = self._staf_handle.submit(location, 'process', staf_request)\n\n if result.rc != result.Ok:\n raise CoreError(result.result)\n\n #Return the exit code from the executed command and STDOUT.\n return (int(result.resultObj['rc']), result.resultObj['fileList'][0]['data'])", "def run_subprocess(args, work_dir):\n process = subprocess.Popen(args, cwd=work_dir)\n process.communicate()\n assert process.returncode == 0", "def popen(self, args, bufsize=0, stdin=None, stdout=None, stderr=None, cwd=None, env=None, tty=False, compress=False): \n return subprocess.Popen(args, bufsize=bufsize, cwd=cwd, env=env, stdin=stdin, stdout=stdout, stderr=stderr)", "def popen(command, cwd=None, check=False, detach=False):\n\tif detach:\n\t\treturn spawn(command, cwd)\n\telse:\n\t\tcmd = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)\n\t\tstatus = cmd.wait()\n\t\tres, err = cmd.communicate()\n\t\tif status == 0:\n\t\t\treturn res.decode(\"utf8\")\n\t\telse:\n\t\t\treturn (status, err.decode(\"utf8\"))", "def spawn(cmd, cwd=None):\n\t# FROM: http://stackoverflow.com/questions/972362/spawning-process-from-python\n\t# fork the first time (to make a non-session-leader child process)\n\ttry:\n\t\tpid = os.fork()\n\texcept OSError as e:\n\t\traise RuntimeError(\"1st fork failed: %s [%d]\" % (e.strerror, e.errno))\n\tif pid != 0:\n\t\t# parent (calling) process is all done\n\t\treturn pid\n\t# detach from controlling terminal (to make child a session-leader)\n\tos.setsid()\n\ttry:\n\t\tpid = os.fork()\n\texcept OSError as e:\n\t\traise RuntimeError(\"2nd fork failed: %s [%d]\" % (e.strerror, e.errno))\n\tif pid != 0:\n\t\t# child 
process is all done\n\t\tos._exit(0)\n\t# grandchild process now non-session-leader, detached from parent\n\t# grandchild process must now close all open files\n\ttry:\n\t\tmaxfd = os.sysconf(\"SC_OPEN_MAX\")\n\texcept (AttributeError, ValueError):\n\t\tmaxfd = 1024\n\tfor fd in range(maxfd):\n\t\ttry:\n\t\t\tos.close(fd)\n\t\texcept OSError: # ERROR, fd wasn't open to begin with (ignored)\n\t\t\tpass\n\t# redirect stdin, stdout and stderr to /dev/null\n\tif (hasattr(os, \"devnull\")):\n\t\tREDIRECT_TO = os.devnull\n\telse:\n\t\tREDIRECT_TO = \"/dev/null\"\n\tos.open(REDIRECT_TO, os.O_RDWR) # standard input (0)\n\tos.dup2(0, 1)\n\tos.dup2(0, 2)\n\t# and finally let's execute the executable for the daemon!\n\ttry:\n\t\targs = filter(lambda _:_, map(lambda _:_.strip(), cmd.split(\" \")))\n\t\tpath_to_executable = args[0]\n\t\targs = args[1:]\n\t\tos.execv(path_to_executable, args)\n\texcept Exception as e:\n\t\t# oops, we're cut off from the world, let's just give up\n\t\tos._exit(255)", "def create_subprocess(command, args):\n\n proc = subprocess.Popen([command] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, _ = proc.communicate()\n\n return proc.returncode, output", "def _spawn_simple_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_simple_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n return process_instance", "def spawn(self):\r\n options = self.config.options\r\n\r\n if self.pid:\r\n msg = 'process %r already running' % self.config.name\r\n options.logger.warn(msg)\r\n return\r\n\r\n self.killing = 0\r\n self.spawnerr = None\r\n self.exitstatus = None\r\n self.system_stop = 0\r\n self.administrative_stop = 0\r\n\r\n self.laststart = time.time()\r\n\r\n self._assertInState(ProcessStates.EXITED, ProcessStates.FATAL,\r\n ProcessStates.BACKOFF, ProcessStates.STOPPED)\r\n\r\n self.change_state(ProcessStates.STARTING)\r\n\r\n try:\r\n filename, argv = self.get_execv_args()\r\n except ProcessException as what:\r\n self.record_spawnerr(what.args[0])\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n self.dispatchers, self.pipes = self.config.make_dispatchers(self)\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EMFILE:\r\n # too many file descriptors open\r\n msg = 'too many open files to spawn %r' % self.config.name\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n 
self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n pid = options.fork()\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EAGAIN:\r\n # process table full\r\n msg = ('Too many processes in process table to spawn %r' %\r\n self.config.name)\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n options.close_parent_pipes(self.pipes)\r\n options.close_child_pipes(self.pipes)\r\n return\r\n\r\n if pid != 0:\r\n return self._spawn_as_parent(pid)\r\n\r\n else:\r\n return self._spawn_as_child(filename, argv)", "def execute(args):\n print '################################'\n print 'args: ', args\n p = subprocess.Popen(args, shell=True, executable='/bin/bash')\n # p = subprocess.call(args, shell=True, executable='/bin/bash')\n p.wait()\n return p\n print '################################'", "def execute(command, **kwargs):\n proc = ProcessWrapper(command, **kwargs)\n proc.run()\n return proc.join()", "def subprocess_attach_stdin(cmd, shell=False):\n # type: (str, bool) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, stdin=subprocess.PIPE)", "def call_one(\n self,\n args: List[Any],\n logger: Optional[Any] = None,\n executable: Optional[Any] = None,\n shell: bool = False,\n cwd: Optional[str] = None,\n env: Optional[dict] = None,\n universal_newlines: bool = False,\n startupinfo: Optional[Any] = None,\n creationflags: int = 0,\n processes: Optional[Any] = None,\n ignore_exit_codes: Union[list, Any] = None,\n pid: Optional[str] = None,\n ) -> None:\n proc = None\n\n try:\n log.debug(\"Exec %s \", \" \".join(map(str, args)))\n proc = Proc(\n self.workdir,\n pid=pid,\n request_id=self.request_id,\n filename=self.log_name,\n additional_log_names=self.additional_log_names,\n )\n if self._send_response:\n self._mq.post_success(\n \"_\".join([self._subject, \"start\"]),\n self.data,\n )\n proc.run(\n args,\n executable,\n shell,\n cwd,\n env,\n universal_newlines,\n startupinfo,\n creationflags,\n ignore_exit_codes,\n )\n self.time_start = proc.time_start\n self.time_stop = proc.time_stop\n\n except Exception:\n if self._send_response:\n self._mq.post_error(\n self._subject,\n {\"message\": Exception, \"id\": self.request_id},\n )\n log.exception(\"Unexpected error for command line %s\", args)\n try:\n LOCK.acquire()\n if processes is None:\n processes = []\n processes.append(proc)\n finally:\n LOCK.release()\n log.debug(\"DONE\")", "def start(self, hook_url=None):\n\n self.on_start()\n\n if hook_url:\n self.register(hook_url=hook_url)\n\n else:\n p = Process(target=self.run)\n p.daemon = True\n p.start()\n return p", "def open_persistent_pipe(self):\n if self.proc is not None:\n return\n self.proc = subprocess.Popen([self.herbstclient_path, '--binary-pipe'],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n env=self.env,\n encoding=None, # open stdout/stdin in binary mode\n )", "def call(args, env=None, cwd=None, outputHandler=None, outputEncoding=None, timeout=None, displayName=None, options=None):\n\tif options is None: options = {}\n\tif not timeout: timeout = options.get('process.timeout', 600)\n\t\n\tprocessName = os.path.basename(args[0])\n\t#if not timeout: # too many things don't set it at present\n\t#\traise Exception('Invalid argument to %s call - timeout must always be set explicitly'%processName)\n\n\targs = [x for x in args if x != None]\n\n\tenvirons = os.environ.copy()\n\tif 
env:\n\t\tfor k in env:\n\t\t\tif None == env[k]:\n\t\t\t\tdel environs[k]\n\t\t\telse:\n\t\t\t\tenvirons[k] = env[k]\n\tif not cwd: cwd = os.getcwd()\n\t\n\tlog.info('Executing %s process: %s', processName, ' '.join(['\"%s\"'%s if ' ' in s else s for s in args]))\n\tif cwd != os.getcwd():\n\t\tlog.info('%s working directory: %s', processName, cwd)\n\tif env: \n\t\tlog.info('%s environment overrides: %s', processName, ', '.join(sorted(['%s=%s'%(k, env[k]) for k in env])))\n\ttry:\n\t\tif cwd:\n\t\t\tprocess = subprocess.Popen(args, env=environs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.DEVNULL, cwd=cwd)\n\t\telse:\n\t\t\tprocess = subprocess.Popen(args, env=environs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.DEVNULL)\n\texcept Exception as e:\n\t\traise EnvironmentError('Cannot start process \"%s\": %s'%(args[0], e))\n\n\tif not outputHandler: # use short processName not longer displayName for per-line prefixes, the extra context isn't necessary anyway\n\t\toutputHandler = ProcessOutputHandler.create(processName, options=options)\n\n\t# give the full arguments as the process display name (unless really long) since it's impossible to identify the target otherwise\n\tif not displayName:\n\t\tdisplayName = str(args)\n\t\tif len(displayName)>200: displayName=displayName[:200]+'...]'\n\t(out, err, timedout) = _wait_with_timeout(process, displayName, timeout, True)\n\t\n\tif outputEncoding is None:\n\t\tdecider = options.get('common.processOutputEncodingDecider', None) or defaultProcessOutputEncodingDecider\n\t\toutputEncoding = decider(context=None, executable=args[0])\n\tlog.debug('%s outputEncoding assumed to be: %s', processName, outputEncoding)\n\t\n\t# convert byte buffers to strings\t\n\t# probably best to be tolerant about unexpected chars, given how hard it is to predict what subprocesses will write in \n\tout = str(out, outputEncoding, errors='replace')\n\terr = str(err, outputEncoding, errors='replace')\n\t\n\thasfailed = True\n\ttry:\n\t\tfor l in out.splitlines():\n\t\t\toutputHandler.handleLine(l, False)\n\t\tfor l in err.splitlines():\n\t\t\toutputHandler.handleLine(l, True)\n\t\t\t\n\t\tif timedout: # only throw after we've written the stdout/err\n\t\t\traise BuildException('Terminating process %s after hitting %d second timout' % (processName, timeout))\n\t\t\t\n\t\toutputHandler.handleEnd(process.returncode) # will throw on error\n\t\thasfailed = False\n\t\treturn outputHandler\n\tfinally:\n\t\t# easy-read format\n\t\tif hasfailed:\n\t\t\tlog.debug('Arguments of failed process are: %s' % '\\n '.join(['\"%s\"'%s if ' ' in s else s for s in args]))", "def run(*args, **kwargs):\n _patch_popen(Popen)\n assert len(args) > 0\n\n arguments = []\n\n input = kwargs.pop('input', None)\n fail_on_error = kwargs.pop('fail_on_error', False)\n encoding = kwargs.pop('encoding', None) \n\n if len(args) == 1:\n if isinstance(args[0], string):\n arguments = args[0].split()\n else:\n for i in args:\n if isinstance(i, (list, tuple)):\n for j in i:\n arguments.append(j)\n else:\n arguments.append(i)\n\n def set_default_kwarg(key, default): \n kwargs[key] = kwargs.get(key, default)\n\n set_default_kwarg('stdin', PIPE)\n set_default_kwarg('stdout', PIPE)\n set_default_kwarg('stderr', PIPE)\n\n\n proc = Popen(arguments, **kwargs)\n stdout, stderr = proc.communicate(input)\n \n if encoding is not None:\n stdout = stdout.decode(encoding=encoding)\n stderr = stderr.decode(encoding=encoding)\n\n result = RunResult(proc.returncode, stdout, stderr)\n \n if 
fail_on_error and proc.returncode != 0:\n raise ProcessError(' '.join(arguments), result)\n\n return result", "def run(command, params={}):\n pass_params = {'stdout': PIPE, 'stderr': PIPE}\n pass_params.update(params)\n process = Popen(command, **pass_params)\n System.log_subprocess_output(process)", "def spawn(self):\r\n self.before_spawn()\r\n pid = Subprocess.spawn(self)\r\n if pid is None:\r\n #Remove object reference to decrement the reference count on error\r\n self.fcgi_sock = None\r\n return pid", "def start_bot(self):\n self.proc = subprocess.Popen(\"./start\", stdin=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t cwd=os.path.abspath(self.path))", "def _runProcess(self, cmd, echoStdout = True, **kwargs):\n # Can't use unicode!\n cmd = str(cmd)\n defaultKwargs = {\n 'universal_newlines': True\n }\n if echoStdout:\n defaultKwargs['stdout'] = subprocess.PIPE\n # Don't buffer the output, but echo it as it comes in regardless\n # of newlines, etc\n defaultKwargs['bufsize'] = 1\n else:\n defaultKwargs['stdout'] = tempfile.TemporaryFile()\n defaultKwargs['stderr'] = subprocess.STDOUT\n defaultKwargs.update(kwargs)\n\n env = os.environ.copy()\n env['PATH'] = self.settings['context_build_path'] + ':' + env['PATH']\n env.update(defaultKwargs.get('env', {}))\n defaultKwargs['env'] = env\n\n p = subprocess.Popen(shlex.split(cmd), **defaultKwargs)\n if echoStdout:\n try:\n import fcntl\n fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)\n except ImportError:\n # Windows?\n pass\n if callable(echoStdout):\n outputCallback = echoStdout\n else:\n outputCallback = lambda l: self.writeOutput(l, end = '')\n\n stdThread = threading.Thread(target = self._dumpStdout,\n args = (p, outputCallback))\n stdThread.start()\n while p.poll() is None:\n if self._shouldStop():\n break\n time.sleep(0.1)\n if p.poll() is None:\n # Exited due to shouldStop\n self.writeOutput(\"\\n\\nAborting tests...\")\n while p.poll() is None:\n try:\n p.terminate()\n except OSError:\n # Died already\n pass\n time.sleep(0.1)\n\n if echoStdout:\n # Finish getting output\n stdThread.join()\n\n if not echoStdout:\n tf = defaultKwargs['stdout']\n tf.seek(0)\n return tf", "def subprocess_cmd(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n\n return(proc_stdout)", "def _run_shell(self, command_string: str, cwd: str = '/', print_command: bool = False) -> subprocess.Popen:\n if print_command:\n self.logger.info(command_string)\n return subprocess.Popen(command_string, shell=True, cwd=cwd)", "def spawnProcess(self, processProtocol, executable, args=(), env={},\r\n path=None, uid=None, gid=None, usePTY=0,\r\n childFDs=None):\r\n\r\n proc = DummyProcess(self, executable, args, env, path,\r\n processProtocol, uid, gid, usePTY, childFDs)\r\n processProtocol.makeConnection(proc)\r\n self.spawnedProcesses.append(proc)\r\n return proc", "def start_process(options, args):\n import psutil\n import process_starter\n from synergy.system import process_helper\n\n try:\n pid = process_helper.get_process_pid(options.app)\n if pid is not None:\n if psutil.pid_exists(pid):\n message = 'ERROR: Process %r is already running with pid %r\\n' % (options.app, pid)\n sys.stderr.write(message)\n sys.exit(1)\n\n if not options.interactive:\n # this block triggers if the options.interactive is not defined or is False\n process_helper.start_process(options.app, args)\n else:\n process_starter.start_by_process_name(options.app, args)\n 
except Exception as e:\n sys.stderr.write('Exception on starting %s : %s \\n' % (options.app, str(e)))\n traceback.print_exc(file=sys.stderr)", "def Run(self) -> None:\n logging.info(\"Running %s in a subprocess...\", self)\n self.stdout = tempfile.TemporaryFile()\n self.stderr = tempfile.TemporaryFile()\n self.begin_time = time.time()\n env = os.environ.copy()\n # Give each test program a separate test_tmpdir so they don't overwrite\n # each other when running in parallel.\n env[\"TEST_TMPDIR\"] = tempfile.mkdtemp()\n # Bazel's test sharding protocol:\n # https://docs.bazel.build/versions/master/test-encyclopedia.html\n if self.total_shards > 1:\n env[\"TEST_TOTAL_SHARDS\"] = str(self.total_shards)\n env[\"TEST_SHARD_INDEX\"] = str(self.shard_id)\n\n self.subprocess = subprocess.Popen(\n [_GetPython(), self.path], stdout=self.stdout, stderr=self.stderr,\n env=env)", "def start(self, process_id=None):\n try:\n self.process = psutil.Process(process_id)\n logging.debug(self.process.connections())\n logging.debug(self.process.ppid())\n return \"Process Started\"\n except Exception as e:\n logging.exception(e)\n return \"Process doesnt exists\"", "def run_process(cmdlist):\n\n try:\n proc = subprocess.Popen(cmdlist,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except Exception as exproc:\n cmdstr = ' '.join(cmdlist[:3])\n print_fail('Error opening process!: {} ...'.format(cmdstr), exc=exproc)\n return None\n return proc", "def mock_managed_process(\n *unused_args: str, **unused_kwargs: str\n) -> ContextManager[scripts_test_utils.PopenStub]:\n return contextlib.nullcontext(\n enter_result=scripts_test_utils.PopenStub(alive=False))", "def subprocess(cls, cmd, **kwargs):\r\n def call(args):\r\n return subprocess.call(cmd + args, **kwargs)\r\n return cls(call)", "def start_server_proc(event, server_cmd, checking_env):\n proc = subprocess.Popen(server_cmd, env=checking_env)\n\n # Blocking termination until event is set.\n event.wait()\n\n # If proc is still running, stop it.\n if proc.poll() is None:\n proc.terminate()", "def _run_subprocess(cmd: List[str], args: List[str], env: Optional[Dict[str, str]] = None):\n async def _read_output(stream, logger_instance):\n \"\"\"Read output from command and print it into the right logger.\"\"\"\n while True:\n line = await stream.readline()\n if line == b'':\n break\n logger_instance(line.decode('utf-8').rstrip())\n\n async def _stream_subprocess(cmd, args, env):\n \"\"\"Run subprocess.\"\"\"\n cmd_ = ' '.join(cmd)\n args_ = ' '.join(args)\n process = await asyncio.create_subprocess_shell(f'{cmd_} {args_}',\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=env)\n\n await asyncio.wait([\n _read_output(process.stdout, logger.info),\n _read_output(process.stderr, logger.error)\n ])\n await process.wait()\n if process.returncode is None or process.returncode != 0:\n raise ValueError('Task failed!')\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(_stream_subprocess(cmd, args, env))", "def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):\n return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "def spawn():\n if platform.system() == \"Windows\":\n # HACK https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1243#issuecomment-706668723\n # FIXME Use pexpect or wexpect somehow to fix this\n pytest.xfail(\n \"pexpect fails on Windows\",\n )\n # Disable subprocess timeout if debugging (except coverage), for commodity\n # See 
https://stackoverflow.com/a/67065084/1468388\n tracer = getattr(sys, \"gettrace\", lambda: None)()\n if not isinstance(tracer, (CTracer, type(None))):\n return lambda cmd, timeout=None, *args, **kwargs: PopenSpawn(\n cmd, None, *args, **kwargs\n )\n # Using PopenSpawn, although probably it would be best to use pexpect.spawn\n # instead. However, it's working fine and it seems easier to fix in the\n # future to work on Windows (where, this way, spawning actually works; it's just\n # python-prompt-toolkit that rejects displaying a TUI)\n return PopenSpawn", "def execute(command, *args, **kwargs):\r\n wait = kwargs.pop('wait', True)\r\n process = Process(command, args, env=kwargs.pop('env', None))\r\n process.start()\r\n if not wait:\r\n return process\r\n process.wait()\r\n return process.exit_code, process.read(), process.eread()", "def new_proc(start_gdb=False, val=None):\n p = process(binary.path)\n if start_gdb is True:\n attach_gdb(p)\n return p", "def _spawn_standalone_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n self._cleanup_method(process_instance.id, rsvc)\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_standalone_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def LaunchFile(*params):\n\n file = subprocess.Popen(params)\n file.communicate()\n return file.returncode", "def run(args, **kwargs):\n p = subprocess.Popen(list(map(str, args)), **kwargs)\n\n try:\n p.wait()\n except KeyboardInterrupt as err:\n p.kill()\n raise err\n\n return p.returncode", "def runCommand(command):\n process = subprocess.Popen(command, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n return process.communicate()", "def spawn():\n if platform.system() == \"Windows\":\n # HACK https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1243#issuecomment-706668723\n # FIXME Use pexpect or wexpect somehow to fix this\n pytest.xfail(\n \"pexpect fails on Windows\",\n )\n # Using PopenSpawn, although probably it would be best to use pexpect.spawn\n # instead. 
However, it's working fine and it seems easier to fix in the\n # future to work on Windows (where, this way, spawning actually works; it's just\n # python-prompt-toolkit that rejects displaying a TUI)\n return PopenSpawn", "def start(self):\n last_stdout = None\n self.processes = []\n for cmd in self.cmds:\n # TODO: handle exceptions raised by Popen\n p = subprocess.Popen(cmd, stdin=last_stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if last_stdout is not None:\n last_stdout.close()\n last_stdout = p.stdout\n self.processes.append(p)", "def subprocess(cls, cmd, **kwargs):\n def call(args):\n return subprocess.call(cmd + args, **kwargs)\n return cls(call)", "def execute(cmd, env=None, path=None, reactor=None):\r\n deferred = Deferred()\r\n protocol = _ProcessProtocol(' '.join(cmd), deferred)\r\n\r\n try:\r\n reactor.spawnProcess(protocol, cmd[0], cmd, env, path)\r\n except OSError:\r\n e = ExecutionError('Command could not be executed.')\r\n deferred.errback(Failure(e))\r\n\r\n return deferred", "def spawn(self, pcls, args):\n\n childp, ownp = multiprocessing.Pipe()\n p = pcls(self._id, childp)\n p._loglevel = self._loglevel\n p.start()\n\n childp.close()\n cid = ownp.recv()\n ownp.send((\"setup\", args))\n ownp.send(\"start\")\n\n self._child_procs.append((p.pid, cid))\n\n return cid", "def _subprocess(cmd):\n\n log.debug('Running: \"%s\"', \" \".join(cmd))\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()\n retcode = proc.wait()\n\n if ret:\n return ret\n elif retcode != 1:\n return True\n else:\n return False\n except OSError as err:\n log.error(err)\n return False", "async def async_run_subproc_from_code(sub_proc_code: str) -> asyncio.subprocess.Process:\n return await asyncio.create_subprocess_exec(sys.executable, '-c', sub_proc_code, stdout=asyncio.subprocess.PIPE)", "def __call__(self, stdin=None, stdout=True, stderr=True,\n cwd=None, env=None):\n if stdout:\n stdout_arg = subprocess.PIPE\n else:\n stdout_arg = open(os.devnull)\n if stderr:\n stderr_arg = subprocess.PIPE\n else:\n stderr_arg = open(os.devnull)\n #We may not need to supply any piped input, but we setup the\n #standard input pipe anyway as a work around for a python\n #bug if this is called from a Windows GUI program. 
For\n #details, see http://bugs.python.org/issue1124861\n #\n #Using universal newlines is important on Python 3, this\n #gives unicode handles rather than bytes handles.\n child_process = subprocess.Popen(str(self), stdin=subprocess.PIPE,\n stdout=stdout_arg, stderr=stderr_arg,\n universal_newlines=True,\n cwd=cwd, env=env,\n shell=(sys.platform!=\"win32\"))\n #Use .communicate as can get deadlocks with .wait(), see Bug 2804\n stdout_str, stderr_str = child_process.communicate(stdin)\n if not stdout: assert not stdout_str\n if not stderr: assert not stderr_str\n return_code = child_process.returncode\n if return_code:\n raise ApplicationError(return_code, str(self),\n stdout_str, stderr_str)\n return stdout_str, stderr_str", "def _run(proc: Popen, timeout):\n try:\n return proc.wait(timeout=timeout)\n except TimeoutExpired:\n pass\n if sys.platform != 'win32':\n proc.send_signal(signal.SIGINT)\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.terminate() # SIGTERM\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.kill() # SIGKILL\n return proc.wait(timeout=5)", "def execute_local(args, env = None, zerobyte = False):\n\n from tempfile import TemporaryFile\n from subprocess import Popen\n\n # Note: PIPE will cause deadlock if output is larger than 65K\n stdout, stderr = TemporaryFile(\"w+\"), TemporaryFile(\"w+\")\n handle = type('Handle', (object,), {'stdout' : [], 'stderr' : [], 'returncode' : 0})()\n p = Popen(args, stdout = stdout, stderr = stderr, env = env, shell = True)\n p.wait()\n if zerobyte:\n strstdout = stdout.seek(0) or stdout.read()\n handle.stdout = strstdout.split('\\0')\n else:\n handle.stdout = stdout.seek(0) or stdout.readlines()\n handle.stderr = stderr.seek(0) or stderr.readlines()\n handle.returncode = p.returncode\n return handle", "def fork(self):\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"Fork failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)", "def Popen(self, args, **kwargs):\n # Invoke subprocess.check_output\n if self.command.verbosity >= 2:\n print(\">>> {cmdline}\".format(\n cmdline=' '.join(shlex.quote(arg) for arg in args)\n ))\n\n return self._subprocess.Popen(\n [\n str(arg) for arg in args\n ],\n **self.final_kwargs(**kwargs)\n )", "def _subprocess_call(*args, **kwargs):\n return subprocess.call(*args, **kwargs)", "def run_cmd(cmd, callback=None, watch=False, background=False, shell=False):\r\n\r\n if watch and not callback:\r\n raise RuntimeError(\r\n \"You must provide a callback when watching a process.\"\r\n )\r\n\r\n output = None\r\n\r\n if shell:\r\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\r\n else:\r\n proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)\r\n\r\n if background:\r\n # Let task run in background and return pmid for monitoring:\r\n return proc.pid, proc\r\n\r\n if watch:\r\n while proc.poll() is None:\r\n line = proc.stdout.readline()\r\n if line != \"\":\r\n callback(line)\r\n\r\n # Sometimes the process exits before we have all of the output, so\r\n # we need to gather the remainder of the output.\r\n remainder = proc.communicate()[0]\r\n if remainder:\r\n callback(remainder)\r\n else:\r\n output = proc.communicate()[0]\r\n\r\n if callback and output is not None:\r\n return callback(output)\r\n\r\n return output" ]
[ "0.76355433", "0.7024509", "0.6961524", "0.6918989", "0.6882473", "0.68161005", "0.68090093", "0.6789304", "0.6741872", "0.6730431", "0.66940457", "0.66635656", "0.6652612", "0.66523474", "0.66228956", "0.64822423", "0.642592", "0.64208853", "0.63632655", "0.6342233", "0.6317299", "0.6304532", "0.6296637", "0.6279865", "0.62429327", "0.62187535", "0.62078583", "0.6199569", "0.61935115", "0.6173093", "0.6165023", "0.6152678", "0.6087014", "0.6080049", "0.6042515", "0.6031003", "0.6022431", "0.6000971", "0.59992546", "0.59951717", "0.5991336", "0.5977619", "0.59508127", "0.5940763", "0.59352696", "0.5933931", "0.5930377", "0.5923018", "0.59203434", "0.5920189", "0.5909449", "0.58965915", "0.5868371", "0.5867485", "0.5835666", "0.581185", "0.5784045", "0.5736419", "0.57351077", "0.572735", "0.57262367", "0.5710055", "0.5694286", "0.56937385", "0.5685043", "0.56767815", "0.563633", "0.56171817", "0.56116736", "0.5604142", "0.5600395", "0.5595084", "0.55933696", "0.5570183", "0.55676544", "0.55654234", "0.55533177", "0.55519974", "0.55515724", "0.5549142", "0.5543498", "0.5538993", "0.55342925", "0.5531276", "0.55175346", "0.55155396", "0.5514216", "0.54940987", "0.5493229", "0.5484909", "0.54839504", "0.5471909", "0.5471031", "0.547041", "0.54618216", "0.54601145", "0.5454637", "0.54503405", "0.54331183", "0.5432972" ]
0.68733007
5
Executes a subprocess and returns its exit code, output, and errors.
def GetCmdStatusOutputAndError(args, cwd=None, shell=False, env=None, merge_stderr=False): _ValidateAndLogCommand(args, cwd, shell) stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE pipe = Popen( args, stdout=subprocess.PIPE, stderr=stderr, shell=shell, cwd=cwd, env=env) stdout, stderr = pipe.communicate() return (pipe.returncode, stdout, stderr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output", "def execute(cmd):\n\n process = subprocess.Popen(cmd,\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out = ''\n err = ''\n exitcode = 0\n\n result = process.communicate()\n (out, err) = result\n exitcode = process.returncode\n\n return exitcode, out.decode(), err.decode()", "def subprocess_run(cmd, ignore_failure=False, shell=True):\n try:\n proc = subprocess.Popen(\n cmd,\n shell=shell,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rc = proc.returncode\n\n except OSError as exc:\n raise RuntimeError('Failed to run ' + cmd + ': [Errno: %d] ' %\n exc.errno + exc.strerror + ' [Exception: ' +\n type(exc).__name__ + ']')\n if (not ignore_failure) and (rc != 0):\n raise RuntimeError('(%s) failed with rc=%s: %s' %\n (cmd, rc, err))\n return out", "def run(cmd, shell=False, cwd=None):\n try:\n out = check_output(cmd, shell=shell, cwd=cwd, stderr=STDOUT)\n except CalledProcessError as ex:\n return ex.returncode, ex.output\n else:\n return 0, out", "def run_subprocess(command, environment=None, shell=False, raise_on_error=True):\n proc = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell,\n env=environment)\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n if raise_on_error:\n raise RuntimeError('{}\\n{}'.format(stderr, stdout))\n return stdout, stderr, proc.returncode", "def checked_subprocess_run(command):\n args = shlex.split(command)\n completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out = completed.stdout.decode()\n err = completed.stderr.decode()\n\n # Print the subprocess output to include in the test output\n print(out, file=sys.stdout)\n print(err, file=sys.stderr)\n\n # After printing the output, raise an exception on a non-zero exit status.\n completed.check_returncode()\n\n return out, err", "def run(cmd, dieOnError=True):\n\n\tps = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\texitcode = ps.returncode\n\tstdout,stderr = ps.communicate()\n\treturn exitcode, stdout, stderr", "def _exec_cmd(cmd, stdout=None, stderr=None):\n rc = 0\n kwargs = {}\n if stdout is not None:\n kwargs[\"stdout\"] = stdout\n if stderr is not None:\n kwargs[\"stderr\"] = stderr\n try:\n subprocess.check_call(cmd, **kwargs)\n except CalledProcessError as e:\n LOG.error(\"[return code: %s] %s\", e.returncode, e)\n rc = e.returncode\n return rc", "def run_command(cmd):\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n return proc.returncode, stdout, stderr", "def subprocess_run(*args, **kwargs):\n if __version__.py_version_lt(3, 7):\n if kwargs.pop('capture_output', None):\n kwargs.setdefault('stdout', subprocess.PIPE)\n kwargs.setdefault('stderr', subprocess.PIPE)\n try:\n output = subprocess.run(*args, **kwargs)\n except subprocess.CalledProcessError as cpe:\n emess = '\\n'.join([\n 'Subprocess error:',\n 'stderr:',\n f'{cpe.stderr}',\n 'stdout:',\n f'{cpe.stdout}',\n f'{cpe}'\n ])\n raise RuntimeError(emess) from cpe\n return output", "def _exec_command(command):\n\n log(\"Run command for '%s'\" % 
command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n p_status = p.wait()\n return p_status, output", "def execute_command(command):\n p = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n rc = p.wait()\n stdout = []\n stderr = []\n for line in p.stdout.read().decode().splitlines():\n stdout.append(line)\n for line in p.stderr.read().decode().splitlines():\n stderr.append(line)\n p.stdout.close()\n p.stderr.close()\n return (rc, stdout, stderr)", "def _run_cmd(*args):\n proc = Popen(\n args, stdin=PIPE, stdout=PIPE, stderr=PIPE,\n cwd=os.path.dirname(__file__))\n output, _ = proc.communicate()\n code = proc.returncode\n return code, output", "def Subprocess(self, cmd):\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def run_subprocess_cmd(command, print_cmd=True, print_stdout_stderr=True, get_returncode=False):\n if print_cmd:\n print\n print 'Running command:\\n%s' % command\n print \n\n sp = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n shell=True)\n out, error = sp.communicate() \n if print_stdout_stderr:\n print\n print out\n print\n print error\n print\n\n if get_returncode:\n return out, error, sp.returncode\n else:\n return out, error", "def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):\n if ok_exit_codes is None:\n ok_exit_codes = [0]\n out, err = proc.communicate(cmd_input)\n\n ret = proc.returncode\n if ret not in ok_exit_codes:\n LOG.error(\"Command '%(cmdline)s' with process id '%(pid)s' expected \"\n \"return code in '%(ok)s' but got '%(rc)s': %(err)s\" %\n {'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,\n 'rc': ret, 'err': err})\n raise SubprocessException(' '.join(cmdline), ret, out, err)\n return out", "def run(cmd, proc_stdout = sys.stdout, proc_stderr = sys.stderr,\n check = True):\n print cmd\n proc = subprocess.Popen(cmd, shell=True, bufsize=-1,\n stdout=proc_stdout, stderr=proc_stderr)\n output, errors = proc.communicate()\n sts = proc.wait()\n if check is True and sts != 0:\n raise RuntimeError(\"Command: %s exited with non-zero status %i\" % (cmd, sts))\n return output, errors", "def shell_cmd(*args):\n proc = subprocess.run(args)\n returncode = proc.returncode\n if returncode != 0:\n raise RuntimeError(\n f\"Command {args} failed with return code {returncode}\")\n return proc", "def execute(self, cmd, cwd=None, capture_output=False, env=None, raise_errors=True):\n logging.info('Executing command: {cmd}'.format(cmd=str(cmd)))\n stdout = subprocess.PIPE if capture_output else None\n process = subprocess.Popen(cmd, cwd=cwd, env=env, stdout=stdout)\n output = process.communicate()[0]\n returncode = process.returncode\n if returncode:\n # Error\n if raise_errors:\n raise subprocess.CalledProcessError(returncode, cmd)\n else:\n logging.info('Command returned error status %s', returncode)\n if output:\n logging.info(output)\n return returncode, output", "def call_subprocess(poutput, data=None):\n try:\n output = poutput.communicate(input=data)\n LOG.debug(\"Exit status: \" + str(poutput.returncode))\n if poutput.returncode != 0:\n LOG.warning(\"Process returned non-zero exit code: \" + str(poutput.returncode))\n LOG.warning(\"Process STDOUT: \" + output[0])\n LOG.warning(\"Process STDERR: \" + output[1])\n return output[0].strip(), output[1].strip()\n except Exception as e:\n LOG.exception(\"Command 
failed!\")\n raise e", "def run_with_subprocess(cmd):\n new_env = dict(os.environ, LC_ALL='C')\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=new_env)\n output, error = proc.communicate()\n returncode = proc.returncode\n except OSError, (errno, strerror):\n output, error = \"\", \"Could not execute %s: %s\" % (cmd[0], strerror)\n returncode = 1\n\n return (output, error, returncode)", "def run_and_handle_returncode(cmd, print_cmd=True):\n stdout, stderr, returncode = run_subprocess_cmd(cmd, print_cmd=print_cmd, \n print_stdout_stderr=False, get_returncode=True)\n if returncode != 0:\n import sys; sys.exit('STDOUT:\\n%s\\nSTDERR:\\n%s\\n' % (stdout, stderr))\n return stdout, stderr, returncode", "def execute_command(cmd):\n\n env = environ.copy()\n proc = subprocess.Popen(\n [cmd],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n env=env)\n\n stdout, stderr = proc.communicate()\n\n if stdout:\n logging.info(stdout.decode())\n if stderr:\n logging.info(stderr.decode())\n\n if proc.returncode != 0:\n logging.error(u\" Command execution failed.\")\n return proc.returncode, stdout.decode(), stderr.decode()", "def execute(parent, cmd, *args, **kwargs):\n\n with xtrace(parent, flatten(cmd)) as h:\n try:\n code = subprocess.call(cmd, *args, **kwargs)\n except:\n sys.exit(\n DiagnosticReporter.fatal(EXCEPTION_EXECUTING_PROCESS, cmd[0]))\n finally:\n h.report(code)\n return code", "def _execute_command(\n args: Union[List[str], str],\n print_output: bool,\n capture_stderr: bool,\n print_command: bool,\n *pargs,\n **kwargs\n) -> Tuple[int, List[str]]:\n stdout_write, stdout_path = tempfile.mkstemp()\n with open(stdout_path, \"rb\") as stdout_read, open('/dev/null', 'w') as dev_null:\n\n if print_command:\n print(\"Executing: %s\" % \" \".join(args))\n\n kwargs['stdout'] = stdout_write\n kwargs['stderr'] = stdout_write if capture_stderr else dev_null\n\n # pylint: disable=consider-using-with\n process = subprocess.Popen(\n args,\n *pargs,\n **kwargs\n )\n\n while True:\n output = stdout_read.read(1).decode(errors=\"replace\")\n\n if output == '' and process.poll() is not None:\n break\n\n if print_output and output:\n print(output, end=\"\", flush=True)\n\n exit_code = process.poll()\n\n stdout_read.seek(0)\n stdout = [line.decode(errors=\"replace\") for line in stdout_read.readlines()]\n\n # ignoring mypy error below because it thinks exit_code can sometimes be None\n # we know that will never be the case because the above While loop will keep looping forever\n # until exit_code is not None\n return exit_code, stdout # type: ignore", "def run(cmd: List[str]) -> int:\n logger.debug('cmd: %s', ' '.join(cmd))\n child = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdoutdata, stderrdata = child.communicate()\n\n if stdoutdata.strip():\n log_std('stdout', stdoutdata.decode(),\n logging.DEBUG if child.returncode == 0 else logging.ERROR)\n\n if stderrdata.strip():\n log_std('stderr', stderrdata.decode(), logging.ERROR)\n\n logger.debug(\"returncode %s\", child.returncode)\n return child.returncode", "def call_command(command, env=None, cwd=None):\n\n try:\n LOG.debug('Run %s', ' '.join(command))\n out = subprocess.check_output(command,\n bufsize=-1,\n env=env,\n stderr=subprocess.STDOUT,\n cwd=cwd)\n LOG.debug(out)\n return out, 0\n except subprocess.CalledProcessError as ex:\n LOG.debug('Running command \"%s\" Failed.', ' '.join(command))\n LOG.debug(str(ex.returncode))\n LOG.debug(ex.output)\n return ex.output, ex.returncode\n except OSError as 
oerr:\n LOG.warning(oerr.strerror)\n return oerr.strerror, oerr.errno", "def execute_stdout(command):\n try:\n output = subprocess.check_output([command], stderr=subprocess.STDOUT,\n shell=True)\n return 0, output\n except subprocess.CalledProcessError as excp:\n return excp.returncode, excp.output", "def runprocess(self, argv, check_stdout=None, check_stderr=None,\n check_returncode=0, stdin_string='', fail_message=None,\n timeout=5, verbosity=None, env=None):\n if env is None:\n env = os.environ\n env.setdefault('GIT_COMMITTER_DATE', self.isodate_now)\n argv_repr = ' '.join(shellquote(a) for a in argv)\n if verbosity is None:\n verbosity = self.verbosity\n if verbosity:\n print(self.term.blue(argv_repr))\n if verbosity > 2:\n print(self.term.yellow(stdin_string.rstrip()))\n PIPE = subprocess.PIPE\n proc = subprocess.Popen(argv, stdout=PIPE, stderr=PIPE, stdin=PIPE,\n env=env)\n try:\n stdout, stderr = proc.communicate(stdin_string.encode('utf-8'),\n timeout=timeout)\n timeout_expired = False\n except subprocess.TimeoutExpired:\n proc.kill()\n stdout = stderr = b''\n timeout_expired = True\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n returncode = proc.returncode\n failed = any([\n timeout_expired,\n (check_stdout is not None and check_stdout != stdout),\n (check_stderr is not None and check_stderr != stderr),\n (check_returncode is not None and check_returncode != returncode),\n ])\n if failed and not verbosity:\n print(self.term.blue(argv_repr))\n if failed or verbosity >= 2:\n if stdout:\n print(stdout.rstrip())\n if stderr:\n print(self.term.yellow(stderr.rstrip()))\n print('→ %s' % self.term.blue(str(proc.returncode)))\n if failed:\n if timeout_expired:\n self.die('Command timeout expired')\n elif fail_message:\n self.die(fail_message)\n else:\n self.die('Command failed')\n return SubprocessResult(stdout, stderr, returncode)", "async def _run_subprocess(\n cmd: str,\n allow_params: bool,\n params: Dict[str, ParamValueT],\n) -> Dict[str, Any]:\n cmd_str = cmd\n if allow_params:\n if params[\"shell_params\"] == []:\n cmd_str = cmd.format([''])\n else:\n cmd_str = cmd.format(*params.get('shell_params', ['']))\n\n logging.info(\"Running command: %s\", cmd_str)\n\n cmd_list = shlex.split(cmd_str)\n\n process = await asyncio.create_subprocess_exec(\n *cmd_list,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n stdout, stderr = await process.communicate()\n\n return {\n \"returncode\": process.returncode,\n \"stdout\": stdout.decode(),\n \"stderr\": stderr.decode(),\n }", "def execute_shell_command(command: str) -> int:\n cwd: str = os.getcwd()\n\n path_env_var: str = os.pathsep.join([os.environ.get(\"PATH\", os.defpath), cwd])\n env: dict = dict(os.environ, PATH=path_env_var)\n\n status_code: int = 0\n try:\n res: CompletedProcess = run(\n args=[\"bash\", \"-c\", command],\n stdin=None,\n input=None,\n # stdout=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4\n # stderr=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4\n capture_output=True,\n shell=False,\n cwd=cwd,\n timeout=None,\n check=True,\n encoding=None,\n errors=None,\n text=None,\n env=env,\n universal_newlines=True,\n )\n sh_out: str = res.stdout.strip()\n logger.info(sh_out)\n except CalledProcessError as cpe:\n status_code = cpe.returncode\n sys.stderr.write(cpe.output)\n sys.stderr.flush()\n exception_message: str = \"A Sub-Process call Exception occurred.\\n\"\n exception_traceback: str = traceback.format_exc()\n 
exception_message += (\n f'{type(cpe).__name__}: \"{str(cpe)}\". Traceback: \"{exception_traceback}\".'\n )\n logger.error(exception_message)\n\n return status_code", "def subprocess_run(args, **kwargs_in):\n kwargs = kwargs_in.copy()\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.PIPE\n logger.debug(f'running a subprocess {args} {kwargs}')\n output = subprocess.run(args, **kwargs)\n logger.debug(f' returned: {output.stdout}')\n return output", "def runSubProcess(command, verbose=0):\n if verbose:\n print(command)\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = process.communicate()\n status = process.poll()\n \n return output.decode('utf-8'), stderr.decode('utf-8'), status", "def subprocess_call(command):\n try:\n return_out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)\n if return_out.strip():\n print return_out\n except subprocess.CalledProcessError, err:\n msg = \"Subprocess call failed!\"\\\n \"\\n command : {0}\"\\\n \"\\n console output: \\n\\n{1}\"\\\n \"\\n error message : {2}\"\\\n \"\\n arguments : {3}\"\\\n \"\\n return-code : {4}\\n\"\\\n .format(err.cmd, err.output, err.message, err.args, err.returncode)\n raise Exception(msg)\n\n return return_out", "def execute(args, cwd=None):\n completed_process = subprocess.run(\n args=args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=cwd\n )\n return utils.make_process_result(completed_process)", "def execute_cmd(args: Sequence[str],\n verbose: bool = False,\n **kwargs) -> subprocess.CompletedProcess:\n cmd = \" \".join(args)\n if verbose:\n print(f\"cmd: {cmd}\")\n try:\n return subprocess.run(args, check=True, text=True, **kwargs)\n except subprocess.CalledProcessError as exc:\n print((f\"\\n\\nThe following command failed:\\n\\n{cmd}\"\n f\"\\n\\nReturn code: {exc.returncode}\\n\\n\"))\n if exc.stdout:\n print(f\"Stdout:\\n\\n{exc.stdout}\\n\\n\")\n if exc.stderr:\n print(f\"Stderr:\\n\\n{exc.stderr}\\n\\n\")\n raise exc", "def run(cmd):\n print ' '.join(cmd)\n try:\n check_call(cmd)\n except CalledProcessError as cpe:\n print \"Error: return code: \" + str(cpe.returncode)\n sys.exit(cpe.returncode)", "def subprocess_run(cmd):\n print(shlex.join(cmd))\n try:\n ret = subprocess.run(cmd, capture_output=True,\n text=True, env=os.environ.copy(), check=True)\n if (ret.stdout):\n print(ret.stdout)\n return ret\n except subprocess.CalledProcessError as e:\n if (e.stderr):\n print(e.stderr)\n raise e", "def run_command(command):\n process = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n response, error = process.communicate()\n return response.decode().rstrip('\\n'), error.decode().rstrip('\\n')", "def run_command(cmd):\n if env.PY2 and isinstance(cmd, unicode):\n cmd = cmd.encode(sys.getfilesystemencoding())\n\n # In some strange cases (PyPy3 in a virtualenv!?) the stdout encoding of\n # the subprocess is set incorrectly to ascii. 
Use an environment variable\n # to force the encoding to be the same as ours.\n sub_env = dict(os.environ)\n encoding = output_encoding()\n if encoding:\n sub_env['PYTHONIOENCODING'] = encoding\n\n proc = subprocess.Popen(\n cmd,\n shell=True,\n env=sub_env,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output, _ = proc.communicate()\n status = proc.returncode\n\n # Get the output, and canonicalize it to strings with newlines.\n if not isinstance(output, str):\n output = output.decode(output_encoding())\n output = output.replace('\\r', '')\n\n return status, output", "def exec_process(cmdline, silent, input=None, **kwargs):\n try:\n sub = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n stdout, stderr = sub.communicate(input=input)\n returncode = sub.returncode\n if not silent:\n sys.stdout.write(stdout)\n sys.stderr.write(stderr)\n except OSError, e:\n if e.errno == 2:\n raise RuntimeError('\"%s\" is not present on this system' % cmdline[0])\n else:\n raise\n if returncode != 0:\n raise RuntimeError('Got return value %d while executing \"%s\", stderr output was:\\n%s' % (returncode, \" \".join(cmdline), stderr.rstrip(\"\\n\")))\n return stdout", "def _run(args: List[str], check: bool = False) -> Tuple[int, str]:\n result = subprocess.run(args=args, stdout=subprocess.PIPE)\n if check and result.returncode != 0:\n raise subprocess.CalledProcessError(result.returncode, args)\n return result.returncode, result.stdout.decode('utf-8', 'strict')", "def exec_test_command(cmd):\n process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)\n result = process.communicate()\n return (\n process.returncode,\n bytes(result[0]).decode(\"utf-8\"),\n bytes(result[1]).decode(\"utf-8\"),\n )", "def run_commands(*commands: str, **kwargs) -> Tuple[Optional[str], Optional[str], int]:\n command = ' ; '.join(commands)\n # Indirectly waits for a return code.\n process = subprocess.run(command, **kwargs)\n stdout = process.stdout\n stderr = process.stderr\n # Decode stdout and stderr to strings if needed.\n if isinstance(stdout, bytes):\n stdout = str(stdout, 'utf-8').strip()\n if isinstance(stderr, bytes):\n stderr = str(stderr, 'utf-8').strip()\n return stdout, stderr, process.returncode", "def exec_command(cmd):\n with subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True) as p:\n stdout, _ = p.communicate()\n if p.returncode != 0:\n logger.error(stdout)\n return None\n\n return stdout", "def run(*args, **kwargs):\n _patch_popen(Popen)\n assert len(args) > 0\n\n arguments = []\n\n input = kwargs.pop('input', None)\n fail_on_error = kwargs.pop('fail_on_error', False)\n encoding = kwargs.pop('encoding', None) \n\n if len(args) == 1:\n if isinstance(args[0], string):\n arguments = args[0].split()\n else:\n for i in args:\n if isinstance(i, (list, tuple)):\n for j in i:\n arguments.append(j)\n else:\n arguments.append(i)\n\n def set_default_kwarg(key, default): \n kwargs[key] = kwargs.get(key, default)\n\n set_default_kwarg('stdin', PIPE)\n set_default_kwarg('stdout', PIPE)\n set_default_kwarg('stderr', PIPE)\n\n\n proc = Popen(arguments, **kwargs)\n stdout, stderr = proc.communicate(input)\n \n if encoding is not None:\n stdout = stdout.decode(encoding=encoding)\n stderr = stderr.decode(encoding=encoding)\n\n result = RunResult(proc.returncode, stdout, stderr)\n \n if fail_on_error and proc.returncode != 0:\n raise ProcessError(' '.join(arguments), result)\n\n 
return result", "def getstatusoutput(*args, **kwargs):\n p = subprocess.Popen(*args, **kwargs)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def exec_cmd(cmd):\n\targs = shlex.split(cmd)\n\tverbose = True\n\n\ttry:\n\t\tif verbose == True:\n\t\t\tsubprocess.check_call(args)\n\t\telse:\n\t\t\tsubprocess.check_call(args,\n\t\t\t\t\t\t\t\t stdout=subprocess.STDOUT,\n\t\t\t\t\t\t\t\t stderr=subprocess.STDOUT)\n\t# Exception\n\texcept subprocess.CalledProcessError as e:\n\t\tprint \"Command\t :: \", e.cmd\n\t\tprint \"Return Code :: \", e.returncode\n\t\tprint \"Output\t :: \", e.output", "def shell(args, **kwargs):\n import subprocess\n\n output, returncode = '', 0\n logger.debug('running %s', ' '.join(args))\n try:\n if 'cwd' in kwargs:\n # convert cwd to str in case it's a Path\n kwargs['cwd'] = str(kwargs['cwd'])\n output = subprocess.check_output(\n args, stderr=subprocess.STDOUT, **kwargs)\n except subprocess.CalledProcessError as e:\n returncode = e.returncode\n output = e.output\n\n return output.decode('utf-8'), returncode", "def _proc_exec_wait(command_line, silent=False):\n result = (-1, None, None)\n command = None\n proc = None\n\n if _platform_windows:\n command_line = command_line.replace(\"\\\\\", \"/\")\n\n try:\n command = shlex.split(command_line)\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Unable to parse the given command line: {0}\\n\"\n \"Error: {1}.\".format(command_line, e)\n )\n return result\n\n try:\n sp_kwargs = {\n \"stdout\": subprocess.PIPE,\n \"stderr\": subprocess.PIPE,\n \"startupinfo\": None,\n \"env\": os.environ,\n }\n\n if _platform_windows:\n sp_kwargs[\"startupinfo\"] = subprocess.STARTUPINFO()\n sp_kwargs[\"startupinfo\"].dwFlags = (\n subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW\n )\n sp_kwargs[\"startupinfo\"].wShowWindow = subprocess.SW_HIDE\n\n proc = subprocess.Popen(command, **sp_kwargs)\n stdoutdata, stderrdata = proc.communicate()\n status = proc.returncode\n result = (status, stdoutdata.decode(\"utf8\"), stderrdata.decode(\"utf8\"))\n except Exception as e:\n if not silent:\n _warn(\n \"_proc_exec_wait: Could not open the process: '{0}'\\n\"\n \"Error: {1}.\".format(command[0], e)\n )\n finally:\n if proc:\n if proc.stdout:\n proc.stdout.close()\n\n if proc.stderr:\n proc.stderr.close()\n\n return result", "def sh(*args, return_bytes=False, encoding='utf-8', **kw):\n options = dict(\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n options.update(kw)\n\n process = subprocess.Popen(args, **options)\n process.wait()\n\n if process.returncode != 0:\n raise CommandFailure(process.stderr.read().decode(encoding))\n\n if return_bytes:\n return process.stdout.read()\n\n return process.stdout.read().decode(encoding)", "def execute(cmd, fail_ok=False, merge_stderr=False):\n cmdlist = shlex.split(cmd)\n result = ''\n result_err = ''\n stdout = subprocess.PIPE\n stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE\n proc = subprocess.Popen(cmdlist, stdout=stdout, stderr=stderr)\n result, result_err = proc.communicate()\n result = result.decode('utf-8')\n if not fail_ok and proc.returncode != 0:\n raise exceptions.CommandFailed(proc.returncode, cmd, result,\n result_err)\n return result", "def call_subprocess_Popen(command, **params):\r\n if 'stdout' in params or 'stderr' in params:\r\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\r\n null = open(os.devnull, 'wb')\r\n # stdin to devnull is a workaround for a crash in a weird 
Windows\r\n # environement where sys.stdin was None\r\n params.setdefault('stdin', null)\r\n params['stdout'] = null\r\n params['stderr'] = null\r\n p = subprocess_Popen(command, **params)\r\n p.wait()\r\n return p.returncode", "def run(cmd, fail=True, capture_stdout=False, capture_stderr=False, verbose=False):\n stdout, stderr = None, None\n if capture_stderr:\n stderr = subprocess.PIPE\n if capture_stdout:\n stdout = subprocess.PIPE\n\n if verbose:\n print(cmd)\n\n p = subprocess.Popen(['bash', '-c', cmd], stderr=stderr, stdout=stdout)\n if p.returncode and fail:\n sys.exit(1)\n\n return p", "def subprocess_check_output(*popenargs, **kwargs):\r\n if 'stdout' in kwargs:\r\n raise ValueError('stdout argument not allowed, it will be overridden.')\r\n if 'stderr' in kwargs:\r\n raise ValueError('stderr argument not allowed, it will be overridden.')\r\n\r\n #executable_exists(popenargs[0][0])\r\n\r\n # NOTE: it is very, very important that we use temporary files for\r\n # collecting stdout and stderr here. There is a nasty bug in python\r\n # subprocess; if your process produces more than 64k of data on an fd that\r\n # is using subprocess.PIPE, the whole thing will hang. To avoid this, we\r\n # use temporary fds to capture the data\r\n stdouttmp = TemporaryFile()\r\n stderrtmp = TemporaryFile()\r\n\r\n process = subprocess.Popen(stdout=stdouttmp, stderr=stderrtmp, *popenargs,\r\n **kwargs)\r\n process.communicate()\r\n retcode = process.poll()\r\n\r\n stdouttmp.seek(0, 0)\r\n stdout = stdouttmp.read()\r\n stdouttmp.close()\r\n\r\n stderrtmp.seek(0, 0)\r\n stderr = stderrtmp.read()\r\n stderrtmp.close()\r\n\r\n if retcode:\r\n cmd = ' '.join(*popenargs)\r\n raise Exception(\"'%s' failed(%d): %s\" % (cmd, retcode, stderr), retcode)\r\n return (stdout, stderr, retcode)", "def process_results(process_object):\n (stdout, stderr)=process_object.communicate()\n return (process_object.returncode, stdout, stderr)", "def call(self):\n\n process = subprocess.Popen(self._cmd, stdout=self._stdout, stderr=self._stderr,\n shell=isinstance(self._cmd, basestring), env=self._env, cwd=self._cwd)\n returnData = process.communicate()\n\n return ProcessResult(process.returncode, returnData[0], returnData[1])", "def exec_command(command):\n exit_code = 1\n stdo = ''\n stde = ''\n from subprocess import Popen, PIPE\n try:\n pobj = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)\n #pobj.wait()\n stdo, stde = pobj.communicate()\n exit_code = pobj.returncode\n except:\n print \"Unexpected error at exec_command:\", sys.exc_info()\n import platform\n s = traceback.format_exc()\n logStr = \" exec command error : error\\n> stderr:\\n%s\\n\" %s\n error = platform.node()+\"-\"+logStr\n return (1,error,\"\")\n return (exit_code, stdo, stde)", "def run_check(cmd: typing.Sequence[typing.Union[os.PathLike, str]],\n *, input_lines: typing.Optional[BytesOrStrIterator] = None,\n capture_output: bool = False,\n quiet: bool = False, **kwargs) -> subprocess.CompletedProcess:\n log.debug('run %r', cmd)\n\n if not kwargs.pop('check', True):\n raise NotImplementedError('check must be True or omited')\n\n if capture_output: # Python 3.6 compat\n kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE\n\n kwargs.setdefault('startupinfo', get_startupinfo())\n\n try:\n if input_lines is not None:\n assert kwargs.get('input') is None\n assert iter(input_lines) is input_lines\n popen = subprocess.Popen(cmd, stdin=subprocess.PIPE, **kwargs)\n stdin_write = popen.stdin.write\n for line in input_lines:\n stdin_write(line)\n stdout, stderr = 
popen.communicate()\n proc = subprocess.CompletedProcess(popen.args, popen.returncode,\n stdout=stdout, stderr=stderr)\n else:\n proc = subprocess.run(cmd, **kwargs)\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise ExecutableNotFound(cmd) from e\n else:\n raise\n\n if not quiet and proc.stderr:\n stderr = proc.stderr\n if isinstance(stderr, bytes):\n stderr_encoding = (getattr(sys.stderr, 'encoding', None)\n or sys.getdefaultencoding())\n stderr = stderr.decode(stderr_encoding)\n sys.stderr.write(stderr)\n sys.stderr.flush()\n\n try:\n proc.check_returncode()\n except subprocess.CalledProcessError as e:\n raise CalledProcessError(*e.args)\n\n return proc", "def subprocess_with_output(\n cmd, shell=False, cwd=None, env=None, suppress_output=False):\n # type: (str, bool, str, dict, bool) -> int\n _devnull = None\n try:\n if suppress_output:\n _devnull = open(os.devnull, 'w')\n proc = subprocess.Popen(\n cmd, shell=shell, cwd=cwd, env=env, stdout=_devnull,\n stderr=subprocess.STDOUT)\n else:\n proc = subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)\n proc.wait()\n finally:\n if _devnull is not None:\n _devnull.close()\n return proc.returncode", "def system(command):\n print('[system] {}'.format(command))\n p = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n rc = p.returncode\n if PY3:\n output = output.decode(\"ascii\")\n err = err.decode(\"ascii\")\n return rc, output, err", "def run_command(self, cmd, expects=0, shell=False, stdout=PIPE, stderr=PIPE):\n \n # If the command argument is a string\n if isinstance(cmd, str):\n cmd = cmd.split(' ')\n \n # Open the process\n try:\n proc = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)\n out, err = proc.communicate()\n \n # Make sure the expected return code is found\n if not proc.returncode == expects:\n self.die('Failed to run command \\'{0}\\', ERROR={1}'.format(str(cmd), err))\n \n # Return exit code / stdout / stderr\n return proc.returncode, out, err\n except Exception as e:\n self.die('Failed to run command \\'{0}\\': ERROR={1}'.format(str(cmd), str(e)))", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def _subprocess(cmd):\n\n log.debug('Running: \"%s\"', \" \".join(cmd))\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()\n retcode = proc.wait()\n\n if ret:\n return ret\n elif retcode != 1:\n return True\n else:\n return False\n except OSError as err:\n log.error(err)\n return False", "def execute(cmd, path):\n oldPath = os.getcwd()\n os.chdir(path)\n\n exitcode, output = subprocess.getstatusoutput(cmd)\n\n os.chdir(oldPath)\n\n ok = not exitcode\n\n return ok, output", "def execute_subprocess(command, inputs, *, timeout, target, args, kwargs):\n\n from subprocess import Popen, PIPE\n\n proc = Popen(command,\n stdin=PIPE, stdout=PIPE, stderr=PIPE,\n universal_newlines=True,\n env={'PYTHONPATH': pythonpath()})\n out, err = proc.communicate(input=inputs, timeout=timeout)\n\n if err:\n raise RuntimeError(\n 'error running function %s with:\\n'\n ' args=%r\\n'\n ' kwargs=%r\\n\\n'\n 'Process returned code %s.\\n'\n 'Stdout:\\n%s\\n'\n 'Error message:\\n%s' % (\n target, args, kwargs, proc.poll(),\n indent(out or '<empty>', 4),\n indent(err or '<empty>', 4)\n )\n )\n\n # Make sure out is always a string. 
We ignore decoding errors praying for\n # the best\n if isinstance(out, bytes):\n out = out.decode('utf8', 'ignore')\n\n # We remove all comments and send separate comments and data sections\n lines = out.splitlines()\n data = '\\n'.join(line for line in lines if not line.startswith('#'))\n data = data.strip()\n comments = '\\n'.join(line for line in lines if line.startswith('#'))\n comments = comments.strip()\n\n # A data section must always be present\n if not data:\n raise RuntimeError('subprocess returned an empty response:\\n%s' %\n indent(out, 4))\n return data, comments", "def exec_command_all(*cmdargs, **kwargs):\n proc = subprocess.Popen(cmdargs, bufsize=-1, # Default OS buffer size.\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n # Waits for subprocess to complete.\n out, err = proc.communicate()\n # Python 3 returns stdout/stderr as a byte array NOT as string.\n # Thus we need to convert that to proper encoding.\n if is_py3:\n encoding = kwargs.get('encoding')\n if encoding:\n out = out.decode(encoding)\n err = err.decode(encoding)\n else:\n # If no encoding is given, assume we're reading filenames from stdout\n # only because it's the common case.\n out = os.fsdecode(out)\n err = os.fsdecode(err)\n\n\n return proc.returncode, out, err", "def shell_call(cmd):\n try:\n x = subprocess.run(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True\n )\n ret = (x.returncode, str(x.stdout, \"utf-8\"), str(x.stderr, \"utf-8\"))\n return ret\n except subprocess.SubprocessError as e:\n logger.error(\"System error running command: \" + str(cmd))\n logger.error(str(e.output))\n raise RuntimeError()", "def execute(self):\n\n (output, error) = self.process.communicate()\n\n if self.process.returncode != 0:\n decoded = self.decode_output(error)\n\n if not decoded:\n return \"Unkown error. 
for %s\" % (self.command)\n\n print(decoded)\n exit(1)\n return self.decode_output(output)", "def run_shell_command_regular(args):\n try:\n output = subprocess.check_output(args,\n shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n logger.warning(\"Failed in shell command: %s, output: %s\",\n args, ex.output)\n return ex.returncode, native_string(ex.output)\n\n return 0, native_string(output)", "def trycmd(*args, **kwargs):\n discard_warnings = kwargs.pop('discard_warnings', False)\n\n try:\n out, err = execute(*args, **kwargs)\n failed = False\n except exception.ProcessExecutionError as exn:\n out, err = '', str(exn)\n failed = True\n\n if not failed and discard_warnings and err:\n # Handle commands that output to stderr but otherwise succeed\n err = ''\n\n return out, err", "def execute(*cmd, **kwargs):\n process_input = kwargs.pop('process_input', None)\n check_exit_code = kwargs.pop('check_exit_code', [0])\n ignore_exit_code = False\n if isinstance(check_exit_code, bool):\n ignore_exit_code = not check_exit_code\n check_exit_code = [0]\n elif isinstance(check_exit_code, int):\n check_exit_code = [check_exit_code]\n delay_on_retry = kwargs.pop('delay_on_retry', True)\n attempts = kwargs.pop('attempts', 1)\n run_as_root = kwargs.pop('run_as_root', False)\n shell = kwargs.pop('shell', False)\n\n if len(kwargs):\n raise exception.SysinvException(_('Got unknown keyword args '\n 'to utils.execute: %r') % kwargs)\n\n if run_as_root and os.geteuid() != 0:\n cmd = ['sudo', 'sysinv-rootwrap', CONF.rootwrap_config] + list(cmd)\n\n cmd = [str(c) for c in cmd]\n\n while attempts > 0:\n attempts -= 1\n try:\n LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))\n _PIPE = subprocess.PIPE # pylint: disable=E1101\n\n if os.name == 'nt':\n preexec_fn = None\n close_fds = False\n else:\n preexec_fn = _subprocess_setup\n close_fds = True\n\n obj = subprocess.Popen(cmd,\n stdin=_PIPE,\n stdout=_PIPE,\n stderr=_PIPE,\n close_fds=close_fds,\n preexec_fn=preexec_fn,\n shell=shell)\n result = None\n if process_input is not None:\n result = obj.communicate(process_input)\n else:\n result = obj.communicate()\n obj.stdin.close() # pylint: disable=E1101\n _returncode = obj.returncode # pylint: disable=E1101\n LOG.debug(_('Result was %s') % _returncode)\n if result is not None and six.PY3:\n (stdout, stderr) = result\n # Decode from the locale using using the surrogateescape error\n # handler (decoding cannot fail)\n stdout = os.fsdecode(stdout)\n stderr = os.fsdecode(stderr)\n result = (stdout, stderr)\n if not ignore_exit_code and _returncode not in check_exit_code:\n (stdout, stderr) = result\n raise exception.ProcessExecutionError(\n exit_code=_returncode,\n stdout=stdout,\n stderr=stderr,\n cmd=' '.join(cmd))\n return result\n except exception.ProcessExecutionError:\n if not attempts:\n raise\n else:\n LOG.debug(_('%r failed. 
Retrying.'), cmd)\n if delay_on_retry:\n greenthread.sleep(random.randint(20, 200) / 100.0)\n finally:\n # NOTE(termie): this appears to be necessary to let the subprocess\n # call clean something up in between calls, without\n # it two execute calls in a row hangs the second one\n greenthread.sleep(0)", "def run_command(*args):\n cmd = sp.Popen(args, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT, encoding='utf-8')\n stdout, _ = cmd.communicate()\n\n if cmd.returncode != 0:\n raise ValueError(f\"Running `{args[0]}` failed with return code {cmd.returncode}, output: \\n {stdout}\")\n else:\n return stdout.strip('\\n')", "def _run(self, script, args):\n proc = subprocess.Popen([script] + args,\n stdout=subprocess.PIPE\n )\n\n stdout = proc.communicate()[0]\n retcode = proc.returncode\n\n return stdout, retcode", "def execute_cmd(cmd, verb=False):\n if verb:\n print(\"Executing: {}\".format(cmd))\n\n p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n code = p.returncode\n if code:\n sys.exit(\"Error {}: {}\".format(code, err))\n return out, err", "def _execute_cmd(args, silent = False):\n import subprocess\n\n sys.stdout.flush()\n\n # For Windows we need to use the shell so the path is searched (Python/Windows bug)\n # For Android, using the shell complicates things\n p = subprocess.Popen(args, shell=sys.platform.startswith('win'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (std_out_str, std_err_str) = p.communicate()\n returncode = p.returncode\n\n clean_std_out_str = std_out_str.translate(None,'\\r')\n clean_std_err_str = std_err_str.translate(None,'\\r')\n\n if (returncode != 0):\n raise RuntimeError(\"Error (%d) executing command: %s\" % (returncode, \" \".join(args)))\n\n return clean_std_out_str", "def subprocess(command):\n from sys import executable as python\n from subprocess import Popen,PIPE\n from sys import stderr\n command = \"from %s import *; %s\" % (modulename(),command)\n for attempt in range(0,3):\n try:\n process = Popen([python,\"-c\",command],stdout=PIPE,stderr=PIPE,\n universal_newlines=True)\n break\n except OSError,msg: # [Errno 513] Unknown error 513\n log(\"subprocess: %s\" % msg)\n sleep(1)\n output,error = process.communicate()\n if \"Traceback\" in error: raise RuntimeError(repr(command)+\"\\n\"+error)\n if error: stderr.write(error)\n return output", "def GetStatusOutput(command, **kwargs):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, bufsize=1,\n **kwargs)\n output = proc.communicate()[0]\n result = proc.returncode\n\n return (result, output)", "def run_cmd(cmd, args, path=None, raise_error=True):\n\n if path is not None:\n # Transparently support py.path objects\n path = str(path)\n\n p = sp.Popen([cmd] + list(args), stdout=sp.PIPE, stderr=sp.PIPE,\n cwd=path)\n streams = tuple(s.decode('latin1').strip() for s in p.communicate())\n return_code = p.returncode\n\n if raise_error and return_code != 0:\n raise RuntimeError(\n \"The command `{0}` with args {1!r} exited with code {2}.\\n\"\n \"Stdout:\\n\\n{3}\\n\\nStderr:\\n\\n{4}\".format(\n cmd, list(args), return_code, streams[0], streams[1]))\n\n return streams + (return_code,)", "def create_subprocess(command, args):\n\n proc = subprocess.Popen([command] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, _ = proc.communicate()\n\n return proc.returncode, output", "def run(cmd):\n \n proc = subprocess.Popen (cmd, \n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True\n )\n stdout_value, stderr_value = 
proc.communicate()\n print stdout_value\n print stderr_value\n\n if proc.poll() > 0:\n sys.stderr.write ( \"\\nError\\n\" )\n print '\\tstderr:', repr(stderr_value.rstrip())\n return False\n else:\n return True", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def run_shell_command(command, checkReturnValue=True, verbose=False):\n process = subprocess.Popen(\n command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n bufsize=1)\n outText = \"\"\n\n for line in iter(process.stdout.readline, ''):\n if verbose:\n sys.stdout.write(line)\n outText += line\n\n process.communicate()[0]\n \"\"\"\n returnValue = process.returncode\n if checkReturnValue and (returnValue != 0):\n raise Exception(outText)\n \"\"\"\n return outText", "def get_output_error(cmd, **kwargs):\n if not isinstance(cmd, list):\n cmd = [cmd]\n logging.debug(\"Running: %s\", ' '.join(map(quote, cmd)))\n try:\n result = Popen(cmd, stdout=PIPE, stderr=PIPE, **kwargs)\n except OSError as e:\n return -1, '', f'Failed to run {cmd!r}: {e!r}'\n so, se = result.communicate()\n # unicode:\n so = so.decode('utf8', 'replace')\n se = se.decode('utf8', 'replace')\n\n return result.returncode, so, se", "def execute_command(command):\n proc = subprocess.Popen(\n [\"/bin/bash\"], shell=True, cwd=os.environ['PWD'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n )\n proc.stdin.write(command)\n stdout, stderr = proc.communicate()\n rc = proc.returncode\n\n return stdout, stderr, rc", "def output_subprocess_Popen(command, **params):\r\n if 'stdout' in params or 'stderr' in params:\r\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\r\n # stdin to devnull is a workaround for a crash in a weird Windows\r\n # environement where sys.stdin was None\r\n if not hasattr(params, 'stdin'):\r\n null = open(os.devnull, 'wb')\r\n params['stdin'] = null\r\n params['stdout'] = subprocess.PIPE\r\n params['stderr'] = subprocess.PIPE\r\n p = subprocess_Popen(command, **params)\r\n # we need to use communicate to make sure we don't deadlock around\r\n # the stdour/stderr pipe.\r\n out = p.communicate()\r\n return out + (p.returncode,)", "def execute_cmd(parms_string, quiet=False):\n\n result = subprocess.run([parms_string],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n universal_newlines=True)\n\n if result.stderr and not quiet:\n print('\\n\\t\\tERROR with %s ' % parms_string)\n print('\\t\\t' + result.stderr)\n\n return result.stdout", "def run_command(cmd, redirect_output=True, check_exit_code=True):\n # subprocess模块用于产生子进程\n if redirect_output:\n stdout = subprocess.PIPE\n else:\n stdout = None\n # cwd 参数指定子进程的执行目录为ROOT,执行cwd 函数\n proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)\n # 使用communicate() 返回值为 (stdoutdata , stderrdata )\n output = proc.communicate()[0]\n if check_exit_code and proc.returncode != 0:\n # 程序不返回0,则失败\n raise Exception('Command \"%s\" failed.\\n%s' % (' '.join(cmd), output))\n return output", "def exec_cmd(cmd):\n print(' '.join(str(e) for e in cmd))\n try:\n res = subprocess.run(cmd, capture_output=True, check=True)\n print(res.stdout.decode(\"utf8\"))\n return res\n except subprocess.CalledProcessError as err:\n logging.error(err.stderr)\n raise err", "def get_exitcode_stdout_stderr(cmd):\n args = shlex.split(cmd)\n\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n out = out.decode('utf-8')\n exitcode = 
proc.returncode\n #\n return exitcode, out, err", "def get_output(cmd, err=False, returncode=0):\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n stderr = STDOUT if err else sys.stderr\n try:\n output = check_output(cmd, stderr=stderr).decode('utf8', 'replace')\n except CalledProcessError as e:\n if e.returncode != returncode:\n raise\n return e.output.decode('utf8', 'replace')\n else:\n if returncode != 0:\n raise CalledProcessError(0, cmd, output.encode('utf8'), stderr)\n return output", "def run_command(command, raise_on_try=True):\n try:\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=True)\n outdata,errdata = p.communicate()\n err = p.wait()\n except OSError, message:\n raise RuntimeError, \"%s subprocess error:\\n %s\" % \\\n (command, str(message))\n if err != 0 and raise_on_try:\n raise RuntimeError, '%s failed with exit code %d\\n%s' % \\\n (str(command), err, errdata)\n return outdata,errdata", "def _check_output(*args, **kwargs):\n kwargs['stdout'] = subprocess.PIPE\n p = subprocess.Popen(*args, **kwargs)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise ValueError(\n 'subprocess exited with return code %s' % p.returncode\n )\n return stdout", "async def checked_run(cmd, env=None):\n\n # Start the subprocess.\n logging.info('Running: %s', await expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, env=env,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n lines = []\n while True:\n line = await p.stdout.readline()\n if not line:\n break\n line = line.decode()[:-1]\n lines.append(line)\n logging.info(line)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n output = '\\n'.join(lines)[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, await expand_cmd_str(cmd), output))\n\n return output", "def RunCommand(cmd):\n logging.debug(\"Running cmd %s\", cmd)\n\n p = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True)\n o, e = p.communicate()\n s = p.returncode\n\n if s != 0:\n return (s, e)\n\n return (s, o)", "def run_subprocess(args, work_dir):\n process = subprocess.Popen(args, cwd=work_dir)\n process.communicate()\n assert process.returncode == 0", "def _process_command(self, command, stdout=None, supress_dry_run=False):\n logging.debug('Executing shell command: %s', command)\n if (self._dry_run and supress_dry_run) or not self._dry_run:\n prc = Popen(command, shell=True, stdout=stdout)\n std = list(prc.communicate())\n if std[0] is not None:\n std[0] = std[0].decode('utf-8')\n return prc.returncode, std\n return 0, ('', '')", "def execute(self, args, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr, dryrun=False):\n if isinstance(args, str):\n args = args.split()\n\n if not isinstance(args, list):\n raise ValueError('Execute arguments must be a list')\n\n if dryrun:\n self.log.debug('would execute: {}'.format(' '.join(args)))\n return 0\n\n p = Popen(args, stdin=stdin, stdout=stdout, stderr=stderr)\n p.wait()\n return p.returncode", "def run(cmd, directory, fail_ok=False, verbose=False):\n if verbose:\n print(cmd)\n p = subprocess.Popen(cmd,\n cwd=directory,\n stdout=subprocess.PIPE)\n (stdout, _) = p.communicate()\n if p.returncode != 0 and not fail_ok:\n raise RuntimeError('Failed to run {} in {}'.format(cmd, directory))\n 
return stdout", "def run_subprocess(self, input_value):\n try:\n proc = Popen([\"python\", self.SCRIPT_NAME],\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE)\n out_value, err_value = proc.communicate(\n input_value.encode(self.ENCODING),\n timeout=self.PROCESS_TIMEOUT)\n except TimeoutExpired:\n proc.kill()\n out_value, err_value = proc.communicate()\n return out_value.decode(self.ENCODING), err_value.decode(self.ENCODING)" ]
[ "0.7698771", "0.76585543", "0.7596537", "0.75804675", "0.75774103", "0.75148445", "0.7452843", "0.73803294", "0.72769535", "0.72733176", "0.7248266", "0.723106", "0.7226016", "0.7157534", "0.71220666", "0.711173", "0.7100731", "0.707238", "0.70716333", "0.70658004", "0.70643234", "0.70541227", "0.7050469", "0.7038192", "0.70335996", "0.69919187", "0.69829243", "0.69704676", "0.6964816", "0.6906557", "0.68901277", "0.6887787", "0.687976", "0.6875442", "0.6867858", "0.6864216", "0.68560255", "0.6851371", "0.6849634", "0.6845089", "0.6828696", "0.68141246", "0.68125147", "0.6809216", "0.6801887", "0.67968726", "0.6777737", "0.67460775", "0.6729008", "0.6723298", "0.67125887", "0.66989774", "0.66986126", "0.6696705", "0.66930884", "0.6686551", "0.6686423", "0.66864157", "0.66823363", "0.6676078", "0.66719306", "0.6657583", "0.66565126", "0.6653982", "0.66525847", "0.6633224", "0.66166323", "0.6605602", "0.65983963", "0.6598248", "0.6597056", "0.65945035", "0.6585428", "0.6583123", "0.6571356", "0.65646434", "0.6558151", "0.65497595", "0.6546118", "0.65430015", "0.6542787", "0.6542625", "0.6542111", "0.6539996", "0.6539001", "0.6531832", "0.6525415", "0.6523808", "0.6522915", "0.6516884", "0.6512629", "0.6502215", "0.6499858", "0.6498376", "0.6496156", "0.6490082", "0.64840627", "0.648284", "0.648278", "0.6480891" ]
0.65505224
77
An fcntl-based implementation of _IterProcessStdout.
def _IterProcessStdoutFcntl(process,
                            iter_timeout=None,
                            timeout=None,
                            buffer_size=4096,
                            poll_interval=1):
  # pylint: disable=too-many-nested-blocks
  import fcntl
  try:
    # Enable non-blocking reads from the child's stdout.
    child_fd = process.stdout.fileno()
    fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)
    fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

    end_time = (time.time() + timeout) if timeout else None
    iter_end_time = (time.time() + iter_timeout) if iter_timeout else None

    while True:
      if end_time and time.time() > end_time:
        raise TimeoutError()
      if iter_end_time and time.time() > iter_end_time:
        yield None
        iter_end_time = time.time() + iter_timeout
      if iter_end_time:
        iter_aware_poll_interval = min(poll_interval,
                                       max(0, iter_end_time - time.time()))
      else:
        iter_aware_poll_interval = poll_interval
      read_fds, _, _ = select.select([child_fd], [], [],
                                     iter_aware_poll_interval)
      if child_fd in read_fds:
        data = _read_and_decode(child_fd, buffer_size)
        if not data:
          break
        yield data
      if process.poll() is not None:
        # If process is closed, keep checking for output data (because of
        # timing issues).
        while True:
          read_fds, _, _ = select.select([child_fd], [], [],
                                         iter_aware_poll_interval)
          if child_fd in read_fds:
            data = _read_and_decode(child_fd, buffer_size)
            if data:
              yield data
              continue
          break
        break
  finally:
    try:
      if process.returncode is None:
        # Make sure the process doesn't stick around if we fail with an
        # exception.
        process.kill()
    except OSError:
      pass
    process.wait()
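A minimal usage sketch for the record above, assuming `_IterProcessStdoutFcntl` and its module-level dependencies (`os`, `time`, `select`, `_read_and_decode`) are importable from the same module; the `Popen` arguments and the 0.5 s `iter_timeout` are illustrative assumptions, not part of the record.

# Illustrative only: drain a child process's stdout without blocking forever.
import subprocess

proc = subprocess.Popen(['ping', '-c', '3', 'localhost'],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT)

for chunk in _IterProcessStdoutFcntl(proc, iter_timeout=0.5, timeout=30):
  if chunk is None:
    # iter_timeout elapsed with no new output; a caller could do periodic
    # housekeeping here before resuming the read loop.
    continue
  print(chunk, end='')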
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def _IterProcessStdoutQueue(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: disable=unused-argument\n if six.PY3:\n import queue\n else:\n import Queue as queue\n import threading\n\n stdout_queue = queue.Queue()\n\n def read_process_stdout():\n # TODO(jbudorick): Pick an appropriate read size here.\n while True:\n try:\n output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size)\n except IOError:\n break\n stdout_queue.put(output_chunk, True)\n if not output_chunk and process.poll() is not None:\n break\n\n reader_thread = threading.Thread(target=read_process_stdout)\n reader_thread.start()\n\n end_time = (time.time() + timeout) if timeout else None\n\n try:\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n try:\n s = stdout_queue.get(True, iter_timeout)\n if not s:\n break\n yield s\n except queue.Empty:\n yield None\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()\n reader_thread.join()", "def __readStdout(self):\n if self.process is not None:\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n if (\n self.currentChangelist != \"\" and\n self.rx_status.exactMatch(s)\n ):\n file = self.rx_status.cap(5).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif (\n self.currentChangelist != \"\" and\n self.rx_status2.exactMatch(s)\n ):\n file = self.rx_status2.cap(2).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif self.rx_changelist.exactMatch(s):\n self.currentChangelist = self.rx_changelist.cap(1)\n if self.currentChangelist not in self.changeListsDict:\n self.changeListsDict[self.currentChangelist] = []", "def cmd_iter(cmd):\n\n def thread_enqueue(label, f, q):\n t = threading.Thread(target=enqueue_output, args=(label, f, q))\n t.daemon = True ## thread dies with the program\n t.start()\n return t\n\n def enqueue_output(label, out, queue):\n prev_line = None\n for line in out.read():\n if prev_line is not None:\n queue.put((label, \"%s\\n\" % prev_line))\n prev_line = line\n # print(\"%s: %r\" % (label, line))\n # print(\"END of %s\" % (label, ))\n if prev_line:\n queue.put((label, prev_line))\n out.close()\n\n proc = Proc(cmd)\n proc.stdin.close()\n q = Queue()\n t1 = thread_enqueue(\"out\", proc.stdout, q)\n t2 = thread_enqueue(\"err\", proc.stderr, q)\n running = True\n while True:\n try:\n yield q.get(True, 0.001)\n except Empty:\n if not running:\n break\n proc.poll()\n running = proc.returncode is None or \\\n any(t.is_alive() for t in (t1, t2))\n\n # print(\"%s: %r\" % (\"errlvl\", proc.returncode))\n yield \"errorlevel\", proc.returncode", "def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, 
COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output", "def __iter__(self):\n p = subprocess.Popen(self.comm, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n\t\t break\n line = line.strip()\n yield line", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n print(line.strip('\\n'))\n self.pipeReader.close()", "def _stdout_to_flag(self):\n self._is_running.wait()\n while self._is_running.is_set():\n msg = self.stdout_queue.get()\n if msg is None or len(msg) < 1: # It's time to stop\n break\n if msg[0] == \"#\": # It's a signal from the kxkmcard program\n self.onEvent(msg[1:].split(' '))\n else:\n self._log(\"warning\", \"unknown stdout line {0}\".format(msg))", "def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n ),\n }\n # Unfortunately, stdout and stderr are not synchronised with each other.\n # This makes capturing both for real-time processing useless. So it is\n # currently all captured under stdout. Even more unfortunately, stderr\n # comes through first before stdout. This means writes that are made first\n # to stdout will not be first through the pipe if there is stderr output.\n #\n # This lack of sychronisation between stdout and stderr output makes\n # real-time display useless because they aren't captured and passed\n # through to the handler as they are encountered.\n #\n # Worse still, there appear to be issues with subprocess output capture on\n # Windows.\n #\n # A proper resolution would be to provide a custom subprocess module but\n # since the common usage does not require real-time capture of\n # stdout/stderr, this is not worth the effort. 
Manually running whatever\n # was intended for the subprocess outside ttt is the only recourse.\n #\n for thread in threads.values():\n thread.start()\n\n stdout = []\n stderr = []\n while threads:\n try:\n item = io_q.get(True, 1)\n except queue.Empty:\n if process.poll() is not None:\n break\n else:\n outstream, message = item\n if message == \"EXIT\":\n threads[outstream].join()\n del threads[outstream]\n else:\n message = message.rstrip(os.linesep)\n channel = sys.stdout if outstream == \"stdout\" else sys.stderr\n (stdout if outstream == \"stdout\" else stderr).append(message)\n if line_handler is not None:\n line_handler(channel, message)\n else:\n channel.write(message)\n channel.flush()\n\n for t in threads.values():\n t.join()\n process.wait()\n return (process.returncode, stdout, stderr)", "def nostdout():\n\n save_stdout = sys.stdout\n sys.stdout = cStringIO.StringIO()\n yield\n sys.stdout = save_stdout", "def reader_thread(self, q):\r\n try:\r\n with self.process.stdout as pipe:\r\n for line in iter(pipe.readline, b''):\r\n q.put(line)\r\n finally:\r\n q.put(None)", "def redirect_stdout():\n save_stdout = sys.stdout\n sys.stdout = _TQDMFile(sys.stdout)\n yield\n sys.stdout = save_stdout", "def _watch(self):\n # self._popen.wait()\n lines_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in lines_iterator:\n line = line.strip()\n # log.log(\"raw\",self.name.upper()+\" SAYS: \"+line)\n # cmd = line.split(' ')[0]\n # args = line.split(' ')[1:]\n if line[0] == '#':\n self.onEvent(line.split(' '))\n if self.onClose:\n self.onEvent([self.onClose])\n self._running.clear()\n if self.stderr is not None:\n self.stderr.close()", "def piped(self):\n\t\tpass", "def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n process.communicate()", "def read(self):\n # now read stderr for log messages, we could buffer here but since\n # we're just logging the messages, I don't care to\n try:\n out = self.proc.stderr.read()\n if out:\n LOG.debug('reading %s got %d bytes on stderr', self.name,\n len(out))\n for line in out.splitlines():\n LOG.warning('%s: %s', self.name, line)\n except IOError as err:\n if err.errno != errno.EAGAIN:\n # allowing a caller to handle the exception as well\n raise\n except:\n LOG.exception('uncaught exception in stderr read')\n\n # This read call is non-blocking\n try:\n self.buffer += self.proc.stdout.read()\n if len(self.buffer):\n LOG.debug('reading %s, buffer now %d bytes',\n self.name, len(self.buffer))\n except IOError as err:\n if err.errno != errno.EAGAIN:\n raise\n except:\n # sometimes the process goes away in another thread and we don't\n # have it anymore\n LOG.exception('uncaught exception in stdout read')\n return\n\n # iterate for each line we have\n while self.buffer:\n idx = self.buffer.find('\\n')\n if idx == -1:\n break\n\n line = self.buffer[0:idx].strip()\n if line:\n self.datalines.append(line)\n self.buffer = self.buffer[idx+1:]", "def stdio(self):\n\n if isinstance(self.log_file, TotalLogFile):\n self.stdio_stolen = True\n self.log_file.stdio()", "def _flush_buffer(self):\n self.pexpect_child.logfile = None\n flushedStuff = \"\"\n while self.pexpect_child.expect([pexpect.TIMEOUT, r\".+\"], timeout=1):\n flushedStuff += self.pexpect_child.match.group(0)\n self.pexpect_child.logfile = self.log_file", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n 
# our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n logging.log(self.level, line.strip('\\n'))\n\n self.pipeReader.close()", "def _stdin_writer(self):\n self._is_launched.wait()\n while True:\n message = self.stdin_queue.get()\n if message is None or self._is_stopping or not self._is_running.is_set():\n if message is not None:\n log.debug(\"Ignore {0} on process {1} because it's stopped\".format(message, self.name))\n break\n self._direct_stdin_writer(message)\n self._log(\"raw\", \"write to stdin : {0}\".format(message.encode(\"utf-8\")))", "def _dumpStdout(self, p, outputCallback):\n while p.poll() is None:\n try:\n # May raise IOError if in non-blocking mode\n l = p.stdout.read()\n outputCallback(l)\n except IOError:\n pass\n time.sleep(0.1)\n outputCallback(p.stdout.read())", "def hook() -> None:\n real_recv = process.recv_raw\n\n def recv(self: process, numb: int) -> bytes:\n data = real_recv(self, numb)\n # Sometimes the returned data is of type str\n # Accept them by converting them to bytes\n if type(data) == str:\n data = data.encode()\n try:\n stdout_all = self.stdout_all\n except Exception: # pylint: disable=broad-except\n stdout_all = b\"\"\n stdout_all += data\n self.stdout_all = stdout_all\n return data\n\n 
process.recv_raw = recv", "def tail(self):\n for line in iter(self.proc.stdout.readline, ''):\n if len(line) == 0:\n break\n if self.log_filter(line.decode('ASCII')):\n continue\n if self.verbose:\n logging.debug(f\"{self.prefix}: {line.decode().rstrip()}\")\n with self.logs_cond:\n self.logs.append(str(line.rstrip()))\n self.logs_cond.notifyAll()\n self.running = False\n self.proc.stdout.close()\n if self.proc.stderr:\n self.proc.stderr.close()", "def process_output(self, stdout=True, final_read=False):\n if stdout:\n pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee\n else:\n pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee\n\n if final_read:\n # read in all the data we can from pipe and then stop\n data = []\n while select.select([pipe], [], [], 0)[0]:\n data.append(os.read(pipe.fileno(), 1024))\n if len(data[-1]) == 0:\n break\n data = \"\".join(data)\n else:\n # perform a single read\n data = os.read(pipe.fileno(), 1024)\n buf.write(data)\n tee.write(data)", "def write_queued_output(self):\n for stream in [\"stdout\", \"stderr\"]:\n while True:\n output, queue_size = getattr(self, stream).readline(timeout=0.1)\n if not (output is None or len(output) == 0):\n self.log(output, self.log_level[stream])\n if queue_size == 0:\n break", "def readOutput(self):\n while True:\n char = os.read(self.pipe_out, 1).decode(self.encoding)\n if not char or self.escape_char in char:\n break\n self.capturedtext += char", "def stdout(self):\n pass", "def print_output(self, final=False):\n encoding = sys.stdout.encoding\n if final and self.process: # ask for process because might be an action\n line = self.process.stdout.read().decode(encoding)\n self.last_run['output'] += line\n sys.stdout.write(line)\n else:\n str_chunk = None\n chunk = bytes()\n while not isinstance(str_chunk, str):\n assert self.process\n chunk += self.process.stdout.read(1)\n try:\n str_chunk = chunk.decode(encoding)\n except:\n str_chunk = None\n self.last_run['output'] += str_chunk\n sys.stdout.write(str_chunk)", "def _ffmpeg_loop(cls, ffmpeg: subprocess.Popen) -> Iterable[Progress]:\n while ffmpeg.poll() is None:\n rlist, _, _ = select((ffmpeg.stderr, ffmpeg.stdout), (), ())\n # Read logs from stdin\n if ffmpeg.stderr in rlist:\n status = cls.process_logs(ffmpeg.stderr.read().splitlines())\n if status:\n yield status\n # ignore stdout\n if ffmpeg.stdout in rlist:\n ffmpeg.stdout.read()", "def run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def IterCmdOutputLines(args,\n iter_timeout=None,\n timeout=None,\n cwd=None,\n shell=False,\n env=None,\n check_status=True):\n cmd = _ValidateAndLogCommand(args, cwd, shell)\n process = Popen(\n args,\n cwd=cwd,\n shell=shell,\n env=env,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n return _IterCmdOutputLines(\n process,\n cmd,\n iter_timeout=iter_timeout,\n timeout=timeout,\n check_status=check_status)", "def nostdout():\n f = io.StringIO()\n with redirect_stdout(f):\n try:\n yield\n except Exception as err:\n raise err", "async def copier_recorder(\r\n self,\r\n ) -> None:\r\n if not self.process:\r\n raise Exception(\"missing process; was this called inside a with statement?\")\r\n\r\n assert (\r\n self.process.stdout is not None\r\n ), \"process must be opened with stdout=PIPE and stderr=STDOUT\"\r\n\r\n async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel:\r\n async for chunk in 
self.process.stdout:\r\n # print(f\"seen chunk: '{chunk!r}'\", flush=True) # debug\r\n self.stdout += chunk\r\n await self.printer_send_channel.send(chunk)\r\n\r\n # send notification\r\n # if it's full, that's fine: if expect() is run, it'll see\r\n # there's a \"pending\" notification and check stdout, then wait\r\n # for another notification\r\n try:\r\n self.notifier_send_channel.send_nowait(b\"\")\r\n except trio.WouldBlock:\r\n pass\r\n except trio.BrokenResourceError as err:\r\n print(f\"cause '{err.__cause__}'\")\r\n raise err", "def copy_file_to_stdout(file_):\n while True:\n block = file_.read(const.BUFFER_SIZE)\n if not block:\n break\n const.STDOUT.write(block)", "def process(fd):\n global last_heartbeat\n descriptors[fd]['lines'] += os.read(fd, 1024 * 1024)\n # Avoid partial lines by only processing input with breaks\n if descriptors[fd]['lines'].find('\\n') != -1:\n elems = descriptors[fd]['lines'].split('\\n')\n # Take all but the partial line\n for l in elems[:-1]:\n if len(l) > 0:\n l = '%s %s' % (descriptors[fd]['name'], l)\n logger.info(l)\n last_heartbeat = time.time()\n # Place the partial line back into lines to be processed\n descriptors[fd]['lines'] = elems[-1]", "def __iter__(self):\n command = '/usr/bin/heroku logs -t --app ' + self.app_name\n args = shlex.split(command)\n heroku = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n return iter(heroku.stdout.readline, b'')", "async def read_console(self):\n while self.proc is not None and self.proc.poll() is None:\n line = await self.loop.run_in_executor(None, self.proc.stdout.readline) # Async readline\n # Parse the command output and get the time in epoch format\n match = re.match(r'\\[([0-9]{2}):([0-9]{2}):([0-9]{2})\\] \\[([^][]*)\\]: (.*)$', line.decode())\n if match is None:\n return\n h, m, s, log, text = match.groups()\n local = time.localtime()\n if h == 23 and local.tm_hour == 0: # In case a line from 23:59 gets parsed at 00:00\n local = time.localtime(time.time()-3600)\n log_t = list(local)\n log_t[3:6] = map(int, (h, m, s))\n log_time = time.mktime(tuple(log_t))\n self.loop.create_task(self.on_line(log_time, log, text))", "def stdout(self):\n p = self.bg()\n if isinstance(p, PyPipe):\n return p.iter_stdout\n else:\n return p.stdout", "def continuous_shell_reader(self):\n\n while not self.thread_stop.is_set():\n out = self.shell_reader()\n\n if not out == \"\":\n print(\"IPC: Received: {}\".format(out))", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def watch(self):\n reader, writer = os.pipe2(0)\n\n pid = os.fork()\n\n # In the child\n if pid == 0:\n tty.setraw(0)\n os.close(reader)\n os.close(2)\n\n os.dup2(writer, 1)\n\n os.execlp(self.__program, self.__program, *self.__args)\n\n sys.exit(1)\n else:\n os.close(writer)\n\n while True:\n result = os.read(reader, 1024)\n if len(result) == 0:\n break\n sys.stdout.write(result.decode('utf-8'))\n\n os.waitpid(pid, 0)", "def pipe_thru(*commands):\n if commands is not None:\n last_process = None\n for command in commands:\n if last_process is None:\n last_process = Popen(command, stdout=PIPE, stderr=PIPE)\n else:\n last_process = Popen(command, stdin=last_process.stdout, stdout=PIPE, stderr=PIPE)\n System.log_subprocess_output(last_process)", "def waitOutput( self, verbose=False ):\n log = info if verbose else debug\n output = ''\n while self.waiting:\n data = self.monitor()\n output += data\n log( data )\n return output", "def output(self, output, status=None):\n 
if output:\n size = self.cli.output.get_size()\n\n margin = self.get_output_margin(status)\n\n fits = True\n buf = []\n output_via_pager = self.explicit_pager and special.is_pager_enabled()\n for i, line in enumerate(output, 1):\n self.log_output(line)\n special.write_tee(line)\n special.write_once(line)\n\n if fits or output_via_pager:\n # buffering\n buf.append(line)\n if len(line) > size.columns or i > (size.rows - margin):\n fits = False\n if not self.explicit_pager and special.is_pager_enabled():\n # doesn't fit, use pager\n output_via_pager = True\n\n if not output_via_pager:\n # doesn't fit, flush buffer\n for line in buf:\n click.secho(line)\n buf = []\n else:\n click.secho(line)\n\n if buf:\n if output_via_pager:\n # sadly click.echo_via_pager doesn't accept generators\n click.echo_via_pager(\"\\n\".join(buf))\n else:\n for line in buf:\n click.secho(line)\n\n if status:\n self.log_output(status)\n click.secho(status)", "def _exec_cmd_helper(self, cmd: str, nvim_ipc: str):\n assert self.busy is False\n\n self.shared_status.set_running()\n self.busy = True\n os.system(\"clear\")\n logging.info(\"Executing cmd {0}\".format(cmd))\n\n start = time.time()\n\n success = False\n if self.command_group.is_cmd_runner_command(cmd):\n for runner in self.runners:\n if runner.config.name == cmd:\n success = runner.run_all()\n break\n else:\n # The code block below essentially just \"tees\" the stdout and\n # stderr to a log file, while still preserving the terminal\n # output (inclusive colors).\n # Using subprocess.PIPE does not seem possible under Darwin,\n # since the pipe does not have the isatty flag set (the isatty\n # flag affects the color output).\n # Note that the file is only written at the end and not streamed.\n master, slave = pty.openpty()\n\n # This prevents LF from being converted to CRLF\n attr = termios.tcgetattr(slave)\n attr[1] = attr[1] & ~termios.ONLCR\n termios.tcsetattr(slave, termios.TCSADRAIN, attr)\n\n proc = subprocess.Popen(cmd, shell=True, stdout=slave, stderr=slave, close_fds=False)\n\n # Close the write end of the pipe in this process, since we don't need it.\n # Otherwise we would not get EOF etc.\n os.close(slave)\n\n read_stdout_stderr = os.fdopen(master, 'rb', buffering=0)\n complete_output = \"\"\n\n try:\n while proc.poll() is None:\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n\n # Read the last line\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n # This error is \"expected\" under Linux systems.\n # readline() doesn't seem to behave properly there.\n # The exception does not occur on MacOS.\n except OSError as oserr:\n if oserr.errno != errno.EIO or proc.poll() is None:\n logging.critical(\"Unexpected OS error: {0}\".format(oserr))\n except:\n logging.critical(\"Unexpected error while reading from process\")\n\n os.close(master)\n proc.wait()\n\n if proc.returncode == 0:\n success = True\n\n logfile, logfilename = tempfile.mkstemp(dir=cybld_helpers.get_base_path(),\n prefix=cybld_helpers.NVIM_LOG_PREFIX)\n\n # strip color codes from logfile\n # complete_output = re.sub(r'(\\x9B|\\x1B\\[)[0-?]*[ -\\/]*[@-~]', '', complete_output)\n complete_output = re.sub(r'\\x1b(\\[.*?[@-~]|\\].*?(\\x07|\\x1b\\\\))', '', complete_output)\n\n with open(logfile, 'w+') as logfile_opened:\n logfile_opened.write(complete_output)\n\n CyBldIpcNeovim(True, nvim_ipc, logfilename, cmd)\n\n end = time.time()\n\n self.busy = False\n cybld_helpers.print_seperator_lines()\n\n 
timediff_in_seconds = str(int(end - start))\n\n if success:\n cybld_helpers.print_centered_text(\"SUCCESS: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), True)\n self.shared_status.set_success()\n else:\n cybld_helpers.print_centered_text(\"FAIL: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), False)\n self.shared_status.set_fail()\n\n if self.settings.print_stats:\n cybld_helpers.print_centered_text(self.stats.get_command_stats(cmd), None)\n\n if success:\n self.talker.say_success()\n else:\n self.talker.say_fail()\n\n cybld_helpers.print_seperator_lines()\n self.stats.update_command_stats(cmd, success, int(timediff_in_seconds))\n\n if success:\n self.success_callback(cmd)\n else:\n self.fail_callback(cmd)", "def outputStatus(self, line):\r\n for l in line.strip('\\r\\n').split('\\n'):\r\n self.output('%s: %s' % (ctime(), l), 0)", "def progress_callbacks(self):\n self.interface.progress_start()\n self.interface.on_write_stdout.add(self.handle_progress)\n self.interface.on_write_stderr.add(self.handle_progress)\n try:\n yield\n finally:\n self.interface.on_write_stderr.remove(self.handle_progress)\n self.interface.on_write_stdout.remove(self.handle_progress)\n self.interface.progress_end()", "def __call__(self, fd_in, fd_out, len_, flags):\n\n if not self.available:\n raise EnvironmentError('tee not available')\n\n if not isinstance(flags, six.integer_types):\n c_flags = six.moves.reduce(operator.or_, flags, 0)\n else:\n c_flags = flags\n\n c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()\n c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()\n\n return self._c_tee(c_fd_in, c_fd_out, len_, c_flags)", "def readProcessStdoutLog(self, name, offset, length):\r\n self._update('readProcessStdoutLog')\r\n return self._readProcessLog(name, offset, length, 'stdout')", "def running_output(process, outputs):\n state = type(\"State\",\n (object, ),\n {\n \"printed_message\": False,\n \"read_first_byte\": False\n })\n\n def output_printer(file_handle):\n \"\"\"Thread that prints the output of this process.\"\"\"\n character = bytearray()\n while True:\n character += file_handle.read(1)\n try:\n if character:\n if not state.read_first_byte:\n state.read_first_byte = True\n\n if character != \"\\n\":\n IndentedLogger.message(\"\\n\")\n\n # If this fails, then we will just read further characters\n # until the decode succeeds.\n IndentedLogger.message(character.decode(\"utf-8\"))\n state.printed_message = True\n character = bytearray()\n else:\n return\n except UnicodeDecodeError:\n continue\n\n stdout = threading.Thread(target=output_printer, args=(outputs[0], ))\n\n stdout.start()\n stderr_lines = list(outputs[1])\n\n try:\n status = process.wait()\n finally:\n stdout.join()\n\n # Print a new line before printing any stderr messages\n if len(stderr_lines):\n IndentedLogger.message(\"\\n\")\n\n for line in stderr_lines:\n IndentedLogger.message(line.decode(\"utf-8\"))\n state.printed_message = True\n\n if state.printed_message:\n print_message(\"\\n\")\n\n return status", "def capture_print():\n\n old_streams = sys.stdout, sys.stderr\n sys.stdout = sys.stderr = io.StringIO()\n filestring = FileString(sys.stdout)\n try:\n yield filestring\n finally:\n sys.stdout, sys.stderr = old_streams\n filestring.read()", "def echo_pipe(name: str, pipe: IO[AnyStr], dest_file: TextIO):\r\n assert pipe\r\n with pipe:\r\n for line in iter(pipe.readline, b''):\r\n if not line:\r\n break\r\n logging.debug(name, len(line), line, end=\"\", file=dest_file)\r\n print(line, end=\"\", file=dest_file)\r\n\r\n 
logging.info(f\"{name} closed\")", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n echo(line.strip('\\n'), ctx=self.click_ctx, err=self.is_err)\n\n self.pipeReader.close()", "def print_twice(pipe, ofile, last_line):\n\n # Utility subroutine to print listing data both to stdout\n # and to the listing file, accessed via the ofile handle\n lastlineempty = False # JPG addition here as opposed to argument\n last_dat = b''\n for line in iter(pipe.readline, b''):\n dat = line.rstrip()\n # This IF statement just avoid printing a lot of blank lines\n # at the end of the run, before Python realises that the process\n # has stopped.\n if dat == b'':\n if not lastlineempty:\n print(dat.decode('utf-8'))\n if ofile != None:\n # Write to sortiefile (if requested)\n ofile.write(dat.decode('utf-8')+'\\n')\n # Set to avoid printing multiple consecutive newlines\n lastlineempty = True\n else:\n lastlineempty = False\n print(dat.decode('utf-8'))\n if ofile != None:\n # Write to sortiefile (if requested)\n ofile.write(dat.decode('utf-8')+'\\n')\n last_dat = dat\n\n last_line.append(last_dat)", "def readlines(channel=LOG_CHANNEL_STDOUT):", "def output_to_pipe(pipe_in):\n os.dup2(pipe_in, 1) # stdout\n # os.dup2(pipe_in, 2) # stderr", "def write( shell, data ):\n #print 'cmd: ' + data\n global waiting\n os.write( shell.stdin.fileno(), data )\n waiting = True", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()", "def output_to_screen(stdout_fd, stderr_fd):\n os.dup2(stdout_fd, 1)\n #os.dup2(stderr_fd, 2)", "async def __aiter__(self):\n message = b''\n # wait until the pipe is opened by a writer\n await wait_readable(self.fd)\n while True:\n try:\n item = os.read(self.fd, self.blocksize)\n except BlockingIOError:\n # pipe is empty, yield message and wait for another\n if self.encoding is not None:\n message = message.decode(self.encoding)\n yield message\n message = b''\n await wait_readable(self.fd)\n else:\n if not item:\n # pipe is closed, return\n break\n else:\n message += item\n if self.encoding is not None:\n message = message.decode(self.encoding)\n yield message", "def run_and_tee_output(args):\n output_bytes = bytearray()\n\n def read(fd):\n data = os.read(fd, 1024)\n output_bytes.extend(data)\n return data\n\n pty.spawn(args, read)\n\n # Strip ANSI / terminal escapes.\n output_bytes = _ANSI_ESCAPE_8BIT_REGEX.sub(b'', output_bytes)\n\n return output_bytes.decode('utf-8')", "def stdout_read(self, timeout):\n chan = self._chan\n now = datetime.datetime.now()\n timeout_time = now + datetime.timedelta(seconds=timeout)\n output = \"\"\n while not _SHELL_PROMPT.search(output):\n rd, wr, err = select([chan], [], [], _SELECT_WAIT)\n if rd:\n data = chan.recv(_RECVSZ)\n output += data.decode()\n if datetime.datetime.now() > timeout_time:\n raise TimeoutError\n return output", "def captured_output(self):\n new_out, new_err = StringIO(), StringIO()\n old_out, old_err = sys.stdout, sys.stderr\n try:\n sys.stdout, sys.stderr = new_out, new_err\n yield sys.stdout, sys.stderr\n finally:\n sys.stdout, sys.stderr = old_out, old_err", "def pipemeter(cmd1, cmd2):\n\n proc1 = subprocess.Popen(cmd1, bufsize=0, shell=True, stdout=subprocess.PIPE)\n proc2 = subprocess.Popen(cmd2, bufsize=0, shell=True, stdin=subprocess.PIPE)\n 
bytes_piped = 0\n\n while True:\n data = proc1.stdout.read(CHUNKSIZE)\n length = len(data)\n if length == 0:\n break\n\n written = proc2.stdin.write(data)\n if written != length:\n raise RuntimeError(\"Write failed, wanted to write: {}, written={}\".format(length, written))\n\n bytes_piped += length\n\n proc1.stdout.close()\n proc2.stdin.close()\n\n return proc1.wait(), proc2.wait(), bytes_piped", "def captured_std_streams() -> ContextManager[Tuple[TextIO, TextIO, TextIO]]:\n stdin_r, stdin_w = os.pipe()\n stdout_r, stdout_w = os.pipe()\n stderr_r, stderr_w = os.pipe()\n stdin_old, stdout_old, stderr_old = sys.stdin, sys.stdout, sys.stderr\n\n # We close the files explicitly at the end of ths scope.\n sys.stdin = os.fdopen(stdin_r, closefd=False)\n sys.stdout = os.fdopen(stdout_w, \"w\", closefd=False)\n sys.stderr = os.fdopen(stderr_w, \"w\", closefd=False)\n try:\n yield os.fdopen(stdin_w, \"w\"), os.fdopen(stdout_r), os.fdopen(stderr_r)\n finally:\n sys.stdout.flush()\n sys.stderr.flush()\n os.close(stdin_r)\n os.close(stdout_w)\n os.close(stderr_w)\n sys.stdin, sys.stdout, sys.stderr = stdin_old, stdout_old, stderr_old", "def _stream_ffmpeg(config: Configuration, ffmpeg_proc: subprocess.Popen, signal: RunningSignal):\n while True:\n try:\n yield ffmpeg_proc.stdout.read(config.bytes_per_read)\n except:\n ffmpeg_proc.terminate()\n ffmpeg_proc.communicate()\n signal.stop()\n break", "def _streaming_command(self, service, command, transport_timeout_s, read_timeout_s, timeout_s):\n adb_info = self._open(b'%s:%s' % (service, command), transport_timeout_s, read_timeout_s, timeout_s)\n\n for data in self._read_until_close(adb_info):\n yield data", "def yield_output(self, args, *popenargs, **kwargs):\n p = self.create_process(args,\n stdout=subprocess.PIPE,\n *popenargs,\n **kwargs)\n for line in p.stdout:\n yield line\n p.wait()\n if p.returncode:\n raise subprocess.CalledProcessError(p.returncode,\n self.build_args(args))", "def write_stdout(self, data):\n filt, handler = self.filter[-1]\n data, filtered = filt.filter(data)\n self._write(pty.STDOUT_FILENO, data)\n if filtered:\n self.log(\"Filter matched %d bytes\" % len(filtered))\n self.filter.pop()\n assert callable(handler)\n res = handler(filtered)\n if res:\n self.sock.sendto(res, 0, self.last_addr)", "def run_and_log_output(cmd_string):\n logging.info('Running %s', cmd_string)\n c = iterpipes.cmd(cmd_string)\n out = iterpipes.run(c)\n for line in out:\n logging.info(line)", "def fork(self):\r\n\r\n return _StreamFork(self)", "def runCommand(command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1)\n for line in p.stdout:\n print (line.decode(\"utf-8\"),end=\"\") # the end=\"\" argument to print prevents unwanted newlines after each line\n p.wait()", "def wait(t,p):\n\toutput_list = []\n\tc = ''\n\td = ''\n\twhile p not in d:\n\t\tc = t.read_very_eager()\n\t\tif len(c) > 0:\n\t\t\td += c\n\t\t\tprint c\n\t\t\toutput_list.append(c)\n\t\tif \"Press any key to continue\" in c or \"--More--\" in c:\n\t\t\tt.write(\" \")\n\toutput_list = ((''.join(output_list)).replace('\\r\\n','\\n')).split('\\n')\n\treturn output_list", "def on(self):\n self._current_stream = self._stdout", "def recieve(self):\n return self.__proc.stdout.readline().strip('\\n')", "def atomic_io(cmd, in_file, out_file, err_file, prog=None):\n with open(in_file, 'r') as inp, open(out_file, 'w') as out, open(err_file, 'w') as err:\n p = subprocess.Popen(\n cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=err)\n while True:\n line = 
inp.readline()\n if not line:\n break\n p.stdin.write(line)\n out.write(p.stdout.readline())\n out.flush()\n if prog:\n prog.inc()\n p.stdin.close()\n p.wait()", "def _read_rs(self, process, append):\n print('read_rs thread started')\n for line in iter(process.stdout.readline, \"\"):\n if 'value 1' in line.decode('utf-8'):\n self.vol_up()\n if 'value -1' in line.decode('utf-8'):\n self.vol_down()\n print('read_rs thread stopped')", "def idle_writer(output, color=None):\n if isinstance(output, str):\n if color is None:\n sys.stdout.shell.write(output, \"stderr\") # noqa\n else:\n sys.stdout.shell.write(output, color) # noqa\n return\n for fragment in output:\n if isinstance(fragment, str):\n sys.stdout.shell.write(fragment, \"stderr\") # noqa\n elif len(fragment) == 2:\n sys.stdout.shell.write(fragment[0], fragment[1]) # noqa\n else:\n sys.stdout.shell.write(fragment[0], \"stderr\") # noqa", "def _prepare_child_fds(self):\r\n sock_fd = self.fcgi_sock.fileno()\r\n\r\n options = self.config.options\r\n options.dup2(sock_fd, 0)\r\n options.dup2(self.pipes['child_stdout'], 1)\r\n if self.config.redirect_stderr:\r\n options.dup2(self.pipes['child_stdout'], 2)\r\n else:\r\n options.dup2(self.pipes['child_stderr'], 2)\r\n for i in range(3, options.minfds):\r\n options.close_fd(i)", "def stdout_redirector(stream):\n\n old_stdout = sys.stdout\n sys.stdout = stream\n try:\n yield\n finally:\n sys.stdout = old_stdout", "def executeOld(cmd):\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, \"\"):\n yield stdout_line \n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)", "def capture_output():\r\n stdout, stderr = sys.stdout, sys.stderr\r\n sys.stdout, sys.stderr = StringIO(), StringIO()\r\n out, err = [], []\r\n try:\r\n yield out, err\r\n finally:\r\n out.extend(sys.stdout.getvalue().splitlines())\r\n err.extend(sys.stderr.getvalue().splitlines())\r\n sys.stdout, sys.stderr = stdout, stderr", "def write_long_output(self, n):\n for _ in range(n):\n sys.stderr.write(\"spam, bacon, eggs\\n\")", "def _print_status(self, stream, count, final=False):\n twidth = shutil.get_terminal_size()[0]\n msg_width = min((twidth // 2) - 5, 20)\n if stream == 'in':\n self._last_in_count = count\n if final:\n self._in_finished = True\n else:\n self._last_out_count = count\n if final:\n self._out_finished = True\n\n in_msg = '{: <{}}'.format('in: {}'.format(self._last_in_count),\n msg_width)\n out_msg = '{: <{}}'.format('out: {}'.format(self._last_out_count),\n msg_width)\n print('\\r{} -> {}'.format(in_msg, out_msg), end='', flush=True)\n if self._in_finished and self._out_finished:\n print()", "def test_stdout_to_pipe(self):\n original_stdout = sys.stdout\n with self.stdout_to_pipe() as output:\n self.assertNotEqual(original_stdout, sys.stdout)\n print \"Hello world!\"\n self.assertEqual(output.readline(), \"Hello world!\\n\")\n # Line without CR should be readable after closing\n sys.stdout.write(\"Goodbye\")\n self.assertEqual(original_stdout, sys.stdout)\n # Now that writing side is closed, we should be able to read\n # up to EOF.\n self.assertEqual(output.readline(), \"Goodbye\")", "def paexec_out_stream(buffer_size=4096):\n b_data = pkgutil.get_data('pypsexec', 'paexec.exe')\n byte_count = len(b_data)\n for i in range(0, byte_count, buffer_size):\n yield b_data[i:i + buffer_size], i", "def captured_output(stream_name):\n import StringIO\n orig_stdout = 
getattr(sys, stream_name)\n setattr(sys, stream_name, StringIO.StringIO())\n yield getattr(sys, stream_name)\n setattr(sys, stream_name, orig_stdout)", "def getLines(self, **cmdKwargs):\n assert 'args' in cmdKwargs;\n assert len(cmdKwargs.keys())==1;\n import subprocess;\n popen = subprocess.Popen(args=cmdKwargs['args'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n lines = iter(popen.stdout.readline, b\"\") \n return lines;", "def update(self):\n return\n #~ self.stdout_l.append(self.ffplay_proc.stdout.read())\n print(\"update\")\n data = self.ffplay_proc.stderr.read(1000000)\n print(\"got data\")\n print(len(data))\n while len(data) == 1000000:\n self.stderr_l.append(data)\n data = self.ffplay_proc.stderr.read(1000000)\n print(\"done\")", "def pflush(*args, **kwargs):\n print(*args, **kwargs)\n sys.stdout.flush()", "def _log_output(config: Configuration, stderr: IO, signal: RunningSignal):\n if config.verbose > 0:\n logger = logging.getLogger(\"ffmpeg\")\n while signal.running():\n try:\n line = _readline(stderr)\n if line != '':\n logger.info(line)\n except:\n pass\n logger.debug(\"Logging thread ended\")", "def captured_output():\n new_out, new_err = io.StringIO(), io.StringIO()\n old_out, old_err = sys.stdout, sys.stderr\n try:\n sys.stdout, sys.stderr = new_out, new_err\n yield sys.stdout, sys.stderr\n finally:\n sys.stdout, sys.stderr = old_out, old_err", "def read_stdout(self, dt):\n\n self.temp_stdout += self.temp_output\n self.ids[\"txt_code_output\"].text = self.temp_output", "def testStdoutReadDuringCapture(self):\n with self.OutputCapturer():\n print('foo')\n self.AssertOutputContainsLine('foo')\n print('bar')\n self.AssertOutputContainsLine('bar')\n self.AssertOutputContainsLine('foo')\n self.AssertOutputContainsLine('bar')", "def writer():\n\twhile True:\n\t\tw = (yield)\t\n\t\tprint('>>', w)", "def communicate(self, std_in=None, timeout=0):\n if timeout <= 0:\n return super(Popen, self).communicate(input=std_in)\n\n fds = []\n stdout = []\n stderr = []\n\n if self.stdout is not None:\n set_file_nonblock(self.stdout)\n fds.append(self.stdout)\n if self.stderr is not None:\n set_file_nonblock(self.stderr)\n fds.append(self.stderr)\n\n if std_in is not None and sys.stdin is not None:\n sys.stdin.write(std_in)\n\n returncode = None\n inactive = 0\n while returncode is None:\n (rlist, dummy_wlist, dummy_xlist) = select.select(\n fds, [], [], 1.0)\n\n if not rlist:\n inactive += 1\n if inactive >= timeout:\n raise TimeoutError\n else:\n inactive = 0\n for fd in rlist:\n if fd is self.stdout:\n stdout.append(fd.read())\n elif fd is self.stderr:\n stderr.append(fd.read())\n\n returncode = self.poll()\n\n if self.stdout is not None:\n stdout = ''.join(stdout)\n else:\n stdout = None\n if self.stderr is not None:\n stderr = ''.join(stderr)\n else:\n stderr = None\n\n return (stdout, stderr)", "def _read_thread(proc, ready_event):\n ready = False\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n\n if output_lines is not None:\n output_lines.append(line)\n\n if not ready and indicator in line:\n ready = True\n ready_event.set()", "def dispatch(greps):\n try:\n while True:\n line = (yield)\n for grep in greps:\n grep.send(line)\n except GeneratorExit:\n for grep in greps:\n grep.close()" ]
[ "0.6449587", "0.6135046", "0.60871077", "0.595699", "0.594129", "0.58840406", "0.5684317", "0.5682602", "0.5677694", "0.5666196", "0.565126", "0.55647177", "0.5556993", "0.55479926", "0.54802346", "0.5458966", "0.5452489", "0.54261523", "0.5415573", "0.539853", "0.53972363", "0.5392595", "0.5381609", "0.534832", "0.5345636", "0.53397214", "0.533477", "0.52793676", "0.5277436", "0.52761036", "0.52685946", "0.5264866", "0.5186033", "0.5178133", "0.5177319", "0.5174133", "0.5166621", "0.5165542", "0.51427776", "0.51419026", "0.51364124", "0.51217294", "0.51151156", "0.5104355", "0.5098214", "0.5059039", "0.50570124", "0.5048078", "0.50419927", "0.50238854", "0.501661", "0.5016049", "0.5012398", "0.500051", "0.49888226", "0.49868542", "0.49751514", "0.49602494", "0.49506098", "0.495007", "0.49381223", "0.49346387", "0.49317294", "0.4925827", "0.49204853", "0.49196395", "0.49090925", "0.49075556", "0.49007902", "0.48940465", "0.4890303", "0.48897937", "0.48793623", "0.48703256", "0.48580068", "0.48538294", "0.48504132", "0.48495388", "0.4848708", "0.4830293", "0.48288655", "0.48254582", "0.48173502", "0.48166552", "0.48052004", "0.4800292", "0.4799399", "0.479107", "0.4790988", "0.47903803", "0.47767022", "0.4770163", "0.476741", "0.4766832", "0.47654915", "0.47617254", "0.47605458", "0.4759835", "0.47569004", "0.47552356" ]
0.67283905
0
A Queue.Queue-based implementation of _IterProcessStdout.
def _IterProcessStdoutQueue(process, iter_timeout=None, timeout=None, buffer_size=4096, poll_interval=1): # pylint: disable=unused-argument if six.PY3: import queue else: import Queue as queue import threading stdout_queue = queue.Queue() def read_process_stdout(): # TODO(jbudorick): Pick an appropriate read size here. while True: try: output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size) except IOError: break stdout_queue.put(output_chunk, True) if not output_chunk and process.poll() is not None: break reader_thread = threading.Thread(target=read_process_stdout) reader_thread.start() end_time = (time.time() + timeout) if timeout else None try: while True: if end_time and time.time() > end_time: raise TimeoutError() try: s = stdout_queue.get(True, iter_timeout) if not s: break yield s except queue.Empty: yield None finally: try: if process.returncode is None: # Make sure the process doesn't stick around if we fail with an # exception. process.kill() except OSError: pass process.wait() reader_thread.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def write_queued_output(self):\n for stream in [\"stdout\", \"stderr\"]:\n while True:\n output, queue_size = getattr(self, stream).readline(timeout=0.1)\n if not (output is None or len(output) == 0):\n self.log(output, self.log_level[stream])\n if queue_size == 0:\n break", "def enqueue_output(self, out, queue):\n\n started = False\n finished = False\n\n while not self.stop:\n line = out.readline()\n queue.put(line)\n # Test if we have reached the end of the output\n if started and IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n finished = True\n if IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n started = True\n if finished and self.comms_lock.locked():\n self.comms_lock.release()\n started = False\n finished = False\n\n time.sleep(QUEUE_THREAD_SLEEP_TIME)", "def reader_thread(self, q):\r\n try:\r\n with self.process.stdout as pipe:\r\n for line in iter(pipe.readline, b''):\r\n q.put(line)\r\n finally:\r\n q.put(None)", "def cmd_iter(cmd):\n\n def thread_enqueue(label, f, q):\n t = threading.Thread(target=enqueue_output, args=(label, f, q))\n t.daemon = True ## thread dies with the program\n t.start()\n return t\n\n def enqueue_output(label, out, queue):\n prev_line = None\n for line in out.read():\n if prev_line is not None:\n queue.put((label, \"%s\\n\" % prev_line))\n prev_line = line\n # print(\"%s: %r\" % (label, line))\n # print(\"END of %s\" % (label, ))\n if prev_line:\n queue.put((label, prev_line))\n out.close()\n\n proc = Proc(cmd)\n proc.stdin.close()\n q = Queue()\n t1 = thread_enqueue(\"out\", proc.stdout, q)\n t2 = thread_enqueue(\"err\", proc.stderr, q)\n running = True\n while True:\n try:\n yield q.get(True, 0.001)\n except Empty:\n if not running:\n break\n proc.poll()\n running = proc.returncode is None or \\\n any(t.is_alive() for t in (t1, t2))\n\n # print(\"%s: %r\" % (\"errlvl\", proc.returncode))\n yield \"errorlevel\", proc.returncode", "def output_function(**kwargs):\n\n\t\toutput_queue = kwargs['q']\n\t\twhile True:\n\t\t\titem = output_queue.get()\n\t\t\t# expects to get a string or None\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\toutfile.write(item)\n\t\t\t# outfile.write(\"output_function:: {item}\".format(item=item)+\"\\n\")\n\t\t\toutput_queue.task_done()", "def print_all_contents(self, *args, **kwargs):\n while self.has_to_print():\n # Try to print the first element in the queue.\n tar_to_print: str = self.print_queue[0].tar\n self.print_monitor.wait_turn(self, tar_to_print, *args, **kwargs)\n\n # Print all applicable values in the print_queue.\n while self.print_queue and (self.print_queue[0].tar == tar_to_print):\n msg: str = self.print_queue.popleft().msg\n print(msg, end=\"\", flush=True)\n\n # If True, then all of the output for extracting tar_to_print was in the queue.\n # Since we just finished printing all of it, we can move onto the next one.\n if self.is_output_done_enqueuing[tar_to_print]:\n # Let all of the other workers know that this worker is done.\n self.print_monitor.done_dequeuing_output_for_tar(self, tar_to_print)", "def read_stream(self, output_queue, stream_type):\n output = []\n\n # Get all available output off the queue.\n try:\n while 1:\n output.append(output_queue.get_nowait())\n except Empty:\n pass\n\n # If we 
read any output, toss it out to the logger\n if len(output):\n logger = logging.getLogger('taskmaster.processes.{}'.format(self.process_index))\n\n if stream_type == StreamType.Stdout:\n for line in output:\n logger.info(line)\n elif stream_type == StreamType.Stderr:\n for line in output:\n logger.error(line)\n\n # Get the current status to determine if we should try to read more or stop.\n current_status = psutil.STATUS_DEAD\n try:\n current_status = self.process.status()\n except psutil.NoSuchProcess:\n pass\n\n if current_status != psutil.STATUS_DEAD:\n # Process still alive, schedule the call to read more output.\n self.ioloop.call_later(0.1, self.read_stream, *[output_queue, stream_type])\n else:\n # Process has died. Flush the iostreams so the BlockingStreamReader triggers one last time and\n # nicely exits.\n self.process.stdout.flush()\n self.process.stderr.flush()", "def print_queue(self):\n for i in self.Obs:\n print(i)", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def put_lines_into_queue(self):\n for line in iter(self.stream.readline, b''):\n self.queue.put(line)\n self.stream.close()", "def print_queue(self):\n for value in self.data:\n element = f'| {value} |'\n print(element)", "def __iter__(self):\n command = '/usr/bin/heroku logs -t --app ' + self.app_name\n args = shlex.split(command)\n heroku = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n return iter(heroku.stdout.readline, b'')", "def _stdin_writer(self):\n self._is_launched.wait()\n while True:\n message = self.stdin_queue.get()\n if message is None or self._is_stopping or not self._is_running.is_set():\n if message is not None:\n log.debug(\"Ignore {0} on process {1} because it's stopped\".format(message, self.name))\n break\n self._direct_stdin_writer(message)\n self._log(\"raw\", \"write to stdin : {0}\".format(message.encode(\"utf-8\")))", "def use_queue():\n q = queue.Queue()\n for i in range(10):\n q.put_nowait(i)\n while q.qsize() > 0:\n element = q.get_nowait()\n sys.stdout.write(\"poping out from queue: {0}\\n\".format(element))", "def clean_output(self, process, queue):\n while True:\n try:\n dirty = process.getline()\n clean = self.parse(dirty)\n except Queue.Empty:\n process.queueHasData.wait()\n except ValueError as inst:\n print(\"Error: \" + str(inst))\n else:\n if clean != None:\n self.cleanOutput.append(clean)", "def read_nonblock(self):\n out_str = []\n\n try:\n while not self.queue.empty():\n out_str.append(self.queue.get_nowait())\n except Empty:\n pass\n\n return \"\\n\".join(out_str)", "def __readStdout(self):\n if self.process is not None:\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n if (\n self.currentChangelist != \"\" and\n self.rx_status.exactMatch(s)\n ):\n file = self.rx_status.cap(5).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif (\n self.currentChangelist != \"\" and\n self.rx_status2.exactMatch(s)\n ):\n file = self.rx_status2.cap(2).strip()\n 
filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif self.rx_changelist.exactMatch(s):\n self.currentChangelist = self.rx_changelist.cap(1)\n if self.currentChangelist not in self.changeListsDict:\n self.changeListsDict[self.currentChangelist] = []", "def __iter__(self):\n p = subprocess.Popen(self.comm, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n\t\t break\n line = line.strip()\n yield line", "def _IterProcessStdoutFcntl(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: disable=too-many-nested-blocks\n import fcntl\n try:\n # Enable non-blocking reads from the child's stdout.\n child_fd = process.stdout.fileno()\n fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)\n fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\n end_time = (time.time() + timeout) if timeout else None\n iter_end_time = (time.time() + iter_timeout) if iter_timeout else None\n\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n if iter_end_time and time.time() > iter_end_time:\n yield None\n iter_end_time = time.time() + iter_timeout\n\n if iter_end_time:\n iter_aware_poll_interval = min(poll_interval,\n max(0, iter_end_time - time.time()))\n else:\n iter_aware_poll_interval = poll_interval\n\n read_fds, _, _ = select.select([child_fd], [], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if not data:\n break\n yield data\n\n if process.poll() is not None:\n # If process is closed, keep checking for output data (because of timing\n # issues).\n while True:\n read_fds, _, _ = select.select([child_fd], [], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if data:\n yield data\n continue\n break\n break\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()", "def traceQueueContents(self):\n from typhon.objects.printers import toString\n debug_print(\"Pending queue for \" + self.name.encode(\"utf-8\"))\n for (resolver, target, atom, args, namedArgs) in self._pending:\n debug_print(toString(target).encode('utf-8') +\n \".\" + atom.verb.encode('utf-8') + \"(\" +\n ', '.join([toString(a).encode('utf-8')\n for a in args]) + \")\")", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input_data = arguments[\"input\"].encode(\"utf-8\") if arguments[\"input\"] else None\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution of taskw: '{0}' . 
\"\n \"If you are running out-of-tree tests set TASK_USE_PATH=1 \"\n \"in shell env before execution and add the \"\n \"location of the task(d) binary to the PATH\".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input_data)\n\n if sys.version_info > (3,):\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def __call__(self) -> buffer.Buffer:\n processed_buffer = self.output_queue.get()\n\n return processed_buffer", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . \".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. 
If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def _stdout_to_flag(self):\n self._is_running.wait()\n while self._is_running.is_set():\n msg = self.stdout_queue.get()\n if msg is None or len(msg) < 1: # It's time to stop\n break\n if msg[0] == \"#\": # It's a signal from the kxkmcard program\n self.onEvent(msg[1:].split(' '))\n else:\n self._log(\"warning\", \"unknown stdout line {0}\".format(msg))", "async def copier_recorder(\r\n self,\r\n ) -> None:\r\n if not self.process:\r\n raise Exception(\"missing process; was this called inside a with statement?\")\r\n\r\n assert (\r\n self.process.stdout is not None\r\n ), \"process must be opened with stdout=PIPE and stderr=STDOUT\"\r\n\r\n async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel:\r\n async for chunk in self.process.stdout:\r\n # print(f\"seen chunk: '{chunk!r}'\", flush=True) # debug\r\n self.stdout += chunk\r\n await self.printer_send_channel.send(chunk)\r\n\r\n # send notification\r\n # if it's full, that's fine: if expect() is run, it'll see\r\n # there's a \"pending\" notification and check stdout, then wait\r\n # for another notification\r\n try:\r\n self.notifier_send_channel.send_nowait(b\"\")\r\n except trio.WouldBlock:\r\n pass\r\n except trio.BrokenResourceError as err:\r\n print(f\"cause '{err.__cause__}'\")\r\n raise err", "def process_list(self):\n for p in self._queue:\n print \"%-5d %-10s %-10s %2d %10s %10s\" % (p.id, p.name,\n p.status['type'], p.priority, \n self.print_cr_tree(p.creation_tree['parent']), \n self.print_cr_tree(p.creation_tree['child']))", "def thread_output(*args, **kwargs):\n return_queue = Queue()\n kwargs[\"args\"] = (kwargs[\"args\"] or tuple()) + (return_queue, )\n thread = threading.Thread(*args, **kwargs)\n thread.start()\n yield return_queue\n thread.join()", "def 
run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def processIncoming(self):\n while (self.queue.qsize()):\n try:\n message = self.queue.get_nowait()\n \n self.terminal.insert(END,message)\n\n # Autoscroll the terminal if set\n if (self.autoscroll_value.get()):\n self.terminal.yview(END)\n\n except Queue.Empty:\n pass", "def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n ),\n }\n # Unfortunately, stdout and stderr are not synchronised with each other.\n # This makes capturing both for real-time processing useless. So it is\n # currently all captured under stdout. Even more unfortunately, stderr\n # comes through first before stdout. This means writes that are made first\n # to stdout will not be first through the pipe if there is stderr output.\n #\n # This lack of sychronisation between stdout and stderr output makes\n # real-time display useless because they aren't captured and passed\n # through to the handler as they are encountered.\n #\n # Worse still, there appear to be issues with subprocess output capture on\n # Windows.\n #\n # A proper resolution would be to provide a custom subprocess module but\n # since the common usage does not require real-time capture of\n # stdout/stderr, this is not worth the effort. Manually running whatever\n # was intended for the subprocess outside ttt is the only recourse.\n #\n for thread in threads.values():\n thread.start()\n\n stdout = []\n stderr = []\n while threads:\n try:\n item = io_q.get(True, 1)\n except queue.Empty:\n if process.poll() is not None:\n break\n else:\n outstream, message = item\n if message == \"EXIT\":\n threads[outstream].join()\n del threads[outstream]\n else:\n message = message.rstrip(os.linesep)\n channel = sys.stdout if outstream == \"stdout\" else sys.stderr\n (stdout if outstream == \"stdout\" else stderr).append(message)\n if line_handler is not None:\n line_handler(channel, message)\n else:\n channel.write(message)\n channel.flush()\n\n for t in threads.values():\n t.join()\n process.wait()\n return (process.returncode, stdout, stderr)", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n print(line.strip('\\n'))\n self.pipeReader.close()", "async def print_processor(self) -> None:\n try:\n while True:\n while self.print_queue.empty() is not True:\n stub = await self.print_queue.get()\n if isinstance(stub, str):\n print(stub)\n elif isinstance(stub, tuple):\n if stub[0] == \"error\":\n print(f\"{r}{stub[1]}{reset}\")\n elif stub[0] == \"warning\":\n print(f\"{y}{stub[1]}{reset}\")\n elif stub[0] == \"success\":\n print(f\"{g}{stub[1]}{reset}\")\n elif stub[0] == \"bold\":\n print(f\"{bold}{stub[1]}{reset}\")\n else:\n print(f\"{stub[1]}\")\n self.print_queue.task_done()\n await asyncio.sleep(0.002)\n except asyncio.CancelledError:\n print('Closing the RedCisco application... 
Cleaning up running tasks...\\n')", "def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)", "def stdout(self):\n pass", "def queue_consumer(self, q):\n\n self.status = 'Running...'\n\n while True:\n try:\n msg = q.get_nowait()\n if msg is None:\n break\n self.update_plot(msg)\n except Queue.Empty:\n time.sleep(0.1)\n\n self.status = 'Done'", "def print_processor(print_que):\n print(termcolor.colored(\"!--DO NOT CLOSE--!\", \"red\"))\n print(len(print_que))\n ID_LIMIT = 40\n run = True\n jobs_ran = 0\n while run:\n Q_Jobs = 0\n if len(print_que) > 0:\n if \"10.56.54.162\" in print_que[0]:\n Q_Jobs = print_status(\"10.56.54.162\")\n else:\n Q_Jobs = print_status(\"10.56.54.156\")\n if Q_Jobs >= ID_LIMIT:\n print(\"Printed so Far: \", str(jobs_ran))\n print(\"Waiting For Jobs to Clear Up\")\n # input(\n # \"Please Confirm Printers Will Support 40 More Job IDS before pressing enter: \")\n jobs_ran = 0\n time.sleep(100)\n continue\n if len(print_que) > 0:\n if(\"banner\" not in print_que[0]):\n os.system(print_que[0])\n print((str(print_que[0]).replace(\n \"C:/Windows/System32/lpr.exe -S 10.56.54.\", \"\").replace(\n '-P PS \"C:/S/SO/', \"\").split(\"-J\")[0]))\n print_que.pop(0)\n jobs_ran += 1\n else:\n print(termcolor.colored(\"\\n!--PROCESSING CAUGHT UP--!: \", \"green\"))\n run = False\n jobs_ran += 1", "def enqueue_print(self, obj, fd='stdout'):\n self._log_queue.put_nowait(\n PrintRequest(string=str(obj) + '\\n', fd=fd)\n )", "def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output", "def _dump_queue(self):\n outfile = self.registryValue('dumpFile')\n with open(outfile, 'w') as h:\n i = 1\n for nick, msg in self._queue:\n if msg is None:\n msg = '[no message]'\n h.write(\"% 2d\\t%s\\t%s\\n\" % (i, nick, msg))\n i += 1", "def tail(self):\n for line in iter(self.proc.stdout.readline, ''):\n if len(line) == 0:\n break\n if self.log_filter(line.decode('ASCII')):\n continue\n if self.verbose:\n logging.debug(f\"{self.prefix}: {line.decode().rstrip()}\")\n with self.logs_cond:\n self.logs.append(str(line.rstrip()))\n self.logs_cond.notifyAll()\n self.running = False\n self.proc.stdout.close()\n if self.proc.stderr:\n self.proc.stderr.close()", "def _drain_queue(self):\n while self.queue:\n self._export_batch()", "def read_incoming(self):\r\n buf = ''\r\n debug_prompt = re.compile(r'\\A[\\w]+>>? ')\r\n while 1:\r\n try:\r\n buf += os.read(self.fid, 100).decode('utf8')\r\n except:\r\n self.queue.put(None)\r\n return\r\n lines = buf.splitlines()\r\n for line in lines[:-1]:\r\n self.queue.put(line)\r\n if buf.endswith('\\n'):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n elif re.match(debug_prompt, lines[-1]):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n else:\r\n buf = lines[-1]", "def __init__(self):\n # Open stata as pipe; make a queue for non-blocking. 
Start the thread.\n self.proc = sp.Popen(['stata-mp'], stdin=sp.PIPE, stdout=sp.PIPE, bufsize=1)\n\n self.qu = Queue()\n\n self.thread = Thread(target = self.enqueue_output, args = (self.proc.stdout,\n self.qu))\n self.thread.daemon = True\n self.thread.start()\n\n # Read the initial stdout content.\n self.genout()", "def process_queue(self):\n self.log_info(\"Started Telemetry Logger Thread.\")\n\n while self.input_processing_running:\n\n # Process everything in the queue.\n while self.input_queue.qsize() > 0:\n try:\n _telem = self.input_queue.get_nowait()\n self.write_telemetry(_telem)\n except Exception as e:\n self.log_error(\"Error processing telemetry dict - %s\" % str(e))\n\n # Close any un-needed log handlers.\n self.cleanup_logs()\n\n # Sleep while waiting for some new data.\n time.sleep(0.5)\n\n self.log_info(\"Stopped Telemetry Logger Thread.\")", "def __iter__(self):\n return iter(self.queue)", "def printTerminal(self,message):\n self.queue.put(message)", "def show_queue(Q):\n print(\"(Size of the queue:\", Q.qsize(), \")\", end=\" \")\n for n in list(Q.queue):\n print(n, end=\" \")\n print()", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def process(q, results, iolock, func, args, kwargs):\n\n kwargs[\"iolock\"] = iolock\n\n while True:\n\n line = q.get()\n\n if line is None:\n break\n\n result = func(line, *args, **kwargs)\n results.put(result)\n\n return", "def run(outfile, nprocs, cmd, arg_list, input_options):\n\tnum_worker_threads = nprocs\n\tworker_queue = Queue.Queue()\n\tthreads = []\n\toutput_queue = Queue.Queue()\n\n\tdef output_function(**kwargs):\n\t\t\"\"\"\n\t\toutput_function take 'output' from the output_queue and writes it to outfile\n\t\tsince there is nly one thread running this function do not\n\t\tneed any kind of lock/semaphore to protect it\n\t\t\"\"\"\n\n\t\toutput_queue = kwargs['q']\n\t\twhile True:\n\t\t\titem = output_queue.get()\n\t\t\t# expects to get a string or None\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\toutfile.write(item)\n\t\t\t# outfile.write(\"output_function:: {item}\".format(item=item)+\"\\n\")\n\t\t\toutput_queue.task_done()\n\n\t# def output_function\n\n\tdef worker_function(ident, work):\n\t\t\"\"\"\n\t\tworker_function - called by a worker thread with 'work'.\n\t\tThe work is a shell command and arguments. 
Executes that command and passes the output to the output_queue\n\t\tDetailed behaviour is modified by input_options\n\n\t\tArgs:\n\t\t\tident (int)\t\t\t\t:\tthe index into the threads table of the thread that is running this worker\n\t\t\twork (list of strings)\t:\tthe arguments for this invocation\n\t\t\n\t\tOuter scope access:\n\t\t\tinput_options (dictionary):\tread only modified details of behaviour\n\t\t\toutput_queue (Queue.Queue):\tread only - where output text goes\n\n\t\t\"\"\"\n\n\t\tdef exec_debug(command_string) :\n\t\t\t\"\"\" \n\t\t\twhen the --debug option is set this outputs the command string rather than execute the command\n\t\t\t\n\t\t\t\tArgs:\n\t\t\t\t\tcommand_string (string) : the command and all args as a simple string\n\t\t\t\t\n\t\t\t\tOuter scope access:\n\t\t\t\t\tnone\n\n\t\t\t\tReturns:\n\t\t\t\t\tstring\n\t\t\t\"\"\"\n\t\t\tline += cmd_string + \"\\n\"\n\t\t\treturn line\n\n\t\tdef exec_lines(command_list, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen the --lines option is set this function outputs every line of output from the command to the output_queue as soon as it is avaliable\n\t\t\trather then wait for the command to complete and puts the command with all options on the fron of each outout\n\t\t\tline so it can be reconciles with the command that generated it. \n\n\t\t\tArgs:\n\t\t\t\tcommand list (dictionary) \t: the result of applying shlex.split() to command_string\n\t\t\t\tmark_flag(bool)\t\t\t\t: if true adds \n\n\t\t\tReturns:\n\t\t\t\tNothing\n\n\t\t\tOuter scope access:\n\t\t\t\toutput_queue\n\n\t\t\t\"\"\"\t\n\n\t\t\toutput = \"\"\n\t\t\tcommand_string = \" \".join(command_list)\n\t\t\ttry:\n\t\t\t\tprocess = subprocess.Popen(command_list, stdout=subprocess.PIPE)\n\t\t\t\tpipe = process.stdout\n\t\t\t\toutput = \"\"\n\n\t\t\t\twhile True:\n\n\t\t\t\t\toutput = pipe.readline()\n\t\t\t\t\tif len(output) == 0 : #and (proc.process.poll() is not None ):\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif mark_flag:\n\t\t\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\t\t\toutput = mark + output\n\t\n\t\t\t\t\toutput_queue.put(output)\n\t\n\t\t\t\t# while\n\t\n\t\t\t\tprocess.wait()\n\t\t\t\treturn\n\t\t\t#\n\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += \"LINES \"+cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\n\t\t\tif mark_flag:\n\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\toutput = mark + output + \"\\n\"\n\n\t\t\toutput_queue.put(output)\n\n\n\t\t# def exec_and_output_each_line\n\n\t\tdef exec_not_lines(command_string, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen neither the --debug or the --lines options are set this function runs the command and collects all the output\n\t\t\twaits for the command to complete and then returns all the output as a single string\n\n\t\t\tArgs:\n\t\t\t\tcommand_string (string) - \tthe complete command to be executed\n\t\t\t\tmark_flag(bool)\t\t\t- \twhen true the output has additional text on the start and end of the\n\t\t\t\t\t\t\t\t\t\t\toutput so that \n\n\t\t\t\t\t\t\t\t\t\t\t-\tthe start of command execution is marked\n\t\t\t\t\t\t\t\t\t\t\t-\tthe begionning and end of command output is marked\n\t\t\tReturns:\n\t\t\t\tall output as a single 
string\n\n\t\t\tOuter scope access:\n\t\t\t\tnone\n\n\t\t\t\"\"\"\n\t\t\ttry:\n\t\t\t\toutput = \"\"\n\t\t\t\tif mark_flag:\n\t\t\t\t\tmarker = \"\\nMARK \" + command_string + \"================================\\n\"\n\t\t\t\t\toutput_queue.put(marker)\n\n\t\t\t\t# subprocess.check_output returns a single string with all the output\n\t\t\t\t# if its multi line output there are line breaks in the string\n\t\t\t\toutput += subprocess.check_output(command_string, shell=True)\n\t\t\t\t#\n\t\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\t\t\t\n\t\t\tif mark_flag:\n\t\t\t\toutput = output.replace(\"\\n\", \"\\n\\t\")\n\t\t\t\toutput = \"OUTPUT START[\" + command_string + \"]: \\n\" + output + \"\\nOUTPUT END[\" + command_string + \"]\" \n\n\t\t\treturn output\n\n\t\t# def exec_and_output_each_line\n\n\n\t\t#\n\t\t# we are going to exec the command with subprocess.check_output\n\t\t# this is best done with a single command string holding\n\t\t# the command opetions and all args\n\t\t#\n\t\tcmd_string = \" \".join([cmd] + work)\n\t\tcmd_list = shlex.split(cmd_string)\n\t\tline = \"\"\n\n\t\tif input_options['debug']:\n\n\t\t\toutput = exec_debug(cmd_string)\n\t\t\toutput_queue.put(output)\n\n\t\telif input_options['lines']:\n\n\t\t\toutput = exec_lines(cmd_list, input_options['mark'])\n\t\t\t# output_queue.put() not required it is done line by line inside exec_lines()\n\n\t\telse:\n\n\t\t\toutput = exec_not_lines(cmd_string, input_options['mark'])\n\t\t\toutput_queue.put(output)\n\n\t\treturn\n\n\t\t# semaphore.acquire()\n\t\t# print \"do_work:: {id} {work}\".format(id=ident, work=work)\n\t\t# semaphore.release()\n\n\t# def worker_function\n\n\tdef worker(**kwargs):\n\t\t\"\"\"\n\t\ttarget function for worker threads. Takes 'work' from the worker queue and\n\t\tpasses that to `worker_function`. 
When `work == None` return\n\t\tand terminate the worker thread.\n\n\t\tArgs:\n\t\t\tkwargs['ident'] (int)\t- the index of the thread running this worker\n\n\t\tOuter scope access:\n\t\t\tworker_queue (Queue.Queue) - multiple worker processes (and hence worker functions) take work from this queue\n\n\t\t@return nothing\n\t\t\"\"\"\n\t\tident = kwargs[\"ident\"]\n\t\twhile True:\n\t\t\titem = worker_queue.get()\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\tworker_function(ident, item)\n\t\t\tworker_queue.task_done()\n\n\t# def worker\n\n\t# def run - body\n\n\tfor i in range(num_worker_threads):\n\t\tkwargs = {\"ident\": i}\n\t\tt = threading.Thread(target=worker, kwargs=kwargs)\n\t\tt.start()\n\t\tthreads.append(t)\n\n\tfor item in arg_list:\n\t\tworker_queue.put(item)\n\n\toutput_thread = threading.Thread(target=output_function, kwargs={'q': output_queue})\n\toutput_thread.start()\n\n\t# block until all tasks are done\n\tworker_queue.join()\n\n\t# stop workers\n\tfor i in range(num_worker_threads):\n\t\tworker_queue.put(None)\n\n\tfor t in threads:\n\t\tt.join()\n\n\toutput_queue.put(None)\n\toutput_thread.join()", "def visualizar(self):\n print(self.queue)", "def _start_output_file_worker(\n q_out: Queue, output_data_path: str, stop_event: Event\n) -> None:\n\n counter = 0\n with open(output_data_path, \"w\") as output_data_file:\n while not stop_event.is_set():\n try:\n line = q_out.get(timeout=0.1)\n except Empty:\n continue\n output_data_file.write(f\"{line}\\n\")\n q_out.task_done()\n\n counter += 1\n if counter % 100 == 0:\n logger.info(f\"Processed instances: {counter}\")\n logger.info(f\"Total processed instances: {counter}\")", "def processq(self):\n\n while True:\n command = None\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if any(q):\n command = q.pop(0)\n # remember q has now changed\n if not any(q):\n self.queuefile.unlink()\n else:\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()\n\n if command:\n self.execute(command)\n else:\n break", "def print_dots(status_queue):\n while True:\n # Exit when something gets written to the pipe\n try:\n status_queue.get(True, dot_timeout)\n return\n except Empty:\n IndentedLogger.dot()", "def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()", "def push_results_to_file(file_name,queue_name, aux_q,queue_service):\n # verify the queues exist\n queue_service.create_queue(queue_name)\n queue_service.create_queue(aux_q)\n # open file for write\n f1=open('./{0}.csv'.format(file_name), 'w+')\n while queue_service.get_queue_metadata(queue_name).approximate_message_count > 0:\n messages = queue_service.get_messages(queue_name,1)\n if len(messages)>0 :\n for message in messages: \n line = '{0},{1},{2}'.format(message.id,message.insertion_time,message.content)\n queue_service.put_message(aux_q,line)\n f1.write(line)\n f1.write('\\n')\n queue_service.delete_message(queue_name, message.id, message.pop_receipt)\n f1.close()", "def read(self):\n # now read stderr for log messages, we could buffer here but since\n # we're just logging the messages, I don't care to\n try:\n out = self.proc.stderr.read()\n if out:\n LOG.debug('reading %s got %d bytes on stderr', self.name,\n len(out))\n for line in out.splitlines():\n LOG.warning('%s: %s', self.name, line)\n except IOError as err:\n if err.errno != errno.EAGAIN:\n # allowing a caller to handle the exception as well\n raise\n 
except:\n LOG.exception('uncaught exception in stderr read')\n\n # This read call is non-blocking\n try:\n self.buffer += self.proc.stdout.read()\n if len(self.buffer):\n LOG.debug('reading %s, buffer now %d bytes',\n self.name, len(self.buffer))\n except IOError as err:\n if err.errno != errno.EAGAIN:\n raise\n except:\n # sometimes the process goes away in another thread and we don't\n # have it anymore\n LOG.exception('uncaught exception in stdout read')\n return\n\n # iterate for each line we have\n while self.buffer:\n idx = self.buffer.find('\\n')\n if idx == -1:\n break\n\n line = self.buffer[0:idx].strip()\n if line:\n self.datalines.append(line)\n self.buffer = self.buffer[idx+1:]", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def stdout(self):\n p = self.bg()\n if isinstance(p, PyPipe):\n return p.iter_stdout\n else:\n return p.stdout", "def output(self):\n queue_type = self.queue_type\n queue_options = self.queue_options\n jobname = self.jobname\n emtopath = self.emtopath\n\n line = batch_head(jobname=jobname, latpath=emtopath, runtime=self.runtime, \n account=self.account, queue_type=queue_type, queue_options=queue_options)\n \n self.use_module = False\n if self.slurm_options is not None:\n for tmp in self.slurm_options:\n if 'module load emto' in tmp:\n self.use_module = True\n break\n line += \"\\n\"\n\n sub_module = [\"kgrn_cpa\", \"kfcd_cpa\"]\n sub_module_run = [self.runKGRN, self.runKFCD]\n file_type = [self.KGRN_file_type, self.KFCD_file_type]\n output_file_ext = [\"kgrn\", \"kfcd\"]\n\n #elapsed_time = \"/usr/bin/time \"\n elapsed_time = \"\"\n if self.parallel is True:\n sub_module = [\"kgrn_omp\", \"kfcd_cpa\"]\n\n if which(\"kfcd_cpa\") is not None:\n self.use_module = True\n if not self.use_module:\n module_path = [os.path.join(self.EMTOdir, module_i) for module_i in sub_module]\n else:\n module_path = sub_module\n\n for i in range(0, len(sub_module)):\n if sub_module_run[i]:\n runStr = [elapsed_time, module_path[i], \"<\", \n os.path.join(emtopath, jobname + \".\" + file_type[i]), \">\",\n os.path.join(emtopath, jobname + \"_\" + output_file_ext[i] + \".output\")]\n line += \" \".join(runStr).strip() + \"\\n\"\n\n return line", "def Worker(queue, out_queue):\n while not queue.empty() and Worker.running:\n item = queue.get(False)\n if not item:\n break\n results = RunGCC(item[0], item[1])\n out_queue.put(results)", "def wait(self, timeout: float = None) -> CompletedProcess: # type: ignore\n if self.stdout is None:\n return CompletedProcess(self.args, returncode=super().wait(timeout=timeout), stdout=None)\n else:\n stdout = []\n while self.poll() is None:\n stdout.append(line := self.stdout.readline())\n\n if self.verbose:\n print(line, end=\"\")\n\n return CompletedProcess(self.args, returncode=self.poll(), stdout=\"\".join(stdout))", "def simple_sink(riter):\n for r in riter:\n pass", "def write_queue_log(outfile, new_data_queue: Queue, max_lines: int = 1000) -> int:\n i = 0\n while not new_data_queue.empty() and i < max_lines:\n info = new_data_queue.get()\n row_str = \",\".join([str(v) for v in info])\n logging.debug(row_str)\n outfile.write(row_str + \"\\n\")\n i += 1\n return i", "def spawn_process_for(self, gq):\n pipe_top, pipe_bottom = multiprocessing.Pipe()\n p = multiprocessing.Process(target=GridQueue.listen,args=(gq, pipe_bottom))\n p.start()\n self.pipes[gq.index] 
= pipe_top", "def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)", "def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass", "def stdout(self, stdout: str) -> Tuple[List[Message], List[AnnotateCode], str]:\n return [], [], stdout", "def print_output(self, final=False):\n encoding = sys.stdout.encoding\n if final and self.process: # ask for process because might be an action\n line = self.process.stdout.read().decode(encoding)\n self.last_run['output'] += line\n sys.stdout.write(line)\n else:\n str_chunk = None\n chunk = bytes()\n while not isinstance(str_chunk, str):\n assert self.process\n chunk += self.process.stdout.read(1)\n try:\n str_chunk = chunk.decode(encoding)\n except:\n str_chunk = None\n self.last_run['output'] += str_chunk\n sys.stdout.write(str_chunk)", "def _dumpStdout(self, p, outputCallback):\n while p.poll() is None:\n try:\n # May raise IOError if in non-blocking mode\n l = p.stdout.read()\n outputCallback(l)\n except IOError:\n pass\n time.sleep(0.1)\n outputCallback(p.stdout.read())", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n logging.log(self.level, line.strip('\\n'))\n\n self.pipeReader.close()", "def worker(inQueue, outQueue):\n for i in iter(inQueue.get, 'STOP'):\n\n status = run(i)\n\n outQueue.put(( status ))", "def _ffmpeg_loop(cls, ffmpeg: subprocess.Popen) -> Iterable[Progress]:\n while ffmpeg.poll() is None:\n rlist, _, _ = select((ffmpeg.stderr, ffmpeg.stdout), (), ())\n # Read logs from stdin\n if ffmpeg.stderr in rlist:\n status = cls.process_logs(ffmpeg.stderr.read().splitlines())\n if status:\n yield status\n # ignore stdout\n if ffmpeg.stdout in rlist:\n ffmpeg.stdout.read()", "def dump_queue(self):\n self.set_polling_many(self.queue)\n self.queue = []", "def print_queue(queue):\n print(tabulate.tabulate(queue,headers=['Time','Priority','Action','Argument','kwargs'],\n floatfmt=(\".12f\")))", "def _pull_batch_from_queue(self):\n rollout = self.explorer.queue.get( timeout = 600.0 )\n while not rollout.terminal:\n try: \n rollout.extend( self.explorer.queue.get_nowait() )\n except queue.Empty:\n break\n print(rollout.size())\n return rollout", "def piped(self):\n\t\tpass", "def gui_process(self):\n ti = self.scan_queue.qsize()\n t = TqdmUpTo(total=self.scan_queue.qsize(), unit='Files')\n\n while True:\n try:\n t.update(ti - self.scan_queue.qsize())\n ti = self.scan_queue.qsize()\n if self.message_queue.__len__() > 0:\n for m in self.message_queue:\n TqdmUpTo.write(m)\n self.message_queue.remove(m)\n # We dont need more then 60fps in the terminal :P\n except BrokenPipeError:\n continue", "def send_command_queue(self, command_queue):\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()", "def send_command_queue(self, command_queue):\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, 
msg)", "def _process_whisper_queue(self, whisper_queue):\n while True:\n if len(whisper_queue) > 0:\n whisper_tuple = (whisper_queue.pop())\n self.ts.send_whisper(whisper_tuple[0], whisper_tuple[1])\n time.sleep(.5)", "def producer():\n try:\n # Build a new iterable for each thread. This is crucial if working with\n # tensorflow datasets because tf.Graph objects are thread local.\n for item in iterable:\n buffer.put(item)\n except Exception as e: # pylint: disable=broad-except\n logging.exception('Error in producer thread for %s', iterable.__name__)\n producer_error.append(e)\n finally:\n buffer.put(end)", "def run(self) -> Any:\n self.prepare()\n for step in self.stream:\n self.output = step\n return self.output", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def save(self):\n if len(self.queue):\n #logging.info('Saving %d pages' % len(self.queue))\n for q in self.queue:\n if q and q != '':\n if self.output:\n print >> self.output, q\n else:\n print q\n self.queue = []", "def processed(items, func, max_processes=5, max_queue=200, join=True,\n daemon=True):\n input_queue = Queue(maxsize=max_queue)\n output_queue = Queue(maxsize=max_queue)\n for item in items:\n input_queue.put(item, True)\n\n def wrapped_func(output_queue, item):\n try:\n func(item)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n output_queue.put(e)\n\n processes = []\n while not input_queue.empty():\n try:\n log.exception(output_queue.get_nowait())\n except Empty:\n pass\n while sum(1 for process in processes if process.is_alive()) >= max_processes:\n pass\n item = input_queue.get(True)\n processes.append(Process(target=wrapped_func, args = (output_queue, item,), daemon = daemon))\n processes[-1].start()\n input_queue.task_done()\n\n if join:\n while any(process.is_alive() for process in processes):\n pass", "def worker_function(ident, work):\n\n\t\tdef exec_debug(command_string) :\n\t\t\t\"\"\" \n\t\t\twhen the --debug option is set this outputs the command string rather than execute the command\n\t\t\t\n\t\t\t\tArgs:\n\t\t\t\t\tcommand_string (string) : the command and all args as a simple string\n\t\t\t\t\n\t\t\t\tOuter scope access:\n\t\t\t\t\tnone\n\n\t\t\t\tReturns:\n\t\t\t\t\tstring\n\t\t\t\"\"\"\n\t\t\tline += cmd_string + \"\\n\"\n\t\t\treturn line\n\n\t\tdef exec_lines(command_list, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen the --lines option is set this function outputs every line of output from the command to the output_queue as soon as it is avaliable\n\t\t\trather then wait for the command to complete and puts the command with all options on the fron of each outout\n\t\t\tline so it can be reconciles with the command that generated it. 
\n\n\t\t\tArgs:\n\t\t\t\tcommand list (dictionary) \t: the result of applying shlex.split() to command_string\n\t\t\t\tmark_flag(bool)\t\t\t\t: if true adds \n\n\t\t\tReturns:\n\t\t\t\tNothing\n\n\t\t\tOuter scope access:\n\t\t\t\toutput_queue\n\n\t\t\t\"\"\"\t\n\n\t\t\toutput = \"\"\n\t\t\tcommand_string = \" \".join(command_list)\n\t\t\ttry:\n\t\t\t\tprocess = subprocess.Popen(command_list, stdout=subprocess.PIPE)\n\t\t\t\tpipe = process.stdout\n\t\t\t\toutput = \"\"\n\n\t\t\t\twhile True:\n\n\t\t\t\t\toutput = pipe.readline()\n\t\t\t\t\tif len(output) == 0 : #and (proc.process.poll() is not None ):\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif mark_flag:\n\t\t\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\t\t\toutput = mark + output\n\t\n\t\t\t\t\toutput_queue.put(output)\n\t\n\t\t\t\t# while\n\t\n\t\t\t\tprocess.wait()\n\t\t\t\treturn\n\t\t\t#\n\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += \"LINES \"+cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\n\t\t\tif mark_flag:\n\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\toutput = mark + output + \"\\n\"\n\n\t\t\toutput_queue.put(output)\n\n\n\t\t# def exec_and_output_each_line\n\n\t\tdef exec_not_lines(command_string, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen neither the --debug or the --lines options are set this function runs the command and collects all the output\n\t\t\twaits for the command to complete and then returns all the output as a single string\n\n\t\t\tArgs:\n\t\t\t\tcommand_string (string) - \tthe complete command to be executed\n\t\t\t\tmark_flag(bool)\t\t\t- \twhen true the output has additional text on the start and end of the\n\t\t\t\t\t\t\t\t\t\t\toutput so that \n\n\t\t\t\t\t\t\t\t\t\t\t-\tthe start of command execution is marked\n\t\t\t\t\t\t\t\t\t\t\t-\tthe begionning and end of command output is marked\n\t\t\tReturns:\n\t\t\t\tall output as a single string\n\n\t\t\tOuter scope access:\n\t\t\t\tnone\n\n\t\t\t\"\"\"\n\t\t\ttry:\n\t\t\t\toutput = \"\"\n\t\t\t\tif mark_flag:\n\t\t\t\t\tmarker = \"\\nMARK \" + command_string + \"================================\\n\"\n\t\t\t\t\toutput_queue.put(marker)\n\n\t\t\t\t# subprocess.check_output returns a single string with all the output\n\t\t\t\t# if its multi line output there are line breaks in the string\n\t\t\t\toutput += subprocess.check_output(command_string, shell=True)\n\t\t\t\t#\n\t\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\t\t\t\n\t\t\tif mark_flag:\n\t\t\t\toutput = output.replace(\"\\n\", \"\\n\\t\")\n\t\t\t\toutput = \"OUTPUT START[\" + command_string + \"]: \\n\" + output + \"\\nOUTPUT END[\" + command_string + \"]\" \n\n\t\t\treturn output\n\n\t\t# def exec_and_output_each_line\n\n\n\t\t#\n\t\t# we are going to exec the command with subprocess.check_output\n\t\t# this is best 
done with a single command string holding\n\t\t# the command opetions and all args\n\t\t#\n\t\tcmd_string = \" \".join([cmd] + work)\n\t\tcmd_list = shlex.split(cmd_string)\n\t\tline = \"\"\n\n\t\tif input_options['debug']:\n\n\t\t\toutput = exec_debug(cmd_string)\n\t\t\toutput_queue.put(output)\n\n\t\telif input_options['lines']:\n\n\t\t\toutput = exec_lines(cmd_list, input_options['mark'])\n\t\t\t# output_queue.put() not required it is done line by line inside exec_lines()\n\n\t\telse:\n\n\t\t\toutput = exec_not_lines(cmd_string, input_options['mark'])\n\t\t\toutput_queue.put(output)\n\n\t\treturn\n\n\t\t# semaphore.acquire()\n\t\t# print \"do_work:: {id} {work}\".format(id=ident, work=work)\n\t\t# semaphore.release()", "def waitOutput( self, verbose=False ):\n log = info if verbose else debug\n output = ''\n while self.waiting:\n data = self.monitor()\n output += data\n log( data )\n return output", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # ESRCH means the process finished/died between last check and now\n if e.errno != errno.ESRCH:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"TaskWarrior stopped responding and couldn't be killed\")", "def process_queue(self):\n while self.input_processing_running:\n\n # Process everything in the queue.\n while self.input_queue.qsize() > 0:\n try:\n _telem = self.input_queue.get_nowait()\n self.process_telemetry(_telem)\n\n except Exception as e:\n self.log_error(\"Error processing telemetry dict - %s\" % str(e))\n\n # Sleep while waiting for some new data.\n time.sleep(0.5)" ]
[ "0.7584206", "0.74641174", "0.68386096", "0.6770728", "0.6437304", "0.63062155", "0.6296086", "0.62914383", "0.6253849", "0.62142974", "0.62142974", "0.62142974", "0.62142974", "0.62142974", "0.62142974", "0.61727333", "0.61600655", "0.61525756", "0.61478114", "0.6145392", "0.6057833", "0.5976558", "0.5960747", "0.5867239", "0.586684", "0.58278906", "0.58223546", "0.58152735", "0.5798342", "0.5792152", "0.57648414", "0.57499236", "0.5728277", "0.5727159", "0.5721458", "0.56936496", "0.56850946", "0.56832737", "0.5680417", "0.5676181", "0.566754", "0.5665256", "0.56571406", "0.5653356", "0.5646762", "0.5628233", "0.56167674", "0.56145984", "0.56032497", "0.56014395", "0.5593293", "0.55812913", "0.55757076", "0.5568863", "0.5561949", "0.5543718", "0.5543718", "0.55388206", "0.55376595", "0.5519085", "0.5473589", "0.5451167", "0.5441342", "0.5429342", "0.5409689", "0.5399904", "0.53978187", "0.53789395", "0.535548", "0.5341845", "0.53363895", "0.53214294", "0.53206563", "0.5318566", "0.5280462", "0.5276614", "0.52763957", "0.5264079", "0.52623004", "0.5257329", "0.5256854", "0.525155", "0.5236701", "0.5230525", "0.52285457", "0.52197057", "0.5214512", "0.5207733", "0.5207733", "0.5206137", "0.52056545", "0.5202512", "0.5201011", "0.51978827", "0.5195773", "0.5189029", "0.518051", "0.5179204", "0.5176353", "0.51746297" ]
0.7113257
2
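The negative examples that close the row above all orbit one pattern: collecting a subprocess's output from a worker thread through a queue, with a timeout on the wait. A minimal Python sketch of that pattern using only the standard library — every name below is illustrative and not taken from any row of this dataset:

import subprocess
import threading
from queue import Queue, Empty

def _queue_output(args, outputq):
    # Worker-thread body: run the command and push (returncode, output).
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    outputq.put((proc.returncode, out))

def run_with_timeout(args, timeout):
    # Start the worker, then wait up to `timeout` seconds for its result.
    outputq = Queue()
    worker = threading.Thread(target=_queue_output, args=(args, outputq))
    worker.daemon = True
    worker.start()
    try:
        return outputq.get(timeout=timeout)
    except Empty:
        raise TimeoutError('command %r did not finish in %s s' % (args, timeout))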
Executes a subprocess with a timeout.
def GetCmdStatusAndOutputWithTimeout(args, timeout, cwd=None, shell=False,
                                     logfile=None, env=None):
  _ValidateAndLogCommand(args, cwd, shell)
  output = six.StringIO()
  process = Popen(
      args, cwd=cwd, shell=shell, stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT, env=env)
  try:
    for data in _IterProcessStdout(process, timeout=timeout):
      if logfile:
        logfile.write(data)
      output.write(data)
  except TimeoutError:
    raise TimeoutError(output.getvalue())
  str_output = output.getvalue()
  logger.debug('STDOUT+STDERR: %s%s', str_output[:4096].rstrip(),
               '<truncated>' if len(str_output) > 4096 else '')
  return process.returncode, str_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call_timeout(args, **kwargs):\n\n timeout = kwargs.pop('timeout', 20)\n assert timeout > 0\n\n endtime = time.time() + timeout\n\n proc = subprocess.Popen(args, **kwargs)\n \n try:\n while time.time() < endtime:\n if proc.poll() != None:\n return proc.returncode\n time.sleep(1 * 0.05)\n except:\n raise\n\n try:\n proc.kill()\n print 'Killed \"%s\" (timeout).' % args[0]\n except:\n pass\n\n return None", "def run_cmd_with_timeout(cmd, timeout_sec):\n proc = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)\n kill_proc = lambda p: p.kill()\n timer = Timer(timeout_sec, kill_proc, [proc])\n timer.start()\n stdout, stderr = proc.communicate()\n timer.cancel()\n return proc.returncode, stdout, stderr", "def execv_wait_timeout(timeout, spath, attrs=None, args=None):\n return dialv_wait_timeout(timeout, \"execute\", spath, attrs, args)", "def command(cmd, timeout): \n is_linux = platform.system() == 'Linux' \n \n p = subprocess.Popen(cmd, stderr=None, stdout=None, shell=True, preexec_fn=os.setsid if is_linux else None) \n t_beginning = time.time() \n seconds_passed = 0 \n while True: \n if p.poll() is not None: \n break \n seconds_passed = time.time() - t_beginning \n if timeout and seconds_passed > timeout: \n if is_linux: \n os.killpg(p.pid, signal.SIGTERM) \n print 'linux'\n else: \n p.terminate()\n p.kill()\n print 'windows' \n print 'timeout!'\n time.sleep(0.1)", "def run_popen_with_timeout(command_string, timeout, input_data):\n kill_check = threading.Event()\n def _kill_process_after_a_timeout(pid):\n os.kill(pid, signal.SIGTERM)\n kill_check.set() # tell the main routine that we had to kill\n # use SIGKILL if hard to kill...\n global dead_threads\n dead_threads = dead_threads + 1\n return\n p = Popen(command_string, shell=True, stdout=PIPE, stderr=PIPE)\n pid = p.pid\n watchdog = threading.Timer(timeout, _kill_process_after_a_timeout, args=(pid, ))\n watchdog.start()\n (stdout, stderr) = p.communicate(input_data)\n watchdog.cancel() # if it's still waiting to run\n success = not kill_check.isSet()\n kill_check.clear()\n return (success, stdout, stderr)", "def timeout_check_call(timeout, *args, **kwargs):\n resolution = 0.1\n pro = subprocess.Popen(*args, **kwargs)\n starttime = time.time()\n while pro.poll() is None:\n time.sleep(resolution)\n if time.time() - starttime > timeout:\n to = time.time() - starttime\n pro.terminate()\n time.sleep(resolution)\n if pro.poll() is None:\n pro.kill()\n raise TimeoutError('Timed out after {0:.1f} seconds'.format(to))\n if pro.returncode:\n raise subprocess.CalledProcessError(\n pro.returncode, kwargs['args'] if 'args' in kwargs else args[0])\n return 0", "def run_shell_command_timeout(args, cwd=None, shell=True, env=None):\n\n def get_process_children(parent):\n piped_process = subprocess.Popen('ps --no-headers -o pid ' # pylint: disable=R1732\n '--ppid %d' % parent,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, _ = piped_process.communicate()\n return [int(proc) for proc in stdout.split()]\n\n # no need for pass here:\n # https://github.com/PyCQA/pylint/issues/2616#issuecomment-442738701\n # do not remove the doc string - code would break\n class Alarm(Exception):\n \"\"\"\n Custom alarm exception\n \"\"\"\n\n def alarm_handler(_, __):\n raise Alarm\n\n piped_process = subprocess.Popen(args, # pylint: disable=R1732\n shell=shell,\n cwd=cwd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=env)\n if TIMEOUT != 0:\n signal.signal(signal.SIGALRM, alarm_handler)\n signal.alarm(TIMEOUT)\n try:\n 
output, _ = piped_process.communicate()\n # disable alarm after process returns\n signal.alarm(0)\n except Alarm:\n logger.warning(\"cmd: %s timed out\", args)\n pids = [piped_process.pid]\n pids.extend(get_process_children(piped_process.pid))\n for pid in pids:\n # process might have died before getting to this line\n # so wrap to avoid OSError: no such process\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError:\n pass\n return -9, \"cmd: %s timed out\" % args\n\n return piped_process.returncode, native_string(output)", "def _run(proc: Popen, timeout):\n try:\n return proc.wait(timeout=timeout)\n except TimeoutExpired:\n pass\n if sys.platform != 'win32':\n proc.send_signal(signal.SIGINT)\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.terminate() # SIGTERM\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.kill() # SIGKILL\n return proc.wait(timeout=5)", "def command(cmd, timeout=60): \n is_linux = platform.system() == 'Linux' \n \n p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True,shell=True, preexec_fn=os.setsid if is_linux else None)\n if timeout==0:\n return p.stdout.read()\n t_beginning = time.time() \n seconds_passed = 0 \n while True: \n if p.poll() is not None: \n break \n seconds_passed = time.time() - t_beginning \n if timeout and seconds_passed > timeout: \n if is_linux: \n os.killpg(p.pid, signal.SIGTERM) \n else: \n p.terminate() \n raise TimeoutError(cmd, timeout) \n time.sleep(0.1) \n return p.stdout.read()", "def launch_command(command, timeout, logfile):\n out = open(logfile, \"w\")\n proc = subprocess.Popen(command,\n shell=True,\n stdout=out,\n stderr=subprocess.STDOUT)\n try:\n signal.alarm(timeout)\n proc.wait()\n except TimeoutException:\n out.close()\n proc.kill()\n fail_with(\"operation took too long (> %d seconds)\" % timeout)\n\n out.close()\n signal.alarm(0) # reset alarm\n lines = None\n with open(logfile, \"r\") as f:\n lines = f.readlines()\n return lines", "def timeout_cmd(cmd, timeout):\n return \"timeout -sKILL %us stdbuf -o0 -e0 %s\" % (timeout, cmd)", "def run_command(command, timeout=None):\n if type(command) != list:\n command = [command]\n cmd = shlex.split(command[0])\n process = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n prev_process = process #Assign this process as prev_process so that the variables make sense later\n for cmd in command[1:]:\n cmd = shlex.split(cmd)\n #prev_process is the process that was run before the current iteration of loop\n process = subprocess.Popen(cmd, shell=False, stdin=prev_process.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \n prev_process.stdout.close() #Close the stdout of the previous process, as we don't need it\n prev_process = process #Assign the process in the current iteration of the loop as the current process\n #Handling timeouts\n if timeout:\n try:\n process.communicate(timeout=timeout)\n except TimeoutExpired:\n process.kill()\n result = process.communicate()\n err_code = process.returncode\n output = result[0].decode(\"utf-8\")\n error = result[1].decode(\"utf-8\")\n return output, error, err_code", "def run_shell_command(args):\n # to allow timeouts to work in windows,\n # would need to find another mechanism that signal.alarm based\n if sys.platform == 'win32' or TIMEOUT == 0:\n return run_shell_command_regular(args)\n\n return run_shell_command_timeout(args)", "def timeout_soft_cmd(cmd, timeout):\n return \"timeout %us stdbuf -o0 -e0 %s\" % 
(timeout, cmd)", "def executeCommand(command):\n time.sleep(1)\n #return os.system(command)\n subprocess.Popen(command, shell=True)", "def execute_command_with_timeout(self, **kwargs):\n return LiteHelper.execute_local_cmd_with_timeout(self.com, **kwargs)", "def run_install_subprocess(\n install_command: List[str], install_timeout: float = 300\n) -> int: # pragma: nocover\n with subprocess.Popen(install_command) as subp: # nosec\n subp.wait(install_timeout)\n return subp.returncode", "def execute2(cmd, timeout=None):\n \n output = {'error':True, 'log':''}\n try:\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, timeout = timeout)\n output['log'] = p.stdout\n output['error'] = False\n output['why_error'] =''\n except subprocess.TimeoutExpired as ex:\n output['log'] = ex.stdout+'\\n'+str(ex)\n output['why_error'] = 'timeout'\n except:\n output['log'] = 'unknown run error'\n output['why_error'] = 'unknown'\n return output", "def _wait_with_timeout(process, displayName, timeout, read):\n\t# read output from the command, with a timeout\n\t# returns (out, err, timedout) if read else returncode; out/err are byte buffers\n\t\n\t_processCleanupMonitor.add(process)\n\ttimedOut = [False]\n\t\n\tparentThreadName = threading.currentThread().name\n\t\n\tdef kill_proc(): # executed on a background thread\n\t\ttry:\n\t\t\tthreading.currentThread().name = parentThreadName+'-timer'\n\t\t\tlog.info('Process timeout handler for %s invoked after %s s; still running=%s', displayName, timeout, process.poll()==None)\n\t\t\tif process.poll()!=None: return # has terminated, so nothing to do - this happen on loaded machines sometimes\n\t\t\t\n\t\t\t# this will cause us to throw an exception\n\t\t\ttimedOut[0] = True\n\t\t\ttry:\n\t\t\t\tprocess.kill()\n\t\t\t\tlog.info('Process kill completed successfully for %s'%displayName)\n\t\t\texcept Exception as e:\n\t\t\t\t# only log if process is still running (Windows Access Denied 5 are seen occasionally in kill()) - should not happen\n\t\t\t\ttime.sleep(2)\n\t\t\t\tif process.poll() == None:\n\t\t\t\t\tlog.error('Failed to kill process %s (pid %s) after %d second timeout: %s', displayName, process.pid, timeout, e)\n\t\t\t\telse:\n\t\t\t\t\tlog.debug('Process kill failed but process is now stopped anyway: %s', e)\n\t\texcept Exception as e: # should never happen but make sure we notice if it does\n\t\t\tlog.exception('Unexpected error in process timeout monitoring thread for %s: '%displayName)\n\t\t\t\n\ttimer = threading.Timer(timeout, kill_proc, [])\n\ttimer.start()\n\ttry:\n\t\tif read:\n\t\t\tstdout, stderr = process.communicate()\n\t\telse:\n\t\t\trv = process.wait()\n\tfinally:\n\t\ttimer.cancel()\n\t\t_processCleanupMonitor.remove(process)\n\t\t\n\tif timedOut[0]:\n\t\tif read:\n\t\t\treturn stdout, stderr, True\n\t\telse:\n\t\t\traise BuildException('Terminating process %s after hitting %d second timout' % (displayName, timeout))\n\telse:\n\t\tif read:\n\t\t\treturn stdout, stderr, False\n\t\telse:\n\t\t\treturn rv", "def execute_cmd(cmd, cwd=None, timeout=5):\n p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n p.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n return None\n else:\n stdout, stderr = p.stdout.read(), p.stderr.read()\n stdout, stderr = stdout.decode('utf-8', errors='ignore'), stderr.decode('utf-8', errors='ignore')\n if p.returncode:\n raise ExecuteError('Error running command {}: The error code {} has returned. 
Stderr: {}'.format(\n ' '.join(cmd), p.returncode, stderr\n ))\n else:\n return stdout, stderr", "def wait_rc(popen, timeout=30):\n stop = False\n end_time = time.time() + timeout\n rc = None\n while not stop:\n rc = popen.poll()\n if time.time() > end_time:\n stop = True\n return rc\n if rc is not None:\n stop = True\n return rc\n else:\n time.sleep(0.5)", "def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()", "def execute_command_with_timeout(self, command=\"\", timeout=TIMEOUT,\n receiver=None):\n return LiteHelper.execute_remote_cmd_with_timeout(\n self.telnet, command, timeout, receiver)", "def execute_command_with_timeout(self, **kwargs):\n args = kwargs\n key = args.get(\"key\", ComType.cmd_com)\n command = args.get(\"command\", None)\n case_type = args.get(\"case_type\", \"\")\n receiver = args.get(\"receiver\", None)\n timeout = args.get(\"timeout\", TIMEOUT)\n return self.com_dict.get(key).execute_command_with_timeout(\n command=command, case_type=case_type,\n timeout=timeout, receiver=receiver)", "def test_runner_timeout():\n testplan_script = os.path.join(\n os.path.dirname(__file__), \"timeout_test_plan.py\"\n )\n assert os.path.isfile(testplan_script)\n\n current_proc = psutil.Process()\n start_procs = current_proc.children()\n\n output_json = tempfile.NamedTemporaryFile(suffix=\".json\").name\n\n try:\n proc = subprocess.Popen(\n [sys.executable, testplan_script, \"--json\", output_json],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n\n # Set our own timeout so that we don't wait forever if the testplan\n # script fails to timeout. 10 minutes ought to be long enough.\n # In Python 3 we could wait() with a timeout, but this is not\n # available in Python 2 so we need to roll our own timeout mechanism.\n timer = threading.Timer(300, _timeout_cbk, args=[proc])\n timer.start()\n stdout, _ = proc.communicate()\n timer.cancel()\n\n rc = proc.returncode\n\n with open(output_json, \"r\") as json_file:\n report = json.load(json_file)\n\n # Check that the testplan exited with an error status.\n assert rc == 1\n assert report[\"status\"] == \"error\"\n assert report[\"counter\"][\"error\"] == 1\n\n # Check that the timeout is logged to stdout.\n if not re.search(\n r\"Timeout: Aborting execution after 5 seconds\", stdout\n ):\n print(stdout)\n raise RuntimeError(\"Timeout log not found in stdout\")\n\n # Check that no extra child processes remain since before starting.\n assert current_proc.children() == start_procs\n finally:\n os.remove(output_json)", "def execute_subprocess(command, inputs, *, timeout, target, args, kwargs):\n\n from subprocess import Popen, PIPE\n\n proc = Popen(command,\n stdin=PIPE, stdout=PIPE, stderr=PIPE,\n universal_newlines=True,\n env={'PYTHONPATH': pythonpath()})\n out, err = proc.communicate(input=inputs, timeout=timeout)\n\n if err:\n raise RuntimeError(\n 'error running function %s with:\\n'\n ' args=%r\\n'\n ' kwargs=%r\\n\\n'\n 'Process returned code %s.\\n'\n 'Stdout:\\n%s\\n'\n 'Error message:\\n%s' % (\n target, args, kwargs, proc.poll(),\n indent(out or '<empty>', 4),\n indent(err or '<empty>', 4)\n )\n )\n\n # Make sure out is always a string. 
We ignore decoding errors praying for\n # the best\n if isinstance(out, bytes):\n out = out.decode('utf8', 'ignore')\n\n # We remove all comments and send separate comments and data sections\n lines = out.splitlines()\n data = '\\n'.join(line for line in lines if not line.startswith('#'))\n data = data.strip()\n comments = '\\n'.join(line for line in lines if line.startswith('#'))\n comments = comments.strip()\n\n # A data section must always be present\n if not data:\n raise RuntimeError('subprocess returned an empty response:\\n%s' %\n indent(out, 4))\n return data, comments", "def __internal_exec(self, command, timeout, expected_result):\n\n f_stdout = tempfile.TemporaryFile()\n proc = None\n expected_triglog = None\n exec_status = Global.FAILURE\n current_dir = os.getcwd()\n\n try:\n if self._run_from_tc_directory:\n execution_path = os.path.join(self._execution_config_path,\n os.path.dirname(self._name))\n os.chdir(execution_path)\n if expected_result is None:\n expected_result = 0\n\n # Check that expected result is trigger message from device log\n if str(expected_result).startswith(\"[TRIG_LOG]\"):\n expected_triglog = str(expected_result).replace(\"[TRIG_LOG]\", \"\").strip()\n self._device.get_device_logger().add_trigger_message(expected_triglog)\n\n if timeout is None:\n timeout = 5\n exec_timeout = float(timeout)\n\n args = list()\n if not isinstance(command, list):\n if \"Windows\" in platform.system():\n command = str(command).split()\n else:\n command = shlex.split(command)\n\n for item in command:\n args.append(item)\n\n proc = subprocess.Popen(args,\n stdout=f_stdout,\n stderr=subprocess.STDOUT)\n\n timeout = float(timeout)\n while timeout > 0 and proc.poll() is None:\n # Agree to keep t0 & t1 variable names\n t0 = time.time()\n time.sleep(0.2)\n t1 = time.time()\n timeout -= (t1 - t0)\n\n # Process or timeout terminated\n # Get return code\n return_code = proc.poll()\n # Get stdout & stderr\n f_stdout.seek(0)\n stdout = f_stdout.read()\n\n exec_status = Global.FAILURE\n if return_code is not None:\n # Check if triggering Device log\n if expected_triglog is not None:\n # Get triggered messages from device log\n start_time = time.time()\n triglog_found = False\n\n while not triglog_found and \\\n ((time.time() - start_time) < exec_timeout):\n # Get triggered messages from device log\n messages = self._device.get_device_logger().\\\n get_message_triggered_status(expected_triglog)\n\n if isinstance(messages, list) and (len(messages) > 0):\n triglog_found = True\n\n if triglog_found:\n exec_status = Global.SUCCESS\n else:\n exec_status = Global.FAILURE\n self._logger.error(\"Did not find trigger \\\"%s\\\",\"\n \" in device log.\" % expected_triglog)\n\n # Remove triggered messages from device log\n self._device.get_device_logger().\\\n remove_trigger_message(expected_triglog)\n\n # In that condition, isdigit is called on string\n elif isinstance(expected_result, int) or\\\n ((isinstance(expected_result, str) or\n isinstance(expected_result, unicode))\n and expected_result.isdigit()):\n if int(expected_result) == return_code:\n # If expected_result is an integer\n # and if it's equal to the return code then\n exec_status = Global.SUCCESS\n else:\n self._logger.error(\"Expected \\\"%s\\\", got \\\"%s\\\"\"\n % (expected_result, return_code))\n elif expected_result in stdout:\n # Expected result is a string, search for this pattern\n # in the output\n exec_status = Global.SUCCESS\n else:\n self._logger.error(\"Did not find trigger \\\"%s\\\", in stdout.\"\n % expected_result)\n 
else:\n # Timeout !!\n self._logger.error(\"Timeout !\")\n\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception as ex:\n stdout = str(ex)\n finally:\n if proc is not None and not proc.terminate: # pylint: disable=E1101\n # pylint: disable=E1101\n proc.terminate()\n\n f_stdout.close()\n os.chdir(current_dir)\n\n return exec_status, stdout", "def run(parameters=None, silent=False):\n return _proc_exec_wait(\"{0} {1}\".format(_config.command, parameters), silent)", "def execute(command, hide_log=False, mute=False, timeout=30, wait=True, kill=False, drop=False, shell=False):\n if isinstance(command, list):\n command = subprocess.list2cmdline([unicode(arg) for arg in command])\n\n if not hide_log:\n print(\"%s > %s\" % (HOSTNAME, command))\n\n stdin = subprocess.PIPE\n stdout = subprocess.PIPE\n stderr = subprocess.STDOUT\n\n if drop or kill:\n devnull = open(os.devnull, \"w\")\n stdout = devnull\n stderr = devnull\n\n start = time.time()\n p = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell)\n\n if kill:\n delta = 0.5\n # Try waiting for the process to die\n for _ in xrange(int(timeout / delta) + 1):\n time.sleep(delta)\n if p.poll() is not None:\n return\n\n log(\"Killing process\", str(p.pid))\n try:\n p.kill()\n time.sleep(0.5)\n except OSError:\n pass\n elif wait:\n output = ''\n p.stdin.write(os.linesep)\n while p.poll() is None:\n line = p.stdout.readline()\n if line:\n output += line\n if not (hide_log or mute):\n print(line.rstrip())\n\n output += p.stdout.read()\n output = output.strip()\n\n # Add artificial sleep to slow down command lines\n end = time.time()\n run_time = end - start\n if run_time < MIN_EXECUTION_TIME:\n time.sleep(MIN_EXECUTION_TIME - run_time)\n\n if not (hide_log or mute):\n if p.returncode != 0:\n print(\"exit code = %d\" % p.returncode)\n print(\"\")\n return p.returncode, output\n else:\n return p", "def execute_command_with_timeout(self, command, **kwargs):\n return LiteHelper.execute_agent_cmd_with_timeout(self,\n command,\n **kwargs)", "def run_command(self, command, timeout=None, stdout=True):\n print('Running \"{}\"...'.format(command))\n output = self._shell.run_command(\n command, timeout=timeout, async_=False\n )\n if stdout:\n print(output)\n print(\"Done!\")\n return output", "def photo_worker(cmd):\n\n subprocess.run(cmd, shell=True, timeout=20)", "def run_command(command, root_dir):\n print highlight('\\nRunning tests: %s' % command)\n\n start_time = time.time()\n proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, cwd=root_dir)\n stdout_value, stderr_value = proc.communicate()\n end_time = time.time()\n\n if 0 < proc.returncode > 1:\n print error('Error trying to run the tests')\n print error(stderr_value)\n stdout_value = stderr_value\n else:\n print highlight('-------------------------------')\n print highlight('Finished tests in %.2f seconds\\n' % (end_time - start_time))\n\n return (proc.returncode, stdout_value)", "def execute_cmd(self, cmd):\n stdout = \"\"\n returncode = -1\n process = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n try:\n signal.signal(signal.SIGALRM, self.signal_handler)\n signal.alarm(self.timeout)\n stdout, stderr = process.communicate()\n returncode = process.returncode\n self.print_debug(\"cmd={0}, returncode={1}\".format(cmd, returncode))\n if returncode != 0:\n self.print_debug(\"stderr={0}\".format(stderr))\n signal.alarm(0)\n except Exception as err:\n self.print_debug(str(err))\n return (returncode, stdout)", 
"def check_output(cmd, timeout=None, write_stdout=False, **kwargs):\n PIPE = subprocess.PIPE\n popen = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE, **kwargs)\n\n class _Alarm(Exception):\n pass\n\n def alarm_handler(signum, frame):\n raise _Alarm(\"Command timeout: %s\" % cmd)\n\n if timeout:\n old_handler = signal.signal(signal.SIGALRM, alarm_handler)\n old_alarm = signal.alarm(timeout)\n try:\n out, err = popen.communicate()\n retcode = popen.returncode\n if retcode:\n logging.debug(\"Command %s failed with error code %s\",\n \" \".join(cmd), retcode)\n if not write_stdout:\n logging.debug(\"stdout: %s\", out)\n logging.debug(\"stderr: %s\", err)\n raise subprocess.CalledProcessError(retcode, \" \".join(cmd))\n return out, err\n except _Alarm:\n popen.kill()\n _ = popen.communicate()\n raise TimeoutError()\n finally:\n if timeout:\n signal.alarm(old_alarm)\n signal.signal(signal.SIGALRM, old_handler)", "def _run_process(self, command_string, timeout=None):\n args = shlex.split(command_string)\n with open(self.logger.log_debug_err, 'ab') as stderr, open(self.logger.log_debug_out, 'ab') as stdout:\n try:\n subprocess.check_call(args, stdout=stdout, stderr=stderr, timeout=timeout)\n except (subprocess.TimeoutExpired, subprocess.CalledProcessError):\n return\n return True", "def run_cmd(command, inputStream = \"\"):\n timeoutSecs = 3600\n timePassed = 0.0\n increment = 0.01\n\n stderrFD, errFile = tempfile.mkstemp()\n stdoutFD, outFile = tempfile.mkstemp()\n\n process = Popen(command, shell=True, stdin=PIPE, stdout=stdoutFD, \n stderr=stderrFD, close_fds=False)\n\n if process == None:\n print \"Could not create process\"\n sys.exit(1)\n\n try:\n if inputStream != \"\":\n for line in inputStream:\n process.stdin.write(line)\n process.stdin.flush()\n\n while True:\n status = process.poll()\n if status != None:\n # Process terminated succesfully.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n return (False, stdoutContents, stderrContents, process.returncode)\n\n if timePassed < timeoutSecs:\n time.sleep(increment)\n timePassed = timePassed + increment\n else:\n # time out, kill the process.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n os.kill(process.pid, signal.SIGTSTP)\n return (True, stdoutContents, stderrContents, process.returncode)\n except Exception, e:\n # if something threw exception (e.g. 
ctrl-c)\n print e\n os.kill(process.pid, signal.SIGTSTP)\n try:\n # time out, kill the process.\n # time out, kill the process.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n os.kill(process.pid, signal.SIGTSTP) \n except:\n pass\n\n return (False, stdoutContents, stderrContents, process.returncode)", "def kill_gracefully(process, timeout=2):\n try:\n with suppress(ProcessLookupError):\n process.terminate()\n stdout, stderr = process.communicate(timeout=timeout)\n except TimeoutExpired:\n process.kill()\n stdout, stderr = process.communicate()\n\n return process.returncode, stdout, stderr", "def _shcmd(cmd, timeout=15):\n delay = 1.0\n obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if sys.hexversion < 0x03000000:\n while (obj.poll() is None) and (timeout > 0):\n time.sleep(delay)\n timeout -= delay\n if not timeout:\n obj.kill()\n stdout, stderr = obj.communicate()\n else:\n try:\n stdout, stderr = obj.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n obj.kill()\n stdout, stderr = obj.communicate()\n if obj.returncode:\n print(\"COMMAND: \" + (\" \".join(cmd)))\n print(\"STDOUT:\" + os.linesep + _tostr(stdout))\n print(\"STDERR:\" + os.linesep + _tostr(stderr))\n raise RuntimeError(\"Shell command could not be executed successfully\")\n stdout = _tostr(stdout).split(os.linesep)\n stderr = _tostr(stderr).split(os.linesep)\n return stdout, stderr", "def wait(self, timeout: float | None = None) -> Task:\n try:\n stdout, stderr = self._process.communicate(timeout=timeout)\n except sp.TimeoutExpired:\n self.send_signal(sig=signal.SIGKILL, children=True)\n stdout, stderr = self._process.communicate()\n raise sp.TimeoutExpired(self._process.args, timeout, output=stdout, stderr=stderr)\n except Exception:\n self.send_signal(sig=signal.SIGKILL, children=True)\n raise\n\n retcode = self._process.poll()\n\n if stdout:\n stdout = stdout.strip()\n\n if stderr:\n stderr = stderr.strip()\n\n self._completed = sp.CompletedProcess(self._popen_args, retcode, stdout, stderr)\n return self", "def run(args, cwd = None, shell = False, kill_tree = True, timeout = -1,\n verbose = False, stdout = PIPE, stderr = PIPE):\n binary_name = args.split(' ')[0]\n\n thread = SubprocessThread(binary_name, args, shell, cwd, verbose, stdout,\n stderr)\n thread.start()\n\n if timeout == -1:\n thread.join()\n else:\n t10min = 10 * 60\n if timeout < t10min:\n thread.join(timeout)\n else:\n start = time()\n diff = 0\n while diff < timeout:\n if t10min < timeout - diff:\n t = t10min\n else:\n t = timeout - diff\n thread.join(t)\n if not thread.is_alive():\n break\n diff = time() - start\n if diff < timeout:\n print \"Keep alive, current job runs for %dmin\" % (diff / 60)\n\n if timeout != -1 and thread.is_alive():\n assert thread.pid is not None\n return kill_process(thread.pid, kill_tree, thread)\n\n return thread.returncode, thread.stdout_result, thread.stderr_result", "def issueCommand(self,command, timeout=3, message=None):\n p = self.spawnProc\n p.sendline(command)\n #self._checkCommandStatus() ", "def exec_cmd(command, wait_after=None):\n logging.debug('Executing command: %s', command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n proc.wait()\n\n if 
proc.returncode:\n cmd = ' '.join(command)\n error = subprocess.CalledProcessError(\n returncode=proc.returncode, cmd=cmd,\n output=proc.stdout.read().decode('utf-8'))\n\n logging.error('Error executing command \"%s\"', cmd)\n logging.debug('Command \"%s\" output: [%s]', cmd, error.output, stack_info=True, exc_info=error)\n raise error\n\n if wait_after:\n time.sleep(wait_after)\n\n return proc.stdout.read().decode('utf-8')", "def wait_timeout(proc, seconds):\n start = time.time()\n end = start + seconds\n interval = 0.01\n\n while True:\n result = proc.poll()\n #print \"waiting\"\n if result is not None:\n return result\n if time.time() >= end:\n\n os.killpg(proc.pid, signal.SIGTERM)\n raise RuntimeError(\"Process timed out\")\n time.sleep(interval)", "async def async_call(self, args=None, timeout=None):\n if args is None:\n args = []\n\n # Executing command with Tornado subprocess is possible only in main thread\n if threading.main_thread().ident != threading.get_ident():\n return self.call(args=args, timeout=timeout)\n\n all_args = [self.CMD if self.CMD is not None else cfg['tools.%s.cmd' % self.NAME]]\n all_args.extend(self.COMMON_ARGS)\n all_args.extend(args)\n cmd = ' '.join(all_args),\n log.debug('Executing: %s', cmd)\n\n if self._cancelled:\n raise Exception('Task was cancelled')\n task = process.Subprocess(all_args, stderr=process.Subprocess.STREAM, stdout=process.Subprocess.STREAM)\n self.proc = task.proc\n\n coroutine = gen.multi([task.wait_for_exit(raise_error=False),\n task.stdout.read_until_close(),\n task.stderr.read_until_close()])\n\n if not timeout:\n return_code, stdout, stderr = await coroutine\n else:\n try:\n return_code, stdout, stderr = await gen.with_timeout(timedelta(seconds=timeout), coroutine)\n except gen.TimeoutError as exception:\n log.exception(\"Command %s timed out after %s while executing %s\", self.NAME, timeout, cmd)\n task.proc.kill()\n raise exception\n\n self.proc = None\n\n if return_code != 0:\n log.warning(\"Command '%s' failed wit exit code: %s\", cmd, return_code)\n log.debug(\"Command '%s':\\nSTDOUT:\\n%s\\nSTDERR:\\n%s\", cmd, stdout, stderr)\n if self.RAISE_ERROR:\n raise subprocess.CalledProcessError(return_code, cmd)\n\n return self.parser.parse(stdout.decode('utf-8'), stderr.decode('utf-8'))", "def communicate(self, process, timeout):\n\n self.timeout = timeout\n self.process = process\n self.start() # Start watchdog\n result = self.process.communicate()\n if self.finished.is_set():\n raise TestException('Test timed out')\n else:\n self.finished.set() # Stop watchdog\n\n if self.process.poll():\n # Non-zero return code. 
Probably target program crash.\n raise TestException(\n 'Process returned error: ' + result[0].decode())\n\n return result", "def run_cmd(self, cmd, timeout,\n force_execution=False,\n wait_for_response=True,\n silent_mode=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def call(self, args=None, timeout=None):\n if args is None:\n args = []\n\n if not timeout:\n timeout = None\n\n all_args = [self.CMD if self.CMD is not None else cfg['tools.%s.cmd' % self.NAME]]\n all_args.extend(self.COMMON_ARGS)\n all_args.extend(args)\n cmd = ' '.join(all_args),\n log.debug('Executing: %s', cmd)\n\n if self._cancelled:\n raise Exception('Task was cancelled')\n\n self.proc = subprocess.Popen(all_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n try:\n self.proc.wait(timeout=timeout)\n except TimeoutError as exception:\n log.exception(\"Command %s timed out after %s while executing %s\", self.NAME, timeout, cmd)\n self.proc.kill()\n raise exception\n\n return_code, stdout, stderr = self.proc.returncode, self.proc.stdout.read(), self.proc.stderr.read()\n self.proc = None\n\n if return_code != 0:\n log.warning(\"Command '%s' failed wit exit code: %s\", cmd, return_code)\n log.debug(\"Command '%s':\\nSTDOUT:\\n%s\\nSTDERR:\\n%s\", cmd, stdout, stderr)\n if self.RAISE_ERROR:\n raise subprocess.CalledProcessError(return_code, cmd)\n\n return self.parser.parse(stdout.decode('utf-8'), stderr.decode('utf-8'))", "async def _run_cmd(self, cmd, timeout=5):\n try:\n self._flush_buffer()\n self.pexpect_child.sendline(cmd)\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n stdout = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else \"\"\n self.pexpect_child.sendline(\"echo $?\")\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n exit_status = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else -1\n try:\n exit_status = int(exit_status)\n except ValueError:\n exit_status = -1\n return exit_status, stdout\n except Exception as e:\n self.applog.exception(\"Exception occured --> _run_command\", exc_info=e)\n raise", "def test_echo(proc, TIMEOUT):\n proc.sendline(u'echo 1')\n assert proc.expect([TIMEOUT, u'1'])", "def exec_command(self, cmd, timeout=10):\n start = time()\n chan = self._ssh.get_transport().open_session()\n if timeout is not None:\n chan.settimeout(int(timeout))\n chan.exec_command(cmd)\n end = time()\n logger.trace('exec_command \"{0}\" on {1} took {2} seconds'.format(cmd,\n self._hostname, end-start))\n\n\n stdout = \"\"\n while True:\n buf = chan.recv(self.__MAX_RECV_BUF)\n stdout += buf\n if not buf:\n break\n\n stderr = \"\"\n while True:\n buf = chan.recv_stderr(self.__MAX_RECV_BUF)\n stderr += buf\n if not buf:\n break\n\n return_code = chan.recv_exit_status()\n logger.trace('chan_recv/_stderr took {} seconds'.format(time()-end))\n\n return (return_code, stdout, stderr)", "def execute(cmd, kill_msgs=[], verbose=False, timeout=1e6, workdir=''):\n w = Watcher(cmd=cmd, timeout=timeout, verbose=verbose, kill_msgs=kill_msgs, workdir=workdir)\n w.run()\n\n return w.run_time, w.exception, w.log", "def sync_exec(self, point, timeout=None, counter=1):\n return self._get_exec_note(point).wait(timeout=timeout,state=counter)", "def run(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False,\n timeout=None,\n check=True,\n universal_newlines=True,\n **kwargs,\n):\n result = subprocess.run(\n args,\n stdout=stdout,\n stderr=stderr,\n 
shell=shell,\n timeout=timeout,\n check=check,\n universal_newlines=universal_newlines,\n **kwargs,\n )\n return result", "def test_cmd(host, cmd, timeout=None):\n ps = host.popen(cmd)\n rs = monitor_ps(ps, timeout)\n if rs is None:\n ps.kill()\n return False\n return rs == 0", "def _patch_popen(Popen):\n if hasattr(Popen.communicate, 'patched'):\n return\n\n from threading import Timer\n _communicate = Popen.communicate\n\n def communicate(self, *args, **kwargs):\n timeout = kwargs.pop('timeout', None)\n\n if timeout is not None:\n timeout /= 1000.0\n timer = Timer(timeout, self.kill)\n try:\n timer.start()\n return _communicate(self, *args, **kwargs)\n finally:\n timer.cancel()\n else:\n return _communicate(self, *args, **kwargs)\n\n Popen.communicate = communicate\n communicate.patched = True", "def shell_command(self, path, timeout=5):\n if self.verbose > 0:\n print('STARTING PROGRAM: ' + str(path))\n self.program = pexpect.spawn(\"/bin/bash\", [\"-c\", path], timeout, \n encoding='utf-8')\n # pexpect copies all input and output to this file\n self.program.logfile = open('tester.log', 'a')", "def execute(self, *args, **kwargs):\n\n if 'timeout' not in kwargs:\n kwargs['timeout'] = self.worker.get('timeout', 60)\n\n self.logger.info(\"Running command %s\", args)\n result = sp.check_call(args, **kwargs)\n self.logger.info(\"Command Finished\")\n return result", "def wait_for_shutdown(self, timeout=5):\n # pylint: disable=E1101\n self._process.join(timeout=timeout) # type: ignore\n # pylint: enable=E1101", "def test_pipeline_timeout(mockpipe_timeout, testdir):\n test = testdir.makepyfile(TEST_TIMEOUT)\n result = testdir.inline_run(\n \"-v\",\n f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 0\n assert len(skipped) == 0\n assert len(failed) == 1", "def exec_spawn(cmd):\n i = 0\n child = pexpect.spawn(' '.join(cmd))\n print(f\"exec_spawn command: {' '.join(cmd)}\")\n while True:\n try:\n # 0 - '\\n'; 1 - 'ms\\$'\n i = child.expect(['\\n', 'ms\\$'])\n except:\n # two type exception EOF & TIMEOUT\n # in both cases will be done finally & close()\n break\n finally:\n # if i == 0:\n yield child.before\n\n child.close()", "def _timeout(self, timeout, f, *args, **kwargs):\r\n\r\n t = spawn_thread(target=f, args=args, kwargs=kwargs)\r\n t.daemon = True\r\n t.start()\r\n t.join(timeout)\r\n\r\n if not t.is_alive():\r\n if t.exc_info:\r\n return t.exc_info\r\n return t.result\r\n else:\r\n try:\r\n msg = '[%s] Execution was forcefully terminated'\r\n raise RuntimeError(msg % t.name)\r\n except:\r\n return sys.exc_info()", "def run(directory, command, encoding):\n stdout = None\n stderr = None\n return_code = None\n result = None\n try:\n process = subprocess.Popen(\n command,\n cwd=directory,\n universal_newlines=False,\n stdin=None,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n # pylint: disable=unexpected-keyword-arg\n (stdout, stderr) = process.communicate(timeout=TIMEOUT_DELAY)\n return_code = process.returncode\n except OSError:\n stdout = None\n stderr = None\n return_code = None\n except subprocess.TimeoutExpired as timeout:\n stderr = getattr(timeout, \"stderr\", \"\")\n process.kill()\n (_ignored, stderr_tail) = process.communicate()\n stderr += stderr_tail\n stdout = None\n return_code = None\n if stdout is not None:\n stdout = stdout.decode(encoding)\n if stderr is not None:\n stderr = stderr.decode(encoding)\n result = (stdout, stderr, return_code)\n return result", "def execute(command, **kwargs):\n 
proc = ProcessWrapper(command, **kwargs)\n proc.run()\n return proc.join()", "def adb(self, args, timeout=60, force_output=False):\n if self.adb_transport:\n args = [\"-t\", \"%d\" % self.adb_transport] + args\n\n if force_output:\n stdout = None\n stderr = None\n else:\n stdout = self.stdout\n stderr = self.stderr\n\n adb_proc = subprocess.Popen(\n [self.adb_bin()] + args, stdin=self.stdin, stdout=stdout,\n stderr=stderr)\n\n # This code simulates the timeout= parameter due to no python 3\n\n def kill_adb():\n \"\"\"Kills the running adb\"\"\"\n # Technically this races with wait - it is possible, though\n # unlikely, to get a spurious timeout message and kill\n # if .wait() returns, this function is triggered, and then\n # .cancel() runs\n adb_proc.kill()\n print \"Timed out (%d s)\" % timeout\n\n kill_timer = threading.Timer(timeout, kill_adb)\n kill_timer.start()\n # Add finally here so that the python interpreter will exit quickly\n # in the event of an exception rather than waiting for the timer\n try:\n exit_code = adb_proc.wait()\n return exit_code\n finally:\n kill_timer.cancel()", "def run(command, params={}):\n pass_params = {'stdout': PIPE, 'stderr': PIPE}\n pass_params.update(params)\n process = Popen(command, **pass_params)\n System.log_subprocess_output(process)", "def execute_command_line(command_line_str, time_limit):\n execution = CommandExecution()\n execution.run(command_line_str, time_limit)\n if execution.timelimit:\n return execution.output, execution.wall_time, None\n else:\n return execution.output, execution.wall_time, execution.return_code", "async def async_wait_for_process(loop, process: psutil.Process, timeout):\n try:\n await asyncio.wait_for(loop.run_in_executor(None, process.wait), timeout=timeout)\n except asyncio.exceptions.TimeoutError as e:\n raise e", "def _timeout_cbk(proc):\n proc.kill()\n raise RuntimeError(\"Timeout popped.\")", "def compute(self):\n parfile = self.create_parfile()\n self._command = [self.class_exe, parfile]\n process = subprocess.Popen(self._command)\n try:\n # process.wait(timeout=300)\n process.wait()\n # except (KeyboardInterrupt, subprocess.TimeoutExpired) as e: # TimeoutExpired only in Python >= 3.3\n except Exception as e:\n process.kill()\n raise e\n return", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "def execute_local_cmd(cmd, timeout=10):\n l.info(\"Executing local command [%s]\", cmd)\n pg_cmd = PySysCommand(cmd)\n pg_cmd.run(timeout=timeout)\n output = pg_cmd.stdout + pg_cmd.stderr\n l.info(\"Result: %s\", output)", "def execute_command(\n args: Union[List[str], str],\n *pargs,\n print_output: bool = True,\n capture_stderr: bool = True,\n print_command: bool = False,\n retry: bool = False,\n timeout: int = 15 * 60,\n **kwargs\n) -> Tuple[int, List[str]]:\n max_tries = MAX_RETRIES if retry else 1\n try_count = 0\n\n jitter = Jitter()\n time_passed = 0\n exit_code = 0\n stdout: List[str] = []\n while try_count < max_tries:\n exit_code, stdout = _execute_command(\n args,\n print_output,\n capture_stderr,\n print_command,\n *pargs,\n **kwargs,\n )\n\n try_count += 1\n\n network_errors = _get_retriable_errors(stdout)\n if exit_code != 0 and network_errors and retry:\n logger.warning('Found network errors while running %s command: %s', args, network_errors)\n else:\n # The command either succeeded or failed with a non network error. 
don't retry\n break\n\n if time_passed >= timeout:\n raise TimeoutError('Timed out retrying %s command' % args)\n\n time_passed = jitter.backoff()\n\n return exit_code, stdout", "def _subexec(command):\n lcwd = fabric.state.env.get('lcwd', None) or None #sets lcwd to None if it bools to false as well\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=lcwd)\n out, err = process.communicate()\n print \"command : %s \" % command\n print \"out: %s\" % out\n print \"err: %s\" % err", "def execute(\n cmd,\n showout=False,\n cwd=None,\n shell=\"/bin/bash\",\n timeout=600,\n asynchronous=False,\n env=None,\n replace_env=False,\n die=False,\n):\n return j.core.executors.run_local(\n cmd=cmd,\n hide=not showout,\n cwd=cwd,\n shell=shell,\n timeout=timeout,\n asynchronous=asynchronous,\n env=env or {},\n replace_env=replace_env,\n warn=not die,\n )", "def runSubProcess(command, verbose=0):\n if verbose:\n print(command)\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = process.communicate()\n status = process.poll()\n \n return output.decode('utf-8'), stderr.decode('utf-8'), status", "def sleep(self, timeout):\n try:\n self._wait_in_process_loop(lambda: (False,None),timeout=timeout)\n except threadprop.TimeoutThreadError:\n pass", "def execute(command):\n try:\n with open(os.devnull, 'w') as null:\n process = subprocess.Popen(command,\n stdout=null,\n stderr=null,\n preexec_fn=os.setsid)\n process.communicate()\n except KeyboardInterrupt:\n # use process reset instead of the system call 'reset' to avoid the\n # terminal to hang after C-c. this avoid the terminal hangs after\n # killing the process with 'C-c' ref: stackoverflow #6488275\n process.send_signal(signal.SIGINT)\n # instead of 'process.terminate()' use os.killpg() to finish the\n # execution of all process' children. it works with the argument\n # preexec_fn=os.setsid) ref: stackoverflow #9117566\n os.killpg(process.pid, signal.SIGTERM)\n except OSError as error:\n print(error, file=sys.stderr)", "def terminal_test_child(t):\n t = sys.argv[-1]\n print(\"Waiting for parent process to continue...\")\n if not _terminal_test_wait(t, 1):\n return 1\n _terminal_test_report(t, 2)\n print(\"Waiting for parent process to terminate...\")\n if not _terminal_test_wait(t, 3):\n return 1\n print(\"Test complete. 
Will terminate in 3 seconds.\")\n time.sleep(3)\n _terminal_test_report(t, 4)\n return 0", "def timeout(seconds, force_kill=True):\n def wrapper(function):\n def inner(*args, **kwargs):\n now = time.time()\n proc = RunnableProcessing(function, *args, **kwargs)\n proc.start()\n proc.join(seconds)\n if proc.is_alive():\n if force_kill:\n proc.terminate()\n runtime = time.time() - now\n raise TimeoutException('timed out after {0} seconds'.format(runtime))\n assert proc.done()\n success, result = proc.result()\n if success:\n return result\n else:\n raise result\n return inner\n return wrapper", "def __init__(self, command, timeout=-1, stdout_chunk_callback=None, stderr_chunk_callback=None,\n exit_process_callback=None, stdin_bytes=None,\n io_loop=None, kill_on_timeout=False):\n self.exit_process_callback = exit_process_callback\n self.kill_on_timeout = kill_on_timeout\n stdin = Subprocess.STREAM if stdin_bytes else None\n stdout = Subprocess.STREAM if stdout_chunk_callback else None\n stderr = Subprocess.STREAM if stderr_chunk_callback else None\n\n Subprocess.__init__(self, command, stdin=stdin, stdout=stdout, stderr=stderr, io_loop=io_loop)\n\n self.process_expired = False\n self.terminate_timeout = self.io_loop.call_later(timeout, self.timeout_callback) if timeout > 0 else None\n\n self.set_exit_callback(self.exit_callback)\n\n if stdin:\n self.stdin.write(stdin_bytes)\n self.stdin.close()\n\n if stdout:\n output_stream = PipeIOStream(self.stdout.fileno())\n\n def on_stdout_chunk(data):\n stdout_chunk_callback(data)\n if not output_stream.closed():\n output_stream.read_bytes(102400, on_stdout_chunk, None, True)\n\n output_stream.read_bytes(102400, on_stdout_chunk, None, True)\n\n if stderr:\n stderr_stream = PipeIOStream(self.stderr.fileno())\n\n def on_stderr_chunk(data):\n stdout_chunk_callback(data)\n if not stderr_stream.closed():\n stderr_stream.read_bytes(102400, on_stderr_chunk, None, True)\n\n stderr_stream.read_bytes(102400, on_stderr_chunk, None, True)", "def timeout(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n action = Process(target=func, args=args, kwargs=kwargs)\n action.start()\n action.join(timeout=5)\n if action.is_alive():\n # terminate function\n action.terminate()\n # clean up\n action.join()\n raise (TimeoutError)\n # if process is not 0, is not successful\n if action.exitcode != 0:\n # raise Attribute Error, which is the most probable\n raise (AttributeError)\n return (wrapper)", "def send_command(command, timeout_time = set_err_codes.tcs_coms_timeout):\n\t\n\ttry:\n\t\t#Send the command to the TCS\t\n\t\toutput = subprocess.run(['ssh','wasp@tcs', command],\n\t\t\t\tcapture_output=True, timeout=timeout_time)\n\texcept subprocess.TimeoutExpired:\n\t\tlogger.critical('Failed to contact TCS')\n\telse:\n\t\tresponse = output.stdout\n\t\n\t#get rid of repeated command\n\tresponse = response.decode('utf-8')\n\tlogger.info('FROM TCS: '+response)\n\treturn response", "def run_slow(interface, *args, **kwargs):\n\n try:\n interface.call(args, cancel=True, **kwargs)\n except (subprocess.CalledProcessError, OSError):\n return False\n return True", "def _handle_task_timeout(self, task_timeout):\n pid = task_timeout.pid\n\n # Kill the associated process so the thread stops.\n LOG.info(\"Subprocess %d timed out. 
Terminating...\", pid)\n self._kill_process(pid, join=True)\n\n # Make a new process to replace it.\n self._create_and_register_process()", "def subprocess_nowait(cmd, shell=False, cwd=None, env=None):\n # type: (str, bool, str, dict) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")", "def run(self, timeout=None):\n with self.kill_timeout(timeout):\n super().run()", "def wait(self, timeout=None):\n assert type(timeout) in (\n int, type(None)), 'Wrong type for [timeout], should be an int or None [was {0}]'.format(type(timeout))\n\n self._process.join(timeout)", "def dialv_wait_inouterr_timeout(timeout, op, spath, attrs=None, args=None, stdin=None, stdout_size=STDOUT_SIZE_DEFAULT, stderr_size=STDERR_SIZE_DEFAULT):\n return dialv_wait_inouterr(to_deadline(timeout), op, spath, attrs, args, stdin, stdout_size, stderr_size)", "def runComUnlimited(command, cwd=os.getcwd(), sleepTime=0, terminal=_default_terminal):\n tmpsh = open(os.path.join(cwd,\"QZTEMP.sh\"), \"w\")\n tmpsh.write(\"ulimit -s unlimited\\n\")\n tmpsh.write(command+\"\\n\")\n tmpsh.write(\"sleep \"+str(sleepTime))\n if __q_debug__ == True:\n tmpsh.write(\"sleep 10\")\n tmpsh.close()\n proc = subprocess.Popen(terminal+\" -e bash QZTEMP.sh\", shell=True, cwd=cwd)\n while proc.wait() != 0:\n pass\n #os.remove(os.path.join(cwd,\"QZTEMP.sh\"))", "def run_command(cmd, log_file, time_file):\r\n sts = 0\r\n try_times = -1\r\n while True:\r\n try_times += 1\r\n if try_times > 2: # try to run the command 4 times --> change to 3 times\r\n break\r\n etime = ElapsedTime()\r\n etime.play()\r\n log = \"Running Command: %s [%s]\" % (cmd, etime.get_play_time())\r\n if try_times:\r\n log += \"-- TryTime: %d\" % try_times\r\n say_it(log)\r\n say_it(\" <%s>\" % os.getcwd())\r\n sts, text = get_status_output(cmd)\r\n etime.stop()\r\n append_file(log_file, [\"=\" * 30,cmd, etime] + text)\r\n append_file(time_file, [cmd, \"rem %s\" % etime])\r\n if sts:\r\n t = \"Status: Failed.\"\r\n else:\r\n t = \"Status: Pass.\"\r\n say_it(\"%s %s\" % (t, etime))\r\n time.sleep(3.14)\r\n 
if not sts:\r\n break\r\n # flow failed!\r\n have_license_error = simple_parser(log_file, p_license_error, 100)\r\n if have_license_error:\r\n # backup log file and re-launch the command after 1 minute\r\n backup_log_file = log_file+str(try_times)\r\n if os.path.isfile(backup_log_file):\r\n os.remove(backup_log_file)\r\n os.rename(log_file, backup_log_file)\r\n time.sleep(30) # change 60 --> 30\r\n else:\r\n if abnormal_exit(log_file):\r\n time.sleep(30)\r\n else:\r\n break\r\n if sts:\r\n error_msg_line = simple_parser(log_file, p_error_msg, 100)\r\n if error_msg_line:\r\n say_it(error_msg_line[0])\r\n say_it(\"\")\r\n return sts", "def runSubprocessInThread(command, resultQ, verbose=False):\n result = runSubProcess(command, verbose=verbose)\n resultQ.put(result)", "def exec_cmd(command):\r\n global _verbose\r\n debug(\"Executing command: %s\" % command)\r\n if not _verbose:\r\n command = \"%s > /dev/null 2>&1\" % command\r\n resp = os.system(command)\r\n if resp != 0:\r\n exit(\"Command [%s] failed\" % command, resp)", "def WaitForExit(self, timeout: int = 1200) -> Dict[str, Any]:\n raise NotImplementedError()", "def wait_for_command_execution(self, timeout=None, check_fun=None):\n if check_fun is None:\n def check_fun2(buf, whole_data):\n # TODO: expose via logging config entry\n if self.verbose_logger is not None:\n self.verbose_logger.debug(\"expecting '%s', got: '%s'\", self.shell_prompt, buf)\n\n return self.re_shell_prompt.search(whole_data)\n\n check_fun = check_fun2\n try:\n res = self.process_output(\n NetUtil.wait_for_socket_result(self.sock,\n check_fun,\n read_buf_size=SOCKET_READ_BUF_SIZE,\n timeout=timeout\n )\n )\n except NetUtil.Timeout as e:\n # netstat_uds = run_shell(\"netstat -ape -A unix\")\n # open_fds = run_shell('ls -l /proc/%s/fd/' % os.getpid())\n # lsof = run_shell('lsof -U')\n # debug:\n\n # Active Unix Domain Sockets:\n # %s.\n # Open file handles (Unix):\n # %s\n # lsof:\n # %s\n # % (netstat_uds, open_fds, lsof))\n # log exception to node log\n if self.brief_logger:\n self.brief_logger.exception(e)\n\n raise\n return res", "def execute_remote(args, host = None, timeout = 10):\n\n from time import sleep\n\n timeout = config.Config.ssh_timeout\n def is_timeout(test):\n wait_time = 0\n while not test():\n sleep(0.5)\n wait_time += 0.5\n if wait_time > timeout:\n return True\n return False\n\n try:\n handle = type('Handle', (object,), {'stdout' : [], 'stderr' : [], 'returncode' : 0})()\n if not SSHSession:\n raise ArcError('There is no active SSH session! Run lrms.common.ssh.ssh_connect', 'common.proc')\n session = SSHSession[host if host else list(SSHSession.keys())[-1]].open_session()\n session.exec_command(args)\n if is_timeout(session.exit_status_ready):\n warn('Session timed out. Some output might not be received. Guessing exit code from stderr.', 'common.proc')\n handle.returncode = session.exit_status\n\n chnksz = 2 << 9\n\n stdout = ''\n data = session.recv(chnksz)\n while data:\n stdout += data\n data = session.recv(chnksz)\n handle.stdout = stdout.split('\\n')\n\n stderr = ''\n data = session.recv_stderr(chnksz)\n while data:\n stderr += data\n data = session.recv_stderr(chnksz)\n handle.stderr = stderr.split('\\n')\n\n if handle.returncode == -1:\n handle.returncode = len(stderr) > 0\n return handle\n\n except Exception as e:\n raise ArcError('Failed to execute command \\'%s (...) 
\\':\\n%s' % (args.split()[:4], str(e)), 'common.proc')", "def executeCommand(cmd,loopsleep):\n\tsleep(loopsleep)\n\tresult = subprocess.getoutput(cmd)\n\treturn(result.split(\"\\n\"))", "def execute(command, stdout, stderr=sys.stdout):\n # Does tail work to watch stdout to logging service?\n proc = subprocess.Popen(\n command, shell=True, stdout=stdout, stderr=stderr)\n proc.wait()", "def subprocess(command):\n from sys import executable as python\n from subprocess import Popen,PIPE\n from sys import stderr\n command = \"from %s import *; %s\" % (modulename(),command)\n for attempt in range(0,3):\n try:\n process = Popen([python,\"-c\",command],stdout=PIPE,stderr=PIPE,\n universal_newlines=True)\n break\n except OSError,msg: # [Errno 513] Unknown error 513\n log(\"subprocess: %s\" % msg)\n sleep(1)\n output,error = process.communicate()\n if \"Traceback\" in error: raise RuntimeError(repr(command)+\"\\n\"+error)\n if error: stderr.write(error)\n return output" ]
[ "0.7861095", "0.74641967", "0.72873646", "0.7224932", "0.72131467", "0.7131527", "0.7093006", "0.70907867", "0.7079524", "0.70083195", "0.6727389", "0.65759695", "0.65237683", "0.6510072", "0.6489184", "0.6449738", "0.64226705", "0.63744366", "0.63308406", "0.6277528", "0.6273885", "0.6256989", "0.6232077", "0.6230843", "0.6213242", "0.6208117", "0.6180735", "0.6151805", "0.6107725", "0.6087039", "0.60851204", "0.60537475", "0.60433656", "0.60099125", "0.5970667", "0.5961855", "0.5960168", "0.59499705", "0.59161", "0.59029317", "0.5879124", "0.5865407", "0.58513284", "0.58152926", "0.5804717", "0.57954717", "0.5784996", "0.5771028", "0.5768886", "0.5763518", "0.57553244", "0.571821", "0.57101583", "0.5698866", "0.5690781", "0.5690692", "0.56771827", "0.56620175", "0.5660836", "0.5652493", "0.56125414", "0.5604799", "0.55900663", "0.5584583", "0.55815583", "0.55795634", "0.5577131", "0.5574681", "0.55649644", "0.5550555", "0.55451876", "0.551334", "0.5501525", "0.5492713", "0.5492474", "0.5479078", "0.547122", "0.54544413", "0.54439306", "0.5441119", "0.5425401", "0.5423138", "0.5419529", "0.54144734", "0.5398449", "0.5398095", "0.5384606", "0.5368985", "0.5367447", "0.5363726", "0.5361534", "0.53601635", "0.5355729", "0.53509974", "0.5350636", "0.53438175", "0.5339627", "0.53324485", "0.5324331", "0.53202283" ]
0.566922
57
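For orientation on the row above: the same capture-everything-with-a-timeout behaviour can be sketched with nothing but the standard library. This is an illustrative simplification, not the implementation the document field belongs to, and the function name below is made up for the example:

import subprocess

def get_cmd_status_and_output(args, timeout, cwd=None, shell=False, env=None):
    # Run the command with stderr folded into stdout and a hard time limit.
    try:
        completed = subprocess.run(
            args, cwd=cwd, shell=shell, env=env, timeout=timeout,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except subprocess.TimeoutExpired as e:
        # Mirror the row's behaviour of surfacing partial output on timeout.
        raise TimeoutError(e.output or b'')
    return completed.returncode, completed.stdout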
Executes a subprocess and continuously yields lines from its output.
def IterCmdOutputLines(args, iter_timeout=None, timeout=None, cwd=None,
                       shell=False, env=None, check_status=True):
  cmd = _ValidateAndLogCommand(args, cwd, shell)
  process = Popen(
      args, cwd=cwd, shell=shell, env=env, stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT)
  return _IterCmdOutputLines(
      process, cmd, iter_timeout=iter_timeout, timeout=timeout,
      check_status=check_status)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def runCommand(command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1)\n for line in p.stdout:\n print (line.decode(\"utf-8\"),end=\"\") # the end=\"\" argument to print prevents unwanted newlines after each line\n p.wait()", "def runCommand(comm, opts):\n comm = comm.format(*opts)\n print(\"Running '{}'...\".format(comm))\n process = subprocess.Popen(comm, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n for line in iter(process.stdout.readline, \"\"):\n yield line\n\n print(\"Completed\")\n print(\"\")\n process.stdout.close()", "def executeOld(cmd):\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, \"\"):\n yield stdout_line \n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)", "def execute_command(cmd):\n popen = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdout = b''\n while True: # Save output to youtube_stdout while this being echoed\n tmp = popen.stdout.read(1)\n stdout += tmp\n _print(tmp, end=\"\")\n sys.stdout.flush()\n # do it until the process finish and there isn't output\n if tmp == b\"\" and popen.poll() is not None:\n break", "def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n process.communicate()", "def run_command(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print output.strip()\n\n rc = process.poll()\n return rc", "def execute(cmd):\n print(f\"Execute command: {' '.join(cmd)}\")\n popen = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n universal_newlines=False,\n bufsize=1, # unbuffered\n )\n for stdout_line in iter(popen.stdout.readline, b''):\n yield stdout_line\n\n popen.stdout.close()\n popen.kill()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)", "def run_command(cmd, print_output=True):\n def enqueue_output(out, queue):\n for line in iter(out.readline, b''):\n queue.put(line.decode(\"utf-8\"))\n out.close()\n\n print(\" -> {}\".format(cmd))\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n q_stdout = Queue()\n q_stderr = Queue()\n t_stdout = Thread(target=enqueue_output, args=(proc.stdout, q_stdout))\n t_stderr = Thread(target=enqueue_output, args=(proc.stderr, q_stderr))\n t_stderr.daemon = True # thread dies with the program\n t_stdout.daemon = True\n t_stdout.start()\n t_stderr.start()\n stdout = \"\"\n stderr = \"\"\n\n # read stdout and stderr without blocking\n finished = False\n while True:\n done = proc.poll()\n try:\n line_stdout = \"\"\n while True:\n line_stdout += q_stdout.get(timeout=0.01)\n except Empty:\n pass\n # accumilate stdout and print if we should\n stdout += line_stdout\n if print_output and line_stdout != \"\":\n sys.stdout.write(bcolors.COLOR_CYAN)\n for line in line_stdout.splitlines():\n sys.stdout.write(\"\\t{}\\n\".format(line))\n sys.stdout.write(bcolors.COLOR_NC)\n sys.stdout.flush()\n\n try:\n line_stderr = \"\"\n while True:\n line_stderr += q_stderr.get(timeout=0.01)\n except Empty:\n pass\n # accumilate stderr and print if we should\n stderr += 
line_stderr\n if print_output and line_stderr != \"\":\n sys.stderr.write(bcolors.COLOR_RED)\n for line in line_stderr.splitlines():\n sys.stderr.write(\"\\t{}\\n\".format(line))\n sys.stderr.write(bcolors.COLOR_NC)\n sys.stderr.flush()\n\n # check if we're done and the finished flag is set\n if finished:\n if done != 0 and print_output is False:\n sys.stderr.write(bcolors.COLOR_RED)\n for line in stderr.splitlines():\n sys.stderr.write(\"\\t{}\\n\".format(line))\n sys.stderr.write(bcolors.COLOR_NC)\n sys.stderr.flush()\n\n return stdout, stderr, done\n\n # check if the process is done...\n if done is not None:\n finished = True\n # give the process's stdout and stderr time to flush\n time.sleep(0.25)", "def cmd_iter(cmd):\n\n def thread_enqueue(label, f, q):\n t = threading.Thread(target=enqueue_output, args=(label, f, q))\n t.daemon = True ## thread dies with the program\n t.start()\n return t\n\n def enqueue_output(label, out, queue):\n prev_line = None\n for line in out.read():\n if prev_line is not None:\n queue.put((label, \"%s\\n\" % prev_line))\n prev_line = line\n # print(\"%s: %r\" % (label, line))\n # print(\"END of %s\" % (label, ))\n if prev_line:\n queue.put((label, prev_line))\n out.close()\n\n proc = Proc(cmd)\n proc.stdin.close()\n q = Queue()\n t1 = thread_enqueue(\"out\", proc.stdout, q)\n t2 = thread_enqueue(\"err\", proc.stderr, q)\n running = True\n while True:\n try:\n yield q.get(True, 0.001)\n except Empty:\n if not running:\n break\n proc.poll()\n running = proc.returncode is None or \\\n any(t.is_alive() for t in (t1, t2))\n\n # print(\"%s: %r\" % (\"errlvl\", proc.returncode))\n yield \"errorlevel\", proc.returncode", "def executeCommand(cmd,loopsleep):\n\tsleep(loopsleep)\n\tresult = subprocess.getoutput(cmd)\n\treturn(result.split(\"\\n\"))", "def _run_cmd(args, cwd):\n p = subprocess.Popen(\n args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, cwd=cwd)\n streams = tuple(s.decode('latin1').strip() for s in p.communicate())\n for stream_content in streams:\n print(stream_content)\n return (streams) + (p.returncode,)", "def __iter__(self):\n p = subprocess.Popen(self.comm, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n\t\t break\n line = line.strip()\n yield line", "def run_and_log_output(cmd_string):\n logging.info('Running %s', cmd_string)\n c = iterpipes.cmd(cmd_string)\n out = iterpipes.run(c)\n for line in out:\n logging.info(line)", "def bash(cmd, prnt=True, wait=True):\n p = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)\n if wait:\n p.wait()\n while True and prnt:\n line = p.stdout.readline()\n if line:\n print(line)\n else:\n break\n\n return (p)", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def yield_output(self, args, *popenargs, **kwargs):\n p = self.create_process(args,\n stdout=subprocess.PIPE,\n *popenargs,\n **kwargs)\n for line in p.stdout:\n yield line\n p.wait()\n if p.returncode:\n raise subprocess.CalledProcessError(p.returncode,\n self.build_args(args))", "def subproc(self,line):\n self.set_stdout()\n proc = subprocess.Popen(line.split(),stdout=self.stdout)\n proc.wait() #ensures that the subprocess executes and terminates before returning to the shell", "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : 
{0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def run_cmd(command, work_dir=None):\n if work_dir is not None:\n os.chdir(work_dir) # Change to working directory\n\n # Run Command\n ps = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n\n # Read + yield stdout until process ends\n while ps.poll() is None:\n line = ps.stdout.readline()\n if line != \"\":\n yield line\n\n return_code = ps.returncode\n # Throw exception if return code is not 0\n if return_code:\n exc = \"\\nCOMMAND:%s\\nRET_CODE:%i\" % (command, return_code)\n raise ReturnCodeError(exc, return_code)", "def run_cmd(command, inputStream = \"\"):\n timeoutSecs = 3600\n timePassed = 0.0\n increment = 0.01\n\n stderrFD, errFile = tempfile.mkstemp()\n stdoutFD, outFile = tempfile.mkstemp()\n\n process = Popen(command, shell=True, stdin=PIPE, stdout=stdoutFD, \n stderr=stderrFD, close_fds=False)\n\n if process == None:\n print \"Could not create process\"\n sys.exit(1)\n\n try:\n if inputStream != \"\":\n for line in inputStream:\n process.stdin.write(line)\n process.stdin.flush()\n\n while True:\n status = process.poll()\n if status != None:\n # Process terminated succesfully.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n return (False, stdoutContents, stderrContents, process.returncode)\n\n if timePassed < timeoutSecs:\n time.sleep(increment)\n timePassed = timePassed + increment\n else:\n # time out, kill the process.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n os.kill(process.pid, signal.SIGTSTP)\n return (True, stdoutContents, stderrContents, process.returncode)\n except Exception, e:\n # if something threw exception (e.g. 
ctrl-c)\n print e\n os.kill(process.pid, signal.SIGTSTP)\n try:\n # time out, kill the process.\n # time out, kill the process.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n os.kill(process.pid, signal.SIGTSTP) \n except:\n pass\n\n return (False, stdoutContents, stderrContents, process.returncode)", "def watch(self):\n reader, writer = os.pipe2(0)\n\n pid = os.fork()\n\n # In the child\n if pid == 0:\n tty.setraw(0)\n os.close(reader)\n os.close(2)\n\n os.dup2(writer, 1)\n\n os.execlp(self.__program, self.__program, *self.__args)\n\n sys.exit(1)\n else:\n os.close(writer)\n\n while True:\n result = os.read(reader, 1024)\n if len(result) == 0:\n break\n sys.stdout.write(result.decode('utf-8'))\n\n os.waitpid(pid, 0)", "def run_command(cmd):\n\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n for line in iter(proc.stdout.readline, b''):\n print(\">>> \" + line.rstrip())\n (stdout, stderr) = proc.communicate()\n return proc.returncode == 0, proc", "def cmd(*args, **kwargs):\n cmd_s = ' '.join(args)\n print('+ {}'.format(cmd_s))\n proc = subprocess.Popen(cmd_s, shell=True, stdout=subprocess.PIPE, **kwargs)\n for line in iter(proc.stdout.readline, ''):\n sys.stdout.write('> {}'.format(line))\n while proc.poll() is None:\n time.sleep(0.5)\n if proc.returncode != 0:\n raise CmdError(cmd_s, proc.returncode)", "async def checked_run(*cmd):\n\n # Start the subprocess.\n logging.info('Running: %s', expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n chunks = []\n while True:\n chunk = await p.stdout.read(16 * 1024)\n if not chunk:\n break\n chunks.append(chunk)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n stdout = b''.join(chunks).decode()[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, expand_cmd_str(cmd), stdout))\n\n return stdout", "def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output", "def spawn(stdout, command, **options):\n # grab arguments that we care about\n stderr = options.pop('stderr', None)\n daemon = options.pop('daemon', True)\n\n # empty out the first generator result if a coroutine is passed\n if hasattr(stdout, 'send'):\n res = six.next(stdout)\n res and P.write(res)\n if hasattr(stderr, 'send'):\n res = six.next(stderr)\n res and P.write(res)\n\n # spawn the sub-process\n return process(command, stdout=stdout, stderr=stderr, **options)", "def run(self):\n for line in 
iter(self.pipeReader.readline, ''):\n print(line.strip('\\n'))\n self.pipeReader.close()", "def run_command(command, timeout=None):\n if type(command) != list:\n command = [command]\n cmd = shlex.split(command[0])\n process = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n prev_process = process #Assign this process as prev_process so that the variables make sense later\n for cmd in command[1:]:\n cmd = shlex.split(cmd)\n #prev_process is the process that was run before the current iteration of loop\n process = subprocess.Popen(cmd, shell=False, stdin=prev_process.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \n prev_process.stdout.close() #Close the stdout of the previous process, as we don't need it\n prev_process = process #Assign the process in the current iteration of the loop as the current process\n #Handling timeouts\n if timeout:\n try:\n process.communicate(timeout=timeout)\n except TimeoutExpired:\n process.kill()\n result = process.communicate()\n err_code = process.returncode\n output = result[0].decode(\"utf-8\")\n error = result[1].decode(\"utf-8\")\n return output, error, err_code", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def _ffmpeg_loop(cls, ffmpeg: subprocess.Popen) -> Iterable[Progress]:\n while ffmpeg.poll() is None:\n rlist, _, _ = select((ffmpeg.stderr, ffmpeg.stdout), (), ())\n # Read logs from stdin\n if ffmpeg.stderr in rlist:\n status = cls.process_logs(ffmpeg.stderr.read().splitlines())\n if status:\n yield status\n # ignore stdout\n if ffmpeg.stdout in rlist:\n ffmpeg.stdout.read()", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n echo(line.strip('\\n'), ctx=self.click_ctx, err=self.is_err)\n\n self.pipeReader.close()", "def _launch_command(args, out_cb, err_cb, done=None, **kwargs):\n\n def pump_stream(callback, stream):\n \"\"\"Pump the stream\"\"\"\n for line in stream:\n callback(line)\n callback(None)\n\n def joiner():\n \"\"\"Wait for streams to finish, then call done callback\"\"\"\n for th in threads:\n th.join()\n done(process)\n\n kwargs = kwargs.copy()\n in_data = kwargs.get(\"input\")\n if \"input\" in kwargs:\n del kwargs[\"input\"]\n assert kwargs.get(\"stdin\") is None, kwargs[\"stdin\"]\n kwargs[\"stdin\"] = PIPE\n elif \"stdin\" not in kwargs:\n kwargs[\"stdin\"] = DEVNULL\n kwargs.setdefault(\"stdout\", PIPE)\n kwargs.setdefault(\"stderr\", PIPE)\n kwargs[\"universal_newlines\"] = True # Text streams, not byte streams\n process = Popen(args, **kwargs)\n threads = []\n if process.stdout:\n thread = Thread(\n target=pump_stream, args=(out_cb, process.stdout), daemon=True\n )\n thread.start()\n threads.append(thread)\n if process.stderr:\n thread = Thread(\n target=pump_stream, args=(err_cb, process.stderr), daemon=True\n )\n thread.start()\n threads.append(thread)\n if done and threads:\n Thread(target=joiner, daemon=True).start()\n if in_data:\n process.stdin.write(str(in_data, \"utf-8\"))\n process.stdin.close()\n return process", "def start(self):\n last_stdout = None\n self.processes = []\n for cmd in self.cmds:\n # TODO: handle exceptions raised by Popen\n p = subprocess.Popen(cmd, stdin=last_stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if last_stdout is not None:\n last_stdout.close()\n last_stdout = p.stdout\n self.processes.append(p)", "def _IterProcessStdoutFcntl(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: 
disable=too-many-nested-blocks\n import fcntl\n try:\n # Enable non-blocking reads from the child's stdout.\n child_fd = process.stdout.fileno()\n fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)\n fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\n end_time = (time.time() + timeout) if timeout else None\n iter_end_time = (time.time() + iter_timeout) if iter_timeout else None\n\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n if iter_end_time and time.time() > iter_end_time:\n yield None\n iter_end_time = time.time() + iter_timeout\n\n if iter_end_time:\n iter_aware_poll_interval = min(poll_interval,\n max(0, iter_end_time - time.time()))\n else:\n iter_aware_poll_interval = poll_interval\n\n read_fds, _, _ = select.select([child_fd], [], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if not data:\n break\n yield data\n\n if process.poll() is not None:\n # If process is closed, keep checking for output data (because of timing\n # issues).\n while True:\n read_fds, _, _ = select.select([child_fd], [], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if data:\n yield data\n continue\n break\n break\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()", "def call_output(*popenargs, **kwargs):\n\n def create_process(*popenargs, **kwargs):\n return subprocess.Popen(*popenargs, **kwargs)\n\n if \"stdout\" in kwargs:\n raise ValueError(\"stdout argument not allowed, it will be overridden.\")\n if \"stdin\" in kwargs:\n raise ValueError(\"stdin argument not allowed, it will be overridden.\")\n\n kwargs[\"stdin\"] = subprocess.PIPE\n line_handler = kwargs.pop(\"listener\", None)\n\n with create_process(\n *popenargs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs\n ) as process:\n return run(process, line_handler)", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. 
If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def runCommand(command, outputPrefix=\"ProcessRunner> \"):\n proc = ProcessRunner(command)\n proc.mapLines(WriteOut(sys.stdout, outputPrefix=outputPrefix), procPipeName=\"stdout\")\n proc.mapLines(WriteOut(sys.stderr, outputPrefix=outputPrefix), procPipeName=\"stderr\")\n proc.wait()\n returnCode = proc.poll()\n\n # proc.terminate()\n # proc.shutdown()\n\n return returnCode", "def run_process(cmd, out_log=None, err_log=None):\r\n return run_multi_processes([cmd], out_log=out_log, err_log=err_log)", "def check_output(*args, **kwargs):\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.STDOUT\n\n p = subprocess.Popen(*args, **kwargs)\n\n try:\n while p.poll() is None:\n time.sleep(0.002)\n return p.poll(), p.stdout.read().decode('utf-8', 'ignore')\n finally:\n if p.poll() is None: # pragma: no cover\n p.kill()", "def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n ),\n }\n # Unfortunately, stdout and stderr are not synchronised with each other.\n # This makes capturing both for real-time processing useless. So it is\n # currently all captured under stdout. Even more unfortunately, stderr\n # comes through first before stdout. 
This means writes that are made first\n # to stdout will not be first through the pipe if there is stderr output.\n #\n # This lack of sychronisation between stdout and stderr output makes\n # real-time display useless because they aren't captured and passed\n # through to the handler as they are encountered.\n #\n # Worse still, there appear to be issues with subprocess output capture on\n # Windows.\n #\n # A proper resolution would be to provide a custom subprocess module but\n # since the common usage does not require real-time capture of\n # stdout/stderr, this is not worth the effort. Manually running whatever\n # was intended for the subprocess outside ttt is the only recourse.\n #\n for thread in threads.values():\n thread.start()\n\n stdout = []\n stderr = []\n while threads:\n try:\n item = io_q.get(True, 1)\n except queue.Empty:\n if process.poll() is not None:\n break\n else:\n outstream, message = item\n if message == \"EXIT\":\n threads[outstream].join()\n del threads[outstream]\n else:\n message = message.rstrip(os.linesep)\n channel = sys.stdout if outstream == \"stdout\" else sys.stderr\n (stdout if outstream == \"stdout\" else stderr).append(message)\n if line_handler is not None:\n line_handler(channel, message)\n else:\n channel.write(message)\n channel.flush()\n\n for t in threads.values():\n t.join()\n process.wait()\n return (process.returncode, stdout, stderr)", "def _run_subprocess(cmd: List[str], args: List[str], env: Optional[Dict[str, str]] = None):\n async def _read_output(stream, logger_instance):\n \"\"\"Read output from command and print it into the right logger.\"\"\"\n while True:\n line = await stream.readline()\n if line == b'':\n break\n logger_instance(line.decode('utf-8').rstrip())\n\n async def _stream_subprocess(cmd, args, env):\n \"\"\"Run subprocess.\"\"\"\n cmd_ = ' '.join(cmd)\n args_ = ' '.join(args)\n process = await asyncio.create_subprocess_shell(f'{cmd_} {args_}',\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=env)\n\n await asyncio.wait([\n _read_output(process.stdout, logger.info),\n _read_output(process.stderr, logger.error)\n ])\n await process.wait()\n if process.returncode is None or process.returncode != 0:\n raise ValueError('Task failed!')\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(_stream_subprocess(cmd, args, env))", "def process(cmd_string, stdin=None):\n return process_results(process_run(cmd_string, stdin=stdin))", "def execute_command(cmd_string, cwd=None, shell=True):\n\n sub = subprocess.Popen(cmd_string, cwd=cwd, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, shell=shell, bufsize=4096)\n\n stdout_str = ''\n while sub.poll() is None:\n stdout_str += str(sub.stdout.read(), encoding=\"UTF-8\")\n time.sleep(0.1)\n\n return stdout_str", "async def checked_run(cmd, env=None):\n\n # Start the subprocess.\n logging.info('Running: %s', await expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, env=env,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n lines = []\n while True:\n line = await p.stdout.readline()\n if not line:\n break\n line = line.decode()[:-1]\n lines.append(line)\n logging.info(line)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n output = '\\n'.join(lines)[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, await 
expand_cmd_str(cmd), output))\n\n return output", "def run(command, params={}):\n pass_params = {'stdout': PIPE, 'stderr': PIPE}\n pass_params.update(params)\n process = Popen(command, **pass_params)\n System.log_subprocess_output(process)", "def run_tool(args, quiet=False):\n pipe = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n result = \"\"\n for line in iter(pipe.stdout.readline, \"\"):\n if not line and pipe.poll() is not None:\n break\n output = line.decode(encoding='UTF-8').rstrip()\n if output != \"\":\n if not quiet:\n print(\"\\t * \" + output)\n result = output\n return result", "def _execute_command(\n args: Union[List[str], str],\n print_output: bool,\n capture_stderr: bool,\n print_command: bool,\n *pargs,\n **kwargs\n) -> Tuple[int, List[str]]:\n stdout_write, stdout_path = tempfile.mkstemp()\n with open(stdout_path, \"rb\") as stdout_read, open('/dev/null', 'w') as dev_null:\n\n if print_command:\n print(\"Executing: %s\" % \" \".join(args))\n\n kwargs['stdout'] = stdout_write\n kwargs['stderr'] = stdout_write if capture_stderr else dev_null\n\n # pylint: disable=consider-using-with\n process = subprocess.Popen(\n args,\n *pargs,\n **kwargs\n )\n\n while True:\n output = stdout_read.read(1).decode(errors=\"replace\")\n\n if output == '' and process.poll() is not None:\n break\n\n if print_output and output:\n print(output, end=\"\", flush=True)\n\n exit_code = process.poll()\n\n stdout_read.seek(0)\n stdout = [line.decode(errors=\"replace\") for line in stdout_read.readlines()]\n\n # ignoring mypy error below because it thinks exit_code can sometimes be None\n # we know that will never be the case because the above While loop will keep looping forever\n # until exit_code is not None\n return exit_code, stdout # type: ignore", "def execute_command(cmdstring, cwd=None, shell=True):\n\n if shell:\n cmdstring_list = cmdstring\n\n sub = subprocess.Popen(cmdstring_list, cwd=cwd, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, shell=shell, bufsize=8192)\n\n stdout_str = \"\"\n while sub.poll() is None:\n stdout_str += str(sub.stdout.read())\n time.sleep(0.1)\n\n return stdout_str", "def _run_command(command, cwd, output=True, decode=False, loop=None):\n loop = loop or asyncio.get_event_loop()\n\n if output:\n out = asyncio.subprocess.PIPE\n else:\n out = None\n\n process = yield from asyncio.create_subprocess_shell(\n command, loop=loop, stdout=out, stderr=out,\n limit=GIT_COMMAND_BUFFER_SIZE, cwd=cwd)\n\n if output:\n # communicate() also waits on the process termination\n stdout, stderr = yield from process.communicate()\n if decode:\n stdout = stdout.decode(sys.getdefaultencoding())\n stderr = stderr.decode(sys.getdefaultencoding())\n else:\n stdout, stderr = None, None\n yield from process.wait()\n\n if process.returncode:\n raise base.AiogitException(\n (stderr or stdout).decode(sys.getdefaultencoding()))\n\n return stdout, stderr", "def _exec_cmd_helper(self, cmd: str, nvim_ipc: str):\n assert self.busy is False\n\n self.shared_status.set_running()\n self.busy = True\n os.system(\"clear\")\n logging.info(\"Executing cmd {0}\".format(cmd))\n\n start = time.time()\n\n success = False\n if self.command_group.is_cmd_runner_command(cmd):\n for runner in self.runners:\n if runner.config.name == cmd:\n success = runner.run_all()\n break\n else:\n # The code block below essentially just \"tees\" the stdout and\n # stderr to a log file, while still preserving the terminal\n # output (inclusive colors).\n # Using subprocess.PIPE does not seem possible 
under Darwin,\n # since the pipe does not have the isatty flag set (the isatty\n # flag affects the color output).\n # Note that the file is only written at the end and not streamed.\n master, slave = pty.openpty()\n\n # This prevents LF from being converted to CRLF\n attr = termios.tcgetattr(slave)\n attr[1] = attr[1] & ~termios.ONLCR\n termios.tcsetattr(slave, termios.TCSADRAIN, attr)\n\n proc = subprocess.Popen(cmd, shell=True, stdout=slave, stderr=slave, close_fds=False)\n\n # Close the write end of the pipe in this process, since we don't need it.\n # Otherwise we would not get EOF etc.\n os.close(slave)\n\n read_stdout_stderr = os.fdopen(master, 'rb', buffering=0)\n complete_output = \"\"\n\n try:\n while proc.poll() is None:\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n\n # Read the last line\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n # This error is \"expected\" under Linux systems.\n # readline() doesn't seem to behave properly there.\n # The exception does not occur on MacOS.\n except OSError as oserr:\n if oserr.errno != errno.EIO or proc.poll() is None:\n logging.critical(\"Unexpected OS error: {0}\".format(oserr))\n except:\n logging.critical(\"Unexpected error while reading from process\")\n\n os.close(master)\n proc.wait()\n\n if proc.returncode == 0:\n success = True\n\n logfile, logfilename = tempfile.mkstemp(dir=cybld_helpers.get_base_path(),\n prefix=cybld_helpers.NVIM_LOG_PREFIX)\n\n # strip color codes from logfile\n # complete_output = re.sub(r'(\\x9B|\\x1B\\[)[0-?]*[ -\\/]*[@-~]', '', complete_output)\n complete_output = re.sub(r'\\x1b(\\[.*?[@-~]|\\].*?(\\x07|\\x1b\\\\))', '', complete_output)\n\n with open(logfile, 'w+') as logfile_opened:\n logfile_opened.write(complete_output)\n\n CyBldIpcNeovim(True, nvim_ipc, logfilename, cmd)\n\n end = time.time()\n\n self.busy = False\n cybld_helpers.print_seperator_lines()\n\n timediff_in_seconds = str(int(end - start))\n\n if success:\n cybld_helpers.print_centered_text(\"SUCCESS: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), True)\n self.shared_status.set_success()\n else:\n cybld_helpers.print_centered_text(\"FAIL: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), False)\n self.shared_status.set_fail()\n\n if self.settings.print_stats:\n cybld_helpers.print_centered_text(self.stats.get_command_stats(cmd), None)\n\n if success:\n self.talker.say_success()\n else:\n self.talker.say_fail()\n\n cybld_helpers.print_seperator_lines()\n self.stats.update_command_stats(cmd, success, int(timediff_in_seconds))\n\n if success:\n self.success_callback(cmd)\n else:\n self.fail_callback(cmd)", "def run_commands(self):\n processes = []\n\n i = 0\n ## get list of commands\n commands = self.get_commands()\n cnum = multiprocessing.cpu_count()\n\n while len(commands)>0:\n while len(processes)<cnum-1:\n c = commands.pop()\n i+=1\n print \"command #\",i, c\n ## run commands\n processes.append((i,subprocess.Popen(c, shell=True)))\n\n for j,p in processes:\n if p.poll() is not None:\n print j, \" status: \", p.poll()\n processes.remove((j,p))\n break\n else:\n time.sleep(10)\n return", "def run_subprocess(cmd):\n subprocess.Popen(cmd, stdin =subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,)", "def execute(self):\n self.process = subprocess.Popen(self.command_text_list)\n self.process.wait()", "def _stream_ffmpeg(config: Configuration, ffmpeg_proc: subprocess.Popen, signal: 
RunningSignal):\n while True:\n try:\n yield ffmpeg_proc.stdout.read(config.bytes_per_read)\n except:\n ffmpeg_proc.terminate()\n ffmpeg_proc.communicate()\n signal.stop()\n break", "def running_output(process, outputs):\n state = type(\"State\",\n (object, ),\n {\n \"printed_message\": False,\n \"read_first_byte\": False\n })\n\n def output_printer(file_handle):\n \"\"\"Thread that prints the output of this process.\"\"\"\n character = bytearray()\n while True:\n character += file_handle.read(1)\n try:\n if character:\n if not state.read_first_byte:\n state.read_first_byte = True\n\n if character != \"\\n\":\n IndentedLogger.message(\"\\n\")\n\n # If this fails, then we will just read further characters\n # until the decode succeeds.\n IndentedLogger.message(character.decode(\"utf-8\"))\n state.printed_message = True\n character = bytearray()\n else:\n return\n except UnicodeDecodeError:\n continue\n\n stdout = threading.Thread(target=output_printer, args=(outputs[0], ))\n\n stdout.start()\n stderr_lines = list(outputs[1])\n\n try:\n status = process.wait()\n finally:\n stdout.join()\n\n # Print a new line before printing any stderr messages\n if len(stderr_lines):\n IndentedLogger.message(\"\\n\")\n\n for line in stderr_lines:\n IndentedLogger.message(line.decode(\"utf-8\"))\n state.printed_message = True\n\n if state.printed_message:\n print_message(\"\\n\")\n\n return status", "async def read_console(self):\n while self.proc is not None and self.proc.poll() is None:\n line = await self.loop.run_in_executor(None, self.proc.stdout.readline) # Async readline\n # Parse the command output and get the time in epoch format\n match = re.match(r'\\[([0-9]{2}):([0-9]{2}):([0-9]{2})\\] \\[([^][]*)\\]: (.*)$', line.decode())\n if match is None:\n return\n h, m, s, log, text = match.groups()\n local = time.localtime()\n if h == 23 and local.tm_hour == 0: # In case a line from 23:59 gets parsed at 00:00\n local = time.localtime(time.time()-3600)\n log_t = list(local)\n log_t[3:6] = map(int, (h, m, s))\n log_time = time.mktime(tuple(log_t))\n self.loop.create_task(self.on_line(log_time, log, text))", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "def runSubprocessInThread(command, resultQ, verbose=False):\n result = runSubProcess(command, verbose=verbose)\n resultQ.put(result)", "def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)", "def do_shell(self, line):\n print 'Running shell command:', line\n output = os.popen(line).read()\n print output\n self.last_output = output", "def paexec_out_stream(buffer_size=4096):\n b_data = pkgutil.get_data('pypsexec', 'paexec.exe')\n byte_count = len(b_data)\n for i in range(0, byte_count, buffer_size):\n yield b_data[i:i + buffer_size], i", "def _IterProcessStdoutQueue(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: disable=unused-argument\n if six.PY3:\n import queue\n else:\n import Queue as queue\n import threading\n\n stdout_queue = queue.Queue()\n\n def read_process_stdout():\n # TODO(jbudorick): Pick an appropriate read size here.\n while True:\n try:\n output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size)\n except IOError:\n break\n stdout_queue.put(output_chunk, True)\n if not 
output_chunk and process.poll() is not None:\n break\n\n reader_thread = threading.Thread(target=read_process_stdout)\n reader_thread.start()\n\n end_time = (time.time() + timeout) if timeout else None\n\n try:\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n try:\n s = stdout_queue.get(True, iter_timeout)\n if not s:\n break\n yield s\n except queue.Empty:\n yield None\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()\n reader_thread.join()", "def exec_lines(command_list, mark_flag):\t\n\n\t\t\toutput = \"\"\n\t\t\tcommand_string = \" \".join(command_list)\n\t\t\ttry:\n\t\t\t\tprocess = subprocess.Popen(command_list, stdout=subprocess.PIPE)\n\t\t\t\tpipe = process.stdout\n\t\t\t\toutput = \"\"\n\n\t\t\t\twhile True:\n\n\t\t\t\t\toutput = pipe.readline()\n\t\t\t\t\tif len(output) == 0 : #and (proc.process.poll() is not None ):\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif mark_flag:\n\t\t\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\t\t\toutput = mark + output\n\t\n\t\t\t\t\toutput_queue.put(output)\n\t\n\t\t\t\t# while\n\t\n\t\t\t\tprocess.wait()\n\t\t\t\treturn\n\t\t\t#\n\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += \"LINES \"+cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\n\t\t\tif mark_flag:\n\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\toutput = mark + output + \"\\n\"\n\n\t\t\toutput_queue.put(output)", "def pipe_thru(*commands):\n if commands is not None:\n last_process = None\n for command in commands:\n if last_process is None:\n last_process = Popen(command, stdout=PIPE, stderr=PIPE)\n else:\n last_process = Popen(command, stdin=last_process.stdout, stdout=PIPE, stderr=PIPE)\n System.log_subprocess_output(last_process)", "def runCommand(self, cmd, stdin=None, env=None):\n\n\t mycmd=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t output, error=mycmd.communicate()\n\t while not mycmd.wait():\n\t \t# do stuff\n\t \treturn 0\n\n\n\n\t #if not isList(cmd):\n\t #cmd = shlex.split(cmd)\n\t #opts = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\t #if env:\n\t # opts.update(env=env)\n\t #if stdin:\n\t # opts.update(stdin=subprocess.PIPE)\n\t # stdout, stderr=subprocess.Popen(cmd, **opts).communicate(stdin)\n\t #else :\n\t # stdout, stderr=subprocess.Popen(cmd, **opts).communicate()\n\t #return stdout, stderr", "def run(command: List[str], notebook: None = None) -> int:\n if notebook is None:\n return Popen(command, cwd=_DIRECTORY).wait()\n cmd = Popen(command, cwd=_DIRECTORY, stdout=PIPE, stderr=STDOUT)\n while True:\n line = cmd.stdout.readline()\n if line == b'' and cmd.poll() is not None:\n return cmd.poll()\n print(line.decode('utf-8'), end='')\n raise Exception()", "def local_command(command):\n print('Executing command: {0}\\n'.format(command))\n p = Popen([command], stdout=PIPE, stderr=PIPE, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n break\n line = line.strip()\n print(line)\n stdout, stderr = p.communicate()\n print(stdout)\n print(stderr)", "def 
execute(command, stdout, stderr=sys.stdout):\n # Does tail work to watch stdout to logging service?\n proc = subprocess.Popen(\n command, shell=True, stdout=stdout, stderr=stderr)\n proc.wait()", "def myrun(cmd):\n\tp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\tstdout = []\n\twhile True:\n\t\tline = p.stdout.readline()\n\t\tstdout.append(line)\n\t\t#print line\n\t\tph1 = line[9:19]\n\t\t#print (ph1)\n\t\tif ph1 == 'no carrier':\n\t\t\tmail(\"NOT WORKING\")\n\t\t\ttime.sleep(60)", "def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()", "def run_bash_command(command):\n process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print(output.strip())\n rc = process.poll()\n return rc", "def run_shell_command(command, checkReturnValue=True, verbose=False):\n process = subprocess.Popen(\n command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n bufsize=1)\n outText = \"\"\n\n for line in iter(process.stdout.readline, ''):\n if verbose:\n sys.stdout.write(line)\n outText += line\n\n process.communicate()[0]\n \"\"\"\n returnValue = process.returncode\n if checkReturnValue and (returnValue != 0):\n raise Exception(outText)\n \"\"\"\n return outText", "def do_shell(self, line):\n # print(\"running shell command:\", line)\n sub_cmd = subprocess.Popen(line, shell=True, stdout=subprocess.PIPE)\n output = sub_cmd.communicate()[0].decode('utf-8')\n print(output)\n self.last_output = output", "def run(self):\n while True:\n cmd, flag = self.Q['in'].get()\n if flag == 'stop':\n break\n try:\n if flag == 'process':\n sshCmd = \"ssh -q %s \\\"cd %s; %s\\\"\" % (self.host, self.cwd, cmd)\n fp = os.popen(sshCmd)\n output = fp.read()\n #output = fp.readlines()\n fp.close()\n else:\n raise ValueError, 'Unknown flag %r' % flag\n except:\n # unconditional except is right, since we report *all* errors\n self.reportError()\n else:\n if output:\n self.Q['out'].put(output)", "def RunExternal(command, str_stdin=\"\"):\n\n logging.info(\"Running external command: %s\" % command)\n popen_inst = Popen3(command, True)\n logging.debug(\"stdin = %s\" % str_stdin)\n str_stdout = str_stderr = \"\"\n while 1:\n read_from_child = -1\n if not popen_inst.tochild.closed:\n (rlist, wlist, xlist) = select([popen_inst.fromchild, popen_inst.childerr], \\\n [popen_inst.tochild], [])\n else:\n (rlist, wlist, xlist) = select([popen_inst.fromchild, popen_inst.childerr], [], [])\n\n if popen_inst.fromchild in rlist:\n tmpread = popen_inst.fromchild.read(4096)\n read_from_child = len(tmpread)\n str_stdout += tmpread\n \n if popen_inst.childerr in rlist:\n tmpread = popen_inst.childerr.read(4096)\n read_from_child += len(tmpread)\n str_stderr += tmpread\n \n if popen_inst.tochild in wlist and len(str_stdin) > 0:\n popen_inst.tochild.write(str_stdin[:min( [ len(str_stdin), 4096])])\n str_stdin = str_stdin[min( [ len(str_stdin), 4096]):]\n read_from_child += 1\n elif popen_inst.tochild in wlist:\n popen_inst.tochild.close()\n\n #logging.debug(\"len(str_stdin) = %i, read_from_child = %i, rlist = %s, wlist = %s\", len(str_stdin), read_from_child, rlist, wlist)\n if popen_inst.poll() != -1 and len(str_stdin) == 0 and (read_from_child == -1 or read_from_child == 0):\n break\n \n logging.debug(\"Exit code: %i\", popen_inst.wait())\n logging.debug(\"stdout: %s\", str_stdout)\n logging.debug(\"strerr: %s\", 
str_stderr)\n return str_stdout, str_stderr", "def _runProcess(self, cmd, echoStdout = True, **kwargs):\n # Can't use unicode!\n cmd = str(cmd)\n defaultKwargs = {\n 'universal_newlines': True\n }\n if echoStdout:\n defaultKwargs['stdout'] = subprocess.PIPE\n # Don't buffer the output, but echo it as it comes in regardless\n # of newlines, etc\n defaultKwargs['bufsize'] = 1\n else:\n defaultKwargs['stdout'] = tempfile.TemporaryFile()\n defaultKwargs['stderr'] = subprocess.STDOUT\n defaultKwargs.update(kwargs)\n\n env = os.environ.copy()\n env['PATH'] = self.settings['context_build_path'] + ':' + env['PATH']\n env.update(defaultKwargs.get('env', {}))\n defaultKwargs['env'] = env\n\n p = subprocess.Popen(shlex.split(cmd), **defaultKwargs)\n if echoStdout:\n try:\n import fcntl\n fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)\n except ImportError:\n # Windows?\n pass\n if callable(echoStdout):\n outputCallback = echoStdout\n else:\n outputCallback = lambda l: self.writeOutput(l, end = '')\n\n stdThread = threading.Thread(target = self._dumpStdout,\n args = (p, outputCallback))\n stdThread.start()\n while p.poll() is None:\n if self._shouldStop():\n break\n time.sleep(0.1)\n if p.poll() is None:\n # Exited due to shouldStop\n self.writeOutput(\"\\n\\nAborting tests...\")\n while p.poll() is None:\n try:\n p.terminate()\n except OSError:\n # Died already\n pass\n time.sleep(0.1)\n\n if echoStdout:\n # Finish getting output\n stdThread.join()\n\n if not echoStdout:\n tf = defaultKwargs['stdout']\n tf.seek(0)\n return tf", "def call(seq):\n return subprocess.Popen(seq,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).communicate()[0]", "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output", "def run(self):\n def target():\n # Pass these inputs to STDIN with delays\n for i in self.delayed_inputs:\n if type(i) is int or type(i) is float:\n time.sleep(i)\n elif type(i) is bytes:\n try:\n self.process.stdin.write(i) \n except IOError as e:\n lg.info(\n \"Input: {} failed to write to stdin due to\\n{}\".format(i, e)\n )\n break\n if self.disable_communicate:\n self.process.wait()\n else:\n self.stdout_res, self.stderr_res = self.process.communicate(\n input=self.inputs)\n\n try:\n self.process = Popen(self.command, stdin=self.stdin,\n stdout=self.stdout, stderr=self.stderr,\n start_new_session=True, cwd=self.cwd, env=self.env)\n except OSError:\n lg.error(\"Couldn't Popen command {}\".format(self.command))\n raise\n self.thread = Thread(target=target)\n self.thread.start()", "def run(cmd):\n print('running', cmd)\n proc = sp.Popen([cmd], shell=True)\n proc.wait()\n assert proc.poll() == 0", "def continuous_shell_reader(self):\n\n while not self.thread_stop.is_set():\n out = self.shell_reader()\n\n if not out == \"\":\n print(\"IPC: Received: {}\".format(out))", "def run(self, cmd, out_display=None, err_display=None, **kwargs):\n if os.name == 'nt':\n loop = asyncio.ProactorEventLoop() # for subprocess' pipes on Windows\n asyncio.set_event_loop(loop)\n else:\n loop = asyncio.get_event_loop()\n result = loop.run_until_complete(self.arun(cmd, out_display, err_display, **kwargs))\n return result", "def run_command(command, env=None):\n merged_env = os.environ\n if env:\n 
merged_env.update(env)\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n env=merged_env,\n )\n while True:\n line = process.stdout.readline()\n line = str(line, \"utf-8\")[:-1]\n print(line)\n if line == \"\" and process.poll() is not None:\n break\n\n if process.returncode != 0:\n raise Exception(\n f\"Non zero return code: {process.returncode}\\n\" f\"{command}\\n\\n{process.stdout.read()}\"\n )", "def runSubProcess(command, verbose=0):\n if verbose:\n print(command)\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = process.communicate()\n status = process.poll()\n \n return output.decode('utf-8'), stderr.decode('utf-8'), status", "def execute(command, **kwargs):\n proc = ProcessWrapper(command, **kwargs)\n proc.run()\n return proc.join()", "def runCom(command, cwd=os.getcwd(), terminal=_default_terminal):\n\tproc = subprocess.Popen(terminal+\" -e \"+command, shell=True, cwd=cwd)\n\twhile proc.wait() != 0:\n\t\tpass", "def _subexec(command):\n lcwd = fabric.state.env.get('lcwd', None) or None #sets lcwd to None if it bools to false as well\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=lcwd)\n out, err = process.communicate()\n print \"command : %s \" % command\n print \"out: %s\" % out\n print \"err: %s\" % err", "def _run(\n self, cmd: List[str], capture_output: bool, dir_to_execute: Optional[str]\n ) -> List[str]:\n kwargs: Dict[str, Any] = {\n \"cwd\": dir_to_execute,\n \"capture_output\": capture_output,\n \"universal_newlines\": capture_output, # receive as string not bytes\n }\n cmd_return = run(cmd, **kwargs)\n\n return cmd_return.stdout.strip().splitlines() if capture_output else []", "def execute_command(command):\n p = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n rc = p.wait()\n stdout = []\n stderr = []\n for line in p.stdout.read().decode().splitlines():\n stdout.append(line)\n for line in p.stderr.read().decode().splitlines():\n stderr.append(line)\n p.stdout.close()\n p.stderr.close()\n return (rc, stdout, stderr)", "def run(self):\n logging.debug(\"Executing: {0!r}...\".format(self.command_str))\n\n self.process = subprocess.Popen(\n self.command_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n start = datetime.now()\n result = self.process.communicate()\n end = datetime.now()\n self.time = end - start\n\n if self.verbose:\n stdout, stderr = result\n message = [\"Output:\\n\" \"- returncode:\\n{0}\".format(self.process.returncode)]\n if stdout:\n message.append(\"- stdout:\\n{0}\".format(stdout))\n if stderr:\n message.append(\"- stderr:\\n{0}\".format(stderr))\n logging.debug(\"\\n\".join(message))\n\n self.stdout = stdout\n self.stderr = stderr\n\n return self", "def open_sku_stream(args, verbose=False, dip_home=None, add_env={}):\n logging.info(\"Running SKU with %s\" % args)\n process = _run_sku(args, verbose, dip_home, add_env=add_env)\n stderr_chunks = deque(maxlen=100)\n\n # we need to have a thread read stderr to make sure that\n # the child process does not block when OS buffer is full.\n def read_stderr():\n while True:\n # Read should be nice enough to block.\n data = process.stderr.read()\n if not data:\n # child process closed stderr\n return\n stderr_chunks.append(data)\n stderr_thread = threading.Thread(target=read_stderr)\n # our process shouldn't wait for this thread to terminate.\n stderr_thread.daemon = 
True\n stderr_thread.start()\n error = None\n error_details = None\n exited_regularly = False\n try:\n yield process.stdout\n exited_regularly = True\n except Exception as e:\n error = e\n error_details = sys.exc_info()\n except GeneratorExit as e:\n # generator exit is considered regular.\n # it happens for with the timeout exception.\n exited_regularly = True\n finally:\n #\n # Either something bad happened while reading the pipe,\n # or we finished reading SKU's stdout.\n #\n # Let's check for it terminate and check for some\n # possible errors.\n #\n # Give 10s to the process to put sensible stuff on the stderr\n # terminate and close it.\n #\n # Is the process finished\n return_code = process.poll()\n logging.info(\"Returned with %s\" % return_code)\n if return_code is None:\n # the process is still running.\n # this happens when someone does not consume the process entirely before\n # releasing its stdout.\n #\n # Either explicitly, or because he reached a timeout.\n try:\n process.stdout.close()\n except:\n pass\n return_code = process.poll()\n if return_code is None:\n pass\n # wait 1sec for the program to terminate\n time.sleep(1)\n return_code = process.poll()\n if return_code is None:\n # still not terminated ? kill\n # the subprocess and all of its children.\n os.killpg(process.pid, signal.SIGTERM)\n\n stderr_thread.join(2)\n stderr_join = \"\".join(stderr_chunks)\n\n if exited_regularly and return_code == 0:\n logging.info(\"Exit OK\")\n return\n msg = []\n if return_code:\n msg.append(\"+ SKU Process did not end properly (retcode=%s)\" % return_code)\n if not exited_regularly:\n msg.append(\"+ Reading SKU Process encountered the following Exception.\")\n msg.append(str(error.__class__.__name__) + \": \" + str(error))\n if error_details is not None:\n msg.append(\"Stack\")\n msg.append(traceback.format_exc(error_details))\n else:\n msg.append(\"No stack available ?\")\n msg.append(\"Command was :\")\n msg.append(\" \" + str(args))\n if return_code is not None:\n msg.append(\"Return code %i\" % return_code)\n else:\n msg.append(\"Had to kill the process.\")\n msg.append(\"Std err %s\" % stderr_join)\n raise SKUProcessException(\"\\n\".join(msg), args, return_code, stderr_join, cause=error)", "def pipe_exec(args, stdin=None, cwd=None, env=None):\n count = 0 # int used to manage communication between processes\n commands = [] # listed used to hold all the popen objects\n\n # use the default environment if one is not specified\n if env is None:\n env = os.environ.copy()\n\n # if a single command was passed as a string, make it a list\n if not isinstance(args, list):\n args = [args]\n\n # setup various arguments for popen/popen.communicate, account for optional stdin\n popen_kwargs = {\n \"stdout\": subprocess.PIPE,\n \"stderr\": subprocess.PIPE,\n \"cwd\": cwd,\n \"env\": env,\n }\n popen_stdin_kwargs = {}\n communicate_kwargs = {}\n if stdin is not None:\n popen_stdin_kwargs[\"stdin\"] = subprocess.PIPE\n communicate_kwargs[\"input\"] = stdin.encode()\n\n # handle the first process\n i = args.pop(0)\n commands.append(\n subprocess.Popen(shlex.split(i), **popen_kwargs, **popen_stdin_kwargs)\n )\n\n # handle any additional arguments\n for i in args:\n popen_kwargs[\"stdin\"] = commands[count].stdout\n commands.append(subprocess.Popen(shlex.split(i), **popen_kwargs))\n commands[count].stdout.close()\n count = count + 1\n\n # communicate with first command, ensure it gets any optional input\n commands[0].communicate(**communicate_kwargs)\n\n # communicate with final command, which 
will trigger the entire pipeline\n stdout, stderr = commands[-1].communicate()\n returncode = commands[-1].returncode\n\n return (returncode, stdout, stderr)", "def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )", "def executeCommand(command):\n time.sleep(1)\n #return os.system(command)\n subprocess.Popen(command, shell=True)", "def run_cmd(cmd, callback=None, watch=False, background=False, shell=False):\r\n\r\n if watch and not callback:\r\n raise RuntimeError(\r\n \"You must provide a callback when watching a process.\"\r\n )\r\n\r\n output = None\r\n\r\n if shell:\r\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\r\n else:\r\n proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)\r\n\r\n if background:\r\n # Let task run in background and return pmid for monitoring:\r\n return proc.pid, proc\r\n\r\n if watch:\r\n while proc.poll() is None:\r\n line = proc.stdout.readline()\r\n if line != \"\":\r\n callback(line)\r\n\r\n # Sometimes the process exits before we have all of the output, so\r\n # we need to gather the remainder of the output.\r\n remainder = proc.communicate()[0]\r\n if remainder:\r\n callback(remainder)\r\n else:\r\n output = proc.communicate()[0]\r\n\r\n if callback and output is not None:\r\n return callback(output)\r\n\r\n return output", "def subprocess_nowait(cmd, shell=False, cwd=None, env=None):\n # type: (str, bool, str, dict) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)", "def subprocess(command):\n from sys import executable as python\n from subprocess import Popen,PIPE\n from sys import stderr\n command = \"from %s import *; %s\" % (modulename(),command)\n for attempt in range(0,3):\n try:\n process = Popen([python,\"-c\",command],stdout=PIPE,stderr=PIPE,\n universal_newlines=True)\n break\n except OSError,msg: # [Errno 513] Unknown error 513\n log(\"subprocess: %s\" % msg)\n sleep(1)\n output,error = process.communicate()\n if \"Traceback\" in error: raise RuntimeError(repr(command)+\"\\n\"+error)\n if error: stderr.write(error)\n return output", "def exec_spawn(cmd):\n i = 0\n child = pexpect.spawn(' '.join(cmd))\n print(f\"exec_spawn command: {' '.join(cmd)}\")\n while True:\n try:\n # 0 - '\\n'; 1 - 'ms\\$'\n i = child.expect(['\\n', 'ms\\$'])\n except:\n # two type exception EOF & TIMEOUT\n # in both cases will be done finally & close()\n break\n finally:\n # if i == 0:\n yield child.before\n\n child.close()" ]
[ "0.74790484", "0.7069593", "0.69885415", "0.6885744", "0.6884901", "0.68284523", "0.6759169", "0.67222494", "0.65148145", "0.64916754", "0.63683236", "0.63626593", "0.6347323", "0.62500584", "0.6244789", "0.6235298", "0.6210111", "0.62086326", "0.6171837", "0.61539686", "0.61364967", "0.612783", "0.6114915", "0.60976744", "0.6077193", "0.60758775", "0.6055629", "0.6015052", "0.59943205", "0.59654367", "0.5962343", "0.5957627", "0.5939734", "0.5928363", "0.5914161", "0.5890991", "0.5880341", "0.5852498", "0.583403", "0.5831561", "0.58277434", "0.58241564", "0.5818311", "0.58129513", "0.58127105", "0.5799605", "0.57818866", "0.5772835", "0.57697934", "0.57561326", "0.5746962", "0.5742782", "0.5720252", "0.5706338", "0.5702926", "0.5699965", "0.5697308", "0.5696594", "0.5694533", "0.56908727", "0.5688951", "0.56798136", "0.56726515", "0.5648899", "0.56488925", "0.5622989", "0.5622824", "0.5617148", "0.5616643", "0.5615161", "0.56033725", "0.55970746", "0.5592428", "0.55910087", "0.5589419", "0.5577197", "0.5574912", "0.55649513", "0.5563364", "0.5553133", "0.55502814", "0.55447656", "0.5540802", "0.55344266", "0.55261105", "0.55118585", "0.55049294", "0.550053", "0.54913926", "0.548177", "0.548083", "0.5465895", "0.5463503", "0.5450167", "0.5447705", "0.54428136", "0.54317796", "0.54303485", "0.5428468", "0.5426016" ]
0.6275099
13
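The negative snippets collected in the record above are all variations on one pattern: spawn a subprocess, stream its stdout line by line while it runs, and check the return code at the end. A minimal, self-contained sketch of that shared pattern follows; the function name stream_command and the example command are illustrative choices, not taken from any single snippet.

    import subprocess

    def stream_command(cmd):
        """Yield decoded stdout lines from cmd as the process produces them."""
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,      # capture stdout
            stderr=subprocess.STDOUT,    # fold stderr into the same stream
            universal_newlines=True,     # text mode, so lines arrive as str
            bufsize=1,                   # line-buffered in text mode
        )
        # Iterating the pipe blocks until each new line arrives, so callers
        # see output as it is produced rather than all at once at the end.
        for line in proc.stdout:
            yield line.rstrip("\n")
        proc.stdout.close()
        returncode = proc.wait()
        if returncode:
            raise subprocess.CalledProcessError(returncode, cmd)

    # Example use: print a command's output as it streams in.
    # for line in stream_command(["ping", "-c", "3", "localhost"]):
    #     print(line)

Most of the listed negatives differ from this sketch only in how they handle the two hard parts: reading stdout and stderr without blocking (threads plus queues, select, or asyncio) and deciding what to do with a non-zero exit code.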
Create an AutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='identity', out_transfer='identity', loss='squared', tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(AutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, loss, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None self.parameters.data[:] = np.random.standard_normal( self.parameters.data.shape).astype(theano.config.floatX) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_encoder(self):\n raise NotImplementedError", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder", "def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss=['binary_crossentropy',\n 'binary_crossentropy'],\n loss_weights=[self.reconst_weight,\n self.adv_weight])\n else:\n autoencoder = Model(self.encoder.input,\n self.decoder(self.encoder.output))\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss='mse')\n return autoencoder", "def build_autoencoder(self):\n # first build the encoder model\n inputs = Input(shape=(self.state_dim, ), name='state')\n feature_size = 32\n x = Dense(256, activation='relu')(inputs)\n x = Dense(128, activation='relu')(x)\n feature = Dense(feature_size, name='feature_vector')(x)\n\n # instantiate encoder model\n self.encoder = Model(inputs, feature, name='encoder')\n self.encoder.summary()\n plot_model(self.encoder,\n to_file='encoder.png', \n show_shapes=True)\n\n # build the decoder model\n feature_inputs = Input(shape=(feature_size,), \n name='decoder_input')\n x = Dense(128, activation='relu')(feature_inputs)\n x = Dense(256, activation='relu')(x)\n outputs = Dense(self.state_dim, activation='linear')(x)\n\n # instantiate decoder model\n self.decoder = Model(feature_inputs, \n outputs, \n name='decoder')\n self.decoder.summary()\n plot_model(self.decoder, \n to_file='decoder.png', \n show_shapes=True)\n\n # autoencoder = encoder + decoder\n # instantiate autoencoder model\n self.autoencoder = Model(inputs, \n self.decoder(self.encoder(inputs)),\n name='autoencoder')\n self.autoencoder.summary()\n plot_model(self.autoencoder, \n to_file='autoencoder.png', \n show_shapes=True)\n\n # Mean Square Error (MSE) loss function, Adam optimizer\n self.autoencoder.compile(loss='mse', optimizer='adam')", "def registerEncoder (encoder):\n assert False, \"TODO:\"", "def build_encoder(opt, embeddings):\n enc_type = opt.encoder_type if opt.model_type == \"text\" else opt.model_type\n return str2enc[enc_type].from_opt(opt, embeddings)", "def encoder(self) -> json.JSONEncoder:\n return 
encoder_from_string(self.doc.get('encoder'))", "def _create_encoder(self):\n logger.debug(\"GumBolt::_create_encoder\")\n return HierarchicalEncoder(\n input_dimension=self._flat_input_size,\n n_latent_hierarchy_lvls=self.n_latent_hierarchy_lvls,\n n_latent_nodes=self.n_latent_nodes,\n n_encoder_layer_nodes=self.n_encoder_layer_nodes,\n n_encoder_layers=self.n_encoder_layers,\n skip_latent_layer=False,\n smoother=\"Gumbel\",\n cfg=self._config)", "def __init__(self, autoencoder, latent_space):\r\n self._autoencoder = autoencoder\r\n self._latent_space = latent_space", "def build_full_conv_autoencoder():\n input_img = Input(shape=(84, 84, 3))\n\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c3')(x)\n encoded = MaxPooling2D((3, 3), border_mode='same')(x)\n\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c4')(encoded)\n x = UpSampling2D((3, 3))(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c5')(x)\n x = UpSampling2D((2, 2))(x)\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c6')(x)\n x = UpSampling2D((2, 2))(x)\n decoded = Convolution2D(3, 4, 4, activation='sigmoid', border_mode='same', name='c7')(x)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder", "def autoencoder(self, data):\n with tf.variable_scope(\"autoencoder\"):\n latent = self.encoder(data)\n _, output = self.decoder(latent)\n\n return output, latent", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n x = tf.keras.layers.Input(shape=(1,), dtype=tf.string)\n\n h = tf.keras.layers.Lambda(UniversalEmbedding, output_shape=(512,))(x)\n\n return Model(inputs=x, outputs=h, name='encoder')", "def build_encoder(opt, embeddings, structure_embeddings):\n return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings, structure_embeddings)", "def build(self) -> KM.Model:\n\n # For decoder number of features in opposite order of encoder\n decoder_features = self.encoder_features.copy()\n decoder_features.reverse()\n\n # build the encoder model\n self.encoder_model = self.encoder(\n features=self.encoder_features, name=\"encoder\"\n )\n\n # build the decoder model\n decoder = self.decoder(features=decoder_features, name=\"decoder\")\n\n input_tensor = KL.Input(\n shape=(32, 32, 3)\n ) # shape of images for cifar10 dataset\n\n # Encode the images\n encoded = self.encoder_model(input_tensor)\n # Decode the image\n decoded = decoder(encoded[-1])\n\n return KM.Model(inputs=input_tensor, outputs=decoded, name=\"AutoEncoder\")", "def build_conv_combo_autoencoder():\n input_img = Input(shape=(84, 84, 1))\n\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((3, 3), border_mode='same')(x)\n x = Flatten()(x)\n encoded = Dense(512, activation='relu')(x)\n\n encoded_input = Input((512,))\n d1 = Dense(9408, activation='relu')(encoded_input)\n d2 = Reshape((14, 14, 48))(d1)\n d3 = Convolution2D(48, 4, 4, 
activation='relu', border_mode='same', name='c5')(d2)\n d4 = UpSampling2D((3, 3))(d3)\n d5 = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c6')(d4)\n d6 = UpSampling2D((2, 2))(d5)\n decoded = Convolution2D(1, 4, 4, activation='relu', border_mode='same', name='c9')(d6)\n\n encoder = Model(input=input_img, output=encoded, name='conv_encoder')\n decoder = Model(input=encoded_input, output=decoded, name='conv_decoder')\n\n autoencoder = Sequential(name='full_conv_autoencoder')\n autoencoder.add(encoder)\n autoencoder.add(decoder)\n\n encoder.compile(optimizer='adam', loss='mse')\n encoder.summary()\n decoder.compile(optimizer='adam', loss='mse')\n decoder.summary()\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder, encoder, decoder", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n n_stacks = len(dims) - 1\n # input\n input_img = Input(shape=(dims[0],), name='input')\n x = input_img\n # internal layers in encoder\n for i in range(n_stacks-1):\n x = Dense(dims[i + 1], activation=act, kernel_initializer=init, name='encoder_%d' % i)(x)\n\n # hidden layer\n encoded = Dense(dims[-1], kernel_initializer=init, name='encoder_%d' % (n_stacks - 1))(x) # hidden layer, features are extracted from here\n\n x = encoded\n # internal layers in decoder\n for i in range(n_stacks-1, 0, -1):\n x = Dense(dims[i], activation=act, kernel_initializer=init, name='decoder_%d' % i)(x)\n\n # output\n x = Dense(dims[0], kernel_initializer=init, name='decoder_0')(x)\n decoded = x\n return Model(inputs=input_img, outputs=decoded, name='AE'), Model(inputs=input_img, outputs=encoded, name='encoder')", "def __call__(self, *args, **kwargs):\n return self.encoder_model(args[0])", "def __call__(self, *args, **kwargs):\n return self.encoder_model(args[0])", "def build(\n n_iter=500, encoding_dim=3, depth=2, nh=20, activation='linear',\n initial_learning_rate=1e-3, solver='Adam', batch_size=32,\n random_state=10, early_stopping=False, patience=10, lamda=1e-1,\n knob_cols=None, auto_refit=True, max_refit_attempts=10):\n assert knob_cols is not None\n\n encoder_hidden_layers = [int(nh / (2**i)) for i in range(depth - 1)]\n if len(encoder_hidden_layers) > 0:\n if 0 in encoder_hidden_layers or encoder_hidden_layers[-1] < encoding_dim:\n return None\n decoder_hidden_layers = encoder_hidden_layers[::-1]\n hidden_layer_sizes = encoder_hidden_layers + \\\n [encoding_dim] + decoder_hidden_layers\n activations = [activation] * 2 * depth\n ae = FancyAutoEncoder(\n n_iter, hidden_layer_sizes, activations, initial_learning_rate,\n solver=solver, batch_size=batch_size, random_state=random_state,\n early_stopping=early_stopping, patience=patience, lamda=lamda,\n knob_cols=knob_cols, auto_refit=auto_refit,\n max_refit_attempts=max_refit_attempts)\n return ae", "def __init__(self, **kwargs):\n var_defaults = {\n \"bias_init\" : 'zeros',\n \"weight_init\" : [0.0, 0.1],\n \"seed\" : None,\n \"num_hid_nodes\" : 32,\n \"activations\": 'sigmoid',\n \"lr\" : 0.1,\n \"decay\": 0,\n \"momentum\" : 0,\n \"nesterov\" : False,\n \"loss\" : 'mean_squared_error',\n \"epochs\" : 10,\n \"batch_size\" : 256,\n \"verbose\" : 2\n }\n for var, default in var_defaults.items():\n setattr(self, var, kwargs.get(var, default))\n self.autoencoder = Sequential()", "def build_encoder(shift):\n ### TODO.", "def __init__(self, encoder_base: int = 2, encoder_precision: int = 16) -> None:\n self.encoder_base = encoder_base\n self.encoder_precision = encoder_precision", "def 
simple_autoencoder(X_train_input, X_test_input, n_components = 100):\r\n ncol = X_train_input.shape[1]\r\n input_dim = Input(shape = (ncol,))\r\n \r\n # Define the number of encoder dimensions\r\n encoding_dim = n_components\r\n \r\n # Define the encoder layer\r\n encoded = Dense(encoding_dim, activation = 'relu')(input_dim)\r\n \r\n # Define the decoder layer\r\n decoded = Dense(ncol, activation = 'tanh')(encoded)\r\n \r\n # Combine the encoder and decoder into a model\r\n autoencoder = Model(inputs = input_dim, outputs = decoded)\r\n \r\n # Configure and train the autoencoder\r\n autoencoder.compile(optimizer = 'adam', loss = 'mse')\r\n autoencoder.fit(X_train_input, X_train_input, epochs = 50, batch_size = 128, shuffle = True,\r\n validation_data = (X_test_input, X_test_input),verbose = 1)\r\n \r\n # Use the encoder to extract the reduced dimension from the autoencoder\r\n encoder = Model(inputs = input_dim, outputs = encoded)\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def train_autoencoder(self, x_train, x_test):\n # train the autoencoder\n batch_size = 32\n self.autoencoder.fit(x_train,\n x_train,\n validation_data=(x_test, x_test),\n epochs=10,\n batch_size=batch_size)", "def encoder(self, inputs):\n pass", "def __init__(self, encoder=None, **kwargs):\n if encoder is None:\n encoder = dumps\n self.json_settings = kwargs\n self._encoder = encoder", "def test_init(self):\n default_encoder_type = type(Encoder())\n\n payload = Payload()\n self.assertIsInstance(payload.encoder, default_encoder_type)\n\n json_encoder = JSONEncoder()\n payload = Payload(encoder=json_encoder)\n self.assertEqual(payload.encoder, json_encoder)", "def auto_encoder(data: np.ndarray) -> np.ndarray:\n input_img = Input(shape=(784,))\n encoded = Dense(128, activation='relu')(input_img)\n encoded = Dense(64, activation='relu')(encoded)\n encoded = Dense(32, activation='relu')(encoded)\n\n decoded = Dense(64, activation='relu')(encoded)\n decoded = Dense(128, activation='relu')(decoded)\n decoded = Dense(784, activation='sigmoid')(decoded)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.fit(x_train, x_train,\n epochs=100,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test))", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n encoder = None\n if encoder_type == \"text\":\n if self.text_encoding == \"tfidf\":\n encoder = self._fit_tfidf(\n frame[prop], max_dim=self.text_encoding_max_dimension)\n elif self.text_encoding == \"word2vec\":\n encoder = self._fit_word2vec(frame[prop])\n elif encoder_type == \"category\":\n encoder = self._fit_multibin(frame[prop])\n elif encoder_type == \"numeric\":\n if self.standardize_numeric:\n encoder = self._fit_standard_scaler(\n frame[prop],\n missing_numeric=self.missing_numeric,\n imputation_strategy=self.imputation_strategy)\n return encoder", "def encode(\n cls: Type[\"DataDocument\"], encoding: str, data: D, **kwargs: Any\n ) -> \"DataDocument[D]\":\n # Dispatch encoding\n blob = lookup_serializer(encoding).dumps(data, **kwargs)\n\n inst = cls(blob=blob, encoding=encoding)\n inst._cache_data(data)\n return inst", "def instantiate_algorithm(args):\n if args.algorithm == 'xor_encoding':\n return XorEncoding(block_size=args.block_size, intensity=args.intensity)\n\n raise RuntimeError('Algorithm type not detected')", "def __init__(self, n_inpt, n_hidden, 
hidden_transfer='sigmoid',\n out_transfer='identity', reconstruct_loss='squared',\n c_jacobian=1, tied_weights=True, batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(ContractiveAutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer,\n reconstruct_loss, c_jacobian,\n tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def create(cls: Type[Sequence], sequence: bytes, alphabet: Alphabet) -> Sequence:\n return cls(lib.imm_seq_create(sequence, alphabet.imm_abc), alphabet)", "def __init__(self, rpc, encoder):\n super(EncoderModule, self).__init__(rpc, 'encoder', encoder)", "def deep_autoencoder(X_train_input, X_test_input, encoding_dim = 20):\r\n input_dim = X_train_input.shape[1]\r\n \r\n autoencoder = Sequential()\r\n \r\n # Encoder Layers\r\n autoencoder.add(Dense(4 * encoding_dim, input_shape=(input_dim,), activation='relu'))\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(encoding_dim, activation='relu'))\r\n \r\n # Decoder Layers\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(4 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(input_dim, activation='sigmoid'))\r\n \r\n autoencoder.compile(optimizer='adam', loss='binary_crossentropy')\r\n autoencoder.fit(X_train_input, X_train_input,\r\n epochs=50,\r\n batch_size=256,\r\n validation_data=(X_test_input, X_test_input))\r\n \r\n input_img = Input(shape=(input_dim,))\r\n encoder_layer1 = autoencoder.layers[0]\r\n encoder_layer2 = autoencoder.layers[1]\r\n encoder_layer3 = autoencoder.layers[2]\r\n encoder = Model(input_img, encoder_layer3(encoder_layer2(encoder_layer1(input_img))))\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def __init__(self, encoding=None, object_hook=None):\n self.encoding = encoding\n self.object_hook = object_hook", "def create(\n identifier: Optional[str] = None, name: Optional[str] = None,\n primary_key: Optional[List[int]] = None, description: Optional[str] = None,\n encoder: Optional[str] = None, decoder: Optional[str] = None,\n serializer: Union[Dict, Callable] = None\n ):\n # Create a unique identifier for the new archive.\n if identifier is None:\n identifier = util.get_unique_identifier()\n # Create the archive descriptor.\n doc = {'id': identifier, 'createdAt': util.current_time()}\n if name is not None:\n doc['name'] = name\n if description is not None:\n doc['description'] = description\n if primary_key is not None:\n doc['primaryKey'] = primary_key\n if encoder is not None:\n doc['encoder'] = encoder\n if decoder is not None:\n doc['decoder'] = decoder\n if serializer is not None:\n doc['serializer'] = serializer if isinstance(serializer, dict) else serializer()\n return ArchiveDescriptor(doc)", "def build_encoder(img_shape):\n input_img = Input(shape=(img_shape)) \n x = Conv2D(16, (3, 3), activation='tanh', padding='same')(input_img)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.5)(x)\n x = AveragePooling2D((2, 2), 
padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((3, 3), padding='same')(x)\n x = Flatten()(x)\n encoded = Dense(540, activation='tanh')(x)\n Encoder=Model(input_img,encoded,name='encoder')\n return input_img,encoded,Encoder", "def edge_encoder_construct(cfg, model_name='edge_encoder', **kwargs):\n encoders = edge_encoder_dict()\n encoder_cfg = cfg[model_name]\n name = encoder_cfg.get('name', 'geo')\n if not name in encoders:\n raise Exception(\"Unknown edge encoder name provided:\", name)\n\n return encoders[name](encoder_cfg, **kwargs)", "def __init__(\n self, filename: str, compression: Optional[str] = None,\n encoder: Optional[json.JSONEncoder] = None\n ):\n # Use the default JSONEncoder if no encoder is given\n self.encoder = encoder # if encoder is not None else DefaultEncoder\n # Open output file for writing.\n self.fout = util.outputstream(filename, compression=compression)", "def encode(self, input_):\n return self.encoder(input_)", "def encode(image):\n from encoder import launch\n launch(image)", "def setup_encoder_initializer(self):\n if self.mode != \"inference\":\n # Restore inception variables only.\n saver = tf.train.Saver(self.autoencoder_variables)\n\n def restore_fn(sess):\n tf.logging.info(\"Restoring Autoencoder variables from checkpoint dir %s\",\n self.config.autoencoder_checkpoint_dir)\n saver.restore(sess, tf.train.latest_checkpoint(\n self.config.autoencoder_checkpoint_dir))\n\n if self.use_pretrained_ae:\n self.init_fn = restore_fn\n else:\n self.init_fn = None", "def encoder(self) -> IntegerEncoder:\n\n return self._encoder", "def get_trainer(self):\n return AutoEncoderTrainer", "def get_encoder(encoding):\n if encoding == Encoding.V1_THRIFT:\n return _V1ThriftEncoder()\n if encoding == Encoding.V1_JSON:\n return _V1JSONEncoder()\n if encoding == Encoding.V2_JSON:\n return _V2JSONEncoder()\n if encoding == Encoding.V2_PROTO3:\n return _V2ProtobufEncoder()\n raise ZipkinError(\"Unknown encoding: {}\".format(encoding))", "def register_for_auto_class(cls, auto_class=\"FlaxAutoModel\"):\n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class", "def generate(name, code, writer=None, output=None, writer_options=None):\n from . 
import factory\n\n options = writer_options or {}\n barcode = factory.create_instance(name, code, writer)\n\n if isinstance(output, string_types):\n return barcode.save(output, options)\n else:\n barcode.write(output, options)", "def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder':\n encoder_seq = EncoderSequence([], dtype=config.dtype)\n cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,\n config.model_size,\n config.max_seq_len_source,\n fixed_pos_embed_scale_up_input=True,\n fixed_pos_embed_scale_down_positions=False,\n prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)\n encoder_seq.append(cls, **encoder_params)\n if config.conv_config is not None:\n encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,\n prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)\n\n encoder_seq.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX)\n\n return encoder_seq", "def resnet_autoencoder_v1(encoder_depth, decoder_depth, width_multiplier, metric_channels, # noqa\n cifar_stem=False, data_format='channels_last',\n dropblock_keep_probs=None, dropblock_size=None,\n mask_augs=0., greyscale_viz=False, skip=True):\n encoder = resnet_encoder_v1(encoder_depth, \n width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n decoder = resnet_decoder_v1(decoder_depth=decoder_depth,\n encoder_depth=encoder_depth,\n width_multiplier=width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n metric = learned_metric_v1(data_format=data_format, metric_channels=metric_channels) \n \n return resnet_autoencoder_v1_generator(\n encoder=encoder,\n decoder=decoder,\n metric=metric,\n skip=skip,\n mask_augs=mask_augs,\n greyscale_viz=greyscale_viz,\n data_format=data_format)", "def testEncoder(self):\n params = copy.copy(self.typical_instance)\n params.prob_f = 0.5\n params.prob_p = 0.5\n params.prob_q = 0.75\n\n rand_funcs = rappor.SimpleRandFuncs(params, MockRandom())\n rand_funcs.cohort_rand_fn = lambda a, b: a\n e = rappor.Encoder(params, 0, rand_funcs=rand_funcs)\n\n cohort, bloom_bits_irr = e.encode(\"abc\")\n\n self.assertEquals(0, cohort)\n self.assertEquals(0x000ffff, bloom_bits_irr)", "def node_encoder_construct(cfg, model_name='node_encoder', **kwargs):\n encoders = node_encoder_dict()\n encoder_cfg = cfg[model_name]\n name = encoder_cfg.get('name', 'geo')\n if not name in encoders:\n raise Exception(\"Unknown node encoder name provided:\", name)\n\n return encoders[name](encoder_cfg, **kwargs)", "def test_autoencoder():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = read_images(dataset_path, labels_list=['normal', 'glare_small'], test_size=0.25)\n X_train_im = []\n for im in X_train:\n img = preprocess_image(im)\n img = np.array(img)\n img = img.flatten()\n X_train_im.append(img)\n X_train_im = np.array(X_train_im)\n\n X_test_im = []\n for im in X_test:\n img = preprocess_image(im)\n img = np.array(img)\n img = img.flatten()\n X_test_im.append(img)\n X_test_im = np.array(X_test_im)\n\n autoenc = VAE(encoder_neurons=[16, 32], decoder_neurons=[32, 16], latent_dim=32, epochs=50)\n autoenc.fit(X_train_im, y_train)\n y_pred = autoenc.predict(X_test_im)\n y_test_scores = autoenc.decision_function(X_test_im)\n conf_mtx_test = confusion_matrix(y_test, y_pred, labels=[0, 
1])\n evaluate_print('vae', y_test, y_test_scores)\n print(conf_mtx_test)", "def base_encoder(cls, data, init_encoder, downsize_encoder, input_encoder):\n #todo: maybe do positional encoding before passing to init_encoder\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data)", "def __init__(self, coder):\n self.coder = coder", "def set_encoder(attribute: str, encoder: typing.Callable) -> None:\n if encoder is not None:\n __attribute_decoders[attribute.lower()] = encoder", "def create_automaton(self, *, name: t.Optional[str] = None) -> Automaton:\n automaton = Automaton(self, name=name)\n self._automata.add(automaton)\n return automaton", "def __init__(self, shift):\n encoder, decoder = self._make_coder_dicts(shift)\n self._encoder = encoder\n self._decoder = decoder", "def __init__(self, shift):\n encoder, decoder = self._make_coder_dicts(shift)\n self._encoder = encoder\n self._decoder = decoder", "def __init__(self, n_feature, n_hidden, n_output):\n super(simpleAE, self).__init__()\n self.Encoder = Encoder(n_feature, n_hidden)\n self.Decoder = Decoder(n_hidden, n_output)\n self.model_name = 'simpleAE'", "def load_encoder(checkpoint, encoder_cls,\n HIDDEN_SIZE, embedding, ENCODER_N_LAYERS, DROPOUT, encoder_name, bidirectional):\n model = encoder_cls(HIDDEN_SIZE, embedding, ENCODER_N_LAYERS, DROPOUT,\n gate=encoder_name, bidirectional=bidirectional)\n model.load_state_dict(checkpoint['en'])\n model.eval()\n return model", "def _get_encoder(self, params):\n # TODO: refactor method\n\n # Check if encoder was already trained with these parameters\n encoder_id = Encoder.generate_id(params)\n self._logger.debug(\"Retrieving encoder model: \" + str(encoder_id))\n\n # Check if matching encoder is in memory\n if encoder_id in self._trained_encoders:\n self._logger.debug(\"Loading encoder from in-memory cache: \" + str(encoder_id))\n return self._trained_encoders[encoder_id]\n else:\n # Check if matching encoder on disk\n prev_model = None\n if self._encoder_dir is not None:\n prev_model = Encoder.load_if_exists(self._encoder_dir, encoder_id)\n\n if prev_model is not None:\n self._logger.debug(\"Loaded encoder from disk-cache: \" + str(encoder_id))\n encoder = Encoder(params)\n docs = self._get_docs(encoder, params['doc2vec_docs'])\n encoder.set_documents(docs)\n encoder.set_model(prev_model)\n self._trained_encoders[encoder_id] = encoder\n return encoder\n else:\n self._logger.debug(\"Training new encoder model: \" + str(encoder_id))\n encoder = Encoder(params)\n docs = self._get_docs(encoder, params['doc2vec_docs'])\n encoder.set_documents(docs)\n encoder.train()\n self._trained_encoders[encoder_id] = encoder\n self._logger.debug(\"Added encoder to cache: \" + str(encoder_id))\n\n # Save encoder\n if self._encoder_dir is not None:\n encoder.save(self._encoder_dir + \"/\" + encoder_id)\n return encoder", "def Init(*args, **kwargs):\n return _gdi_.EncodingConverter_Init(*args, **kwargs)", "def _create_encoder(self):\n\n def _init_weights(layer):\n \"\"\"Initializes the weights of a layer based on type.\"\"\"\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except AttributeError:\n pass\n\n kernel_size = 5\n pad = 2\n input_channels = 1\n first_conv_channels = 6\n second_conv_channels = 16\n max_pool_kernel = 2\n linear_size = 120\n n_pixels = 7\n\n encoder = nn.Sequential(\n nn.Conv2d(\n input_channels, first_conv_channels, 
kernel_size, padding=pad),\n nn.BatchNorm2d(first_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n nn.Conv2d(\n first_conv_channels, second_conv_channels, kernel_size,\n padding=pad),\n nn.BatchNorm2d(second_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n utils.Flatten(),\n nn.Linear(n_pixels * n_pixels * self.n_digits * second_conv_channels,\n linear_size),\n nn.BatchNorm1d(linear_size),\n nn.ReLU(),\n nn.Linear(linear_size, self.embedding_dim),\n nn.Linear(self.embedding_dim, self.n_classes, bias=False),\n )\n\n encoder.apply(_init_weights)\n\n # This is the empirical approximation for initialization the vMF\n # distributions for each class in the final layer.\n if self.use_vmf:\n utils.vmf_class_weight_init(encoder[-1].weight, self.kappa_confidence,\n self.embedding_dim)\n\n return encoder", "def _apply_encoder(self, frame, prop, encoder, encoder_type=\"category\"):\n pass", "def get_or_make_label_encoder(params, problem, mode, label_list=None, zero_class=None):\n problem_path = params.ckpt_dir\n create_path(problem_path)\n le_path = os.path.join(problem_path, '%s_label_encoder.pkl' % problem)\n\n if mode == 'train' and not os.path.exists(le_path):\n label_encoder = LabelEncoder()\n\n label_encoder.fit(label_list, zero_class=zero_class)\n\n label_encoder.dump(le_path)\n\n else:\n label_encoder = LabelEncoder()\n label_encoder.load(le_path)\n\n return label_encoder", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def encoder_from_string(encoder: str) -> json.JSONEncoder:\n return util.import_obj(encoder) if encoder else None", "def __init__(self, params, model, name=\"ds2_encoder\", mode='train'):\n super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)", "def __init__(self, AEs):\n \"\"\" the default view of the stacked autoencoders\"\"\"\n sa = AEs\n \"\"\" the encoder view of the stacked autoencoders \"\"\"\n ec = Cat([a.ec for a in sa])\n \"\"\" the decoder view of the stacked autoencoders \"\"\"\n dc = Cat([a.dc for a in reversed(sa)])\n\n self.sa = sa # default view\n self.ec = ec # encoder view\n self.dc = dc # decoder view\n\n nts = []\n nts.extend(ec)\n nts.extend(dc)\n super(SAE, self).__init__(nts)", "def build_label_transform():\n\n return NALabelEncoder()", "def make(self, **kwargs):\n return bytes()", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def _define_encoder(self):\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\n nn.SELU(),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.SELU(),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.SELU(),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\n )", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n pass", "def fit_autoencoder(df, latent_dim=8):\n df = np.asarray(df)\n input_dim = df.shape[1]\n\n class 
Autoencoder(Model):\n def __init__(self, latent_dim):\n super(Autoencoder, self).__init__()\n self.latent_dim = latent_dim\n self.encoder = tf.keras.Sequential([\n layers.Dense(input_dim, activation='tanh'),\n layers.Dense(20, activation='tanh'),\n layers.Dense(15, activation='tanh'),\n layers.Dense(latent_dim, activation='elu')\n ])\n self.decoder = tf.keras.Sequential([\n layers.Dense(20, activation='tanh'),\n layers.Dense(input_dim, activation='tanh')\n ])\n\n def call(self, x):\n encoded = self.encoder(x)\n decoded = self.decoder(encoded)\n return decoded\n\n autoencoder = Autoencoder(latent_dim)\n autoencoder.compile(optimizer='adam',\n loss='mse')\n autoencoder.fit(df,\n df,\n epochs=50 if DEBUG else 250,\n shuffle=True,\n batch_size=256,\n validation_split=.2 if DEBUG else .1)\n return autoencoder", "def __init__(\n self,\n encoder: EncoderMnist,\n decoder: DecoderMnist,\n latent_dim: int,\n input_pert: callable,\n name: str = \"model\",\n loss_f: callable = nn.MSELoss(),\n ):\n super(AutoEncoderMnist, self).__init__()\n self.latent_dim = latent_dim\n self.encoder = encoder\n self.decoder = decoder\n self.input_pert = input_pert\n self.name = name\n self.loss_f = loss_f\n self.checkpoints_files = []\n self.lr = None", "def autoencoder(input_dims, filters, latent_dims):\n e_inputs = keras.Input(input_dims)\n d_inputs = keras.Input(latent_dims)\n\n encoder = e_inputs\n for f in filters:\n encoder = keras.layers.Conv2D(\n f, (3, 3), activation='relu', padding='same')(encoder)\n encoder = keras.layers.MaxPooling2D((2, 2), padding='same')(encoder)\n\n decoder = d_inputs\n for i in reversed(range(1, len(filters))):\n decoder = keras.layers.Conv2D(\n filters[i], (3, 3), activation='relu', padding='same')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n\n decoder = keras.layers.Conv2D(\n filters[0], (3, 3), activation='relu', padding='valid')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n decoder = keras.layers.Conv2D(input_dims[-1], (3, 3),\n activation='sigmoid',\n padding='same')(decoder)\n\n encoder = keras.Model(e_inputs, encoder)\n decoder = keras.Model(d_inputs, decoder)\n\n auto = keras.Model(e_inputs, decoder(encoder(e_inputs)))\n auto.compile(optimizer=\"adam\", loss=\"binary_crossentropy\")\n\n return encoder, decoder, auto", "def fit_transform(self):\n if self.enc_types == \"label\":\n return self._label_encoding()\n elif self.enc_types == \"ohe\":\n return self._one_hot_encoder()\n elif self.enc_types == \"binary\":\n return self._binarization()\n else:\n raise Exception(\"Encoding type not understood\")", "def make_encoder_ph(self):\n info = self._module.get_input_info_dict('encode')['x']\n return tf.placeholder(dtype=info.dtype, shape=info.get_shape())", "def autoencoder_model(optimizer, learning_rate, \n filter_block1, kernel_size_block1, \n filter_block2, kernel_size_block2, \n filter_block3, kernel_size_block3, \n filter_block4, kernel_size_block4, \n activation_str, padding):\n # Input Tensors - fully conv\n input_img = Input(shape=(None, None, 1))\n # Encoder Part\n x = Conv2D(filters=filter_block1, kernel_size=kernel_size_block1, padding=padding)(input_img) # 420x540x32\n x = Activation('relu')(x)\n x = MaxPooling2D()(x) # 210x270x32\n encoded = Conv2D(filters=filter_block2, kernel_size=kernel_size_block2, padding=padding)(x) # 105x135x32\n # Decoder Part\n x = Conv2D(filters=filter_block3, kernel_size=kernel_size_block3, padding=padding)(encoded) # 210x270x32\n x = Activation('relu')(x)\n x = UpSampling2D()(x) # 420x540x32\n decoded 
= Conv2D(filters=filter_block4, kernel_size=kernel_size_block4, activation='sigmoid', padding=padding)(x) # 420x540x1\n\n # Build the model\n autoencoder = Model(inputs=input_img, outputs=decoded)\n opt = optimizer(learning_rate=learning_rate)\n autoencoder.compile(loss=\"binary_crossentropy\", optimizer=opt)\n autoencoder.summary()\n return autoencoder", "def build_encoder(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tctx = proj[0][-1]\n\n\treturn embedding, x_mask, ctx", "def encoderRouter(self, encoder):\n pass", "def beam_init(self, encoder_outputs, K):\n encoder_outputs = TransformerEncoderOutput(\n repeat_batch(encoder_outputs.output, K),\n repeat_batch(encoder_outputs.src_mask, K)\n )\n return encoder_outputs", "def encode(self):\n \n assert False, \"Not implemented.\"", "def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:\n params = dict(kwargs)\n if infer_hidden:\n params['num_hidden'] = self.get_num_hidden()\n\n sig_params = inspect.signature(cls.__init__).parameters\n if 'dtype' in sig_params and 'dtype' not in kwargs:\n params['dtype'] = self.dtype\n encoder = cls(**params)\n self.encoders.append(encoder)\n return encoder", "def SemiAutoencoder(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder 
= Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def encode(self): # pragma: no cover\n pass", "def default_encoder(obj):\n\n if isinstance(obj, np.ndarray):\n data_b64 = base64.b64encode(np.ascontiguousarray(obj).data)\n return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape)\n\n if isinstance(obj, dict):\n result = dict()\n for k, v in obj.items():\n result[k] = default_encoder(v)\n\n return result\n\n return obj", "def build_encoder(shift):\n ### TODO.\n while True:\n if shift >= 0 and shift < 27 :\n break\n else:\n print \"That is not a valid input.\"\n print\n final_dict = build_coder(shift)\n return final_dict", "def encoder(self, value):\n self._tensor.encoder = value", "def getencdec():\n return (json.JSONEncoder(), json.JSONDecoder())", "def add_ae(self, model, dataset, latent_options, model_paths, pre_process=None):\n ae = autoencoder(self.app, model, dataset, latent_options, model_paths, pre_process)\n self.body_children.append(ae)", "def encode(self):\n with tf.name_scope(\"encode\"):\n self.encoder_inputs = tf.layers.dense(\n inputs=self.encoder_inputs,\n units=self.options['hidden_size'], activation=None, use_bias=True,\n kernel_initializer=tf.keras.initializers.he_normal(seed=None),\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, trainable=True,\n name=None, reuse=None)\n self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=self.is_training,\n trainable=True,\n renorm=False,\n renorm_momentum=0.99)\n # Prepare inputs to the layer stack by adding positional encodings and\n # applying dropout.\n # embedded_inputs = self.embedding_softmax_layer(inputs)\n #\n inputs_padding = transformer_model_utils.get_padding(tf.cast(\n tf.reduce_max(100*self.encoder_inputs, [-1]),\n dtype=tf.int32))\n\n with tf.name_scope(\"add_pos_encoding\"):\n length = tf.shape(self.encoder_inputs)[1]\n pos_encoding = transformer_model_utils.get_position_encoding(\n length, self.options[\"hidden_size\"])\n encoder_inputs = self.encoder_inputs + pos_encoding\n\n if self.is_training:\n encoder_inputs = tf.nn.dropout(\n encoder_inputs, 1 - self.options[\"layer_postprocess_dropout\"])\n\n return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)", "def encode(input):\n return ModelEncoder().encode(input)", "def construct_encoding(self, values):\n return construct_encoding(\n values, self.user_count, self.computer_count, self.auth_type_count, self.logon_type_count,\n self.user_map, self.computer_map, self.auth_type_map, self.logon_type_map)", "def make_encoder(opt, embeddings, intent_size, output_size, use_history=False, hidden_depth=1, identity=None,\n hidden_size=None):\n # encoder = StateEncoder(intent_size=intent_size, output_size=output_size,\n # state_length=opt.state_length, extra_size=3 if opt.dia_num>0 else 0 )\n\n # intent + price\n diaact_size = (intent_size+1)\n 
extra_size = 3 + 2\n if hidden_size is None:\n hidden_size = opt.hidden_size\n if not opt.use_utterance:\n embeddings = None\n if use_history:\n extra_size = 3\n # + pmask\n diaact_size += 1\n if identity is None:\n encoder = HistoryIDEncoder(None, diaact_size * 2, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth, rnn_state=True)\n else:\n # encoder = HistoryIDEncoder(identity, diaact_size*2+extra_size, embeddings, output_size,\n # hidden_depth=hidden_depth)\n encoder = HistoryIDEncoder(identity, diaact_size * 2, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth, rnn_state=True)\n else:\n if identity is None:\n encoder = CurrentEncoder(diaact_size*opt.state_length+extra_size, embeddings, output_size,\n hidden_depth=hidden_depth)\n else:\n extra_size = 3\n # + pmask\n diaact_size += 1\n encoder = HistoryIDEncoder(identity, diaact_size * opt.state_length, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth)\n\n return encoder", "def getEncoders ():\n return _registeredEncoders", "def __new__(cls, *args, **kwargs):\n if cls.__name__ != 'Codec':\n return super().__new__(cls)\n if kwargs.get('type'):\n t_cls = ClassFactory.get_cls(ClassType.CODEC, kwargs.pop('type'))\n else:\n t_cls = ClassFactory.get_cls(ClassType.CODEC)\n return super().__new__(t_cls)", "def encode(self) :\n\t\tbitmap = ISO8583Bitmap()\n\t\ttexts=[]\n\t\tfor i in range(2,129) :\n\t\t\tid = 'f%03d' % i\n\t\t\tif hasattr(self,id) :\n\t\t\t\tv = getattr(self,id)\n\t\t\t\ttyp = self.desc_dict[id]['type']\n\t\t\t\tbitmap.setBitmap(i)\n\t\t\t\t# logit(\"%s:%s\" % (id,v))\n\t\t\t\ttxt = dataAttachTo8583(v,typ)\n\t\t\t\ttexts.append(txt)\n\t\treturn (bitmap,''.join(texts))" ]
[ "0.62622386", "0.6247434", "0.62234795", "0.6113021", "0.59711206", "0.59430516", "0.5825264", "0.5779037", "0.56837624", "0.56632996", "0.56139284", "0.5593676", "0.5560463", "0.55399996", "0.55361104", "0.5501795", "0.54680336", "0.54680336", "0.5461032", "0.54492253", "0.54226404", "0.5376168", "0.53644747", "0.5336152", "0.53330106", "0.53176653", "0.5288125", "0.5276875", "0.52488565", "0.5204656", "0.51891005", "0.5173012", "0.5120591", "0.50968176", "0.5094121", "0.5083489", "0.50733167", "0.5068721", "0.5058235", "0.50572234", "0.5052081", "0.5045483", "0.5041683", "0.503674", "0.50363356", "0.5022455", "0.4996962", "0.49958834", "0.49840495", "0.49782088", "0.49714997", "0.49653515", "0.49428725", "0.49408007", "0.49394172", "0.49303374", "0.49300084", "0.49274108", "0.49274108", "0.49257872", "0.49211416", "0.49127623", "0.48896536", "0.4871887", "0.48706207", "0.48699656", "0.48692158", "0.48660472", "0.48598692", "0.4851934", "0.48414516", "0.48380446", "0.48290703", "0.48181465", "0.47993284", "0.47901052", "0.47784647", "0.47749943", "0.47734433", "0.4751934", "0.47507882", "0.47448355", "0.4742829", "0.47281903", "0.4726033", "0.47250888", "0.47239795", "0.47224128", "0.47041136", "0.4698371", "0.46953237", "0.46862778", "0.4685146", "0.468091", "0.46638164", "0.4657099", "0.4652383", "0.4644387", "0.4644263", "0.4640958" ]
0.5244221
29
Create a SparseAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',
             out_transfer='identity', reconstruct_loss='squared',
             c_sparsity=1, sparsity_loss='bern_bern_kl',
             sparsity_target=0.01, tied_weights=True, batch_size=None,
             optimizer='lbfgs', max_iter=1000, verbose=False):
    super(SparseAutoEncoder, self).__init__(
        n_inpt, n_hidden, hidden_transfer, out_transfer,
        reconstruct_loss, c_sparsity, sparsity_loss, sparsity_target,
        tied_weights)
    self.batch_size = batch_size
    self.optimizer = optimizer
    self.f_transform = None
    self.f_reconstruct = None
    self.parameters.data[:] = np.random.standard_normal(
        self.parameters.data.shape).astype(theano.config.floatX)
    self.max_iter = max_iter
    self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')", "def make_sparse(data):\n assert data.train_pos_edge_index is not None\n\n (row, col), N = data.train_pos_edge_index, data.num_nodes\n perm = (col * N + row).argsort()\n row, col = row[perm], col[perm]\n\n value = [data.edge_id[(row[i] * N + col[i]).item()].item() for i in perm]\n\n data.adj_t = SparseTensor(\n row=col,\n col=row,\n value=torch.tensor(value, dtype=torch.float32),\n sparse_sizes=(N, N),\n is_sorted=True,\n )\n\n # Pre-process some important attributes.\n data.adj_t.storage.rowptr()\n data.adj_t.storage.csr2csc()\n\n return data", "def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)", "def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray', shape: List[int]) -> 'SparseTensor':\n return SparseTensor(indices, values, shape)", "def sparse_constructor(value, name=None, strict=False, allow_downcast=None,\r\n borrow=False, format=None):\r\n if not isinstance(value, scipy.sparse.spmatrix):\r\n raise TypeError(\"Expected a sparse matrix in the sparse shared variable constructor. Received: \",\r\n value.__class__)\r\n\r\n if format is None:\r\n format = value.format\r\n type = SparseType(format=format, dtype=value.dtype)\r\n if not borrow:\r\n value = copy.deepcopy(value)\r\n return SparseTensorSharedVariable(type=type, value=value, name=name,\r\n strict=strict, allow_downcast=allow_downcast)", "def to_sparse(self):\n from divisi2.sparse import SparseVector\n return SparseVector(self, self.labels)", "def _from_dict_to_sparse(self, adj_dict):\n indices = list(adj_dict.keys())\n values = [1] * len(indices)\n\n edge_index = torch.LongTensor(indices).T.to(self.device)\n edge_attr = torch.FloatTensor(values).to(self.device)\n\n edge_index, edge_attr = utils.to_symmetric(edge_index, edge_attr, self.n)\n\n return SparseTensor.from_edge_index(edge_index=edge_index,\n edge_attr=edge_attr,\n sparse_sizes=torch.Size([self.n, self.n]))", "def SparseEmbedding(data=None, weight=None, input_dim=_Null, output_dim=_Null, dtype=_Null, out=None, name=None, **kwargs):\n return (0,)", "def _make_train(data, smooth_factor):\n train_matrix = data_to_sparse(data).tolil()\n user_counts = np.array(train_matrix.sum(axis=1))[:, 0]\n train_matrix[np.where(user_counts == 0)] = smooth_factor\n train_matrix = normalize(train_matrix, 'l1', axis=1)\n return train_matrix.tocsr()", "def make_sparse(sparse_mx, args):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n\n indices = tensor(np.vstack((sparse_mx.row, sparse_mx.col)), args, torch.long)\n values = tensor(sparse_mx.data, args)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def train_clustermodel_sparse(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse'])\n clusters_df['segmentskey'] = 
clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse']]", "def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type=\"float\"):\n if a_ndarray is None:\n return None\n invalidInputError(isinstance(a_ndarray, np.ndarray),\n f\"input should be a np.ndarray, not ${type(a_ndarray)}\")\n invalidInputError(isinstance(i_ndarray, np.ndarray),\n f\"indices should be a np.ndarray, not ${type(i_ndarray)}\")\n invalidInputError(i_ndarray.size == a_ndarray.size * shape.size,\n f\"size of values ${a_ndarray.size * shape.size} and\"\n f\" indices ${i_ndarray.size} should match\")\n return cls(a_ndarray,\n shape,\n bigdl_type,\n i_ndarray)", "def save_sparse_csr(filename,array, labels, vocab):\n np.savez(filename,data = array.data ,indices=array.indices,\n indptr =array.indptr, shape=array.shape, labels=labels, vocab=vocab)", "def _binary_3d_label_to_sparse(labels):\n return sparse_tensor.SparseTensor.from_value(\n _binary_3d_label_to_sparse_value(labels))", "def sparseFeature(feat_name, feat_num, embed_dim=4):\n return {'feat_name': feat_name, 'feat_num': feat_num, 'embed_dim': embed_dim}", "def sparse_to_dense(example):\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n example[key] = val\n\n return example", "def sparse_encode(D, data, callback=None, n_alphas=3):\n D = np.asanyarray(D, dtype=np.double)\n data = np.asanyarray(data, dtype=np.double)\n data = np.atleast_2d(data)\n\n # TODO: use a smart sparse representation instead\n encoded = np.zeros((data.shape[0], D.shape[1]), dtype=np.double)\n\n for i, code in enumerate(data):\n clf = LassoCV(n_alphas=n_alphas).fit(D, code, fit_intercept=False)\n encoded[i][:] = clf.coef_\n\n if callback is not None:\n callback(i)\n return encoded", "def __init__(self, idxbase=0):\n if idxbase not in (0, 1):\n raise ValueError(\"Invalid index base\")\n\n self.api = cuSparse()\n self.idxbase = (CUSPARSE_INDEX_BASE_ZERO,\n CUSPARSE_INDEX_BASE_ONE)[idxbase]", "def set_sparse_signals(self):\n\t\n\t\tparams_dSs = [self.mu_dSs, self.sigma_dSs]\n\t\tparams_Ss0 = [self.mu_Ss0, self.sigma_Ss0]\n\t\tself.dSs, self.idxs = sparse_vector([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\tparams_dSs,\tseed=self.seed_dSs)\n\t\t\n\t\t# Replace components with conflicting background odor \n\t\tif self.Kk_split is not None and self.Kk_split != 0:\n\t\t\tassert 0 <= self.Kk_split <= self.Kk, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires Kk_split\" \\\n\t\t\t\t\" to be non-negative and less than or equal to Kk.\"\n\t\t\tassert self.mu_dSs_2 is not None \\\n\t\t\t\tand self.sigma_dSs_2 is not None, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires that\" \\\n\t\t\t\t\" mu_dSs_2 and sigma_dSs_2 are set.\"\n\n\t\t\tsp.random.seed(self.seed_dSs)\n\t\t\tself.idxs_2 = sp.random.choice(self.idxs[0], self.Kk_split, \n\t\t\t\t\t\t\t\t\t\t\treplace=False)\n\t\t\tfor idx_2 in self.idxs_2:\n\t\t\t\tself.dSs[idx_2] = sp.random.normal(self.mu_dSs_2, \n\t\t\t\t\t\t\t\t\t\t\t\t\tself.sigma_dSs_2)\n\t\telse:\n\t\t\tself.idxs_2 = []\n\t\t\tself.Kk_split = 
0\n\t\t\t\n\t\t# Ss0 is the ideal (learned) background stimulus without noise\n\t\tself.Ss0, self.Ss0_noisy = sparse_vector_bkgrnd([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.idxs, params_Ss0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tseed=self.seed_Ss0)\n\t\t\n\t\tself.Ss = self.dSs + self.Ss0_noisy", "def test_to_sparse(self, fn_name, fn_args, proto_list_key):\n self.run_benchmarks(fn_name, _get_prensor_to_sparse_tensor_fn, fn_args,\n proto_list_key)", "def _binary_2d_label_to_sparse(labels):\n return sparse_tensor.SparseTensor.from_value(\n _binary_2d_label_to_sparse_value(labels))", "def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)", "def _identity_sparse(d, stype=\"csr\", dtype=complex):\n return sp.eye(d, dtype=dtype, format=stype)", "def copy(self):\n return SparseN(self)", "def as_sparse_variable(x, name=None):\r\n\r\n # TODO\r\n # Verify that sp is sufficiently sparse, and raise a\r\n # warning if it is not\r\n\r\n if isinstance(x, gof.Apply):\r\n if len(x.outputs) != 1:\r\n raise ValueError(\"It is ambiguous which output of a \"\r\n \"multi-output Op has to be fetched.\", x)\r\n else:\r\n x = x.outputs[0]\r\n if isinstance(x, gof.Variable):\r\n if not isinstance(x.type, SparseType):\r\n raise TypeError(\"Variable type field must be a SparseType.\", x,\r\n x.type)\r\n return x\r\n try:\r\n return constant(x, name=name)\r\n except TypeError:\r\n raise TypeError(\"Cannot convert %s to SparseType\" % x, type(x))", "def SimpleSparseTensorFrom(x):\n x_ix = []\n x_val = []\n for batch_i, batch in enumerate(x):\n for time, val in enumerate(batch):\n x_ix.append([batch_i, time])\n x_val.append(val)\n x_shape = [len(x), np.asarray(x_ix).max(0)[1]+1]\n x_ix = tf.constant(x_ix, tf.int64)\n x_val = tf.constant(x_val, tf.int32)\n x_shape = tf.constant(x_shape, tf.int64)\n\n #return tf.SparseTensor(x_ix, x_val, x_shape)\n return ([x_ix, x_val, x_shape])", "def make_stax_model(self):", "def dense_to_sparse(adj):\n assert adj.dim() >= 2 and adj.dim() <= 3\n assert adj.size(-1) == adj.size(-2)\n\n index = adj.nonzero(as_tuple=True)\n #print(index)\n edge_attr = adj[index]\n\n if len(index) == 3:\n batch = index[0] * adj.size(-1)\n index = (batch + index[1], batch + index[2])\n\n return torch.stack(index, dim=0), edge_attr", "def _create_sparse_train_and_test(ratings, n_users, n_items):\n \n # pick a random set of data as testing data, sorted ascending\n test_set_size = len(ratings) / TEST_SET_RATIO\n test_set_idx = np.random.choice(xrange(len(ratings)), size=test_set_size, replace=False)\n test_set_idx = sorted(test_set_idx)\n \n # use the remaining data to create a training set\n ts_ratings = ratings[test_set_idx]\n tr_ratings = np.delete(ratings, test_set_idx, axis=0)\n \n # create training and test matrices as coo_matrix\n u_tr, i_tr, r_tr = zip(*tr_ratings)\n tr_sparse = coo_matrix((r_tr, (u_tr, i_tr)), shape=(n_users, n_items))\n u_ts, i_ts, r_ts = zip(*ts_ratings)\n test_sparse = coo_matrix((r_ts, (u_ts, i_ts)), shape=(n_users, n_items))\n \n return tr_sparse, test_sparse", "def create_sparse_matrix(self, filename, matrix_length, density):\n pos = 0\n aux_pos = 0\n matrix = []\n pointerB = []\n pointerE = []\n columns = []\n values = []\n\n for i in range(0, matrix_length):\n row = []\n pointerB.append(pos)\n aux_pos = pos\n for j in range(0, matrix_length):\n probability = random.random()\n if probability < density:\n pos += 1\n val = random.randint(1, 10)\n 
values.append(val)\n columns.append(j)\n else:\n val = 0\n row.append(val)\n matrix.append(row)\n pointerE.append(pos)\n vector_b = SparseMatrix.gen_vector(matrix_length)\n matrix_A = np.matrix(matrix)\n vector_res = np.dot(matrix_A, vector_b).reshape(matrix_length, 1)\n data = {\"values\": values, \"columns\": columns, \"pointerB\": pointerB, \"pointerE\": pointerE}\n CSR_A = json.dumps(data)\n '''\n print(\"x: \", vector_x)\n print(\"A: \", matrix_A)\n print(\"b: \", vector_b)\n data = {\"values\": values, \"columns\": columns, \"pointerB\": pointerB, \"pointerE\": pointerE}\n data_json = json.dumps(data)\n file = open(filename, 'w')\n file.write(data_json)\n file.close()\n np.savetxt(\"vector.txt\", vector_x, fmt=\"%1.9f\", delimiter=\" \")\n '''\n return matrix_A, CSR_A, vector_b, vector_res", "def __init__(self, format, dtype):\r\n if not imported_scipy:\r\n raise Exception(\"You can't make SparseType object as SciPy\"\r\n \" is not available.\")\r\n dtype = str(dtype)\r\n if dtype in self.dtype_set:\r\n self.dtype = dtype\r\n else:\r\n raise NotImplementedError('unsupported dtype \"%s\" not in list' %\r\n dtype, list(self.dtype_set))\r\n\r\n assert isinstance(format, basestring)\r\n if format in self.format_cls:\r\n self.format = format\r\n else:\r\n raise NotImplementedError('unsupported format \"%s\" not in list' %\r\n format, self.format_cls.keys())", "def to_sparse(x):\n x_typename = torch.typename(x).split('.')[-1]\n sparse_tensortype = getattr(torch.sparse, x_typename)\n\n indices = torch.nonzero(x)\n if len(indices.shape) == 0: # if all elements are zeros\n return sparse_tensortype(*x.shape)\n indices = indices.t()\n values = x[tuple(indices[i] for i in range(indices.shape[0]))]\n return sparse_tensortype(indices, values, x.size())", "def bprop_make_sparse_tensor(indices, values, dense_shape, out, dout):\n return zeros_like(indices), F.sparse_tensor_get_values(dout), ()", "def sparseFeature(self, feat, feat_num, embed_dim=4):\n return {'feat': feat, 'feat_num': feat_num, 'embed_dim': embed_dim}", "def create_sparse_coo_matrix(df, n_users, n_items, movie_dict):\n\n\t# Map the movie_ids in the data to the new movie_ids given by the dictionary movie_dict\n\tmovie_id_list = list(map(lambda x: movie_dict[x], df['movieId'].tolist()))\n\t# Map the user_id in the dataframe to userid - 1 [to account for zero based indexing]\n\tuser_id_list = list(map(lambda x: x - 1, df['userId'].tolist()))\n\tsparse_matrix_coo = sparse.coo_matrix((df['rating'].tolist(),(user_id_list, movie_id_list)),shape=(n_users,n_items))\n\tlogger.debug(\"Shape of created sparse matrix: \" + str(sparse_matrix_coo.shape))\n\tlogger.debug(\"Number of non_zero elements in the sparse matrix: \" + str(sparse_matrix_coo.nnz))\n\tlogger.debug(\"Number of entries in the input dataframe:[should match the number of non zero entries in sparse matrix] \" + str(df.shape[0]))\n\treturn sparse_matrix_coo", "def test_00_create_sparse_1d_array(self):\n ncells = 100\n sparsity = 3.0 # 1 / density\n _, err = _iquery(\"create array SPARSE <v:int64>[i=0:{0}:0:5]\".format(\n ncells - 1))\n assert not err, err\n self._array_cleanups.append('SPARSE')\n _, err = _iquery(\"\"\"\n insert(\n redimension(\n apply(\n build(<i:int64>[fud=0:{0}], {1}*fud),\n (v, 1)),\n SPARSE),\n SPARSE)\"\"\".format(int(ncells / sparsity) - 1,\n int(sparsity)))\n assert not err, err\n check_v_sum('SPARSE')\n nchunks = chunk_count(vaid_of('SPARSE'))\n prt(\"SPARSE has\", nchunks, \"chunks\")", "def get_sparse_codes(\n signal: np.ndarray,\n dictionary: 
np.ndarray,\n penalty: float,\n positive_code: bool,\n ) -> np.ndarray:\n coder = SparseCoder(\n dictionary=dictionary,\n transform_algorithm=\"lasso_lars\",\n transform_alpha=penalty,\n positive_code=positive_code,\n )\n return coder.transform(signal.reshape(1, -1))", "def load_sparse(fname):\n E = np.loadtxt(open(fname, \"rb\"), delimiter=\",\")\n H = E[0, :]\n n = int(H[0])\n d = int(H[1])\n E = E[1:, :]\n S = sparse.coo_matrix((E[:, 2], (E[:, 0] - 1, E[:, 1] - 1)), shape=(n, d))\n S = S.todense()\n\n return S", "def to_sparse_representation(label, batch_idx):\n indices = []\n vals = []\n\n for i, idx in enumerate(batch_idx):\n for j, c in enumerate(label[idx]):\n indices.append([i, j])\n vals.append(c)\n\n shape = [len(batch_idx), np.max(indices, axis=0)[1] + 1]\n\n return np.array(indices), np.array(vals), np.array(shape)", "def get_sparse_backend():\n backend = biom_config['python_code_sparse_backend']\n if backend is None:\n backend = 'CSMat'\n\n if backend not in sparse_backends:\n raise InvalidSparseBackendException(\"Unrecognized sparse backend \"\n \"'%s'. Choose from %s.\" % (backend,\n ', '.join(sparse_backends)))\n\n valid_backend = False\n if backend == 'ScipySparseMat':\n try:\n from biom.backends.scipysparse import ScipySparseMat, to_scipy, \\\n dict_to_scipy, list_dict_to_scipy, list_nparray_to_scipy, \\\n nparray_to_scipy, list_list_to_scipy\n SparseObj = ScipySparseMat\n to_sparse = to_scipy\n dict_to_sparseobj = dict_to_scipy\n list_dict_to_sparseobj = list_dict_to_scipy\n list_nparray_to_sparseobj = list_nparray_to_scipy\n nparray_to_sparseobj = nparray_to_scipy\n list_list_to_sparseobj = list_list_to_scipy\n valid_backend = True\n except ImportError:\n valid_backend = False\n stderr.write(\"Cannot load ScipySparseMat (requires that scipy is \"\n \"installed). Using CSMat sparse backend.\\n\")\n\n if backend == 'CSMat' or (not valid_backend):\n try:\n from biom.backends.csmat import CSMat, to_csmat, dict_to_csmat, \\\n list_dict_to_csmat, list_nparray_to_csmat, nparray_to_csmat, \\\n list_list_to_csmat\n SparseObj = CSMat\n to_sparse = to_csmat\n dict_to_sparseobj = dict_to_csmat\n list_dict_to_sparseobj = list_dict_to_csmat\n list_nparray_to_sparseobj = list_nparray_to_csmat\n nparray_to_sparseobj = nparray_to_csmat\n list_list_to_sparseobj = list_list_to_csmat\n valid_backend = True\n except ImportError:\n valid_backend = False\n stderr.write('Cannot load CSMat sparse backend.\\n')\n\n if not valid_backend:\n raise InvalidSparseBackendException(\"The sparse matrix backend '%s' \"\n \"could not be loaded. 
Please check your biom-format \"\n \"installation.\" % backend)\n\n return SparseObj, to_sparse, dict_to_sparseobj, list_dict_to_sparseobj, \\\n list_nparray_to_sparseobj, nparray_to_sparseobj, \\\n list_list_to_sparseobj", "def test_build_classification_matrix_sparse():\n import scnym\n\n # generate a sparse matrix with ~10% of elements filled\n B = np.zeros((100, 10))\n ridx = np.random.choice(B.size, size=100, replace=True)\n B.flat[ridx] = 1\n B = sparse.csr_matrix(B)\n\n # create dummy gene names where the order of\n # genes in `B` is permuted\n A_genes = np.arange(10)\n B_genes = np.random.permutation(A_genes)\n\n # build the classification matrix\n X = scnym.utils.build_classification_matrix(\n X=B,\n model_genes=A_genes,\n sample_genes=B_genes,\n )\n assert sparse.issparse(X)\n\n # X should have the genes of B in the order of A\n for i, g in enumerate(A_genes):\n j = int(np.where(B_genes == g)[0])\n assert np.all(X[:, i].toarray() == B[:, j].toarray())\n return", "def to_dense(sparse, fill_value=None):\n if fill_value is None or fill_value == 0:\n return sparse.to_dense()\n sparse = sparse.coalesce()\n dense = torch.full(sparse.shape, fill_value, dtype=sparse.dtype, device=sparse.device)\n for idx, value in zip(sparse._indices().t(), sparse._values()):\n dense[tuple(idx)] = value\n return dense", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def make_sparse(self, fmt='csc', make_method=None):\n if make_method:\n self.sparse = make_method(self.hamiltonian)\n else:\n self.sparse = self.hamiltonian.to_matrix(sparse=fmt)", "def _create_mkl_sparse(matrix):\n\n double_precision = _is_double(matrix)\n\n # Figure out which matrix creation function to use\n if _spsparse.isspmatrix_csr(matrix):\n _check_scipy_index_typing(matrix)\n assert matrix.data.shape[0] == matrix.indices.shape[0]\n assert matrix.indptr.shape[0] == matrix.shape[0] + 1\n handle_func = MKL._mkl_sparse_d_create_csr if double_precision else MKL._mkl_sparse_s_create_csr\n\n elif _spsparse.isspmatrix_csc(matrix):\n _check_scipy_index_typing(matrix)\n assert matrix.data.shape[0] == matrix.indices.shape[0]\n assert matrix.indptr.shape[0] == matrix.shape[1] + 1\n handle_func = MKL._mkl_sparse_d_create_csc if double_precision else MKL._mkl_sparse_s_create_csc\n\n elif _spsparse.isspmatrix_bsr(matrix):\n _check_scipy_index_typing(matrix)\n return _create_mkl_sparse_bsr(matrix), double_precision\n\n else:\n raise ValueError(\"Matrix is not CSC, CSR, or BSR\")\n\n return _pass_mkl_handle_csr_csc(matrix, handle_func), double_precision", "def getSparse(self): # as opposed to makeSparse which keeps the same form and return nothing\n return copy.deepcopy(self.makeSparse())", "def getSparseAdjacencyMatrix( graph, attribute=None, transposed=False ):\n if (attribute is not None) and (attribute not in graph.es.attribute_names()):\n raise ValueError( \"Attribute does not exists.\" )\n \n row = []\n col = []\n data = []\n \n if attribute is None:\n if transposed:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(t)\n col.append(s)\n else:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(s)\n col.append(t)\n data = np.ones(len(graph.es()))\n else:\n if transposed:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(t)\n col.append(s)\n else:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(s)\n col.append(t)\n data = np.array(graph.es()[attribute])\n\n return sparse.coo_matrix((data, (row, col)) , shape=(len(graph.vs), len(graph.vs))).tocsr()", "def 
_dict_to_sparse(matrix_dict):\n return scipy.sparse.coo_matrix(\n (matrix_dict['data'], (matrix_dict['row'], matrix_dict['col'])),\n shape=matrix_dict['shape'])", "def construct_saliency_map(graph):\n \n SP = saliencymap(graph)\n SP.construct_saliency_map()\n return SP.saliency_map", "def __init__(self, size):\n _PysparseMatrixFromShape.__init__(self, rows=size, cols=size, bandwidth = 1)\n ids = numerix.arange(size)\n self.put(numerix.ones(size, 'd'), ids, ids)", "def SparseData(Input,Output,Fraction):\r\n # Take a fraction of the full data\r\n MFull = np.column_stack((Input,Output))\r\n np.random.shuffle(MFull)\r\n split = int(np.ceil((MFull.shape[0])*Fraction)) \r\n # Generate a Sparse Set\r\n C = MFull[:split,:np.shape(Input)[1]]\r\n V = MFull[:split,np.shape(Input)[1]:]\r\n return C,V", "def dense_to_sparse(self, tensor: tf.Tensor) -> tf.Tensor:\n tensor_shape = tensor.shape\n expand_dims = len(tensor_shape) == 3\n\n tensor = tf.gather_nd(tf.reshape(tensor, (-1, 1)), self.observations_index)\n if expand_dims:\n tensor = tf.expand_dims(tensor, axis=-1)\n return tensor", "def test_sparse_with_dense():\n\n def test_func(df):\n df[\"new column\"] = 1 # Create dense column\n return df\n\n atom = ATOMClassifier(X_text, y10, random_state=1)\n atom.apply(test_func)\n atom.vectorize(strategy=\"BOW\", return_sparse=False)\n assert all(not pd.api.types.is_sparse(atom.X[c]) for c in atom.features)", "def make_sparse_batch(instance_indices, instance_values, max_index):\n batch_indices = make_batch_indices(instance_indices)\n batch_values = list(itertools.chain.from_iterable(instance_values))\n batch_shape = (len(instance_indices), max_index)\n return tf.SparseTensorValue(batch_indices, batch_values, batch_shape)", "def train_clustermodel_sparse_long(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix_long(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse_long'])\n clusters_df['segmentskey'] = clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse_long'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_long_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse_long']]", "def add_sparse(self, key, element):\n self.add(self._sparse2seq(key), element)", "def train_clustermodel_nonsparse(self):\n \n segtimes_df, nonsparse_matrix = self.create_nonsparse_matrix(self.data)\n segtimes_df['index']=segtimes_df.index\n nonsparse_matrix['index']=nonsparse_matrix.index\n data_to_scale = pd.merge(segtimes_df, nonsparse_matrix, on=['index'])\n data_scaled = self.scale_matrix(data_to_scale)\n data_to_cluster = data_scaled.drop(columns = ['segment_id','level_0','date','time'])\n \n print('Clustering using nonsparse segment/time matrix and: ' + self.algorithm)\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(data_to_cluster)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_nonsparse'])\n clusters_df['segtimekey'] = clusters_df.index\n segtimes_df['segtimekey'] = 
segtimes_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(segtimes_df, clusters_df, on=['segtimekey'])\n self.clusters_df_final['cluster_nonsparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_nonsparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','date','time','cluster_nonsparse']]", "def convert_sparse_to_igraph(indices, matrix):\n # sources, targets = matrix.nonzero()\n # weights = matrix[sources, targets]\n # weights = np.array(weights)[0]\n # print(dir(louvain))\n # ig = igraph.Graph(zip(sources, targets), directed=True,\n # edge_attrs={'weight': weights})\n # return ig\n g = igraph.Graph.Adjacency((matrix > 0).tolist())\n g.es['weight'] = matrix[matrix.nonzero()]\n # g.vs['label'] = node_names # or a.index/a.columns\n return g", "def list_to_sparse(inputs):\n\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(\n *[itertools.repeat(i, len(x)) for i, x in enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n\n s = coo_matrix((data, (row, col)), shape=(\n len(inputs), np.max([len(x) for x in inputs])))\n\n return s", "def generate_sparse(n, s):\n x = np.zeros(n)\n I = np.random.randint(0, n, s)\n x[I] = 1\n return x", "def is_sparse(x: Any, backend=None) -> bool:\r\n module = get_module(backend)\r\n return module.is_sparse(x)", "def to_sparse(a):\n flat = a.flatten()\n indices = np.nonzero(flat)\n values = flat[indices]\n return indices[0], values", "def test_import_values_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.sparse').toarray())", "def sparse_col(self):\n if not self.col_name_mapping or \"sparse_col\" not in self.col_name_mapping:\n return EmptyFeature\n return Feature(\n name=list(self.col_name_mapping[\"sparse_col\"].keys()),\n index=list(self.col_name_mapping[\"sparse_col\"].values()),\n )", "def save_sparse(sparse_number, scan_file_dir, len_bytes):\n logger.info(\"Saving sparse data.\")\n fname = os.path.join(scan_file_dir, \"sparse\"+str(sparse_number))\n f = open(fname, 'ab')\n f.truncate(len_bytes)\n f.close()\n \n return fname", "def create_sparse(f, vol=DMG_VOLUME_NAME, fs=DMG_DEFAULT_FS, mountpoint=DMG_MOUNT, dry_run=ARGS.dry_run):\n result = None\n sparseimage = Path('{f}.sparseimage'.format(f=f)) if not str(f).endswith('.sparseimage') else Path(f)\n\n if not isinstance(mountpoint, Path):\n mountpoint = Path(mountpoint)\n\n if not dry_run:\n if fs not in VALID_DMG_FS:\n raise TypeError\n\n # If the sparseimage exists and is already mounted\n if sparseimage.exists() and mountpoint.exists():\n LOG.warning('Unmounting existing mount point for {mount}'.format(mount=mountpoint))\n eject(silent=True)\n result = mount(sparseimage, mountpoint)\n else:\n cmd = ['/usr/bin/hdiutil', 'create', '-ov', '-plist', '-volname', vol, '-fs', fs, '-attach', '-type', 'SPARSE', str(f)]\n _p = subprocess.run(cmd, capture_output=True)\n LOG.debug('{cmd} ({returncode})'.format(cmd=' '.join([str(x) for x in cmd]), returncode=_p.returncode))\n\n if _p.returncode == 0:\n LOG.warning('Created temporary sparseimage for {img}'.format(img=f))\n _stdout = plist.read_string(_p.stdout)\n _entities = _stdout.get('system-entities')\n # _image_path = _stdout.get('image-components')[0] # This may not always be the 
sparseimage filename?\n\n if _entities:\n result = mount_device(_entities)\n LOG.warning('Mounted sparse image to {mountpoint}'.format(mountpoint=result))\n else:\n LOG.info(_p.stderr.decode('utf-8').strip())\n sys.exit(88)\n else:\n LOG.warning('Create {sparseimage} ({volume}i, {fs}) and mount to {mountpoint}'.format(sparseimage=f, volume=vol, fs=fs, mountpoint=mountpoint))\n\n if result and sparseimage and sparseimage not in result:\n result = (sparseimage, result[0], result[1])\n\n return result", "def _build_sparse_matrix(L):\n shape = L.shape\n i = torch.LongTensor(np.vstack((L.row, L.col)).astype(int))\n v = torch.FloatTensor(L.data)\n return torch.sparse.FloatTensor(i, v, torch.Size(shape))", "def sparse_add_identity(\n A: Tensor\n) -> Tensor:\n idx1, idx2 = A._indices()\n vals = A._values()\n vals[idx1 == idx2] += 1\n return A", "def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss=['binary_crossentropy',\n 'binary_crossentropy'],\n loss_weights=[self.reconst_weight,\n self.adv_weight])\n else:\n autoencoder = Model(self.encoder.input,\n self.decoder(self.encoder.output))\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss='mse')\n return autoencoder", "def tocsr(self):\n\n indptr = np.asarray([len(x) for x in self.rows], dtype=np.intc)\n indptr = np.concatenate( (np.array([0], dtype=np.intc), np.cumsum(indptr)) )\n\n nnz = indptr[-1]\n\n indices = []\n for x in self.rows:\n indices.extend(x)\n indices = np.asarray(indices, dtype=np.intc)\n\n data = []\n for x in self.data:\n data.extend(x)\n data = np.asarray(data, dtype=self.dtype)\n\n from csr import csr_matrix\n return csr_matrix((data, indices, indptr), shape=self.shape)", "def __init__(self):\n super().__init__()\n self.type = 'SparseGridCollocationSampler'\n self.printTag = 'SAMPLER '+self.type.upper()\n self.maxPolyOrder = None #L, the relative maximum polynomial order to use in any dimension\n self.indexSetType = None #TP, TD, or HC; the type of index set to use\n self.polyDict = {} #varName-indexed dict of polynomial types\n self.quadDict = {} #varName-indexed dict of quadrature types\n self.importanceDict = {} #varName-indexed dict of importance weights\n self.maxPolyOrder = None #integer, relative maximum polynomial order to be used in any one dimension\n self.lastOutput = None #pointer to output dataObjects object\n self.ROM = None #pointer to ROM\n self.jobHandler = None #pointer to job handler for parallel runs\n self.doInParallel = True #compute sparse grid in parallel flag, recommended True\n self.dists = {} #Contains the instance of the distribution to be used. 
keys are the variable names\n self.writeOut = None\n self.indexSet = None\n self.sparseGrid = None\n self.features = None\n self.sparseGridType = None\n self.addAssemblerObject('ROM', InputData.Quantity.one)", "def __init__(self, autoencoder, latent_space):\r\n self._autoencoder = autoencoder\r\n self._latent_space = latent_space", "def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return AES(key,mode,IV,counter,segment_size)", "def SimpleSparseTensorFrom(x):\n x_ix = []\n x_val = []\n for batch_i, batch in enumerate(x):\n for time, val in enumerate(batch):\n x_ix.append([batch_i, time])\n x_val.append(val)\n x_shape = [len(x), np.asarray(x_ix).max(0)[1]+1]\n\n return x_ix, x_val, x_shape", "def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype", "def __init__(self, filename, default_seq=None, key_function=None, as_raw=False, strict_bounds=False):\n self.filename = filename\n self.faidx = Faidx(filename, key_function=key_function, as_raw=as_raw,\n default_seq=default_seq, strict_bounds=strict_bounds)", "def _binary_3d_label_to_sparse_value(labels):\n indices = []\n values = []\n for d0, labels_d0 in enumerate(labels):\n for d1, labels_d1 in enumerate(labels_d0):\n d2 = 0\n for class_id, label in enumerate(labels_d1):\n if label == 1:\n values.append(class_id)\n indices.append([d0, d1, d2])\n d2 += 1\n else:\n assert label == 0\n shape = [len(labels), len(labels[0]), len(labels[0][0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))", "def scipy_sparse_to_spmatrix(A):\n coo = A.tocoo()\n SP = spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=A.shape)\n return SP", "def create_adjacency_matrix(graph):\n index_1 = [edge[0] for edge in graph.edges()]\n index_2 = [edge[1] for edge in graph.edges()]\n values = [1 for edge in graph.edges()]\n node_count = max(max(index_1)+1,max(index_2)+1)\n A = sparse.coo_matrix((values, (index_1,index_2)),shape=(node_count,node_count),dtype=np.float32)\n return A", "def __init__(self, rows, cols, bandwidth=0, sizeHint=None, matrix=None, storeZeros=True):\n sizeHint = sizeHint or max(rows, cols) * bandwidth\n if matrix is None:\n tmpMatrix = spmatrix.ll_mat(1, 1, 1)\n if hasattr(tmpMatrix, 'storeZeros'):\n matrix = spmatrix.ll_mat(rows, cols, sizeHint, storeZeros)\n else:\n matrix = spmatrix.ll_mat(rows, cols, sizeHint)\n\n _PysparseMatrix.__init__(self, matrix=matrix)", "def initialize_dense_cost(height, width):\n cost = sparse.initialize_dense_cost_cpp(height, width)\n return cost", "def __init__(self, mesh):\n _PysparseIdentityMatrix.__init__(self, size=mesh.numberOfCells)", "def compress_csr(self):\n _, unique, indices = np.unique(\n self.m*self.rows + self.cols,\n return_index=True, return_inverse=True)\n self.rows = self.rows[unique]\n self.cols = self.cols[unique]\n self.vals = np.bincount(indices, weights=self.vals)", "def initialize_asm_ksp_obj(matrix_A):\n ksp_obj = PETSc.KSP().create()\n ksp_obj.setOperators(matrix_A,matrix_A)\n ksp_obj.setFromOptions()\n ksp_obj.setUp()\n return ksp_obj", "def convert_to_one_hot(a,max_val=None):\n N = a.size\n data = np.ones(N,dtype=int)\n sparse_out = sparse.coo_matrix((data,(np.arange(N),a.ravel())), shape=(N,max_val))\n return np.array(sparse_out.todense())", "def get_cvxopt_sparse_intf():\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return 
smi.SparseMatrixInterface()", "def convert_sparse_layer(layer_name: str,\n weights: Dict[str, np.ndarray],\n coordinates: np.ndarray,\n input_size: int,\n output_size: int,\n is_msp: bool) -> str:\n components: List[str] = []\n var_name = convert_name(layer_name)\n\n # Create the 1d weight array\n weight_name = '{0}_DATA'.format(var_name)\n if is_msp:\n persistent = '#pragma PERSISTENT({0})'.format(weight_name)\n components.append(persistent)\n\n kernel_name = '{0}/kernel:0'.format(layer_name)\n\n fp_weights = array_to_fixed_point(weights[kernel_name],\n precision=PRECISION,\n width=WIDTH)\n weight_array = '{{{0}}}'.format(','.join(map(str, fp_weights)))\n weight_var = 'static int16_t {0}[] = {1};'.format(weight_name, weight_array)\n components.append(weight_var)\n\n # Convert to a sparse CSR matrix\n rows = coordinates[:, 0]\n cols = coordinates[:, 1]\n coo_mat = sp.coo_matrix((weights[kernel_name], (rows, cols)))\n\n csr_mat = coo_mat.tocsr()\n\n # Create the row and column arrays\n row_name = '{0}_ROWS'.format(var_name)\n\n if is_msp:\n persistent = '#pragma PERSISTENT({0})'.format(row_name)\n components.append(persistent)\n\n row_array = '{{{0}}}'.format(','.join(map(str, csr_mat.indptr)))\n row_var = 'static uint16_t {0}[] = {1};'.format(row_name, row_array)\n components.append(row_var)\n\n col_name = '{0}_COLS'.format(var_name)\n\n if is_msp:\n persistent = '#pragma PERSISTENT({0})'.format(col_name)\n components.append(persistent)\n\n col_array = '{{{0}}}'.format(','.join(map(str, csr_mat.indices)))\n col_var = 'static uint16_t {0}[] = {1};'.format(col_name, col_array)\n components.append(col_var)\n\n # Create the block sparse matrix\n mat_name = '{0}_KERNEL'.format(var_name)\n nnz = len(coordinates)\n mat_var = 'static SparseMatrix {0} = {{ {1}, {2}, {3}, {4}, {5}, {6} }};'.format(mat_name, weight_name, output_size, input_size, row_name, col_name, nnz)\n components.append(mat_var)\n\n # Create the bias vector\n bias_name = '{0}/bias:0'.format(layer_name)\n bias_var = convert_matrix(name=bias_name,\n mat=weights[bias_name],\n precision=PRECISION,\n width=WIDTH,\n is_msp=is_msp)\n components.append(bias_var)\n\n return '\\n'.join(components)", "def to_csr(self):\n return sparse.csr_matrix((self.data, (self.col, self.row)),\n shape=(self.nrows, self.ncols))", "def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))", "def j_sparse_vector_wrapper_to_scipy_spmatrix(j_obj: JavaObject):\n indices = np.frombuffer(j_obj.getIndicesBytes(), dtype=\"<i4\")\n values = np.frombuffer(j_obj.getValuesBytes(), dtype=\"<f8\")\n size = j_obj.getSize()\n indptr = np.array([0, indices.shape[0]], dtype=np.int32)\n return csr_matrix((values, indices, indptr), shape=(1, size), dtype=np.float64).todok()", "def make_example(scg_line, barcode, count_no, genes_no,\n cluster=None, categories=None):\n feat_map = {}\n\n idx, vals = to_sparse(scg_line)\n feat_map['indices'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=idx))\n\n feat_map['values'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=vals))\n\n feat_map['barcode'] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[str.encode(barcode)]))\n\n feat_map['genes_no'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[genes_no]))\n\n feat_map['count_no'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[count_no]))\n\n # add hot encoding for classification problems\n if cluster:\n feat_map['cluster_1hot'] = 
tf.train.Feature(\n int64_list=tf.train.Int64List(\n value=[int(c == cluster) for c in categories]))\n\n feat_map['cluster_int'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[int(cluster)]))\n\n return tf.train.Example(features=tf.train.Features(feature=feat_map))", "def __init__(self, functions=None, variables=None, global_resource=None):\n self.ssa = NetworkEnsemble()\n if functions is None:\n self.ssa.functions = dict()\n else:\n self.ssa.functions = functions\n if variables is None:\n self.ssa.variables = dict()\n else:\n self.ssa.variables = variables\n if global_resource is None:\n self.ssa.global_resource = dict()\n else:\n self.ssa.global_resource = global_resource", "def expand_csr_adj(adj, count:int):\n r,c = adj.shape\n \n adj = sp.vstack(\n [adj, sp.csr_matrix(np.zeros((count, c)))])\n adj = sp.hstack(\n [adj, sp.csr_matrix(np.zeros((r + count, count)))])\n \n return adj", "def sparse_identity_benchmark(benchmark, default_parameters):\n\n from bempp.api.operators.boundary.sparse import identity\n from bempp.api import function_space\n\n grid = bempp.api.shapes.regular_sphere(6)\n space = function_space(grid, \"DP\", 1)\n\n fun = lambda: identity(\n space, space, space, parameters=default_parameters\n ).weak_form()\n\n benchmark(fun)", "def convert_sa_to_one_hot(sa_array):\n # flatten\n sa_array = np.reshape(sa_array, [-1, 2, 1])\n # combine two features\n index = np.multiply(sa_array[:, 0], 2) + sa_array[:, 1]\n #np.reshape(index, (index.shape[0], 1))\n\n # transfer to one hot\n enc = preprocessing.OneHotEncoder(sparse=False)\n enc.fit([[1], [2], [3], [4]])\n one_hot_code = enc.transform(index)\n\n # reshape back to shape [..., sequence_length, 4]\n one_hot_code = np.reshape(one_hot_code, [-1, DATA_SEQUENCE_LEN, 4])\n return one_hot_code", "def create_sparse_data_per_step(\n self,\n state_id_list: KeyedJaggedTensor,\n state_id_score_list: KeyedJaggedTensor,\n action_id_list: KeyedJaggedTensor,\n action_id_score_list: KeyedJaggedTensor,\n seq_len: int,\n ):\n # Convert id_list data as id score list data with weight = 1\n state_id_list._weights = torch.ones_like(state_id_list.values())\n action_id_list._weights = torch.ones_like(action_id_list.values())\n\n # For each step, we merge all sparse data into one KeyedJaggedTensor\n state_id_list_per_step = split_sequence_keyed_jagged_tensor(\n state_id_list, seq_len\n )\n state_id_score_list_per_step = split_sequence_keyed_jagged_tensor(\n state_id_score_list, seq_len\n )\n action_id_list_per_step = split_sequence_keyed_jagged_tensor(\n action_id_list, seq_len\n )\n action_id_score_list_per_step = split_sequence_keyed_jagged_tensor(\n action_id_score_list, seq_len\n )\n sparse_data_per_step = [\n KeyedJaggedTensor.concat(\n [\n state_id_list_per_step[i],\n action_id_list_per_step[i],\n state_id_score_list_per_step[i],\n action_id_score_list_per_step[i],\n ]\n )\n for i in range(seq_len)\n ]\n return sparse_data_per_step", "def train_autoencoder(self, x_train, x_test):\n # train the autoencoder\n batch_size = 32\n self.autoencoder.fit(x_train,\n x_train,\n validation_data=(x_test, x_test),\n epochs=10,\n batch_size=batch_size)", "def test_import_sparse_values_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.mat').toarray())" ]
[ "0.63621956", "0.62124324", "0.60858965", "0.59326434", "0.5876335", "0.5779511", "0.576974", "0.5758959", "0.5684206", "0.56325567", "0.56308913", "0.5587562", "0.55547976", "0.5466962", "0.5456364", "0.52503127", "0.5241987", "0.5203801", "0.5184267", "0.51695746", "0.51491255", "0.5141162", "0.5115967", "0.51068705", "0.5081264", "0.5074779", "0.5066938", "0.50650316", "0.502198", "0.50199896", "0.49938658", "0.4970645", "0.4962511", "0.49578118", "0.4944767", "0.49422845", "0.49396756", "0.49325034", "0.49290785", "0.49188292", "0.49076566", "0.49055567", "0.49037126", "0.48914275", "0.48750046", "0.48665345", "0.48648623", "0.4864014", "0.48634076", "0.4849095", "0.48490903", "0.4844682", "0.48360378", "0.4833353", "0.48258802", "0.48032802", "0.47932386", "0.47905818", "0.47866955", "0.47859707", "0.47818694", "0.4778728", "0.4777008", "0.47713703", "0.4765594", "0.47637913", "0.4758435", "0.47535267", "0.4748295", "0.47318748", "0.47314313", "0.47251078", "0.47217068", "0.47197405", "0.47190687", "0.47028613", "0.46990544", "0.4698907", "0.4687868", "0.46853688", "0.4675385", "0.4668097", "0.46633464", "0.46547946", "0.46526003", "0.46354112", "0.46326736", "0.46262908", "0.46064004", "0.4593993", "0.45924968", "0.45921504", "0.45884025", "0.45797792", "0.45797178", "0.457738", "0.4574273", "0.455133", "0.4550055", "0.45423692" ]
0.63101643
1
Create a ContractiveAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',
             out_transfer='identity', reconstruct_loss='squared',
             c_jacobian=1, tied_weights=True, batch_size=None,
             optimizer='lbfgs', max_iter=1000, verbose=False):
    super(ContractiveAutoEncoder, self).__init__(
        n_inpt, n_hidden, hidden_transfer, out_transfer,
        reconstruct_loss, c_jacobian, tied_weights)
    self.batch_size = batch_size
    self.optimizer = optimizer
    self.f_transform = None
    self.f_reconstruct = None
    self.parameters.data[:] = np.random.standard_normal(
        self.parameters.data.shape).astype(theano.config.floatX)
    self.max_iter = max_iter
    self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss=['binary_crossentropy',\n 'binary_crossentropy'],\n loss_weights=[self.reconst_weight,\n self.adv_weight])\n else:\n autoencoder = Model(self.encoder.input,\n self.decoder(self.encoder.output))\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss='mse')\n return autoencoder", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder", "def _define_encoder(self):\n raise NotImplementedError", "def build_full_conv_autoencoder():\n input_img = Input(shape=(84, 84, 3))\n\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c3')(x)\n encoded = MaxPooling2D((3, 3), border_mode='same')(x)\n\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c4')(encoded)\n x = UpSampling2D((3, 3))(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c5')(x)\n x = UpSampling2D((2, 2))(x)\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c6')(x)\n x = UpSampling2D((2, 2))(x)\n decoded = Convolution2D(3, 4, 4, activation='sigmoid', border_mode='same', name='c7')(x)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder", "def build_conv_combo_autoencoder():\n input_img = Input(shape=(84, 84, 1))\n\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((3, 3), border_mode='same')(x)\n x = Flatten()(x)\n encoded = Dense(512, activation='relu')(x)\n\n encoded_input = Input((512,))\n d1 = Dense(9408, activation='relu')(encoded_input)\n d2 = Reshape((14, 14, 
48))(d1)\n d3 = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c5')(d2)\n d4 = UpSampling2D((3, 3))(d3)\n d5 = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c6')(d4)\n d6 = UpSampling2D((2, 2))(d5)\n decoded = Convolution2D(1, 4, 4, activation='relu', border_mode='same', name='c9')(d6)\n\n encoder = Model(input=input_img, output=encoded, name='conv_encoder')\n decoder = Model(input=encoded_input, output=decoded, name='conv_decoder')\n\n autoencoder = Sequential(name='full_conv_autoencoder')\n autoencoder.add(encoder)\n autoencoder.add(decoder)\n\n encoder.compile(optimizer='adam', loss='mse')\n encoder.summary()\n decoder.compile(optimizer='adam', loss='mse')\n decoder.summary()\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder, encoder, decoder", "def build_autoencoder(self):\n # first build the encoder model\n inputs = Input(shape=(self.state_dim, ), name='state')\n feature_size = 32\n x = Dense(256, activation='relu')(inputs)\n x = Dense(128, activation='relu')(x)\n feature = Dense(feature_size, name='feature_vector')(x)\n\n # instantiate encoder model\n self.encoder = Model(inputs, feature, name='encoder')\n self.encoder.summary()\n plot_model(self.encoder,\n to_file='encoder.png', \n show_shapes=True)\n\n # build the decoder model\n feature_inputs = Input(shape=(feature_size,), \n name='decoder_input')\n x = Dense(128, activation='relu')(feature_inputs)\n x = Dense(256, activation='relu')(x)\n outputs = Dense(self.state_dim, activation='linear')(x)\n\n # instantiate decoder model\n self.decoder = Model(feature_inputs, \n outputs, \n name='decoder')\n self.decoder.summary()\n plot_model(self.decoder, \n to_file='decoder.png', \n show_shapes=True)\n\n # autoencoder = encoder + decoder\n # instantiate autoencoder model\n self.autoencoder = Model(inputs, \n self.decoder(self.encoder(inputs)),\n name='autoencoder')\n self.autoencoder.summary()\n plot_model(self.autoencoder, \n to_file='autoencoder.png', \n show_shapes=True)\n\n # Mean Square Error (MSE) loss function, Adam optimizer\n self.autoencoder.compile(loss='mse', optimizer='adam')", "def encoder(self) -> json.JSONEncoder:\n return encoder_from_string(self.doc.get('encoder'))", "def simple_autoencoder(X_train_input, X_test_input, n_components = 100):\r\n ncol = X_train_input.shape[1]\r\n input_dim = Input(shape = (ncol,))\r\n \r\n # Define the number of encoder dimensions\r\n encoding_dim = n_components\r\n \r\n # Define the encoder layer\r\n encoded = Dense(encoding_dim, activation = 'relu')(input_dim)\r\n \r\n # Define the decoder layer\r\n decoded = Dense(ncol, activation = 'tanh')(encoded)\r\n \r\n # Combine the encoder and decoder into a model\r\n autoencoder = Model(inputs = input_dim, outputs = decoded)\r\n \r\n # Configure and train the autoencoder\r\n autoencoder.compile(optimizer = 'adam', loss = 'mse')\r\n autoencoder.fit(X_train_input, X_train_input, epochs = 50, batch_size = 128, shuffle = True,\r\n validation_data = (X_test_input, X_test_input),verbose = 1)\r\n \r\n # Use the encoder to extract the reduced dimension from the autoencoder\r\n encoder = Model(inputs = input_dim, outputs = encoded)\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n x = tf.keras.layers.Input(shape=(1,), dtype=tf.string)\n\n h = tf.keras.layers.Lambda(UniversalEmbedding, 
output_shape=(512,))(x)\n\n return Model(inputs=x, outputs=h, name='encoder')", "def registerEncoder (encoder):\n assert False, \"TODO:\"", "def build(self) -> KM.Model:\n\n # For decoder number of features in opposite order of encoder\n decoder_features = self.encoder_features.copy()\n decoder_features.reverse()\n\n # build the encoder model\n self.encoder_model = self.encoder(\n features=self.encoder_features, name=\"encoder\"\n )\n\n # build the decoder model\n decoder = self.decoder(features=decoder_features, name=\"decoder\")\n\n input_tensor = KL.Input(\n shape=(32, 32, 3)\n ) # shape of images for cifar10 dataset\n\n # Encode the images\n encoded = self.encoder_model(input_tensor)\n # Decode the image\n decoded = decoder(encoded[-1])\n\n return KM.Model(inputs=input_tensor, outputs=decoded, name=\"AutoEncoder\")", "def build(\n n_iter=500, encoding_dim=3, depth=2, nh=20, activation='linear',\n initial_learning_rate=1e-3, solver='Adam', batch_size=32,\n random_state=10, early_stopping=False, patience=10, lamda=1e-1,\n knob_cols=None, auto_refit=True, max_refit_attempts=10):\n assert knob_cols is not None\n\n encoder_hidden_layers = [int(nh / (2**i)) for i in range(depth - 1)]\n if len(encoder_hidden_layers) > 0:\n if 0 in encoder_hidden_layers or encoder_hidden_layers[-1] < encoding_dim:\n return None\n decoder_hidden_layers = encoder_hidden_layers[::-1]\n hidden_layer_sizes = encoder_hidden_layers + \\\n [encoding_dim] + decoder_hidden_layers\n activations = [activation] * 2 * depth\n ae = FancyAutoEncoder(\n n_iter, hidden_layer_sizes, activations, initial_learning_rate,\n solver=solver, batch_size=batch_size, random_state=random_state,\n early_stopping=early_stopping, patience=patience, lamda=lamda,\n knob_cols=knob_cols, auto_refit=auto_refit,\n max_refit_attempts=max_refit_attempts)\n return ae", "def auto_encoder(data: np.ndarray) -> np.ndarray:\n input_img = Input(shape=(784,))\n encoded = Dense(128, activation='relu')(input_img)\n encoded = Dense(64, activation='relu')(encoded)\n encoded = Dense(32, activation='relu')(encoded)\n\n decoded = Dense(64, activation='relu')(encoded)\n decoded = Dense(128, activation='relu')(decoded)\n decoded = Dense(784, activation='sigmoid')(decoded)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.fit(x_train, x_train,\n epochs=100,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test))", "def build_encoder(opt, embeddings):\n enc_type = opt.encoder_type if opt.model_type == \"text\" else opt.model_type\n return str2enc[enc_type].from_opt(opt, embeddings)", "def __init__(self, n_inpt, n_hidden, hidden_transfer='identity',\n out_transfer='identity', loss='squared', tied_weights=True,\n batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(AutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer, loss, tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def autoencoder(self, data):\n with tf.variable_scope(\"autoencoder\"):\n latent = self.encoder(data)\n _, output = self.decoder(latent)\n\n return output, latent", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n n_stacks = len(dims) - 1\n # input\n input_img = Input(shape=(dims[0],), name='input')\n x = 
input_img\n # internal layers in encoder\n for i in range(n_stacks-1):\n x = Dense(dims[i + 1], activation=act, kernel_initializer=init, name='encoder_%d' % i)(x)\n\n # hidden layer\n encoded = Dense(dims[-1], kernel_initializer=init, name='encoder_%d' % (n_stacks - 1))(x) # hidden layer, features are extracted from here\n\n x = encoded\n # internal layers in decoder\n for i in range(n_stacks-1, 0, -1):\n x = Dense(dims[i], activation=act, kernel_initializer=init, name='decoder_%d' % i)(x)\n\n # output\n x = Dense(dims[0], kernel_initializer=init, name='decoder_0')(x)\n decoded = x\n return Model(inputs=input_img, outputs=decoded, name='AE'), Model(inputs=input_img, outputs=encoded, name='encoder')", "def __init__(self, autoencoder, latent_space):\r\n self._autoencoder = autoencoder\r\n self._latent_space = latent_space", "def train_autoencoder(self, x_train, x_test):\n # train the autoencoder\n batch_size = 32\n self.autoencoder.fit(x_train,\n x_train,\n validation_data=(x_test, x_test),\n epochs=10,\n batch_size=batch_size)", "def _create_encoder(self):\n logger.debug(\"GumBolt::_create_encoder\")\n return HierarchicalEncoder(\n input_dimension=self._flat_input_size,\n n_latent_hierarchy_lvls=self.n_latent_hierarchy_lvls,\n n_latent_nodes=self.n_latent_nodes,\n n_encoder_layer_nodes=self.n_encoder_layer_nodes,\n n_encoder_layers=self.n_encoder_layers,\n skip_latent_layer=False,\n smoother=\"Gumbel\",\n cfg=self._config)", "def __call__(self, *args, **kwargs):\n return self.encoder_model(args[0])", "def __call__(self, *args, **kwargs):\n return self.encoder_model(args[0])", "def build_encoder(opt, embeddings, structure_embeddings):\n return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings, structure_embeddings)", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def encoder(self, inputs):\n pass", "def __init__(self, **kwargs):\n var_defaults = {\n \"bias_init\" : 'zeros',\n \"weight_init\" : [0.0, 0.1],\n \"seed\" : None,\n \"num_hid_nodes\" : 32,\n \"activations\": 'sigmoid',\n \"lr\" : 0.1,\n \"decay\": 0,\n \"momentum\" : 0,\n \"nesterov\" : False,\n \"loss\" : 'mean_squared_error',\n \"epochs\" : 10,\n \"batch_size\" : 256,\n \"verbose\" : 2\n }\n for var, default in var_defaults.items():\n setattr(self, var, kwargs.get(var, default))\n self.autoencoder = Sequential()", "def deep_autoencoder(X_train_input, X_test_input, encoding_dim = 20):\r\n input_dim = X_train_input.shape[1]\r\n \r\n autoencoder = Sequential()\r\n \r\n # Encoder Layers\r\n autoencoder.add(Dense(4 * encoding_dim, input_shape=(input_dim,), activation='relu'))\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(encoding_dim, activation='relu'))\r\n \r\n # Decoder Layers\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(4 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(input_dim, activation='sigmoid'))\r\n \r\n autoencoder.compile(optimizer='adam', loss='binary_crossentropy')\r\n autoencoder.fit(X_train_input, X_train_input,\r\n epochs=50,\r\n batch_size=256,\r\n validation_data=(X_test_input, X_test_input))\r\n \r\n input_img = Input(shape=(input_dim,))\r\n encoder_layer1 = autoencoder.layers[0]\r\n encoder_layer2 = autoencoder.layers[1]\r\n encoder_layer3 = autoencoder.layers[2]\r\n encoder = Model(input_img, 
encoder_layer3(encoder_layer2(encoder_layer1(input_img))))\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def resnet_autoencoder_v1(encoder_depth, decoder_depth, width_multiplier, metric_channels, # noqa\n cifar_stem=False, data_format='channels_last',\n dropblock_keep_probs=None, dropblock_size=None,\n mask_augs=0., greyscale_viz=False, skip=True):\n encoder = resnet_encoder_v1(encoder_depth, \n width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n decoder = resnet_decoder_v1(decoder_depth=decoder_depth,\n encoder_depth=encoder_depth,\n width_multiplier=width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n metric = learned_metric_v1(data_format=data_format, metric_channels=metric_channels) \n \n return resnet_autoencoder_v1_generator(\n encoder=encoder,\n decoder=decoder,\n metric=metric,\n skip=skip,\n mask_augs=mask_augs,\n greyscale_viz=greyscale_viz,\n data_format=data_format)", "def encoder(self) -> IntegerEncoder:\n\n return self._encoder", "def build_cae_encoder(self):\n _input = Input(shape=self.input_shape) # adapt this if using 'channels_first' image data format\n\n _x = Conv2D(32, (3, 3), activation=self.hidden_activation, padding='same')(_input)\n _x = MaxPooling2D((2, 2), padding='same')(_x)\n _x = Conv2D(32, (3, 3), activation=self.hidden_activation, padding='same')(_x)\n _x = MaxPooling2D((2, 2), padding='same')(_x)\n _x = Conv2D(16, (3, 3), activation=self.hidden_activation, padding='same')(_x)\n _encoded = MaxPooling2D((2, 2), padding='same')(_x)\n\n return Model(_input, _encoded, name='cae_encoder')", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n encoder = None\n if encoder_type == \"text\":\n if self.text_encoding == \"tfidf\":\n encoder = self._fit_tfidf(\n frame[prop], max_dim=self.text_encoding_max_dimension)\n elif self.text_encoding == \"word2vec\":\n encoder = self._fit_word2vec(frame[prop])\n elif encoder_type == \"category\":\n encoder = self._fit_multibin(frame[prop])\n elif encoder_type == \"numeric\":\n if self.standardize_numeric:\n encoder = self._fit_standard_scaler(\n frame[prop],\n missing_numeric=self.missing_numeric,\n imputation_strategy=self.imputation_strategy)\n return encoder", "def __init__(self, encoder_base: int = 2, encoder_precision: int = 16) -> None:\n self.encoder_base = encoder_base\n self.encoder_precision = encoder_precision", "def build_encoder(shift):\n ### TODO.", "def encode(self, input_):\n return self.encoder(input_)", "def register_for_auto_class(cls, auto_class=\"FlaxAutoModel\"):\n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class", "def build_encoder(img_shape):\n input_img = Input(shape=(img_shape)) \n x = Conv2D(16, (3, 3), activation='tanh', padding='same')(input_img)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.5)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), 
padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((3, 3), padding='same')(x)\n x = Flatten()(x)\n encoded = Dense(540, activation='tanh')(x)\n Encoder=Model(input_img,encoded,name='encoder')\n return input_img,encoded,Encoder", "def _apply_encoder(self, frame, prop, encoder, encoder_type=\"category\"):\n pass", "def create(cls: Type[Sequence], sequence: bytes, alphabet: Alphabet) -> Sequence:\n return cls(lib.imm_seq_create(sequence, alphabet.imm_abc), alphabet)", "def test_init(self):\n default_encoder_type = type(Encoder())\n\n payload = Payload()\n self.assertIsInstance(payload.encoder, default_encoder_type)\n\n json_encoder = JSONEncoder()\n payload = Payload(encoder=json_encoder)\n self.assertEqual(payload.encoder, json_encoder)", "def make(self, **kwargs):\n return bytes()", "def __new__(cls, *args, **kwargs):\n if cls.__name__ != 'Codec':\n return super().__new__(cls)\n if kwargs.get('type'):\n t_cls = ClassFactory.get_cls(ClassType.CODEC, kwargs.pop('type'))\n else:\n t_cls = ClassFactory.get_cls(ClassType.CODEC)\n return super().__new__(t_cls)", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n pass", "def encode(\n cls: Type[\"DataDocument\"], encoding: str, data: D, **kwargs: Any\n ) -> \"DataDocument[D]\":\n # Dispatch encoding\n blob = lookup_serializer(encoding).dumps(data, **kwargs)\n\n inst = cls(blob=blob, encoding=encoding)\n inst._cache_data(data)\n return inst", "def get_trainer(self):\n return AutoEncoderTrainer", "def fit_transform(self):\n if self.enc_types == \"label\":\n return self._label_encoding()\n elif self.enc_types == \"ohe\":\n return self._one_hot_encoder()\n elif self.enc_types == \"binary\":\n return self._binarization()\n else:\n raise Exception(\"Encoding type not understood\")", "def base_encoder(cls, data, init_encoder, downsize_encoder, input_encoder):\n #todo: maybe do positional encoding before passing to init_encoder\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data)", "def test_autoencoder():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = read_images(dataset_path, labels_list=['normal', 'glare_small'], test_size=0.25)\n X_train_im = []\n for im in X_train:\n img = preprocess_image(im)\n img = np.array(img)\n img = img.flatten()\n X_train_im.append(img)\n X_train_im = np.array(X_train_im)\n\n X_test_im = []\n for im in X_test:\n img = preprocess_image(im)\n img = np.array(img)\n img = img.flatten()\n X_test_im.append(img)\n X_test_im = np.array(X_test_im)\n\n autoenc = VAE(encoder_neurons=[16, 32], decoder_neurons=[32, 16], latent_dim=32, epochs=50)\n autoenc.fit(X_train_im, y_train)\n y_pred = autoenc.predict(X_test_im)\n y_test_scores = autoenc.decision_function(X_test_im)\n conf_mtx_test = confusion_matrix(y_test, y_pred, labels=[0, 1])\n evaluate_print('vae', y_test, y_test_scores)\n print(conf_mtx_test)", "def testEncoder(self):\n params = copy.copy(self.typical_instance)\n params.prob_f = 0.5\n params.prob_p = 0.5\n params.prob_q = 0.75\n\n rand_funcs = rappor.SimpleRandFuncs(params, MockRandom())\n rand_funcs.cohort_rand_fn = lambda a, b: a\n e = rappor.Encoder(params, 0, rand_funcs=rand_funcs)\n\n cohort, bloom_bits_irr = e.encode(\"abc\")\n\n self.assertEquals(0, cohort)\n self.assertEquals(0x000ffff, bloom_bits_irr)", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 1)", "def build_label_transform():\n\n return NALabelEncoder()", "def get_or_make_label_encoder(params, problem, mode, label_list=None, 
zero_class=None):\n problem_path = params.ckpt_dir\n create_path(problem_path)\n le_path = os.path.join(problem_path, '%s_label_encoder.pkl' % problem)\n\n if mode == 'train' and not os.path.exists(le_path):\n label_encoder = LabelEncoder()\n\n label_encoder.fit(label_list, zero_class=zero_class)\n\n label_encoder.dump(le_path)\n\n else:\n label_encoder = LabelEncoder()\n label_encoder.load(le_path)\n\n return label_encoder", "def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder':\n encoder_seq = EncoderSequence([], dtype=config.dtype)\n cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,\n config.model_size,\n config.max_seq_len_source,\n fixed_pos_embed_scale_up_input=True,\n fixed_pos_embed_scale_down_positions=False,\n prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)\n encoder_seq.append(cls, **encoder_params)\n if config.conv_config is not None:\n encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,\n prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)\n\n encoder_seq.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX)\n\n return encoder_seq", "def __init__(self, coder):\n self.coder = coder", "def autoencoder(input_dims, filters, latent_dims):\n e_inputs = keras.Input(input_dims)\n d_inputs = keras.Input(latent_dims)\n\n encoder = e_inputs\n for f in filters:\n encoder = keras.layers.Conv2D(\n f, (3, 3), activation='relu', padding='same')(encoder)\n encoder = keras.layers.MaxPooling2D((2, 2), padding='same')(encoder)\n\n decoder = d_inputs\n for i in reversed(range(1, len(filters))):\n decoder = keras.layers.Conv2D(\n filters[i], (3, 3), activation='relu', padding='same')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n\n decoder = keras.layers.Conv2D(\n filters[0], (3, 3), activation='relu', padding='valid')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n decoder = keras.layers.Conv2D(input_dims[-1], (3, 3),\n activation='sigmoid',\n padding='same')(decoder)\n\n encoder = keras.Model(e_inputs, encoder)\n decoder = keras.Model(d_inputs, decoder)\n\n auto = keras.Model(e_inputs, decoder(encoder(e_inputs)))\n auto.compile(optimizer=\"adam\", loss=\"binary_crossentropy\")\n\n return encoder, decoder, auto", "def SemiAutoencoder(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = 
Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def __init__(self, encoder=None, **kwargs):\n if encoder is None:\n encoder = dumps\n self.json_settings = kwargs\n self._encoder = encoder", "def encoder(self, features=[8], name=\"encoder\") -> KM.Model:\n input_tensor = KL.Input(\n shape=(32, 32, 3)\n ) # shape of images for cifar10 dataset\n encoded = KL.Conv2D(\n features[0],\n 3,\n strides=(2, 2),\n padding=\"same\",\n use_bias=False,\n name=name + f\"_conv_{1}\",\n )(input_tensor)\n encoded = KL.Activation(\"relu\")(KL.BatchNormalization()(encoded))\n encoded_list = [encoded]\n\n # Prepare the skip tensor from input\n skip_input_tensor = KL.Activation(\"relu\")(\n KL.BatchNormalization()(\n KL.Conv2D(features[0], 1, strides=1, use_bias=False)(input_tensor)\n )\n )\n skip_input_tensor = KL.SpatialDropout2D(rate=0.2)(skip_input_tensor)\n skip_input_tensor = KL.AveragePooling2D(pool_size=(2, 2), strides=2)(\n skip_input_tensor\n )\n skip_tensors = tf.concat(\n [\n skip_input_tensor, # Routing info from input tensor to next levels\n encoded, # Routes info from second level to next levels\n ],\n axis=-1,\n )\n for i, feature_num in enumerate(features[1:], start=2):\n encoded, skip_tensors = conv_block(\n encoded,\n skip_tensors,\n features_in=features[i - 2],\n features_out=feature_num,\n name=name + f\"_conv_{i}\",\n )\n encoded_list.append(encoded)\n return KM.Model(inputs=input_tensor, outputs=encoded_list, name=name)", "def autoencoder(X, inp_dims=2048):\n drop = tf.keras.layers.Dropout(rate=0.2)\n FC1 = tf.layers.Dense(units=inp_dims // 2, activation=\"tanh\", name='fc1')\n FC2 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc2')\n FC3 = tf.layers.Dense(units=inp_dims // 8, activation=None, name='fc3')\n Act = tf.keras.layers.Activation(activation=\"tanh\")\n # FC4 = tf.layers.Dense(units=inp_dims // 8,activation=\"tanh\",name='fc4')\n FC5 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc5')\n FC6 = tf.layers.Dense(units=inp_dims // 2, activation=None, name='fc6')\n FC7 = tf.layers.Dense(units=inp_dims, activation=None, name='fc7')\n X = FC1(drop(X))\n X = FC2(drop(X))\n X = FC3(X)\n fea = X\n X_up = Act(X)\n X_up = FC5(X_up)\n X_up = FC6(drop(X_up))\n pred = FC7(drop(X_up))\n return pred, fea", "def one_hot_encode(x):\n\n # check if encoder has been previously created, if not make a global var an initialize 
it\n if 'encoder' not in globals():\n global encoder\n encoder = LabelBinarizer()\n encoder.fit(range(10))\n\n return encoder.transform(x)", "def getLabelEncoder():\n classes = list(string.letters + string.digits)\n classes.append('')\n le = LabelEncoder()\n le.fit(classes)\n\n return le", "def default_encoder(obj):\n\n if isinstance(obj, np.ndarray):\n data_b64 = base64.b64encode(np.ascontiguousarray(obj).data)\n return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape)\n\n if isinstance(obj, dict):\n result = dict()\n for k, v in obj.items():\n result[k] = default_encoder(v)\n\n return result\n\n return obj", "def encode(self): # pragma: no cover\n pass", "def node_encoder_construct(cfg, model_name='node_encoder', **kwargs):\n encoders = node_encoder_dict()\n encoder_cfg = cfg[model_name]\n name = encoder_cfg.get('name', 'geo')\n if not name in encoders:\n raise Exception(\"Unknown node encoder name provided:\", name)\n\n return encoders[name](encoder_cfg, **kwargs)", "def encoder(self, value):\n self._tensor.encoder = value", "def encode(image):\n from encoder import launch\n launch(image)", "def encode(self):\n \n assert False, \"Not implemented.\"", "def fit_autoencoder(df, latent_dim=8):\n df = np.asarray(df)\n input_dim = df.shape[1]\n\n class Autoencoder(Model):\n def __init__(self, latent_dim):\n super(Autoencoder, self).__init__()\n self.latent_dim = latent_dim\n self.encoder = tf.keras.Sequential([\n layers.Dense(input_dim, activation='tanh'),\n layers.Dense(20, activation='tanh'),\n layers.Dense(15, activation='tanh'),\n layers.Dense(latent_dim, activation='elu')\n ])\n self.decoder = tf.keras.Sequential([\n layers.Dense(20, activation='tanh'),\n layers.Dense(input_dim, activation='tanh')\n ])\n\n def call(self, x):\n encoded = self.encoder(x)\n decoded = self.decoder(encoded)\n return decoded\n\n autoencoder = Autoencoder(latent_dim)\n autoencoder.compile(optimizer='adam',\n loss='mse')\n autoencoder.fit(df,\n df,\n epochs=50 if DEBUG else 250,\n shuffle=True,\n batch_size=256,\n validation_split=.2 if DEBUG else .1)\n return autoencoder", "def encode(input):\n return ModelEncoder().encode(input)", "def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',\n out_transfer='identity', reconstruct_loss='squared',\n c_sparsity=1, sparsity_loss='bern_bern_kl',\n sparsity_target=0.01,\n tied_weights=True, batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(SparseAutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer,\n reconstruct_loss, c_sparsity, sparsity_loss, sparsity_target,\n tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def __init__(self, *args):\n this = _libsbml.new_FbcToCobraConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def autoencoder_model(optimizer, learning_rate, \n filter_block1, kernel_size_block1, \n filter_block2, kernel_size_block2, \n filter_block3, kernel_size_block3, \n filter_block4, kernel_size_block4, \n activation_str, padding):\n # Input Tensors - fully conv\n input_img = Input(shape=(None, None, 1))\n # Encoder Part\n x = Conv2D(filters=filter_block1, kernel_size=kernel_size_block1, padding=padding)(input_img) # 420x540x32\n x = Activation('relu')(x)\n x = MaxPooling2D()(x) # 210x270x32\n encoded = 
Conv2D(filters=filter_block2, kernel_size=kernel_size_block2, padding=padding)(x) # 105x135x32\n # Decoder Part\n x = Conv2D(filters=filter_block3, kernel_size=kernel_size_block3, padding=padding)(encoded) # 210x270x32\n x = Activation('relu')(x)\n x = UpSampling2D()(x) # 420x540x32\n decoded = Conv2D(filters=filter_block4, kernel_size=kernel_size_block4, activation='sigmoid', padding=padding)(x) # 420x540x1\n\n # Build the model\n autoencoder = Model(inputs=input_img, outputs=decoded)\n opt = optimizer(learning_rate=learning_rate)\n autoencoder.compile(loss=\"binary_crossentropy\", optimizer=opt)\n autoencoder.summary()\n return autoencoder", "def get_convolutional_encoder(config: ConvolutionalEncoderConfig, prefix: str) -> 'Encoder':\n encoder_seq = EncoderSequence([], dtype=config.dtype)\n cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,\n config.num_embed,\n max_seq_len=config.max_seq_len_source,\n fixed_pos_embed_scale_up_input=False,\n fixed_pos_embed_scale_down_positions=True,\n prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)\n encoder_seq.append(cls, **encoder_params)\n encoder_seq.append(ConvolutionalEncoder, config=config)\n return encoder_seq", "def __init__(self, encoding=None, object_hook=None):\n self.encoding = encoding\n self.object_hook = object_hook", "def _get_encoder(self, params):\n # TODO: refactor method\n\n # Check if encoder was already trained with these parameters\n encoder_id = Encoder.generate_id(params)\n self._logger.debug(\"Retrieving encoder model: \" + str(encoder_id))\n\n # Check if matching encoder is in memory\n if encoder_id in self._trained_encoders:\n self._logger.debug(\"Loading encoder from in-memory cache: \" + str(encoder_id))\n return self._trained_encoders[encoder_id]\n else:\n # Check if matching encoder on disk\n prev_model = None\n if self._encoder_dir is not None:\n prev_model = Encoder.load_if_exists(self._encoder_dir, encoder_id)\n\n if prev_model is not None:\n self._logger.debug(\"Loaded encoder from disk-cache: \" + str(encoder_id))\n encoder = Encoder(params)\n docs = self._get_docs(encoder, params['doc2vec_docs'])\n encoder.set_documents(docs)\n encoder.set_model(prev_model)\n self._trained_encoders[encoder_id] = encoder\n return encoder\n else:\n self._logger.debug(\"Training new encoder model: \" + str(encoder_id))\n encoder = Encoder(params)\n docs = self._get_docs(encoder, params['doc2vec_docs'])\n encoder.set_documents(docs)\n encoder.train()\n self._trained_encoders[encoder_id] = encoder\n self._logger.debug(\"Added encoder to cache: \" + str(encoder_id))\n\n # Save encoder\n if self._encoder_dir is not None:\n encoder.save(self._encoder_dir + \"/\" + encoder_id)\n return encoder", "def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:\n params = dict(kwargs)\n if infer_hidden:\n params['num_hidden'] = self.get_num_hidden()\n\n sig_params = inspect.signature(cls.__init__).parameters\n if 'dtype' in sig_params and 'dtype' not in kwargs:\n params['dtype'] = self.dtype\n encoder = cls(**params)\n self.encoders.append(encoder)\n return encoder", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 16)", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 8)", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 2)", "def get_encoder(encoding):\n if encoding == Encoding.V1_THRIFT:\n return _V1ThriftEncoder()\n if encoding == Encoding.V1_JSON:\n return _V1JSONEncoder()\n if 
encoding == Encoding.V2_JSON:\n return _V2JSONEncoder()\n if encoding == Encoding.V2_PROTO3:\n return _V2ProtobufEncoder()\n raise ZipkinError(\"Unknown encoding: {}\".format(encoding))", "def __init__(self, params, model, name=\"ds2_encoder\", mode='train'):\n super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)", "def codec(self):\n raise NotImplementedError", "def encode(self) -> bytes:\n\n return bytes()", "def encode(self) -> bytes:\n\n return bytes()", "def instantiate_algorithm(args):\n if args.algorithm == 'xor_encoding':\n return XorEncoding(block_size=args.block_size, intensity=args.intensity)\n\n raise RuntimeError('Algorithm type not detected')", "def setup_encoder_initializer(self):\n if self.mode != \"inference\":\n # Restore inception variables only.\n saver = tf.train.Saver(self.autoencoder_variables)\n\n def restore_fn(sess):\n tf.logging.info(\"Restoring Autoencoder variables from checkpoint dir %s\",\n self.config.autoencoder_checkpoint_dir)\n saver.restore(sess, tf.train.latest_checkpoint(\n self.config.autoencoder_checkpoint_dir))\n\n if self.use_pretrained_ae:\n self.init_fn = restore_fn\n else:\n self.init_fn = None", "def Init(*args, **kwargs):\n return _gdi_.EncodingConverter_Init(*args, **kwargs)", "def load_encoder(checkpoint, encoder_cls,\n HIDDEN_SIZE, embedding, ENCODER_N_LAYERS, DROPOUT, encoder_name, bidirectional):\n model = encoder_cls(HIDDEN_SIZE, embedding, ENCODER_N_LAYERS, DROPOUT,\n gate=encoder_name, bidirectional=bidirectional)\n model.load_state_dict(checkpoint['en'])\n model.eval()\n return model", "def create_encoding(df):\n vocab = []\n vocab_df = df[\"company\"] + df[\"address\"] + df[\"date\"] + df[\"total\"]\n [vocab.extend(row) for row in vocab_df]\n enc = LabelEncoder()\n enc.fit(vocab)\n return enc", "def edge_encoder_construct(cfg, model_name='edge_encoder', **kwargs):\n encoders = edge_encoder_dict()\n encoder_cfg = cfg[model_name]\n name = encoder_cfg.get('name', 'geo')\n if not name in encoders:\n raise Exception(\"Unknown edge encoder name provided:\", name)\n\n return encoders[name](encoder_cfg, **kwargs)", "def _define_encoder(self):\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\n nn.SELU(),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.SELU(),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.SELU(),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\n )", "def __init__(self, shift):\n encoder, decoder = self._make_coder_dicts(shift)\n self._encoder = encoder\n self._decoder = decoder", "def __init__(self, shift):\n encoder, decoder = self._make_coder_dicts(shift)\n self._encoder = encoder\n self._decoder = decoder", "def coder(self):\r\n return self.coder", "def __init__(self, CC: object, auto_offset_reset: str=\"latest\"):\n self.config = CC.config\n if self.config[\"messaging_service\"]!=\"none\" and \"kafka\" in self.config and self.config['messaging_service']==\"kafka\":\n self.hostIP = self.config['kafka']['host']\n self.hostPort = self.config['kafka']['port']\n self.auto_offset_reset= auto_offset_reset\n self.producer = KafkaProducer(bootstrap_servers=str(self.hostIP)+\":\"+str(self.hostPort), api_version=(0,10),\n value_serializer=lambda v: json.dumps(v).encode('utf-8'),\n compression_type='gzip')\n\n self.consumer = 
KafkaConsumer(bootstrap_servers=str(self.hostIP)+\":\"+str(self.hostPort), api_version=(0,10),\n auto_offset_reset=self.auto_offset_reset)", "def create(cls, _):\n return cls", "def set_encoder(attribute: str, encoder: typing.Callable) -> None:\n if encoder is not None:\n __attribute_decoders[attribute.lower()] = encoder", "def __init__(self, *args):\n this = _libsbml.new_CobraToFbcConverter(*args)\n try: self.this.append(this)\n except: self.this = this", "def create_character(c: Character) -> Character:\n c.create_character(c.dna_generator)\n return c", "def autoencoder3(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 4)" ]
[ "0.59766114", "0.5971633", "0.58636886", "0.57971674", "0.565155", "0.5615126", "0.54221565", "0.53008664", "0.52868307", "0.5247722", "0.524635", "0.52189344", "0.52125883", "0.51889825", "0.51658624", "0.51599807", "0.5148567", "0.5141506", "0.51331234", "0.511349", "0.50875247", "0.50875247", "0.5087345", "0.5085928", "0.5072798", "0.5041415", "0.4997499", "0.4968858", "0.4951028", "0.49446365", "0.49363697", "0.49270812", "0.4888129", "0.4878017", "0.48771298", "0.48374194", "0.48330373", "0.48155212", "0.480567", "0.4756163", "0.47526664", "0.473315", "0.47120795", "0.46886274", "0.46865788", "0.46850199", "0.46814686", "0.46762964", "0.46679202", "0.46679184", "0.46607217", "0.4658987", "0.46587858", "0.46515787", "0.46505207", "0.46492606", "0.46453595", "0.46112338", "0.45891878", "0.4589108", "0.45850632", "0.45833698", "0.4577819", "0.457047", "0.4569557", "0.4568892", "0.45631006", "0.4561485", "0.45604694", "0.45587218", "0.4549633", "0.45480874", "0.45475456", "0.4541664", "0.45355007", "0.45340142", "0.45329782", "0.4532329", "0.45276868", "0.4526593", "0.45237145", "0.4522583", "0.4522583", "0.4514471", "0.4507174", "0.45033404", "0.4501437", "0.44981095", "0.44890454", "0.4469567", "0.44694224", "0.44694224", "0.44688278", "0.4467668", "0.44636723", "0.44565564", "0.4456157", "0.44555464", "0.4448781", "0.4440241" ]
0.5832884
3
Create a DenoisingAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid', out_transfer='identity', reconstruct_loss='squared', noise_type='gauss', c_noise=.2, tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(DenoisingAutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, reconstruct_loss, noise_type, c_noise, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None climin.initialize.randomize_normal(self.parameters.data) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_encoder(self):\n raise NotImplementedError", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder", "def SemiAutoencoder(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n 
decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def __init__(self, **kwargs):\n var_defaults = {\n \"bias_init\" : 'zeros',\n \"weight_init\" : [0.0, 0.1],\n \"seed\" : None,\n \"num_hid_nodes\" : 32,\n \"activations\": 'sigmoid',\n \"lr\" : 0.1,\n \"decay\": 0,\n \"momentum\" : 0,\n \"nesterov\" : False,\n \"loss\" : 'mean_squared_error',\n \"epochs\" : 10,\n \"batch_size\" : 256,\n \"verbose\" : 2\n }\n for var, default in var_defaults.items():\n setattr(self, var, kwargs.get(var, default))\n self.autoencoder = Sequential()", "def getLabelEncoder():\n classes = list(string.letters + string.digits)\n classes.append('')\n le = LabelEncoder()\n le.fit(classes)\n\n return le", "def encode(\n cls: Type[\"DataDocument\"], encoding: str, data: D, **kwargs: Any\n ) -> \"DataDocument[D]\":\n # Dispatch encoding\n blob = lookup_serializer(encoding).dumps(data, **kwargs)\n\n inst = cls(blob=blob, encoding=encoding)\n inst._cache_data(data)\n return inst", "def build_label_transform():\n\n return NALabelEncoder()", "def _create_encoder(self):\n logger.debug(\"GumBolt::_create_encoder\")\n return HierarchicalEncoder(\n input_dimension=self._flat_input_size,\n n_latent_hierarchy_lvls=self.n_latent_hierarchy_lvls,\n n_latent_nodes=self.n_latent_nodes,\n n_encoder_layer_nodes=self.n_encoder_layer_nodes,\n n_encoder_layers=self.n_encoder_layers,\n skip_latent_layer=False,\n smoother=\"Gumbel\",\n cfg=self._config)", "def __init__(self, *args, **kwargs):\n super(Encoder, self).__init__(*args, **kwargs)\n self._mask = (1 << self._precision) - 1", "def __init__(self, params, model, name=\"ds2_encoder\", mode='train'):\n super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)", "def __init__(self, n_feature, n_hidden, n_output):\n super(simpleAE, self).__init__()\n self.Encoder = Encoder(n_feature, n_hidden)\n self.Decoder = Decoder(n_hidden, n_output)\n self.model_name = 'simpleAE'", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def train(data, n_visible=16*16, n_hidden=200, batch_size=20,\n learning_rate=0.1, n_epochs=5, beta=3.0, sparsity=0.0,\n weight_decay=0.0, stop_diff=None, corruption_rate=0.3):\n da = DenoisingAutoencoder(n_visible=n_visible, n_hidden=n_hidden)\n da.train(data, batch_size=batch_size, corruption_rate=corruption_rate,\n learning_rate=learning_rate, n_epochs=n_epochs, stop_diff=stop_diff)\n return da", "def registerEncoder (encoder):\n assert False, \"TODO:\"", "def __init__(self, encoder_base: int = 2, encoder_precision: int = 16) -> None:\n self.encoder_base = encoder_base\n self.encoder_precision = encoder_precision", "def encoder(self, inputs):\n pass", "def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss=['binary_crossentropy',\n 
'binary_crossentropy'],\n loss_weights=[self.reconst_weight,\n self.adv_weight])\n else:\n autoencoder = Model(self.encoder.input,\n self.decoder(self.encoder.output))\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss='mse')\n return autoencoder", "def build_encoder(opt, embeddings, structure_embeddings):\n return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings, structure_embeddings)", "def build_encoder(opt, embeddings):\n enc_type = opt.encoder_type if opt.model_type == \"text\" else opt.model_type\n return str2enc[enc_type].from_opt(opt, embeddings)", "def encoder(self) -> json.JSONEncoder:\n return encoder_from_string(self.doc.get('encoder'))", "def base_encoder(cls, data, init_encoder, downsize_encoder, input_encoder):\n #todo: maybe do positional encoding before passing to init_encoder\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data)", "def build_label_transform():\n\n return RobustLabelEncoder(\n labels=['0'], fill_label_value='1', include_unseen_class=True\n )", "def __init__(self, n_inpt, n_hidden, hidden_transfer='identity',\n out_transfer='identity', loss='squared', tied_weights=True,\n batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(AutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer, loss, tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def __init__(self):\n super(BaseRNNEncoder, self).__init__()", "def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',\n out_transfer='identity', reconstruct_loss='squared',\n c_jacobian=1, tied_weights=True, batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(ContractiveAutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer,\n reconstruct_loss, c_jacobian,\n tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def create_encoding(df):\n vocab = []\n vocab_df = df[\"company\"] + df[\"address\"] + df[\"date\"] + df[\"total\"]\n [vocab.extend(row) for row in vocab_df]\n enc = LabelEncoder()\n enc.fit(vocab)\n return enc", "def encoder(self) -> IntegerEncoder:\n\n return self._encoder", "def build_autoencoder(self):\n # first build the encoder model\n inputs = Input(shape=(self.state_dim, ), name='state')\n feature_size = 32\n x = Dense(256, activation='relu')(inputs)\n x = Dense(128, activation='relu')(x)\n feature = Dense(feature_size, name='feature_vector')(x)\n\n # instantiate encoder model\n self.encoder = Model(inputs, feature, name='encoder')\n self.encoder.summary()\n plot_model(self.encoder,\n to_file='encoder.png', \n show_shapes=True)\n\n # build the decoder model\n feature_inputs = Input(shape=(feature_size,), \n name='decoder_input')\n x = Dense(128, activation='relu')(feature_inputs)\n x = Dense(256, activation='relu')(x)\n outputs = Dense(self.state_dim, activation='linear')(x)\n\n # instantiate decoder model\n self.decoder = Model(feature_inputs, \n outputs, \n name='decoder')\n self.decoder.summary()\n 
plot_model(self.decoder, \n to_file='decoder.png', \n show_shapes=True)\n\n # autoencoder = encoder + decoder\n # instantiate autoencoder model\n self.autoencoder = Model(inputs, \n self.decoder(self.encoder(inputs)),\n name='autoencoder')\n self.autoencoder.summary()\n plot_model(self.autoencoder, \n to_file='autoencoder.png', \n show_shapes=True)\n\n # Mean Square Error (MSE) loss function, Adam optimizer\n self.autoencoder.compile(loss='mse', optimizer='adam')", "def _create_encoder(self):\n\n def _init_weights(layer):\n \"\"\"Initializes the weights of a layer based on type.\"\"\"\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except AttributeError:\n pass\n\n kernel_size = 5\n pad = 2\n input_channels = 1\n first_conv_channels = 6\n second_conv_channels = 16\n max_pool_kernel = 2\n linear_size = 120\n n_pixels = 7\n\n encoder = nn.Sequential(\n nn.Conv2d(\n input_channels, first_conv_channels, kernel_size, padding=pad),\n nn.BatchNorm2d(first_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n nn.Conv2d(\n first_conv_channels, second_conv_channels, kernel_size,\n padding=pad),\n nn.BatchNorm2d(second_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n utils.Flatten(),\n nn.Linear(n_pixels * n_pixels * self.n_digits * second_conv_channels,\n linear_size),\n nn.BatchNorm1d(linear_size),\n nn.ReLU(),\n nn.Linear(linear_size, self.embedding_dim),\n nn.Linear(self.embedding_dim, self.n_classes, bias=False),\n )\n\n encoder.apply(_init_weights)\n\n # This is the empirical approximation for initialization the vMF\n # distributions for each class in the final layer.\n if self.use_vmf:\n utils.vmf_class_weight_init(encoder[-1].weight, self.kappa_confidence,\n self.embedding_dim)\n\n return encoder", "def __init__(self, autoencoder, latent_space):\r\n self._autoencoder = autoencoder\r\n self._latent_space = latent_space", "def _define_encoder(self):\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\n nn.SELU(),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.SELU(),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.SELU(),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\n )", "def encoder_disc_model(input_size=(32, 32, 1), filter_width=5, min_data_width=4,\n min_conv_filters=64, output_size=100, stride=2, activation=\"relu\",\n encoder_output_activation=\"linear\",\n dropout_alpha=0):\n num_layers = int(np.log2(input_size[0]) - np.log2(min_data_width))\n curr_conv_filters = min_conv_filters\n image_input = Input(shape=input_size, name=\"enc_input\")\n model = image_input\n for c in range(num_layers):\n model = Conv2D(curr_conv_filters, filter_width,\n strides=(stride, stride), padding=\"same\")(model)\n if activation == \"leaky\":\n model = LeakyReLU(0.2)(model)\n else:\n model = Activation(activation)(model)\n if activation == \"selu\":\n model = AlphaDropout(dropout_alpha)(model)\n else:\n model = Dropout(dropout_alpha)(model)\n curr_conv_filters *= 2\n model = Flatten()(model)\n enc_model = Dense(256, kernel_regularizer=l2())(model)\n if activation == \"leaky\":\n enc_model = LeakyReLU(0.2)(enc_model)\n else:\n enc_model = Activation(activation)(enc_model)\n enc_model = 
Dense(output_size, kernel_regularizer=l2())(enc_model)\n enc_model = Activation(encoder_output_activation)(enc_model)\n disc_model = Dense(1, kernel_regularizer=l2())(model)\n disc_model = Activation(\"sigmoid\")(disc_model)\n return disc_model, enc_model, image_input", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n x = tf.keras.layers.Input(shape=(1,), dtype=tf.string)\n\n h = tf.keras.layers.Lambda(UniversalEmbedding, output_shape=(512,))(x)\n\n return Model(inputs=x, outputs=h, name='encoder')", "def autoencoder3(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def _create_fffner_model(huggingface_bert_config):\n encoder_cfg = FFFNerEncoderConfig()\n encoder = FFFNerEncoder(\n vocab_size=huggingface_bert_config.vocab_size,\n hidden_size=huggingface_bert_config.hidden_size,\n num_layers=huggingface_bert_config.num_hidden_layers,\n num_attention_heads=huggingface_bert_config.num_attention_heads,\n inner_dim=huggingface_bert_config.intermediate_size,\n inner_activation=tf_utils.get_activation(\n huggingface_bert_config.hidden_act),\n output_dropout=huggingface_bert_config.hidden_dropout_prob,\n 
attention_dropout=huggingface_bert_config.attention_probs_dropout_prob,\n max_sequence_length=huggingface_bert_config.max_position_embeddings,\n type_vocab_size=huggingface_bert_config.type_vocab_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=encoder_cfg.initializer_range),\n output_range=encoder_cfg.output_range,\n embedding_width=huggingface_bert_config.hidden_size,\n norm_first=encoder_cfg.norm_first)\n return encoder", "def __call__(self, *args, **kwargs):\n return self.encoder_model(args[0])", "def __call__(self, *args, **kwargs):\n return self.encoder_model(args[0])", "def make_encoder_ph(self):\n info = self._module.get_input_info_dict('encode')['x']\n return tf.placeholder(dtype=info.dtype, shape=info.get_shape())", "def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:\n params = dict(kwargs)\n if infer_hidden:\n params['num_hidden'] = self.get_num_hidden()\n\n sig_params = inspect.signature(cls.__init__).parameters\n if 'dtype' in sig_params and 'dtype' not in kwargs:\n params['dtype'] = self.dtype\n encoder = cls(**params)\n self.encoders.append(encoder)\n return encoder", "def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',\n out_transfer='identity', reconstruct_loss='squared',\n c_sparsity=1, sparsity_loss='bern_bern_kl',\n sparsity_target=0.01,\n tied_weights=True, batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(SparseAutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer,\n reconstruct_loss, c_sparsity, sparsity_loss, sparsity_target,\n tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def build(\n n_iter=500, encoding_dim=3, depth=2, nh=20, activation='linear',\n initial_learning_rate=1e-3, solver='Adam', batch_size=32,\n random_state=10, early_stopping=False, patience=10, lamda=1e-1,\n knob_cols=None, auto_refit=True, max_refit_attempts=10):\n assert knob_cols is not None\n\n encoder_hidden_layers = [int(nh / (2**i)) for i in range(depth - 1)]\n if len(encoder_hidden_layers) > 0:\n if 0 in encoder_hidden_layers or encoder_hidden_layers[-1] < encoding_dim:\n return None\n decoder_hidden_layers = encoder_hidden_layers[::-1]\n hidden_layer_sizes = encoder_hidden_layers + \\\n [encoding_dim] + decoder_hidden_layers\n activations = [activation] * 2 * depth\n ae = FancyAutoEncoder(\n n_iter, hidden_layer_sizes, activations, initial_learning_rate,\n solver=solver, batch_size=batch_size, random_state=random_state,\n early_stopping=early_stopping, patience=patience, lamda=lamda,\n knob_cols=knob_cols, auto_refit=auto_refit,\n max_refit_attempts=max_refit_attempts)\n return ae", "def train_autoencoder(self, x_train, x_test):\n # train the autoencoder\n batch_size = 32\n self.autoencoder.fit(x_train,\n x_train,\n validation_data=(x_test, x_test),\n epochs=10,\n batch_size=batch_size)", "def one_hot_encode(x):\n\n # check if encoder has been previously created, if not make a global var an initialize it\n if 'encoder' not in globals():\n global encoder\n encoder = LabelBinarizer()\n encoder.fit(range(10))\n\n return encoder.transform(x)", "def simple_autoencoder(X_train_input, X_test_input, n_components = 100):\r\n ncol = X_train_input.shape[1]\r\n input_dim = Input(shape = (ncol,))\r\n \r\n # Define the number of encoder dimensions\r\n 
encoding_dim = n_components\r\n \r\n # Define the encoder layer\r\n encoded = Dense(encoding_dim, activation = 'relu')(input_dim)\r\n \r\n # Define the decoder layer\r\n decoded = Dense(ncol, activation = 'tanh')(encoded)\r\n \r\n # Combine the encoder and decoder into a model\r\n autoencoder = Model(inputs = input_dim, outputs = decoded)\r\n \r\n # Configure and train the autoencoder\r\n autoencoder.compile(optimizer = 'adam', loss = 'mse')\r\n autoencoder.fit(X_train_input, X_train_input, epochs = 50, batch_size = 128, shuffle = True,\r\n validation_data = (X_test_input, X_test_input),verbose = 1)\r\n \r\n # Use the encoder to extract the reduced dimension from the autoencoder\r\n encoder = Model(inputs = input_dim, outputs = encoded)\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def _DataEncoder(\n embedding_size: int, hidden_size: int,\n num_layers: int = 1, cell: str = \"GRU\",\n dropout: float = 0.25\n ) -> RNNEncoder:\n return RNNEncoder(in_size=embedding_size, hidden_size=hidden_size,\n num_layers=num_layers, cell=cell,\n dropout=dropout,\n init_rnn=\"default\")", "def __init__(\n self,\n encoder: EncoderMnist,\n decoder: DecoderMnist,\n latent_dim: int,\n input_pert: callable,\n name: str = \"model\",\n loss_f: callable = nn.MSELoss(),\n ):\n super(AutoEncoderMnist, self).__init__()\n self.latent_dim = latent_dim\n self.encoder = encoder\n self.decoder = decoder\n self.input_pert = input_pert\n self.name = name\n self.loss_f = loss_f\n self.checkpoints_files = []\n self.lr = None", "def make_encoder(self, input_size: int, latent_size: int) -> nn.Module:\n pass", "def __init__(self, AEs):\n \"\"\" the default view of the stacked autoencoders\"\"\"\n sa = AEs\n \"\"\" the encoder view of the stacked autoencoders \"\"\"\n ec = Cat([a.ec for a in sa])\n \"\"\" the decoder view of the stacked autoencoders \"\"\"\n dc = Cat([a.dc for a in reversed(sa)])\n\n self.sa = sa # default view\n self.ec = ec # encoder view\n self.dc = dc # decoder view\n\n nts = []\n nts.extend(ec)\n nts.extend(dc)\n super(SAE, self).__init__(nts)", "def deep_autoencoder(X_train_input, X_test_input, encoding_dim = 20):\r\n input_dim = X_train_input.shape[1]\r\n \r\n autoencoder = Sequential()\r\n \r\n # Encoder Layers\r\n autoencoder.add(Dense(4 * encoding_dim, input_shape=(input_dim,), activation='relu'))\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(encoding_dim, activation='relu'))\r\n \r\n # Decoder Layers\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(4 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(input_dim, activation='sigmoid'))\r\n \r\n autoencoder.compile(optimizer='adam', loss='binary_crossentropy')\r\n autoencoder.fit(X_train_input, X_train_input,\r\n epochs=50,\r\n batch_size=256,\r\n validation_data=(X_test_input, X_test_input))\r\n \r\n input_img = Input(shape=(input_dim,))\r\n encoder_layer1 = autoencoder.layers[0]\r\n encoder_layer2 = autoencoder.layers[1]\r\n encoder_layer3 = autoencoder.layers[2]\r\n encoder = Model(input_img, encoder_layer3(encoder_layer2(encoder_layer1(input_img))))\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def encode(input):\n return ModelEncoder().encode(input)", "def __init__(self, vocab_size, embedding_size, output_size):\n super(VanillaEncoder, 
self).__init__()\n\n self.vocab_size = vocab_size\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n self.gru = nn.GRU(embedding_size, output_size)", "def default_encoder(obj):\n\n if isinstance(obj, np.ndarray):\n data_b64 = base64.b64encode(np.ascontiguousarray(obj).data)\n return dict(__ndarray__=data_b64, dtype=str(obj.dtype), shape=obj.shape)\n\n if isinstance(obj, dict):\n result = dict()\n for k, v in obj.items():\n result[k] = default_encoder(v)\n\n return result\n\n return obj", "def encode(self, input_):\n return self.encoder(input_)", "def auto_encoder(data: np.ndarray) -> np.ndarray:\n input_img = Input(shape=(784,))\n encoded = Dense(128, activation='relu')(input_img)\n encoded = Dense(64, activation='relu')(encoded)\n encoded = Dense(32, activation='relu')(encoded)\n\n decoded = Dense(64, activation='relu')(encoded)\n decoded = Dense(128, activation='relu')(decoded)\n decoded = Dense(784, activation='sigmoid')(decoded)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.fit(x_train, x_train,\n epochs=100,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test))", "def encoder(enc_input, attn_bias, n_layer, n_head,\n d_key, d_value, d_model, d_inner_hid, pos_enc,\n preporstprocess_dropout, attention_dropout,\n relu_dropout, preprocess_cmd='n',\n postprocess_cmd='da'):\n for i in range(n_layer):\n enc_output = encoder_layer(enc_input, attn_bias, n_head,\n d_key, d_value, d_model,d_inner_hid, pos_enc,\n prepostprocess_dropout, attention_dropout,relu_dropout,\n preprocess_cmd, postprocess_cmd\n )\n enc_input = enc_output\n enc_output = pre_process_layer(enc_output,\n preprocess_cmd, preporstprocess_dropout)\n return enc_output", "def __init__(self, encoder=None, **kwargs):\n if encoder is None:\n encoder = dumps\n self.json_settings = kwargs\n self._encoder = encoder", "def autoencoder(self, data):\n with tf.variable_scope(\"autoencoder\"):\n latent = self.encoder(data)\n _, output = self.decoder(latent)\n\n return output, latent", "def build_encoder(shift):\n ### TODO.", "def __init__(self, input_size, hidden_size, bidirection, config):\r\n super(Encoder, self).__init__()\r\n\r\n self.config = config\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.bidirection = bidirection\r\n\r\n if self.config.model in ['LSTM', 'GRU']:\r\n self.rnn = getattr(nn, self.config.model)(self.input_size, self.hidden_size, self.config.nlayer_enc,\r\n batch_first=True, dropout=self.config.dropout,\r\n bidirectional=self.bidirection)\r\n else:\r\n try:\r\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[self.config.model]\r\n except KeyError:\r\n raise ValueError(\"\"\"An invalid option for `--model` was supplied,\r\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\r\n self.rnn = nn.RNN(self.input_size, self.hidden_size, self.config.nlayers, nonlinearity=nonlinearity,\r\n batch_first=True, dropout=self.config.dropout, bidirectional=self.bidirection)", "def make_encoder(opt, embeddings, intent_size, output_size, use_history=False, hidden_depth=1, identity=None,\n hidden_size=None):\n # encoder = StateEncoder(intent_size=intent_size, output_size=output_size,\n # state_length=opt.state_length, extra_size=3 if opt.dia_num>0 else 0 )\n\n # intent + price\n diaact_size = (intent_size+1)\n extra_size = 3 + 2\n if hidden_size is None:\n hidden_size = opt.hidden_size\n if not opt.use_utterance:\n embeddings = None\n if use_history:\n extra_size = 3\n # + 
pmask\n diaact_size += 1\n if identity is None:\n encoder = HistoryIDEncoder(None, diaact_size * 2, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth, rnn_state=True)\n else:\n # encoder = HistoryIDEncoder(identity, diaact_size*2+extra_size, embeddings, output_size,\n # hidden_depth=hidden_depth)\n encoder = HistoryIDEncoder(identity, diaact_size * 2, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth, rnn_state=True)\n else:\n if identity is None:\n encoder = CurrentEncoder(diaact_size*opt.state_length+extra_size, embeddings, output_size,\n hidden_depth=hidden_depth)\n else:\n extra_size = 3\n # + pmask\n diaact_size += 1\n encoder = HistoryIDEncoder(identity, diaact_size * opt.state_length, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth)\n\n return encoder", "def build(self) -> KM.Model:\n\n # For decoder number of features in opposite order of encoder\n decoder_features = self.encoder_features.copy()\n decoder_features.reverse()\n\n # build the encoder model\n self.encoder_model = self.encoder(\n features=self.encoder_features, name=\"encoder\"\n )\n\n # build the decoder model\n decoder = self.decoder(features=decoder_features, name=\"decoder\")\n\n input_tensor = KL.Input(\n shape=(32, 32, 3)\n ) # shape of images for cifar10 dataset\n\n # Encode the images\n encoded = self.encoder_model(input_tensor)\n # Decode the image\n decoded = decoder(encoded[-1])\n\n return KM.Model(inputs=input_tensor, outputs=decoded, name=\"AutoEncoder\")", "def abstract_encoder(label):\n global dictionary, wv, table\n model = torch.load('model/description_encoder')\n # label = label.lower()\n try:\n abstract = dictionary[label]\n d = abstract.translate(table).lower()\n d = d.replace('resource/', '').split()\n r = np.array(list(map(lambda x: wv.get_vector(x), d)),\n dtype=np.float32)\n hidden = model.init_hidden()\n except KeyError:\n return np.random.randn(100)\n try:\n for word in r:\n p, hidden = model(Variable(torch.tensor([[word]])),\n hidden)\n p = p[0][0].detach().numpy()\n return p\n except (KeyError, IndexError, TypeError) as _:\n return np.random.randn(100)", "def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,\n allow_nan=True, cls=None, indent=None, separators=None,\n encoding='utf-8', default=None, sort_keys=False, **kw):\n # cached encoder\n if (not skipkeys and ensure_ascii and\n check_circular and allow_nan and\n cls is None and indent is None and separators is None and\n encoding == 'utf-8' and default is None and not sort_keys and not kw):\n return _default_encoder.encode(obj)\n if cls is None:\n cls = JSONEncoder\n return cls(\n skipkeys=skipkeys, ensure_ascii=ensure_ascii,\n check_circular=check_circular, allow_nan=allow_nan, indent=indent,\n separators=separators, encoding=encoding, default=default,\n sort_keys=sort_keys, **kw).encode(obj)", "def encode(self): # pragma: no cover\n pass", "def __init__(self, emb_dim=100, window_size=3, init_emb=None,\n hidden_dim=100, vocab_size=0, PAD_IDX=None):\n super(CharCNNEncoder, self).__init__(\n emb_dim=emb_dim, window_size=window_size, init_emb=init_emb,\n hidden_dim=hidden_dim, splitter=None, vocab_size=vocab_size,\n PAD_IDX=PAD_IDX)", "def __init__(self,\n name: str,\n vocabulary: Vocabulary,\n data_id: str,\n embedding_size: int,\n rnn_size: int,\n max_input_len: int = None,\n dropout_keep_prob: float = 1.0,\n rnn_cell: str = \"GRU\",\n attention_type: type = None,\n attention_fertility: int = 3,\n attention_state_size: int = None,\n save_checkpoint: str = None,\n 
load_checkpoint: str = None) -> None:\n\n # TODO Think this through.\n s_ckp = \"input_{}\".format(save_checkpoint) if save_checkpoint else None\n l_ckp = \"input_{}\".format(load_checkpoint) if load_checkpoint else None\n\n # TODO! Representation runner needs this. It is not simple to do it in\n # recurrent encoder since there may be more source data series. The\n # best way could be to enter the data_id parameter manually to the\n # representation runner\n self.data_id = data_id\n\n input_sequence = EmbeddedSequence(\n name=\"{}_input\".format(name),\n vocabulary=vocabulary,\n data_id=data_id,\n embedding_size=embedding_size,\n max_length=max_input_len,\n save_checkpoint=s_ckp,\n load_checkpoint=l_ckp)\n\n RecurrentEncoder.__init__(\n self,\n name=name,\n input_sequence=input_sequence,\n rnn_size=rnn_size,\n dropout_keep_prob=dropout_keep_prob,\n rnn_cell=rnn_cell,\n attention_type=attention_type,\n attention_fertility=attention_fertility,\n attention_state_size=attention_state_size,\n save_checkpoint=save_checkpoint,\n load_checkpoint=load_checkpoint)", "def build_encoder(img_shape):\n input_img = Input(shape=(img_shape)) \n x = Conv2D(16, (3, 3), activation='tanh', padding='same')(input_img)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.5)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = AveragePooling2D((3, 3), padding='same')(x)\n x = Flatten()(x)\n encoded = Dense(540, activation='tanh')(x)\n Encoder=Model(input_img,encoded,name='encoder')\n return input_img,encoded,Encoder", "def __init__(\n self,\n input_vocab_size: int,\n output_vocab_size: int,\n embedding_size: int,\n layers_size: int,\n dropout: float,\n attention_size: int,\n ):\n super().__init__(NAME)\n self.input_vocab_size = input_vocab_size\n self.output_vocab_size = output_vocab_size\n\n self.encoder = Encoder(input_vocab_size, embedding_size, layers_size, dropout)\n self.attention_layer = BahdanauAttention(attention_size)\n self.decoder = Decoder(output_vocab_size, embedding_size, layers_size, dropout)\n\n self._embedding_size = embedding_size", "def __init__(self,\n name: str,\n vocabularies: List[Vocabulary],\n data_ids: List[str],\n embedding_sizes: List[int],\n rnn_size: int,\n max_input_len: int = None,\n dropout_keep_prob: float = 1.0,\n rnn_cell: str = \"GRU\",\n attention_type: type = None,\n attention_fertility: int = 3,\n attention_state_size: int = None,\n save_checkpoint: str = None,\n load_checkpoint: str = None) -> None:\n s_ckp = \"input_{}\".format(save_checkpoint) if save_checkpoint else None\n l_ckp = \"input_{}\".format(load_checkpoint) if load_checkpoint else None\n\n input_sequence = EmbeddedFactorSequence(\n name=\"{}_input\".format(name),\n vocabularies=vocabularies,\n data_ids=data_ids,\n embedding_sizes=embedding_sizes,\n max_length=max_input_len,\n save_checkpoint=s_ckp,\n load_checkpoint=l_ckp)\n\n RecurrentEncoder.__init__(\n self,\n name=name,\n input_sequence=input_sequence,\n rnn_size=rnn_size,\n dropout_keep_prob=dropout_keep_prob,\n rnn_cell=rnn_cell,\n attention_type=attention_type,\n attention_fertility=attention_fertility,\n attention_state_size=attention_state_size,\n save_checkpoint=save_checkpoint,\n load_checkpoint=load_checkpoint)", "def load_encoder(checkpoint, encoder_cls,\n HIDDEN_SIZE, embedding, 
ENCODER_N_LAYERS, DROPOUT, encoder_name, bidirectional):\n model = encoder_cls(HIDDEN_SIZE, embedding, ENCODER_N_LAYERS, DROPOUT,\n gate=encoder_name, bidirectional=bidirectional)\n model.load_state_dict(checkpoint['en'])\n model.eval()\n return model", "def BriefDescriptorExtractor_create(bytes=None, use_orientation=None): # real signature unknown; restored from __doc__\n pass", "def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,\n allow_nan=True, cls=None, indent=None, separators=None,\n encoding='utf-8', **kw):\n # cached encoder\n if (skipkeys is False and ensure_ascii is True and\n check_circular is True and allow_nan is True and\n cls is None and indent is None and separators is None and\n encoding == 'utf-8' and not kw):\n return _default_encoder.encode(obj)\n if cls is None:\n cls = JSONEncoder\n return cls(\n skipkeys=skipkeys, ensure_ascii=ensure_ascii,\n check_circular=check_circular, allow_nan=allow_nan, indent=indent,\n separators=separators, encoding=encoding,\n **kw).encode(obj)", "def build_full_conv_autoencoder():\n input_img = Input(shape=(84, 84, 3))\n\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c3')(x)\n encoded = MaxPooling2D((3, 3), border_mode='same')(x)\n\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c4')(encoded)\n x = UpSampling2D((3, 3))(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c5')(x)\n x = UpSampling2D((2, 2))(x)\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c6')(x)\n x = UpSampling2D((2, 2))(x)\n decoded = Convolution2D(3, 4, 4, activation='sigmoid', border_mode='same', name='c7')(x)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder", "def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data, data_mask)", "def create_model(opts):\n # G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.conv_dim)\n # D = DCDiscriminator(conv_dim=opts.conv_dim)\n G = DCGenerator()\n D = DCDiscriminator()\n\n return G, D", "def Aut(A):\n return Embeddings(A,A)", "def __init__(self, N, dm, h, hidden, input_vocab, target_vocab,\n max_seq_input, max_seq_target, drop_rate=0.1):\n super().__init__()\n self.encoder = Encoder(N, dm, h, hidden, input_vocab,\n max_seq_input, drop_rate)\n self.decoder = Decoder(N, dm, h, hidden, target_vocab,\n max_seq_target, drop_rate)\n self.linear = tf.keras.layers.Dense(target_vocab)", "def create_decoder():\n # Create decoder instance and add predictors\n \n try:\n decoder = decoding.DECODER_REGISTRY[args.decoder](args)\n except Exception as e:\n logging.fatal(\"An %s has occurred while initializing the decoder: %s\"\n \" Stack trace: %s\" % (sys.exc_info()[0],\n e,\n traceback.format_exc()))\n sys.exit(\"Could not initialize decoder.\")\n\n add_predictor(decoder)\n return decoder", "def _create_trainable_quantizer(bw, name, quantizer):\n # Initialize trainable parameters to None\n self.register_parameter(f'{name}_encoding_min', None)\n self.register_parameter(f'{name}_encoding_max', None)\n # Pass name of tensor quantizer 
and reference of Wrapper to tensor quantizer\n # Input quantizer\n new_quantizer = tensor_quantizer_factory(bw, self._round_mode,\n quant_scheme,\n self._is_symmetric,\n enabled_by_default=True,\n data_type=self._data_type)\n new_quantizer.name = name\n new_quantizer.wrapper_ref = self\n new_quantizer.device = self.device\n initialize_learned_grid_quantizer_attributes(new_quantizer, quantizer)\n return new_quantizer", "def test_Encoder_encode_decode_nack(self):\n interest = Interest(\"/data/test\")\n n = Nack(\"/data/test\", NackReason.NO_CONTENT, interest=interest)\n en = self.encoder1.encode(n)\n dn = self.encoder1.decode(en)\n self.assertTrue(n == dn)", "def Encoder(self, x_test):\n return self.Encoder.predict(x_test)", "def __init__(self, encoded_dataset):\n self._encoded_dataset = encoded_dataset\n\n self._model = None\n self._encoder_model = None\n self._encoder_model_inf = None\n self._decoder_model = None\n self._decoder_model_inf = None", "def edge_encoder_construct(cfg, model_name='edge_encoder', **kwargs):\n encoders = edge_encoder_dict()\n encoder_cfg = cfg[model_name]\n name = encoder_cfg.get('name', 'geo')\n if not name in encoders:\n raise Exception(\"Unknown edge encoder name provided:\", name)\n\n return encoders[name](encoder_cfg, **kwargs)", "def dd(self, name):\n return DD(name)", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n pass", "def _preorder_encode(self):\n features = np.expand_dims(self.get_features(), axis=0)\n\n features = np.pad(features, (1, 0),\n 'constant', constant_values=(0, 0))\n return features.transpose(1, 0), np.array([[1], [0], [0]])", "def __init__(self, emb_dim=100, window_size=3, init_emb=None,\n hidden_dim=100, add_dim=0, vocab_size=0,\n PAD_IDX=None):\n super(WordCNNEncoder, self).__init__(\n emb_dim=emb_dim, window_size=window_size, init_emb=init_emb,\n hidden_dim=hidden_dim, splitter=u' ', add_dim=add_dim,\n vocab_size=vocab_size, PAD_IDX=PAD_IDX)", "def __init__(self, word_ids, word_embedd, word_alphabet, char_alphabet):\n super(FeedForwardGenerator, self).__init__()\n word_ids = sorted(word_ids)\n\n self.word_embedd = word_embedd\n self.word_ids = word_ids\n self.word_ids_set = set(word_ids)\n m_emb = word_embedd.weight.size(-1)\n weight = torch.index_select(word_embedd.weight, 0, torch.tensor(word_ids, device=cfg.device))\n # only want weights that we care, by index_select\n self.obfenc = ObfEncoder(len(word_ids), m_emb, word_ids, weight)\n self.word_alphabet = word_alphabet\n self.char_alphabet = char_alphabet\n self.step = 0\n self.last_t = 0", "def node_encoder_construct(cfg, model_name='node_encoder', **kwargs):\n encoders = node_encoder_dict()\n encoder_cfg = cfg[model_name]\n name = encoder_cfg.get('name', 'geo')\n if not name in encoders:\n raise Exception(\"Unknown node encoder name provided:\", name)\n\n return encoders[name](encoder_cfg, **kwargs)", "def encode(y):\n le = LabelEncoder()\n le.fit(y)\n print(list(le.classes_))\n y = le.transform(y)\n return y", "def encode(self, x: Tensor) ->Tensor:\n return self.encoder(x)[0]", "def ordinal_encoder(name: str,\n categories: typing.Union[str, list] = \"auto\",\n dtype: type = np.float64,\n handle_unknown: str = \"error\",\n unknown_value: float = None):\n rval = scope.sklearn_OrdinalEncoder(\n categories=categories,\n dtype=dtype,\n handle_unknown=handle_unknown,\n unknown_value=unknown_value\n )\n\n return rval", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n n_stacks = len(dims) - 1\n # input\n input_img = Input(shape=(dims[0],), name='input')\n x 
= input_img\n # internal layers in encoder\n for i in range(n_stacks-1):\n x = Dense(dims[i + 1], activation=act, kernel_initializer=init, name='encoder_%d' % i)(x)\n\n # hidden layer\n encoded = Dense(dims[-1], kernel_initializer=init, name='encoder_%d' % (n_stacks - 1))(x) # hidden layer, features are extracted from here\n\n x = encoded\n # internal layers in decoder\n for i in range(n_stacks-1, 0, -1):\n x = Dense(dims[i], activation=act, kernel_initializer=init, name='decoder_%d' % i)(x)\n\n # output\n x = Dense(dims[0], kernel_initializer=init, name='decoder_0')(x)\n decoded = x\n return Model(inputs=input_img, outputs=decoded, name='AE'), Model(inputs=input_img, outputs=encoded, name='encoder')", "def encode(self):\n with tf.name_scope(\"encode\"):\n self.encoder_inputs = tf.layers.dense(\n inputs=self.encoder_inputs,\n units=self.options['hidden_size'], activation=None, use_bias=True,\n kernel_initializer=tf.keras.initializers.he_normal(seed=None),\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, trainable=True,\n name=None, reuse=None)\n self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=self.is_training,\n trainable=True,\n renorm=False,\n renorm_momentum=0.99)\n # Prepare inputs to the layer stack by adding positional encodings and\n # applying dropout.\n # embedded_inputs = self.embedding_softmax_layer(inputs)\n #\n inputs_padding = transformer_model_utils.get_padding(tf.cast(\n tf.reduce_max(100*self.encoder_inputs, [-1]),\n dtype=tf.int32))\n\n with tf.name_scope(\"add_pos_encoding\"):\n length = tf.shape(self.encoder_inputs)[1]\n pos_encoding = transformer_model_utils.get_position_encoding(\n length, self.options[\"hidden_size\"])\n encoder_inputs = self.encoder_inputs + pos_encoding\n\n if self.is_training:\n encoder_inputs = tf.nn.dropout(\n encoder_inputs, 1 - self.options[\"layer_postprocess_dropout\"])\n\n return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)", "def build_encoder(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tctx = proj[0][-1]\n\n\treturn embedding, x_mask, ctx", "def __init__(self, coder):\n self.coder = coder", "def resnet_autoencoder_v1(encoder_depth, decoder_depth, width_multiplier, metric_channels, # noqa\n cifar_stem=False, data_format='channels_last',\n dropblock_keep_probs=None, dropblock_size=None,\n mask_augs=0., greyscale_viz=False, skip=True):\n encoder = resnet_encoder_v1(encoder_depth, \n width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n decoder = resnet_decoder_v1(decoder_depth=decoder_depth,\n encoder_depth=encoder_depth,\n width_multiplier=width_multiplier,\n cifar_stem=cifar_stem, \n data_format=data_format,\n dropblock_keep_probs=dropblock_keep_probs, \n dropblock_size=dropblock_size)\n\n metric = 
learned_metric_v1(data_format=data_format, metric_channels=metric_channels) \n \n return resnet_autoencoder_v1_generator(\n encoder=encoder,\n decoder=decoder,\n metric=metric,\n skip=skip,\n mask_augs=mask_augs,\n greyscale_viz=greyscale_viz,\n data_format=data_format)", "def make_stax_model(self):", "def encode(self):\n \n assert False, \"Not implemented.\"", "def __init__(self, num_layers, num_heads, D, mlp_dim, dropout=0.1, norm_eps=1e-12):\n super(TransformerEncoder, self).__init__()\n\n # Create num_layers of TransformerBlock\n self.encoder = Sequential(\n [\n TransformerBlock(num_heads=num_heads,\n D=D,\n hidden_layers=[mlp_dim, D],\n dropout=dropout,\n norm_eps=norm_eps)\n for _ in range(num_layers)\n ]\n )" ]
[ "0.6016746", "0.55889976", "0.545366", "0.54502213", "0.5415948", "0.54003376", "0.54001844", "0.53689516", "0.534991", "0.5316329", "0.5203007", "0.51553774", "0.5107471", "0.5106341", "0.5100852", "0.50989753", "0.5094069", "0.5073897", "0.50694823", "0.5016635", "0.50073105", "0.49909428", "0.49849224", "0.4968867", "0.49330625", "0.4929339", "0.4921299", "0.4915225", "0.49001294", "0.4899458", "0.48936906", "0.4887773", "0.48832044", "0.4882318", "0.48650587", "0.48558393", "0.48558393", "0.48418808", "0.4831698", "0.4829127", "0.48104933", "0.47984135", "0.47937956", "0.47871768", "0.4752624", "0.47521406", "0.47369084", "0.47293147", "0.47271788", "0.47205934", "0.46755654", "0.46610877", "0.46549267", "0.46507895", "0.46497324", "0.4647944", "0.46379802", "0.46142375", "0.46093482", "0.45774785", "0.45765573", "0.45690572", "0.4568237", "0.45635912", "0.4556402", "0.45511743", "0.45507795", "0.45470095", "0.4542188", "0.45408246", "0.45405596", "0.45321566", "0.45250154", "0.45206213", "0.45093662", "0.45088094", "0.44763565", "0.4460877", "0.44530985", "0.4440088", "0.4436867", "0.44356203", "0.44308645", "0.44109046", "0.4401737", "0.439402", "0.43895236", "0.43734333", "0.43625385", "0.4359146", "0.43554837", "0.43515006", "0.43504494", "0.4350215", "0.43471637", "0.4345469", "0.43432224", "0.43402708", "0.43394032", "0.43350238" ]
0.6117516
0
restrict to the content's language
def test_languages(self, client): root = Node.root() en = Type1(title="Hello World", state="published", node=root, language="en").save() nl = Type1(title="Hoi Wereld", state="published", node=root, language="nl").save() enspoke = en.spoke() feed = WheelFeed(enspoke) assert en.content_ptr in feed.items() assert nl.content_ptr not in feed.items()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_forced(self, lang):\r\n return False", "def process_request(self, request):\n\n if request.COOKIES.get('site_language'):\n if request.COOKIES['site_language'] == '':\n language = 'fr'\n else:\n language = request.COOKIES['site_language']\n # You should add here some code to check teh language\n # variable is safe...\n translation.activate(language)\n request.LANGUAGE_CODE = translation.get_language()", "def patch_request(self, request):\n if 'HTTP_ACCEPT_LANGUAGE' in request.META:\n # Preserve the browser provided language just in case,\n # the underscore prefix means that you probably shouldn't be using it anyway\n request.META['_HTTP_ACCEPT_LANGUAGE'] = request.META['HTTP_ACCEPT_LANGUAGE']\n\n language_code = configuration_helpers.get_value('LANGUAGE_CODE', settings.LANGUAGE_CODE)\n request.META['HTTP_ACCEPT_LANGUAGE'] = language_code", "def _is_lang_change(self, request):\n if 'lang' not in request.GET:\n return False\n\n return not any(request.path.endswith(url) for url in self.exempt_urls)", "def translate(self, language=None):", "def validate_lang(lang):\n if lang in LANGUAGE_OPTIONS.keys():\n return True", "def use_en(self):\n pass", "def clean_lang(self):\n lang = self.cleaned_data.get('lang', None)\n if not lang in self.greetings:\n raise forms.ValidationError(\n \"We couldn't find the language you selected {}\"\n \" Please select another\".format(lang)\n )\n return lang", "def test_list_source_language(self):\n\n # check if documentalist has access to the list view\n self.login_documentalist()\n response = self.client.get('/languages/' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n response = self.client.get('/languages/')\n self.assertContains(response, \"português\")", "def filter_content_by_language(self, keyword, content_type=None):\n\n if content_type is None:\n is_article, is_podcast, is_video = True, True, True\n else:\n is_article, is_podcast, is_video = self.get_content_type(content_type)\n\n try:\n response = {\"status\": \"ok\"}\n if is_article:\n db_articles = self.db_connector.connect_to_collection(self.db_config[MONGODB][COLLECTION_ARTICLES])\n response[\"articles\"] = list(db_articles.find({\"language\": keyword}, {'_id': 0}))\n if is_podcast:\n db_podcasts = self.db_connector.connect_to_collection(self.db_config[MONGODB][COLLECTION_PODCASTS])\n response[\"podcasts\"] = list(db_podcasts.find({\"language\": keyword}, {'_id': 0}))\n if is_video:\n db_videos = self.db_connector.connect_to_collection(self.db_config[MONGODB][COLLECTION_VIDEOS])\n response[\"videos\"] = list(db_videos.find({\"language\": keyword}, {'_id': 0}))\n return response\n except:\n return {\"status\": \"bad\"}", "def get_meta_lang(self):\n # we have a lang attribute in html\n attr = self.parser.getAttribute(self.article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n items = [\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\n ]\n for item in items:\n meta = self.parser.getElementsByTag(self.article.doc, **item)\n if meta:\n attr = self.parser.getAttribute(meta[0], attr='content')\n break\n\n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n\n return None", "def process_request(self, request):\r\n if request.user.is_authenticated() and 'django_language' not in request.session:\r\n user_pref = UserPreference.get_preference(request.user, LANGUAGE_KEY)\r\n if user_pref:\r\n 
request.session['django_language'] = user_pref", "def requestLanguage(request):\n # Return the user language preferences for registered users\n if request.user.valid and request.user.language:\n return request.user.language\n\n # Or try to return one of the user browser accepted languages, if it\n # is available on this wiki...\n available = wikiLanguages()\n if not request.cfg.language_ignore_browser:\n for lang in browserLanguages(request):\n if lang in available:\n return lang\n \n # Or return the wiki default language...\n if request.cfg.language_default in available:\n lang = request.cfg.language_default\n # If everything else fails, read the manual... or return 'en'\n else:\n lang = 'en'\n return lang", "def getMetaLang(self, article):\n # we have a lang attribute in html\n attr = Parser.getAttribute(article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n kwargs = {'tag':'meta',\n 'attr':' http-equiv',\n 'value':'content-language'}\n meta = Parser.getElementsByTag(article.doc, **kwargs)\n if meta:\n attr = Parser.getAttribute(meta[0], attr='content')\n \n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n \n return None", "def fetch_languages(self):\r\n \r\n # tokenize, clean and filter document tokens \r\n toks = [re.sub(r'[^a-zA-Z]','', tok.text.lower().strip()) for tok in self.doc]\r\n toks = [tok for tok in toks if len(tok)>1 and tok in LANGUAGES]\r\n toks = sorted(set(toks))\r\n \r\n return toks", "async def funslate(self,ctx,lang=\"ja\"):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n await self.translater(ctx,lang,wordsDict.generate())", "def removeLanguage(language):", "def process_text(self, text, language):", "def process_response(self, request, response):\n\n if not request.COOKIES.get('site_language'):\n response.set_cookie('site_language',\n '')\n translation.deactivate()\n return response", "def test_lang_subset_unlikely_language(en_multilingual):\n sentences = [\"你好\" * 200]\n docs = [Document([], text=text) for text in sentences]\n en_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"en\"]\n\n processor = en_multilingual.processors['langid']\n model = processor._model\n text_tensor = processor._text_to_tensor(sentences)\n en_idx = model.tag_to_idx['en']\n predictions = model(text_tensor)\n assert predictions[0, en_idx] < 0, \"If this test fails, then regardless of how unlikely it was, the model is predicting the input string is possibly English. 
Update the test by picking a different combination of languages & input\"", "def language_selector(context):\r\n output = \"\"\r\n from django.conf import settings\r\n i18 = getattr(settings, 'USE_I18N', False)\r\n if i18:\r\n template = \"admin/language_selector.html\"\r\n context['i18n_is_set'] = True\r\n try:\r\n output = render_to_string(template, context)\r\n except:\r\n pass\r\n return output", "def get_meta_lang(self, article):\r\n # we have a lang attribute in html\r\n attr = self.parser.getAttribute(article.doc, attr='lang')\r\n if attr is None:\r\n # look up for a Content-Language in meta\r\n items = [\r\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\r\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\r\n ]\r\n for item in items:\r\n meta = self.parser.getElementsByTag(article.doc, **item)\r\n if meta:\r\n attr = self.parser.getAttribute(meta[0], attr='content')\r\n break\r\n\r\n if attr:\r\n value = attr[:2]\r\n if re.search(RE_LANG, value):\r\n return value.lower()\r\n\r\n return None", "def remove_rows_with_non_english_movies(df):\n df = df[df['original_language'] == 'en']\n return df", "def _get_lang(self, *args, **kwargs):\n if \"lang\" in kwargs:\n if kwargs[\"lang\"] in self._available_languages:\n self.lang = kwargs[\"lang\"]", "def can_handle_language(cls, language: Hashable) -> bool:\n\n # if language_list is set to `None` it means: support all languages\n if language is None or cls.language_list is None:\n return True\n\n return language in cls.language_list", "def browserLanguages(request):\n fallback = []\n accepted = request.http_accept_language\n if accepted:\n # Extract the languages names from the string\n accepted = accepted.split(',')\n accepted = map(lambda x: x.split(';')[0], accepted)\n # Add base language for each sub language. 
If the user specified\n # a sub language like \"en-us\", we will try to to provide it or\n # a least the base language \"en\" in this case.\n for lang in accepted:\n lang = lang.lower()\n fallback.append(lang)\n if '-' in lang:\n baselang = lang.split('-')[0]\n fallback.append(baselang)\n return fallback", "def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }", "def wikiLanguages():\n return languages", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n self.language = article.meta_lang[:2]\r\n self.language = self.config.target_language", "def test_course_explicit_english(self):\r\n self.client = AjaxEnabledTestClient()\r\n self.client.login(username=self.uname, password=self.password)\r\n\r\n resp = self.client.get_html('/course/',\r\n {},\r\n HTTP_ACCEPT_LANGUAGE='en'\r\n )\r\n\r\n self.assertContains(resp,\r\n '<h1 class=\"page-header\">My Courses</h1>',\r\n status_code=200,\r\n html=True)", "def test_language_parent_fallback(self): \n SiteWording.objects.filter(identifier='test_1', language__code='en-us').delete()\n \n activate('en-us')\n self.assertEqual(get_wording_text('test_1'), 'en')", "def not_supported_languages() -> Optional[List[Text]]:\n return [\"zh\", \"ja\", \"th\"]", "def detect_language(self):\n if not self.clean:\n self._text_clean()\n if not self.clean:\n return\n self.payload = \"q={}\".format(self.text)\n resp = requests.request('POST', self.url_language, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.language = json.loads(resp.text)['data']['detections'][0][0]['language']\n except KeyError:\n return", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n return article.meta_lang[:2]\r\n return self.config.target_language", "def language_filter(tweet_objects):\n\n filtered_list = []\n\n for tweet in tweet_objects: \n lang = detect(text_from_tweet(tweet))\n if lang == 'en':\n filtered_list.append(tweet)\n\n return filtered_list", "def disableVocolaTakesLanguages(self):\n key = \"VocolaTakesLanguages\"\n self.userregnl.set(key, 0)", "def apply_language_filter(self):\n all_langs = self.data.keys()\n langs_to_remove = [l for l in all_langs if not self.config.filter_language(l)]\n for lang in langs_to_remove:\n self.data.pop(lang)\n # Make sure we've not removed all languages\n if not self.data.keys():\n raise ValueError(\"Language filters leave nothing in the dataset for model '%s'!\" % self.name)\n # Keep a sorted list so that the order of things in XML is deterministic\n self.languages = sorted(list(self.data.keys()))", "def is_english(text):\n\n lang = langid.classify(text)\n if lang and 'en' in lang[0]:\n return True\n return False", "def test_language_fix(self):\n #TODO\n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(lang, get_language())", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def test_language_sensitivity(self): \n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(get_wording_text('test_1'), lang)", "def set_language(request):\r\n user = 
request.user\r\n lang_pref = request.POST.get('language', None)\r\n\r\n if lang_pref:\r\n UserPreference.set_preference(user, LANGUAGE_KEY, lang_pref)\r\n return HttpResponse('{\"success\": true}')\r\n\r\n return HttpResponseBadRequest('no language provided')", "def set_language(request, lang_code):\n next = '/'\n response = http.HttpResponseRedirect(next)\n\n if int(lang_code) == 1:\n lang_code = 'en'\n elif int(lang_code) == 2:\n lang_code = 'ru'\n else:\n lang_code = 'ru'\n\n if lang_code and check_for_language(lang_code):\n\n request.session['django_language'] = lang_code\n\n return response", "def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n context['lang_versions'] = self.get_translations()\n context['default_lang'] = (settings.LANGUAGES[0][0])\n return context", "def allows_language_choice(self, allows_language_choice):\n\n self._allows_language_choice = allows_language_choice", "def compare_language(language):\n if language in module.availableLanguages:\n return True\n else:\n return False", "def requires_matching_languages(self):\n return self._requires_matching_languages", "def test_00_i18n_anonymous(self):\r\n # First default 'en' locale\r\n with self.app as c:\r\n err_msg = \"The page should be in English\"\r\n res = c.get('/', headers=[('Accept-Language', 'en')])\r\n assert \"Community\" in res.data, err_msg\r\n # Second with 'es' locale\r\n with self.app as c:\r\n err_msg = \"The page should be in Spanish\"\r\n res = c.get('/', headers=[('Accept-Language', 'es')])\r\n assert \"Comunidad\" in res.data, err_msg", "def search_lang(self,strz):\n\t\tfor lang in languages: #languages = list of allow lang words\n\t\t\tif lang in strz:\n\t\t\t\tif len(self.language)>0:\n\t\t\t\t\tself.language+='.'+lang.replace(\".\",\"\")\n\t\t\t\telse:\n\t\t\t\t\tself.language+=lang.replace(\".\",\"\")\n\t\t\t\tstrz =strz.replace(lang,\"\")\n\t\treturn strz", "async def translate(self,ctx,lang=\"ja\",txt=None):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n await self.translater(ctx,lang,txt)", "def test_placeholder_untranslated_content(self):\n from pages import settings as pages_settings\n setattr(pages_settings, \"PAGE_USE_SITE_ID\", False)\n page = self.new_page(content={})\n placeholder = PlaceholderNode('untrans', page='p', untranslated=True)\n placeholder.save(page, 'fr-ch', 'test-content', True)\n placeholder.save(page, 'en-us', 'test-content', True)\n self.assertEqual(len(Content.objects.all()), 1)\n self.assertEqual(Content.objects.all()[0].language, 'en-us')\n\n placeholder = PlaceholderNode('untrans', page='p', untranslated=False)\n placeholder.save(page, 'fr-ch', 'test-content', True)\n self.assertEqual(len(Content.objects.all()), 2)\n\n # test the syntax\n page = self.new_page()\n template = django.template.loader.get_template(\n 'pages/tests/untranslated.html')\n context = Context({'current_page': page, 'lang':'en-us'})\n self.assertEqual(template.render(context), '')", "def loadLanguage(request, lang):\n request.clock.start('loadLanguage')\n from MoinMoin import caching\n # farm notice: for persistent servers, only the first wiki requesting some language\n # gets its cache updated - a bit strange and redundant, but no problem.\n cache = caching.CacheEntry(request, arena='i18n', key=lang)\n import MoinMoin.request\n langfilename = os.path.join(MoinMoin.request.prefix + \"/i18n\", '%s.py' % filename(lang))\n needsupdate = cache.needsUpdate(langfilename)\n if debug: request.log(\"i18n: langfilename %s 
needsupdate %d\" % (langfilename, needsupdate))\n if not needsupdate:\n try:\n (uc_texts, uc_unformatted) = pickle.loads(cache.content())\n except (IOError, ValueError, pickle.UnpicklingError): # bad pickle data, no pickle\n if debug: request.log(\"i18n: pickle %s load failed\" % lang)\n needsupdate = 1\n\n if needsupdate: \n from MoinMoin.util import pysupport\n lang_module = \"MoinMoin.i18n.%s\" % filename(lang)\n try:\n # Language module without text dict will raise AttributeError\n texts = pysupport.importName(lang_module, \"text\")\n except ImportError:\n if debug: request.log(\"i18n: import of module %s failed.\" % lang_module)\n request.clock.stop('loadLanguage')\n return None, None\n meta = pysupport.importName(lang_module, \"meta\") \n encoding = meta['encoding']\n\n # convert to unicode\n if debug: request.log(\"i18n: processing unformatted texts of lang %s\" % lang)\n uc_unformatted = {}\n for key, text in texts.items():\n ukey = key.decode(encoding)\n utext = text.decode(encoding)\n uc_unformatted[ukey] = utext\n\n if meta.get('wikimarkup', False):\n if debug: request.log(\"i18n: processing formatted texts of lang %s\" % lang)\n # use the wiki parser now to replace some wiki markup with html\n uc_texts = {}\n for key, text in uc_unformatted.items():\n try:\n uc_texts[key] = formatMarkup(request, text)\n except: # infinite recursion or crash\n if debug:\n request.log(\"i18n: crashes in language %s on string: %s\" % (lang, text))\n uc_texts[key] = \"%s*\" % text\n else:\n uc_texts = uc_unformatted\n if debug: request.log(\"i18n: dumping lang %s\" % lang)\n cache.update(pickle.dumps((uc_texts, uc_unformatted), PICKLE_PROTOCOL))\n request.clock.stop('loadLanguage')\n return uc_texts, uc_unformatted", "def set_language(request):\n response = HttpResponseRedirect(get_redirect_url(request))\n\n if request.method == 'POST':\n lang_code = request.POST.get('language', None)\n if lang_code and check_for_language(lang_code):\n request.session[settings.LANGUAGE_SESSION_KEY] = lang_code\n\n return response", "def lang(self, lang):\n self.addHeader('Accept-Language', lang)", "def add_game_language_subscriber(event):\n request = event.request\n # TODO: look up game language from a cookie or something\n en = db.get_by_identifier_query(db.t.Language, u'en').first()\n request.tmpl_context.game_language = en", "def setlang(request):\n next = request.GET.get('next', None)\n if not is_safe_url(url=next, host=request.get_host()):\n next = request.META.get('HTTP_REFERER')\n if not is_safe_url(url=next, host=request.get_host()):\n next = '/'\n response = redirect(next)\n\n lang_code = request.GET.get('language', None)\n if lang_code and check_for_language(lang_code):\n if hasattr(request, 'session'):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n else:\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN)\n\n return response", "def is_valid_language(self, file):\n if not self.languages or get_file_type(file[\"path\"]) in self.languages:\n return True\n return False", "def is_valid_language(self, file):\n if not self.languages or get_file_type(file[\"path\"]) in self.languages:\n return True\n return False", "def set_default_language(request):\n response = HttpResponseRedirect(get_redirect_url(request))\n\n if request.method == 'POST':\n lang_code = request.POST.get('language', None)\n if lang_code and check_for_language(lang_code):\n 
request.user.moderator_profile.language = lang_code\n request.user.moderator_profile.save()\n messages.success(request, _(\"Your default language has been updated.\"))\n\n return response", "def translate_as(self, lang):\n trans = PublicationLocalization.objects.filter(publication=self,\n language=lang,\n is_active=True).first()\n if trans:\n self.title = trans.title\n self.subheading = trans.subheading\n self.content = trans.content", "def language(self):\n if self.consent:\n self.consent.language\n translation.activate(self.consent.language)\n self._language = translation.get_language()\n else:\n self._language = settings.LANGUAGE_CODE\n return self._language", "def test_get_language(self):\n with translation.override(\"fr\"):\n # Despite being\n # Initialize form in other language.\n x = SimpleModel(shared=\"SHARED\", tr_title=\"TRANS\", _current_language=\"nl\")\n self.assertEqual(x.get_current_language(), \"nl\")\n x.save()\n\n x2 = SimpleModel.objects.language(\"nl\").get(pk=x.pk)\n self.assertEqual(x2.get_current_language(), \"nl\")\n self.assertEqual(x2.shared, \"SHARED\")\n self.assertEqual(x2.tr_title, \"TRANS\")", "def get_language(self, text):\n try:\n post_lang = detect(text)\n except:\n post_lang = 'N/A'\n return post_lang", "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def UpdateCulture(self):\n culture = None\n db_area = self.request.pageinfo.DbArea\n if db_area == const.DM_CIC and self.cic:\n culture = self.cic.Culture\n\n if db_area == const.DM_VOL and self.vol:\n culture = self.vol.Culture\n\n if culture:\n self.request.language.setSystemLanguage(culture)", "def is_accepting(self):\n return (self.position == 1) and (self.lhs.content == LANGUAGE)", "def language(self, target):\n self._check_target(target)\n return target.language or self._default_language", "def supported_languages(self):\n return SUPPORT_LANGUAGES", "async def lang(self, ctx):\n\n # Get server language\n with open('serverconfig/lang.json', 'r') as f:\n language = json.load(f)\n old_language = language[str(ctx.guild.id)]\n\n # Get embed\n with open(f'embeds/{old_language}/language.json', 'r') as f:\n embed = json.load(f)\n\n # Send embed with select menu\n select = await ctx.reply(embed=discord.Embed.from_dict(embed['list']).set_thumbnail(url=self.client.user.avatar_url),\n components=[SelectMenu(custom_id='lang-menu', placeholder='Languages', options=[\n SelectOption(label='EN',\n value='EN',\n description='English'),\n SelectOption(label='ES',\n value='ES',\n description=\"Español\")\n ])], mention_author=False, remove_after=20)\n\n # Check select menu\n def check_selection(i: discord.Interaction, select_menu):\n return i.message == select and i.author.id == ctx.message.author.id\n interaction, select_menu = await self.client.wait_for('selection_select', check=check_selection)\n lang = select_menu.values[0]\n\n await select.delete()\n\n # Update language\n language[str(ctx.guild.id)] = lang\n with open('serverconfig/lang.json', 'w') as f:\n json.dump(language, f, indent=4)\n\n # Gets and sends embed of new language\n with open(f\"embeds/{lang}/language.json\", \"r\") as f:\n embed = 
json.load(f)\n msg = await ctx.send(embed=discord.Embed.from_dict(embed['change']))\n time.sleep(20)\n await msg.delete()", "def set_language(self, lang):\n self.lang = lang", "def testLanguage(self):\n if self.language in tools.LANGUAGES:\n self.assertEqual(\n self.language,\n self.config.language\n )\n else:\n self.assertNotEqual(\n self.language,\n self.config.language\n )\n self.assertEqual(\n tools.LANGUAGE_DEFAULT,\n self.config.language\n )", "def setlang(request):\n form = SetLanguageForm(request.POST or None)\n if form.is_valid():\n user_language = form.cleaned_data['language']\n translation.activate(user_language)\n response = HttpResponseRedirect(form.cleaned_data['next'])\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user_language)\n return response", "def translate_to(self, lang):\n TranslatableWindow.translate_all(lang)", "def get_language(self):\r\n return self.language", "def define_content(self, html):\n self.html_template(html, lang=\"en\")\n self.add_language(\"en\")", "def fallback_trans(x):\r\n t = _(x)\r\n if t == x:\r\n l = h.get_lang()\r\n h.set_lang('en', graceful_fail = True)\r\n t = _(x)\r\n if l and l[0] != 'en':\r\n h.set_lang(l[0])\r\n return t", "def is_stopword(self, word, language):", "def lang_postprocessing(variables):\n return variables", "def setup_site_languages(context):\n portal = context.getSite()\n ltool = portal.portal_languages\n \n defaultLanguage = bc.default_language\n supportedLanguages = list(bc.zope_i18n_allowed_languages.split())\n ltool.manage_setLanguageSettings(defaultLanguage, supportedLanguages,\n setUseCombinedLanguageCodes=True,\n setCookieN=True, setRequestN=True)\n logger.info(\"Site languages enabled.\")", "def _clean_accept_headers(self, request):\r\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', None)\r\n if accept is None or accept == '*':\r\n return\r\n\r\n new_accept = \", \".join(\r\n self._format_accept_value(lang, priority)\r\n for lang, priority\r\n in dark_parse_accept_lang_header(accept)\r\n if self._is_released(lang)\r\n )\r\n\r\n request.META['HTTP_ACCEPT_LANGUAGE'] = new_accept", "def assertAcceptEquals(self, value, request):\r\n self.assertEquals(\r\n value,\r\n request.META.get('HTTP_ACCEPT_LANGUAGE', UNSET)\r\n )", "def contentextract(text):\n stopword=stopwords.words('english')\n punctuation=['.','?','!',',',';',\"''\",'\"\"',\"'\",\"--\",\"``\",\"|\",\"<\",\">\",\"...\",\"......\",\"'s\",':','[',']',\n '(',')','#','*','$','%','@','^','-','+','=','/','{','}','\\\\','\"','&']\n content=[w for w in text if w.lower() not in stopword]\n content=[w for w in content if w not in punctuation]\n return content", "def process_request(self, request):\r\n if not DarkLangConfig.current().enabled:\r\n return\r\n\r\n self._clean_accept_headers(request)\r\n self._activate_preview_language(request)", "def test_lang_is_not_supported(app):\n rv = app.test_client().post('/tokenize', json={\n 'text':'这是中文'})\n json_data = rv.get_json()\n msg = json_data['message']\n assert msg == 'Language not supported'", "def get_language(self):\n return self.lang", "def list_languages(self):\n known = [ob.capitalize() for ob in self.caller.languages.known_languages]\n known += [\"Arvani\"]\n self.msg(\"{wYou can currently speak:{n %s\" % \", \".join(known))\n self.msg(\n \"You can learn %s additional languages.\"\n % self.caller.languages.additional_languages\n )", "def fetchTranslation(self, language):\n pass", "def language(self):\r\n return self._get('language', {})", "def _activate_preview_language(self, request):\r\n if 
'clear-lang' in request.GET:\r\n if 'django_language' in request.session:\r\n del request.session['django_language']\r\n\r\n preview_lang = request.GET.get('preview-lang', None)\r\n\r\n if not preview_lang:\r\n return\r\n\r\n if preview_lang in self.released_langs:\r\n return\r\n\r\n request.session['django_language'] = preview_lang", "def validate_translated_text(language_json):\n allowed_codes = {code for code, name in settings.LANGUAGES}\n for code, translated in language_json.items():\n if code not in allowed_codes:\n raise ValidationError(f'Unrecognized language code: {code}')\n if not isinstance(translated, str):\n raise ValidationError(f'Translation for {code} must be a string')", "def get_languages(self):\n titles = Title.objects.filter(page=self)\n if not hasattr(self, \"languages_cache\"):\n languages = []\n for t in titles:\n if t.language not in languages:\n languages.append(t.language)\n self.languages_cache = languages\n return self.languages_cache", "def test_content(google_translator):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string\n assert google_translator.translate(text='좋은') == \"good\"", "def use_zh(self):\n pass", "def get_translation(self):", "async def rmvlang(self, context, *args):\n if not can_manage_roles(context):\n await context.channel.send(MODO_FORBIDDEN)\n elif len(args) < 1:\n await context.channel.send(LANG_MISSING)\n else:\n server = context.message.guild\n lang = normalize(args[0])\n if lang not in get_langs(server):\n await context.channel.send(LANG_UNKNOWN.format(lang=lang, channel=ref_suggestion(server)))\n else:\n await get_role_know(server, lang).delete()\n await get_role_learn(server, lang).delete()\n await context.channel.send(LANG_RMV.format(lang=lang))", "def language(self, text_language):\n language = text_language.strip().lower()\n if language in LANGUAGE_TO_CODE:\n self._language_code = LANGUAGE_TO_CODE[language]\n else:\n self._language_code = language[:2]", "def test_get_languages(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual([settings.LANGUAGE_CODE], story.get_languages())", "def test_get_languages(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual([settings.LANGUAGE_CODE], story.get_languages())", "def srclang(self):\n return self.__srclang", "def getIsMultilingual(self):\n return self.getOrDefault(self.isMultilingual)" ]
[ "0.6518715", "0.6337609", "0.62430346", "0.61772054", "0.6115925", "0.6033609", "0.60031694", "0.60027224", "0.59728533", "0.59191805", "0.5822976", "0.5796649", "0.5761532", "0.5755782", "0.5725612", "0.57049775", "0.5696995", "0.56784564", "0.56742376", "0.5659563", "0.5643547", "0.56357384", "0.5621939", "0.5611254", "0.5596322", "0.5594667", "0.5555248", "0.55532205", "0.55519986", "0.55336976", "0.5533641", "0.5512834", "0.55017823", "0.54715115", "0.5467398", "0.5464816", "0.54640967", "0.5446473", "0.54462886", "0.544531", "0.54356056", "0.5431692", "0.54297537", "0.5417921", "0.5416113", "0.5413581", "0.54135334", "0.53860104", "0.5383606", "0.5380689", "0.5376133", "0.53679514", "0.53618914", "0.5355413", "0.5353965", "0.5351311", "0.53490233", "0.53490233", "0.5343454", "0.53404814", "0.533623", "0.53276616", "0.53259164", "0.5325829", "0.53201777", "0.53169835", "0.53054154", "0.52921754", "0.52834684", "0.5283169", "0.52826124", "0.5260847", "0.52537894", "0.5249977", "0.52499586", "0.5238376", "0.5237161", "0.52362216", "0.52282435", "0.52273613", "0.52269185", "0.52213144", "0.5219344", "0.5210653", "0.51935637", "0.5188053", "0.51875687", "0.51752985", "0.5172256", "0.5170533", "0.5166743", "0.5158105", "0.51549125", "0.5152578", "0.5145576", "0.5137672", "0.5125772", "0.5125772", "0.51239294", "0.51211745" ]
0.52251047
81
Eliminate duplicates in a sorted list. Returns a new sorted list with the same elements in list1, but with no duplicates. This function can be iterative.
def remove_duplicates(list1): tample = [float('inf')] for elem in list1: if elem in tample: continue tample.append(elem) return tample[1:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_duplicates(list1):\n temp_list = []\n\n # for each element in list1 check if is already in the temp list\n # if it is not yet there, copy it.\n for item in list1:\n if item not in temp_list:\n temp_list.append(item)\n\n return temp_list", "def remove_duplicates(list1):\n #iterative, not recursive\n if len(list1) == 0:\n return list1\n new_list = []\n new_list.append(list1[0])\n for item in list1[1:]:\n if item != new_list[-1]:\n new_list.append(item)\n return new_list", "def remove_duplicates(list1):\n if len(list1) == 0:\n return []\n result_list = [list1[0]]\n last_index = 0\n for dummy_index in range(1,len(list1)):\n if list1[dummy_index] != list1[last_index]:\n result_list.append(list1[dummy_index])\n last_index = dummy_index\n return result_list", "def remove_duplicates(list1):\r\n if len(list1) == 1 or len(list1) == 0:\r\n return [item for item in list1]\r\n else:\r\n if list1[-1] == list1[-2]:\r\n return remove_duplicates(list1[:-1])\r\n else:\r\n new_list = remove_duplicates(list1[:-1])\r\n new_list.append(list1[-1])\r\n return new_list", "def removeDuplicates(list):\n\treturn set((item for item in list))", "def deduplicate_list(lst):\n return list(set(lst))", "def removeDuplicates(list):\n\treturn Set((item for item in list))", "def remove_duplicates(somelist):\n return set(somelist)", "def remove_duplicates(mylist):\n return list(set(mylist))", "def remove_duplicates(list1):\n result = []\n \n for word in list1:\n if word not in result:\n result.append(word)\n return result", "def dedup(lst):\n new_lst = []\n seen = set()\n for elem in lst:\n if elem not in seen:\n new_lst.append(elem)\n seen.add(elem)\n\n return new_lst", "def remove_duplicates(lst):\n\tnew_lst = list()\n\tfor item in lst:\n\t\tif item not in new_lst:\n\t\t\tnew_lst.append(item)\n\treturn new_lst", "def remove_duplicates(self,list_):\r\n ret =[]\r\n\r\n for item in list_:\r\n if item not in ret:\r\n ret.append(item)\r\n removed = len(list_)-len(ret)\r\n logger.info('%d duplicate%s removed.' 
%(removed,plural_or_not(removed)))\r\n return ret", "def dedup_list(l):\n return list(set(l))", "def unique(list1):\n \n # intilize a null list \n unique_list = [] \n \n # traverse for all elements \n for x in list1: \n # check if exists in unique_list or not \n if x not in unique_list: \n unique_list.append(x)\n return unique_list", "def remove_identical(list):\n seen = set()\n seen_add = seen.add\n return [x for x in list if not (x in seen or seen_add(x))]", "def list_remove_duplicates(l):\n seen = set()\n seen_add = seen.add\n return [x for x in l if not (x in seen or seen_add(x))]", "def dedup_list(my_list):\r\n new_list = []\r\n for elem in my_list:\r\n if elem not in new_list:\r\n new_list.append(elem)\r\n return new_list", "def remove_repeats(list1: List[int], list2: List[int]) -> List[int]:\n result = []\n for num in list2:\n if num not in list1:\n result.append(num)\n \n return result", "def remove_duplicates(lst):\n lst.sort()\n lst_without_duplicates = [x for (x, _) in groupby(lst)]\n num_removed = len(lst) - len(lst_without_duplicates)\n print(\"Removed %d duplicates!\" % num_removed)\n return lst_without_duplicates", "def remove_duplicates(list):\n x = 0\n while x < len(list):\n y = x + 1\n while y < len(list):\n if list[x] == list[y]:\n del list[y]\n else:\n y += 1\n x += 1\n return list", "def dedupe_list(input):\n return list(set(input))", "def remove_duplicates_orderly(cls, list_with_duplicates: list, preserve_first_encounter: bool = True,\n\t\t\t\t\t\t\t\t preserve_original_list: bool = False) -> list:\n\t\tlist_set = set(list_with_duplicates)\n\t\tlist_new = list_with_duplicates.copy() if preserve_original_list else list_with_duplicates\n\t\tif len(list_new) == len(list_set): # No extra\n\t\t\treturn list_new\n\t\tif preserve_first_encounter:\n\t\t\tlist_new.reverse()\n\t\tfor index in range(len(list_new) - 1, -1, -1):\n\t\t\titem = list_new[index]\n\t\t\tif item in list_set:\n\t\t\t\tlist_set.remove(item)\n\t\t\telse:\n\t\t\t\tlist_new.pop(index)\n\t\tif preserve_first_encounter:\n\t\t\tlist_new.reverse()\n\t\treturn list_new", "def remove_duplicates(my_list):\n result = []\n for item in my_list:\n if item not in result:\n result.append(item)\n return result", "def get_list_no_duplicates(list_with_duplicates):\n set_elts = set([])\n list_no_duplicates = []\n for elt in list_with_duplicates:\n if elt not in set_elts:\n list_no_duplicates.append(elt)\n set_elts.add(elt)\n return list_no_duplicates", "def _remove_duplicates(input_list):\n return list(OrderedDict.fromkeys(input_list))", "def remove_duplicates_badSolution( li ):\n newli=[]\n seen = set()\n for item in li:\n if item not in seen:\n seen.add( item )\n newli.append(item)\n\n return newli", "def find_remove_duplicates(list_of_values):\r\n output = []\r\n seen = set()\r\n for value in list_of_values:\r\n if value not in seen:\r\n output.append(value)\r\n seen.add(value)\r\n return output", "def unique_list(duplicates_list):\n used = set()\n unique = [x for x in duplicates_list\n if x not in used and\n (used.add(x) or True)]\n return unique", "def test__remove_duplicates(self):\n\n result = deduped_list\n expected = [\n 'Fred',\n 'Dave',\n 'Sarah',\n 'John',\n 'Matthew',\n 'Joanna',\n 'Marjorie',\n 'Anna',\n 'Tony',\n 'Sam',\n 'Eric',\n 'Susan',\n 'Arthur',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def uniq(ordered_list):\n assert isinstance(ordered_list, list)\n list_ = ordered_list[:]\n if not ordered_list:\n return []\n previous_item = list_[0]\n for item in list_[1:]:\n if item == 
previous_item:\n list_.remove(item)\n previous_item = item\n return list_", "def unique_list(\n l1: list,\n l2: list,\n ) -> list:\n\n l = list((set(l1) | set(l2)) - (set(l1) & set(l2)))\n\n return l", "def remove_dup2(linkedlist):", "def dedup1(linked_list):\n\tif linked_list is None:\n\t\traise ValueError('Invalid input')\n\n\t# Mark and sweep\n\tcurrent = linked_list\n\tseen = set([current.value])\n\twhile current.next is not None:\n\t\tif current.next.value in seen:\n\t\t\tcurrent.next = current.next.next\n\t\telse:\n\t\t\tcurrent = current.next # Move forward\n\n\treturn linked_list", "def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))", "def remove_repeated(l1, l2):\n for i in range(len(l1)-1):\n j=i+1\n while j<len(l1):\n if l1[j] == l1[i]:\n l1.pop(j)\n l2.pop(j)\n else:\n j+=1", "def remove_duplicates_from_unsorted_list(linked_list: SinglyLinkedList):\n # Set to store seen values\n unique_list_nodes = set()\n current_node = linked_list.head\n\n if current_node:\n unique_list_nodes.add(current_node.data)\n previous_node = current_node\n current_node = current_node.next\n\n while current_node:\n # If current value is seen before\n if current_node.data in unique_list_nodes:\n previous_node.next = current_node.next\n else:\n previous_node = current_node\n unique_list_nodes.add(current_node.data)\n current_node = current_node.next", "def _deduplicate(lst):\n out = []\n for i in lst:\n if i not in out:\n out.append(i)\n return out", "def remove_duplicates(l):\n unique = set() # we use a set because ``elem in set`` is much faster than ``elem in list``\n i = 0\n while i < len(l):\n elem = l[i]\n if elem in unique:\n del l[i]\n else:\n unique.add(elem)\n i += 1\n return l", "def unique(input_list):\n output = []\n for item in input_list:\n if item not in output:\n output.append(item)\n return output", "def remove_duplicates(id_list):\n id_set = set(id_list)\n id_set_dup = id_set.intersection(id_set)\n id_set_diff = id_set.symmetric_difference(id_set)\n id_set_unique = id_set_dup.union(id_set_diff)\n id_list_unique = list(id_set_unique)\n\n return id_list_unique", "def de_dup_and_sort(input):\r\n if input== None:\r\n return None\r\n input = list(input)\r\n input = remove_duplicates(input)\r\n input.sort()\r\n return input", "def remove_dups(nums):\r\n nums[:] = sorted(list(set(nums)))\r\n return nums", "def deduped(items):\n \n return list(set(items))", "def unique(temp_list):\n seen = set()\n return [x for x in temp_list if not (x in seen or seen.add(x))]", "def remove_duplicates(linked_list):\n elems = set()\n prev = None\n for node in linked_list.iternodes():\n if node.value in elems:\n prev.next_node = node.next_node\n else:\n elems.add(node.value)\n prev = node", "def unique(li):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in li if not (x in seen or seen_add(x))]", "def unique_list(inlist):\n return set(inlist)", "def uniq(listinput):\n\t\"\"\" This will be provided for the student. 
\"\"\"\n\toutput = []\n\tfor x in listinput:\n\t\tif x not in output:\n\t\t\toutput.append(x)\n\treturn output", "def remove_duplicates(lst):\n (els, inds) = np.unique(lst, return_index=True)\n out = np.zeros(lst.shape, dtype=lst.dtype)\n out[inds] = els\n return out", "def remdup_preserve_order(lst):\n val = set()\n val_add = val.add\n return [x for x in lst if not ((x in val) or val_add(x))]", "def remove_duplicates(input: List[str]) -> List[str]:\n\n output = input # Replace with your logic\n\n return output", "def remove_list_redundancies(l):\n return sorted(list(set(l)), lambda a, b : l.index(a) - l.index(b))", "def listops_uniq(list_a):\r\n retlist = []\r\n for item in list_a:\r\n if item not in retlist:\r\n retlist.append(item)\r\n\r\n return retlist", "def without_duplicates(words):\n\n # the long way: add each item to a set through iteration\n #\n # duplicate_remover = set([])\n # for word in words:\n # duplicate_remover.add(word)\n # words = list(duplicate_remover)\n # return words\n\n # the quick version: convert to set removes duplicates, convert back\n words = set(words)\n return list(words)", "def unique(list1):\n # insert the list to the set\n list_set = set(list1)\n # convert the set to the list\n unique_list = (list(list_set))\n for x in unique_list:\n return(x,)", "def unique(lst1):\n lst2 = []\n if not(isinstance(lst1, list)):\n raise ValueError\n for i in lst1:\n if i not in lst2:\n lst2.append(i)\n return lst2", "def duplicates(ls: list):\n\n seen = set([])\n dups = set([])\n\n for x in ls:\n if x in seen:\n dups.add(x)\n else:\n seen.add(x)\n\n return dups", "def remove_duplicates(numbers: List[int]) -> List[int]:\n#[SOLUTION]\n import collections\n c = collections.Counter(numbers)\n return [n for n in numbers if c[n] <= 1]", "def remove_duplicates(self, objects: list):\n # Filter list removing duplicates\n result = [\n item\n for index, item in enumerate(objects)\n if item not in objects[index + 1 :]\n ]\n return result", "def removeDuplicates(seq):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in seq if not (x in seen or seen_add(x))]", "def without_duplicates(words):\n # initialize empty list where non duplicate words will be stored\n non_duplicate_words = []\n\n # iterate over list\n for item in words:\n # if an item in list is not already in list containing duplicate words..\n if item != non_duplicate_words:\n # append item if not in dupe list\n non_duplicate_words.append(item)\n\n # use set because sets do not contain duplicate items\n non_duplicate_words = set(non_duplicate_words)\n\n # return a list of non duplicate items\n return list(non_duplicate_words)", "def remove_dups(ll: SinglyLinkedList):\n seen = set()\n current = ll.head\n prev = None\n while current is not None:\n if current.data in seen:\n prev.next = current.next\n temp = current\n current = current.next\n temp.next = None\n else:\n seen.add(current.data)\n prev = current\n current = current.next", "def _purge_duplicates(f):\n @functools.wraps(f)\n def wrapper(*args, **kwds):\n ret_val = f(*args, **kwds)\n new_list = []\n for item in ret_val:\n if item in new_list:\n continue\n new_list.append(item)\n return new_list\n return wrapper", "def make_unique(in_list):\n new_list = []\n for l in in_list:\n if l not in new_list:\n new_list.append(l)\n return new_list", "def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))", "def removeDuplicatesInList(self, data):\n newDataList = []\n for i in data:\n if newDataList.count(i) == 0:\n 
newDataList.append(i)\n data.clear()\n data += newDataList", "def deduped(items):\n\n # # create an empty dictionary\n # # create an emtpy list that we will return \n # # Loop through the items in the list, if the item is not in the dict, add item to the list, and to the dict\n # # If the item is in the dict, increase the count by 1\n # # If the item is in the dict already, dont add the item to the list\n # # return list\n\n\n # duplicate_counts = {}\n\n # deduped = []\n\n # for item in items:\n # duplicate_counts[item] = duplicate_counts.get(item, 0) + 1\n\n\n # if duplicate_counts[item] == 1:\n # deduped.append(item)\n\n # return deduped\n\n ##################### HB SOLUTION ####################################\n\n # # sets are great for de-duplicating lists:\n # # sets dont maintain oder though, so if we want our answer to be in order\n # # we have to do the de-duplicating by hand\n # # however... this runtime would be O(n^2) becaause we have a for loop\n # # and nested inside that, we have an in which is a hidden for-loop\n # # for every charecter that we are looping over, we have to loop in deduped\n # # to check if that charecter is in there\n # # we dont want this \n\n # deduped = []\n\n # for char in items:\n # if char not in deduped:\n # deduped.append(char)\n \n # return deduped\n\n # instead we can use use a set to keep track of what we have seen and use a list\n # to hold the final results\n\n # keep track of what we have seen\n seen = set()\n\n # deduped will be what we return \n deduped = []\n\n for item in items:\n if item not in seen:\n deduped.append(item)\n seen.add(item)\n\n return deduped", "def drop_matches(list1, list2):\n list1.sort()\n list2.sort()\n matches = []\n i = j = 0\n lenLst1 = len(list1)\n lenLst2 = len(list2)\n while i < lenLst1 and j < lenLst2:\n if list1[i] < list2[j]:\n matches.append(list1[i])\n i+=1\n elif list1[i] > list2[j]:\n matches.append(list2[j])\n j+=1\n else: #they are the same\n i+=1\n j+=1\n while i < lenLst1:\n matches.append(list1[i])\n i+=1\n while j < lenLst2:\n matches.append(list2[j])\n j+=1\n return len(matches), matches", "def _trim_duplicates(all_matches):\n trimmed_list = IndexedSet()\n for match in all_matches:\n if (\n match\n and match not in trimmed_list\n and match[::-1] not in trimmed_list\n ):\n trimmed_list.add(match)\n return trimmed_list", "def remove_duplicates(possible_vulns):\n return list(set(possible_vulns))", "def makeUnique(list):\n\tu = []\n\tfor l in list:\n\t\tif not l in u:\n\t\t\tu.append(l)\n\treturn u", "def removeDups(lst):\n\n return list(dict.fromkeys(lst) )", "def no_dupli(L):\r\n N = [L.pop(0)]\r\n while L != []:\r\n k = 0\r\n flag = True\r\n while k < len(N) and L != [] and flag:\r\n if (N[k] == L[0]).all():\r\n L.pop(0)\r\n flag = False\r\n else:\r\n k = k + 1\r\n if k == len(N):\r\n N.append(L.pop(0))\r\n flag = False\r\n \r\n return N", "def unique_list2(mylist):\n\n newlist = []\n for num in mylist:\n if num not in newlist:\n newlist.append(num)\n print(newlist)", "def remove_consequetive_duplicates(your_list):\n out = [v for i, v in enumerate(your_list) if i == 0 or v != your_list[i-1]]\n if type(your_list) == np.ndarray:\n return np.array(out)\n return out", "def _uniq(self, lst):\n h = {}\n for e in lst:\n h[e] = 1\n return sorted(h.keys())", "def intersect(list1, list2):\n result_list = []\n #list3 = remove_duplicates(list1)\n for dummy_element in list1:\n if list2.count(dummy_element) > 0 and result_list.count(dummy_element) == 0:\n result_list.append(dummy_element)\n return result_list", 
"def remove_duplicates_and_nones(items):\n new_list = [item for item in items if item is not None]\n\n return list(set(new_list))", "def dedupe(items):\n seen = set()\n for item in items:\n if item not in seen:\n yield item\n seen.add(item)", "def get_duplicates(input_list):\n size = len(input_list)\n duplicates = list()\n for i in range(size):\n k = i + 1\n for j in range(k, size):\n if input_list[i] == input_list[j] and input_list[i] not in duplicates:\n duplicates.append(input_list[i])\n return duplicates", "def unique(lis):\n seen = set()\n ret = []\n for elm in lis:\n if elm not in seen:\n ret.append(elm)\n seen.add(elm)\n return ret", "def remove_l2_from_l1(l1, l2):\r\n return [element for element in l1 if element not in l2]", "def unique_list(input_list):\n output_list = []\n if len(input_list) > 0:\n dim = _sp.shape(input_list)[1]\n for i in input_list:\n match = False\n for j in output_list:\n if dim == 3:\n if i[0] == j[0] and i[1] == j[1] and i[2] == j[2]:\n match = True\n elif dim == 2:\n if i[0] == j[0] and i[1] == j[1]:\n match = True\n elif dim == 1:\n if i[0] == j[0]:\n match = True\n if match is False:\n output_list.append(i)\n return output_list", "def duplicated(list):\n u, c = np.unique(list, return_counts=True)\n dup = u[c > 1]\n return dup", "def duplicates(items):\n duplicate_items = set()\n for item in items:\n if items.count(item) > 1:\n duplicate_items.add(item)\n duplicate_list = list(duplicate_items)\n\n\n return sorted(duplicate_list)", "def remove_duplicates(self):\n cur = self.head\n prev = None\n\n dup_values = dict()\n\n while cur:\n if cur.data in dup_values:\n # Remove node:\n prev.next = cur.next\n else:\n # Have not encountered element before.\n dup_values[cur.data] = 1\n prev = cur\n cur = prev.next", "def __remove_duplicates(self, word_list: List[str]) -> List[str]:\n\n # here comes the extra complicated move to remove duplicate words from a query\n # this approach always keeps words which are at the beginning of a query and only removes duplicate words\n # that occur later in the query\n unique_word_list = []\n\n for word in word_list:\n if word not in unique_word_list:\n unique_word_list.append(word)\n\n return unique_word_list", "def removeDoublon(liste):\n tmp=[]\n for i,elt in enumerate(liste):\n if elt not in tmp:\n tmp.append(elt)\n return tmp", "def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li", "def remove_sorted_duplicates(self):\n cur = self.head\n while cur is not None and cur.next is not None:\n if cur.next.data == cur.data:\n cur.next = cur.next.next\n else:\n cur = cur.next\n return self.head", "def _naive_unique(li):\n tmp = []\n for el in li:\n if el not in tmp:\n tmp.append(el)\n return tmp", "def unique_list(src_list):\n return list(OrderedDict.fromkeys(src_list).keys())", "def dedup2(linked_list):\n\tif linked_list is None:\n\t\traise ValueError('Invalid input')\n\n\tdef _seen(node):\n\t\t\"\"\"Return True if `node` has already been seen *earlier*\n\t\tin the linked list.\n\n\t\t\"\"\"\n\t\tcheck = linked_list\n\t\twhile check != node:\n\t\t\tif check.value == node.value:\n\t\t\t\treturn True\n\t\t\tcheck = check.next\n\t\treturn False\n\n\t# Iterate through the list\n\tcurrent = linked_list\n\twhile current.next is not None:\n\t\tif _seen(current.next):\n\t\t\tcurrent.next = current.next.next\n\t\telse:\n\t\t\tcurrent = current.next # Move forward\n\n\treturn linked_list", "def 
remove_duplicates_slow(linked_list):\n current = linked_list.head\n while current:\n runner = current\n while runner:\n if runner.next_node and runner.next_node.value == current.value:\n # delete this duplicate\n runner.next_node = runner.next_node.next_node\n runner = runner.next_node\n current = current.next_node", "def find_unique_common_items(list1, list2):\n\n dedupe_1 = set(list1)\n dedupe_2 = set(list2)\n # set math!\n unique = dedupe_1 & dedupe_2\n\n # just FYI when I left out the line below, and unique was of type set,\n # it still passed the tests, despite the docstring indicating a list\n # should be returned\n unique = list(unique)\n\n return unique", "def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2", "def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]", "def task7_unique_number(lst):\n unique = []\n for elem in lst:\n check_list = lst.copy()\n lst.remove(elem)\n if elem not in lst:\n unique.append(elem)\n lst = check_list\n return unique", "def list_difference(list1, list2):\r\n diff_list = []\r\n for item in list1:\r\n if not item in list2:\r\n diff_list.append(item)\r\n else:\r\n if list2.count(item) != list1.count(item) and not item in diff_list:\r\n diff_list.append(item) \r\n return diff_list" ]
[ "0.82367706", "0.8172117", "0.8020477", "0.7850647", "0.76956123", "0.76429844", "0.7602725", "0.750563", "0.74992764", "0.742413", "0.73368996", "0.7323583", "0.7320335", "0.7309752", "0.7279414", "0.7272679", "0.72361135", "0.72262025", "0.721125", "0.718744", "0.7170773", "0.7170712", "0.7159779", "0.70928544", "0.7018214", "0.6940318", "0.69213825", "0.68613994", "0.68538773", "0.68270814", "0.68220615", "0.6794563", "0.6793005", "0.6789003", "0.6782677", "0.67724067", "0.676889", "0.67622596", "0.6683343", "0.66765696", "0.6664801", "0.66458744", "0.6635917", "0.6626176", "0.6611625", "0.6589805", "0.65715814", "0.653923", "0.6527067", "0.65189224", "0.6507735", "0.650372", "0.64938855", "0.6467336", "0.6454675", "0.6446995", "0.6429137", "0.6423222", "0.64068186", "0.6349882", "0.6347616", "0.6341463", "0.6336202", "0.6334655", "0.63216054", "0.63183963", "0.63172835", "0.63152313", "0.62750185", "0.62676793", "0.6262523", "0.62523025", "0.6234386", "0.6218373", "0.6217015", "0.61868703", "0.61862177", "0.61734813", "0.61681014", "0.6164473", "0.6155508", "0.61399305", "0.61371386", "0.6133011", "0.6123017", "0.6104455", "0.6097742", "0.6091516", "0.6082585", "0.60778743", "0.60611", "0.6038538", "0.6035855", "0.60337347", "0.6026686", "0.59925276", "0.5962297", "0.59525806", "0.5948608", "0.59449565" ]
0.8031551
2
Compute the intersection of two sorted lists. Returns a new sorted list containing only elements that are in both list1 and list2. This function can be iterative.
def intersect(list1, list2): intersection_list = [] list1_idx = 0 list2_idx = 0 while list2_idx < len(list2) and list1_idx < len(list1): if list2[list2_idx] == list1[list1_idx]: intersection_list.append(list2[list2_idx]) list1_idx += 1 list2_idx += 1 elif list2[list2_idx] > list1[list1_idx]: list1_idx += 1 else: list2_idx += 1 return intersection_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersection(list1, list2):\n list3 = [value for value in list1 if value in list2]\n return list3", "def intersect(list1, list2):\r\n if len(list1) == 0 or len(list2) == 0:\r\n return []\r\n else:\r\n if list1[0] == list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(intersect(list1[1:], list2[1:]))\r\n return new_list\r\n elif list1[0] < list2[0]:\r\n return intersect(list1[1:], list2)\r\n else:\r\n return intersect(list1, list2[1:])", "def intersect(list1, list2):\n intersect_list = []\n\n # check if the items in list1 are in list2 and add them to the list\n for item1 in list1:\n if item1 in list2:\n intersect_list.append(item1)\n\n return intersect_list", "def intersection(lst1, lst2):\n lst3 = [value for value in lst1 if value in lst2]\n return lst3", "def intersection(llist_1 : LinkedList, llist_2 : LinkedList) -> LinkedList:\n # Convert to set to remove repeated entries in each list\n lset_1 = list_to_set(llist_1)\n lset_2 = list_to_set(llist_2)\n\n # Initialize empty intersec_list\n intersec_list = LinkedList()\n \n # Populate list_of_candidates with all elements from lset_1\n list_of_candidates = []\n for item in lset_1:\n list_of_candidates.append(item)\n\n # Only add to intersec_list the items from lset_2 available in list_of_candidates\n for item in lset_2:\n if item in list_of_candidates:\n intersec_list.append(item)\n\n return intersec_list", "def list_intersect(l1: List[Any], l2: List[Any]) -> List[Any]:\n return [item for item in l1 if item in l2]", "def listops_intersect(list_a,list_b):\r\n\r\n retlist = []\r\n for item in list_a:\r\n if item in list_b:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def intersect_set(list1, list2):\n return (set(list1) & set(list2))", "def intersection(lst1, lst2):\n temp = set(lst2)\n lst3 = [value for value in lst1 if value in temp]\n return lst3", "def list_intersection(listA, listB):\n # create empty intersection\n intersection = []\n\n # for each item a of listA\n for a in listA:\n # is any b of listB the same as a? 
\n for b in listB:\n if a == b:\n # found a, add it to intersection\n intersection.append(a)\n\n # all loops done, return intersection\n return intersection", "def intersection(list1, list2):\r\n\r\n list3 = []\r\n\r\n if not list1 and not list2: # Both lists passed as arguments are empty\r\n return list3\r\n\r\n elif not list1 and list2: # The first list passed as argument is empty\r\n return list2\r\n\r\n elif list1 and not list2: # The second list passed as argument is empty\r\n return list1\r\n\r\n else:\r\n list3 = [value for value in list1 if value in list2] # Getting only the common values of the two lists\r\n return list3", "def sorted_intersect(self, list1, list2):\n ### Begin your code\n result = []\n p1 = 0\n p2 = 0\n s1 = int(math.sqrt(len(list1)))\n s2 = int(math.sqrt(len(list2)))\n while (p1 < len(list1) and p2 < len(list2)):\n if list1[p1] == list2[p2]:\n result.append(list1[p1])\n p1 = p1 + 1\n p2 = p2 + 1\n elif list1[p1] < list2[p2]:\n if (p1 + s1 >= len(list1) or list1[p1 + s1] > list2[p2]):\n p1 = p1 + 1\n else:\n p1 = p1 + s1\n else:\n if (p2 + s2 >= len(list2) or list2[p2 + s2] > list2[p2]):\n p2 = p2 + 1\n else:\n p2 = p2 + s2\n # print(result)\n return result\n ### End your code", "def intersect(nums1: List[int], nums2: List[int]) -> List[int]:\n if not nums1 or not nums2:\n return False\n if len(nums1) == 1 and len(nums2) == 1:\n if nums1[0] == nums2[0]:\n return nums1\n\n intersection = list()\n\n for x in nums1:\n if x in nums2:\n nums2.remove(x)\n intersection.append(x)\n return intersection", "def intersect(list1, list2):\n result_list = []\n #list3 = remove_duplicates(list1)\n for dummy_element in list1:\n if list2.count(dummy_element) > 0 and result_list.count(dummy_element) == 0:\n result_list.append(dummy_element)\n return result_list", "def find_intersection(nums1, nums2)->list:\n set1 = set(nums1)\n set2 = set(nums2)\n intersection = set()\n\n for i in set1:\n if i in set2: # The item is in both lists\n intersection.add(i)\n return list(intersection)", "def intersect(self, other_list):\n assert type(other_list) == type(self)\n \n# if len(self.vals) >= len(other_list.vals):\n# big = self.vals\n# small = other_list.vals\n# else:\n# small = self.vals\n# big = other_list.vals\n# \n# common_list = intSet()\n# for e in big:\n# if e in small:\n# common_list.insert(e)\n# return common_list\n\n common_list = intSet() \n for e in self.vals:\n if other_list.member(e): #if the current e is a member of other_list\n common_list.insert(e)\n return common_list", "def intersection(a, b):\n return list(set(a) & set(b))", "def intersect(a, b):\n return(list(set(a) & set(b)))", "def intersect(a,b):\n\treturn list(set(a) & set(b))", "def intersection(A,B):\n set_A = A\n set_B = B\n sorted_intersection = []\n for elements in set_A:\n if elements in set_B:\n sorted_intersection.append(elements)\n return sorted_intersection", "def intersect(a, b):\r\n return list(set(a) & set(b))", "def intersect(a, b):\n return list(set(a) & set(b))", "def get_contained(list1, list2):\n return [x for x in list1 for y in list2 if x == y]", "def __intersect(a, b):\n a = [elem.lower() for elem in a]\n b = [elem.lower() for elem in b]\n return list(set(a) & set(b))", "def intersect(list1, list2):\n result = []\n \n for word in list1:\n if word in list2:\n result.append(word)\n return result", "def intersect(self, left=[], right=[]):\n return list(set(left).intersection(right))", "def common_elements(list1, list2):\n result = []\n for element in list1:\n if element in list2:\n 
result.append(element)\n return result", "def __intersect(self, a, b):\n a = [elem.lower() for elem in a]\n b = [elem.lower() for elem in b]\n return list(set(a) & set(b))", "def findIntersection(lOne, lTwo):\n\tsOne = set(lOne)\n\tsTwo = set(lTwo)\n\tsInt = sOne.intersection(sTwo)\n\treturn list(sInt)", "def intersection(set_1, set_2):\n intersection_list = []\n\n for number in set_1:\n if number in set_2:\n intersection_list.append(number)\n \n print(\"Intersection:\", intersection_list)\n return set_1, set_2", "def intersection(llist_1, llist_2):\n hashmap = {}\n return_linked_list = LinkedList()\n node = llist_1.get_head()\n while node:\n hashmap[node.get_value()] = 0\n node = node.get_next()\n node = llist_2.get_head()\n while node:\n if node.get_value() in hashmap:\n if hashmap[node.get_value()] == 1:\n node= node.get_next()\n continue\n\n return_linked_list.append(node.get_value())\n hashmap[node.get_value()] = 1\n node = node.get_next()\n if return_linked_list.size() == 0:\n return 'No intersections found'\n return return_linked_list", "def __listintersect(self, c1, c2):\n s2 = {}\n for delta in c2:\n s2[delta] = 1\n\n\tc = []\n\tfor delta in c1:\n if s2.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c", "def intersection(self, other: list) -> 'List':\n if not isinstance(other, list):\n raise ValueError('The comparing element is not a list')\n\n return List(item for item in self if item in other)", "def intersectie_multimi(lista_1, lista_2):\n lista_intersectie = []\n for elemente_lista_1 in lista_1:\n for elemente_lista_2 in lista_2:\n if elemente_lista_1 == elemente_lista_2:\n lista_intersectie.append(elemente_lista_1)\n return lista_intersectie", "def intersect_list(n):\n list_a = range(n)\n list_b = range(n-3, 2 * n)\n start = timeit.default_timer()\n in_both = []\n for x in list_a:\n if x in list_b:\n in_both.append(x)\n run_time = timeit.default_timer() - start\n return run_time, in_both", "def intersect_lists(lists):\n if lists:\n return sorted(set.intersection(*[set(l) for l in lists]))\n else:\n return list()", "def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:\n overlap = set(a).intersection(b)\n\n result = []\n\n current, other = iter(a), iter(b)\n\n while True:\n for element in current:\n if element in overlap:\n overlap.discard(element)\n other, current = current, other\n break\n\n result.append(element)\n else:\n result.extend(other)\n break\n\n return result", "def intersection(self, other):\n intersection_set = Set()\n\n for bucket in self.buckets:\n for element in bucket.iterate():\n if other.contains(element):\n intersection_set.add(element)\n return intersection_set", "def list_intersection(head1, head2):\n\n retval = None\n\n while head1 is not None:\n cur = head2\n while cur is not None:\n if head1.val == cur.val:\n\n # initialize the first node\n if retval is None:\n retval = ListNode(cur.val)\n copy = retval\n\n # Add node to list\n else:\n copy.next = ListNode(cur.val)\n copy = copy.next\n \n break\n \n cur = cur.next\n \n head1 = head1.next\n \n return retval", "def intersection(self, *other):\n new_ordered_set = OrderedSet()\n\n for element in self:\n for obj in other:\n if element not in obj:\n break\n else:\n new_ordered_set.add(element)\n\n return new_ordered_set", "def intersection(self, *lists):\n if self.is_a(set):\n return _(self._.intersection(*lists))\n return _(_intersection(self._, *lists))", "def intersect(t1,t2):\n result=()\n for e in t1:\n if e in t2:\n result += (e,)\n return result", "def common_items(self, list1, 
list2):\n common_list = [value for value in list1 if value in list2]\n print(\"Common elements from both lists\")\n print(common_list)", "def compare(lst1, lst2):\n bonnie = []\n for item in lst1:\n if item in lst2:\n bonnie.append(item)\n return bonnie", "def intersect(iterable, other):\n return 0", "def intersect(p1, p2):\n posts = []\n i = 0\n j = 0\n while i < len(p1) and j < len(p2):\n if p1[i] == p2[j]:\n posts.append(p1[i])\n i += 1\n j += 1\n elif p1[i] < p2[j]:\n i += 1\n else:\n j += 1\n return posts", "def intersect(self, other):\n result = IntSet()\n map(result.insert, [e for e in self.vals if e in other.vals])\n return result", "def insercionListas(L1,L2):\n return set(L1) & set(L2)", "def _intersect(arrlist_1, arrlist_2):\n hash_table = {}\n for i, point in enumerate(arrlist_1):\n hash_table[point] = i\n for i, point in enumerate(arrlist_2):\n if point in hash_table:\n return point\n return None", "def intersect(a, b): \n # print(len(list(set(a) & set(b))), 'unique and matching names between FPL and Understat')\n return list(set(a) & set(b))", "def intersects(iterable1, iterable2):\n return find(lambda x: x in iterable1, iterable2) is not None", "def set_intersection(set_a, set_b):\n \n intersection = set_b & set_a\n \n return intersection", "def intersection(A, B):\n\n if max(0, min(A[1], B[1]) - max(A[0], B[0])):\n # intersection is not empty!\n if A[0] < B[0]:\n return [B[0], A[1]]\n else:\n return [A[0], B[1]]\n else:\n print 'Warning: intersection is an empty set!'\n return [np.nan, np.nan]", "def _pair_intersection(\n cls,\n availabilities_a: List[\"Availability\"],\n availabilities_b: List[\"Availability\"],\n ) -> List[\"Availability\"]:\n result = []\n\n # yay for O(b*a) time! I am sure there is some fancy trick to make this faster,\n # but we're dealing with less than 100 items in total, sooo.. 
¯\\_(ツ)_/¯\n for a in availabilities_a:\n for b in availabilities_b:\n if a.overlaps(b, True):\n result.append(a.intersect_with(b))\n\n return result", "def find_common_items(list1, list2):\n\n common_items = []\n\n # iterate through every item in first list\n for item in list1:\n # during each iteration of first list, also iterate through second list\n # this is terribly inefficient!\n for second_item in list2:\n if item == second_item:\n common_items.append(item)\n\n return common_items", "def intersections(boxes0, boxes1):\n intersections = []\n for box0 in boxes0:\n current_intersections = [intersection(box0, box1) for box1 in boxes1]\n intersections.extend([i for i in current_intersections if i is not None])\n return intersections", "def compare_lists(firstlist, secondlist):\n common = set(firstlist).intersection(secondlist)\n first_difference = [item for item in firstlist if item not in common]\n second_difference = [item for item in secondlist if item not in common]\n return (common, first_difference, second_difference)", "def intersect(a, b):\r\n #print(a, b)\r\n #first case, we have , in gs a and gs b\r\n try:\r\n b_a = a.split(',')\r\n b_b = b.split(',')\r\n return list(set(b_a) & set(b_b))\r\n except:\r\n pass", "def merge_ordered_list(in_list1: list, in_list2: list) -> list:\n _list1 = in_list1.copy()\n _list2 = in_list2.copy()\n _output_list = []\n idx_2 = 0\n for element in _list1:\n while idx_2 < len(_list2) and element > _list2[idx_2]:\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n _output_list.append(element)\n while idx_2 < len(_list2):\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n return _output_list", "def reformat_order(list1: List[int], list2: List[int]) -> List[int]:\n result = []\n for item in list1:\n if item in list2:\n result.append(item)\n \n return result", "def all_in_list (list1, list2):\n return all(map(lambda c: c in list2, list1) )", "def intersection(arrays):\n # Create hash table (dict) to store numbers in for faster O(1) lookup (for \n # any individual lookup):\n # numbers = {}\n\n # Create list for intersection of the sets:\n # intersection = []\n\n # Populate hash table with numbers from the first list (keys), because any numbers \n # not in the first list will not be in the intersection of the lists, by definition.\n numbers = {item:False for item in arrays[0]}\n # Now check the other input lists in order, removing any number/item that is not in both:\n for list in arrays[1:]:\n for item in list: # NOT actually O(n**2); just O(n) for the whole input matrix.\n # Mark as True to flag any items that are in the intersection of the two lists:\n if item in numbers:\n numbers[item] = True\n # Keep only the numbers that are in the intersection of the two lists:\n numbers = {key:value for key, value in numbers.items() if value == True}\n # Mark all as False again to start a fresh comparison with the next list:\n for item in numbers:\n numbers[item] = False\n\n return [*numbers.keys()]", "def intersection(s1, s2):\n \"*** YOUR CODE HERE ***\"\n return s1.intersection(s2) # ...", "def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n 
result_list.extend(list1[list1_index:])\n if list2_index < list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list", "def intersection(A, B, *C):\n return setutils(\"intersection\", A, B, *C)", "def merge(list1, list2):\n res = []\n index_i, index_j = 0, 0\n while index_i < len(list1) and index_j < len(list2):\n if list1[index_i] <= list2[index_j]:\n res.append(list1[index_i])\n index_i += 1\n else:\n res.append(list2[index_j])\n index_j += 1\n res += list1[index_i:]\n res += list2[index_j:]\n return res", "def intersect(self, other):\n # Initialize a new intSet \n commonValueSet = intSet()\n # Go through the values in this set\n for val in self.vals:\n # Check if each value is a member of the other set \n if other.member(val):\n commonValueSet.insert(val)\n return commonValueSet", "def find_unique_common_items(list1, list2):\n\n # convert each list to set; will remove duplicates\n set1 = set(list1)\n set2 = set(list2)\n\n # use & operator to find common values between set1 and set2\n unique_set = set1 & set2\n\n return unique_set", "def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n result.extend(list2[j:])\r\n return result", "def intersection(*seqs):\n return (item for item in seqs[0]\n if all(item in seq for seq in seqs[1:]))", "def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result", "def is_in_list(list_one, list_two):\n \n for element in list_one:\n if element in list_two:\n return True\n return False", "def merge(lst1, lst2):\n\n results = []\n i = 0\n j = 0\n\n while i <= len(lst1) - 1 and j <= len(lst2) - 1:\n\n if lst1[i] < lst2[j]:\n results.append(lst1[i])\n i += 1\n else:\n results.append(lst2[j])\n j += 1\n\n if i == len(lst1):\n results.extend(lst2[j:])\n else:\n results.extend(lst1[i:])\n\n return results", "def common_elements(s1, s2):\n\n return set(s1 & s2)", "def intersection(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]:\n raise NotImplementedError(\"You need to implement this as part of the assignment.\")", "def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return merged_output", "def nonempty_intersection(list1, list2):\n return len(list(set(list1) & set(list2))) > 0", "def intersecting_entries(self, other):\n self_keys = set(self._entries.keys())\n other_keys = 
set(other._entries.keys())\n common_keys = self_keys.intersection(other_keys)\n return ([(self._entries[key], other._entries[key])\n for key in common_keys])", "def intersect(list1, list2):\n #iterative, not recursive\n intersect_list = []\n outer_list = []\n inner_list = []\n len_list1 = len(list1)\n len_list2 = len(list2)\n start_outer = 0\n start_inner = 0\n inner_start = 0\n if len_list1 <= len_list2:\n outer_list = list1\n inner_list = list2\n else:\n outer_list = list2\n inner_list = list1\n end_outer = len(outer_list)\n end_inner = len(inner_list)\n if DEBUG_I:\n print \"end_inner\", end_inner\n print \"end_outer\", end_outer\n \"\"\"\n Method 2\n #Somehow worse efficiency than index(item)\n for item in outer_list:\n for dummy_idx in range(start_inner, end_inner):\n if item == inner_list[dummy_idx]:\n intersect_list.append(item)\n if DEBUG_I:\n print \"updating start_inner:\",dummy_idx\n start_inner = dummy_idx\n \n #Method 1\n #Not terrible efficiency, not amazingly bad\n for item in outer_list:\n if item in inner_list:\n inner_start = inner_list.index(item)\n intersect_list.append(item)\n \n #Method 3 - am best\n for item in outer_list:\n if item in inner_list[start_inner:]:\n intersect_list.append(item)\n start_inner = inner_list.index(item)\n if DEBUG_I:\n print \"updating start_inner:\", start_inner \n\n #Method 4, am try to use generator \n for item in outer_list:\n for dummy_idx in gen_range(start_inner, end_inner):\n if item == inner_list[dummy_idx]:\n intersect_list.append(item)\n if DEBUG_I:\n print \"updating start_inner:\",dummy_idx\n start_inner = dummy_idx\n \n #Method 5 - try to break on find\n for item in outer_list:\n for dummy_idx in range(start_inner, end_inner):\n if item == inner_list[dummy_idx]:\n intersect_list.append(item)\n if DEBUG_I:\n print \"updating start_inner:\",dummy_idx\n start_inner = dummy_idx\n break \n \"\"\"\n #Method 6 - dict\n #outer_dict = {entry: entry for entry in outer_list}\n outer_dict = {}\n for entry in outer_list:\n outer_dict[entry] = entry\n for entry in inner_list:\n if entry in outer_dict:\n intersect_list.append(entry)\n \n return intersect_list", "def __listsCommon(list1, list2) :\r\n for i in range(len(list1)) :\r\n for j in range(len(list2)) :\r\n if (list1[i]==list2[j]) :\r\n return True\r\n return False", "def intersection(\n cls, *availabilitysets: List[\"Availability\"]\n ) -> List[\"Availability\"]:\n\n # get rid of any overlaps and unmerged ranges in each set\n availabilitysets = [cls.union(avialset) for avialset in availabilitysets]\n # bail out for obvious cases (there are no sets given, one of the sets is empty)\n if not availabilitysets:\n return []\n if not all(availabilitysets):\n return []\n # start with the very first set ...\n result = availabilitysets[0]\n for availset in availabilitysets[1:]:\n # ... 
subtract each of the other sets\n result = cls._pair_intersection(result, availset)\n return result", "def union(list1, list2):\n new_list = list1\n for literal in list2:\n negate_literal = copy.deepcopy(literal)\n negate_literal.negate = not negate_literal.negate\n if negate_literal in list1:\n new_list.remove(negate_literal)\n continue\n if literal not in list1:\n new_list.append(literal)\n return new_list", "def intersection(sets):\n return functools.reduce(set.intersection, [s for s in sets])", "def intersection(sets):\n return reduce(set.intersection, [s for s in sets])", "def intersection(*entities):\n from entity import GeometryEntity\n\n entities = GeometryEntity.extract_entities(entities, False)\n if len(entities) <= 1:\n return []\n\n res = GeometryEntity.do_intersection(entities[0], entities[1])\n for entity in entities[2:]:\n newres = []\n for x in res:\n newres.extend(GeometryEntity.do_intersection(x, entity))\n res = newres\n return res", "def intersect(self, other: Wire) -> List[Intersection]:\n intersections = []\n for segment_a, segment_b in product(self.wire_segments, other.wire_segments):\n intersection = segment_a.intersect(segment_b)\n if intersection and intersection.location != self.ORIGIN:\n intersections.append(intersection)\n\n return intersections", "def intersect(t1, t2):\n\n result = ()\n for e in t1:\n if e in t2:\n result += (e,) # signifies tuple\n\n return (result)", "def infirstlist(xs, ys):\n result = []\n xi = 0\n yi = 0\n\n while True:\n if xi >= len(xs):\n return result\n\n if yi >= len(ys): # we have come to the end of the second list.\n result.extend(xs[xi:]) # dump the rest of the array, since we know it's not in the second list.\n return result\n\n if xs[xi] not in ys: # its not in the second list\n result.append(xs[xi])\n else:\n yi += 1\n\n xi += 1", "def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]", "def merge(list1, list2):\n answer = []\n assert answer == sorted(answer)\n\n idx1 = 0\n idx2 = 0\n while (idx1 < len(list1)) and (idx2 < len(list2)):\n if list1[idx1] < list2[idx2]:\n answer.append(list1[idx1])\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n answer.append(list2[idx2])\n idx2 += 1\n else:\n answer.append(list1[idx1])\n answer.append(list2[idx2])\n idx1 += 1\n idx2 += 1\n assert answer == sorted(answer)\n\n answer.extend(list1[idx1:])\n answer.extend(list2[idx2:])\n\n assert answer == sorted(answer)\n return answer", "def insecondlist(xs, ys):\n return infirstlist(ys, xs) # Cause really, I am lazy :P", "def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif len(list_2) == 0:\n new_list = new_list + list_1\n\n elif list_1[0] < list_2[0]:\n new_list.append(list_1[0])\n list_1.remove(list_1[0])\n elif list_1[0] >= list_2[0]:\n new_list.append(list_2[0])\n list_2.remove(list_2[0])\n return new_list", "def compare_lists(self, list1, list2):\n matching_items = []\n\n list1 = list1.copy()\n list2 = list2.copy()\n\n for item in list1:\n if item in list2:\n matching_items.append(item)\n\n for m in matching_items:\n for c in range(list1.count(m)):\n list1.remove(m)\n for c in range(list2.count(m)):\n list2.remove(m)\n if list1 or 
list2:\n tmp_match = False\n else:\n tmp_match = True\n return tmp_match, list1, list2", "def remove_common(first: list, second: list):\n return list(set(first)-set(second))", "def naive(l1: ListNode, l2: ListNode) -> ListNode:\n n = (get_length(l1), l1)\n m = (get_length(l2), l2)\n smaller = min(n, m, key=lambda x: x[0])[1] # choose the smaller list, of l1 and l2\n larger = max(n, m, key=lambda x: x[0])[1]\n seen = {}\n while smaller: # Using the smaller list, track seen nodes using a hash table\n seen[smaller] = True\n smaller = smaller.next\n while larger:\n if larger in seen: # if any node in the larger list has been seen (overlaps with smaller list) return it\n return larger\n larger = larger.next\n return None # no intersection", "def listops_union(list_a,list_b):\r\n\r\n retlist = list_a[:]\r\n for item in list_b: \r\n if item not in list_a:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def __and__(self, other):\n return intersect(self, other)", "def get_jurisdiction_common_members(a: List[int], b: List[int]) -> Set[int]:\n a_set = set(a)\n b_set = set(b)\n\n if a_set & b_set:\n return a_set & b_set\n else:\n return set()", "def merge(list1, list2):\n merged = []\n if len(list1) < 1 or len(list2) <1:\n return list1 + list2\n else:\n ind_1 = 0\n ind_2 = 0\n while ind_1 < len(list1) and ind_2 < len(list2):\n #some appends to lists\n if list1[ind_1] < list2[ind_2]:\n merged.append(list1[ind_1])\n ind_1 += 1\n elif list2[ind_2] < list1[ind_1]:\n merged.append(list2[ind_2])\n ind_2 += 1\n elif list1[ind_1] == list2[ind_2]:\n merged.append(list1[ind_1])\n merged.append(list2[ind_2])\n ind_1 += 1\n ind_2 += 1\n #if reach end of one list, copy the remainder of the other\n if ind_1 >= len(list1) and ind_2 < len(list2):\n merged += list2[ind_2:]\n ind_2 = len(list2)\n elif ind_2 >= len(list2) and ind_1 < len(list1):\n merged += list1[ind_1:]\n ind_1 = len(list1)\n return merged", "def intersection(arrays):\n # Your code here\n hash = {}\n hash2 = {}\n for i in range(len(arrays[0])):\n hash[arrays[0][i]] = i\n\n for key in hash:\n if key in arrays[1]:\n hash2[key] = hash[key]\n print(hash2)\n \n for i in range(2, len(arrays)):\n for key in hash2:\n if key not in arrays[i]:\n hash2[key] = None\n\n list1 = [key for key in hash2 if hash2[key] != None] \n result = list1\n\n return result" ]
[ "0.81946236", "0.8157978", "0.8081259", "0.802535", "0.79895693", "0.7963669", "0.79506075", "0.7948855", "0.7923734", "0.7879321", "0.7839831", "0.7740573", "0.76637924", "0.76356244", "0.76244", "0.75989366", "0.7588414", "0.75148344", "0.75115436", "0.74970204", "0.7495804", "0.74750376", "0.7367644", "0.7335016", "0.7334077", "0.72861814", "0.7237578", "0.72080284", "0.71922296", "0.70377964", "0.7022043", "0.7016011", "0.69860613", "0.69555944", "0.6879537", "0.6823199", "0.68218267", "0.6820091", "0.6761182", "0.66943705", "0.66671693", "0.6636941", "0.6593602", "0.6575178", "0.6548386", "0.65402824", "0.6512917", "0.65106964", "0.65009665", "0.6462791", "0.6432619", "0.64322954", "0.6428283", "0.64167595", "0.6404167", "0.63296705", "0.632905", "0.62891465", "0.6278332", "0.6276999", "0.62585324", "0.62566423", "0.6253535", "0.62208027", "0.62187296", "0.6212174", "0.6209687", "0.6188833", "0.617976", "0.6179185", "0.6171925", "0.6161335", "0.61571574", "0.6144037", "0.61390096", "0.61203706", "0.6108263", "0.6091504", "0.60668004", "0.6054488", "0.6039767", "0.60298914", "0.6023361", "0.6006225", "0.60036576", "0.6001671", "0.5996131", "0.5992692", "0.5987129", "0.59762406", "0.5976203", "0.5967532", "0.5966269", "0.59534466", "0.5949347", "0.59463376", "0.5940225", "0.59362984", "0.593578", "0.5926672" ]
0.8113677
2
Merge two sorted lists. Returns a new sorted list containing those elements that are in either list1 or list2. This function can be iterative.
def merge(list1, list2): res = [] index_i, index_j = 0, 0 while index_i < len(list1) and index_j < len(list2): if list1[index_i] <= list2[index_j]: res.append(list1[index_i]) index_i += 1 else: res.append(list2[index_j]) index_j += 1 res += list1[index_i:] res += list2[index_j:] return res
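A minimal usage sketch for the merge implementation in the document field above, assuming that function is in scope; the input values below are illustrative and not taken from the dataset:

odds = [1, 3, 5, 7]
evens = [2, 4, 6]
merged = merge(odds, evens)            # two-pointer pass, then the unfinished tail is copied over
print(merged)                          # [1, 2, 3, 4, 5, 6, 7]
assert merged == sorted(odds + evens)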
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif len(list_2) == 0:\n new_list = new_list + list_1\n\n elif list_1[0] < list_2[0]:\n new_list.append(list_1[0])\n list_1.remove(list_1[0])\n elif list_1[0] >= list_2[0]:\n new_list.append(list_2[0])\n list_2.remove(list_2[0])\n return new_list", "def merge(list1, list2):\n answer = []\n assert answer == sorted(answer)\n\n idx1 = 0\n idx2 = 0\n while (idx1 < len(list1)) and (idx2 < len(list2)):\n if list1[idx1] < list2[idx2]:\n answer.append(list1[idx1])\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n answer.append(list2[idx2])\n idx2 += 1\n else:\n answer.append(list1[idx1])\n answer.append(list2[idx2])\n idx1 += 1\n idx2 += 1\n assert answer == sorted(answer)\n\n answer.extend(list1[idx1:])\n answer.extend(list2[idx2:])\n\n assert answer == sorted(answer)\n return answer", "def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n result_list.extend(list1[list1_index:])\n if list2_index < list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list", "def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return merged_output", "def merge(list1, list2):\n merged = []\n if len(list1) < 1 or len(list2) <1:\n return list1 + list2\n else:\n ind_1 = 0\n ind_2 = 0\n while ind_1 < len(list1) and ind_2 < len(list2):\n #some appends to lists\n if list1[ind_1] < list2[ind_2]:\n merged.append(list1[ind_1])\n ind_1 += 1\n elif list2[ind_2] < list1[ind_1]:\n merged.append(list2[ind_2])\n ind_2 += 1\n elif list1[ind_1] == list2[ind_2]:\n merged.append(list1[ind_1])\n merged.append(list2[ind_2])\n ind_1 += 1\n ind_2 += 1\n #if reach end of one list, copy the remainder of the other\n if ind_1 >= len(list1) and ind_2 < len(list2):\n merged += list2[ind_2:]\n ind_2 = len(list2)\n elif ind_2 >= len(list2) and ind_1 < len(list1):\n merged += list1[ind_1:]\n ind_1 = len(list1)\n return merged", "def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n 
result.extend(list2[j:])\r\n return result", "def merge(lst1, lst2):\n\n results = []\n i = 0\n j = 0\n\n while i <= len(lst1) - 1 and j <= len(lst2) - 1:\n\n if lst1[i] < lst2[j]:\n results.append(lst1[i])\n i += 1\n else:\n results.append(lst2[j])\n j += 1\n\n if i == len(lst1):\n results.extend(lst2[j:])\n else:\n results.extend(lst1[i:])\n\n return results", "def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2", "def merge_ordered_list(in_list1: list, in_list2: list) -> list:\n _list1 = in_list1.copy()\n _list2 = in_list2.copy()\n _output_list = []\n idx_2 = 0\n for element in _list1:\n while idx_2 < len(_list2) and element > _list2[idx_2]:\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n _output_list.append(element)\n while idx_2 < len(_list2):\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n return _output_list", "def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list", "def merge(lst1, lst2):\n if not lst1 or not lst2:\n return lst1 + lst2\n elif lst1[0] < lst2[0]:\n return [lst1[0]] + merge(lst1[1:], lst2)\n else:\n return [lst2[0]] + merge(lst1, lst2[1:])", "def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]", "def merge(l1, l2):\n i = j = 0\n output = []\n\n while i < len(l1) and j < len(l2):\n if l1[i] <= l2[j]:\n output.append(l1[i])\n i += 1\n else:\n output.append(l2[j])\n j += 1\n\n output.extend(l1[i:] + l2[j:])\n\n return output", "def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li", "def merge(list1, list2):\n merge_list = []\n l1_copy = list(list1)\n l2_copy = list(list2)\n\n # cycling through list1 and list2: we check the first element in\n # list2, if it's smaller than the first element in list1 we copy it to\n # the merge list and pop it out of list2. 
Else we break the loop and\n # copy the first element of list1, then pop it and proceed again\n while l1_copy:\n while l2_copy:\n if l2_copy[0] < l1_copy[0]:\n merge_list.append(l2_copy[0])\n l2_copy.pop(0)\n else:\n break\n merge_list.append(l1_copy[0])\n l1_copy.pop(0)\n\n # if list2 is not empty once list1 is, add the remaining elements to the\n # end of the merge list\n if l2_copy:\n merge_list.extend(l2_copy)\n\n return merge_list", "def merge_two_sorted_lists(lst1, lst2):\n\n dummy_head = tail = ListNode() # head and tail start pointing to the same dummy node, then tail converges\n while lst1 and lst2:\n if lst1.data < lst2.data:\n tail.next = lst1 # the FIRST tail.next node is where the actual merge begins\n lst1 = lst1.next\n else:\n tail.next = lst2\n lst2 = lst2.next\n tail = tail.next\n # append the remaining nodes of list 1 or list 2\n tail.next = lst1 or lst2 # when one list becomes None, the 'or' returns the remaining nodes of the other\n return dummy_head.next # dummy_head.next is the node appended with the FIRST tail.next statement", "def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result", "def merge(l1, l2):\n # Edge cases, where nothing is to be done.\n if l1 is None and l2 is None: return l1\n if l1 is None: return l2\n if l2 is None: return l1\n\n # Vars to hold,\n # head -> a dummy head to keep a reference to the start of the merged\n # list.\n # _iter -> to move through the merged list.\n head = ListNode(float('-inf'))\n _iter = head\n\n # As long as both the lists are not exhausted,\n while l1 and l2:\n\n # Make the next of _iter as the smaller node.\n if l1.val <= l2.val:\n _iter.next = l1\n l1 = l1.next\n else:\n _iter.next = l2\n l2 = l2.next\n # Move _iter forward.\n _iter = _iter.next\n\n # If either of the lists remain, add them to the end,\n # Note: at-least one of the lists would be exhausted by now,\n # and the remaining one is sorted in itself, which is why this works.\n if not l1: _iter.next = l2\n if not l2: _iter.next = l1\n\n # Return a reference to the start of the merged list.\n return head.next", "def merge(list_a, list_b):\n new_list = []\n i = 0\n j = 0\n while (i < len(list_a) and j < len(list_b)):\n if(list_a[i] < list_b[j]):\n new_list.append(list_a[i])\n i += 1\n else:\n new_list.append(list_b[j])\n j += 1\n new_list += list_a[i:]\n new_list += list_b[j:]\n\n return new_list", "def merge(l1, l2):\n\n #Reverse the lists\n l1 = list(reversed(l1))\n l2 = list(reversed(l2))\n\n ret = []\n\n while True:\n # If either list is empty, reverse the other one and append it to the end\n if not l1:\n ret.extend(reversed(l2))\n return ret\n if not l2:\n ret.extend(reversed(l1))\n return ret\n\n # Append the lowest last element of the two lists\n ret.append(l1.pop() if l1[-1] < l2[-1] else l2.pop())", "def merge_lists(a_lst, b_lst):\n\n i = 0\n j = 0\n merged_list = []\n while i < len(a_lst) and j < len(b_lst):\n \n if a_lst[i] < b_lst[j]:\n merged_list.append(a_lst[i])\n i += 1\n else:\n merged_list.append(b_lst[j])\n j += 1\n if i < len(a_lst):\n merged_list.extend(a_lst[i:])\n if j < len(b_lst):\n merged_list.extend(b_lst[j:])\n return merged_list", "def merge_lists(l1, l2):\n return [ *l1, *l2 ]", "def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:\n overlap = set(a).intersection(b)\n\n 
result = []\n\n current, other = iter(a), iter(b)\n\n while True:\n for element in current:\n if element in overlap:\n overlap.discard(element)\n other, current = current, other\n break\n\n result.append(element)\n else:\n result.extend(other)\n break\n\n return result", "def _merge_two_sorted_list(sorted_list_head, sorted_list_tail):\n sorted_list_result = list()\n head_index = 0\n tail_index = 0\n len_head = len(sorted_list_head)\n len_tail = len(sorted_list_tail)\n\n while head_index < len_head and tail_index < len_tail:\n print(sorted_list_head, ' : ', sorted_list_tail)\n if sorted_list_head[head_index] < sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n head_index += 1\n elif sorted_list_head[head_index] > sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_tail[tail_index])\n tail_index += 1\n elif sorted_list_head[head_index] == sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n sorted_list_result.append(sorted_list_tail[tail_index])\n head_index += 1\n tail_index += 1\n\n if head_index < len_head:\n sorted_list_result.extend(sorted_list_head[head_index:])\n elif tail_index < len_tail:\n sorted_list_result.extend(sorted_list_tail[tail_index:])\n\n return sorted_list_result", "def _merge_lists(list1, list2):\n for v2 in reversed(list2):\n if isinstance(v2, Descriptor):\n if v2 in list1:\n v1 = list1.pop(list1.index(v2))\n list1.insert(0, v1.merge(v2))\n else:\n list1.insert(0, v2)\n elif isinstance(v2, list):\n raise CekitError(\"Cannot merge list of lists\")\n else:\n if v2 not in list1:\n list1.insert(0, v2)\n\n return list1", "def merge_in(list_a: list, list_b: list):\n end_a = 0\n\n while list_a[end_a] is not None:\n end_a += 1\n end_a -= 1\n\n assert (end_a + len(list_b) < len(list_a))\n\n a_index = end_a\n b_index = len(list_b) - 1\n\n for k in range(len(list_a) - 1, -1, -1):\n if b_index < 0 or (a_index >= 0 and list_a[a_index] > list_b[b_index]):\n list_a[k] = list_a[a_index]\n a_index -= 1\n else:\n list_a[k] = list_b[b_index]\n b_index -= 1", "def merge(left, right):\n\n ## if the list is empty\n if not len(left) or not len(right):\n return left or right\n\n ## merge the list in sorted manner\n result = []\n i, j = 0, 0\n while (len(result) < len(left) + len(right)):\n if left[i] < right[j]:\n result.append(left[i])\n i+= 1\n else:\n result.append(right[j])\n j+= 1\n if i == len(left) or j == len(right):\n result.extend(left[i:] or right[j:])\n break\n \n return result", "def merge(first_list, second_list):\r\n result_list = []\r\n\r\n def check_for_group():\r\n \"\"\"Inner function,so that it has access to merges' local variables,\r\n that checks for groups\"\"\"\r\n if first_list[0][0] == second_list[0][0]:\r\n try:\r\n result = first_list[0][0], str(int(first_list[0][1]) + int(second_list[0][1]))\r\n except ValueError:\r\n result = first_list[0][0], str(float(first_list[0][1]) + float(second_list[0][1]))\r\n result_list.append(result)\r\n first_list.remove(first_list[0])\r\n second_list.remove(second_list[0])\r\n return True\r\n return False\r\n\r\n while first_list and second_list:\r\n if first_list[0] > second_list[0]:\r\n if not check_for_group():\r\n result_list.append(second_list[0])\r\n second_list.remove(second_list[0])\r\n else:\r\n if not check_for_group():\r\n result_list.append(first_list[0])\r\n first_list.remove(first_list[0])\r\n empty_lists(first_list, second_list, result_list)\r\n return result_list", "def merge(l1,l2):\n\n result = []\n\n while l1 and l2:\n 
if l1[0] < l2[0]:\n result.append(l1.pop(0))\n else:\n result.append(l2.pop(0))\n\n while l1:\n result.append(l1.pop(0))\n\n while l2:\n result.append(l2.pop(0)) \n\n return result", "def merge(items1, items2):\r\n # TODO: Repeat until one list is empty\r\n # TODO: Find minimum item in both lists and append it to new list\r\n # TODO: Append remaining items in non-empty list to new list\r", "def merge(a, b):\n result = []\n\n # Append smallest values to result until either list is exhausted\n i = j = 0\n while i < len(a) and j < len(b):\n if compare(a[i], b[j]) < 0:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n j += 1\n\n # Append all remaining values from the unexhausted list\n if i < len(a):\n result.extend(a[i:])\n else:\n result.extend(b[j:])\n\n return result", "def merge(left, right):\n\n # Initializing pointers.\n leftPtr = 0\n rightPtr = 0\n result = []\n\n # Merging and sorting two sublists.\n while leftPtr < len(left) and rightPtr < len(right):\n if left[leftPtr][0] < right[rightPtr][0] or \\\n (left[leftPtr][0] == right[rightPtr][0] and left[leftPtr][1] < right[rightPtr][1]):\n result.append(left[leftPtr])\n leftPtr += 1\n else:\n result.append(right[rightPtr])\n rightPtr += 1\n\n # Extending the leftover in the sublists.\n if leftPtr < len(left):\n result.extend(left[leftPtr:])\n elif rightPtr < len(right):\n result.extend(right[rightPtr:])\n\n return result", "def merge(items1, items2):\n # TODO: Running time: O(n + m), where n is the size of items 1 and m is the size of items 2\n # TODO: Memory usage: ??? Why and under what conditions\n # TODO: Repeat until one list is empty\n left_index = 0\n right_index = 0\n merge_list = []\n while (left_index < len(items1)) and (right_index < len(items2)):\n # TODO: Find minimum item in both lists and append it to new list\n if items1[left_index] > items2[right_index]:\n merge_list.append(items2[right_index])\n right_index += 1\n elif items1[left_index] < items2[right_index]:\n merge_list.append(items1[left_index])\n left_index += 1\n elif items1[left_index] == items2[right_index]:\n merge_list.append(items1[left_index])\n merge_list.append(items2[right_index])\n right_index += 1\n left_index += 1\n # TODO: Append remaining items in non-empty list to new list\n if left_index == len(items1):\n merge_list.extend(items2[right_index:])\n elif right_index == len(items2):\n merge_list.extend(items1[left_index:])\n\n # Alternate solution\n # Add remaining items to merge_sort list from either items1 or items2\n # Only one is guaranteed to run \n # for index in range(left_index, len(items1)):\n # merge_sort.append(index)\n\n # for index in range(right_index, len(items1)): \n # merge_sort.append(index)\n return merge_list", "def merge(left, right):\n aList = []\n lt = 0\n rt = 0\n\n #Repeatedly move the smallest of left and right to the new list\n while lt < len(left) and rt < len(right):\n if left[lt] < right[rt]:\n aList.append(left[lt])\n lt += 1\n else:\n aList.append(right[rt])\n rt += 1\n\n #There will only be elements left in one of the original two lists.\n\n #Append the remains of left (lt..end) on to the new list.\n while lt < len(left):\n aList.append(left[lt])\n lt += 1\n \n #Append the remains of right (rt..end) on to the new list.\n while rt < len(right):\n aList.append(right[rt])\n rt += 1\n\n return aList", "def merge(left_list, right_list):\n if not len(left_list) or not len(right_list):\n return left_list or right_list\n\n result = []\n i, j = 0, 0\n left_trips_dict = {trip.trip_id: trip for trip in left_list}\n 
right_trips_dict = {trip.trip_id: trip for trip in right_list}\n while (len(result) < len(left_list) + len(right_list)):\n ranked_two_trips_ids = fixtures.rank_trips([left_list[i],right_list[j]])\n # if ids[0] belogs to left, ad the trip of id[0] to result and inc the left\n if ranked_two_trips_ids[0] in left_trips_dict.keys():\n result.append(left_trips_dict[ranked_two_trips_ids[0]])\n i+= 1\n else:\n result.append(right_trips_dict[ranked_two_trips_ids[0]])\n j+= 1\n if i == len(left_list) or j == len(right_list):\n result.extend(left_list[i:] or right_list[j:])\n break \n return result", "def merge_list(list1, list2):\n\n current1 = list1.head\n current2 = list2.head\n\n if current1 == None and current2 == None:\n raise Exception(\"lists are empty\")\n\n if not current1:\n list1.head = list2.head\n return list1.head\n\n if not current2:\n return list1.head\n\n temp = current2.next\n\n while current1.next and current2.next:\n current1.next, current2.next = current2, current1.next\n current1 = current2.next\n current2, temp = temp, temp.next\n\n if not current1.next:\n current1.next = current2\n return list1.head\n\n if not current2.next:\n current1.next, current2.next = current2, current1.next\n return list1.head", "def merge(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> List[Any]:\n return [*list1, *list2]", "def merge(arr1, arr2):\n out = []\n # Iterate while neither list is empty\n while arr1 and arr2:\n # Compare heads, pop smallest head and append to output\n if arr1[0] <= arr2[0]:\n out.append(arr1.pop(0))\n else:\n out.append(arr2.pop(0))\n # Concat whichever array has more elements\n if arr1:\n out.extend(arr1)\n else:\n out.extend(arr2)\n return out", "def _merge(S1, S2, mylist):\n i = 0\n j = 0\n while i + j < len(mylist):\n if j == len(S2) or (i < len(S1) and S1[i] < S2[j]):\n mylist[i+j] = S1[i] # Copy ith element of S1 as next item of mylist\n i += 1\n else:\n mylist[i+j] = S2[j] # Copy jth element of S2 as next item of mylist\n j += 1", "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "def merge(left, right):\n ret = []\n li = ri = 0\n while li < len(left) and ri < len(right):\n if left[li] <= right[ri]:\n ret.append(left[li])\n li += 1\n else:\n ret.append(right[ri])\n ri += 1\n if li == len(left):\n ret.extend(right[ri:])\n else:\n ret.extend(left[li:])\n return ret", "def union(llist_1 : LinkedList, llist_2 : LinkedList) -> LinkedList:\n # Convert to set to remove repeated entries in each list\n lset_1 = list_to_set(llist_1)\n lset_2 = list_to_set(llist_2)\n \n # Combine the two sets to create a union\n union_list = LinkedList()\n list_of_added = []\n for item in lset_1:\n union_list.append(item)\n list_of_added.append(item)\n\n for item in lset_2:\n if item not in list_of_added:\n union_list.append(item)\n\n return union_list", "def listops_union(list_a,list_b):\r\n\r\n retlist = list_a[:]\r\n for item in list_b: \r\n if item not in list_a:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def merge_lists(lists1, lists2):\n merged_lists = []\n for list1, list2 in zip(lists1, lists2):\n merged_lists.append(list1 + list2)\n return merged_lists", "def merge(left_sort_list, right_sort_list):\n left_index = 0\n 
right_index = 0\n left_len = len(left_sort_list)\n right_len = len(right_sort_list)\n temp_list = []\n while left_index <= left_len - 1 and right_index <= right_len - 1:\n if left_sort_list[left_index] <= right_sort_list[right_index]:\n temp_list.append(left_sort_list[left_index])\n left_index += 1\n else:\n temp_list.append(right_sort_list[right_index])\n right_index += 1\n if left_index == left_len:\n temp_list += right_sort_list[right_index:]\n else:\n temp_list += left_sort_list[left_index:]\n return temp_list", "def list_update(l1, l2):\n return filter(lambda e : e not in l2, l1) + list(l2)", "def merge(l, s1, l1, s2, l2):\n nonlocal c, r, w\n\n # Create temporary list to store sorted value\n tempList = l.copy() \n\n # Compare pairs of values of two list, start from the first element\n i = s1 # Beginning of the left list\n j = s2 # Beginning of the right list\n k = 0\n\n # Compare and add to temporary list\n c += 2\n while i <= l1 and j <= l2: \n c += 3\n r += 2 \n w += 1 \n if l[i] < l[j]:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1 \n else:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n\n # Copy remaining elements of the first list\n c += 1\n while i <= l1:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy remaining elements of the second list \n c += 1\n while j <= l2:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy elements from tempList to list l\n i = s1\n j = 0 \n c += 1\n while i <= l2:\n l[i] = tempList[j]\n i = i + 1\n j = j + 1\n c += 1\n w += 1 \n r += 1", "def reformat_order(list1: List[int], list2: List[int]) -> List[int]:\n result = []\n for item in list1:\n if item in list2:\n result.append(item)\n \n return result", "def merge_sort(in_list1: list) -> list:\n if in_list1 is None:\n return []\n if len(in_list1) == 1:\n return [in_list1[0]]\n _list1,_list2= in_list1[:int(((len(in_list1)+1)/2))],\\\n in_list1[int(((len(in_list1)+1)/2)):]\n _ordered_list1 = merge_sort(_list1)\n _ordered_list2 = merge_sort(_list2)\n return merge_ordered_list(_ordered_list1,_ordered_list2)", "def merge(left,right):\n result = []\n comparision_count = 0\n left_index , right_index = 0 , 0\n # Compare elements of one list with another until we run out of atleast one list\n while left_index < len(left) and right_index < len(right):\n comparision_count = comparision_count + 1\n if left[left_index] < right[right_index]:\n result.append(left[left_index])\n left_index = left_index + 1\n else:\n result.append(right[right_index])\n right_index = right_index + 1\n # Appending the rest of the elements to the result\n for element in left[left_index:]:\n result.append(element)\n for element in right[right_index:]:\n result.append(element)\n return (result,comparision_count)", "def merge_sort(list1):\n if len(list1) <= 1:\n answer = list(list1)\n assert answer == sorted(answer)\n return answer\n\n mid = len(list1) // 2\n\n list_low = merge_sort(list1[0:mid])\n list_high = merge_sort(list1[mid:])\n\n answer = merge(list_low, list_high)\n assert answer == sorted(answer)\n return answer", "def merge(left, right):\n\tl = []\n\ti = 0\n\tj = 0\n\n\twhile i < len(left) and j < len(right):\n\t\tif left[i] < right[j]:\n\t\t\tl.append(left[i])\n\t\t\ti += 1\n\t\telse:\n\t\t\tl.append(right[j])\n\t\t\tj += 1\n\n\twhile i < len(left):\n\t\tl.append(left[i])\n\t\ti += 1\n\n\twhile j < len(right):\n\t\tl.append(right[j])\n\t\tj += 1\n\n\treturn l", "def merge_two(l, r):\n new = []\n i1, i2 = 0, 0\n while i1 != len(l) and i2 != len(r):\n if l[i1] < 
r[i2]:\n new.append(l[i1])\n i1 += 1\n else:\n new.append(r[i2])\n i2 += 1\n\n new.extend(l[i1:])\n new.extend(r[i2:])\n return new", "def merge_lists(lists):\n if lists:\n return sorted(set.union(*[set(l) for l in lists]))\n # TODO order by number of matching terms\n else:\n return list()", "def merge_list(list1, list2, id_index=0):\r\n\tid_list1 = [row[id_index] for row in list1]\r\n\tduplicates = []\r\n\tfor row in list2:\r\n\t\tif row[id_index] in id_list1:\r\n\t\t\tduplicates.append(row)\r\n\t\telse:\r\n\t\t\tlist1.append(row)\r\n\treturn list1, duplicates", "def merge(sorted_left, sorted_right):\n merged = []\n idx_left, idx_right = 0, 0\n \n while idx_left < len(sorted_left) and idx_right < len(sorted_right):\n if sorted_left[idx_left] <= sorted_right[idx_right]:\n merged.append(sorted_left[idx_left])\n idx_left += 1\n else:\n merged.append(sorted_right[idx_right])\n idx_right += 1\n \n # Append the remaining to merged\n # If you want to determine which half remains\n \"\"\"\n if idx_left < len(sorted_left):\n merged.extend(sorted_left[idx_left:])\n else:\n merged.extend(sorted_right[idx_right:])\n \"\"\"\n \n merged.extend(sorted_left[idx_left:])\n merged.extend(sorted_right[idx_right:])\n return merged", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n left = merge_sort(list1[:len(list1)/2])\n right = merge_sort(list1[len(list1)/2:])\n return merge(left, right)", "def union(list_a: list, list_b: list) -> list:\n if list_a is None:\n list_a = [None]\n if list_b is None:\n list_b = [None]\n return list(set(list_a) | set(list_b))", "def concat_lists(list1, list2):\n\n # return list1 + list2\n for item in list2:\n list1.append(item)\n\n return list1", "def union(llist_1, llist_2):\n union_set = set()\n return_linked_list = LinkedList()\n node = llist_1.get_head()\n while node:\n union_set.add(node.get_value())\n node = node.get_next()\n node = llist_2.get_head()\n while node:\n union_set.add(node.get_value())\n node = node.get_next()\n for item in union_set:\n return_linked_list.append(item)\n if return_linked_list.size() == 0:\n return 'No unions found'\n return return_linked_list", "def merge(left, right):\n new = []\n left_index, right_index = 0, 0\n len_left, len_right = len(left), len(right)\n while left_index < len_left and right_index < len_right:\n if left[left_index] <= right[right_index]:\n new.append(left[left_index])\n left_index += 1\n else:\n new.append(right[right_index])\n right_index += 1\n new += left[left_index:]\n new += right[right_index:]\n return new", "def merge_sort(list1):\r\n if len(list1) == 0 or len(list1) == 1:\r\n return [item for item in list1]\r\n else:\r\n mid = len(list1) / 2\r\n left = merge_sort(list1[:mid])\r\n right = merge_sort(list1[mid:])\r\n return merge(left, right)", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n \n mid_point = int(len(list1)/2)\n \n return merge(merge_sort(list1[:mid_point]), merge_sort(list1[mid_point:]))", "def intersect(list1, list2):\r\n if len(list1) == 0 or len(list2) == 0:\r\n return []\r\n else:\r\n if list1[0] == list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(intersect(list1[1:], list2[1:]))\r\n return new_list\r\n elif list1[0] < list2[0]:\r\n return intersect(list1[1:], list2)\r\n else:\r\n return intersect(list1, list2[1:])", "def merge(arr1, arr2):\n\tres = []\n\n\ti = j = 0\n\n\twhile i< len(arr1) and j < len(arr2):\n\t\tif arr1[i] < arr2[j]:\n\t\t\tres.append(arr1[i])\n\t\t\ti+=1\n\t\telse:\n\t\t\tres.append(arr2[j])\n\t\t\tj+=1\n\n\twhile i < 
len(arr1):\n\t\tres.append(arr1[i])\n\t\ti +=1\n\n\twhile j < len(arr2):\n\t\tj +=1\n\t\tres.append(arr2[j])\n\n\treturn res", "def merge(arr1,arr2):\n i = 0\n j = 0\n new_list = []\n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n new_list.append(arr1[i])\n i += 1\n else:\n new_list.append(arr2[j])\n j += 1\n if i == len(arr1):\n new_list.extend(arr2[j:])\n if j == len(arr2):\n new_list.extend(arr1[i:])\n return new_list", "def merge(a: List[int], b: List[int]) -> List[int]:\n merged = []\n i = j = 0\n alen = len(a)\n blen = len(b)\n while i < alen or j < blen:\n aval = a[i] if i < alen else float(\"inf\")\n bval = b[j] if j < blen else float(\"inf\")\n if aval <= bval:\n merged.append(a[i])\n i += 1\n else:\n merged.append(b[j])\n j += 1\n return merged", "def merge(a, b):\r\n # your code here\r\n \r\n m = []\r\n i, j = 0, 0\r\n \r\n while i < len(a) and j < len(b):\r\n if a[i] < b[j]:\r\n m.append(a[i])\r\n i += 1\r\n else:\r\n m.append(b[j])\r\n j += 1\r\n \r\n m += a[i:] + b[j:]\r\n \r\n return m", "def linear_merge(sorted1, sorted2):\n first_pointer = 0\n second_pointer = 0\n sorted_result = []\n\n while second_pointer < len(sorted2) and first_pointer < len(sorted1):\n if sorted1[first_pointer] < sorted2[second_pointer]:\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n else:\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while second_pointer < len(sorted2):\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while first_pointer < len(sorted1):\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n\n\n return sorted_result", "def merge(left, right):\r\n \r\n ls = []\r\n i = 0\r\n j = 0\r\n \r\n while i < len(left) and j < len(right):\r\n if left[i] < right[j]:\r\n ls.append(left[i])\r\n i += 1\r\n else:\r\n ls.append(right[j])\r\n j += 1\r\n \r\n \r\n while i < len(left):\r\n ls.append(left[i])\r\n i += 1\r\n \r\n while j < len(right):\r\n ls.append(right[j])\r\n j += 1\r\n \r\n return ls", "def merge(left,right):\n l = []\n i = 0\n j = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n l.append(left[i])\n i += 1\n else:\n l.append(right[j])\n j += 1\n\n while i < len(left):\n l.append(left[i])\n i += 1\n while j < len(right):\n l.append(right[j])\n j += 1\n return l", "def merge(a,b):\n c = []\n while len(a) != 0 and len(b) != 0:\n if a[0] < b[0]:\n c.append(a[0])\n a.remove(a[0])\n else:\n c.append(b[0])\n b.remove(b[0])\n if len(a) == 0:\n c += b\n else:\n c += a\n return c", "def merge_sort(a, b):\n l = []\n while a and b:\n if a[0] < b[0]:\n l.append(a.pop(0))\n else:\n l.append(b.pop(0))\n return l + a + b", "def merge_sorted_list(left_sublist,right_sublist):\n left_index=right_index=0\n sorted_list=[]\n base_list_length=len(left_sublist)+len(right_sublist)\n while len(sorted_list)<base_list_length:\n if left_sublist[left_index]<right_sublist[right_index]:\n sorted_list.append(left_sublist[left_index])\n left_index+=1\n else:\n sorted_list.append(right_sublist[right_index])\n right_index+=1\n \n if left_index==len(left_sublist):\n sorted_list+=right_sublist[right_index:]\n break\n if right_index==len(right_sublist):\n sorted_list+=left_sublist[left_index:]\n break\n \n return sorted_list", "def merge(left, right):\n left_index = 0\n right_index = 0\n result = []\n # Copy the smaller element amongst the left and the right half\n # and add to the list\n while left_index < len(left) and right_index < len(right):\n if left[left_index] <= right[right_index]:\n 
result.append(left[left_index])\n left_index += 1\n else:\n result.append(right[right_index])\n right_index += 1\n # Copy any elements remaining in the left half\n if left_index < len(left):\n result.extend(left[left_index:])\n # Copy any elements remaining in the right half\n if right_index < len(right):\n result.extend(right[right_index:])\n return result", "def merge(a1, a2):\n\n i, j = 0, 0\n result = [] # resulting array\n while i < len(a1) and j < len(a2): # both array have iterables\n if a1[i] < a2[j]:\n result.append(a1[i])\n i += 1\n elif a1[i] > a2[j]:\n result.append(a2[j])\n j += 1\n else:\n result.append(a1[i])\n result.append(a2[j])\n i += 1\n j += 1\n\n if i == len(a1): # array a1 was exhaused, append the remaining contents of the second array to the result\n result.extend(a2[j:])\n if j == len(a2): # array a2 was exhaused, append the remaining contents of the first array to the result\n result.extend(a1[i:])\n\n return result", "def merge(items1, items2):\n # TODO: Repeat until one list is empty\n # TODO: Find minimum item in both lists and append it to new list\n # TODO: Append remaining items in non-empty list to new list\n sorted_list = []\n while len(items1) > 0 and len(items2) > 0:\n if items1[0] > items2[0]:\n sorted_list.append(items2.pop(0))\n else:\n sorted_list.append(items1.pop(0))\n sorted_list.extend(items1)\n del items1\n sorted_list.extend(items2)\n del items2\n return sorted_list\n\n # front = 0\n # back = (len(items1) - 1)\n # while len(items2) > 0:\n # value = items2.pop()\n # while front <= back:\n # pivot = ((front + back) // 2)\n # # if p f and b all equal the same index\n # if front == back:\n # # if the value is greater append at the back\n # if value > items1[back]:\n # items1.insert(back + 1, value)\n # break\n # # if the value is less than insert at index 0\n # if items1[back] < value:\n # items1.insert(0, value)\n # break\n # # if the value is equal to the value insert at index 0\n # # if f, p, and b are greater than the value\n # if items1[front] > value:\n # # insert the value before f and p\n # items1.insert(front, value)\n # break\n # # if b, p, and f are less than the value\n # if items1[back] < value:\n # # insert the value after b and p\n # items1.insert(back + 1, value)\n # break\n # if items1[pivot] > value:\n # back = pivot - 1\n # elif items1[pivot] < value:\n # front = pivot + 1\n # elif items1[pivot] == value:\n # items1.insert(pivot + 1, value)\n # break\n # front = 0\n # back = (len(items1) - 1)\n # return items1", "def merge_alt(nums1, nums2):\r\n length = len(nums1)\r\n i = 0\r\n while nums2 and i < length:\r\n element = nums2.pop(0)\r\n if element < nums1[i]:\r\n nums1.insert(i, element)\r\n else:\r\n nums1.insert(i + 1, element)\r\n i += 1\r\n length += 1\r\n i += 1\r\n nums1 += nums2", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n else:\n mid = len(list1) / 2\n return merge(merge_sort(list1[0:mid]),merge_sort(list1[mid:]))", "def merge(arr1, arr2):\n i = 0\n j = 0\n sol = []\n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n sol.append(arr1[i])\n i += 1\n else:\n sol.append(arr2[j])\n j += 1\n if i < len(arr1):\n sol.extend(arr1[i:])\n if j < len(arr2):\n sol.extend(arr2[j:])\n return sol", "def do_list_merge(li1, li2=None, attr=None, unique_fn=None, set_fn=set):\n if not li1 and not li2:\n return []\n elif li2 and not li1:\n li1, li2 = li2, li1\n\n new_list = li1[:]\n\n if li2 is None:\n pass\n\n elif attr is None and unique_fn is None:\n new_list.extend(li2)\n\n else:\n if attr is not None:\n 
if isinstance(attr, basestring):\n def unique_fn(d):\n return d[attr]\n\n if unique_fn is not None:\n unique_fn = GlobalFns(unique_fn)\n\n comparables_1 = {unique_fn(el): idx for idx, el in enumerate(li1)}\n if len(set_fn(comparables_1)) < len(comparables_1):\n raise ValueError(\"li1 is not unique wrt. unique_fn\")\n\n comparables_2 = [unique_fn(el) for el in li2]\n if len(set_fn(comparables_2)) < len(comparables_2):\n raise ValueError(\"li2 is not unique wrt. unique_fn\")\n\n for idx2, cmp_2 in enumerate(comparables_2):\n el2 = li2[idx2]\n if cmp_2 in comparables_1:\n idx1 = comparables_1[cmp_2]\n new_list[idx1] = el2\n else:\n new_list.append(el2)\n\n return new_list", "def merge (left, right):\n i = 0\n j = 0\n n = len(left)\n m = len(right)\n out = []\n\n while i < n and j < m:\n if left[i] < right[j]:\n out.append(left[i])\n i += 1\n else:\n out.append(right[j])\n j += 1\n\n if i is n:\n for l in xrange(j, m):\n out.append(right[l])\n elif j is m:\n for l in xrange(i, n):\n out.append(left[l])\n\n return out", "def merge(left, right):\n\n ret = []\n\n while len(left) != 0 and len(right) != 0:\n if left[0] <= right[0]:\n ret.append(left.pop(0))\n else:\n ret.append(right.pop(0))\n\n while len(left) != 0:\n ret.append(left.pop(0))\n \n while len(right) != 0:\n ret.append(right.pop(0))\n \n return ret", "def intersect(list1, list2):\n intersect_list = []\n\n # check if the items in list1 are in list2 and add them to the list\n for item1 in list1:\n if item1 in list2:\n intersect_list.append(item1)\n\n return intersect_list", "def combinelists(oldlst, newlst):\n combined = oldlst\n if newlst not in oldlst:\n combined.append(newlst)\n return combined", "def interleave(list1, list2):\r\n result = [] #Create an empty list which later we use it to add our result in it.\r\n extra = [] #Create an empty list which later we use it to sort out the extra cards.\r\n if len(list2) > len(list1):\r\n new_list = zip(list2, list1)\r\n for idx in range(len(list1),len(list2)):\r\n extra.append(list2[idx])\r\n else:\r\n new_list = zip(list1, list2)\r\n for idx in range(len(list2),len(list1)):\r\n extra.append(list1[idx])\r\n for item1, item2 in new_list:\r\n result.append(item1)\r\n result.append(item2)\r\n for item in extra:\r\n result.append(item)\r\n return result", "def _merge_list_of_dict(first, second, prepend=True):\n first = _cleanup(first)\n second = _cleanup(second)\n if not first and not second:\n return []\n if not first and second:\n return second\n if first and not second:\n return first\n # Determine overlaps\n # So we don't change the position of the existing terms/filters\n overlaps = []\n merged = []\n appended = []\n for ele in first:\n if _lookup_element(second, next(iter(ele))):\n overlaps.append(ele)\n elif prepend:\n merged.append(ele)\n elif not prepend:\n appended.append(ele)\n for ele in second:\n ele_key = next(iter(ele))\n if _lookup_element(overlaps, ele_key):\n # If there's an overlap, get the value from the first\n # But inserted into the right position\n ele_val_first = _lookup_element(first, ele_key)\n merged.append({ele_key: ele_val_first})\n else:\n merged.append(ele)\n if not prepend:\n merged.extend(appended)\n return merged", "def intersect(list1, list2):\n intersection_list = []\n\n list1_idx = 0\n list2_idx = 0\n\n while list2_idx < len(list2) and list1_idx < len(list1):\n\n if list2[list2_idx] == list1[list1_idx]:\n intersection_list.append(list2[list2_idx])\n list1_idx += 1\n list2_idx += 1\n\n elif list2[list2_idx] > list1[list1_idx]:\n list1_idx += 1\n else:\n 
list2_idx += 1\n\n return intersection_list", "def merge():\n a = []\n b = []\n c = []\n d = []\n print \"Enter number of elements in first list\"\n n = int(raw_input())\n print \"enter number of elements in second list\"\n m = int(raw_input())\n print \"Now Enter the elements of first list\"\n for k in range(n):\n a.append(raw_input(\"enter an element:\"))\n print \"Now Enter the elements of second list\"\n for l in range(m):\n b.append(raw_input(\"enter an element:\"))\n\n print a\n print b\n \n for i in a:\n c.append(i)\n for j in b:\n c.append(j)\n length = m + n\n for p in range(length):\n temp = c[0]\n for i in c:\n if int(i) <= int(temp):\n temp = i\n d.append(temp)\n c.remove(temp)\n \n \n print 'The merged list in increasing order is:',d", "def union(self, other: list) -> 'List':\n if not isinstance(other, list):\n raise ValueError('The comparing element is not a list')\n\n return List(self + other).unique()", "def listExpend(input_list_1, input_list_2):\r\n output_list = []\r\n for element_1, element_2 in zip(input_list_1, input_list_2):\r\n output_list += [element_1]*element_2\r\n return output_list", "def sorted_intersect(self, list1, list2):\n ### Begin your code\n result = []\n p1 = 0\n p2 = 0\n s1 = int(math.sqrt(len(list1)))\n s2 = int(math.sqrt(len(list2)))\n while (p1 < len(list1) and p2 < len(list2)):\n if list1[p1] == list2[p2]:\n result.append(list1[p1])\n p1 = p1 + 1\n p2 = p2 + 1\n elif list1[p1] < list2[p2]:\n if (p1 + s1 >= len(list1) or list1[p1 + s1] > list2[p2]):\n p1 = p1 + 1\n else:\n p1 = p1 + s1\n else:\n if (p2 + s2 >= len(list2) or list2[p2 + s2] > list2[p2]):\n p2 = p2 + 1\n else:\n p2 = p2 + s2\n # print(result)\n return result\n ### End your code", "def merge_two_iterators(itr1: Iterator, itr2: Iterator) -> List:\n\n result = []\n elem1, elem2 = next(itr1), next(itr2)\n\n def rest_elems(itr):\n for _, el in enumerate(itr):\n result.append(el)\n\n while True:\n if elem1 < elem2:\n result.append(elem1)\n try:\n elem1 = next(itr1)\n except StopIteration:\n result.append(elem2)\n rest_elems(itr2)\n return result\n elif elem1 == elem2:\n result.append(elem1)\n result.append(elem2)\n try:\n elem1 = next(itr1)\n except StopIteration:\n rest_elems(itr2)\n return result\n\n try:\n elem2 = next(itr2)\n except StopIteration:\n rest_elems(itr1)\n return result\n else:\n result.append(elem2)\n try:\n elem2 = next(itr2)\n except StopIteration:\n result.append(elem1)\n rest_elems(itr1)\n return result", "def merge(left, right, inversions):\n\tmerged = []\n\til, ir = 0, 0\n\tlenl, lenr = len(left), len(right)\n\twhile il < lenl or ir < lenr:\n\t\tif il < lenl and ir < lenr:\n\t\t\tif left[il] <= right[ir]:\n\t\t\t\tmerged.append(left[il])\n\t\t\t\til += 1\n\t\t\telse:\n\t\t\t\telt = right[ir]\n\t\t\t\tmerged.append(elt)\n\t\t\t\t# elt occurs after elements in the left list, but is less\n\t\t\t\t# than all remaining elements in the left list. 
Therefore,\n\t\t\t\t# there are as many inversions of the form (i, elt) as\n\t\t\t\t# there are remaining elements in the left list.\n\t\t\t\tfor _ in xrange(lenl - il):\n\t\t\t\t\tinversions[elt] += 1\n\t\t\t\tir += 1\t\t\t\t\n\t\telif il < lenl:\n\t\t\tmerged.append(left[il])\n\t\t\til += 1\n\t\telse:\n\t\t\tmerged.append(right[ir])\n\t\t\tir += 1\n\treturn merged", "def merge(x, y):\n try:\n from itertools import izip\n except ImportError:\n izip = zip\n from numpy import concatenate\n return (concatenate((a, b)) for a, b in izip(x, y))", "def listops_intersect(list_a,list_b):\r\n\r\n retlist = []\r\n for item in list_a:\r\n if item in list_b:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def concatenateList(list1, list2):\n outputList = []\n\n ## list1\n # if it's an empty list\n if len(list1) == 0:\n outputList.append(list1)\n # if it's already a list of list (from previous __add__)\n elif isinstance(list1[0], list):\n for i in range(len(list1)):\n outputList.append(list1[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list1)\n\n ## list2\n # if it's an empty list\n if len(list2) == 0:\n outputList.append(list2)\n # if it's already a list of list (from previous __add__)\n elif isinstance(list2[0], list):\n for i in range(len(list2)):\n outputList.append(list2[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list2)\n\n return outputList", "def union(list1, list2):\n new_list = list1\n for literal in list2:\n negate_literal = copy.deepcopy(literal)\n negate_literal.negate = not negate_literal.negate\n if negate_literal in list1:\n new_list.remove(negate_literal)\n continue\n if literal not in list1:\n new_list.append(literal)\n return new_list", "def _merge_data(self, d1, d2):\n if not d1 and not d2:\n if isinstance(d1, dict) or isinstance(d2, dict):\n return {}\n return []\n if not d2:\n return d1\n if not d1:\n return d2\n if isinstance(d1, list) and isinstance(d2, list):\n return list(set(d1 + d2))\n if isinstance(d1, list) and not isinstance(d2, dict):\n if d2 in d1:\n return d1\n return d1 + make_list(d2)\n if isinstance(d2, list) and not isinstance(d1, dict):\n if d1 in d2:\n return d2\n return d2 + make_list(d1)\n if not isinstance(d1, dict) and not isinstance(d2, dict):\n if d1 == d2:\n return make_list(d1)\n else:\n return [d1, d2]\n\n res = d1\n for key2, val2 in d2.items():\n if key2 in res:\n res[key2] = self._merge_data(val2, res[key2])\n else:\n res[key2] = val2\n return res", "def insercionListas(L1,L2):\n return set(L1) & set(L2)" ]
[ "0.81090695", "0.80801195", "0.8072913", "0.80316395", "0.8031528", "0.8013592", "0.8011788", "0.7974852", "0.795297", "0.7931757", "0.7878907", "0.7860104", "0.7858302", "0.78444135", "0.78015894", "0.7787144", "0.7747671", "0.7741996", "0.7730882", "0.76854265", "0.76350236", "0.7587222", "0.7578839", "0.75489444", "0.7538549", "0.74662346", "0.73885775", "0.73850167", "0.73498404", "0.7342837", "0.73388994", "0.7321964", "0.7277017", "0.7251057", "0.7243925", "0.72368693", "0.7166755", "0.7141956", "0.7109053", "0.7096433", "0.7091785", "0.707469", "0.70572", "0.7048935", "0.7048304", "0.7037753", "0.7026133", "0.7012592", "0.7008206", "0.69910336", "0.6963104", "0.69282085", "0.6891759", "0.68242836", "0.6819659", "0.6814044", "0.67917836", "0.6782877", "0.6780683", "0.6755143", "0.6735061", "0.67279106", "0.67195684", "0.67067844", "0.6704842", "0.66993606", "0.6686698", "0.6681512", "0.66667736", "0.6625785", "0.6618097", "0.6616648", "0.66159534", "0.66090477", "0.65591204", "0.6544813", "0.65181", "0.6495906", "0.649192", "0.6480175", "0.64639956", "0.64536214", "0.6430349", "0.64196974", "0.64038557", "0.63792723", "0.6370124", "0.6309997", "0.63025177", "0.6297189", "0.629402", "0.6290504", "0.627795", "0.6270023", "0.6255581", "0.6237392", "0.62357926", "0.62283814", "0.62272507", "0.6226922" ]
0.7963335
8
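Several of the candidate snippets in the row above walk two sorted lists with a pair of indices, either to merge them or to intersect them. A minimal sketch of the intersection variant of that two-pointer pattern (illustrative only; the function name and example inputs below are not taken from any dataset row):

def sorted_intersect(list1, list2):
    # Advance the pointer of whichever list has the smaller front element;
    # equal front elements belong to the intersection.
    result = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list1[i] == list2[j]:
            result.append(list1[i])
            i += 1
            j += 1
        elif list1[i] < list2[j]:
            i += 1
        else:
            j += 1
    return result

# Example: sorted_intersect([1, 2, 4, 7], [2, 3, 4, 8]) returns [2, 4].

Because each comparison advances at least one pointer, the intersection (like the merges above) runs in time linear in the combined length of the two inputs.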
Sort the elements of list1. Return a new sorted list with the same elements as list1. This function should be recursive.
def merge_sort(list1): if len(list1) <= 1: return list1 left = merge_sort(list1[:len(list1)/2]) right = merge_sort(list1[len(list1)/2:]) return merge(left, right)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_sort(in_list1: list) -> list:\n if in_list1 is None:\n return []\n if len(in_list1) == 1:\n return [in_list1[0]]\n _list1,_list2= in_list1[:int(((len(in_list1)+1)/2))],\\\n in_list1[int(((len(in_list1)+1)/2)):]\n _ordered_list1 = merge_sort(_list1)\n _ordered_list2 = merge_sort(_list2)\n return merge_ordered_list(_ordered_list1,_ordered_list2)", "def sort_list(self,list_):\r\n list_.sort()", "def sort_1(l):\n pass", "def merge_sort(list1):\n if list1 == []:\n return list1\n else:\n pivot = list1[0]\n lesser = [item for item in list1 if item < pivot]\n pivots = [item for item in list1 if item == pivot]\n greater = [item for item in list1 if item > pivot]\n return merge_sort(lesser) + pivots + merge_sort(greater)", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n \n mid_point = int(len(list1)/2)\n \n return merge(merge_sort(list1[:mid_point]), merge_sort(list1[mid_point:]))", "def merge_sort(list1):\r\n if len(list1) == 0 or len(list1) == 1:\r\n return [item for item in list1]\r\n else:\r\n mid = len(list1) / 2\r\n left = merge_sort(list1[:mid])\r\n right = merge_sort(list1[mid:])\r\n return merge(left, right)", "def merge_sort(list1):\n if len(list1) <= 1:\n answer = list(list1)\n assert answer == sorted(answer)\n return answer\n\n mid = len(list1) // 2\n\n list_low = merge_sort(list1[0:mid])\n list_high = merge_sort(list1[mid:])\n\n answer = merge(list_low, list_high)\n assert answer == sorted(answer)\n return answer", "def merge_ordered_list(in_list1: list, in_list2: list) -> list:\n _list1 = in_list1.copy()\n _list2 = in_list2.copy()\n _output_list = []\n idx_2 = 0\n for element in _list1:\n while idx_2 < len(_list2) and element > _list2[idx_2]:\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n _output_list.append(element)\n while idx_2 < len(_list2):\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n return _output_list", "def merge_sort(l: list) -> list:\r\n # Trap for lists with one or fewer elements.\r\n if len(l) <= 1:\r\n return l[:]\r\n # Divide the list into 2\r\n mid = len(l) // 2\r\n first = l[mid:]\r\n second = l[:mid]\r\n # Recursively sort smaller lists and merge the two resulting lists.\r\n left = merge_sort(first)\r\n right = merge_sort(second)\r\n return merge(left, right)", "def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n result.extend(list2[j:])\r\n return result", "def add_sort_list(\n l1: list,\n l2: list,\n ) -> list:\n \n # Add two lists\n l = l1 + l2\n\n # Sort the added lists\n l.sort()\n\n return l", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n else:\n mid = len(list1) / 2\n return merge(merge_sort(list1[0:mid]),merge_sort(list1[mid:]))", "def sort_2(l):\n l.reverse()", "def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2", "def merge_sort(list):\n\n\tif len(list) <= 1:\n\t\treturn list\n\n\tleft_half, right_half = split(list)\n\tleft = merge_sort(left_half)\n\tright = merge_sort(right_half)\n\n\treturn merge(left, right)", "def sort_4(l):\n l = 
list(set(l))\n l.sort()", "def reorder(l: List[Any]) -> List[Any]:\n sorted_list: List[Any] = list()\n sorted_list.append(l[0])\n for i in range(1, len(l)):\n index: int = 0\n while index < i and l[i] > sorted_list[index]:\n index += 1\n sorted_list.insert(index, l[i])\n return sorted_list", "def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def merge(list1, list2):\n merge_list = []\n l1_copy = list(list1)\n l2_copy = list(list2)\n\n # cycling through list1 and list2: we check the first element in\n # list2, if it's smaller than the first element in list1 we copy it to\n # the merge list and pop it out of list2. Else we break the loop and\n # copy the first element of list1, then pop it and proceed again\n while l1_copy:\n while l2_copy:\n if l2_copy[0] < l1_copy[0]:\n merge_list.append(l2_copy[0])\n l2_copy.pop(0)\n else:\n break\n merge_list.append(l1_copy[0])\n l1_copy.pop(0)\n\n # if list2 is not empty once list1 is, add the remaining elements to the\n # end of the merge list\n if l2_copy:\n merge_list.extend(l2_copy)\n\n return merge_list", "def merge_sort(list):\r\n \r\n if len(list) <= 1:\r\n return list\r\n \r\n left_half, right_half = split(list)\r\n left = merge_sort(left_half)\r\n right = merge_sort(right_half)\r\n \r\n return merge(left, right)", "def reformat_order(list1: List[int], list2: List[int]) -> List[int]:\n result = []\n for item in list1:\n if item in list2:\n result.append(item)\n \n return result", "def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif len(list_2) == 0:\n new_list = new_list + list_1\n\n elif list_1[0] < list_2[0]:\n new_list.append(list_1[0])\n list_1.remove(list_1[0])\n elif list_1[0] >= list_2[0]:\n new_list.append(list_2[0])\n list_2.remove(list_2[0])\n return new_list", "def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]", "def __sort(self, _list, _index, desc, pop_first_element=False):\n if _index != 0:\n _list = [(x[_index], x) for x in _list]\n \n _list.sort()\n \n if desc:\n _list.reverse()\n\n if _index != 0 or pop_first_element: \n _list = [x[1] for x in _list]\n\n return _list", "def merge_sort_aux(l, start1, last2):\n nonlocal c, w, r\n\n def merge(l, s1, l1, s2, l2): \n \"\"\"\n Sort the sublists and merge two halves\n \n Parameter\n ----------------------\n l: unsorted list\n list\n s1: the index of the first element of the 1st list (left side)\n int \n l1: the index of the last element of the 1st list (left side)\n int\n s2: the index of the first element of the 2nd list (right side)\n 
int\n l2: the index of the last element of the 2nd list (right side)\n int\n \"\"\"\n nonlocal c, r, w\n\n # Create temporary list to store sorted value\n tempList = l.copy() \n\n # Compare pairs of values of two list, start from the first element\n i = s1 # Beginning of the left list\n j = s2 # Beginning of the right list\n k = 0\n\n # Compare and add to temporary list\n c += 2\n while i <= l1 and j <= l2: \n c += 3\n r += 2 \n w += 1 \n if l[i] < l[j]:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1 \n else:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n\n # Copy remaining elements of the first list\n c += 1\n while i <= l1:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy remaining elements of the second list \n c += 1\n while j <= l2:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy elements from tempList to list l\n i = s1\n j = 0 \n c += 1\n while i <= l2:\n l[i] = tempList[j]\n i = i + 1\n j = j + 1\n c += 1\n w += 1 \n r += 1 \n \n # Split the list to sublist untill size become one\n c += 1\n if start1 < last2:\n last1 = (start1 + last2) // 2 \n start2 = last1 + 1\n merge_sort_aux(l, start1, last1) #the left side\n merge_sort_aux(l, start2, last2) #the right side\n # Call merge function to merge subarrays \n merge(l, start1, last1, start2, last2)", "def custom_sort(vector:list)->list:\n if len(vector) <= 2:\n return vector\n\n else:\n mid = len(vector) // 2\n vector.insert(1, vector.pop(mid))\n i = 1\n for idx in range(2, len(vector)-1, 2):\n vector.insert(idx+1, vector.pop(mid+i))\n i +=1\n return vector", "def merge(list1, list2):\n answer = []\n assert answer == sorted(answer)\n\n idx1 = 0\n idx2 = 0\n while (idx1 < len(list1)) and (idx2 < len(list2)):\n if list1[idx1] < list2[idx2]:\n answer.append(list1[idx1])\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n answer.append(list2[idx2])\n idx2 += 1\n else:\n answer.append(list1[idx1])\n answer.append(list2[idx2])\n idx1 += 1\n idx2 += 1\n assert answer == sorted(answer)\n\n answer.extend(list1[idx1:])\n answer.extend(list2[idx2:])\n\n assert answer == sorted(answer)\n return answer", "def merge_sort(list):\n # Base Condition\n if len(list) <= 1:\n return list\n\n left_half, right_half = split(list)\n left = merge_sort(left_half)\n right = merge_sort(right_half)\n\n return merge(left,right)", "def merge_sort(input_list: list) -> list:\n n = len(input_list)\n if n <= 1:\n return input_list\n else:\n left = merge_sort(input_list[:n // 2])\n right = merge_sort(input_list[n // 2:])\n return merge(left, right)", "def merge_sort(list1):\n left = []\n right = []\n #merged = []\n if len(list1) <= 1:\n if DEBUG_MS:\n print \"returning\", list1\n return list1\n else:\n pivot = int(math.floor(len(list1)/2))\n if DEBUG_MS:\n print \"pivot\", pivot\n #left = merge_sort(list1[:pivot])\n #right = merge_sort(list1[pivot:])\n left = merge_sort(list1[:pivot])\n right = merge_sort(list1[pivot:])\n #return [min(merge_sort(list1[:pivot]))] + [max(merge_sort(list1[pivot:]))]\n if DEBUG_MS:\n print \"return merge(\", left, \",\" , right, \")\"\n return merge(left, right)", "def merge_sort(self, lst):\r\n [sorted_lst, number_of_inversions] = self.sort_and_get_number_of_inversions(lst)\r\n \r\n return sorted_lst", "def sort(self):\n self.list.sort(key=lambda x: ''.join)", "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = 
LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "def reverse_sort(list_to_sort: List) -> List:\n if len(list_to_sort) == 1:\n return list_to_sort\n list_to_sort = [e for e in list_to_sort] # to avoid sorting in place, comment this to change original list\n for j in range(len(list_to_sort) - 2, -1, -1):\n sorted_element = list_to_sort[j]\n # print('sorted element:', sorted_element)\n i = j + 1\n while i < len(list_to_sort) and list_to_sort[i] > sorted_element:\n list_to_sort[i - 1] = list_to_sort[i]\n i += 1\n list_to_sort[i - 1] = sorted_element\n # print('list after insertion:', list_to_sort)\n return list_to_sort", "def sort(self, *args, **kargs):\n list.sort(self, *args, **kargs)\n self.emit('modified')", "def reverse_sort_lists(list_1, list_2):\n list_1_sorted, list_2_sorted = zip(*sorted(zip(list_1, list_2), key=operator.itemgetter(0), reverse=True))\n return list_1_sorted, list_2_sorted", "def merge(l1,l2):\n\n result = []\n\n while l1 and l2:\n if l1[0] < l2[0]:\n result.append(l1.pop(0))\n else:\n result.append(l2.pop(0))\n\n while l1:\n result.append(l1.pop(0))\n\n while l2:\n result.append(l2.pop(0)) \n\n return result", "def merge_sort(l): \n # Raise value\n if not isinstance(l, list):\n raise TypeError(\"Not a list\")\n\n # Initialize variables to count\n c = r = w = 0\n\n def merge_sort_aux(l, start1, last2):\n \"\"\"\n Split the list to sublist till size becomes one by recursively calls itself \n and merge them\n \n Parameter\n -------------------\n start1: the first index of the list in need of splitting\n int\n last2: the last index of the list in need of splitting\n int\n \"\"\"\n nonlocal c, w, r\n\n def merge(l, s1, l1, s2, l2): \n \"\"\"\n Sort the sublists and merge two halves\n \n Parameter\n ----------------------\n l: unsorted list\n list\n s1: the index of the first element of the 1st list (left side)\n int \n l1: the index of the last element of the 1st list (left side)\n int\n s2: the index of the first element of the 2nd list (right side)\n int\n l2: the index of the last element of the 2nd list (right side)\n int\n \"\"\"\n nonlocal c, r, w\n\n # Create temporary list to store sorted value\n tempList = l.copy() \n\n # Compare pairs of values of two list, start from the first element\n i = s1 # Beginning of the left list\n j = s2 # Beginning of the right list\n k = 0\n\n # Compare and add to temporary list\n c += 2\n while i <= l1 and j <= l2: \n c += 3\n r += 2 \n w += 1 \n if l[i] < l[j]:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1 \n else:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n\n # Copy remaining elements of the first list\n c += 1\n while i <= l1:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy remaining elements of the second list \n c += 1\n while j <= l2:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy elements from tempList to list l\n i = s1\n j = 0 \n c += 1\n while i <= l2:\n l[i] = tempList[j]\n i = i + 1\n j = j + 1\n c += 1\n w += 1 \n r += 1 \n \n # Split the list to sublist untill size become one\n c += 1\n if start1 < last2:\n last1 = (start1 + last2) // 2 \n start2 = last1 + 1\n merge_sort_aux(l, start1, last1) #the left side\n merge_sort_aux(l, start2, last2) #the right side\n # Call merge function to merge subarrays \n merge(l, start1, last1, start2, last2)\n \n start = 0\n last = len(l) - 1\n merge_sort_aux(l, start, last) \n \n return c, r, w", "def _merge_lists(list1, list2):\n for v2 in reversed(list2):\n if 
isinstance(v2, Descriptor):\n if v2 in list1:\n v1 = list1.pop(list1.index(v2))\n list1.insert(0, v1.merge(v2))\n else:\n list1.insert(0, v2)\n elif isinstance(v2, list):\n raise CekitError(\"Cannot merge list of lists\")\n else:\n if v2 not in list1:\n list1.insert(0, v2)\n\n return list1", "def sort(left,right):\n return_list = []\n while len(left)>0 and len(right)>0:\n \"note to self, while rechecks criteria every time, so left and right and constantly checked\"\n return_list.append(left.pop(0) if left[0]< right[0] else right.pop(0))\n return_list+= left if len(left)>0 else right\n return return_list", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n temp = lst\n switched = True\n while switched:\n switched = False\n for i in range(len(temp) - 1):\n if compare(temp[i], temp[i + 1]) == 1:\n temp[i], temp[i + 1] = temp[i + 1], temp[i]\n switched = True\n\n return temp", "def sorting(my_list):\n for indx in range(1,len(my_list)):\n i=indx\n while i>0:\n if my_list[i]<my_list[i-1]:\n temp=my_list[i-1]\n my_list[i-1]=my_list[i]\n my_list[i]=temp\n i=i-1\n return my_list", "def merge(left_sort_list, right_sort_list):\n left_index = 0\n right_index = 0\n left_len = len(left_sort_list)\n right_len = len(right_sort_list)\n temp_list = []\n while left_index <= left_len - 1 and right_index <= right_len - 1:\n if left_sort_list[left_index] <= right_sort_list[right_index]:\n temp_list.append(left_sort_list[left_index])\n left_index += 1\n else:\n temp_list.append(right_sort_list[right_index])\n right_index += 1\n if left_index == left_len:\n temp_list += right_sort_list[right_index:]\n else:\n temp_list += left_sort_list[left_index:]\n return temp_list", "def sort(List):\n\n if len(List) < 2:\n return List\n else:\n mid = len(List)//2\n leftHalf = sort(List[:mid])\n rightHalf = sort(List[mid:])\n return merge(leftHalf, rightHalf)", "def tree_sort(l: List(int)) -> List(int):\n\n tree = Tree()\n [tree.add_child(n) for n in l]\n return tree.collate()", "def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result", "def recursive_sort(list_to_sort, key=0):\n length = len(list_to_sort)\n if length <= 1:\n return list_to_sort\n swaplist = list_to_sort.copy()\n for i in range(0, length - 1):\n if swaplist[i][key] > swaplist[i + 1][key]:\n (swaplist[i], swaplist[i + 1]) = \\\n (swaplist[i + 1], swaplist[i])\n return recursive_sort(swaplist[0:length - 1], key) \\\n + swaplist[length - 1:length]", "def sort(List):\n if not isinstance(List, (list, tuple)):\n raise TypeError(\"Argument must be list or tuple\")\n List = list(List).copy()\n sorted = False\n iter = len(List) - 1\n while (sorted == False):\n sorted = True\n for i in range(iter):\n if List[i] > List[i+1]:\n List[i],List[i+1] = List[i+1],List[i]\n sorted = False\n iter -= 1\n\n return List", "def merge_sort(cls, num_list):\n if len(num_list) > 1:\n first_half = num_list[:len(num_list) // 2]\n second_half = num_list[len(num_list) // 2:]\n cls.merge_sort(first_half)\n cls.merge_sort(second_half)\n first_index = 0\n second_index = 0\n list_index = 0\n\n while first_index < len(first_half) and \\\n second_index < len(second_half):\n if first_half[first_index] > second_half[second_index]:\n num_list[list_index] = second_half[second_index]\n second_index += 1\n else:\n num_list[list_index] = 
first_half[first_index]\n first_index += 1\n list_index += 1\n\n for i in range(first_index, len(first_half)):\n num_list[list_index] = first_half[first_index]\n list_index += 1\n first_index += 1\n\n for x in range(second_index, len(second_half)):\n num_list[list_index] = second_half[second_index]\n list_index += 1\n second_index += 1", "def sort(lst):\n \"*** YOUR CODE HERE ***\"\n if len(lst) <= 0:\n return []\n return [min(lst)] + sort(remove_first(lst, min(lst)))", "def __SortLists(self): \n\n \n AS=argsort(self.__NumList)\n\n self.__IndList=[self.__IndList[i] for i in AS]#list(self.__IndList[AS])\n self.__ObjList=[self.__ObjList[i] for i in AS]#list(self.__IndList[AS])\n self.__NumList=[self.__NumList[i] for i in AS]", "def sorted(x) -> List:\n pass", "def reorderList(self, head: ListNode) -> None:\n if not head:\n return None\n\n def reverse(head):\n newnode = None\n cur = head\n while cur:\n nextnode = cur.next\n cur.next=newnode\n newnode = cur\n cur = nextnode\n return newnode\n\n def getmid(head):\n slow, fast = head, head\n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n\n def merge(l1, l2):\n while l1 and l2:\n l1_tmp = l1.next\n l2_tmp = l2.next\n\n l1.next = l2\n l1 = l1_tmp\n\n l2.next = l1\n l2 = l2_tmp\n ptr1 = head\n left_mid = getmid(head)\n mid = left_mid.next\n left_mid.next=None\n\n ptr2 = reverse(mid)\n merge(ptr1, ptr2)", "def sort_L3():\n for item in d_list:\n item.sort(key=operator.itemgetter(1))", "def merge(elements):\n if not isinstance(elements, (list, tuple)):\n raise ValueError('A list/tuple of values should be given.')\n\n # Get the instance of the data structure given.\n instance = type(elements)\n\n if instance is tuple:\n # Convert the tuple of elements to list of elements. We need to\n # convert the tuple to list because a tuple is immutable. You cannot\n # swap the elements of a tuple.\n elements = list(elements)\n\n is_str = all(isinstance(element, str) for element in elements)\n\n if any(isinstance(element, str) for element in elements) and not is_str:\n # When any of the element in the list is a string, we should then\n # check whether all the other elements are strings or not.\n raise ValueError(\"int() and str() type can't be specified at the same \"\n \"time\")\n\n if len(elements) <= 1:\n # A list of size 1 is already sorted.\n return elements\n\n # Using \"//\" gives you int() value.\n mid_position = len(elements) // 2\n\n # Recursively sort both sub-lists\n left = merge(elements[:mid_position])\n right = merge(elements[mid_position:])\n\n # Merge the sorted sub-lists.\n sorted = []\n\n while left and right:\n if left[0] <= right[0]:\n # When the first element of left sub-list is less than first\n # element of right sub-list, append the first item of left\n # sub-list to the sorted list and remove that element from left\n # sub-list.\n sorted.append(left[0])\n left.pop(0)\n else:\n # When the first element of right sub-list is less than first\n # element of left sub-list, append the first item of right\n # sub-list to the sorted list and remove that element from right\n # sub-list.\n sorted.append(right[0])\n right.pop(0)\n\n # There is a possibility that the left sub-list might have elements left\n # in it. Append the elements from left sub-list to the sorted list.\n while left:\n sorted.append(left[0])\n left.pop(0)\n\n # There is a possibility that the right sub-list might have elements left\n # in it. 
Append the elements from right sub-list to the sorted list.\n while right:\n sorted.append(right[0])\n right.pop(0)\n\n if instance is tuple:\n # Convert the data structure back to tuple if the user has provided a\n # tuple of values.\n sorted = tuple(sorted)\n\n return sorted", "def sort_list():\n fun_list = basic_list_exception.make_list()\n fun_list.sort()\n return fun_list", "def sort_0(l):\n l.sort()", "def merge_sort(a_list):\n if len(a_list) <= 1:\n # a list with one element is sorted by definition\n return a_list\n # apply recursion if length is 2 or more\n else:\n middle_term = len(a_list) // 2\n left_half = a_list[:middle_term]\n right_half = a_list[middle_term:]\n\n left_half = merge_sort(left_half)\n right_half = merge_sort(right_half)\n\n return merge_lists(left_half, right_half)", "def sort(self):\n self.chain_list.sort()\n for chain in self.chain_list:\n chain.sort()", "def sort_3(l):\n l.sort(reverse=True)", "def qsort1(list):\r\n if list == []: \r\n return []\r\n else:\r\n pivot = list[0]\r\n lesser = qsort1([x for x in list[1:] if x < pivot])\r\n greater = qsort1([x for x in list[1:] if x >= pivot])\r\n return lesser + [pivot] + greater", "def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n result_list.extend(list1[list1_index:])\n if list2_index < list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list", "def get_sorted_list(words_list):\n # Convert the words to lower case\n words = [word.strip().lower() for word in words_list]\n # Sort the list and return the sorted list\n return merge_sort(words)", "def qsort2(list):\n if list == []: \n return []\n else:\n pivot = list[0]\n lesser, equal, greater = partition(list[1:], [], [pivot], [])\n return qsort2(lesser) + equal + qsort2(greater)", "def buble_sort(lst):\n lst_sorted = copy.copy(lst)\n for i in range(len(lst_sorted)):\n for j in range(len(lst_sorted)):\n if j == len(lst_sorted) - 1:\n continue\n if lst_sorted[j][1] > lst_sorted[j + 1][1]:\n lst_sorted[j], lst_sorted[j+1] = lst_sorted[j+1], lst_sorted[j]\n\n return lst_sorted", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n for i in range(1, len(lst)): #loops through each element starting at the second one\n for j in range(i, 0, -1): #loops through each element coming before i starting at i and going backwards\n if compare(lst[j], lst[j-1]) < 0: #checks to see if the previous element is smaller than the current (by saying <0 we keep the sort stable as well)\n lst[j], lst[j-1] = lst[j-1], lst[j] #if they are, we switch them\n else:\n break #if they are not, we know that the element is in its proper place\n return lst", "def reorderList(self, head: ListNode) -> None:\n if not head or not head.next:\n return\n # Split the list\n first, second = head, head\n while first and first.next:\n first, second = first.next.next, second.next\n mid, p = second.next, second.next\n second.next = None\n # Reverse the second half\n while p and p.next:\n nxt = p.next\n p.next = nxt.next\n nxt.next = mid\n mid = nxt\n # Interweave\n p1, p2 = head, mid\n while p1 and p2:\n p1nxt, p2nxt = p1.next, p2.next\n p1.next, p2.next = p2, p1nxt\n p1, p2 = p1nxt, 
p2nxt", "def heap_sort(list):\n pass", "def sort(self, *args: Any, **kwargs: Any) -> BaseList:\n super().sort(*args, **kwargs)\n return self", "def sort_list_pairs(list1, list2, **kwargs):\n order = kwargs.get('order', 'descending')\n\n if type(list1) == np.ndarray:\n list1 = list1.tolist()\n\n if type(list2) == np.ndarray:\n list2 = list2.tolist()\n list1, list2 = zip(*sorted(zip(list1, list2)))\n\n if order == 'descending':\n return list1[::-1], list2[::-1]\n elif order == 'ascending':\n return list1, list2", "def sortList(lst, reverse=False, key=None):\n return sorted(lst, key=key, reverse=reverse)", "def insertionSort(list):", "def qsort1(list):\n if list == []: \n return []\n else:\n pivot = list[0]\n lesser = qsort1([x for x in list[1:] if x < pivot])\n greater = qsort1([x for x in list[1:] if x >= pivot])\n return lesser + [pivot] + greater", "def shell_sort(a_list):\n sublist_count = len(a_list) // 2\n while sublist_count > 0:\n for start_position in range(sublist_count):\n a_list = insertion_sort(\n a_list,\n start=start_position,\n gap=sublist_count\n )\n sublist_count = sublist_count // 2\n return a_list", "def reOrderListOfListByFirstMember(listOfList=None):\n\tfirstList = listOfList[0]\n\tx_ar = numpy.array(firstList, numpy.float)\n\t#sort x_ar and y_ar must be in the order of x_ar\n\tindexOfOrderList = numpy.argsort(x_ar)\n\treturnListOfList = []\n\tfor ls in listOfList:\n\t\tar = numpy.array(ls, numpy.float)\n\t\tar = ar[indexOfOrderList]\n\t\treturnListOfList.append(ar)\n\treturn PassingData(listOfList=returnListOfList)", "def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li", "def merge_sort(input_list,start,end):\n if start < end:\n mid=(start+end)//2\n merge_sort(input_list,start,mid)\n merge_sort(input_list,mid+1,end)\n return merge(input_list,start,mid,end)", "def remove_duplicates(list1):\n #iterative, not recursive\n if len(list1) == 0:\n return list1\n new_list = []\n new_list.append(list1[0])\n for item in list1[1:]:\n if item != new_list[-1]:\n new_list.append(item)\n return new_list", "def merge_sort(my_list):\n if len(my_list) < 1:\n return my_list\n if len(my_list) > 1:\n middle = len(my_list) // 2\n left_half = my_list[:middle]\n right_half = my_list[middle:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n i = 0\n j = 0\n k = 0\n while i < len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]:\n my_list[k] = left_half[i]\n i += 1\n else:\n my_list[k] = right_half[j]\n j += 1\n k += 1\n\n while i < len(left_half):\n my_list[k] = left_half[i]\n i += 1\n k += 1\n\n while j < len(right_half):\n my_list[k] = right_half[j]\n j += 1\n k += 1\n\n return my_list", "def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return merged_output", "def merge_quick_sort(L):\n list1 = []\n list2 = []\n (evens, odds) 
= merge_sort.split(L)\n list1 += quick_sort.quick_sort(evens)\n list2 += quick_sort.quick_sort(odds)\n x = merge_sort.merge(list1,list2)\n return x", "def sort_list(list, key):\r\n list.sort(lambda x,y: cmp(key(x), key(y))) # Python < 2.4 hack\r\n return list", "def merge_sort(items):\n # TODO: Check if list is so small it's already sorted (base case)\n # TODO: Split items list into approximately equal halves\n # TODO: Sort each half by recursively calling merge sort\n # TODO: Merge sorted halves into one list in sorted order\n if len(items) > 1:\n pivot = len(items) // 2\n right = merge_sort(items[pivot:])\n left = merge_sort(items[:pivot])\n sorted_list = merge(left, right)\n else:\n sorted_list = items\n # change the input items \n items[:] = sorted_list\n return items", "def mergesort(lst, inversions):\n\t# inversions contains inverted list elements, once for each inversion\n\tif len(lst) == 1:\n\t\treturn lst\n\tcut_idx = (len(lst) + 1) / 2\n\tleft = lst[:cut_idx]\n\tright = lst[cut_idx:]\n\tleft = mergesort(left, inversions)\n\tright = mergesort(right, inversions)\n\treturn merge(left, right, inversions)", "def quick_sort(unsorted_list):\r\n\tsorted_list = list(unsorted_list)\r\n\tless = []\r\n\tequal = []\r\n\tgreater = []\r\n\tif len(sorted_list) > 1:\r\n\t\tpivot = sorted_list[0]\r\n\t\tfor item in sorted_list:\r\n\t\t\tif item < pivot:\r\n\t\t\t\tless.append(item)\r\n\t\t\telif item == pivot:\r\n\t\t\t\tequal.append(item)\r\n\t\t\telif item > pivot:\r\n\t\t\t\tgreater.append(item)\r\n\t\treturn quick_sort(less) + equal + quick_sort(greater)\r\n\telse:\r\n\t\treturn sorted_list", "def sort(*, list : Union[List[Any], ConduitVariable], reverse : bool = False) -> None:\n list.sort(key = None, reverse = reverse)", "def selection_sort(input_list):\n for i in range(len(input_list)-1):\n ## By slicing the list incrementally, swap first element with lowest\n ## element of sliced list\n temp_item = input_list[i]\n smallest_index = smallest_elem_index(input_list[i:])\n input_list[i] = input_list[smallest_index+i]\n input_list[smallest_index+i] = temp_item\n return input_list", "def remove_duplicates(list1):\r\n if len(list1) == 1 or len(list1) == 0:\r\n return [item for item in list1]\r\n else:\r\n if list1[-1] == list1[-2]:\r\n return remove_duplicates(list1[:-1])\r\n else:\r\n new_list = remove_duplicates(list1[:-1])\r\n new_list.append(list1[-1])\r\n return new_list", "def quick_sort(my_list):\n if len(my_list) == 0:\n return []\n else:\n pivot = my_list[0]\n left = [element for element in my_list if element[1] < pivot[1]]\n pivots = [element for element in my_list if element[1] == pivot[1]]\n right = [element for element in my_list if element[1] > pivot[1]]\n return quick_sort(left) + pivots + quick_sort(right)", "def merge_sorted_list(left_sublist,right_sublist):\n left_index=right_index=0\n sorted_list=[]\n base_list_length=len(left_sublist)+len(right_sublist)\n while len(sorted_list)<base_list_length:\n if left_sublist[left_index]<right_sublist[right_index]:\n sorted_list.append(left_sublist[left_index])\n left_index+=1\n else:\n sorted_list.append(right_sublist[right_index])\n right_index+=1\n \n if left_index==len(left_sublist):\n sorted_list+=right_sublist[right_index:]\n break\n if right_index==len(right_sublist):\n sorted_list+=left_sublist[left_index:]\n break\n \n return sorted_list", "def mergeKLists(self, lists: 'List[ListNode]') -> 'ListNode':\n if not lists:\n return None\n nodes = []\n for head in lists:\n while head:\n nodes.append(head.val)\n head = head.next\n\n 
nodes.sort()\n\n newList = ListNode(0)\n curr = newList\n for node in nodes:\n curr.next = ListNode(node)\n curr = curr.next\n return newList.next", "def reorderList(self, head: ListNode) -> None:\n if not head:\n return\n\n # find the mid point\n slow = fast = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n\n # reverse the second half in-place\n # 4 -> 5 -> 6 = 6 -> 5 -> 4\n pre, node = None, slow\n while node:\n pre, node.next, node = node, pre, node.next\n\n # Merge in-place; Note : the last node of \"first\" and \"second\" are the same\n first, second = head, pre\n while second.next:\n first.next, first = second, first.next\n second.next, second = first, second.next\n return", "def merge(list1, list2):\n res = []\n index_i, index_j = 0, 0\n while index_i < len(list1) and index_j < len(list2):\n if list1[index_i] <= list2[index_j]:\n res.append(list1[index_i])\n index_i += 1\n else:\n res.append(list2[index_j])\n index_j += 1\n res += list1[index_i:]\n res += list2[index_j:]\n return res", "def merge(list_a, list_b):\n new_list = []\n i = 0\n j = 0\n while (i < len(list_a) and j < len(list_b)):\n if(list_a[i] < list_b[j]):\n new_list.append(list_a[i])\n i += 1\n else:\n new_list.append(list_b[j])\n j += 1\n new_list += list_a[i:]\n new_list += list_b[j:]\n\n return new_list", "def merge_sort(items):\n # print(items)\n # Divide the unsorted list until only 1 element remains\n if len(items) <= 1:\n return items\n\n mid = len(items) // 2\n # Merge sort recursively on both hl1ves\n left, right = merge_sort(items[0:mid]), merge_sort(items[mid:])\n # print(left, right)\n # Return the merged output\n return merge(left, right)", "def sort(student_list):\n for i in range(len(student_list) - 1):\n for x in range(len(student_list) - 1):\n if student_list[x] > student_list[x + 1]:\n student_list[x], student_list[x + 1] = \\\n student_list[x + 1], student_list[x]", "def sort_unit_lst(self, attrname, lst2sort):\n comp = []\n for unit in lst2sort:\n importance = self._importance_rank(unit, attrname)\n comp.append((unit, importance))\n comp = sorted(comp, key= lambda x: x[1], reverse=True)\n\n return [x[0] for x in comp]", "def merge_sort(L):\n n = len(L)\n if n < 2:\n return L\n mid = n // 2\n left = L[:mid]\n right = L[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(L, left, right)" ]
[ "0.76767325", "0.732384", "0.7290804", "0.7187142", "0.7152374", "0.71103555", "0.7107798", "0.7064439", "0.6912521", "0.6830091", "0.6827197", "0.67525494", "0.66716707", "0.6565023", "0.6531662", "0.6518769", "0.65044874", "0.64856726", "0.6469198", "0.6412194", "0.6409195", "0.6406963", "0.63928574", "0.6388656", "0.6353851", "0.6346843", "0.6346413", "0.6344975", "0.6342813", "0.6340903", "0.63271606", "0.6301701", "0.6300604", "0.6287337", "0.6263896", "0.62419486", "0.6207476", "0.6206216", "0.62057817", "0.6203104", "0.61957717", "0.6191763", "0.61913174", "0.6190659", "0.6179675", "0.6168894", "0.61517715", "0.6143689", "0.6127907", "0.6115153", "0.61142975", "0.6107835", "0.6105976", "0.61051387", "0.6103153", "0.60993683", "0.6089576", "0.60799813", "0.6074659", "0.6072863", "0.6053876", "0.60405046", "0.60292757", "0.6018994", "0.6010241", "0.6008327", "0.6001702", "0.59969854", "0.5991388", "0.5974268", "0.59613395", "0.5945981", "0.5945359", "0.5941575", "0.5933651", "0.59308285", "0.592667", "0.5913954", "0.591377", "0.59126365", "0.5912192", "0.5910434", "0.5903348", "0.5902173", "0.5893162", "0.58915246", "0.58889174", "0.58884734", "0.58859086", "0.58819133", "0.5874967", "0.58572894", "0.5848361", "0.58277786", "0.5825856", "0.58242416", "0.5823244", "0.5821404", "0.58156747", "0.5810021" ]
0.70278823
8
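The document field of this row calls a merge() helper that is not defined inside the stored snippet, and it splits the list with Python 2 style division. A self-contained, runnable Python 3 sketch of the same recursive approach (the merge helper shown here is an assumed companion, not part of the stored document):

def merge_sort(list1):
    # Base case: a list of zero or one element is already sorted.
    if len(list1) <= 1:
        return list1
    mid = len(list1) // 2  # integer division keeps the split index an int in Python 3
    left = merge_sort(list1[:mid])
    right = merge_sort(list1[mid:])
    return merge(left, right)

def merge(left, right):
    # Combine two already-sorted lists into one new sorted list.
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result

# Example: merge_sort([5, 2, 9, 1]) returns [1, 2, 5, 9].

This keeps the two properties the query asks for: the function is recursive, and it builds a new sorted list from slices rather than sorting the input in place.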
Generate all strings that can be composed from the letters in word in any order. Returns a list of all strings that can be formed from the letters in word. This function should be recursive.
def gen_all_strings(word): if word == '': return [''] else: first = word[0] rest = word[1:] rest_strings = gen_all_strings(rest) all_words = [] for string in rest_strings: for leter in range(len(string)+1): all_words.append(string[0:leter]+first+string[leter:]) return rest_strings + all_words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_all_strings(word):\n if not word:\n return [\"\"]\n \n all_strings = []\n for string in gen_all_strings(word[1:]):\n for letter_idx in range(len(string) + 1):\n all_strings.append(string[letter_idx:] + word[0] + string[:letter_idx])\n \n return gen_all_strings(word[1:]) + all_strings", "def gen_all_strings(word):\r\n if len(word) == 0:\r\n return ['']\r\n else:\r\n first = word[0]\r\n rest = gen_all_strings(word[1:])\r\n new = []\r\n for item in rest:\r\n if len(item) > 0:\r\n for pos in range(len(item)):\r\n new.append(item[:pos] + first + item[pos:])\r\n new.append(item + first)\r\n new.append(first)\r\n new.extend(rest)\r\n return new", "def gen_all_strings(word):\n if DEBUG_GAS:\n print \"WORD\", word\n if len(word) < 1:\n if DEBUG_GAS:\n print \"BASE ZERO\"\n print \"len(word)\", len(word)\n print \"word\", word\n return ['']\n if len(word) == 1:\n if DEBUG_GAS:\n print \"BASE ONE\"\n print \"len(word)\", len(word)\n print \"word\", word\n return ['', word]\n else:\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n permutations = []\n if DEBUG_GAS:\n print \"rest_strings\", rest_strings\n print first, rest\n for item in rest_strings:\n if DEBUG_GAS:\n print \"rest_strings item\", item\n for dummy_idx in range(len(item)+1):\n if DEBUG_GAS:\n print \"dummy_idx\", dummy_idx\n print \"item\", item\n permutations.append(str(item[:dummy_idx] + first + item[dummy_idx:]))\n for item in permutations:\n rest_strings.append(item)\n return rest_strings", "def word_perms(word):\n\t# Question 4a: Generates all strings that are permutations of the letters in word\n\treturn {''.join(w) for w in permutations(word)}", "def gen_all_strings(word):\n if len(word) == 0:\n return [\"\"]\n elif len(word) == 1:\n return [\"\",word]\n else:\n result_strings = []\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n new_strings = []\n for rest_string in rest_strings:\n for dummy_index in range(len(rest_string)):\n #在首位插入\n if dummy_index == 0:\n new_string = first + rest_string\n new_strings.append(new_string)\n #在中间插入 \n else:\n new_string = rest_string[0:dummy_index] + first + rest_string[dummy_index:]\n new_strings.append(new_string)\n #在末尾插入\n new_strings.append(rest_string + first)\n \n result_strings.extend(rest_strings)\n result_strings.extend(new_strings)\n \n return result_strings", "def gen_all_strings(word):\n anagrams = []\n\n # if the word has a length of 0, append an empty string\n if len(word) == 0:\n anagrams.append('')\n\n else:\n # split the word in first letter + rest\n first = word[0]\n rest = word[1 :]\n\n # generate the strings for the rest of the word\n rest_strings = gen_all_strings(rest)\n\n # add the strings to the anagrams list\n anagrams.extend(rest_strings)\n\n # create new strings by relocating the first character\n # in every possible position\n for string in rest_strings:\n for idx in range(len(string) + 1):\n new_string = string[: idx] + first + string[idx :]\n anagrams.append(new_string)\n\n return anagrams", "def encode_word(word: str) -> List[str]:\n inner_letters = word[1:-1]\n inner_letters = shuffle(inner_letters)\n return [word[0], *inner_letters, word[-1]]", "def generate_words(combo,scrabble_words_dict):\n word_set = set()\n for w in itertools.permutations(combo):\n word = ''.join(w)\n if word in scrabble_words_dict:\n word_set.add(word)\n return word_set", "def __lettersToString(self, words):\r\n \r\n li = []\r\n \r\n for word in words:\r\n li.append(\"\".join(word))\r\n \r\n return li", "def 
generate_alphabet_from_word(word):\n word = \" \"+word+\" \"\n chars = [char for char in word] # Getting letters from the word\n chars += map(add, chars[:-1], chars[1:]) # Adding bigrams to the list\n\n # Computing hash of items and add 0 to the list\n return set([0] + [anagram_hash(c) for c in set(chars)])", "def get_combo(starting_letter, length): # Apparently ngrams beyond bigrams only have two letter file names. Still keeping this for generality, but should always be run with length=2 in this context\n alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n combos = list(itertools.combinations(alpha, length - 1))\n combos = [starting_letter + ''.join(item) for item in combos]\n\n return combos", "def word_combination(wlist:list) -> list :\r\n\r\n if wlist and len(wlist)>1:\r\n return chain(*map(lambda x: combinations(wlist, x), range(1, len(wlist)+1)))\r\n else :\r\n return wlist", "def _process(self, word: str) -> List[str]:\n # if a blank arrives from splitting, just return an empty list\n if len(word.strip()) == 0:\n return []\n word = self.convert_consonantal_i(word)\n my_word = \" \" + word + \" \"\n letters = list(my_word)\n positions = []\n for dipth in self.diphthongs:\n if dipth in my_word:\n dipth_matcher = re.compile(\"{}\".format(dipth))\n matches = dipth_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n matches = self.kw_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n letters = string_utils.merge_next(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions.clear()\n if not self._contains_vowels(\"\".join(letters)):\n return [\n \"\".join(letters).strip()\n ] # occurs when only 'qu' appears by ellision\n positions = self._starting_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_right(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._starting_consonants_only(letters)\n positions = self._ending_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_left(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._ending_consonants_only(letters)\n positions = self._find_solo_consonant(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_solo_consonant(letters)\n positions = self._find_consonant_cluster(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_consonant_cluster(letters)\n return letters", "def get_words(f: str, letters: List[str]) -> List[str]:\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n words_file = open(f)\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in words_file:\r\n word = word[:-1].lower()\r\n if len(word) >= 4:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list", "def _possible_words(self):\n new_words = []\n for word in 
self._words:\n if word not in (self._used_words + tuple(self._tried_words)):\n for i in range(len(self._start)):\n if word[:i] + word[i+1:] == self._start[:i] + self._start[i+1:]:\n new_words.append(word)\n new_words.sort()\n return new_words", "def letters_generator():\n def multiletters(seq):\n for n in itertools.count(1):\n for s in itertools.product(seq, repeat=n):\n yield \"\".join(s)\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return multiletters(letters)", "def anagrams(word): \n if len(word) < 2:\n yield word\n else:\n for i, letter in enumerate(word):\n if not letter in word[:i]: #avoid duplicating earlier words\n for j in anagrams(word[:i]+word[i+1:]):\n yield j+letter", "def word_generator():\n all_words = []\n for _ in range(NUM_UNIQUE_WORDS):\n rand_word = ''.join(random.choice(CHARS) for _ in range(WORD_SIZE))\n all_words.append(rand_word)\n return all_words", "def find_words(text):\n print \"finding combinations\"\n length = len(text)\n n = length - 1\n num_combos = 2 ** (length - 1)\n\n bins = []\n for i in range(num_combos):\n num = bin(i).rsplit('b', 1)[1]\n num_str = num.zfill(n)\n bins.append(num_str)\n\n total_combos = []\n for binary_num in bins:\n combo = []\n for i in range(n):\n if binary_num[i] == '1':\n combo.append(text[i])\n combo.append(',')\n else:\n combo.append(text[i])\n\n combo.append(text[-1])\n combo = ''.join(combo)\n combo = combo.split(',')\n total_combos.append(combo)\n\n return total_combos", "def gen_linear_anagram_candidates(word):\n anagram_candidates = []\n for pos in range(1, len(word)):\n anagram_candidates += [word[pos:] + word[0:pos]]\n return anagram_candidates", "def check_words(combinations):\n translations = []\n for c in combinations:\n translation = []\n found_def = True\n for char in c:\n food_word = Food_Word.find_match(char)\n if food_word:\n translation.append(food_word.get_json())\n else:\n entries = Dict_Entry.find_matches(char)\n if entries != []:\n for entry in entries:\n translation.append(entry.get_json())\n elif len(char) == 1:\n # If the character isn't in the dictionary (usually punctuation)\n d = {\n \"char\": char,\n \"pinyin\": \"\",\n \"english\": \"\" \n }\n translation.append(d)\n else:\n found_def = False\n break\n if found_def:\n return translation", "def get_words(f, letters):\n # lettrs = []\n # okay = True\n # words = []\n # nline = ''\n # with open(f, 'r') as vocabulary:\n # for line in vocabulary.readlines():\n # nline = line.replace(\"\\n\", \"\").lower()\n # if 4 <= len(nline) <= 9 and letters[4] in nline:\n # lettrs = list(nline)\n # for lettr in lettrs:\n # if lettr not in letters:\n # okay = False\n # break\n # else:\n # okay = True\n # if okay is True:\n # words.append(nline)\n #\n # lettrs = copy.copy(letters)\n # nwords = []\n # okay = True\n # for word in words[::1]:\n # lettrs = copy.copy(letters)\n # for letter in word:\n # if letter in lettrs:\n # lettrs[lettrs.index(letter)] = '0'\n # else:\n # okay = False\n # break\n # if okay is True:\n # nwords.append(word)\n # okay = True\n #\n # unique = True\n # words = []\n # for word in nwords:\n # if nwords.count(word) > 1:\n # nwords.remove(word)\n # nwords.sort()\n # return nwords\n res = []\n cort_letters = []\n our_letters = []\n res = []\n f = open(f, 'r')\n for line in f:\n line = line.replace(\"\\n\", \"\").strip().lower()\n if 4 <= len(line) <= 9:\n if letters[4] in line:\n count = 0\n for each_letter in line:\n if each_letter in letters:\n count += 1\n if count == len(line):\n our_letters.append(line)\n f.close()\n for each_word in 
our_letters:\n count_let = 0\n for each_letter in each_word:\n if each_word.count(each_letter) <= letters.count(each_letter):\n count_let += 1\n if count_let == len(each_word):\n res.append(each_word)\n for each in res:\n if res.count(each) > 1:\n res.remove(each)\n return sorted(res)", "def get_word_pattern(word: str) -> str:\n word = word.upper()\n next_num = 0\n letter_nums = {}\n word_pattern = []\n\n for letter in word:\n if letter not in letter_nums:\n letter_nums[letter] = str(next_num)\n next_num += 1\n word_pattern.append(letter_nums[letter])\n return \".\".join(word_pattern)", "def _get_replacement_words(self, word):\n\n if len(word) <= 1:\n return []\n\n candidate_words = []\n\n start_idx = 1 if self.skip_first_char else 0\n end_idx = (len(word) - 2) if self.skip_last_char else (len(word) - 1)\n\n if start_idx >= end_idx:\n return []\n\n if self.random_one:\n i = np.random.randint(start_idx, end_idx)\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n else:\n for i in range(start_idx, end_idx):\n candidate_word = word[:i] + word[i + 1] + word[i] + word[i + 2 :]\n candidate_words.append(candidate_word)\n\n return candidate_words", "def lettergen():\n for repeat in range(1, 10):\n for item in itertools.product(ascii_uppercase, repeat=repeat):\n yield \"\".join(item)", "def get_combo(un_lit):\n\n done_lit = []\n li_count = len(un_lit)\n\n for letter in un_lit: # for each letter in the provided\n placeholder = 0\n for num in range(li_count) # for each pos in list\n if letter.index == placeholder:\n temp_lit = \n\n elif letter.index > placeholder:\n \n elif letter.index < placeholder:\n\n done_lit.append(temp_lit)\n placeholder += 1", "def all_words(self, all_possible_words):\n result = []\n for word in all_possible_words:\n result = result + [word[i:j]\n for i in range(len(word)) for j in range(i + 2, len(word) + 1)]\n\n return result", "def get_words(self, chars = None):\n if not self.branches.keys():\n return ['']\n\n if self.pre:\n def apre(word,letter):\n return letter + word\n else:\n def apre(word,letter):\n return word + letter\n\n if chars:\n sub = self.check(chars)\n if sub:\n return [apre(x,chars) for x in sub.get_words()]\n else:\n return []\n\n # If this node marks an existing word, pass back empty string to parent\n # nodes to rebuild this word separately from any derived compound words\n if self.exists:\n selfwordmarker = ['']\n else:\n selfwordmarker = []\n\n return [word for sublist in \\\n [[apre(word,key) for word in self.branches[key].get_words()]\\\n for key in self.branches.keys()]\\\n for word in sublist] + selfwordmarker", "def getSequence(word):\n\n # We'll construct our structure in sequence, and store\n # info about last character, consonant runs, and vowel runs.\n sequence = str()\n c_run = bool()\n v_run = bool()\n word = word.lower()\n\n vowels = set(['a', 'e', 'i', 'o', 'u'])\n\n for i, char in enumerate(word):\n # Handle vowels with y corner-cases.\n # If 'y' and preceded by a consonant, or a pure vowel\n if (c_run and char == 'y') or (char in vowels):\n # Account for the consonant run\n if c_run:\n sequence += 'C'\n c_run = False\n v_run = True\n # Else char is a consonant\n else:\n # Account for the vowel run\n if v_run:\n sequence += 'V'\n v_run = False\n c_run = True\n # Now account for the last character in the loop\n sequence += 'C' if c_run else 'V'\n\n return sequence", "def generate_alphabet_combinations(length: int = 2) -> List[str]:\n assert length > 0\n alphabets = 
string.ascii_lowercase\n\n return [\n ''.join(combination)\n for n in range(1, length+1)\n for combination in product(alphabets, repeat=n)\n ]", "def generate_words(word_size, letters):\r\n # Read file from dictionary text\r\n # Make a DICTIONARY base on word size:\r\n f = open('English.txt')\r\n dictionary = dict()\r\n for line in f:\r\n line = line.strip('\\n') #get rid of \\n at the end of line\r\n if dictionary.has_key(len(line)): \r\n dictionary[len(line)].append(line)\r\n else:\r\n dictionary[len(line)] = [line] #the line in [] is important\r\n\r\n # Go through the DICTIONARY with same word size\r\n # Check if the word has the characters in letter list:\r\n possible_words = []\r\n for word in dictionary[word_size]:\r\n for letter in word:\r\n if letter not in letters:\r\n correct = False\r\n break\r\n else: \r\n correct = True \r\n if (correct):\r\n possible_words.append(word) \r\n return possible_words", "def word_forms(self, word):\n result = set()\n for dic_name in self.dictionaries.keys():\n for vector in self.dictionaries[dic_name].word_forms(word):\n result.add(tuple(vector))\n return filter(lambda x: len(x), result)", "def letters():\n letters = \"BINGO\"\n for letter in letters:\n yield letter", "def allPossibleWords(Rack):\n def checkWord(word):\n return stringInRack(word,Rack)\n return filter(checkWord, Dictionary)", "def __get_all_possible_substrings(base_string):\n substrings = []\n for n in range(1, len(base_string) + 1):\n for i in range(len(base_string) - n + 1):\n substrings.append(base_string[i:i + n])\n return substrings", "def calculate_construction(self, word):\r\n \r\n construction = \"\"\r\n for c in word.lower():\r\n if c in self.vowels:\r\n construction += \"v\"\r\n elif c in letters:\r\n construction += \"c\"\r\n return construction", "def get_pure_user_words(user_words: List[str], letters: List[str], words_from_dict: List[str]) -> List[str]:\r\n unknown_words = []\r\n for wordd in user_words:\r\n if wordd not in words_from_dict:\r\n unknown_words.append(wordd)\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in unknown_words:\r\n if len(word) >= 4 and len(word) <= 9:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list", "def generate_strings(char_list, length):\n if length <= 0:\n yield []\n elif length == 1:\n for char in char_list:\n yield [char]\n else:\n for char in char_list:\n for l in generate_strings(char_list, length-1):\n yield [char] + l", "def allstrings2(alphabet, length):\n\n c = []\n for i in range(length):\n c = [[x]+y for x in alphabet for y in c or [[]]]\n\n return c", "def all_words(self):\n\n for char, child in self.children.items():\n if child.is_word:\n yield f\"{char}\"\n else:\n for each in child.all_words():\n yield char + each", "def ladder(word: str) -> List[str]:\n found_words = set()\n for i in range(len(word)):\n pattern = list(word)\n pattern[i] = '.'\n search_results = search(\"^\" + \"\".join(pattern) + \"$\")\n for result in search_results:\n if result != word:\n found_words.add(result)\n return found_words", "def pattern_list(k):\r\n p_list=[]\r\n for i in list(itertools.product('ACGT', repeat=k)):\r\n x = ''.join(i)\r\n p_list.append(x)\r\n 
return p_list", "def solve(puzzle, words):\r\n with open(words) as inp:\r\n word_docu = json.load(inp)\r\n word_collect = {word.upper() for word in word_docu[\"words\"]}\r\n\r\n with open(puzzle) as puzzle:\r\n letter_str = ''\r\n word_lst = []\r\n letter_verti = ''\r\n\r\n for line in puzzle:\r\n puzzle_str = line.rstrip() + ' '\r\n word_lst.append(puzzle_str)\r\n letter_str = letter_str + puzzle_str\r\n\r\n for i in range(len(word_lst)):\r\n letter_verti = letter_verti + ' '\r\n for word in word_lst:\r\n letter_verti = letter_verti + word[i]\r\n letter_str = letter_str + letter_verti\r\n\r\n words_puzzle = []\r\n\r\n for word in word_collect:\r\n if word in letter_str:\r\n words_puzzle.append(word)\r\n\r\n return words_puzzle", "def letterCombinations(self, digits: str) -> [str]:\n return Combinations(digits).ans", "def split_by_words(term):\n if not term:\n return []\n # make all chars in lower case\n term = term.lower()\n # main rules\n splitted_by_size = re.findall(re_words, term) or [term]\n # separators\n splitted_by_seps = [re.split(r'[_ @,.\\-()/№\\\"]', word) for word in splitted_by_size]\n # convert to simple array\n flat_list = [word for wordlist in splitted_by_seps for word in wordlist]\n # transliteration\n translitted = []\n for word in flat_list:\n try:\n translitted += custom_transliterate(word)\n translitted.append(word)\n translitted.append(translit(word, reversed=True))\n except Exception as e:\n logging.debug(\"Translit error: %s - %s\", str(e), word)\n # unique\n unique_list = list(set(translitted))\n return unique_list", "def __stringToLetters(self, words):\r\n li = []\r\n \r\n for word in words:\r\n li.append(list(word))\r\n \r\n return li", "def remove_two_letters(word):\n for i in range(len(word) - 1):\n first_part = word[:i]\n for j in range(i + 1, len(word)):\n yield first_part + word[i + 1:j] + word[j + 1:]", "def shuffle(word: str) -> List[str]:\n letters = list(word)\n while True:\n random.shuffle(letters)\n new_word = \"\".join(letters)\n if new_word != word:\n return letters", "def compute_possibles(letters, slots, dictionary_words, context):\n\n words = dictionary_words\n\n # if we have a known number of slots filter\n # our word list down to words w/ that manny letters\n if slots:\n words = ifilter(f.word_len(slots), words)\n\n # filter our word list down to words who's\n # letters are a subset of the given letters\n words = ifilter(f.letter_subset(letters), words)\n\n # we now have our final iterator of possible solutions\n return words", "def cheat(self) -> List[str]:\n all_possible_words = self.trie.get_all_possible_words(\n self.get_current_reels_letters()\n )\n better_words = OrderedDict()\n for word in all_possible_words:\n score = self.scorer.calculate_word_score(word)\n if len(better_words) > 2:\n first_word = next(iter(better_words.items()))\n if first_word[0] < score:\n better_words.popitem(last=False)\n better_words[score] = word\n else:\n better_words[score] = word\n better_words = OrderedDict(sorted(better_words.items()))\n return [f\"{word} ({score})\" for score, word in better_words.items()]", "def _setup(self, word) -> List[str]:\n if len(word) == 1:\n return [word]\n for prefix in self.constants.PREFIXES:\n if word.startswith(prefix):\n (first, rest) = string_utils.split_on(word, prefix)\n if self._contains_vowels(rest):\n return string_utils.remove_blank_spaces(\n self._process(first) + self._process(rest)\n )\n # a word like pror can happen from ellision\n return string_utils.remove_blank_spaces(self._process(word))\n if word in 
self.constants.UI_EXCEPTIONS.keys():\n return self.constants.UI_EXCEPTIONS[word]\n return string_utils.remove_blank_spaces(self._process(word))", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def solve(chars, length):\n return generate_greedy(generate_string_list(length, chars))", "def compile_word(word):\n # Your code here.\n if word.isalpha() and word.islower():\n return word\n if not word.isalpha():\n return word\n result = []\n mul = 1\n word = word[::-1]\n for w in word:\n if w.isalpha and w.isupper():\n result.append(str(mul) + '*' + w + \"+\")\n else:\n result.append(w)\n mul = mul*10\n ans = ''.join(result)\n return ans[:-1]", "def doubletwochars(word: str) -> Iterator[str]:\n\n if len(word) < 5:\n return\n\n # TODO: 1) for vacacation yields \"vacation\" twice, hunspell's algo kinda wiser\n # 2) maybe just use regexp?..\n for i in range(2, len(word)):\n if word[i-2] == word[i] and word[i-3] == word[i-1]:\n yield word[:i-1] + word[i+1:]", "def all_words_iterator(root,word = [],level=0,alpha_size=21):\n \n # If node is leaf node, it indicates end of string\n\n if root.isEndOfWord:\n yield ''.join(word)\n \n for i in range(alpha_size):\n # if NON NULL child is found \n # add parent key to str and \n # call the display function recursively \n # for child node \n if (root.children[i]):\n if level < len(word):\n word[level] = root.children[i].char \n else:\n word.append(root.children[i].char)\n yield from Trie.all_words_iterator(root.children[i],word,level+1)", "def get_word(letters):\r\n\r\n word = \"\"\r\n for letter in letters:\r\n word += letter \r\n \r\n return word", "def get_possible_vowels(self, word_set):\r\n \r\n vowels = \"\"\r\n for word in word_set:\r\n # Check if existing vowel is in word.\r\n if any(vowel in word for vowel in vowels):\r\n continue\r\n # Find most common letter and assume it's a vowel\r\n vowel, probability = '', 0\r\n for c in word:\r\n _, number = self.letters.get_value(c)\r\n if number > probability:\r\n vowel = c\r\n probability = number\r\n vowels += vowel\r\n return vowels", "def build(pattern, words, seen, list):\r\n return [word for word in words\r\n if re.search(pattern, word) and word not in seen.keys() and word not in list]", "def get_combinations(text):\n combinations = []\n arr = []\n slen = len(text)\n __find_factor(slen,slen,combinations,arr)\n \n elements = []\n for comb in combinations:\n tmp = [0] + comb\n elements.append([text[tmp[i]:tmp[i]+tmp[i+1]] for i in range(len(tmp)-1)])\n return elements", "def make_text(chains):\n words = []\n not_end_of_list = True\n # your code goes here\n \n # starts with a capital lettered word from source text\n capitalized_ngrams = []\n for key in chains.keys():\n # check if the first tuple in key[0][0]\n if key[0][0].isupper():\n capitalized_ngrams.append(key)\n \n \n\n selected_keys = list(capitalized_ngrams)\n count = 0\n while not_end_of_list:\n choice_n = choice(selected_keys)\n\n if count == 0:\n words.extend(choice_n)\n \n if chains[choice_n] and count <= 150:\n # as long as there is an option, picks a random element from dict list\n choose_next = choice(chains[choice_n])\n # adds new word to list\n words.append(choose_next)\n # creates a list of keys whose last item in tuple is item from list\n selected_keys = [x for x in chains.keys() if x == tuple([*choice_n[1:], 
choose_next])]\n # it is possible continues\n if selected_keys:\n pass\n else:\n not_end_of_list = False\n\n count += 1\n \n else:\n not_end_of_list = False\n\n return \" \".join(words)", "def jumbled(word_list, n):\n # Create the anagram \n selected = random.sample(word_list, n)\n bag = LetterBag(\"\")\n for word in selected:\n bag.merge(LetterBag(word))\n letters = list(bag.as_string())\n print(\"Letters: {}\".format(letters))\n random.shuffle(letters)\n result = \"\".join(letters)\n return result", "def can_make_word(word, letters):\n grouped_chars = group_input(word)\n for char in letters:\n\n if is_empty(grouped_chars):\n return True\n\n if char in grouped_chars and grouped_chars[char] > 0:\n grouped_chars[char] -= 1\n\n return is_empty(grouped_chars)", "def generate_wordnet_candidates(self, word):\n candidates = set()\n if self.check_if_replacable(word):\n for synset in wordnet.synsets(word):\n for lemma in synset.lemmas():\n converted = convert(lemma.name().lower(), word)\n if converted != word and converted != None:\n try:\n w1 = wordnet.synsets(word)[0]\n w2 = wordnet.synsets(converted)[0]\n similarity = w1.wup_similarity(w2)\n if isinstance(similarity,float) and w1.wup_similarity(w2) >0.6 :\n candidates.add(converted)\n except:\n pass\n # print(\"candidate\",word,candidates)\n return candidates", "def allstrings2(alphabet, length):\n c = []\n for i in range(length):\n c = [[x]+y for x in alphabet for y in c or [[]]]\n for value in c:\n \tfvalue = ''.join(value)\n \tprint fvalue\n return \"\"", "def alpha_chars_pairs (text):\n alpha_text = list (alpha_chars (text))\n return itertools.combinations (alpha_text)", "def generate_words(self) -> set:\n\n words = set()\n max_word_length = int(self._restrictions['max-word-length'])\n\n def _reduce(seq):\n if self.is_word(seq):\n if len(seq) <= max_word_length and seq not in words:\n words.add(seq)\n else:\n if self.get_terminals_count(seq) <= max_word_length:\n first_non_terminal = seq[\n self.get_first_non_terminal_index(seq)]\n\n for transition in sorted(\n self._transitions[first_non_terminal]):\n _reduce(seq.replace(first_non_terminal, transition))\n\n for entry_transition in sorted(self._transitions[\n self._starting_non_terminal]):\n _reduce(entry_transition)\n\n return words", "def all_reducible(word_dict):\n res = []\n for word in word_dict:\n t = is_reducible(word, word_dict)\n if t != []:\n res.append(word)\n return res", "def known(words: list[str]) -> list[str]:\n return [z for z in list(set(words)) if z in self.words]", "def fuzz_word(word):\r\n res = []\r\n\r\n for c in word:\r\n for i in range(3):\r\n c += random.choice(symbols)\r\n res.append(c)\r\n\r\n return ''.join(res)", "def all_words(root,word = [],level=0,alpha_size=21):\n # If node is leaf node, it indicates end of string\n if root.isEndOfWord:\n print(''.join(word))\n \n for i in range(alpha_size):\n # if NON NULL child is found \n # add parent key to str and \n # call the display function recursively \n # for child node \n if (root.children[i]):\n if level < len(word):\n word[level] = root.children[i].char \n else:\n word.append(root.children[i].char)\n Trie.all_words(root.children[i],word,level+1)", "def generate_transitional_modifications(self, word: str = \"\") -> list: # noqa: C901\n possible_modifications = []\n\n # Create a list of possible initial words\n word_list = [word]\n\n # Replace Umlaute to handle things like \"H_ä_userschlucht\"\n # If the first letter is an Umlaut, it is not going to be changed\n if 'ä' in word and not word[0].lower() == 'ä':\n 
tmp_word = word.replace(u'ä', 'a')\n word_list.append(tmp_word)\n\n if 'ö' in word and not word[0].lower() == 'ö':\n tmp_word = word.replace(u'ö', 'o')\n word_list.append(tmp_word)\n\n if 'ü' in word and not word[0].lower() == 'ü':\n tmp_word = word.replace(u'ü', 'u')\n word_list.append(tmp_word)\n\n for word in word_list:\n # Consider the unmodified word lowered and capitalized as possible modifications\n possible_modifications.append(word.lower())\n possible_modifications.append(word.capitalize())\n\n \"\"\"\n M O D I F Y W O R D S\n Noun Rules\n \"\"\"\n # If not last letter is 's'\n # Remove s\n # Remove s, add e\n if word[-1:] == \"s\": #\n # action = [\"-s\"]\n # action2 = [\"-s\", \"+e\"]\n possible_modifications.append(word[:-1].lower())\n possible_modifications.append(word[:-1].lower() + \"e\")\n\n possible_modifications.append(word[:-1].capitalize())\n possible_modifications.append(word[:-1].capitalize() + \"e\")\n\n # If not last letter is 'e'\n # Add e\n if not word[-1:] == \"e\": # Kirch|turm (Kirch) -> (Kirche)\n # action = [\"+e\"]\n possible_modifications.append(word.lower() + \"e\")\n possible_modifications.append(word.capitalize() + \"e\")\n\n # If not last letter is 'n'\n # Add n\n if word[-1:] == \"n\": # Hasen|braten (Hasen) -> (Hase)\n # action = [\"-n\"]\n possible_modifications.append(word[:-1].lower())\n possible_modifications.append(word[:-1].capitalize())\n\n # If last letter IS 'e'\n # Remove e\n if word[-1:] == \"e\": # Hunde|hütte (Hunde) -> (Hund)\n # action = [\"-e\"]\n possible_modifications.append(word[:-1].lower())\n possible_modifications.append(word[:-1].capitalize())\n\n # If word ends on \"en\"\n # Remove \"en\"\n if word[-2:] == \"en\": # Taten|drang (Taten) -> (Tag)\n # action = [\"-en\"]\n possible_modifications.append(word[:-2].lower())\n possible_modifications.append(word[:-2].capitalize())\n\n # If word ends on \"er\"\n # Remove \"er\"\n if word[-2:] == \"er\": # Bücher|Regal (Bücher/Bucher) -> (Büch/Buch)\n # action = [\"-er\"]\n possible_modifications.append(word[:-2].lower())\n possible_modifications.append(word[:-2].capitalize())\n\n # If word ends on \"ns\"\n # Remove \"ns\"\n if word[-2:] == \"ns\": # Glaubens|frage (Glaubens) -> (Glaube)\n # action = [\"-ns\"]\n possible_modifications.append(word[:-2].lower())\n possible_modifications.append(word[:-2].capitalize())\n\n # If word ends on \"ens\"\n # Remove \"ens\"\n if word[-3:] == \"ens\": # Herzens|güte (Herzens) -> (Herz)\n # action = [\"-ens\"]\n possible_modifications.append(word[:-3].lower())\n possible_modifications.append(word[:-3].capitalize())\n\n # If ends on \"es\"\n # Remove \"es\"\n if word[-2:] == \"es\": # Kindes|wohl (Kindes) -> (Kind)\n # action = [\"-es\"]\n possible_modifications.append(word[:-2].lower())\n possible_modifications.append(word[:-2].capitalize())\n\n \"\"\"\n Verb Rules\n \"\"\"\n # If word does not end on \"en\" and not on \"e\"\n # Add -en\n if not word[-2:] == \"en\" and not word[-1:] == \"e\":\n # action = [\"+en\"]\n possible_modifications.append(word.lower() + \"en\")\n possible_modifications.append(word.capitalize() + \"en\")\n\n # If word ends on \"en\" PR word ends on \"em\"\n # Add -en, remove -e- in context of n, m)\n # This is totally different to the NOUN rule above\n if word[-2:] == \"en\" or word[-2:] == \"em\":\n # action = [\"+n\", \"+en\"]\n possible_modifications.append(word[:-2].lower() + word[-1:] + \"en\")\n possible_modifications.append(word[:-2].capitalize() + word[-1:] + \"en\")\n\n # If word does not end on \"n\"\n # Add -n\n if 
not word[-1:] == \"n\":\n # action = [\"+n\"]\n possible_modifications.append(word.lower() + \"n\")\n possible_modifications.append(word.capitalize() + \"n\")\n\n # modification is valid if:\n # - not in stopwords\n # - len > 2\n # - not in forbidden modifier list\n # - in lemma list\n\n possible_modifications = [w for w in possible_modifications if w.lower() not in self.stop_words\n and len(w) > 2\n and str(w) in self.lemma_data]\n\n return possible_modifications", "def word_to_chars(self, word):\n chars = list()\n if word == self.eos or word == self.sos:\n chars.append(self.char_to_id[word])\n else:\n word = \"^\" + word + \"$\"\n for ch in word:\n flag = 1\n if ch in self.unk_char_list:\n flag = random.randint(0, 1)\n if ch in self.char_to_id and flag == 1:\n chars.append(self.char_to_id[ch])\n else:\n chars.append(self.char_to_id['<unk>'])\n return chars", "def generate(base):\n if base == '':\n yield base\n else:\n for character in JugglerPassGen.dictionary(base[0]):\n for rest in JugglerPassGen.generate(base[1:]):\n yield character + rest", "def generateWord(self, parameters=None):\n\t\t# Initial set-up\n\t\tvowels = ['a', 'e', 'i', 'o', 'u']\n\t\tspecialVowels = ['y']\n\n\t\tconsonants = ['b', 'c', 'd', 'f', 'g', 'h', 'k', 'l', 'm', 'n', 'p', 'r', 's', 't']\n\t\tspecialConsonants = ['j', 'q', 'v', 'w', 'x', 'z']\n\n\t\tnewLetterFraction = 5\n\t\tvowelChance = 50 #percent\n\n\t\t#Determine how many words we're going to have to generate\n\t\trepeats = 1\n\t\tif parameters and len(parameters) > 0:\n\t\t\trepeats = SharedFunctions.parseInt(parameters[0], 1, 1, 25)\n\n\t\twords = []\n\t\tfor i in xrange(0, repeats):\n\t\t\tword = u\"\"\n\t\t\tcurrentVowelChance = vowelChance\n\t\t\tcurrentNewLetterFraction = newLetterFraction\n\t\t\tconsonantCount = 0\n\t\t\twhile random.randint(0, currentNewLetterFraction) <= 6:\n\t\t\t\tif random.randint(1, 100) <= currentVowelChance:\n\t\t\t\t\tconsonantCount = 0\n\t\t\t\t\t#vowel. Check if we're going to add a special or normal vowel\n\t\t\t\t\tif random.randint(1, 100) <= 10:\n\t\t\t\t\t\tword += random.choice(specialVowels)\n\t\t\t\t\t\tcurrentVowelChance -= 30\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(vowels)\n\t\t\t\t\t\tcurrentVowelChance -= 20\n\t\t\t\telse:\n\t\t\t\t\tconsonantCount += 1\n\t\t\t\t\t#consonant, same deal\n\t\t\t\t\tif random.randint(1, 100) <= 25:\n\t\t\t\t\t\tword += random.choice(specialConsonants)\n\t\t\t\t\t\tcurrentVowelChance += 30\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(consonants)\n\t\t\t\t\t\tcurrentVowelChance += 20\n\t\t\t\t\tif consonantCount > 3:\n\t\t\t\t\t\tcurrentVowelChance = 100\n\t\t\t\tcurrentNewLetterFraction += 1\n\n\t\t\t#Enough letters added. 
Finish up\n\t\t\tword = word[0].upper() + word[1:]\n\t\t\twords.append(word)\n\n\t\t#Enough words generated, let's return the result\n\t\treturn u\", \".join(words)", "def extrachar(word: str) -> Iterator[str]:\n if len(word) < 2:\n return\n\n for i in range(0, len(word)):\n yield word[:i] + word[i+1:]", "def easy_words(a_list):\n\n easy_list = [word for word in a_list if len(word) in range(4,7)]\n return easy_list", "def words_uses_only(letters):\n\treturn {w for w in word_set if uses_only(w, letters)}", "def crossword_words(crossword: list) -> list:\n pass", "def get_all_words(self):\n words = []\n \n ______________________________________________\n \n words.append(self.root)\n \n for branch in self.branches.values():\n \n __________________________________________\n \n return _______________________________________", "def anagrams(word):\n\t# Question 4b: Generates all permutations of word and filters it to contain only valid words\n\treturn word_perms(word) & word_sets[len(word)]", "def create_word(char_list):", "def generate_letter_maps(self):\n\n word_count = len(self.words)\n last_percent = 0\n\n # Do no-blank words.\n for i, word in enumerate(self.words):\n letters = \"\".join(sorted(set(word)))\n self.letters_map[letters].append(word)\n\n # Do one-blank words.\n for subword in self.remove_one_letter(letters):\n self.letters_map_one_blank[subword].append(word)\n\n # Do two-blank words.\n for subword in self.remove_two_letters(letters):\n self.letters_map_two_blanks[subword].append(word)\n\n # Show progress information.\n percent = int(i*100/word_count)\n if percent/10 != last_percent/10:\n print \" %d%%\" % percent\n last_percent = percent", "def sub_words(word):\n sub_words_lst = []\n for i in range(len(word)):\n sub_word = word[:i]+word[i+1:]\n sub_words_lst.append(sub_word)\n return sub_words_lst", "def mismatch(word, mismatches):\n for d in range(mismatches+1):\n for locs in itertools.combinations(range(len(word)), d):\n this_word = [[char] for char in word]\n for loc in locs:\n orig_char = word[loc]\n this_word[loc] = [base for base in \"ACGTN\" if base != orig_char]\n for poss in itertools.product(*this_word):\n yield \"\".join(poss)", "def descramble(word, tree):\n match = []\n \n # Helper functions\n dictionaryTree = firstLetter(scrambled_word, tree)\n doubleLetter(scrambled_word, dictionaryTree)\n wordLength(scrambled_word, dictionaryTree)\n \n # Creates a tree out of the pruned dictionaryTree\n tree = BinarySearchTree(dictionaryTree)\n \n for words in dictionaryTree:\n wordLetters = str.split(word)\n count = 0\n for letters in wordLetters:\n if letters == word[count]:\n count += 1\n if count == len(word):\n match.append(word)\n return match", "def split_word_in_all_comps(self, term: str) -> List[str]:\n all_stems = []\n\n words = term.split()\n for word in words:\n stems = self.decompound(word)\n all_stems.extend(stems)\n\n for stem in stems:\n more_stems = self.split_word_in_all_comps(stem)\n all_stems.extend(more_stems)\n\n return all_stems", "def perm_gen_lex(a):\n\n if a == '':\n return []\n elif len(a) == 1:\n return [a]\n else:\n perm_list = []\n \"\"\"For each character in the input string\"\"\"\n for i in range(len(a)):\n\n \"\"\"Form a simpler string by removing the character from the input string\n Generate all permutations of the simpler string recursively\"\"\"\n if i == len(a)-1:\n simple_string = a[:i]\n else:\n simple_string = a[:i] + a[(i+1):]\n\n simple_string_list = perm_gen_lex(simple_string)\n\n \"\"\"Add the removed character to the front of each 
permutation of the simpler string, and\n add the resulting permutation to the list\"\"\"\n for val in simple_string_list:\n perm_list.append(a[i] + val)\n\n return perm_list", "def get_all(splitted_string, word_ngram, sort_ngrams=False):\n for ngram in range(1, word_ngram + 1):\n for word_pos in range(len(splitted_string) - ngram + 1):\n if sort_ngrams:\n yield (\"_\".join(sorted(splitted_string[word_pos:word_pos + ngram])))\n else:\n yield (\"_\".join(splitted_string[word_pos:word_pos + ngram]))", "def check_words_in_trie(self, trie, words):\n result = []\n # get the unique combinations for our search\n word_set = set(words)\n print('The Number of possible combinations is:', len(words), '.\\n The Number of unique combinations is:',\n len(word_set), '.')\n for word in word_set:\n checked = self.in_trie(trie, word)\n if checked:\n result.append(checked)\n return result", "def lookup_pronunciations_for_word(word: Text) -> Sequence[Word]:\n return EnglishUtils.all_possible_forms_for(word)", "def constrAlphabet(i) :\n \n global alphabet\n \n for j in range(0,len(i)) :\n if i[j] not in i[:j] :\n alphabet.append(i[j])", "def _internal_contains(self, letters):\n len_letters = len(letters)\n for word in self.words:\n len_word = len(word)\n word_with_letters_removed = WordUtils.remove_letters_from_word(word, letters)\n if len(word_with_letters_removed) == (len_word - len_letters):\n yield word", "def get_combinations_regexp(values):\n result = []\n for value in values:\n result.extend([value.lower(), value.upper(), value.title()])\n return '|'.join(result)", "def make_text(chains):\n\n text = []\n nchars = 0\n\n # Starting ngram (as tuple), first word in tuple must be uppercase\n start = choice(get_uppercase(chains))\n\n # Add starting ngram to text list\n text.extend(start)\n\n # Add length of words in first bigram and two spaces to nchars\n nchars += len(start[0]) + len(start[1]) + 2\n\n while nchars < 119:\n # Choose next word randomly from list\n new_word = choice(chains[start])\n\n # add length of new word to nchars\n # add one for space between words\n nchars += len(new_word) + 1\n\n if nchars > 120:\n break\n else:\n # Add new word to text list\n text.append(new_word)\n\n # Generate tuple for next ngram\n new_key = start[1:] + (new_word,)\n\n # Break out of loop if bigram doesn't exist\n if new_key in chains:\n start = new_key\n else:\n break\n\n text.append(\"#hackbrightgracejan17\")\n\n # Find last sentence punctuation in text\n text_string = ' '.join(text)\n\n # period = text_string.rfind('.')\n # exclamation = text_string.rfind('!')\n # question = text_string.rfind('?')\n\n # largest = max(period, exclamation, question)\n\n # # Remove everything after the last punctuation, if there is anything\n # if len(text_string) == largest+1:\n # return text_string\n # else:\n # return text_string[:largest+1]\n\n return text_string", "def generate_pairs_of_words(word_list):\n def pair_words(word_list, i, j, connector):\n return word_list[i] + connector + word_list[j]\n pairs = []\n n = len(word_list)\n for i in range(n-1):\n for j in range(i+1, n):\n pairs.append(pair_words(word_list, i, j, ' '))\n pairs.append(pair_words(word_list, j, i, ' '))\n pairs.append(pair_words(word_list, i, j, '-'))\n pairs.append(pair_words(word_list, j, i, '-'))\n pairs.append(pair_words(word_list, i, j, '_'))\n pairs.append(pair_words(word_list, j, i, '_'))\n pairs.append(pair_words(word_list, i, j, ''))\n pairs.append(pair_words(word_list, j, i, ''))\n outputs = list(set(pairs)) # remove duplicates\n return outputs", 
"def generate_word(model, whitelist, topk, radix=\"\"):\n if whitelist.empty():\n yield \"\", 0\n else:\n for prefix, prefix_proba in iterate_continutations(model, radix, whitelist, topk):\n if prefix is None:\n continue\n for suffix, suffix_proba in generate_word(\n model,\n whitelist.sub(LetterBag(prefix)),\n topk,\n radix + prefix):\n if suffix is None:\n continue\n yield prefix + suffix, prefix_proba + suffix_proba\n yield None, 0", "def find_words_using_all_vowels():\n pass", "def generate_solutions(possible_words, labels):\r\n return []", "def zip_letters(xl, yl, dxl, dyl, rl, word):\n return (\n ([pl.pop(0) if pl else None for pl in (xl, yl, dxl, dyl, rl)], char)\n for char in word)" ]
[ "0.8034503", "0.7417961", "0.7359351", "0.7112968", "0.7106626", "0.68921024", "0.6748245", "0.6620748", "0.64630806", "0.63716835", "0.6366732", "0.6345676", "0.63174015", "0.6314541", "0.62938344", "0.6261687", "0.62095034", "0.6180001", "0.61566484", "0.61404824", "0.6124511", "0.61240244", "0.61183274", "0.6101016", "0.6099071", "0.60719234", "0.6065209", "0.6055909", "0.6048719", "0.60304874", "0.598222", "0.5970538", "0.59676147", "0.5957371", "0.59540814", "0.5943494", "0.59353626", "0.59215784", "0.5914204", "0.59112406", "0.5908138", "0.58849245", "0.58747566", "0.58661336", "0.58624977", "0.5853891", "0.5849905", "0.58281446", "0.5825385", "0.5825009", "0.57978505", "0.5796515", "0.57833225", "0.5779818", "0.5770146", "0.5765249", "0.57650566", "0.57296515", "0.57028246", "0.5696047", "0.56889236", "0.568278", "0.5680318", "0.5666916", "0.5664408", "0.5655494", "0.5654373", "0.56442195", "0.5644095", "0.56433", "0.5638454", "0.56357056", "0.56324065", "0.56287867", "0.56277776", "0.5623246", "0.5621328", "0.5618034", "0.56037426", "0.5601706", "0.55941993", "0.5593722", "0.5587311", "0.5577485", "0.5572226", "0.55697244", "0.5565721", "0.5561572", "0.5548633", "0.55332404", "0.5531894", "0.55207694", "0.5519411", "0.5518894", "0.5518329", "0.5506947", "0.5495047", "0.5471838", "0.5470177", "0.5466464" ]
0.7376517
2
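Several of the negative snippets above are variations on the same recursive `gen_all_strings` idea: build every string that can be formed from the tail of the word, then insert the first letter at every position of each of those strings. A minimal self-contained sketch of that pattern follows; the function name mirrors the snippets, and the example call is illustrative only.

```python
def gen_all_strings(word):
    # Base case: the empty word yields only the empty string.
    if not word:
        return [""]

    first, rest = word[0], word[1:]
    rest_strings = gen_all_strings(rest)

    # Insert `first` at every position of every string built from the rest.
    with_first = [
        s[:i] + first + s[i:]
        for s in rest_strings
        for i in range(len(s) + 1)
    ]
    return rest_strings + with_first


# Example: gen_all_strings("ab") -> ['', 'b', 'a', 'ab', 'ba'] (order may vary)
```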
Load word list from the file named filename. Returns a list of strings.
def load_words(filename):
    # Map the local filename to its CodeSkulptor asset URL and fetch the word list.
    url = codeskulptor.file2url(filename)
    netfile = urllib2.urlopen(url)
    words_file = netfile.readlines()
    # Drop the trailing "\r\n" from each line so only the bare word remains.
    words = [word[:-2] for word in words_file]
    return words
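For comparison, a minimal local-file variant of the same idea reads one word per line from a plain text file on disk instead of a CodeSkulptor asset URL. The function name and the one-word-per-line assumption are illustrative, not taken from the record above.

```python
def load_words_from_file(filename):
    """Load a word list from a local text file; return a list of strings."""
    words = []
    with open(filename, "r") as word_file:
        for line in word_file:
            word = line.strip()   # drop the newline and surrounding whitespace
            if word:              # skip blank lines
                words.append(word)
    return words
```

Using `strip()` rather than fixed-width slicing keeps the loader correct whether the file uses "\n" or "\r\n" line endings.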
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_wordlist(filename):\n with open(filename) as f:\n \tdata = f.read().splitlines()\n return data", "def loadWords():\n inFile = open(wordFile, 'r')\n wordlist = []\n for line in inFile:\n wordlist.append(line)\n return wordlist", "def loadWords():\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n return wordlist", "def loadWords() -> List[str]:\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print(\" \", len(wordList), \"words loaded.\")\n\n return wordList", "def load_words():\r\n## print \"Loading word list from file...\"\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r', 0)\r\n # wordlist: list of strings\r\n wordlist = []\r\n for line in inFile:\r\n wordlist.append(line.strip().lower())\r\n## print \" \", len(wordlist), \"words loaded.\"\r\n return wordlist", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def load_words(filename):\n url = codeskulptor.file2url(filename)\n word_file = urllib2.urlopen(url)\n \n all_words = []\n for line in word_file.readlines():\n all_words.append(line.strip())\n \n \n return all_words", "def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist", "def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist", "def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist", "def load_words():\n print\n \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print\n \" \", len(wordlist), \"words loaded.\"\n return wordlist", "def load_words():\n \n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordlist: list of strings\n wordlist = []\n for line in inFile:\n 
wordlist.append(line.strip().lower())\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist", "def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist", "def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print(\" \", len(wordList), \"words loaded.\")\n return wordList", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list 
of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words(filename):\r\n url = codeskulptor.file2url(filename)\r\n netfile = urllib2.urlopen(url)\r\n ans = []\r\n for line in netfile.readlines():\r\n ans.append(line[:-1])\r\n return ans", "def load_words():\n print \"Loading word list from file...\"\n in_file = open(WORDLIST_FILENAME, 'r', 0)\n line = in_file.readline()\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist", "def load_words(filename):\n url = codeskulptor.file2url(filename)\n netfile = urllib2.urlopen(url)\n \n words = []\n for line in netfile.readlines():\n words.append(line.replace('\\n',''))\n \n return words", "def read_txt(filename):\n file_object = open(filename, 'r')\n file_as_string = file_object.read()\n return create_word_list(file_as_string)", "def load_words():\n print(\"Loading word list from file..\")\n WORDLIST_FILENAME = \"words.txt\"\n # with open('words.txt', 'r') as f:\n # inFile = f.read()\n inFile = open(WORDLIST_FILENAME, 'r')\n wordlist = []\n\n for line in inFile:\n wordlist.append(line.strip().lower())\n return wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"Loading word list from 
file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words_from_file(filename):\n f = open(filename, \"r\")\n file_content = f.read()\n f.close()\n wds = file_content.split()\n return wds", "def loadWords():\r\n print(\"Loading word list from file...\")\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r')\r\n # line: string\r\n line = inFile.readline()\r\n # wordlist: list of strings\r\n wordList = line.split()\r\n\r\n print(\" \", len(wordList), \"words loaded.\")\r\n return wordList", "def loadWords():\n print(\"Loading word list from file...\")\n inFile = open(WORDLIST_FILENAME, 'r')\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def read_words(filename):\n # load assets\n word_file = urllib2.urlopen(filename)\n \n # read in files as string\n words = word_file.read()\n \n # template lines and solution lines list of line string\n word_list = words.split('\\n')\n print \"Loaded a dictionary with\", len(word_list), \"words\"\n return word_list", "def loadWords():\n print \"Loading word list from file...\"\n inFile = open(WORDLIST_FILENAME, 'r')\n wordList = inFile.read().split()\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def loadWords():\n print \"Loading word list from file...\"\n inFile = open(WORDLIST_FILENAME, 'r')\n wordList = inFile.read().split()\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def loadWords():\n print \"Loading word list from file...\"\n inFile = open(WORDLIST_FILENAME, 'r')\n wordList = inFile.read().split()\n print \" \", len(wordList), \"words loaded.\"\n return wordList", "def load_words():\n print(\"Loading word list from file...\")\n inFile = open(WORDLIST_FILENAME, 'r')\n line = inFile.readline()\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def load_words():\n print(\"Loading word list from file...\")\n inFile = open(WORDLIST_FILENAME, 'r')\n line = inFile.readline()\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def import_words(file_name):\n with open(file_name) as word_list:\n words = []\n for line in word_list:\n number, word = line.strip().split(\"\\t\")\n words.append(word.strip())\n # print(f\"Imported {(len(word_dict))} words\")\n\n return words", "def read_words(filename):\n # load assets\n word_file = urlopen(filename)\n \n # read in files as string\n words = word_file.read()\n \n # template lines and solution lines list of line string\n # if the input value is '\\n' then TypeError: a bytes-like object is required, not 'str'\n word_list = words.split(b'\\n')\n word_list = [word.decode('ascii') for word in word_list]\n print(\"Loaded a dictionary with\", len(word_list), \"words\")\n return word_list", "def load_wordlist(filename):\n # YOUR CODE HERE\n words = {}\n f = open(filename, 'rU')\n text = f.read()\n text = text.split('\\n')\n for line in text:\n words[line] = 1\n f.close()\n return words", "def load_words(silent=False):\n if(not silent):\n print(\"Loading word list from file...\")\n global WORDLIST_FILENAME\n # inFile: filek\n if os.path.exists(\"psets/4/words.txt\"):\n WORDLIST_FILENAME = \"psets/4/\"+WORDLIST_FILENAME\n inFile = open(WORDLIST_FILENAME, 'r')\n\n # line: string\n line = 
inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist", "def loadWords():\n print(\"-\" * 20)\n print(\"Loading word list from file...\")\n\n inFile = open(WORDLIST_FILENAME, \"r\")\n line = inFile.readline()\n wordList = line.split()\n print(len(wordList), \"words loaded.\")\n return wordList", "def load_words(filename):\n\n url = codeskulptor.file2url(WORDFILE)\n netfile = urllib2.urlopen(url)\n data = netfile.read()\n return data", "def read_file_to_list(filename):\n with open(os.path.join(DIRECTORY, filename), \"r\") as f:\n return [word.strip() for word in f.readlines()]", "def read_word_file(self, filename):\n words = []\n try:\n file = open(filename, 'rt', encoding='utf8')\n words = [word[:-1] for word in file.readlines()]\n\n except Exception as e:\n print(f'[-] Error occurred while reading word file: {e}')\n\n return words", "def load_words():\r\n \r\n my_file = open(\"words.txt\")\r\n words = my_file.read()\r\n words_list = words.split(\" \")\r\n return (words_list)\r\n my_file.close()", "def load_file(path_to_file) -> list:\r\n\tif not os.path.exists(path_to_file):\r\n\t\tlg.critical('The file %s doesn\\'t exist !' % path_to_file)\r\n\t\tsys.exit()\r\n\r\n\tfile = open(path_to_file, 'r')\r\n\twords = [word for word in file.read().split('\\n') if len(word) > 1]\r\n\tfile.close()\r\n\r\n\treturn words", "def load(self, file_name):\n try:\n [self.add_word(w) for w in open(file_name).read().splitlines()]\n except IOError as e:\n print(e)", "def load_words():\n with open(DICTIONARY) as f:\n return [line.strip() for line in f]", "def load(filename: str) -> list:\n try:\n with open(filename) as in_file:\n loaded_txt = in_file.read().strip().split(\"\\n\")\n loaded_txt = [x.lower() for x in loaded_txt]\n return loaded_txt\n except IOError as e:\n print(\"{}\\nError opening {}. 
Terminating program.\".format(e, filename))", "def get_word_list(filename):\n f = open(filename,'r')\n word_list = list()\n for line in f:\n for word in line.split():\n word_list.append(word.lower().strip())\n return word_list", "def load_input_word_list(file_path):\n if not os.path.isfile(file_path):\n return False\n\n word_list = list()\n\n with open(file_path, 'r') as fp:\n while True:\n line = fp.readline()\n if not line:\n break\n\n data = line.split(' ')\n text = data[0].lower().strip(Setting.NONWORD_CHARACTERS)\n\n if not text:\n continue\n\n text = text.replace('_', ' ')\n\n score = float(data[1])\n\n if score < 0:\n kind = WordKindEnum.NEG\n else:\n kind = WordKindEnum.POS\n\n word = Word(text, score, kind)\n word_list.append(word)\n\n return word_list", "def loadList(file_name):\n with open(file_name) as f:\n l = [line.strip() for line in f]\n return l", "def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = split(line)\n print \" \", len(wordlist), \"words loaded.\"\n print 'Enter play_hangman() to play a game of hangman!'\n return wordlist", "def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = split(line)\n print \" \", len(wordlist), \"words loaded.\"\n print 'Enter play_hangman() to play a game of hangman!'\n return wordlist", "def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = split(line)\n print \" \", len(wordlist), \"words loaded.\"\n print 'Enter play_hangman() to play a game of hangman!'\n return wordlist", "def load_words(file_path: str) -> List[Word]:\n \n words = load_words_raw(file_path)\n \n \n words = remove_stop_words(words)\n\n \n words = remove_duplicates(words)\n \n return words", "def load_words():\n f = open('words.txt', 'r')\n words_list = f.readlines()\n f.close()\n split_words_list = words_list[0].split(' ')\n return split_words_list", "def loadWords(self):\n print \"Loading word list from file...\"\n\n __readingParam = 'r'\n __inputFile = open(WORDLIST_FILENAME, readingParam)\n\n __lineReader = inputFile.readline()\n __wordlist = string.split(lineReader)\n\n print \" \", len(wordlist), \"words loaded.\"\n\n return random.choice(wordlist)", "def read_words(filename):\n with open(filename, encoding=\"utf-8\") as file:\n words = file.read().splitlines()\n return words", "def loadListFromFile (filename):\n retval = []\n filename = os.path.expanduser (filename)\n if not os.path.exists (filename):\n print(\"Error: file '%s' does not exist.\"%(filename))\n raise RuntimeError(\"Bad filename\")\n source = open (filename, 'r') \n for line in source.readlines():\n line = re.sub (r'#.+$', '', line) # remove comment characters\n line = line.strip()\n if len (line):\n retval.append (line)\n source.close()\n return retval", "def read_list_words(infile):\n\twords = []\n\tfin = open(infile)\n\tfor line in fin:\n\t\twords.append(line.strip())\n\treturn words", "def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'", "def read_word_list(file_name):\r\n\twith open(file_name) as word_list_file:\r\n\t\treturn set(word.strip() for word in word_list_file)", "def 
read_data(filename,words):\n try:\n f = open(filename)\n reader = f.read().splitlines()\n for line in reader:\n #print(line[0])\n words.add(line.lower())\n f.close()\n except IOError:\n print 'Input file reading failed,'\n return words", "def get_word_list(file_name):\n\tnew_list = []\n\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\tend_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\twhile lines[end_line].find('End of the Project Gutenberg EBook') == -1:\n\t\tend_line -= 1\n\tlines = lines[curr_line + 1:end_line]\n\n\tlong_lines = ''.join(str(e) for e in lines)\n\tlong_lines = long_lines.lower()\n\tlong_lines = long_lines.translate(None, punctuation)\n\n\twords = long_lines.split()\n\tfor item in words:\n\t\tnew_list.append(item)\n\n\treturn new_list", "def load(filename):\n try:\n with open(filename) as in_file:\n loaded_txt = in_file.read().strip().split(\"\\n\")\n loaded_txt = [x.lower() for x in loaded_txt]\n return loaded_txt\n except IOError as e:\n print(\"{}\\nError opening {}. Terminating program.\".format(e, filename))\n # sys.exit(1)", "def LoadListFile(file):\n\tlst = []\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tlst.append(line)\n\texcept:\n\t\treturn []\n\treturn lst", "def get_word_list(file_name):\n\tbook = get_file_text(file_name)\n\tbook = strip_header(book)\n\tbook = strip_punctuation(book)\n\tbook = book.lower()\n\twords = re.split(r'\\s+', book)\n\treturn words", "def get_word_list(file_name):\n\n\tstoryEdit = []\n\n\t#Reads the file starting after the beginning\t\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\tlines = lines[curr_line+1:]\n\n\n\t#Loops through each row, making everything lowercase and replacing all punctuation\n\tfor row in lines:\n\t \trow = row.lower()\n\t \trow = row.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\t \tstoryEdit += row.split()\n\n\n\t#Returns the final list as \n\treturn storyEdit", "def fetch_words(filename):\n data = [] #empty list\n with urlopen(filename) as story:\n for line in story:\n words = line.decode('utf-8').split() #must decode into strings and then separate with spaces\n #print(lists)\n for word in words:\n data.append(word)\n return(data)", "def _get_wordlist(file_name):\n ifile = codecs.open(file_name, 'r', encoding='utf-8')\n for _ in range(int(ifile.__next__())):\n yield (ifile.__next__().strip() for _ in range(int(ifile.__next__())))", "def file_to_list(filename, dir=\"../resources\"):\n os.chdir(dir)\n vocabulary = []\n f = open(filename, \"r\")\n lines = f.readlines()\n for line in lines:\n vocabulary.append(line.replace(\"\\n\", \"\"))\n return vocabulary", "def get_word_list(file_name):\n file_ = open(file_name, 'r')\n lines = file_.readlines()\n\n start_line = 0\n while lines[start_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n start_line += 1\n\n lines = lines[start_line+1:]\n\n end_line = 0\n while lines[end_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n end_line += 1\n\n lines = lines[:end_line-3]\n\n list_ = ' '.join(lines)\n list_ = str.lower(list_)\n list_ = list_.translate(None, string.punctuation)\n list_ = list_.split()\n\n return list_", "def load_text_words(filename):\n text_words = []\n with open(filename) as f:\n for line in f:\n line = line.strip()\n items = line.split()\n items = 
list(map(str.lower, items))\n items = list(map(lambda x: x.strip('.,[]!?;:'), items))\n text_words.extend(items)\n # return a list of words from the text.\n return text_words", "def get_words(filepath: str = \"words.txt\") -> list:\n fpath = Path(filepath)\n if not fpath.exists():\n raise FileNotFoundError(f\"Specified dictionary ({filepath}) not found\")\n\n if fpath.is_dir():\n raise ValueError(\"Filepath is a folder, not a file\")\n\n with fpath.open() as f:\n words = list(set([x.strip() for x in f.readlines()]))\n\n return words", "def create_english_word_list(filename):\n global global_english_word_list\n\n if not global_english_word_list:\n with open(filename) as f:\n for line in f:\n global_english_word_list.append(re.sub(r'\\s+', '', line))", "def load_cows(filename):\r\n print(\"Loading words from file...\")\r\n # inFile: file\r\n inFile = open(filename, 'r')\r\n # wordlist: list of strings\r\n wordlist = {}\r\n for line in inFile:\r\n cow = line.split(',')\r\n wordlist[cow[0]] = int(cow[1]) # 0: name, 1: weight\r\n inFile.close()\r\n print(\" \", len(wordlist), \"words loaded.\")\r\n return wordlist", "def read_words(filename):\r\n\ttry:\r\n\t\twith open(filename) as f_obj:\r\n\t\t\tcontents = f_obj.read()\r\n\texcept FileNotFoundError:\r\n\t\tpass\r\n\telse:\r\n\t\t\"\"\"Print the words in the file.\"\"\"\r\n\t\tprint(contents)", "def load_dictionary(filename):\n\n word_list = []\n freq_sum = 0\n\n # nacitanie zo suboru\n with open(filename) as f:\n for line in f:\n freq, val = line.split()\n word_list.append(Word(int(freq), val))\n freq_sum += int(freq)\n\n # lexikograficke usporiadanie slov\n word_list_sorted = sorted(word_list, key=operator.attrgetter('value'))\n\n return word_list_sorted, freq_sum", "def importBrainstormWordsFile(filename):\n #init the list with all words in the file\n allWords = []\n \n #open the brainstorming words file and read the lines\n with open(filename, 'r') as fp:\n lines = fp.read().splitlines()\n \n #split the lines for the idiots that didn't read the instructions and add them to the output\n for curLine in lines:\n if curLine.startswith('Please type one'):\n continue\n cutLines = curLine.replace(',',' ').split()\n \n #cycle the word and add them\n for curWord in cutLines:\n allWords.append(curWord.strip().lower())\n \n return allWords", "def loadFromFile(self, filename):\n\t\treturn []", "def importDictionary():\n with open('res/dictionary.txt', 'r') as f:\n lines = f.readlines()\n result = [word.strip() for word in lines]\n return result", "def load_file(filename):\n with open(filename, \"r\") as f:\n return f.readlines()", "def get_words_from_file(filename):\n with open(filename, newline='') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=' ')\n return list(csv_reader)", "def make_word_list():\n result = []\n for line in open('words.txt'):\n word = line.strip()\n result.append(word)\n return result", "def import_text(file):\n\n # Only use alpha-numeric words from file\n with open(file=file, mode='r') as text:\n word_list = [word for word in text.read().split() if word.isalnum()]\n return word_list", "def list_words():\n fin = open('words.txt')\n words = []\n for line in fin:\n words.append(line.strip())\n fin.close()\n return words", "def get_sentence_list_for_word_file(file_path: str) -> List[str]:\n # get file data\n with open(file_path, 'r') as review_file:\n file_text = review_file.read().splitlines()\n return file_text" ]
[ "0.8982847", "0.85380477", "0.85197324", "0.84018725", "0.8372622", "0.8360392", "0.8360392", "0.8360392", "0.83501935", "0.83436346", "0.83436346", "0.8328561", "0.8309319", "0.82969534", "0.82897973", "0.82897973", "0.8267962", "0.8260606", "0.8249803", "0.8249803", "0.8249803", "0.8249803", "0.8249803", "0.8249803", "0.8249803", "0.8249803", "0.8249803", "0.8209614", "0.8209226", "0.819398", "0.819314", "0.81852466", "0.81823033", "0.81823033", "0.81823033", "0.81823033", "0.81823033", "0.81823033", "0.81823033", "0.81625634", "0.8159385", "0.8133153", "0.813293", "0.80641145", "0.80641145", "0.80641145", "0.8062951", "0.8062951", "0.8036114", "0.80062157", "0.80040294", "0.799995", "0.79842746", "0.7952772", "0.7912863", "0.7884134", "0.7880499", "0.7878703", "0.7867126", "0.7854223", "0.7782581", "0.7748626", "0.77244645", "0.76996595", "0.7698787", "0.7698787", "0.7698787", "0.7629407", "0.7615809", "0.7592688", "0.7568727", "0.75532895", "0.75296617", "0.7529653", "0.7470275", "0.74503136", "0.7395446", "0.7358168", "0.73581064", "0.73385686", "0.7337327", "0.7312647", "0.73109734", "0.730104", "0.7296862", "0.72942287", "0.72467005", "0.72426736", "0.72050303", "0.72030693", "0.71958697", "0.7195705", "0.7174892", "0.71687746", "0.716513", "0.716023", "0.7153294", "0.7149494", "0.713439", "0.71033245" ]
0.812224
43
Fixture for setting up configuration parser
def setup_config():
    config = configparser.ConfigParser()
    config.read(CONFIG_PATH)
    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.parser = create_parser()", "def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objectify.fromstring(config)", "def setUp(self):\r\n\t\tself._configuration_ = Declare.Configuration.read(\"configuration.json\")", "def setUp(self):\n self.parser = echo.create_parser()", "def setUp(self):\n self.parser = echo.create_parser()", "def setUpConfig(self):\n pass", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def test_everything():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/config.toml\",\n \"ENV\", # Temporarily enabled, needs seperate optional dotenv test\n ]\n )\n\n assert \"root_url\" in str(c._crve_configs)\n assert c.root_url == \"test url\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n )\n assert c.defaults_toml() == default_toml", "def setup_parser_config(subparsers):\r\n parser = subparsers.add_parser('config', help='Freeseer configuration functions')\r\n subparsers = parser.add_subparsers(dest=\"config_service\")\r\n setup_parser_config_reset(subparsers)\r\n setup_parser_config_youtube(subparsers)", "def setUp(self):\n self.parser = command_line.get_args()", "def test_fully_default_configuration(self):\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 1)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(gif|png|jpg|bmp)$')", "def test_normal_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'url_list_file: ./urls\\n'\n 'output_directory: ./output\\n'\n 'max_depth: 6\\n'\n 'crawl_interval: 1\\n'\n 'crawl_timeout: 5\\n'\n 'target_url: .*\\.(gif|png|jpg|bmp)$\\n'\n 'thread_count: 8\\n'\n )\n\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 6)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 5)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(gif|png|jpg|bmp)$')", "def 
configure(self, parser: argparse.ArgumentParser) -> None:\n pass", "def parse_config(self):\n # TODO: parse config file\n pass", "def mock_config():\n from .. import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]", "def test_collect_configuration(self):\n sample_config = \"\"\"[dyndnsc]\nconfigs = testconfig\n\n[testconfig]\nuse_preset = testpreset\nupdater-userid = bob\nupdater-password = XYZ\n# test overwriting a preset value:\ndetector-url = http://myip.example.com/\n\n[preset:testpreset]\nupdater = fubarUpdater\nupdater-url = https://update.example.com/nic/update\nupdater-moreparam = some_stuff\ndetector = webcheck4\ndetector-family = INET\ndetector-url = http://ip.example.com/\ndetector-parser = plain\n \"\"\"\n p = configparser.ConfigParser()\n p.readfp(StringIO(sample_config)) # XXX readfp() is deprecated since py 3.2\n config = collect_config(p)\n self.assertEqual(dict, type(config))\n self.assertTrue('testconfig' in config)\n self.assertTrue('detector' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['detector'], list))\n self.assertEqual(1, len(config['testconfig']['detector']))\n detector, detector_opts = config['testconfig']['detector'][-1]\n self.assertEqual(detector, \"webcheck4\") # from the preset\n self.assertEqual(detector_opts['url'], \"http://myip.example.com/\") # from the user conf\n self.assertTrue('updater' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['updater'], list))\n self.assertEqual(1, len(config['testconfig']['updater']))\n updater = config['testconfig']['updater'][0]\n self.assertEqual(\"fubarUpdater\", updater[0])\n self.assertTrue(\"url\" in updater[1])\n self.assertTrue(\"moreparam\" in updater[1])\n self.assertEqual(\"some_stuff\", updater[1][\"moreparam\"])", "def setUp(self) -> None:\n\n # Call the superclass setup\n super().setUp()\n\n # Read the config file from the settings\n self.config = read_settings(form_abs_path(__file__, \"../settings.cfg\"), \"Space Invaders\")", "def test_parser():\n\n parser = configparser.RawConfigParser()\n version = '1.2.3'\n string = 'string-value'\n bool = 'False'\n literal = \"['a', 'b', 'c']\"\n literal2 = '1.23'\n section = 'dashboard'\n\n parser.add_section(section)\n parser.set(section, 'APP_VERSION', version)\n parser.set(section, 'string', string)\n parser.set(section, 'bool', bool)\n parser.set(section, 'literal', literal)\n parser.set(section, 'literal2', literal2)\n\n assert parse_version(parser, section, 'default') == version\n assert parse_string(parser, section, 'string', 'default') == string\n assert not parse_bool(parser, section, 'bool', 'True')\n assert parse_literal(parser, section, 'literal', 'default') == ['a', 'b', 'c']\n assert parse_literal(parser, 
section, 'literal2', 'default') == 1.23", "def __init__(self, _confFixture, _settings):\n self._conf = _confFixture\n self._settings = _settings", "def test_partly_default_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'max_depth: 10\\n'\n 'crawl_interval: 2\\n'\n 'crawl_timeout: 10\\n'\n 'target_url: .*\\.(com|cn|net)$\\n'\n )\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 10)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 2)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 10)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(com|cn|net)$')", "def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)", "def setup_parser(self, parser):", "def test_config():\n args = Namespace(molecule=\"nucleotide\", verbose=False)\n config = core.Config.from_args(args)\n assert config.verbose is False\n assert config.molecule == 'nucleotide'\n assert config.extended_validation == 'none'\n\n args = Namespace(molecule=\"protein\", verbose=True)\n config = core.Config.from_args(args)\n assert config.verbose is True\n assert config.molecule == 'protein'", "def testconfig(self):\n\n configuration = Parser.getNodeTag(self, self.xmlDoc, \"configuration\")\n metadatadb = Parser.getNodeTag(self, configuration, \"metadatadb\") \n self.user = Parser.getNodeVal(self, metadatadb, \"user\")\n self.host = Parser.getNodeVal(self, metadatadb, \"host\")\n self.port = Parser.getNodeVal(self, metadatadb, \"port\")\n self.database = Parser.getNodeVal(self, metadatadb, \"database\")\n self.metaDBSchema = Parser.getNodeVal(self, metadatadb, \"schema\")\n \n try:\n self.passwd = Parser.getNodeVal(self, self.metadatadb, \"passwd\")\n self.metaDB = self.user + \"/\" + self.passwd + \"@\" + self.host + \":\" + self.port + \"/\" \\\n + self.database + \":\" + self.metaDBSchema\n except Exception:\n self.metaDB = self.user + \"@\" + self.host + \":\" + self.port + \"/\" + self.database + \":\" \\\n + self.metaDBSchema", "def test_read_namespaced_build_config(self):\n pass", "def test_config_class():\n assert config is not None", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def config():\n data = \"\"\"[YESSSSMS]\nLOGIN = 03211234567\nPASSWD = MySecr3t\nDEFAULT_TO = +43664123123123\nMVNO = YESSS\n\"\"\"\n with mock.patch(\n 
\"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def test_define():\n client = TestClient()\n client.run(\"config set general.fakeos=Linux\")\n conf_file = load(client.cache.conan_conf_path)\n assert \"fakeos = Linux\" in conf_file\n\n client.run('config set general.compiler=\"Other compiler\"')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler = Other compiler' in conf_file\n\n client.run('config set general.compiler.version=123.4.5')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler.version = 123.4.5' in conf_file\n assert \"14\" not in conf_file\n\n client.run('config set general.new_setting=mysetting')\n conf_file = load(client.cache.conan_conf_path)\n assert 'new_setting = mysetting' in conf_file\n\n client.run('config set proxies.https=myurl')\n conf_file = load(client.cache.conan_conf_path)\n assert \"https = myurl\" in conf_file.splitlines()", "def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')", "def load_configuration(self) -> None:\n config_file = self.default_config_file\n if self.config_file:\n config_file = self.config_file\n self.config = configparser.ConfigParser(delimiters=\"=\")\n # mypy is unhappy with us assigning to a method - (monkeypatching?)\n self.config.optionxform = lambda option: option # type: ignore\n self.config.read(config_file)", "def __check_configuration__(self, parser):\n if not parser.has_section('core'):\n self.logger.error('The config file should contain a core section with at least the module_path specified')\n sys.exit(1)\n\n else:\n if parser.get('core', 'modules_path', fallback=None) is None:\n self.logger.error('The configuration file should contain at least the modules_path value in core section.')\n sys.exit(1)\n\n if not parser.has_section('mysql'):\n self.logger.error('The config file should contain a mysql section.')\n sys.exit(1)\n\n else:\n if parser.get('mysql', 'host', fallback=None) is None:\n self.logger.error('The config file should contain the host value in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'port', fallback=None) is None:\n self.logger.error('The config file should contain the port value in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'user', fallback=None) is None:\n self.logger.error('The config file should contain the user in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'password', fallback=None) is None:\n self.logger.error('The config file should contain the password of the user in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'server_id', fallback=None) is None:\n self.logger.error('The config file should contain the server_id in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'tables', fallback=None) is not None:\n tables = [table.strip() for table in parser.get('mysql', 'tables').split(',')]\n for table in tables:\n if not parser.has_section(table):\n self.logger.error('The config file should contain a section about the table : %s' % table)\n exit(1)\n if parser.get(table, 'index_label', fallback=None) is None :\n self.logger.error('The config file should contain a table section with a index_label value.')\n exit(1)\n else:\n self.logger.error('The config file should contain a tables value with all the tables to replicate.')\n exit(1)", "def init(subparsers):\n parser = subparsers.add_parser('generate-config', help='Generate the WebRPG configuration file')\n parser.add_argument('--filename', default='production.ini', 
help='Configuration file name')\n parser.add_argument('--sqla-connection-string', default=None, help='SQLAlchemy database connection string')\n parser.set_defaults(func=generate_config)", "def configure_test(self, test, config_json):\n pass", "def test_config_from_file(self):\n parser = Parser()\n args = parser.parser.parse_args(['-c'])\n if args.config:\n config = Config()\n config.config_file = \"./config\"\n config.config = test_config\n config.config_from_file()\n self.assertTrue(config.config)\n os.remove(config.config_file)", "def test_parse_config(self):\n user_config = {\"weighted_display_name_like\": \"testabc [SoMeThInG]\"}\n _, _, module_config = create_user_directory_search_module_with_config(\n user_config\n )\n\n # Check that the generated config contains what we expect\n self.assertEqual(\n module_config.weighted_display_name_like,\n user_config[\"weighted_display_name_like\"],\n )", "def parse_config(parser):\n parser.add_argument('--config-file', '-c', help='config filename',\n default='config.yaml')\n return parser", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. -qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def test_basic_parsers():", "def configuration():", "def configure_parser_config(subparsers):\n help_msg = \"Configure data paths for MotifScan.\"\n desc_msg = help_msg + dedent(f\"\"\" \n\n Commands listed below enable users to change the default installation\n location of genome/motif data files and check the paths of installed \n genome assemblies or motif sets.\n\n The user specific config file is located at: {user_rc_path}\n \"\"\")\n\n epilog_msg = dedent(\"\"\"\n Examples:\n --------- \n 1) Display all values set in the config file:\n\n motifscan config --show\n\n 2) Change the default installation location for genome assemblies:\n\n motifscan config --set-default-genome <path>\n\n 3) Change the default installation location for motif sets:\n\n motifscan config --set-default-motif <path> \n\n 4) Get the genome path of a specific genome assembly:\n\n motifscan config --get-genome <genome_name>\n\n 5) Change the motif path for a specific motif set:\n\n motifscan config --set-motif <motif_set> <path>\n \"\"\")\n\n parser = subparsers.add_parser(\n \"config\", description=desc_msg, help=help_msg, epilog=epilog_msg,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser_basic = parser.add_argument_group(\"Basic Options\")\n parser_basic.add_argument(\n \"--show\", dest=\"show\", action=\"store_true\", default=False,\n help=\"Show all configured values.\")\n\n parser_default = parser.add_argument_group(\"Default Install Location\")\n parser_default.add_argument(\n \"--set-default-genome\", metavar=\"PATH\", dest=\"set_default_genome\",\n help=\"Set the default installation path for genome assemblies.\")\n parser_default.add_argument(\n \"--set-default-motif\", metavar=\"PATH\", dest=\"set_default_motif\",\n help=\"Set the default installation path for motif sets.\")\n\n parser_genome = 
parser.add_argument_group(\"Genome Path Options\")\n parser_genome.add_argument(\n \"--get-genome\", metavar=\"NAME\", dest=\"get_genome\",\n help=\"Get the genome path of a specific genome assembly.\")\n parser_genome.add_argument(\n \"--set-genome\", metavar=(\"NAME\", \"PATH\"), dest=\"set_genome\", nargs=2,\n help=\"Set the genome path for a specific genome assembly.\")\n parser_genome.add_argument(\n \"--rm-genome\", metavar=\"NAME\", dest=\"rm_genome\",\n help=\"Remove a specific genome assembly.\")\n\n parser_motif = parser.add_argument_group(\"Motif Path Options\")\n parser_motif.add_argument(\n \"--get-motif\", metavar=\"NAME\", dest=\"get_motif\",\n help=\"Get the motif path of a specific motif set.\")\n parser_motif.add_argument(\n \"--set-motif\", metavar=(\"NAME\", \"PATH\"), dest=\"set_motif\", nargs=2,\n help=\"Set the motif path for a specific motif set.\")\n parser_motif.add_argument(\n \"--rm-motif\", metavar=\"NAME\", dest=\"rm_motif\",\n help=\"Remove a specific motif set.\")\n\n parser = _add_verbose_argument(parser)\n parser.set_defaults(func=config.run)", "def test_gen_parser(self):\n pass", "def _new():\n\treturn ConfigParser(\n\tdelimiters = ('=',),\n\tcomment_prefixes = ('#', ';'),\n\tdefault_section = 'default',\n\tallow_no_value = False,\n\tstrict = False,\n\tinterpolation = ExtendedInterpolation(),\n\tdefaults = {\n\t\t'debug': False,\n\t\t'datadir': path.join(path.expanduser('~'), '.local', 'rosshm'),\n\t\t'log.level': 'warn',\n\t\t'core.enable': True,\n\t\t'db.driver': 'sqlite',\n\t\t'db.name': 'rosshmdb',\n\t\t'db.config': '',\n\t\t'static.enable': True,\n\t\t'web.enable': True,\n\t},\n)", "def initialize_from_config(self):", "def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)", "def test_parse_config(self):\n config_file = os.path.join('top', 'conf', 'top.conf')\n\n self._c.set_config_file(config_file)\n self._c.parse_config()\n\n received = self._c.adp_loop\n expected = 30\n msg = 'AdpB2CConfig.adp_loop error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_dirs\n expected = ['/var/ftp/pub/nparcel/adp/in']\n msg = 'AdpB2CConfig.adp_dirs error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.archive_dir\n expected = '/data/top/archive'\n msg = 'AdpB2CConfig.archive_dir error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_file_formats\n expected = []\n msg = 'AdpB2CConfig.adp_file_formats error'\n self.assertListEqual(received, expected, msg)\n\n # For the default configuration file the [db] section is blank\n received = self._c.db_kwargs()\n msg = 'AdpB2CConfig.db_kwargs error'\n self.assertIsNone(received, msg)\n\n received = self._c.code_header\n expected = 'TP Code'\n msg = 'AdpB2CConfig.code_header error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_headers\n expected = {'agent.code': 'TP Code',\n 'agent.dp_code': 'DP Code',\n 'agent.name': 'ADP Name',\n 'agent.address': 'Address',\n 'agent.suburb': 'Suburb',\n 'agent.state': 'State',\n 'agent.postcode': 'Postcode',\n 'agent.opening_hours': 'Opening Hours',\n 'agent.notes': 'Notes',\n 'agent.parcel_size_code': 'ADP Accepts Parcel Size',\n 'agent.phone_nbr': 'Phone',\n 'agent.contact_name': 'Contact',\n 'agent.email': 'Email',\n 'agent.fax_nbr': 'Fax',\n 'agent.latitude': 'Latitude',\n 'agent.longitude': 'Longitude',\n 'agent.status': 'Active',\n 'delivery_partner.id': 'DP Id',\n 
'login_account.username': 'Username'}\n msg = 'AdpB2CConfig.adp.headers error'\n self.assertDictEqual(received, expected, msg)\n\n received = self._c.delivery_partners\n expected = ['Nparcel', 'ParcelPoint', 'Toll', 'National Storage']\n msg = 'AdpB2CConfig.adp.delivery_partners error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.adp_default_passwords\n expected = {'nparcel': 'aaaa',\n 'parcelpoint': 'bbbb',\n 'toll': 'cccc',\n 'national storage': 'dddd'}\n msg = 'AdpB2CConfig.adp_default_passwords error'\n self.assertDictEqual(received, expected, msg)", "def setUp(self):\n self._wiki = None\n self._app = None\n self.rootdir = mkdtemp()\n self.create_file(u'config.py', self.config_content)", "def test_minimal_configuration(self):\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'minimal-cfg-file.ini'))\n self.cfg = configure(args)\n cfg = ElasticBlastConfig(self.cfg, task = ElbCommand.SUBMIT)\n\n self.assertTrue(cfg.blast.db_source)\n self.assertEqual(cfg.blast.db_source, DBSource.GCP)\n\n self.assertTrue(cfg.blast.batch_len)\n self.assertEqual(cfg.blast.batch_len, 10000)\n\n self.assertTrue(cfg.blast.mem_request)\n self.assertEqual(cfg.blast.mem_request, '0.5G')\n\n self.assertTrue(cfg.blast.mem_limit)\n expected_mem_limit = f'{get_machine_properties(cfg.cluster.machine_type).memory - SYSTEM_MEMORY_RESERVE}G'\n self.assertEqual(cfg.blast.mem_limit, expected_mem_limit)\n\n self.assertTrue(cfg.timeouts.init_pv > 0)\n self.assertTrue(cfg.timeouts.blast_k8s > 0)\n\n ElasticBlastConfig(self.cfg, task = ElbCommand.SUBMIT)", "def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)", "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. 
Aborting.\"\n\t\treturn\n\n\tread_config_file()", "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {\n \"arg\": \"val\"\n }\n })", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def test_config_opts(sc):\n assert sc.server_name is not None\n assert sc.deployment == Deployment.stg\n assert sc.admins is not None\n assert sc.command_handler is not None\n assert sc.command_handler_work_dir is not None\n assert sc.command_handler_pvc_env_var is not None\n assert sc.command_handler_image_reference is not None\n assert sc.command_handler_k8s_namespace is not None\n assert sc.fas_password is not None\n assert sc.testing_farm_secret is not None\n assert sc.github_requests_log_path is not None\n assert sc.webhook_secret is not None\n assert sc.validate_webhooks is not None\n assert sc.gitlab_token_secret is not None", "def config(self, **kw):\n self.cfg_fixture.config(**kw)", "def setup_parser(self, parser, args):\r\n\r\n pass", "def configure(self) -> None:", "def test_loads_a_config_file(self):\n from test.resources import config\n self.assertIsInstance(config, type(sys))\n self.assertIsNotNone(config.example)\n self.assertEqual(config.example.config_option, 'config-value')", "def init_config() -> Config:\n ...", "def setUp(self):\n\n PyFunceble.load_config(generate_directory_structure=False)\n\n self.domains = [\n \"google.com\",\n \"twitter.com\",\n \"github.com\",\n \"facebook.com\",\n \"hello.world\",\n \"world.hello\",\n ]", "def test_create_namespaced_build_config(self):\n pass", "def test_config_ok_config(self):\n test_data = (\"[gnupg]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"\\n\"\n \"[data]\\n\"\n \"\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\"\n \"\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data)\n config = Config(\"test_config.conf\")\n self.assertIn(\"gnupg\", config.config.sections())\n self.assertIn(\"amazon-s3\", config.config.sections())\n self.assertEqual(config.config.get(\n \"gnupg\", \"recipients\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"gnupg\", \"signer\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"access_key\"), \"ACCESSKEY\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"secret_access_key\"), \"SECRETACCESSKEY\")\n self.assertEqual(config.config.get(\n \"data\", \"bucket\"), \"DATABUCKET\")\n self.assertEqual(config.config.get(\n \"metadata\", \"bucket\"), \"METADATABUCKET\")\n os.remove(\"test_config.conf\")", "def test_config_to_dict_py2(self):\n if PYTHON_VERSION > 2:\n return\n\n from ConfigParser import ConfigParser\n fixture = ConfigParser()\n fixture.add_section('something')\n fixture.set('something', 'value', 'stuff')\n\n self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))", "def config():", "def config():", "def configure(self, config: Config) -> Config:\n return config.parse(self.PARSER) if self.PARSER else config", "def __init__(self, argparser):\n 
super().__init__()\n argparser.add_argument(\n \"-b\", \"--config-seed\", dest=\"config_seed\",\n help=\"configuration seed/blob\",\n type=str, default=constants.DEFAULT_CONFIG_SEED\n )\n argparser.add_argument(\n \"-e\", \"--config-variable\", dest=\"config_variable\",\n help=\"name of environment variable with config\",\n type=str, default=constants.DEFAULT_CONFIG_ENV_KEY\n )\n argparser.add_argument(\n \"-c\", \"--config-file\", dest=\"config_file\",\n help=\"path to config file\",\n type=str, default=constants.DEFAULT_CONFIG_PATH\n )\n argparser.add_argument(\n \"-s\", \"--suite\", dest=\"suite\",\n help=\"test suite to run\",\n type=str\n )\n argparser.add_argument(\n \"-l\", \"--list-suites\", dest=\"list_suites\",\n help=\"list available test suites\",\n action=\"store_true\"\n )", "def __init__(self):\n ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG.read(os.path.join(os.path.dirname(__file__)))\n self.IPS = []", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)", "def setUp(self):\n self.settings = MockSettings()\n django_yamlconf.load(project=\"testing\", settings=self.settings)", "def setup_parser():\n parser = HelpfulParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('infile', type=str, help=\"input data file\")\n\n parser.add_argument('-u', '--usage', action=\"help\",\n help=\"show this help message and exit\")\n parser.add_argument('-h', '--host', metavar='HOST', type=str,\n default='localhost', help='Server hostname')\n parser.add_argument('-p', '--port', metavar='PORT', type=int,\n default='3000', help='Server port')\n parser.add_argument('-U', '--user', metavar='USER', type=str,\n default=None, help='Username')\n parser.add_argument('-P', '--passwd', metavar='PW', type=str,\n default=None, help='Password')\n parser.add_argument('-n', '--nspace', metavar='NS', type=str,\n default='test', help='Namespace')\n parser.add_argument('-s', '--set', metavar='SET', type=str,\n default='osm', help='Set name')\n return parser", "def test_handle_parsing(self):\n # Construct the location of the parser config file.\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',\n 'conf', 'parser.cfg'))\n # Read the config and fill in empty values.\n self.cp = ConfigParser.ConfigParser()\n self.cp.read(path)\n self.cp.set('site_info', 'site_name', 'TestSite')\n self.cp.set('site_info', 'lrms_server', 'TestServer')\n # It's hard to test handle_parsing, but we can check that no erorrs are\n # raised and that the load_records method is called with an empty list.\n bin.parser.handle_parsing('SGE', self.mock_db, self.cp)\n self.mock_db.load_records.assert_called_once_with([])", "def setup_parser():\n\n psr_desc=\"cfdi engine service interface\"\n psr_epi=\"select a config profile to specify defaults\"\n\n psr = argparse.ArgumentParser(\n description=psr_desc, epilog=psr_epi)\n\n psr.add_argument('-nmp', action='store_true', dest='nmp',\n help='unique process approach (useful in development)')\n\n psr.add_argument('-d', action='store_true', dest='debug',\n help='print debug information')\n\n psr.add_argument('-c', '--config', action='store',\n dest='config',\n help='load an specific config profile')\n\n psr.add_argument('-p', '--port', action='store',\n dest='port',\n help='launches service on specific port')\n\n return psr.parse_args()", "def 
test_configuration(self):\n self.assertEqual(self.Test.adapter_config['write'],\n { 'adapter': TestAdapter, 'foo': 'bar' })", "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)", "def testLoadConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n self.assertEqual(\n os.path.basename(loader.taskHolders()[0].var('contextConfig')),\n 'config.hjson'\n )", "def load_test_config(root_fs) -> Config:\n with root_fs.open('config.yaml') as fd:\n return Config.parse(fd)", "def _createConfigParser(self):\n return ConfigParser.ConfigParser()", "def config_parse(conf=None, args=None):\n if args is None:\n args = []\n args += ['--config-file', etcdir('neutron.conf')]\n if conf is None:\n config.init(args=args)\n else:\n conf(args)", "def test_config_to_dict_py3(self):\n if PYTHON_VERSION < 3:\n return\n\n from configparser import ConfigParser\n fixture = ConfigParser()\n fixture['something'] = { 'value': 'stuff' }\n\n self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))", "def test_test_config(self):\r\n\r\n app = create_app('movieapp.settings.TestConfig')\r\n\r\n assert app.config['DEBUG'] is True\r\n assert app.config['SQLALCHEMY_ECHO'] is True\r\n assert app.config['CACHE_TYPE'] == 'null'", "def setUp(self) -> None:\n self.config = TMConfiguration(\n \"q2\",\n TMTape(\n tape=\"abcdefghij\",\n blank_symbol=\".\",\n current_position=2,\n ),\n )\n\n self.config2 = MTMConfiguration(\n \"q1\",\n (\n TMTape(\n tape=\"abcdefghij\",\n blank_symbol=\".\",\n current_position=2,\n ),\n TMTape(\n tape=\"klmnopq\",\n blank_symbol=\".\",\n current_position=5,\n ),\n ),\n )", "def _configure(self):\n test_lib.test_config.setdefault('config_files', []).append(\n self.filename)\n self._write_config_content()", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def __init__(self, config_file):\n \n self.log = logging.getLogger(__name__)\n\n self.parser = ConfigParser.ConfigParser()\n if os.path.exists(config_file) and os.path.isfile(config_file):\n self.parser.read(config_file)\n self.log.debug(\"opened configuration '%s'\" % config_file)\n else:\n raise ConfigError(\"Config file missing\", \"File '%s' doesn't exist.\" % (config_file))\n\n self.config_file = config_file\n self.check_config()", "def mocked_config_file():\n data = \"\"\"[YESSSSMS]\nLOGIN = 03211234567\nPASSWD = MySecr3t\nDEFAULT_TO = +43664123123123\nMVNO = GOOOD\n\"\"\"\n with mock.patch(\n \"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def setup_config():\n global config\n config = modConfig.Config(cmdline.config)", "def test_simple_config_file(self):\n # create the config file:\n test_name = \"unit_test_test_1\"\n K = \"'key'\"\n fam = 3\n W = 5\n D = 8\n num_circuits = 1\n num_inputs = 1\n config_file_text = \"\\n\".join([\"test_type = RANDOM\",\n \" = \".join([\"K\", str(K)]),\n \" = \".join([\"fam\", str(fam)]),\n \" = \".join([\"W\", str(W)]),\n \" = \".join([\"D\", str(D)]),\n \" = \".join([\"num_circuits\",\n str(num_circuits)]),\n \" = \".join([\"num_inputs\",\n str(num_inputs)]),\n \" = \".join([\"generate\", \"True\"])])\n config_file = StringIO.StringIO(config_file_text)\n # create the parser/generator:\n fho = 
tfho.test_file_handle_object()\n pag = gen.parser_and_generator(test_name, config_file, fho)\n pag.parse_and_generate()\n # retrieve the test file and check that it is correct:\n test_file = fho.get_file(os.path.join(test_name, \"test.txt\"))\n test_file_text = test_file.getvalue()\n expected_test_file_text = \"\\n\".join(\n [\"KEY\",\n os.path.join(\"stealth\", test_name, \"key\", str(1)),\n \"CIRCUIT\",\n os.path.join(\"stealth\", test_name, \"circuit\", str(1)),\n \"INPUT\",\n os.path.join(\"stealth\", test_name, \"input\", str(1)),\n \"\"])\n self.assertEqual(expected_test_file_text, test_file_text)\n # retrieve the key file and check that it is correct:\n key_file = fho.get_file(os.path.join(test_name, \"key\", \"1\"))\n key_file_text = key_file.getvalue()\n self.assertEqual(K, key_file_text)\n # retrieve the input and check that it is correct:\n input_file = fho.get_file(os.path.join(test_name, \"input\", \"1\"))\n input_file_text = input_file.getvalue()\n # check that input text begins and ends with a bracket:\n self.assertEqual(\"[\", input_file_text[0])\n self.assertEqual(\"]\", input_file_text[-1])\n # check that all bits are 0 or 1:\n for bit in input_file_text[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # retrieve the circuit and check that it begins with the correct header:\n circuit_file = fho.get_file(os.path.join(test_name, \"circuit\", \"1\"))\n circuit_file_text = circuit_file.getvalue()\n circuit_header = circuit_file_text.split(\"\\n\")[0]\n (W_string, D_string, fam_string) = circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n D_value = float(D_string.split(\"=\")[-1])\n fam_value = int(fam_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n self.assertEqual(D, D_value)\n self.assertEqual(fam, fam_value)", "def __init__(self, config: str) -> None:\n self.configuration = config", "def __init__(self, config: str) -> None:\n self.configuration = config", "def configure_parser(sub_parsers):\n\n parser = sub_parsers.add_parser(\n 'ants',\n description='Solve a traveling salesman problem using ant colony optimization',\n help='Ant colony optimization for the traveling salesman problem')\n\n parser.add_argument(\n '-r',\n '--rho',\n type=float,\n default=.5,\n help='Evaporation rate (default 0.5)')\n parser.add_argument(\n '-a',\n '--alpha',\n type=float,\n default=.5,\n help='Relative importance of the pheromone (default 0.5)')\n parser.add_argument(\n '-b',\n '--beta',\n type=float,\n default=.5,\n help='Relative importance of the heuristic information (default 0.5)')\n parser.add_argument(\n '-q',\n '--q',\n type=float,\n default=1.,\n help='Constant Q. 
Used to calculate the pheromone, laid down on an edge (default 1)')\n parser.add_argument(\n '-n',\n '--iteration-number',\n type=int,\n default=10,\n help='Number of iterations to execute (default 10)')\n parser.add_argument(\n '-o',\n '--two-opt',\n action='store_true',\n default=False,\n help='Enable to use 2-opt local search after each iteration (default off)')\n parser.add_argument(\n '-t',\n '--tsp-file',\n type=str,\n default=path.join(path.abspath(path.dirname(inspect.getfile(inspect.currentframe()))), 'resources/burma14.tsp'),\n help='Path of the tsp file that shall be loaded (default loads the built-in burma14.tsp)')\n\n parser.add_argument(\n 'ant_number',\n type=int,\n help='Number of ants used for solving')\n\n parser.set_defaults(func=_run_aco4tsp)", "def test_configuration():\n config = Configuration()\n\n assert config.relay_pin is not None\n assert config.relay_pin >= 1\n assert config.relay_pin < 32\n assert config.seconds_between_checks > 0\n assert config.seconds_to_power_off > 0\n assert config.seconds_to_wait_after_power_on > config.seconds_to_power_off", "def configure(self, options, conf):", "def setup(parser):\n global debug\n global config\n global file_list\n global job_sets\n global from_saved_state\n\n args = parser.parse_args()\n\n if args.debug:\n debug = True\n print_message('Running in debug mode', 'ok')\n\n # read through the config file and setup the config dict\n config = {}\n if not args.config:\n parser.print_help()\n sys.exit()\n else:\n try:\n confParse = ConfigParser.ConfigParser()\n confParse.read(args.config)\n for section in confParse.sections():\n config[section] = {}\n for option in confParse.options(section):\n opt = confParse.get(section, option)\n if not opt:\n if 'pass' in option and not args.no_monitor:\n opt = getpass('>> ' + option + ': ')\n else:\n opt = raw_input('>> ' + option + ': ')\n if opt.startswith('[') or opt.startswith('{'):\n opt = json.loads(opt)\n config[section][option] = opt\n except Exception as e:\n msg = 'Unable to read config file, is it properly formatted json?'\n print_message(msg)\n print_debug(e)\n return -1\n\n if args.no_ui:\n config['global']['ui'] = False\n else:\n debug = False\n config['global']['ui'] = True\n\n if args.dry_run:\n config['global']['dry_run'] = True\n else:\n config['global']['dry_run'] = False\n\n if args.no_cleanup:\n config['global']['no_cleanup'] = True\n else:\n config['global']['no_cleanup'] = False\n\n if args.no_monitor:\n config['global']['no_monitor'] = True\n print \"Turning off remote monitoring\"\n else:\n config['global']['no_monitor'] = False\n \n if args.size:\n config['transfer']['size'] = args.size\n else:\n config['transfer']['size'] = 100\n \n if args.viewer:\n print 'Turning on output_viewer mode'\n config['global']['viewer'] = True\n else:\n config['global']['viewer'] = False\n\n # setup config for file type directories\n for key, val in config.get('global').get('output_patterns').items():\n new_dir = os.path.join(\n config['global']['data_cache_path'],\n key)\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n if val == 'mpaso.hist.am.timeSeriesStatsMonthly':\n config['global']['mpas_dir'] = new_dir\n elif val == 'mpascice.hist.am.timeSeriesStatsMonthly':\n config['global']['mpas_cice_dir'] = new_dir\n elif val == 'cam.h0':\n config['global']['atm_dir'] = new_dir\n elif val == 'mpaso.rst.0':\n config['global']['mpas_rst_dir'] = new_dir\n elif val == 'rpointer':\n config['global']['rpt_dir'] = new_dir\n elif val == 'mpas-o_in':\n config['global']['mpas_o-in_dir'] 
= new_dir\n elif val == 'mpas-cice_in':\n config['global']['mpas_cice-in_dir'] = new_dir\n elif 'stream' in val:\n config['global']['streams_dir'] = new_dir\n\n if not os.path.exists(config['global']['output_path']):\n os.makedirs(config['global']['output_path'])\n if not os.path.exists(config['global']['data_cache_path']):\n os.makedirs(config['global']['data_cache_path'])\n\n # setup run_scipts_path\n config['global']['run_scripts_path'] = os.path.join(\n config['global']['output_path'],\n 'run_scripts')\n # setup tmp_path\n config['global']['tmp_path'] = os.path.join(\n config['global']['output_path'],\n 'tmp')\n\n # setup logging\n if args.log:\n log_path = args.log\n else:\n log_path = os.path.join(\n config.get('global').get('output_path'),\n 'workflow.log')\n logging.basicConfig(\n format='%(asctime)s:%(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n filename=log_path,\n filemode='w',\n level=logging.DEBUG)\n\n endpoints = [config['transfer']['source_endpoint'], config['transfer']['destination_endpoint']]\n if not setup_globus(endpoints):\n return -1\n print 'Globus setup complete'\n return config", "def test_node_config() -> None:\n node = MyNode()\n node.configure(\n MyConfig(\n int_field=5,\n str_field=\"hello\",\n float_field=0.5,\n int_enum_field=MyIntEnum.B,\n str_enum_field=MyStrEnum.A,\n bool_field=True,\n )\n )\n node.setup()", "def test_validate_config_file(self):\n ingest_mgmr = IngestManager()\n ingest_mgmr.validate_config_file(self.example_config_data)\n assert(ingest_mgmr.config is not None)\n assert (ingest_mgmr.config.config_data is not None)", "def _configure_args(self, parser: ArgumentParser) -> ArgumentParser:\n pass", "def configure(self):\n pass" ]
[ "0.70605105", "0.7017709", "0.7011017", "0.692149", "0.692149", "0.690285", "0.68758297", "0.68144774", "0.6782708", "0.6702887", "0.66660374", "0.6653196", "0.66530657", "0.6643987", "0.6641886", "0.6610034", "0.6574361", "0.65678746", "0.6562894", "0.6540612", "0.6537637", "0.6505559", "0.65037274", "0.65011334", "0.6493991", "0.6430816", "0.6400373", "0.6387626", "0.6365025", "0.6344628", "0.63374805", "0.633304", "0.63308764", "0.63278395", "0.6310064", "0.6306638", "0.6292342", "0.6288631", "0.6287028", "0.62813365", "0.6273637", "0.62681246", "0.62637544", "0.6246227", "0.6245167", "0.6237004", "0.622046", "0.62197065", "0.6218227", "0.61999214", "0.61984855", "0.6184511", "0.6181732", "0.61817074", "0.6181439", "0.6169045", "0.61678195", "0.6158738", "0.6157283", "0.6157226", "0.614685", "0.6138336", "0.61372113", "0.61372113", "0.61346817", "0.6122851", "0.6114959", "0.6110955", "0.6107867", "0.6102825", "0.610018", "0.6095831", "0.6092921", "0.60894257", "0.60876817", "0.60520744", "0.6048995", "0.60342205", "0.6032394", "0.60262126", "0.60237175", "0.60202074", "0.60173637", "0.60173637", "0.60173637", "0.60173637", "0.600784", "0.5987557", "0.598295", "0.59715164", "0.5965609", "0.5965609", "0.596468", "0.59579587", "0.59507686", "0.5950279", "0.59456134", "0.59428483", "0.5939636", "0.59392494" ]
0.7371306
0
Fixture for retrieving mock event
def get_mock_event():
    event = {
        "httpMethod": "GET",
        "//body": "{\"name\": \"Sam\"}",
        "resource": "/{proxy+}",
        "queryStringParameters": {},
        "pathParameters": {
            "proxy": "users"
        },
        "requestContext": {
            "accountId": "222222222",
            "identity": {
                "sourceIp": "2a02:a445:6d36:1:1e3:a188:313c:1d31",
                "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_1_6) "
                             "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2743.116 Safari/537.36",
            },
            "resourcePath": "/{proxy+}",
            "httpMethod": "GET",
            "apiId": "xxxxxxxxxx"
        }
    }
    return event
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_factory_fixture():\n def _factory(device_id, event_type=\"DEVICE_EVENT\", capability='',\n attribute='Updated', value='Value', data=None):\n event = Mock()\n event.event_type = event_type\n event.device_id = device_id\n event.component_id = 'main'\n event.capability = capability\n event.attribute = attribute\n event.value = value\n event.data = data\n event.location_id = str(uuid4())\n return event\n return _factory", "def test_describe_event(self):\n pass", "def test_future_event(self):\n pass", "def test_get_event(self):\n event = Event(self.client, 123, {})\n\n self.assertEqual(event.action, \"ticket_create\")\n self.assertEqual(event.created, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(event.duration, 300.56)\n self.assertIsNotNone(event.entity)\n self.assertEqual(event.id, 123)\n self.assertEqual(event.message, \"None\")\n self.assertIsNone(event.percent_complete)\n self.assertIsNone(event.rate)\n self.assertTrue(event.read)\n self.assertIsNotNone(event.secondary_entity)\n self.assertTrue(event.seen)\n self.assertIsNone(event.status)\n self.assertIsNone(event.time_remaining)\n self.assertEqual(event.username, \"exampleUser\")", "def test_create_event(\n event_manager: EventManager, subscriber: Mock, input: bytes, expected: tuple\n) -> None:\n event_manager.handler(input)\n assert subscriber.call_count == 1\n\n event: Event = subscriber.call_args[0][0]\n assert event.topic == expected[\"topic\"]\n assert event.source == expected[\"source\"]\n assert event.id == expected[\"source_idx\"]\n assert event.group == expected[\"group\"]\n assert event.state == expected[\"state\"]\n assert event.is_tripped is expected[\"tripped\"]", "def test_api_predictor_events_get(self):\n pass", "def test_new_general_event(client, transactional_db, mocker):\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n settings.SNS_ARN = arn\n mock = mocker.patch('coordinator.api.models.boto3.client')\n assert Event.objects.count() == 0\n\n ev = Event(event_type='error', message='test error event')\n ev.save()\n assert Event.objects.count() == 1\n assert mock().publish.call_count == 1\n message = {\n 'default': json.dumps({\n 'event_type': 'error',\n 'message': 'test error event',\n 'task_service': None,\n 'task': None,\n 'release': None\n })\n }\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n mock().publish.assert_called_with(Message=json.dumps(message),\n MessageStructure='json',\n TopicArn=arn)\n settings.SNS_ARN = None", "async def test_api_fire_event_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\n\n Also test if our data came through.\n \"\"\"\n if \"test\" in event.data:\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_with_data\", listener)\n\n await mock_api_client.post(\"/api/events/test_event_with_data\", json={\"test\": 1})\n\n await hass.async_block_till_done()\n\n assert len(test_value) == 1", "def make_event(entity_id):\n domain = split_entity_id(entity_id)[0]\n state = mock.MagicMock(\n state=\"not blank\",\n domain=domain,\n entity_id=entity_id,\n object_id=\"entity\",\n attributes={},\n )\n return mock.MagicMock(data={\"new_state\": state}, time_fired=12345)", "def test_mqtt_event(event_manager: EventManager, subscriber: Mock) -> None:\n mqtt_event = {\n \"topic\": \"tns1:Device/tnsaxis:Sensor/PIR\",\n \"source\": \"sensor\",\n \"source_idx\": \"0\",\n \"type\": \"state\",\n \"value\": 
\"0\",\n }\n event_manager.handler(mqtt_event)\n assert subscriber.call_count == 1\n\n event: Event = subscriber.call_args[0][0]\n assert event.operation == EventOperation.INITIALIZED\n assert event.topic == \"tns1:Device/tnsaxis:Sensor/PIR\"\n assert event.id == \"0\"\n assert event.state == \"0\"\n assert not event.is_tripped\n\n mqtt_event[\"value\"] = \"1\"\n event_manager.handler(mqtt_event)\n assert subscriber.call_count == 2\n\n event: Event = subscriber.call_args[0][0]\n assert event.operation == EventOperation.CHANGED\n assert event.state == \"1\"\n assert event.is_tripped", "def test_event_object():\n data = retrieve_fixture()\n event = Event(event=data)\n\n assert event.event_month == \"09\"\n assert event.event_day == \"12\"\n assert event.event_year == \"2020\"", "def event_request_factory_fixture(event_factory):\n def _factory(device_ids=None, events=None):\n request = Mock()\n request.installed_app_id = uuid4()\n if events is None:\n events = []\n if device_ids:\n events.extend([event_factory(id) for id in device_ids])\n events.append(event_factory(uuid4()))\n events.append(event_factory(device_ids[0], event_type=\"OTHER\"))\n request.events = events\n return request\n return _factory", "def test_past_event(self):\n pass", "def testEventInit(self):\n e1 = Event(5, 'obj', 'message')\n self.assertEqual(e1.timestamp, 5)\n self.assertEqual(e1.eventObject, 'obj')\n self.assertEqual(e1.logMessage, 'message')", "async def test_api_fire_event_with_no_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\"\"\"\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test.event_no_data\", listener)\n\n await mock_api_client.post(\"/api/events/test.event_no_data\")\n await hass.async_block_till_done()\n\n assert len(test_value) == 1", "def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass", "def sample_document_callback_assert(event):\n assert event.setter is mock_session", "def test_load_response_descriptor_events_event_event_resource(self):\n pass", "def test_run(self, init_event, mocker):\n mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"enabled\",\n new_callable=mocker.PropertyMock(return_value=True),\n )\n mock_stats = mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"stats\",\n new_callable=mocker.PropertyMock,\n )\n mock_item_map = mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"item_map\",\n new_callable=mocker.PropertyMock,\n )\n\n mock_stats.return_value = mocker.MagicMock(\n spec=houdini_toolbox.events.stats.HoudiniEventStats\n )\n\n mock_map = {}\n mock_item_map.return_value = mock_map\n\n event = init_event()\n\n mock_item1 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item1.run.side_effect = lambda sa: sa[\"order\"].append(mock_item1)\n\n mock_item2 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item2.run.side_effect = lambda sa: sa[\"order\"].append(mock_item2)\n\n mock_item3 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item3.run.side_effect = lambda sa: sa[\"order\"].append(mock_item3)\n\n # Assign objects to event map with priorities.\n mock_map[0] = [mock_item2]\n mock_map[15] = [mock_item3]\n mock_map[5] = [mock_item1]\n\n scriptargs = {\"key\": \"value\", \"order\": []}\n\n 
expected_scriptargs = {\n \"key\": \"value\",\n # We expect events to be run in decreasing priority order\n \"order\": [mock_item3, mock_item1, mock_item2],\n }\n\n # Run the test event.\n event.run(scriptargs)\n\n # Make sure each thing was ran.\n mock_item1.run.assert_called_once()\n mock_item2.run.assert_called_once()\n mock_item3.run.assert_called_once()\n\n assert scriptargs == expected_scriptargs\n\n # Ensure the context manager was called.\n mock_stats.return_value.__enter__.assert_called_once()\n mock_stats.return_value.__exit__.assert_called_once()", "def test_events(self):\n\n response = self.client.get(reverse('events'))\n\n assert response.status_code == 200", "def test_post_add_log_event(self):\n pass", "def test_instantiation(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n self.assertEqual(event.guild.id, 12345)\n self.assertEqual(event.title, 'Some title')\n self.assertEqual(event.date, datetime(2020, 10, 10, 10, 10, tzinfo=utc))\n self.assertEqual(event.description, 'Some description')", "def test_create_event_load(self):\n res = self.client.get('/create-event')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Create Event' in data", "def init_event(mocker):\n mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent, \"__init__\", lambda x, y: None\n )\n\n def _create():\n return houdini_toolbox.events.event.HoudiniEvent(None)\n\n return _create", "def test_timestamp_noint(self, mock):\n mock.configure_mock(**(self.config_payload(True, False)))\n self.assertRaises(\n TypeError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def setUp(self):\n self.output = StringIO.StringIO()\n self.formatter = json_out.Json(None, self.output)\n self.event_object = JsonTestEvent()", "async def test_api_fire_event_context(\n hass: HomeAssistant, mock_api_client: TestClient, hass_access_token: str\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\"\"\"\n test_value.append(event)\n\n hass.bus.async_listen(\"test.event\", listener)\n\n await mock_api_client.post(\n \"/api/events/test.event\",\n headers={\"authorization\": f\"Bearer {hass_access_token}\"},\n )\n await hass.async_block_till_done()\n\n refresh_token = await hass.auth.async_validate_access_token(hass_access_token)\n\n assert len(test_value) == 1\n assert test_value[0].context.user_id == refresh_token.user.id", "async def test_event(bus: lightbus.BusNode, dummy_api):\n\n received_kwargs = []\n\n async def listener(**kwargs):\n received_kwargs.append(kwargs)\n\n async def co_fire_event():\n await asyncio.sleep(0.01)\n return await bus.my.dummy.my_event.fire_async(field='Hello! 😎')\n\n async def co_listen_for_events():\n await bus.my.dummy.my_event.listen_async(listener)\n # Consume a single event, rather than loop forever using consume_events()\n await bus.bus_client._consume_events_once()\n\n await asyncio.gather(co_fire_event(), co_listen_for_events())\n assert received_kwargs == [{'field': 'Hello! 
😎'}]", "def mock_event(player: dict) -> dict:\n return {\n \"body\": {\n \"Player\": player,\n \"playerId\": \"player_hash\",\n \"action\": \"attack\",\n \"enhanced\": False,\n }\n }", "def test_get_Events(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n event_b = Event.objects.create(title=\"Spring clean\",\n start=datetime.strptime(\"2020-04-03 09:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-04-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=7),\n recurrence_interval=0, description=\"get the church clean\", website_publish=True)\n event_b.invites.add(self.comms_grp)\n event_b.save()\n client = APIClient()\n resp = client.get('/api/events')\n self.assertEqual(resp.status_code, 200)\n events = Event.objects.all()\n self.assertEqual(events[0].title, json.loads(resp.content)[1]['title'])\n self.assertEqual(events[1].title, json.loads(resp.content)[0]['title'])", "def test_send_event_raises():\n send_event('pytest-reportportal', '5.0.5')", "def test_process_event(\n self, mock_objects, mock__create_from_stripe_object, mock_atomic\n ):\n # Set up mocks\n mock_objects.filter.return_value.exists.return_value = False\n mock_data = {\"id\": \"foo_id\", \"other_stuff\": \"more_things\"}\n\n result = Event.process(data=mock_data)\n\n # Check that all the expected work was performed\n mock_objects.filter.assert_called_once_with(id=mock_data[\"id\"])\n mock_objects.filter.return_value.exists.assert_called_once_with()\n mock_atomic.return_value.__enter__.assert_called_once_with()\n mock__create_from_stripe_object.assert_called_once_with(\n mock_data, api_key=djstripe_settings.STRIPE_SECRET_KEY\n )\n (\n mock__create_from_stripe_object.return_value.invoke_webhook_handlers\n ).assert_called_once_with()\n # Make sure the event was returned.\n self.assertEqual(mock__create_from_stripe_object.return_value, result)", "async def test_firing_bus_event(hass: HomeAssistant, monkeypatch) -> None:\n config = {\n \"rflink\": {\"port\": \"/dev/ttyABC0\"},\n DOMAIN: {\n \"platform\": \"rflink\",\n \"devices\": {\n \"protocol_0_0\": {\n \"name\": \"test\",\n \"aliases\": [\"test_alias_0_0\"],\n \"fire_event\": True,\n }\n },\n },\n }\n\n # setup mocking rflink module\n event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)\n\n calls = []\n\n @callback\n def listener(event):\n calls.append(event)\n\n hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)\n\n # test event for new unconfigured sensor\n event_callback({\"id\": \"protocol_0_0\", \"command\": \"down\"})\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n assert calls[0].data == {\"state\": \"down\", \"entity_id\": f\"{DOMAIN}.test\"}", "def test_register_events():\n event_bus = MockEventBus()\n test_module.register_events(event_bus)\n assert event_bus.topic_patterns_to_subscribers", "def test_otoroshi_controllers_adminapi_events_controller_alert_events(self):\n pass", "def setUp(self):\n event_bus._event_bus = event_bus._EventBus()", "def subscriber(event_manager: EventManager) -> Mock:\n callback = Mock()\n event_manager.subscribe(callback)\n return callback", "def 
test_make_event_defaults_dr(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_event(\n 'delivery_report', 'abc123', delivery_status='pending')\n expected_event = TransportEvent(\n event_type='delivery_report', user_message_id='abc123',\n delivery_status='pending',\n transport_type=msg_helper.transport_type,\n transport_name=msg_helper.transport_name,\n transport_metadata={}, helper_metadata={},\n # These fields are generated in both messages, so copy them.\n event_id=event['event_id'], timestamp=event['timestamp'])\n self.assertEqual(expected_event, event)", "def setUp(self):\n super().setUp()\n with mock.patch('pywikibot.comms.eventstreams.EventSource'):\n self.es = EventStreams(url='dummy url')", "def setUp(self):\n super().setUp()\n with mock.patch('pywikibot.comms.eventstreams.EventSource'):\n self.es = EventStreams(url='dummy url')", "async def test_event(bus: lightbus.BusNode, dummy_api, stream_use):\n bus.bus_client.transport_registry.get_event_transport('default').stream_use = stream_use\n manually_set_plugins({})\n received_kwargs = []\n received_api_name = None\n received_event_name = None\n\n async def listener(api_name, event_name, **kwargs):\n nonlocal received_kwargs, received_api_name, received_event_name\n received_kwargs.append(kwargs)\n received_api_name = api_name\n received_event_name = event_name\n\n await bus.my.dummy.my_event.listen_async(listener)\n await asyncio.sleep(0.01)\n await bus.my.dummy.my_event.fire_async(field='Hello! 😎')\n await asyncio.sleep(0.01)\n\n # await asyncio.gather(co_fire_event(), co_listen_for_events())\n assert received_kwargs == [{'field': 'Hello! 😎'}]\n assert received_api_name == 'my.dummy'\n assert received_event_name == 'my_event'", "def test_timestamp_backward(self, mock):\n mock.configure_mock(**(self.config_payload(0, 1)))\n self.assertRaises(\n AssertionError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def test_simple(self):\n\n eventFilter = EventFilter(\"FooEvent\")\n\n # Start a session\n traceids = ['foobar']\n eventCallback = Mock()\n session = eventFilter.start(traceids, eventCallback)\n\n # The first FooEvent should be handled\n fooEvent1 = FooEvent(traceid=traceids)\n session.handle(fooEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n ])\n\n # The second FooEvent should also be handled\n fooEvent2 = FooEvent(traceid=traceids)\n session.handle(fooEvent2)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n ])\n\n # The BarEvent should not be handled\n barEvent = BarEvent(traceid=traceids)\n session.handle(barEvent)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n ])\n\n # No more events should be added when the session is finalized\n session.finalize()\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n ])", "def test_any(self):\n\n eventFilter = EventFilter(\"*\")\n\n # Start a session\n traceids = ['foobar']\n eventCallback = Mock()\n session = eventFilter.start(traceids, eventCallback)\n\n # The first FooEvent should be handled\n fooEvent1 = FooEvent(traceid=traceids)\n session.handle(fooEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n ])\n\n # The second FooEvent should also be handled\n fooEvent2 = FooEvent(traceid=traceids)\n session.handle(fooEvent2)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n ])\n\n # The BarEvent should 
also be handled\n barEvent1 = BarEvent(traceid=traceids)\n session.handle(barEvent1)\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n call(barEvent1),\n ])\n\n # No more events should be added when the session is finalized\n session.finalize()\n self.assertEqual(eventCallback.mock_calls, [\n call(fooEvent1),\n call(fooEvent2),\n call(barEvent1),\n ])", "def test_destroy_with_event(self, mocker):\n\n def stop_mock(event):\n time.sleep(0.1)\n event.set()\n return\n\n def stop_mock_2():\n return\n\n split_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n split_async_task_mock.stop.side_effect = stop_mock_2\n\n def _split_task_init_mock(self, synchronize_splits, period):\n self._task = split_async_task_mock\n self._period = period\n mocker.patch('splitio.client.factory.SplitSynchronizationTask.__init__',\n new=_split_task_init_mock)\n\n segment_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n segment_async_task_mock.stop.side_effect = stop_mock_2\n\n def _segment_task_init_mock(self, synchronize_segments, period):\n self._task = segment_async_task_mock\n self._period = period\n mocker.patch('splitio.client.factory.SegmentSynchronizationTask.__init__',\n new=_segment_task_init_mock)\n\n imp_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n imp_async_task_mock.stop.side_effect = stop_mock\n\n def _imppression_task_init_mock(self, synchronize_impressions, period):\n self._period = period\n self._task = imp_async_task_mock\n mocker.patch('splitio.client.factory.ImpressionsSyncTask.__init__',\n new=_imppression_task_init_mock)\n\n evt_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n evt_async_task_mock.stop.side_effect = stop_mock\n\n def _event_task_init_mock(self, synchronize_events, period):\n self._period = period\n self._task = evt_async_task_mock\n mocker.patch('splitio.client.factory.EventsSyncTask.__init__', new=_event_task_init_mock)\n\n imp_count_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n imp_count_async_task_mock.stop.side_effect = stop_mock\n\n def _imppression_count_task_init_mock(self, synchronize_counters):\n self._task = imp_count_async_task_mock\n mocker.patch('splitio.client.factory.ImpressionsCountSyncTask.__init__',\n new=_imppression_count_task_init_mock)\n\n telemetry_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n telemetry_async_task_mock.stop.side_effect = stop_mock\n\n def _telemetry_task_init_mock(self, synchronize_telemetry, synchronize_telemetry2):\n self._task = telemetry_async_task_mock\n mocker.patch('splitio.client.factory.TelemetrySyncTask.__init__',\n new=_telemetry_task_init_mock)\n\n split_sync = mocker.Mock(spec=SplitSynchronizer)\n split_sync.synchronize_splits.return_value = []\n segment_sync = mocker.Mock(spec=SegmentSynchronizer)\n segment_sync.synchronize_segments.return_values = None\n syncs = SplitSynchronizers(split_sync, segment_sync, mocker.Mock(),\n mocker.Mock(), mocker.Mock(), mocker.Mock())\n tasks = SplitTasks(split_async_task_mock, segment_async_task_mock, imp_async_task_mock,\n evt_async_task_mock, imp_count_async_task_mock, telemetry_async_task_mock)\n\n # Setup synchronizer\n def _split_synchronizer(self, ready_flag, some, auth_api, streaming_enabled, sdk_matadata, telemetry_runtime_producer, sse_url=None, client_key=None):\n synchronizer = Synchronizer(syncs, tasks)\n self._ready_flag = ready_flag\n self._synchronizer = synchronizer\n self._streaming_enabled = False\n self._telemetry_runtime_producer = telemetry_runtime_producer\n 
mocker.patch('splitio.sync.manager.Manager.__init__', new=_split_synchronizer)\n\n # Start factory and make assertions\n factory = get_factory('some_api_key')\n try:\n factory.block_until_ready(1)\n except:\n pass\n\n assert factory.ready is True\n assert factory.destroyed is False\n\n event = threading.Event()\n factory.destroy(event)\n assert not event.is_set()\n time.sleep(1)\n assert event.is_set()\n assert len(imp_async_task_mock.stop.mock_calls) == 1\n assert len(evt_async_task_mock.stop.mock_calls) == 1\n assert len(imp_count_async_task_mock.stop.mock_calls) == 1\n assert factory.destroyed is True", "def test_destroy_with_event(self, mocker):\n spl_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n def _split_task_init_mock(self, api, storage, period, event):\n self._task = spl_async_task_mock\n self._api = api\n self._storage = storage\n self._period = period\n self._event = event\n event.set()\n mocker.patch('splitio.client.factory.SplitSynchronizationTask.__init__', new=_split_task_init_mock)\n\n sgm_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n worker_pool_mock = mocker.Mock(spec=workerpool.WorkerPool)\n def _segment_task_init_mock(self, api, storage, split_storage, period, event):\n self._task = sgm_async_task_mock\n self._worker_pool = worker_pool_mock\n self._api = api\n self._segment_storage = storage\n self._split_storage = split_storage\n self._period = period\n self._event = event\n event.set()\n mocker.patch('splitio.client.factory.SegmentSynchronizationTask.__init__', new=_segment_task_init_mock)\n\n imp_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n def _imppression_task_init_mock(self, api, storage, refresh_rate, bulk_size):\n self._logger = mocker.Mock()\n self._impressions_api = api\n self._storage = storage\n self._period = refresh_rate\n self._task = imp_async_task_mock\n self._failed = mocker.Mock()\n self._bulk_size = bulk_size\n mocker.patch('splitio.client.factory.ImpressionsSyncTask.__init__', new=_imppression_task_init_mock)\n\n evt_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n def _event_task_init_mock(self, api, storage, refresh_rate, bulk_size):\n self._logger = mocker.Mock()\n self._impressions_api = api\n self._storage = storage\n self._period = refresh_rate\n self._task = evt_async_task_mock\n self._failed = mocker.Mock()\n self._bulk_size = bulk_size\n mocker.patch('splitio.client.factory.EventsSyncTask.__init__', new=_event_task_init_mock)\n\n tmt_async_task_mock = mocker.Mock(spec=asynctask.AsyncTask)\n def _telemetry_task_init_mock(self, api, storage, refresh_rate):\n self._task = tmt_async_task_mock\n self._logger = mocker.Mock()\n self._api = api\n self._storage = storage\n self._period = refresh_rate\n mocker.patch('splitio.client.factory.TelemetrySynchronizationTask.__init__', new=_telemetry_task_init_mock)\n\n # Start factory and make assertions\n factory = get_factory('some_api_key')\n assert factory.destroyed is False\n\n factory.block_until_ready()\n time.sleep(1) # give a chance for the bg thread to set the ready status\n assert factory.ready\n\n event = threading.Event()\n factory.destroy(event)\n\n # When destroy is called an event is created and passed to each task when\n # stop() is called. 
We will extract those events assert their type, and assert that\n # by setting them, the main event gets set.\n splits_event = spl_async_task_mock.stop.mock_calls[0][1][0]\n segments_event = worker_pool_mock.stop.mock_calls[0][1][0] # Segment task stops when wp finishes.\n impressions_event = imp_async_task_mock.stop.mock_calls[0][1][0]\n events_event = evt_async_task_mock.stop.mock_calls[0][1][0]\n telemetry_event = tmt_async_task_mock.stop.mock_calls[0][1][0]\n\n # python2 & 3 compatibility\n try:\n from threading import _Event as __EVENT_CLASS\n except ImportError:\n from threading import Event as __EVENT_CLASS\n\n assert isinstance(splits_event, __EVENT_CLASS)\n assert isinstance(segments_event, __EVENT_CLASS)\n assert isinstance(impressions_event, __EVENT_CLASS)\n assert isinstance(events_event, __EVENT_CLASS)\n assert isinstance(telemetry_event, __EVENT_CLASS)\n assert not event.is_set()\n\n splits_event.set()\n segments_event.set()\n impressions_event.set()\n events_event.set()\n telemetry_event.set()\n\n time.sleep(1) # I/O wait to trigger context switch, to give the waiting thread\n # a chance to run and set the main event.\n\n assert event.is_set()\n assert factory.destroyed", "def test_watch_function_method(self):\n self.wrapper.get(\"abc\", watch=self._assert_in_reactor_thread)\n event = object()\n self.client.watch(event)\n self.assertIdentical(self.received_event, event)", "def test_event_id(self):\n result = self.test_client.event_id\n\n assert result == \"2130389\"", "async def test_http2_wrong_event(mocker):\n mocker.patch(\"aiosonic.http2.Http2Handler.__init__\", lambda x: None)\n mocker.patch(\"aiosonic.http2.Http2Handler.h2conn\")\n\n handler = Http2Handler()\n\n async def coro():\n pass\n\n with pytest.raises(MissingEvent):\n await handler.handle_events([WrongEvent])", "def test_make_event_defaults_ack(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_event('ack', 'abc123', sent_message_id='sent')\n expected_event = TransportEvent(\n event_type='ack', user_message_id='abc123', sent_message_id='sent',\n transport_type=msg_helper.transport_type,\n transport_name=msg_helper.transport_name,\n transport_metadata={}, helper_metadata={},\n # These fields are generated in both messages, so copy them.\n event_id=event['event_id'], timestamp=event['timestamp'])\n self.assertEqual(expected_event, event)", "def test_create_event(self):\n event_type = 'SERVICE NOTIFICATION'\n fields = EVENT_FIELDS.get(event_type, None)\n parts = [\n 'nagiosadmin',\n 'nagios4',\n 'Root Partition',\n 'CRITICAL',\n 'notify-service-by-email',\n 'DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]\n event = create_event(\n timestamp=1603813628, event_type=event_type, hostname='docker-desktop', fields=fields._make(parts)\n )\n\n assert event['timestamp'] == 1603813628\n assert event['event_type'] == 'SERVICE NOTIFICATION'\n assert event[\"msg_title\"] == 'Root Partition'\n assert event[\"source_type_name\"] == 'SERVICE NOTIFICATION'\n assert event[\"msg_text\"] == 'CRITICAL'\n assert event['tags'] == [\n 'contact:nagiosadmin',\n 'host:nagios4',\n 'check_name:Root Partition',\n 'event_state:CRITICAL',\n 'notification_type:notify-service-by-email',\n 'payload:DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]", "def test_no_arg(self):\n self.assertRaises(ValueError, NewickEventFactory)", "def test_event_populate(event_args):\n\n event = HealthEvent()\n event.populate(**event_args)\n\n assert event.source == event_args.get(\"source\")\n assert event.component_type == 
event_args.get(\"component_type\")\n assert event.event_type == event_args.get(\"event_type\")\n assert event.environment == event_args.get(\"environment\")\n assert event.service == event_args.get(\"service\")\n assert event.healthy == event_args.get(\"healthy\")\n assert event.resource[\"Name\"] == event_args.get(\"resource_name\")\n # assert event.resource[\"ID\"] == event_args.get(\"None\")\n assert event.source_data == event_args.get(\"source_data\")\n assert event.metric_data == event_args.get(\"metric_data\")", "def test_dispatch(self):\r\n self.hit = False\r\n\r\n def handler(event):\r\n self.hit = True\r\n\r\n self.events.register(handler, TestEvent)\r\n \r\n self.events.dispatch(TestEvent())\r\n\r\n self.assertTrue(self.hit)", "def test_load_events(self):\n command = '{0}'.format(\n os.path.join(self.datadir, 'monol_testA.evt'))\n hen.read_events.main(command.split())\n new_filename = self.first_event_file\n ev = hen.io.load_events(new_filename)\n assert hasattr(ev, 'header')\n assert hasattr(ev, 'gti')", "def test_dispatch_event(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [])\n msg = msg_helper.make_ack()\n yield worker_helper.dispatch_event(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [msg])", "def mock_bus(hass):\n hass.bus.listen = mock.MagicMock()", "def testRun(self):\n stub = NetworkObjectStub()\n\n e1 = Event(5, stub, 'message')\n e2 = Event(0, stub, 'message')\n e3 = Event(7, stub, 'message')\n e4 = PacketEvent(1, 'sender2', stub, 4, 'message5')\n eventList = [e1, e2, e3, e4]\n\n eventHandler = EventHandler('network', eventList)\n eventHandler.run(0, 4)\n with self.assertRaises(Empty) as e:\n eventHandler.run(0, 1)", "def test_unsupported_event(event_manager: EventManager, subscriber: Mock) -> None:\n event_manager.handler(GLOBAL_SCENE_CHANGE)\n subscriber.assert_not_called()", "async def test_validation_event(loop, bus: lightbus.BusNode, dummy_api, mocker):\n config = Config.load_dict({\n 'apis': {\n 'default': {'validate': True, 'strict_validation': True}\n }\n })\n bus.bus_client.config = config\n mocker.patch('jsonschema.validate', autospec=True)\n\n async def co_listener(*a, **kw):\n pass\n\n await bus.bus_client.schema.add_api(dummy_api)\n await bus.bus_client.schema.save_to_bus()\n await bus.bus_client.schema.load_from_bus()\n\n listener_task = await bus.bus_client.listen_for_event('my.dummy', 'my_event', co_listener)\n\n await asyncio.sleep(0.1)\n await bus.my.dummy.my_event.fire_async(field='Hello')\n\n await cancel(listener_task)\n\n # Validate gets called\n jsonschema.validate.assert_called_with(\n {'field': 'Hello'},\n {\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'field': {'type': 'string'}\n },\n 'required': ['field'],\n 'title': 'Event my.dummy.my_event parameters'\n }\n )", "def test_event_can_be_comitted(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=timezone('Europe/Paris')),\n 'Some description')\n\n self.db.session.add(event)\n self.db.session.commit()\n\n queried = Event.query.one()\n self.assertEqual(queried.guild.id, '12345')\n self.assertEqual(queried.title, 'Some title')\n self.assertEqual(\n queried.date,\n datetime(2020, 10, 10, 10, 10, tzinfo=timezone('Europe/Paris')))\n self.assertEqual(queried.timezone, timezone('Europe/Paris'))\n 
self.assertEqual(queried.timezone_name, 'Europe/Paris')", "def test_can_list_event(app):\n with app.test_client() as client:\n events = client.get('/api/v1/event/')\n assert events.status_code == 200\n assert 'events' in events.json", "def test_lambda_wrapper_basic_events(reporter_mock, context):\n\n @lumigo_tracer(token=\"123\")\n def lambda_test_function(event, context):\n pass\n\n lambda_test_function({}, context)\n function_span = SpansContainer.get_span().function_span\n assert not SpansContainer.get_span().spans\n assert \"started\" in function_span\n assert \"ended\" in function_span\n assert reporter_mock.call_count == 2\n first_send = reporter_mock.call_args_list[0][1][\"msgs\"]\n assert len(first_send) == 1\n assert first_send[0][\"id\"].endswith(\"_started\")\n assert first_send[0][\"maxFinishTime\"]", "def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass", "def test_factory(self):\n callbacks = {'foo': Mock()}\n RequestHandler = service_call.create_request_handler(callbacks)\n self.assertEqual(callbacks, RequestHandler.callbacks)", "def test_make_event_all_fields(self):\n msg_helper = MessageHelper()\n event_fields = {\n 'event_type': 'ack',\n 'user_message_id': 'abc123',\n 'sent_message_id': '123abc',\n 'transport_type': 'irc',\n 'transport_name': 'vuminet',\n 'transport_metadata': {'foo': 'bar'},\n 'helper_metadata': {'foo': {}},\n\n 'timestamp': datetime.utcnow(),\n 'event_id': 'e6b7efecda8e42988b1e6905ad40fae1',\n 'endpoint': 'foo_ep',\n }\n event = msg_helper.make_event(**event_fields)\n expected_fields = event_fields.copy()\n expected_fields.update({\n 'message_type': TransportEvent.MESSAGE_TYPE,\n 'message_version': TransportEvent.MESSAGE_VERSION,\n 'routing_metadata': {\n 'endpoint_name': expected_fields.pop('endpoint'),\n }\n })\n self.assertEqual(expected_fields, event.payload)", "def test_make_event_extra_fields(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_event(\n 'ack', 'abc123', sent_message_id='sent', foo='bar', baz='quux')\n self.assert_message_fields(event, {'foo': 'bar', 'baz': 'quux'})", "def test_can_create_event(app, auth):\n event_data = {\"name\": \"Puggies Convention\", \"date\": \"2018-09-09\"}\n with app.test_client() as client:\n created = client.post('/api/v1/event/', json=event_data, headers=auth)\n assert created.status_code == 201", "def test_pir_init(event_manager: EventManager, subscriber: Mock) -> None:\n event_manager.handler(PIR_INIT)\n assert subscriber.call_count == 1\n\n event: Event = subscriber.call_args[0][0]\n assert event.state == \"0\"\n assert not event.is_tripped\n\n event_manager.handler(PIR_CHANGE)\n event: Event = subscriber.call_args[0][0]\n assert event.state == \"1\"\n assert event.is_tripped", "def test_editEvent(self):\n event_a = Event.objects.create(title=\"Christmas meal\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n client = APIClient()\n update_data = {\"event_owner\": self.person_a.pk, \"title\": \"Christmas meal\", \"start\":\n datetime.strptime(\"2020-12-07 12:00\", \"%Y-%m-%d %H:%M\"),\n \"end\": datetime.strptime(\"2020-12-07 16:00\", \"%Y-%m-%d %H:%M\"), \"duration\": timedelta(hours=4),\n \"invites\": [self.comms_grp.pk], \"recurrence_interval\": 0, \"description\": 
\"Christmas party yahoo\",\n \"website_publish\": False}\n resp = client.put('/api/events/christmas-meal', data=update_data, format='json')\n self.assertEqual(resp.status_code, 200)\n event_check = Event.objects.get(title=\"Christmas meal\")\n self.assertEqual(event_check.description, \"Christmas party yahoo\")", "def test_create_message_with_succes(self, mock_client): \n\n event = {\n 'operation': 'createMessage', \n 'arguments': {\n 'template': 'my-sample-geofence-id',\n 'input': {\n 'service': 'APNS',\n 'action': 'OPEN_APP',\n 'title': 'Sample Title',\n 'body': 'This is a sample body'\n }\n }\n }\n\n response = {\n \"Arn\": f'arn:aws:mobiletargeting:eus-east-1:SOME_ACCOUNT_ID:templates/my-sample-geofence-id/PUSH',\n \"RequestID\": \"some-request-id\",\n \"Message\": 'some message' \n }\n\n mock_client().create_push_template.return_value = response\n response = manageMessages.handler(event, None)\n\n self.assertTrue(response)\n self.assertEqual(response['status'], 'MESSAGE_CREATED')", "def test_get_event_by_attributes(self):\n def clean_up(trainee, trainer):\n self.database.mongo.event.delete_many({\n 'title': 'testEvent',\n 'creator_id': ObjectId(trainee._id)\n })\n\n self.database.mongo.event.delete_many({\n 'title': 'testEvent',\n 'creator_id': ObjectId(trainer._id)\n })\n\n trainee = self.database.get_trainee_by_username('testtrainee')\n trainer = self.database.get_trainer_by_username('testtrainer')\n\n try:\n clean_up(trainee, trainer)\n event = Event(\n _id=None,\n creator_id=trainee._id,\n title='testEvent',\n date=datetime(2020, 12, 2),\n description='a simple desc',\n participant_id=trainer._id\n )\n self.database.create_event(event)\n database_event = self.database.mongo.event.find_one({\n 'title': event.title,\n 'creator_id': ObjectId(trainee._id)\n })\n assert database_event is not None\n\n database_event = self.database.get_event_by_attributes(creator_id=event.creator_id,\n title=event.title)\n assert database_event is not None\n assert database_event.title == event.title\n\n database_event = self.database.get_event_by_attributes(creator_id=event.creator_id,\n date=str(event.date))\n assert database_event is not None\n assert database_event.date == event.date\n\n database_event = self.database.get_event_by_attributes(creator_id=event.creator_id,\n date=event.date)\n assert database_event is not None\n assert database_event.date == event.date\n\n database_event = self.database.get_event_by_attributes(creator_id=event.creator_id,\n description=event.description)\n assert database_event is not None\n assert database_event.description == event.description\n\n database_event = self.database.get_event_by_attributes(creator_id=event.creator_id,\n participant_id=event.participant_id)\n assert database_event is not None\n assert database_event.participant_id == event.participant_id\n\n finally:\n clean_up(trainee, trainer)", "def setUp(self):\n user = User.objects.create(username=\"nerd\")\n self.description = \"Write world class code\"\n # specify owner of a event\n self.event = Event(description=self.description, owner=user)", "def test_make_event_defaults_nack(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_event('nack', 'abc123', nack_reason='elves')\n expected_event = TransportEvent(\n event_type='nack', user_message_id='abc123', nack_reason='elves',\n transport_type=msg_helper.transport_type,\n transport_name=msg_helper.transport_name,\n transport_metadata={}, helper_metadata={},\n # These fields are generated in both messages, so copy them.\n 
event_id=event['event_id'], timestamp=event['timestamp'])\n self.assertEqual(expected_event, event)", "def visit_event(self, event):", "def test_initialize_event_twice(event_manager: EventManager, subscriber: Mock) -> None:\n event_manager.handler(VMD4_ANY_INIT)\n assert subscriber.call_count == 1\n\n event_manager.handler(VMD4_ANY_INIT)\n assert subscriber.call_count == 2", "def test_event_page(self):\n res = self.client.get('/events')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Upcoming Events' in data", "def test_get_event_type__mapped(self, mocker):\n mock_mappings = mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEventFactory,\n \"_mappings\",\n new_callable=mocker.PropertyMock,\n )\n\n mock_name = mocker.MagicMock(spec=str)\n mock_event_name = mocker.MagicMock()\n mock_event_name.value = mock_name\n mock_return = mocker.MagicMock(spec=houdini_toolbox.events.event.HoudiniEvent)\n\n mock_mappings.return_value = {mock_event_name: mock_return}\n\n result = houdini_toolbox.events.event.HoudiniEventFactory.get_event_type(\n mock_event_name\n )\n\n assert result == mock_return.return_value\n mock_return.assert_called_with(mock_name)", "def test_api_can_create_a_event(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)", "def test_get_event_admin_correct_event(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user2_headers, data=params)\n self.assertEqual(response.status_code, 403)", "def test_basic_return(self):\n env = self.trace(\"\"\"\n foo = objects.create_foo()\n \"\"\")\n \n events = self.function_events\n self.assertEqual(len(events), 2)\n \n event = events[0]\n self.assertIsInstance(event, TraceCall)\n self.assertTrue(event.atomic)\n self.assertEqual(event.function, objects.create_foo)\n self.assertEqual(event.module_name, objects.__name__)\n self.assertEqual(event.qual_name, 'create_foo')\n self.assertEqual(event.arguments, OrderedDict())\n \n event = events[1]\n self.assertIsInstance(event, TraceReturn)\n self.assertTrue(event.atomic)\n self.assertEqual(event.function, objects.create_foo)\n self.assertEqual(event.module_name, objects.__name__)\n self.assertEqual(event.qual_name, 'create_foo')\n self.assertEqual(event.value, env['foo'])", "async def test_api_get_event_listeners(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(const.URL_API_EVENTS)\n data = await resp.json()\n\n local = hass.bus.async_listeners()\n\n for event in data:\n assert local.pop(event[\"event\"]) == event[\"listener_count\"]\n\n assert len(local) == 0", "def test_process_event_exists(\n self, mock_objects, mock__create_from_stripe_object, mock_atomic\n ):\n # Set up mocks\n mock_objects.filter.return_value.exists.return_value = True\n mock_data = {\"id\": \"foo_id\", \"other_stuff\": \"more_things\"}\n\n result = Event.process(data=mock_data)\n\n # Make sure that the db was queried and the existing results used.\n mock_objects.filter.assert_called_once_with(id=mock_data[\"id\"])\n mock_objects.filter.return_value.exists.assert_called_once_with()\n mock_objects.filter.return_value.first.assert_called_once_with()\n # Make sure the webhook actions and event object creation were not performed.\n mock_atomic.return_value.__enter__.assert_not_called()\n mock__create_from_stripe_object.assert_not_called()\n (\n mock__create_from_stripe_object.return_value.invoke_webhook_handlers\n ).assert_not_called()\n # Make sure the 
existing event was returned.\n self.assertEqual(mock_objects.filter.return_value.first.return_value, result)", "def test_newEvent(self):\n client = APIClient()\n data = {\"event_owner\": self.person_a.id, \"title\": \"PCC\",\n \"start\": datetime.strptime(\"2020-06-24 19:00\", \"%Y-%m-%d %H:%M\"),\n \"end\": datetime.strptime(\"1985-06-21 21:00\", \"%Y-%m-%d %H:%M\"), \"duration\": timedelta(hours=2),\n \"recurrence_interval\": 3, \"invites\": [self.comms_grp.group_name], \"description\": \"PCC meeting\",\n \"website_publish\": False}\n resp = client.post('/api/events', data=data, format='json')\n self.assertEqual(resp.status_code, 201)\n query = Event.objects.get(title=\"PCC\")\n self.assertEqual(data['title'], query.title)", "async def test_api_fire_event_with_invalid_json(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\"\"\"\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_bad_data\", listener)\n\n resp = await mock_api_client.post(\n \"/api/events/test_event_bad_data\", data=json.dumps(\"not an object\")\n )\n\n await hass.async_block_till_done()\n\n assert resp.status == HTTPStatus.BAD_REQUEST\n assert len(test_value) == 0\n\n # Try now with valid but unusable JSON\n resp = await mock_api_client.post(\n \"/api/events/test_event_bad_data\", data=json.dumps([1, 2, 3])\n )\n\n await hass.async_block_till_done()\n\n assert resp.status == HTTPStatus.BAD_REQUEST\n assert len(test_value) == 0", "def testEventHandlerInit(self):\n stub = NetworkObjectStub()\n\n e1 = Event(5, stub, 'message')\n e2 = Event(0, stub, 'message')\n e3 = Event(7, stub, 'message')\n e4 = PacketEvent(1, 'sender2', stub, 4, 'message5')\n eventList = [e1, e2, e3, e4]\n\n eventHandler = EventHandler('network', eventList)\n self.assertEqual('network', eventHandler._network)\n self.assertEqual(4, eventHandler._queue.qsize())", "def test_list_events(self):\n\n def clean_up(trainee, trainer):\n self.database.mongo.event.delete_many({\n 'creator_id': ObjectId(trainee._id)\n })\n\n self.database.mongo.event.delete_many({\n 'creator_id': ObjectId(trainer._id)\n })\n\n trainee = self.database.get_trainee_by_username('testtrainee')\n trainer = self.database.get_trainer_by_username('testtrainer')\n\n try:\n clean_up(trainee, trainer)\n event = Event(\n _id=None,\n creator_id=trainee._id,\n title='testEvent',\n date=datetime(2020, 12, 2),\n description='a simple desc',\n participant_id=trainer._id\n )\n self.database.create_event(event)\n database_event = self.database.mongo.event.find_one({\n 'title': event.title,\n 'creator_id': ObjectId(trainee._id)\n })\n assert database_event is not None\n assert str(database_event['creator_id']) == event.creator_id\n assert str(database_event['participant_id']\n ) == event.participant_id\n\n event = Event(\n _id=None,\n creator_id=trainer._id,\n title='testEvent',\n date=datetime(2020, 12, 2),\n description='a simple desc',\n participant_id=trainee._id\n )\n self.database.create_event(event)\n database_event = self.database.mongo.event.find_one({\n 'title': event.title,\n 'creator_id': ObjectId(trainer._id)\n })\n assert database_event is not None\n assert str(database_event['creator_id']) == event.creator_id\n assert str(database_event['participant_id']\n ) == event.participant_id\n\n finally:\n clean_up(trainee, trainer)", "def test_timestamp_minus(self, mock):\n mock.configure_mock(**(self.config_payload(-1, -2)))\n self.assertRaises(\n AssertionError,\n 
lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def assertEventCalled(self, event_name, times=None):\n if event_name not in self._events:\n raise AssertionError(\"Event {} not mocked.\".format(event_name))\n\n if self._events[event_name] == 0:\n raise AssertionError(\"Event {} was not called.\".format(event_name))\n\n if times is not None and self._events[event_name] != times:\n raise AssertionError(\"Event {} was called {} instead of {}.\".format(\n event_name, self._events[event_name], times))", "def test_trigger():\n # assume\n subject = Subject()\n observer = Observer()\n subject.handle('test', observer.on_test)\n\n # act\n subject.trigger('test', 1, 'one', True)\n\n # assert\n assert observer.invocations[0] == (1, 'one', True), \"observer did not receive event\"", "async def test_new_event_sends_signal(hass):\n entry = Mock()\n entry.data = ENTRY_CONFIG\n\n axis_device = axis.device.AxisNetworkDevice(hass, entry)\n\n with patch.object(axis.device, \"async_dispatcher_send\") as mock_dispatch_send:\n axis_device.async_event_callback(action=OPERATION_INITIALIZED, event_id=\"event\")\n await hass.async_block_till_done()\n\n assert len(mock_dispatch_send.mock_calls) == 1\n assert len(mock_dispatch_send.mock_calls[0]) == 3", "def testPacketEventInit(self):\n e1 = PacketEvent(1, 'sender2', 'receiver3', 4, 'message5')\n self.assertEqual(e1.timestamp, 1)\n self.assertEqual(e1.eventObject, 'receiver3')\n self.assertEqual(e1.sender, 'sender2')\n self.assertEqual(e1.packet, 4)\n self.assertEqual(e1.logMessage, 'message5')", "def test_get_event_log(event_log_api_setup):\n api_response = event_log_api_setup.get_event_log(\n event_log_id=1,\n )\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def test_event_view_add(self):\n request = self.factory.get('/module/event/add/')\n request.user = self.user\n request.session = {}\n response = event_add(request)\n self.assertEqual(response.status_code, 200)\n\n response = self.client.post('/module/event/add/', data=\n {\n \"title\": \"test event\",\n \"description\": \"\",\n \"creator_id\": 1,\n \"created_on\": datetime.utcnow().replace(tzinfo=utc).strftime(\"%Y-%m-%d\"),\n \"calendar_id\": 1,\n }, follow=True)\n self.assertEqual(response.status_code, 200)\n\n request = self.factory.post('/module/event/add/',\n {\n \"title\": \"test event\",\n \"description\": \"\",\n \"creator_id\": 1,\n \"created_on\": datetime.utcnow().replace(tzinfo=utc).strftime(\"%Y-%m-%d\"),\n \"calendar_id\": 1,\n }, follow=True)\n request.user = self.user\n request.session = {}\n response = event_add(request)\n self.assertEqual(response.status_code, 200)", "def test_create_event(self):\n res = self.client.post('/create-event',\n follow_redirects=True, data=dict(\n event_type='Rally',\n location='Letterkenny',\n date='30 September 2020',\n description='Test',\n organiser='Unicorn MCC'\n ))\n\n data = res.data.decode('utf-8')\n # Check user and rally displayed\n assert 'orange' in data\n assert 'Rally' in data", "def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n 
msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )", "async def test_events_http_api(hass, hass_client):\n await async_setup_component(hass, \"calendar\", {\"calendar\": {\"platform\": \"demo\"}})\n await hass.async_block_till_done()\n client = await hass_client()\n response = await client.get(\"/api/calendars/calendar.calendar_2\")\n assert response.status == 400\n start = dt_util.now()\n end = start + timedelta(days=1)\n response = await client.get(\n \"/api/calendars/calendar.calendar_1?start={}&end={}\".format(\n start.isoformat(), end.isoformat()\n )\n )\n assert response.status == 200\n events = await response.json()\n assert events[0][\"summary\"] == \"Future Event\"\n assert events[0][\"title\"] == \"Future Event\"", "def test_basic_call(self):\n env = self.trace(\"\"\"\n foo = objects.Foo()\n bar = objects.bar_from_foo(foo, x=1, y=2)\n \"\"\")\n \n events = self.function_events\n self.assertEqual(len(events), 4)\n \n event = events[2]\n self.assertIsInstance(event, TraceCall)\n self.assertTrue(event.atomic)\n self.assertEqual(event.function, objects.bar_from_foo)\n self.assertEqual(event.module_name, objects.__name__)\n self.assertEqual(event.qual_name, 'bar_from_foo')\n self.assertEqual(event.arguments, OrderedDict([\n ('foo',env['foo']), ('x',1), ('y',2)\n ]))\n \n event = events[3]\n self.assertIsInstance(event, TraceReturn)\n self.assertTrue(event.atomic)\n self.assertEqual(event.function, objects.bar_from_foo)\n self.assertEqual(event.module_name, objects.__name__)\n self.assertEqual(event.qual_name, 'bar_from_foo')", "def setUp(self):\n user = User.objects.create(username=\"nerd\")\n\n # Initialize client and force it to use authentication\n self.client = APIClient()\n self.client.force_authenticate(user=user)\n\n # Since user model instance is not serializable, use its Id/PK\n self.event_data = {'description': 'Go to Ibiza', 'owner': user.id}\n self.response = self.client.post('/events/', self.event_data, format='json')", "def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)" ]
[ "0.74541396", "0.724666", "0.72374237", "0.71662945", "0.7073701", "0.7027783", "0.68203604", "0.68195695", "0.680202", "0.6784378", "0.6762648", "0.67611665", "0.66722715", "0.66431737", "0.6619424", "0.65345937", "0.65251654", "0.65242076", "0.65172464", "0.6512973", "0.6508907", "0.64926445", "0.6475576", "0.6469131", "0.6420528", "0.64146644", "0.64124495", "0.6411172", "0.64062", "0.6396349", "0.63446623", "0.63375944", "0.63286906", "0.6310429", "0.62864673", "0.6284523", "0.62556493", "0.625366", "0.6252926", "0.6252926", "0.62467283", "0.6172138", "0.6161407", "0.61492753", "0.61231226", "0.6111113", "0.61089456", "0.61025167", "0.61024094", "0.6077665", "0.6070657", "0.6064041", "0.6063871", "0.60544026", "0.60543", "0.60528094", "0.60460854", "0.6026164", "0.6009751", "0.5997698", "0.59894985", "0.5987838", "0.5977979", "0.59648997", "0.5964612", "0.5964577", "0.5962443", "0.5958903", "0.5956897", "0.5955078", "0.59381497", "0.5932129", "0.5919361", "0.59151006", "0.5912132", "0.5906395", "0.5893073", "0.5890909", "0.5885993", "0.58855593", "0.5880635", "0.58762306", "0.58669746", "0.58617014", "0.585858", "0.5857002", "0.58531815", "0.5849665", "0.58405", "0.5836194", "0.5811594", "0.58092976", "0.58031315", "0.580018", "0.58000296", "0.57969236", "0.57950544", "0.57943517", "0.5775414", "0.57725483" ]
0.74155563
1
Unit test get_ip_type_by_address method of the Bad Bots class
def test_get_ip_type_by_address(setup_config, get_mock_event): # !ARRANGE! bad_bots = BadBots(setup_config, get_mock_event) ipv4_address_1 = '1.1.1.1' ipv4_address_2 = '11.22.33.44' ipv4_address_3 = '123.123.123.123' ipv6_address_1 = '2a02:a445:6d36:1:1e3:a188:313c:1d31' ipv6_address_2 = '3731:54:65fe:2::a7' ipv6_address_3 = 'fd07:a47c:3742:823e:3b02:76:982b:463' # !ACT! # Detect the IP type of provided IP addresses ipv4_address_1_type = bad_bots.get_ip_type_by_address(ipv4_address_1) ipv4_address_2_type = bad_bots.get_ip_type_by_address(ipv4_address_2) ipv4_address_3_type = bad_bots.get_ip_type_by_address(ipv4_address_3) ipv6_address_1_type = bad_bots.get_ip_type_by_address(ipv6_address_1) ipv6_address_2_type = bad_bots.get_ip_type_by_address(ipv6_address_2) ipv6_address_3_type = bad_bots.get_ip_type_by_address(ipv6_address_3) # !ASSERT! # Assert IP addresses are of type IPv4 assert ipv4_address_1_type.value == BadBots.SourceIPType.IPV4.value assert ipv4_address_2_type.value == BadBots.SourceIPType.IPV4.value assert ipv4_address_3_type.value == BadBots.SourceIPType.IPV4.value # Assert IP addresses are of type IPv6 assert ipv6_address_1_type.value == BadBots.SourceIPType.IPV6.value assert ipv6_address_2_type.value == BadBots.SourceIPType.IPV6.value assert ipv6_address_3_type.value == BadBots.SourceIPType.IPV6.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ip_addresses_exists():\n load_ips()\n validate_names()", "def test_external_ip_get_kind(self):\n assert_equal(self.test_external_ip.get_kind(), 'mpexternalip')", "def test_ipam_ip_addresses_read(self):\n pass", "def test_get_source_ip(self):\n pass", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255", "def test_get_node_internal_ip_address(self):\n pass", "def test_get_geoip():\n assert get_geoip(\"74.125.67.100\") == \"US\"", "def test_ipam_ip_addresses_list(self):\n pass", "def test_ipam_ip_addresses_create(self):\n pass", "def testIP(self):\n self.assertEqual([\"http://234.234.234.234\"], grab('http://234.234.234.234', self.needScheme))", "def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)", "def test_functional_good_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n if response.status_code != BAD_GATEWAY:\n print(\"\\nGATEWAY is OK\")\n self.assertEqual(response.status_code, OK)\n content = response.json()\n self.assertEqual(len(content), 3)\n self.assertTrue(content.get(\"temp\"))\n self.assertTrue(content.get(\"city\"))\n else:\n print(\"\\nGATEWAY is RESET BY PEER\")", "def test_add_ip(self):\n ip = '1.1.1.1'\n info = self.api.add_ipadress(ip, tags=['asd'])\n self.assertEqual(info['value'], ip)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def test_get_ip_tags_invalid_ip(client, database):\n\n invalid_ip = \"http://127.0.0.1:5000/ip-tags/10.1.2.3000\"\n response = client.get(invalid_ip)\n response_data = response.get_json()\n\n assert response.status_code == 400\n assert response.headers[\"Content-Type\"] == \"application/json\"\n assert (\n response_data[\"error\"]\n == \"400 Bad Request: Address 10.1.2.3000 does not have IPv4 format\"\n )", "def test_exclude_ip_ban(self):\n pass", "def test_read_host_subnet(self):\n pass", "def IP(address):\n for klass in (V4Address, V6Address):\n try:\n ip = klass(address)\n except ValueError, e:\n error = e\n else:\n return ip\n\n raise error", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def _get_address_type(self):\n return self.__address_type", "def test_functional_bad_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n self.assertEqual(response.status_code, BAD_REQUEST)\n self.assertEqual(response.json().get(\"error\"),\n \"No city for ip {}\".format(url))", "def test_get_ip_from_headers(self):\n response = self.client.get(self.voter_location_url, REMOTE_ADDR='69.181.21.132')\n self.assertEqual(response.status_code, 200)\n json_data = json.loads(response.content.decode())\n self.assertEqual(json_data['success'], True)\n 
self.assertEqual(json_data['voter_location_found'], True)", "def test_target_resembles_ip(self):\n for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):\n with self.assertRaises(ValidationError):\n self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,\n ip_type='4')", "def mock_country_code_by_addr(self, ip_addr):\r\n ip_dict = {\r\n '1.0.0.0': 'CU',\r\n '2.0.0.0': 'IR',\r\n '3.0.0.0': 'SY',\r\n '4.0.0.0': 'SD',\r\n '5.0.0.0': 'AQ', # Antartica\r\n }\r\n return ip_dict.get(ip_addr, 'US')", "def vt_ip_check(ip, vt_api):\n if not is_IPv4Address(ip):\n return None\n\n url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'\n parameters = {'ip': ip, 'apikey': vt_api}\n response = requests.get(url, params=parameters)\n try:\n return response.json()\n except ValueError:\n return None", "def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None", "def get_ip(self):", "def test_find_agent_ips(self):\n\n with patch(\n \"salt.cloud.clouds.proxmox.query\",\n return_value={\n \"result\": [\n {\n \"name\": \"eth0\",\n \"ip-addresses\": [\n {\"ip-address\": \"1.2.3.4\", \"ip-address-type\": \"ipv4\"},\n {\"ip-address\": \"2001::1:2\", \"ip-address-type\": \"ipv6\"},\n ],\n },\n {\n \"name\": \"eth1\",\n \"ip-addresses\": [\n {\"ip-address\": \"2.3.4.5\", \"ip-address-type\": \"ipv4\"},\n ],\n },\n {\n \"name\": \"dummy\",\n },\n ]\n },\n ) as mock_query:\n vm_ = {\n \"technology\": \"qemu\",\n \"host\": \"myhost\",\n \"driver\": \"proxmox\",\n \"ignore_cidr\": \"1.0.0.0/8\",\n }\n\n # CASE 1: Test ipv4 and ignore_cidr\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2.3.4.5\"\n\n # CASE 2: Test ipv6\n\n vm_[\"protocol\"] = \"ipv6\"\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2001::1:2\"", "def test_ipam_ip_addresses_update(self):\n pass", "def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?", "def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def test_client_address_retrieve(self):\n pass", "def outside_ip_address_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"outside_ip_address_type\")", "def test(self):\n response = requests.get(\"https://ipinfo.io/\")\n response_json = {}\n try:\n response_json = response.json()\n except JSONDecodeError as e:\n response_json[\"ip\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"city\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"region\"] = \"Error with remote website. 
This is not an error with the client.\"\n response_json[\"loc\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"org\"] = \"Error with remote website. This is not an error with the client.\"\n response_json[\"timezone\"] = \"Error with remote website. This is not an error with the client.\"\n\n self.ip = str(response_json['ip'])\n self.city = str(response_json['city'])\n self.region = str(response_json['region'])\n self.loc = str(response_json['loc'])\n self.org = str(response_json['org'])\n self.timezone = str(response_json['timezone'])\n return self", "def test_ipv4_in_net(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\", force_v4=True)\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def ip_lookup(self, ip_address):\r\n obj = self.client['Network_Subnet_IpAddress']\r\n return obj.getByIpAddress(ip_address, mask='hardware, virtualGuest')", "def test_list_host_subnet(self):\n pass", "def handle_ip(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<hostname>')\n return\n try:\n ipnr = socket.gethostbyname(item)\n ievent.reply(ipnr)\n except:\n ievent.reply(\"can't match \" + str(item))", "def test_cloud_api():\n mock = provider.MockProvider()\n\n mock.setup_cloud('empty config....')\n\n assert mock.get_ext_ip_addr('some-node')", "def is_valid_ip(ip):\n ...", "def test_ip_address_vars(self):\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_ipaddress_vars\"\n name = \"TestIPAddresses\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n # Fill out the form\n form_data = dict(\n ipv4_address=\"1.2.3.4\",\n ipv4_with_mask=\"1.2.3.4/32\",\n ipv4_network=\"1.2.3.0/24\",\n ipv6_address=\"2001:db8::1\",\n ipv6_with_mask=\"2001:db8::1/64\",\n ipv6_network=\"2001:db8::/64\",\n )\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n\n # Prepare the job data\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n data = job_class.serialize_data(form.cleaned_data)\n\n # Run the job and extract the job payload data\n run_job(data=data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data[\"run\"][\"log\"][0][2] # Indexing makes me sad.\n job_result_data = json.loads(job_payload)\n\n # Assert stuff\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)", "def outside_ip_address_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"outside_ip_address_type\")", "def outside_ip_address_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"outside_ip_address_type\")", "def isIP(ipToTest):\n \n try:\n socket.inet_aton(ipToTest)\n return True\n except socket.error:\n return False", "def test_replace_host_subnet(self):\n pass", "def _check_ip(val: Any, input_format: str, clean: bool) -> Any:\n try:\n if val in NULL_VALUES:\n return 
(None, \"null\") if clean else False\n\n address = ip_address(val)\n vers = address.version\n\n if vers == 4 and input_format != \"ipv6\" or vers == 6 and input_format != \"ipv4\":\n return (address, \"success\") if clean else True\n return (None, \"unknown\") if clean else False\n\n except (TypeError, ValueError):\n return (None, \"unknown\") if clean else False", "def get_ip(node_name='', ip_type=''):\n ip = ''\n \n while True:\n ip = input('ip address for {} in {} node: '.format(ip_type, node_name))\n ip_check = validate_ip(ip)\n if ip_check:\n break\n else:\n logging.warn('ip address should be in format: x.x.x.x')\n \n return ip", "def _get_ipaddress(node):\n if \"ipaddress\" not in node:\n with settings(hide('stdout'), warn_only=True):\n output = sudo('ohai ipaddress')\n if output.succeeded:\n node['ipaddress'] = json.loads(output)[0]\n return True\n return False", "def ipinfo_ip_check(ip):\n if not is_IPv4Address(ip):\n return None\n\n response = requests.get('http://ipinfo.io/%s/json' % ip)\n return response.json()", "def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()", "async def get_cell_type(ip: str):\n try:\n async with asyncssh.connect(ip,username=USER_IPACCESS,password=PASS_IPACCESS,known_hosts=None) as conn:\n result = await conn.run('ls /opt/ipaccess')\n if 'DMI' in result.stdout:\n return '3g'\n else:\n return '4g'\n except:\n LOGGER.warning(f\"unable to get cell type from {ip}\")", "def test_get_ip_tags(client, database, sample_data, url, expected_data):\n\n response = client.get(url)\n response_data = response.get_json()\n\n assert response.status_code == 200\n assert response.headers[\"Content-Type\"] == \"application/json\"\n assert response_data == expected_data", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def test_default_country_by_ip(self):\n\n response = self.client.get(\n reverse(\"billing_info\"), HTTP_X_FORWARDED_FOR=\"85.214.132.117\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"DE\" selected>Germany</option>', html=True\n )\n\n response = self.client.get(\n reverse(\"billing_info\"), REMOTE_ADDR=\"85.214.132.117\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"DE\" selected>Germany</option>', html=True\n )", "def test_interface_addresses(bf: Session, sot: SoT) -> None:\n interface_props = bf.q.interfaceProperties(nodes=SNAPSHOT_NODES_SPEC).answer().frame()\n for _, row in interface_props.iterrows():\n interface_name = row[\"Interface\"].interface\n\n if not row[\"Active\"] or \\\n row[\"Access_VLAN\"] or \\\n row[\"Description\"] == \"[type=ISP]\" or \\\n interface_name in 
INTERFACES_WITHOUT_ADDRESS:\n continue\n\n assert row[\"Primary_Address\"], f'No address assigned to {row[\"Interface\"]}'\n\n interface_address = ipaddress.ip_network(row[\"Primary_Address\"], strict=False)\n\n # check prefix length\n expected_prefix_length = sot.get_interface_prefix_length(interface_name)\n assert interface_address.prefixlen == expected_prefix_length, \"Unexpected prefix length {} for {}. Expected {}\".format(\n interface_address.prefixlen, row[\"Interface\"], expected_prefix_length)\n\n # check that IP address is from the right prefix\n address_in_range = any([interface_address.subnet_of(prefix)\n for prefix in sot.get_interface_prefixes(interface_name)])\n assert address_in_range, \"Unexpected address {} for {}. Expected it to be in {}\".format(\n interface_address, row[\"Interface\"], sot.get_interface_prefixes(interface_name))", "def test_addresses(self):\n # pylint:disable=expression-not-assigned\n\n self._compare_avp(\n avp.AddressAVP(257, '127.0.0.1'),\n memoryview(\n b'\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x0e'\n b'\\x00\\x01\\x7f\\x00\\x00\\x01\\x00\\x00',\n ),\n )\n\n self._compare_avp(\n avp.AddressAVP(257, '2001:db8::1'),\n memoryview(\n b'\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x1a\\x00\\x02 \\x01\\r'\n b'\\xb8\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n b'\\x01\\x00\\x00',\n ),\n )\n\n # Can't read invalid address type \\x03\n with self.assertRaises(CodecException):\n avp.decode(\n b'\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x0e'\n b'\\x00\\x03\\x7f\\x00\\x00\\x01\\x00\\x00',\n ).value\n\n # Can't read too short IPV4\n with self.assertRaises(CodecException):\n avp.decode(\n b'\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x0e'\n b'\\x00\\x01\\x7f',\n ).value\n\n # Can't read too short IPV6\n with self.assertRaises(CodecException):\n avp.decode(\n b'\\x00\\x00\\x01\\x01\\x00\\x00\\x00\\x0e'\n b'\\x00\\x02\\x7f\\x00\\x00\\x01\\x00\\x00',\n ).value\n\n # Cant encode non-ips\n with self.assertRaises(CodecException):\n avp.Unsigned32AVP(257, 'facebook.com')", "def ip_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, ipaddress._IPAddressBase):\n name = type(var).__name__\n raise IPError(\n 'Function {} expected IP address, {} got instead.'.format(func, name))", "def test_re_ip(self, ip_address: str, is_valid_ip: bool):\n self.assertEqual(bool(re_ip.search(ip_address)), is_valid_ip)", "def ip_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_type\")", "def get_ip_info(ip_addr):\n\n ip_info = {}\n fields = ['range', 'name', 'country', 'description', 'emails']\n\n try:\n info = ipwhois.IPWhois(ip_addr).lookup_whois()\n\n for field in fields:\n value = info['nets'][0].get(field, 'N/A')\n ip_info[field] = value\n\n except ipwhois.BaseIpwhoisException as ip_err:\n ip_info['error'] = 'Unable to get IP details ({0})'.format(ip_err)\n\n return ip_info", "def _read_proto_resolve(self, addr: 'bytes', ptype: 'int') -> 'str | IPv4Address | IPv6Address':\n if ptype == Enum_EtherType.Internet_Protocol_version_4: # IPv4\n return ipaddress.ip_address(addr)\n if ptype == Enum_EtherType.Internet_Protocol_version_6: # IPv6\n return ipaddress.ip_address(addr)\n return addr.hex()", "def test_patch_host_subnet(self):\n pass", "def test_address_to_bytes(self):\n pass", "def test_create_host_subnet(self):\n pass", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def 
test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def test_vm_ip_fqdn_info(self):\n self.check_vm_ip_fqdn_info()", "def getAddress(self) -> int:\n ...", "def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_ip = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def __detect_type__(self, value):\n def is_ipv6_address(value):\n try:\n value, interface = value.split('%', 1)\n except: # noqa\n pass\n try:\n parts = value.split(':')\n for part in parts:\n if part == '':\n continue\n part = int(part, 16)\n if part < 0:\n raise ValueError\n return True\n except Exception:\n return False\n\n def is_ipv4_address(value):\n try:\n value, interface = value.split('%', 1)\n except: # noqa\n pass\n try:\n parts = value.split('.', 3)\n for part in parts:\n part = int(part)\n if part < 0 or part > 255:\n raise ValueError\n return True\n except: # noqa\n return False\n\n # Strip port\n if value.startswith('['):\n value = value[1:]\n try:\n value, port = value.split(':', 1)\n except: # noqa\n pass\n\n if value.endswith(']'):\n value = value[:-1]\n\n if is_ipv4_address(value):\n return 1, value, 'ipv4_address'\n\n elif is_ipv6_address(value):\n return 2, value, 'ipv6_address'\n\n else:\n return 0, value, 'hostname'", "def get_ip(tag,env=None,eip=False):\n api_url = 'http://api.rahulinux.io/ip?host={0}&env={1}&eip={2}'\n try:\n resp = requests.get(api_url.format(tag,env,eip))\n except requests.exceptions.RequestException as e:\n return e\n if len(resp.text) >= 30:\n return resp.text.split()\n return [ resp.text ]", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def test_correct_city(self, ip_address, city_correct):\n city, country = get_geo(ip_address=ip_address)\n self.assertEqual(city, city_correct)", "def test_get_host(self):\n pass", "def is_reserved(ip):\n if ip_between(ip, \"0.0.0.0\", \"0.255.255.255\"):\n return True\n elif ip_between(ip, \"10.0.0.0\", \"10.255.255.255\"):\n return True\n elif ip_between(ip, \"100.64.0.0\", \"100.127.255.255\"):\n return True\n elif ip_between(ip, \"127.0.0.0\", \"127.255.255.255\"):\n return True\n elif ip_between(ip, \"169.254.0.0\", \"169.254.255.255\"):\n return True\n elif ip_between(ip, \"172.16.0.0\", \"172.31.255.255\"):\n return True\n elif ip_between(ip, \"192.0.0.0\", \"192.0.0.255\"):\n return True\n elif ip_between(ip, \"192.0.2.0\", \"192.0.2.255\"):\n return True\n elif ip_between(ip, \"192.88.99.0\", \"192.88.99.255\"):\n return True\n elif ip_between(ip, \"192.168.0.0\", \"192.168.255.255\"):\n 
return True\n elif ip_between(ip, \"198.18.0.0\", \"198.19.255.255\"):\n return True\n elif ip_between(ip, \"198.51.100.0\", \"198.51.100.255\"):\n return True\n elif ip_between(ip, \"203.0.113.0\", \"203.0.113.255\"):\n return True\n elif ip_between(ip, \"224.0.0.0\", \"255.255.255.255\"):\n return True\n else:\n return False", "def test_ipam_prefixes_available_ips_read(self):\n pass", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')", "def ip_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_type\")", "def urlvoid_ip_check(ip):\n if not is_IPv4Address(ip):\n return None\n\n return_dict = {}\n headers = {'User-Agent': useragent}\n url = 'http://urlvoid.com/ip/%s/' % ip\n response = requests.get(url, headers=headers)\n data = BeautifulSoup(response.text)\n h1 = data.findAll('h1')[0].text\n if h1 == 'Report not found':\n return None\n elif re.match('^IP', h1):\n return_dict['bad_names'] = []\n return_dict['other_names'] = []\n for each in data.findAll('img', alt='Alert'):\n return_dict['bad_names'].append(each.parent.text.strip())\n for each in data.findAll('img', alt='Valid'):\n return_dict['other_names'].append(each.parent.text.strip())\n\n return return_dict", "def test_same_ip(self):\n self.create_ptr(\n ip_str='128.193.0.2', ip_type='4', fqdn='foo1.oregonstate.edu')\n\n with self.assertRaises(ValidationError):\n self.create_ptr(\n ip_str='128.193.0.2', ip_type='4', fqdn='foo2.oregonstate.edu')", "def _get_ip_resp(api_url: str):\n return get(api_url, headers={'user-agent': USER_AGENT})", "def test_ip(self, api_client, ip_address, expected_response):\n runner = CliRunner()\n\n api_client.ip.return_value = expected_response\n\n result = runner.invoke(subcommand.ip, [\"-f\", \"json\", ip_address])\n assert result.exit_code == 0\n assert result.output.strip(\"\\n\") == json.dumps(\n [expected_response], indent=4, sort_keys=True\n )\n api_client.ip.assert_called_with(ip_address=ip_address)", "def test_ipam_ip_addresses_delete(self):\n pass", "def test_geoiplookup(self):\n\n try:\n output = subprocess.check_output('geoiplookup 1.1.1.1', shell=True).decode('utf-8')\n if not 'Cloudflare' in output:\n self.fail('Are your geoip databases in /usr/share/GeoIP/ ???')\n except:\n self.fail('Error when calling geoiplookup')", "def get_ip_type2(self) -> str:\n hex_ip = hexlify(self.message)[154:162]\n ip_addr = int(hex_ip[0:2] + hex_ip[2:4] + hex_ip[4:6] + hex_ip[6:8], 16)\n return inet_ntoa(pack(\">L\", ip_addr))", "def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip", "def test_ipam_prefixes_available_ips_create(self):\n pass" ]
[ "0.67973435", "0.6795078", "0.67154825", "0.66857165", "0.66440994", "0.6609387", "0.65462726", "0.6507168", "0.649237", "0.6404722", "0.63306016", "0.6302462", "0.6274524", "0.6213682", "0.6204644", "0.62016076", "0.61814064", "0.6152368", "0.6097284", "0.60947865", "0.60787016", "0.6026259", "0.6008837", "0.5960325", "0.59585816", "0.5950451", "0.59444684", "0.593575", "0.5914348", "0.5899763", "0.589471", "0.5889534", "0.5835708", "0.5818922", "0.57957983", "0.5772059", "0.5763991", "0.5754107", "0.5731307", "0.57223856", "0.5720932", "0.5720932", "0.5694813", "0.5689201", "0.5688158", "0.5685744", "0.56788516", "0.5678415", "0.5669873", "0.5667809", "0.5667392", "0.5655839", "0.56538975", "0.56494987", "0.56440073", "0.5634004", "0.5633829", "0.5630952", "0.56225616", "0.5611136", "0.5608081", "0.56077915", "0.56010723", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.5577574", "0.55762047", "0.55679184", "0.5564652", "0.5564355", "0.556433", "0.5563282", "0.5548044", "0.55453545", "0.5545349", "0.5536237", "0.5535767", "0.5531554", "0.5530239", "0.55300415", "0.55274856", "0.5524455", "0.5522595", "0.55084616", "0.5507352", "0.5504507", "0.5499197" ]
0.78166175
0
Unit test the check_bot_confidence method of the BadBots class
def test_check_bot_confidence(setup_config, get_mock_event): # !ARRANGE! bad_bots = BadBots(setup_config, get_mock_event) bot_1 = Bot() bot_1.source_ip = '1.1.1.1' bot_1.http_query_string_parameters = '<script></script>' bot_1.http_body = 'EXEC' bot_1.geolocation = 'United States' bot_1.source_ip_type = BadBots.SourceIPType.IPV4 bot_1.http_method = "CONNECT" bot_1.http_user_agent = "Mozilla/5.0 (compatible; Sosospider/2.0; +http://help.soso.com/webspider.htm)" bot_2 = Bot() bot_2.source_ip = '77.168.51.231' bot_2.http_query_string_parameters = 'hello' bot_2.http_body = 'hello!' bot_2.geolocation = 'Netherlands' bot_2.source_ip_type = BadBots.SourceIPType.IPV4 bot_2.http_method = "GET" bot_2.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36" bot_3 = Bot() bot_3.source_ip = '2a02:a445:6d36:1:1e3:a188:313c:1d33' bot_3.http_query_string_parameters = 'param=true' bot_3.http_body = 'username=xxx' bot_3.geolocation = 'United States' bot_3.source_ip_type = BadBots.SourceIPType.IPV6 bot_3.http_method = "GET" bot_3.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36" # !ACT! # Do confidence check on potential bots confidence_score_bot_1 = bad_bots.check_bot_confidence(bot_1) confidence_score_bot_2 = bad_bots.check_bot_confidence(bot_2) confidence_score_bot_3 = bad_bots.check_bot_confidence(bot_3) # !ASSERT! # Assert IP addresses are of type IPv4 assert(confidence_score_bot_1 == 25) assert(confidence_score_bot_2 == 0) assert(confidence_score_bot_3 == 5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_word_confidences(self):\n self._api.SetImageFile(self._image_file)\n words = self._api.AllWords()\n self.assertEqual(words, [])\n self._api.Recognize()\n words = self._api.AllWords()\n confidences = self._api.AllWordConfidences()\n self.assertEqual(len(words), len(confidences))\n mapped_confidences = self._api.MapWordConfidences()\n self.assertEqual([v[0] for v in mapped_confidences], words)\n self.assertEqual([v[1] for v in mapped_confidences], confidences)", "def test_robbins_confidence(self):\n c = array([1,2,3,0,1])\n r = robbins_confidence(c, 0.05)\n n = 7\n s = 2\n k = sqrt(8/0.05)\n self.assertEqual(r, ((s-k)/(n+1), (s+k)/(n+1)))", "def test_chao1_confidence(self): \n #NOTE: EstimateS rounds to 2 dp\n self.assertFloatEqual(chao1_confidence(self.TestData), (9.07,17.45), \\\n eps=0.01)\n self.assertFloatEqual(chao1_confidence(self.TestData, \\\n bias_corrected=False), (9.17,21.89), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles),\\\n (4, 4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles, \\\n bias_corrected=False), (4,4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles), \\\n (4.08,17.27), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles, \\\n bias_corrected=False), (4.08,17.27), eps=0.01)", "def test_likelihood(app):\n\n assert False", "def test_confidences(self):\n\n # Add alignments to pipeline\n for hit, aln in zip(self.pipeline[\"templates\"], self.ALIGNMENTS):\n hit[\"alignment\"] = aln\n\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"][\"confidence\"],\n \"---5-4-----\")\n self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"][\"confidence\"],\n \"----3-----\")", "def test_rb_utils(self):\n\n t1 = 100.\n t2 = 100.\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1],\n [t2, t2], gate2Q)\n\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1],\n [t2], gate1Q)\n\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n \"Error: 1Q Coherence Limit\")\n\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n \"Error: 2Q Coherence Limit\")\n\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5],\n [0, 1, -1],\n [0.001, 0.0015, 0.02])\n\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n \"Error: 2Q EPC Calculation\")", "def test_error_at_995tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.995))", "def compute_confidence_interval(self) -> bool:\n return False", "def test_error_at_95tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.95))", "def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))", "def testIsBiconnected(self):\n self.assertEqual(is_biconnected(self.G1), True)\n self.assertEqual(is_biconnected(self.G2), False)", "def test_dice_coef_loss():\n assert dice_coef_loss() == expected_dice_coef_loss", "def test_verify_fails_expected_metric_kwargs(perfectModelEnsemble_initialized_control):\n pm = perfectModelEnsemble_initialized_control\n pm = pm - pm.mean(\"time\").mean(\"init\")\n with pytest.raises(ValueError) as excinfo:\n pm.verify(\n metric=\"threshold_brier_score\", comparison=\"m2c\", dim=[\"init\", \"member\"]\n )\n assert \"Please provide threshold.\" == str(excinfo.value)", "def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n 
self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def check():\n hokusai.check()", "def test_should_contain_badge_classes(self):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/badge_retrieval.yaml'):\n self.assertTrue(isinstance(badgr.badges[0], Badge))", "def test_score_with_fitted_estimator(self):\n model = GaussianNB().fit(self.binary.X.train, self.binary.y.train)\n\n # NOTE that the wrapper will pass a call down to `classes_`\n oz = ClassificationScoreVisualizer(model)\n assert_not_fitted(oz, [\"class_counts_\", \"score_\"])\n\n msg = \"could not determine class_counts_\"\n with pytest.warns(YellowbrickWarning, match=msg):\n oz.score(self.binary.X.test, self.binary.y.test)\n assert_fitted(oz, [\"classes_\", \"class_counts_\", \"score_\"])", "def test_healthcheck(self):\n self.assertEqual(\"OK\", \"OK\")", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def test_validate_metadata_pass(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-passing-batch\",\n \"samples\": [\n \"PTC_EXPn200908LL_L2000001\",\n \"PTC_EXPn200908LL_L2000002\",\n \"PTC_EXPn200908LL_L2000003\"\n ],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNone(reason)\n\n # should not call to slack webhook\n verify(libslack.http.client.HTTPSConnection, times=0).request(...)", "def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))", "def test_cv_warning_messages():\n fr = h2o.import_file(path=pyunit_utils.locate(\"smalldata/admissibleml_test/Bank_Personal_Loan_Modelling.csv\"))\n target = \"Personal Loan\"\n fr[target] = fr[target].asfactor()\n x = [\"Experience\",\"Income\",\"Family\",\"CCAvg\",\"Education\",\"Mortgage\",\n \"Securities Account\",\"CD Account\",\"Online\",\"CreditCard\"]\n splits = fr.split_frame(ratios=[0.80])\n train = splits[0]\n test = splits[1]\n infogram_model_cv_v = H2OInfogram(seed = 12345, protected_columns=[\"Age\",\"ZIP Code\"], nfolds=3) \n infogram_model_cv_v.train(x=x, y=target, training_frame=train, validation_frame=test)\n \n pyunit_utils.checkLogWeightWarning(\"infogram_internal_cv_weights_\", wantWarnMessage=False)", "def test_status(self):\n self.assertEqual('perfect', self.__metric.status())", "def test_life_critical():\n assert chap2.life_critical()", "def test_balance_tracking(self):\n # TODO\n pass", "def test_get_boat(self):\n pass", "def test_heartbeat( self ):\n with self.app.app_context():\n url = '/donation/heartbeat'\n\n # Ensure a GET with no saved caged_donors returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( response.status_code, status.HTTP_200_OK )", "def test_coast():\n dwd = DwdWeatherWarningsAPI(WARNCELL_NAME_COAST)\n assert dwd.data_valid\n assert dwd.warncell_id == WARNCELL_ID_COAST\n assert dwd.warncell_name == WARNCELL_NAME_COAST\n start_time = datetime.datetime.now(\n datetime.timezone.utc\n ) - datetime.timedelta(0, TIME_TOLERANCE)\n stop_time = start_time + datetime.timedelta(0, (2 * 
TIME_TOLERANCE))\n assert start_time < dwd.last_update < stop_time\n assert MIN_WARNING_LEVEL <= dwd.current_warning_level <= MAX_WARNING_LEVEL\n assert MIN_WARNING_LEVEL <= dwd.expected_warning_level <= MAX_WARNING_LEVEL\n assert isinstance(dwd.current_warnings, list)\n assert isinstance(dwd.expected_warnings, list)", "def test_b_grade_exact(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.33)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def test_return_advice_actual_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"45\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n text = \"Ton poids actuel est déjà bien bas... je te déconseille \" \\\n \"de perdre plus de poids. \"\n self.assertEqual(advice, text)", "def test_label_all_positives_happy_path(\n mock_engine: DicomImagePiiVerifyEngine,\n mock_gt_single: dict,\n get_mock_dicom_verify_results: dict,\n ocr_results: List[dict],\n analyzer_results: List[dict],\n tolerance: int,\n expected_results: List[dict],\n):\n # Assign\n if not ocr_results:\n ocr_results = get_mock_dicom_verify_results[\"ocr_results_formatted\"]\n\n if not analyzer_results:\n analyzer_results = get_mock_dicom_verify_results[\"analyzer_results\"]\n\n # Act\n test_all_pos = mock_engine._label_all_positives(\n mock_gt_single, ocr_results, analyzer_results, tolerance\n )\n\n # Assert\n assert test_all_pos == expected_results", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def check_initial_confidence(self): # pragma: no cover\n if self.test_type != 'perf':\n return True\n\n if self.required_initial_confidence is None:\n return True # pragma: no cover\n\n # TODO(robertocn): Remove all uses of \"confidence\".\n if self.dummy_initial_confidence is not None:\n self.initial_confidence = float(\n self.dummy_initial_confidence)\n if (float(self.initial_confidence) <\n float(self.required_initial_confidence)):\n self._set_insufficient_confidence_warning()\n return False\n return True\n\n if self.dummy_builds:\n dummy_result = self.good_rev.values != self.bad_rev.values\n if not dummy_result:\n self._set_insufficient_confidence_warning()\n return dummy_result\n\n with self.api.m.step.nest('Re-testing reference range'):\n expiration_time = time.time() + REGRESSION_CHECK_TIMEOUT\n while time.time() < expiration_time:\n if len(self.good_rev.values) >= 5 and len(self.bad_rev.values) >= 5:\n if self.significantly_different(self.good_rev.values,\n self.bad_rev.values):\n return True\n if len(self.good_rev.values) == len(self.bad_rev.values):\n revision_to_retest = self.last_tested_revision\n else:\n revision_to_retest = min(self.good_rev, self.bad_rev,\n key=lambda x: len(x.values))\n if len(revision_to_retest.values) < MAX_REQUIRED_SAMPLES:\n revision_to_retest.retest()\n else:\n break\n self._set_insufficient_confidence_warning()\n return False", "def test_probability_of_all_successes():\n\n assert(probability_of_all_successes(1/2,1,2) == 0.25)\n assert(are_close(probability_of_all_successes(1/6,1,2), 1/36, 0.001))\n assert(are_close(probability_of_all_successes(1/2,2,2), 7/16, 0.001))", "def test_return_advice_goal_weight_ok(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": 
\"55\", \"weight_goal\": \"55\"}\n return_advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n advice = \"Alors c'est parti ! Partons sur un objectif de - 5 kg. \"\n self.assertEqual(return_advice, advice)", "def test_heartbeat(self):\n pass", "def verify():", "def _verify_api_heartbeat(retry=True):\n url = 'http://{0}/heartbeat'.format(env.host_string)\n try:\n resp = urllib2.urlopen(url)\n status_code = resp.getcode()\n except urllib2.HTTPError as error:\n print '[{0}] Error while testing API: {1}'.format(env.host_string,\n error)\n print '[{0}] \\t Received: {1}'.format(env.host_string, error.read())\n status_code = error.getcode()\n\n if status_code == 200:\n print '[{0}] API Test Succesful!'.format(env.host_string)\n return\n\n if not retry:\n fabric.utils.abort('Host: {0} API is not functioning properly'\n .format(env.host_string))\n else:\n print '[{0}] Retrying heartbeat in 2 seconds...' \\\n .format(env.host_string)\n time.sleep(2)\n _verify_api_heartbeat(retry=False)", "def test_conservation(self):\n self.c_s_tot = (\n self.c_s_n_tot(self.solution.t)\n + self.c_s_p_tot(self.solution.t)\n + self.c_SEI_n_tot(self.solution.t)\n + self.c_SEI_p_tot(self.solution.t)\n + self.c_Li_n_tot(self.solution.t)\n + self.c_Li_p_tot(self.solution.t)\n )\n diff = (self.c_s_tot[1:] - self.c_s_tot[:-1]) / self.c_s_tot[:-1]\n if \"profile\" in self.model.options[\"particle\"]:\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"surface form\"] == \"differential\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"SEI\"] == \"ec reaction limited\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=12)\n else:\n np.testing.assert_array_almost_equal(diff, 0, decimal=15)", "def verify(self):", "def test_detect_problem(self, check_output_mock):\n check_output_mock.return_value = \"arp_ignore = 1\"\n self.assertTrue(moduletests.src.arpignore.detect())\n self.assertTrue(check_output_mock.called)", "def test_badge_should_have_criteria_narrative(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.criteria_narrative, str)", "def _calculate_tp_confidences(images, test_class):\n confidences = []\n for (response_json, class_name) in predict(images, desc=f\"[{test_class}] inference\"):\n if response_json[\"status\"] != \"ok\":\n raise Exception(f\"Not OK response in {class_name}\")\n if class_name == test_class and response_json[\"response\"] == class_name:\n confidences.append(response_json[\"confidence\"])\n return confidences", "def testBatteryResponse(self):\n message = (mavutil.mavlink.GOPRO_COMMAND_BATTERY, mavutil.mavlink.GOPRO_REQUEST_SUCCESS, (72, 0, 0, 0))\n self.mgr.get_response_callback('vehicle','name', message)\n self.assertEqual( self.mgr.battery, 72)\n self.mgr.processMsgQueue.assert_called_with()", "def test_calculate_recall_happy_path(\n mock_engine: DicomImagePiiVerifyEngine,\n gt_labels: dict,\n all_pos: dict,\n expected_result: float,\n):\n # Act\n test_recall = mock_engine.calculate_recall(gt_labels, all_pos)\n\n # Assert\n assert test_recall == expected_result", "def test_confidence_thresholding_vis_api(experiment_to_use):\n experiment = experiment_to_use\n probabilities = experiment.probabilities\n viz_outputs = (\"pdf\", \"png\")\n with TemporaryDirectory() as tmpvizdir:\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = tmpvizdir + f\"/*.{viz_output}\"\n visualize.confidence_thresholding(\n [probabilities, probabilities],\n 
experiment.ground_truth,\n experiment.ground_truth_metadata,\n experiment.output_feature_name,\n labels_limit=0,\n model_names=[\"Model1\", \"Model2\"],\n output_directory=tmpvizdir,\n file_format=viz_output,\n )\n figure_cnt = glob.glob(vis_output_pattern_pdf)\n assert 1 == len(figure_cnt)", "def test_dichotomous_scores(dichotomous_Contingency, method, expected):\n xs_score = getattr(dichotomous_Contingency, method)().item()\n npt.assert_almost_equal(xs_score, expected)", "def testConsistency(self):", "def test_binary_balance(self):\n dataset = make_fixture(binary=True, split=False)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y) is oz\n assert oz._mode == BALANCE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def test_submithint_nopermission(self):\r\n mock_module = CHModuleFactory.create(user_voted=True)\r\n json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}\r\n print mock_module.user_voted\r\n mock_module.submit_hint(json_in)\r\n print mock_module.hints\r\n self.assertTrue('29.0' not in mock_module.hints)", "def test_fit_score(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n assert_not_fitted(oz, X_test=self.binary.X.test)\n assert oz.fit(self.binary.X.train, self.binary.y.train) is oz\n assert 0.0 <= oz.score(self.binary.X.test, self.binary.y.test) <= 1.0\n assert_fitted(oz, X_test=self.binary.X.test)", "def test_emirp_check():\r\n pass", "def test_return_advice_goal_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n advice = \"Ton objectif semble trop bas, je te conseille de ne pas \" \\\n \"aller en dessous de 47.4 kg. \" \\\n \"C'est donc l'objectif que nous allons fixer ! 
\"\n self.assertEqual(return_advice, advice)", "def _verify(self):\n pass", "def inner_test(param: enums.Conclusion):\n self.assertEqual(param, enums.Conclusion.STALEMATE)", "def test_verification_failed(self):\n pass", "def test_verify_metric_kwargs(perfectModelEnsemble_initialized_control):\n pm = perfectModelEnsemble_initialized_control\n pm = pm - pm.mean(\"time\").mean(\"init\")\n assert pm.verify(\n metric=\"threshold_brier_score\",\n comparison=\"m2c\",\n dim=[\"init\", \"member\"],\n threshold=0.5,\n )", "def test_check_and_apply_status(self):\n # Arrange\n player = Character.objects.get(pk=1)\n target = Character.objects.get(pk=2)\n expected_rules = {\"area\": {\"beats\": [\"disrupt\", \"dodge\", \"block\"],\n \"loses\": [\"attack\"]},\n \"attack\": {\"beats\": [\"disrupt\", \"area\"],\n \"loses\": [\"block\", \"dodge\"]},\n \"block\": {\"beats\": [\"area\", \"attack\"],\n \"loses\": [\"disrupt\", \"dodge\"]},\n \"disrupt\": {\"beats\": [\"block\", \"dodge\"],\n \"loses\": [\"attack\", \"area\"]},\n \"dodge\": {\"beats\": [\"attack\", \"block\"],\n \"loses\": [\"area\", \"disrupt\"]}}\n\n object_to_test = Combat(player=player,\n target=target,\n player_attack_type=\"disrupt\",\n target_attack_type=\"block\",\n player_enhanced=True)\n\n # Inflict a status effect\n _ = object_to_test.do_combat_round()\n\n # Act\n # Check and apply the status effect\n _ = object_to_test.check_and_apply_status()\n\n # Assert\n self.assertDictEqual(object_to_test.rules, expected_rules)", "def test_basic(self):\n data = get()\n metrics = [verif.metric.Within(),\n verif.metric.A(), # Hit\n verif.metric.B(), # FA\n verif.metric.C(), # Miss\n verif.metric.D(), # Correct rejection\n verif.metric.Hit(),\n verif.metric.Threat(),\n verif.metric.Conditional(),\n verif.metric.XConditional(func=np.median),\n ]\n intervals = [verif.interval.Interval(-np.inf, 0, True, True), # [-inf, 0]\n verif.interval.Interval(-np.inf, 1, True, True),\n verif.interval.Interval(-np.inf, 2, True, True),\n ]\n obs = [0, 1.5, 2]\n fcst = [3.1, 1.1, -2.1]\n N = len(obs)*1.0\n\n # Each line is one metric (one number for each threshold)\n expected = [[0/N, 100/N, 100/N], # Within\n [0/N, 0/N, 2/N], # Hit\n [1/N, 1/N, 0/N], # FA\n [1/N, 1/N, 1/N], # Miss\n [1/N, 1/N, 0/N], # Correct rejection\n [0, 0, 2.0/3], # Hit rate\n [0, 0, 2.0/3], # Threat score\n [3.1, 3.1, 0.7], # Average fcst given obs in interval\n [0, 0, 1.5], # Average obs given obs in interval\n ]\n\n for m in range(len(metrics)):\n metric = metrics[m]\n for i in range(len(intervals)):\n value = metric.compute_from_obs_fcst(np.array(obs), np.array(fcst), intervals[i])\n ex = expected[m][i] * 1.0\n if np.isnan(value):\n self.assertTrue(np.isnan(ex))\n else:\n self.assertAlmostEqual(ex, value)", "def callback_check(data):\n global BallDetected, BallCheck, currentRadius\n BallDetected = data.BallDetected\n currentRadius = data.currentRadius\n\n if (BallDetected == True) and (BallCheck == False):\n BallCheck = True\n rospy.loginfo(\"Ball Detected! 
Start tracking \")\n client.cancel_all_goals()", "def test_alerts_when_no_breath(app, events, data):\n time_intervals = 1 / DriverFactory.MOCK_SAMPLE_RATE_HZ\n num_of_samples = int(NO_BREATH_TIME / time_intervals)\n app.run_iterations(num_of_samples)\n assert alerts.AlertCodes.NO_BREATH in events.alerts_queue.active_alerts, \\\n f\"NO_BREATH missing from: {events.alerts_queue.active_alerts}\"", "def test_check_solved():\n game = Game()\n game.word = 'word'\n game.pattern = 'word'\n game.check_solved()\n assert game.solved is True", "def test_verify_balance_behaviour(self, cred):\n # check the initial balance\n resp = requests.get(balance_url.format(cred[0], cred[1]))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json;charset=UTF-8'\n start_balance = resp.json()['value']\n # now init the verification process\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n resp = requests.get(balance_url.format(cred[0], cred[1]))\n assert resp.status_code == 200\n assert start_balance == resp.json()['value']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']\n resp = requests.get(balance_url.format(cred[0], cred[1]))\n assert resp.status_code == 200\n assert start_balance == resp.json()['value']", "def test_confidence_intervals(self):\n # Taken from a T-Test table\n\n # Two Tailed\n p, ci = _p_value_and_confidence_intervals(2.228, 10, 'two')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n np.testing.assert_allclose(ci, [-2.228, 2.228], atol=.001)\n\n # Left One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.895, 7, 'left')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[0]))\n np.testing.assert_allclose(ci, [-np.inf, 1.895], atol=.001)\n\n # Right One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.761, 14, 'right')\n\n self.assertAlmostEqual(1-p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[1])) \n np.testing.assert_allclose(ci, [-1.761, np.inf], atol=.001)", "def test_compare_genomes_2(self):\n self.pmr_gnm.annotation_status = \"final\"\n self.pmr_gnm.name = \"Trixie\"\n import_genome.compare_genomes(self.genome_pair, self.eval_flags)\n count = count_status(self.genome_pair, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.genome_pair.evaluations), 13)\n with self.subTest():\n self.assertEqual(count, 0)", "def test_verification_status_visible(self):\r\n self.client.login(username=\"jack\", password=\"test\")\r\n self.check_verification_status_on('verified', 'You\\'re enrolled as a verified student')\r\n self.check_verification_status_on('honor', 'You\\'re enrolled as an honor code student')\r\n self.check_verification_status_on('audit', 'You\\'re auditing this course')", "def test_compute_correlation_invalid_confidence_level(self):\r\n self.assertRaises(ValueError, _compute_correlation,\r\n self.taxa_summary1, self.taxa_summary1, 'paired',\r\n 'spearman', 'high', 10, 0)", "def test_num_buses_3(self):\n actual = a1.num_buses(50)\n expected = 1\n self.assertEqual(actual,expected)", "def test_validate_metadata_no_samples(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': 
\"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-no-samples-batch\",\n \"samples\": [],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)", "def test_get_game_boxscore(self):\n pass", "def test_verify(perfectModelEnsemble_initialized_control):\n assert perfectModelEnsemble_initialized_control.verify(\n metric=\"mse\", comparison=\"m2e\", dim=[\"init\", \"member\"]\n )", "def __confidenceLocal__(self,featureVals):\n pass", "def test_in_compliance(self, mock_get_policy_compliance_status):\n # Succeed when calling the in_compliance function with a valid\n # results_api and compliance_status has a mocked response of \"Pass\"\n results_api = ResultsAPI(app_id=test_constants.VALID_RESULTS_API[\"app_id\"])\n mock_get_policy_compliance_status.return_value = \"Pass\"\n self.assertTrue(check_compliance.in_compliance(results_api=results_api))\n\n # Return False when calling the in_compliance function with a valid\n # results_api and compliance_status has a mocked response of \"Unknown\"\n results_api = ResultsAPI(app_id=test_constants.VALID_RESULTS_API[\"app_id\"])\n mock_get_policy_compliance_status.return_value = \"Unknown\"\n self.assertRaises(\n ValueError, check_compliance.in_compliance, results_api=results_api\n )\n\n # Succeed when calling the in_compliance function with a valid\n # results_api and compliance_status has a mocked response of anything\n # but \"Pass\"\n #\n # https://analysiscenter.veracode.com/resource/2.0/applicationbuilds.xsd\n # and https://help.veracode.com/viewer/document/mo49_yYZJCUuKhwdE9WRFQ\n # for possible values\n results_api = ResultsAPI(app_id=test_constants.VALID_RESULTS_API[\"app_id\"])\n for value in [\n \"Calculating...\",\n \"Not Assessed\",\n \"Did Not Pass\",\n \"Conditional Pass\",\n \"Under Vendor Review\",\n \"UNKNOWN VALUE!()&@%\",\n 300,\n 7.12,\n results_api,\n ]:\n mock_get_policy_compliance_status.return_value = value\n self.assertFalse(check_compliance.in_compliance(results_api=results_api))", "def test_binary_compare(self):\n dataset = make_fixture(binary=True, split=True)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y.train, dataset.y.test) is oz\n assert oz._mode == COMPARE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def is_bot(self) -> undefined.UndefinedOr[bool]:", "def test_heartbeat(self):\n self.dut._poll_processes = MagicMock()\n self.dut._do_heartbeat()\n self.assertEqual(self.dut._poll_processes.call_count, 3)", "def test_find_naked_twins(self):\n self.assertEqual(solution.find_naked_twins(self.before_naked_twins_1), self.before_naked_twins_1_boxes)", "async def expected_ball_received(self):\n # We do nothing in that case", "def test_labels_warning(self):\n with pytest.warns(\n YellowbrickWarning, match=\"both classes and encoder specified\"\n ):\n oz = ClassificationScoreVisualizer(\n GaussianNB(),\n classes=[\"a\", \"b\", \"c\"],\n encoder={0: \"foo\", 1: \"bar\", 2: \"zap\"},\n )\n labels = oz._labels()\n npt.assert_array_equal(labels, [\"foo\", \"bar\", \"zap\"])", "def test_test_value_oob_gets_error_message(self):\n res = predict_model.predict(test_value=-1)\n assert res == self.err_msg\n res = predict_model.predict(test_value=40283)\n assert res == 
self.err_msg", "def test_multiclass_balance(self):\n dataset = make_fixture(binary=False, split=False)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y) is oz\n assert oz._mode == BALANCE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def test_class_names_must_match(self):\n oz = ClassBalance(labels=[\"a\", \"b\", \"c\"])\n dataset = make_fixture(binary=False, split=False)\n\n with pytest.raises(YellowbrickValueError):\n oz.fit(dataset.y)", "def test_irobotframework_report_error(self):\n with patch(\"jupyter_kernel_test.validate_message\", fake_validate):\n reply, outputs = self.execute_helper(code=ERROR_TASK)\n assert reply[\"content\"][\"status\"] != \"ok\"\n assert outputs", "def test_dice_coef():\n assert dice_coef() == expected_dice_coef", "def _set_insufficient_confidence_warning(\n self): # pragma: no cover\n self.failed_initial_confidence = True\n self.surface_result('LO_INIT_CONF')\n self.warnings.append(\n 'Bisect failed to reproduce the regression with enough confidence.')", "def test_chao1_bias_corrected(self):\n obs = chao1_bias_corrected(*osd(self.TestData))\n self.assertEqual(obs, 9.75)", "async def voice_verify(self, ctx: Context, *_) -> None:\n try:\n data = await self.bot.api_client.get(f\"bot/users/{ctx.author.id}/metricity_data\")\n except ResponseCodeError as e:\n if e.status == 404:\n embed = discord.Embed(\n title=\"Not found\",\n description=(\n \"We were unable to find user data for you. \"\n \"Please try again shortly, \"\n \"if this problem persists please contact the server staff through Modmail.\"\n ),\n color=Colour.red()\n )\n log.info(f\"Unable to find Metricity data about {ctx.author} ({ctx.author.id})\")\n else:\n embed = discord.Embed(\n title=\"Unexpected response\",\n description=(\n \"We encountered an error while attempting to find data for your user. \"\n \"Please try again and let us know if the problem persists.\"\n ),\n color=Colour.red()\n )\n log.warning(f\"Got response code {e.status} while trying to get {ctx.author.id} Metricity data.\")\n\n await ctx.author.send(embed=embed)\n return\n\n # Pre-parse this for better code style\n if data[\"verified_at\"] is not None:\n data[\"verified_at\"] = parser.isoparse(data[\"verified_at\"])\n else:\n data[\"verified_at\"] = datetime.utcnow() - timedelta(days=3)\n\n checks = {\n \"verified_at\": data[\"verified_at\"] > datetime.utcnow() - timedelta(days=GateConf.minimum_days_verified),\n \"total_messages\": data[\"total_messages\"] < GateConf.minimum_messages,\n \"voice_banned\": data[\"voice_banned\"],\n \"activity_blocks\": data[\"activity_blocks\"] < GateConf.minimum_activity_blocks\n }\n failed = any(checks.values())\n failed_reasons = [MESSAGE_FIELD_MAP[key] for key, value in checks.items() if value is True]\n [self.bot.stats.incr(f\"voice_gate.failed.{key}\") for key, value in checks.items() if value is True]\n\n if failed:\n embed = discord.Embed(\n title=\"Voice Gate failed\",\n description=FAILED_MESSAGE.format(reasons=\"\\n\".join(f'• You {reason}.' 
for reason in failed_reasons)),\n color=Colour.red()\n )\n try:\n await ctx.author.send(embed=embed)\n await ctx.send(f\"{ctx.author}, please check your DMs.\")\n except discord.Forbidden:\n await ctx.channel.send(ctx.author.mention, embed=embed)\n return\n\n self.mod_log.ignore(Event.member_update, ctx.author.id)\n embed = discord.Embed(\n title=\"Voice gate passed\",\n description=\"You have been granted permission to use voice channels in Python Discord.\",\n color=Colour.green()\n )\n\n if ctx.author.voice:\n embed.description += \"\\n\\nPlease reconnect to your voice channel to be granted your new permissions.\"\n\n try:\n await ctx.author.send(embed=embed)\n await ctx.send(f\"{ctx.author}, please check your DMs.\")\n except discord.Forbidden:\n await ctx.channel.send(ctx.author.mention, embed=embed)\n\n # wait a little bit so those who don't get DMs see the response in-channel before losing perms to see it.\n await asyncio.sleep(3)\n await ctx.author.add_roles(discord.Object(Roles.voice_verified), reason=\"Voice Gate passed\")\n\n self.bot.stats.incr(\"voice_gate.passed\")", "def test_guessing(self):\n self.classifier.guess(self.message)", "def test__API_with_correct_answers(self):\n self.mock_connection.state = MockConnection.CORRECT_NUM_OF_CONFIRMATIONS\n\n # mutex must be acquired\n self.assertEqual(self.mutex.lock(), True) # acquire mutex\n self.mutex.unlock() # release mutex", "def test_check_validity(game):\n\n game.solve()\n assert game.check_validity()", "def is_bot(self) -> bool:", "def test_get_good_evening():\n assert get_greetings.get_good_evening().upper() == \"GOOD EVENING!\"", "def generateBroConsistencyCheck(self):\n pass", "def test_console_documented(self):\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)", "def test_error_at_confidence(self, threshold):\r\n\r\n nominator = numpy.sum(numpy.logical_and(self.test_errors, self.test_confidences >= threshold))\r\n denominator = numpy.sum(self.test_confidences >= threshold)\r\n if denominator > 0:\r\n return nominator / float(denominator)\r\n else:\r\n return 0", "def test_bayes_updates_bad_data(self):\r\n self.assertRaises(ValueError, bayes_updates, self.bad)", "def test_lbheartbeat(self):\n pass", "def test_check_bundle_8(self):\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql_x\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 6)\n with self.subTest():\n self.assertEqual(count, 2)", "def checkbbox(boundingbox, mdtype):\n score = 0\n result = 0\n bboxstr = \"\"\n try:\n bboxstr = boundingbox.minx + \",\" + boundingbox.miny + \\\n \",\" + boundingbox.maxx + \",\" + boundingbox.maxy\n # TODO: make values bbox configurable?\n # larger: 2.0, 50.0, 8.0, 55.0\n if float(boundingbox.minx) >= 2.0 and float(boundingbox.miny) >= 50.0 and float(boundingbox.maxx) <= 8.0 and float(boundingbox.maxy) <= 57.0:\n score = 2\n logging.debug('Boudingbox ' + bboxstr + ' is in NL area')\n else:\n score = 1\n logging.debug('Boudingbox ' + bboxstr + ' is NOT in NL area')\n except Exception as e:\n logging.info('Error in boundinbox extent.')\n logging.info(str(e))\n try:\n bboxstr = boundingbox.minx + \",\" + boundingbox.miny + \\\n \",\" + boundingbox.maxx + \",\" + boundingbox.maxy\n except Exception as e:\n logging.debug(\n 'Error in boundinbox extent, bboxstr cannot be constructed')\n logging.debug(str(e))\n if mdtype == \"dataset\" or mdtype == 
\"series\":\n # checkid = 10, so the index in the matrix is: 9\n result = checksdatasets[9][2][score]\n return MkmScore(bboxstr, score, result)", "def test_validate_metadata_no_override_cycles(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-no-override-cycles-batch\",\n \"samples\": [\n \"PTC_EXPn200908LL_L2000001\",\n \"PTC_EXPn200908LL_L2000002\",\n \"PTC_EXPn200908LL_L2000003\"\n ],\n \"settings\": {\n \"adapter_read_1\": \"AAAACAACT\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)" ]
[ "0.67971265", "0.6142417", "0.61158496", "0.6029153", "0.57504106", "0.572192", "0.56564856", "0.5608764", "0.5544521", "0.554337", "0.55383706", "0.55253214", "0.54930353", "0.5490882", "0.5488589", "0.54793096", "0.5459387", "0.5425729", "0.540067", "0.5386406", "0.5373596", "0.53599584", "0.53518784", "0.53335965", "0.53318787", "0.5326166", "0.5317281", "0.5312178", "0.53002065", "0.52993965", "0.52975655", "0.52908134", "0.5282685", "0.5278243", "0.5271076", "0.5268807", "0.526244", "0.5259949", "0.5258925", "0.52588", "0.5252088", "0.5249342", "0.5243107", "0.523953", "0.5238661", "0.5227488", "0.52220845", "0.5220601", "0.5217209", "0.52129465", "0.5197954", "0.51923954", "0.51904136", "0.5183349", "0.51758677", "0.51743066", "0.51703167", "0.5166844", "0.5159255", "0.5158858", "0.5158042", "0.515739", "0.515378", "0.5149035", "0.514556", "0.5143735", "0.5140387", "0.5139987", "0.5133662", "0.5129361", "0.5129322", "0.5126679", "0.5124843", "0.51244557", "0.5111342", "0.5107245", "0.5105458", "0.5099882", "0.50973654", "0.5095271", "0.50912315", "0.5091142", "0.50841546", "0.50748664", "0.5074818", "0.5073429", "0.507008", "0.5068707", "0.50677496", "0.50660723", "0.50635624", "0.5059231", "0.5054061", "0.5053253", "0.50444454", "0.5043685", "0.5042753", "0.5042618", "0.5040099", "0.50383204" ]
0.7511095
0
Generates IDL files from a template for user and system marshaling.
def _Main(): cmd_parser = argparse.ArgumentParser( description='Tool to generate IDL from template.') cmd_parser.add_argument('--idl_template_file', dest='idl_template_file', type=str, required=True, help='Input IDL template file.') cmd_parser.add_argument('--idl_output_file', type=str, required=True, help='Output IDL file.') flags = cmd_parser.parse_args() _GenerateIDLFile(flags.idl_template_file, flags.idl_output_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildAutogenContents(self):\n if len(self.mTemplates) == 0:\n return None\n \n content = \"/** Autogenerated temporary file for template instantiation. */\\n\"\n for t in self.mTemplates:\n template_type = t.mTemplateType\n typedef_name = t.mTypedefName\n content += \"\"\"\n typedef %(template_type)s %(typedef_name)s;\n inline unsigned __instantiate_%(typedef_name)s()\n { return unsigned(sizeof(%(typedef_name)s)); }\n \"\"\" % vars() \n \n return content", "def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass", "def build_mapping() -> str:\n templates = make_module_docstring(\"Template classes for GBD entities\", __file__)\n templates += make_import(\"typing\", [\"Union\", \"Tuple\"])\n templates += (\n make_import(\n \".id\",\n [\n \"c_id\",\n \"s_id\",\n \"hs_id\",\n \"me_id\",\n \"cov_id\",\n \"rei_id\",\n \"scalar\",\n ],\n )\n + SPACING\n )\n templates += make_gbd_record()\n\n for entity, info in get_base_types().items():\n templates += SPACING\n templates += make_record(entity, **info)\n\n return templates", "def test_pnictogen():\n for template in templates:\n template_prefix, extension = os.path.splitext(template)\n for xyz_file in example_xyz_files:\n input_prefix, xyz_file_extension = os.path.splitext(xyz_file)\n\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(xyz_file, xyz_file_extension[1:])\n )\n written_files = pnictogen(mol, input_prefix, template, extension[1:])\n\n assert_equals(type(written_files), list)\n for written_file in written_files:\n assert_equals(type(written_file), str)\n\n written_files2 = pnictogen(mol, input_prefix, template)\n assert_equals(written_files, written_files2)\n\n # Allow use of template in the parent directory\n with cd(\"pnictogen/repo\"):\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(\"../../data/water-dimer.xyz\", \"xyz\")\n )\n written_files = pnictogen(mol, \"../../data/water-dimer\", \"ADF.in\", \"in\")\n\n assert_equals(written_files, [\"../../data/water-dimer.in\"])\n\n main([\"-g\", \"/tmp/hello.world.ORCA.inp\"])\n mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/co.xyz\", \"xyz\"))\n written_files = pnictogen(mol, \"data/co\", \"/tmp/hello.world.ORCA.inp\", foo=\"bar\")\n\n assert_equals(written_files, [\"data/co.inp\"])", "def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += 
\" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file", "def create_files_from_templates(self, model_attributes):\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/%s_%s.py\" % (model_attributes['app_label'], folder_name,\n model_attributes['model_name_slug'], folder_name)\n template_path = \"django_baker/%s\" % (folder_name)\n self.create_file_from_template(file_path, template_path, model_attributes)\n for file_name in [\"base\", \"list\", \"detail\", \"create\", \"update\", \"delete\"]:\n file_path = \"%s/templates/%s/%s_%s.html\" % (model_attributes['app_label'], model_attributes['app_label'],\n model_attributes['model_name_slug'], file_name)\n template_path = \"django_baker/%s.html\" % (file_name)\n self.create_file_from_template(file_path, template_path, model_attributes)", "def generate_basic_modules(template_dir=TEMPLATE_DIR, out_dir=PKG_DIR):\n print(80 * \"-\")\n print(\"Package:\", out_dir)\n\n basic_modules = [\"_init.py\",\n \"constants.py\",\n \"base_api.py\",\n \"exception.py\"]\n\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n installed = []\n for module in basic_modules:\n in_file = os.path.join(template_dir, module)\n\n if module == \"_init.py\":\n module = \"__init__.py\"\n\n out_file = os.path.join(out_dir, module)\n try:\n shutil.copy(in_file, out_file)\n except (FileNotFoundError, shutil.SameFileError) as err:\n print(err)\n installed.append(\"- \" + out_file)\n\n print(\"Basic modules:\")\n print(\"\\n\".join(installed))", "def generate():", "def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render", "def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()", "def 
generate(env):\r\n if not exists(env):\r\n return 0;\r\n\r\n TLBImpBuilder = env.Builder(\r\n action = SCons.Action.Action(\r\n TLBImpGenerator\r\n , generator = 1\r\n #, cmdstr = \"$TLBIMPCOMSTR\"\r\n )\r\n , src_suffix = '.dll'\r\n , target_suffix = '.dll'\r\n )\r\n\r\n dotNETSDK = _getNETSDKPath()\r\n homedir = env.Dir(dotNETSDK)\r\n bindir = homedir.Dir('bin')\r\n\r\n env['TLBIMP'] = 'tlbimp.exe'\r\n env['TLBIMPFLAGS'] = '/nologo /silent /strictref:nopia'\r\n env['TLBIMPCOMSTR'] = '[.NET] TLBIMP: Generating interop assembly for typelib in: $SOURCE to: $TARGET'\r\n env['BUILDERS']['TLBImp'] = TLBImpBuilder\r\n\r\n # Agrego al PATH el directorio del tlbimp\r\n env.PrependENVPath(\r\n 'PATH',\r\n bindir.abspath\r\n )", "def TLBImpGenerator(\r\n target\r\n , source\r\n , env\r\n , for_program = 0\r\n , for_signature = 0\r\n ):\r\n\r\n src = source[0].children()\r\n\r\n assert len(src) >= 1, \"[.NET] TLBIMP: At least one source is needed. Check your declarations.\"\r\n\r\n cmdline = env['TLBIMP']\r\n cmdline += ' '\r\n cmdline += env['TLBIMPFLAGS']\r\n if env.get('namespace') != None:\r\n cmdline += ' /namespace:$namespace'\r\n if env.get('key_file') != None:\r\n cmdline += ' /keyfile:$key_file'\r\n cmdline += ' /out:' + target[0].abspath\r\n for refnode in source[1:]:\r\n for ref in refnode.children():\r\n cmdline += ' /reference:' + ref.abspath\r\n cmdline += ' ' + src[0].abspath\r\n\r\n return [cmdline]", "def Generate(self):\n return self.Render(self.TEMPLATE_NAME, {\n 'name': self._namespace.name,\n 'enums': self._enums,\n 'types': self._types,\n 'events': self._namespace.events,\n 'functions': self._namespace.functions,\n # TODO(sammc): Don't change years when regenerating existing output files.\n 'year': datetime.date.today().year,\n 'source_file': self._namespace.source_file,\n })", "def writeDomainFile():\n writeTemplate(localTemplate)", "def Write(self):\n template_mappings = {}\n\n template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)\n file_content = self._GenerateFromTemplate(template_file, template_mappings)\n\n file_content = file_content.encode('utf-8')\n\n with open(self.PATH, 'wb') as file_object:\n file_object.write(file_content)", "def generate_input_file(temp_type, out_file):\r\n\r\n file_path = os.path.realpath(__file__)\r\n dir_path = os.sep.join(file_path.split(os.sep)[:-1])\r\n\r\n if temp_type == 0:\r\n template = 'Template00_CompleteParameters.py'\r\n elif temp_type == 1:\r\n template = 'Template01_SingleRowCylindricalRollerBearing.py'\r\n elif temp_type == 3:\r\n template = 'Template03_CylindricalRollerThustBearing.py'\r\n elif temp_type == 4:\r\n template = 'Template04_BallOnDisk.py'\r\n elif temp_type == 5:\r\n template = 'Template05_PinOnDisk.py'\r\n elif temp_type == 6:\r\n template = 'Template06_4Ball.py'\r\n elif temp_type == 7:\r\n template = 'Template07_BallOn3Plates.py'\r\n elif temp_type == 8:\r\n template = 'Template08_RingOnRing.py'\r\n else:\r\n raise ValueError(\"temp_type value '{}' undefined\".format(temp_type))\r\n\r\n shutil.copy(os.sep.join([dir_path, 'UserInputTemplates', template]),\r\n out_file)\r\n return out_file", "def test_code_template(tmpdir):\n # Create temp file\n fn = tmpdir.mkdir(\"data\")\n expected_file = os.path.join(str(fn), 'loader.py')\n\n # Gen code template\n runner = CliRunner()\n result = runner.invoke(cli.generate_code_template,\n ['-o', str(fn)], env=env)\n\n assert result.exit_code == 0\n assert os.path.isfile(expected_file)\n\n # Update file\n with open(expected_file, 'w') as f:\n 
f.write('print(\"hello world!\")')\n\n # Try to generate file again\n result = runner.invoke(cli.generate_code_template,\n ['-o', str(fn)], env=env)\n\n assert 'already exists' in result.stdout\n assert result.exit_code == 0\n\n # Check file\n with open(expected_file, 'r') as f:\n assert 'hello world!' in f.read()", "def run(tree, args):\n\n global run_before\n\n if run_before:\n util.fatalError(\"Sorry, the C++ backend cannot process more \"\n \"than one IDL file at a time.\")\n run_before = 1\n\n dirname, filename = os.path.split(tree.file())\n basename,ext = os.path.splitext(filename)\n config.state['Basename'] = basename\n config.state['Directory'] = dirname\n\n process_args(args)\n\n try:\n # Check the input tree only contains stuff we understand\n support.checkIDL(tree)\n\n # initialise the handy ast module\n ast.__init__(tree)\n\n # Initialise the descriptor generating code\n descriptor.__init__(tree)\n\n # Build the map of AST nodes to Environments\n tree.accept(id.WalkTree())\n\n # AMI code hooks into existing infrastructure (ie doesn't need to\n # be driven explicitly here)\n #if config.state['AMI']:\n # tree = ami.__init__(tree)\n # tree.accept(id.WalkTree())\n # Not ported yet.\n \n header.run(tree)\n \n skel.run(tree)\n \n # if we're generating code for Typecodes and Any then\n # we need to create the DynSK.cc file\n if config.state['Typecode']:\n dynskel.run(tree)\n\n if config.state['Example Code']:\n impl.run(tree)\n\n except AttributeError, e:\n name = e.args[0]\n unsupported_visitors = map(lambda x:\"visit\" + x,\n AST_unsupported_nodes[:])\n if name in unsupported_visitors:\n # delete all possibly partial output files\n for file in output.listAllCreatedFiles():\n os.unlink(file)\n\n util.unsupportedIDL()\n \n raise\n\n except SystemExit, e:\n # fatalError function throws SystemExit exception\n # delete all possibly partial output files\n for file in output.listAllCreatedFiles():\n os.unlink(file)\n \n raise", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def generate(options):\n interactive = options['i']\n if interactive:\n generate_interactive(options)\n else:\n generate_rcfile(vars(options['c']), options['rcfile'])", "def _GenerateFromTemplate(self, template_filename, template_mappings):\n template_filename = os.path.join(\n self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_filename)\n return super(AppveyorYmlWriter, self)._GenerateFromTemplate(\n template_filename, template_mappings)", "def test_create_template_for_all_namespaces(self):\n pass", "def genStixDoc(\n outputDir_,\n targetFileSha1_,\n targetFileSha256_,\n targetFileSha512_,\n targetFileSsdeep_,\n targetFileMd5_,\n targetFileSize_,\n targetFileName_,\n ipv4Addresses_,\n hostNames_):\n parsedTargetFileName = reFileName(targetFileName_)[1]\n parsedTargetFilePrefix = reFileName(targetFileName_)[0]\n stix.utils.set_id_namespace({\"http://www.nickdriver.com/cuckoo2CRITs\" : \"cuckoo2CRITs\"})\n NS = cybox.utils.Namespace(\"http://www.nickdriver.com/cuckoo2CRITs\", \"cuckoo2CRITs\")\n cybox.utils.set_id_namespace(NS)\n stix_package = STIXPackage()\n\n stix_header = STIXHeader()\n stix_header.title = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n stix_header.description = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n 
stix_package.stix_header = stix_header\n\n #Will take this out later\n # Create the ttp\n malware_instance = MalwareInstance()\n malware_instance.add_name(parsedTargetFileName)\n malware_instance.description = targetFileSha1_\n ttp = TTP(title='TTP: ' + parsedTargetFileName)\n ttp.behavior = Behavior()\n ttp.behavior.add_malware_instance(malware_instance)\n #stix_package.add_ttp(ttp)\n \n #Trying to create an array that will be added later...\n stix_observables = []\n \n #This works - leaving intact until the new portion works\n '''\n # Create the indicator for the ipv4 addresses\n ipv4Object = Address(ipv4Addresses_, Address.CAT_IPV4)\n #stix_msg['stix_observables'].extend(Observables([ipv4Object]))\n stix_observables.extend([ipv4Object])\n '''\n for ip in ipv4Addresses_:\n\t\tipv4Object = Address(ip, Address.CAT_IPV4)\n\t\tstix_observables.extend([ipv4Object])\n \n \n '''\n #This works - leaving intact until the new portion works\n # Create the indicator for the domain names\n domainNameObject = DomainName()\n domainNameObject.value = hostNames_\n '''\n for name in hostNames_:\n\t\tdomainNameObject = DomainName()\n\t\tdomainNameObject.value = name\n\t\tstix_observables.extend([domainNameObject])\n\t\t\n \n\n \n # Create the observable for the file\n fileObject = File()\n fileObject.file_name = parsedTargetFileName\n #fileObject.file_name.condition = 'Equals'\n fileObject.size_in_bytes = targetFileSize_\n #fileObject.size_in_bytes.condition = 'Equals'\n fileObject.add_hash(Hash(targetFileSha1_, type_='SHA1', exact=True))\n fileObject.add_hash(Hash(targetFileSha256_, type_='SHA256', exact=True))\n fileObject.add_hash(Hash(targetFileSha512_, type_='SHA512', exact=True))\n fileObject.add_hash(Hash(targetFileSsdeep_, type_='SSDEEP', exact=True))\n fileObject.add_hash(Hash(targetFileMd5_, type_='MD5', exact=True))\n \n stix_observables.extend([fileObject])\n \n \n stix_package.observables = Observables(stix_observables)\n \n #DEBUG\n #stagedStixDoc = stix_package.to_xml()\n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(stagedStixDoc)\n\t\t\n #print \"stix_observables list\"\n\n #pp.pprint(stix_observables)\n \n '''\n #VERY BASIC STIX ATTEMPT - THIS WORKS!\n a = Address(\"1.2.3.4\", Address.CAT_IPV4)\n d = DomainName()\n d.value = \"cybox.mitre.org\"\n stix_package.observables = Observables([a, d])\n #concensus - Observable does not work - ObservableS does\n '''\n\t\n\t\n\t###UNCOMMENT THIS WHEN DONE###\n\t\n \n stagedStixDoc = stix_package.to_xml()\n stagedStixDoc = fixAddressObject(stagedStixDoc)\n stagedStixDoc = fixDomainObject(stagedStixDoc)\n today = datetime.datetime.now()\n now = today.strftime('%Y-%m-%d_%H%M%S')\n if not os.path.exists(outputDir_):\n os.makedirs(outputDir_)\n with open (outputDir_ + '/' + now + '-' + targetFileSha1_ + '.stix.xml', 'a') as myfile:\n myfile.write(stagedStixDoc)\n _l.debug('Wrote file: ' + now + '-' + targetFileSha1_ + '.stix.xml')\n \n return", "def doMakeEyeballTemplate(self):\n \"\"\"\n returnList = []\n templObjNameList = []\n templHandleList = []\n \"\"\"\n try:\n log.debug(\">>> doMakeLimbTemplate\")\n assert self.cls == 'TemplateFactory.go',\"Not a TemlateFactory.go instance!\"\n\n #Gather limb specific data and check\n #==============\n mi_helper = self._mi_module.helper\n if not mi_helper:\n raise StandardError,\"No helper found!\"\n\n b_irisControl = mi_helper.irisHelper\n b_pupilControl = mi_helper.pupilHelper\n\n mi_helper.parent = self._mi_module.templateNull\n except Exception,error:raise Exception,\"doMakeEyeballTemplate | 
{0}\".format(error)\n\n\n return True", "def generate(self, namespace: Optional[str], template: str, func: Callable, call_args: Dict) -> str:", "def generate(self):\n try:\n self._parse_groups()\n self._parse_types()\n self._parse_enums()\n self._parse_features()\n self._parse_extensions()\n self._add_extra_enums()\n self._parse_and_build_commands()\n self._build_all_enums()\n self._build_enum_groups()\n self._generate_files()\n except Exception as exception:\n print('Generate failed: {}'.format(str(exception)))\n raise", "def _create_swig_interface(self, path: pathlib.Path) -> str:\n module_name = path.with_suffix('').name\n header_code = self.create_header_file(path)\n include_directives = []\n function_signatures = []\n for line in header_code.splitlines():\n if line.startswith('#include'):\n collection = include_directives\n else:\n collection = function_signatures\n collection.append(line)\n swig_interface = SWIG_INTERFACE_TEMPLATE.format(\n module_name=module_name, include_directives='\\n'.join(include_directives),\n function_signatures='\\n'.join(function_signatures))\n _LOG.debug('SWIG interface: \"\"\"%s\"\"\"', swig_interface)\n return swig_interface", "def generate(random, pid, autogen_tools, n):\n\n generator_path = autogen_tools.get_directory(__file__)\n\n template_path = path.join(generator_path, \"code.txt.template\")\n rendered_template_path = path.join(generator_path, \"code.txt\")\n\n autogen_tools.replace_source_tokens(\n template_path,\n {\"flag\": gen_code(n, \"Aviation House\")},\n rendered_template_path\n )\n\n code_link = autogen_tools.generate_resource_link(pid, \"code.txt\", title=\"Encrypted file\")\n\n return {\n \"resource_files\": {\n \"public\": [\n (rendered_template_path, \"code.txt\"),\n ],\n },\n \"static_files\": {\n },\n \"problem_updates\": {\n \"description\": \"<p>We've updated the system to AES. We heard that this is military grade encryption so that should fix everything</p><p>The team have stored the password in %s. 
Bet you can't get into it</p>\" % code_link\n }\n }", "def create(self):\n\t\tlipsBaseFile.imp()", "def generate_makefile(path, make_template):\n full_path = os.path.abspath(path)\n\n # List of *.c files in the directory.\n app_src = [ f for f in os.listdir(full_path) if f.endswith('.c') ]\n \n # if there is an *.rci file, assume rci is enabled and that ConfigGenerator\n # will generate a remote_config.c we want to compile\n rci = [ f for f in os.listdir(full_path) if f.endswith('.rci') ]\n if rci and \"remote_config.c\" not in app_src:\n app_src.append(\"remote_config.c\")\n\n # Get the Mode and name of the Sample, this will be used for determining\n # what libraries and platform files to include.\n (_, mode, _, sample) = full_path.rsplit(os.sep, 3)\n\n subs = DEFAULT_SUBS.copy()\n\n dvt_test = path.find('dvt') != -1\n\n if dvt_test:\n subs['CONNECTOR_DIR'] = '../../../private'\n subs['PUBLIC_HEADER_DIR'] = '../../../public/include'\n subs['PLATFORM_DIR'] = \"\"\"\n# Location of Platform Src Code.\nPLATFORM_DIR=../../../public/run/platforms/$(PLATFORM)\"\"\"\n\n # Change platform to 'template' if this is a template test.\n if sample == \"template_test\":\n subs[\"TARGET_PLATFORM\"] = \"\"\"\n# Target Platform\nPLATFORM = template\"\"\"\n\n # Treat compile and link as a special case, no platform used.\n if sample in LINK_SAMPLES:\n subs['TARGET_PLATFORM'] = ''\n subs['PLATFORM_DIR'] = ''\n subs['PLATFORM_VPATH'] = ''\n subs['PLATFORM_HEADER_INCLUDE'] = ''\n subs['SRCS'] = 'SRCS = $(APP_SRCS) $(PRIVATE_SRCS)'\n else:\n # Assume this is the base set of Platform files\n # Only include files who are also not in local directory.\n subs['PLATFORM_SRCS'] = 'PLATFORM_SRCS = '\n for f in ['os.c', 'config.c', 'debug.c', 'main.c']:\n if f not in app_src:\n subs['PLATFORM_SRCS'] += '$(PLATFORM_DIR)/%s ' % f\n\n # Add all *.c files in the directory.\n subs['APP_SRCS'] = 'APP_SRCS = ' + ' '.join([ re.sub(r'\\.c$', '.c', f) \\\n for f in app_src ])\n\n # Add -lpthread as a linked library if this is a run sample.\n if (dvt_test or mode == 'run') and sample not in LINK_SAMPLES:\n subs['LIBS'] += ' -lpthread' \n\n if sample == 'connect_on_ssl' and 'network_ssl.c' not in app_src:\n # Add network_ssl.c to PLATFORM_SRCS and -lssl to LIBS.\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_dns.c'\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_tcp_ssl.c'\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_udp.c'\n subs['LIBS'] += ' -lssl -lcrypto'\n elif sample not in LINK_SAMPLES:\n if 'network.c' not in app_src:\n if 'network_dns.c' not in app_src:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_dns.c'\n if 'network_tcp.c' not in app_src:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_tcp.c'\n if 'network_udp.c' not in app_src:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_udp.c'\n\n if sample == 'sm_sms':\n if 'network_sms.c' not in app_src:\n # Add network_ssl.c to PLATFORM_SRCS and -lssl to LIBS.\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_sms.c'\n else:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_sms.c'\n if sample == 'sm_sms_gammu':\n subs['LIBS'] += ' -lGammu'\n subs['GAMMU_INCLUDES'] = \"CFLAGS += -I/usr/include/gammu\"\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_sms.c'\n if sample == 'file_system' and 'file_system.c' not in app_src:\n # Add file_system.c to PLATFORM_SRCS. 
-lcrypto if APP_ENABLE_MD5\n # passed.\n subs['PLATFORM_SRCS'] += \" $(PLATFORM_DIR)/file_system.c\"\n\n if sample == 'fs_os_abort' and 'file_system.c' not in app_src:\n # Add file_system.c to PLATFORM_SRCS. -lcrypto if APP_ENABLE_MD5\n # passed.\n subs['PLATFORM_SRCS'] += \" $(PLATFORM_DIR)/file_system.c\"\n\n if sample == 'file_system' or sample == 'file_system_dir_cov':\n if dvt_test:\n subs['LIBS'] += \"\"\"\nAPP_ENABLE_MD5=true\"\"\"\n\n subs['LIBS'] += \"\"\"\n\nifeq ($(APP_ENABLE_MD5),true)\nLIBS += -lcrypto\nCFLAGS+= -DAPP_ENABLE_MD5=true\nendif\"\"\"\n\n if sample == 'ic_timing':\n subs['LIBS'] += ' -lrt' \n\n\n if dvt_test:\n subs['POST_SAMPLE'] = '$(AT)python ../../../dvt/scripts/iikmapsum.py $(SAMPLE).map | tee $(basename $@).stats'\n \n if sample == 'build_library':\n make_lib_template = open(LIB_TEMPLATE, 'r')\n lib_template_data = make_lib_template.read()\n make_lib_template.close()\n# lib_template = Template(lib_template_data)\n return lib_template_data\n else:\n return make_template.substitute(**subs)", "def _GenerateLibFile(self, lib_template, lib_file_path, file_paths,\n **template_args):\n # Load template.\n template = self._templates.Load(lib_template)\n # Generate the .lib file.\n lib_file_contents = self._emitters.FileEmitter(lib_file_path)\n\n # Emit the list of #source directives.\n list_emitter = lib_file_contents.Emit(template, **template_args)\n lib_file_dir = os.path.dirname(lib_file_path)\n for path in sorted(file_paths):\n relpath = os.path.relpath(path, lib_file_dir)\n list_emitter.Emit(\"#source('$PATH');\\n\", PATH=MassagePath(relpath))", "def _GenerateLibFile(self, lib_template, lib_file_path, file_paths,\n **template_args):\n # Load template.\n template = self._templates.Load(lib_template)\n # Generate the .lib file.\n lib_file_contents = self._emitters.FileEmitter(lib_file_path)\n\n # Emit the list of #source directives.\n list_emitter = lib_file_contents.Emit(template, **template_args)\n lib_file_dir = os.path.dirname(lib_file_path)\n for path in sorted(file_paths):\n relpath = os.path.relpath(path, lib_file_dir)\n list_emitter.Emit(\"#source('$PATH');\\n\", PATH=MassagePath(relpath))", "def generate_command(ctx, template):\n config_extension = '.py'\n template_extension = '.html'\n output_extension = '.pdf'\n\n context = Context(\n config=path.join(\n ctx.obj.get(\"config_path\"),\n template + config_extension\n ),\n template_path=path.join(\n ctx.obj.get(\"template_path\"),\n template + template_extension\n ),\n variables=ctx.obj.get(\"varibales\"),\n output_path=path.join(\n ctx.obj.get(\"output_path\"),\n template, output_extension),\n )\n\n generator = Generator(context)\n generator.execute()\n exit(0)", "def construct_dicts(self, path):\n module_dicts = self.read_dict(path, use_superpkg=True)\n\n id_dict = dict()\n name_dict = dict()\n\n for cmd_dict in module_dicts:\n # Create a cmd template object\n cmd_temp = cmd_template.CmdTemplate(\n cmd_dict[self.OP_CODE_FIELD],\n cmd_dict[self.MNEMONIC_FIELD],\n cmd_dict[self.COMPONENT_FIELD],\n cmd_dict[self.ARGS_FIELD],\n cmd_dict[self.DESC_FIELD],\n )\n\n id_dict[cmd_dict[self.OP_CODE_FIELD]] = cmd_temp\n name_dict[cmd_dict[self.MNEMONIC_FIELD]] = cmd_temp\n\n return (id_dict, name_dict)", "def generate_cpp():\n cpp_file = AUTOGEN_WARNING\n cpp_file += \"// Implements basic nuclear data functions.\\n\"\n cpp_file += \"#ifndef PYNE_IS_AMALGAMATED\\n\"\n cpp_file += '#include \"atomic_data.h\"\\n'\n cpp_file += '#include \"nucname.h\"\\n'\n cpp_file += \"#endif\\n\"\n cpp_file += \" \\n\"\n cpp_file += 
\"void pyne::_load_atomic_mass_map_memory() { \\n\"\n cpp_file += \" // header version of atomic weight table data \\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!atomic_mass_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_atomic_mass_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!natural_abund_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_abund_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // calculate the atomic_masses of the elements \\n\"\n cpp_file += \" std::map<int,double> :: iterator it;\\n\"\n cpp_file += \" \\n\"\n cpp_file += \" for (int z = 1; z <= 92 ; z++) {\\n\"\n cpp_file += \" // loop through the natural abundance map\\n\"\n cpp_file += \" double element_atomic_weight = 0.0;\\n\"\n cpp_file += \" for (it = natural_abund_map.begin(); it != natural_abund_map.end() ; ++it){\\n\"\n cpp_file += \" // if the atomic number of the abudance matches the\\n\"\n cpp_file += \" // that of index\\n\"\n cpp_file += \" if(pyne::nucname::znum(it->first) == z) {\\n\"\n cpp_file += \" // take atomic abundance and multiply by mass\\n\"\n cpp_file += (\n \" // to get the mass of that nuclide / 100 since abundance is in %\\n\"\n )\n cpp_file += \" element_atomic_weight += (it->second*atomic_mass_map[it->first]/100.0);\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // insert the abundance of the element into the list\\n\"\n cpp_file += \" atomic_mass_map[z*10000000] = element_atomic_weight;\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_atomic_mass_map() { \\n\"\n cpp_file += generate_atomic_mass()\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_abund_map() { \\n\"\n cpp_file += generate_abundances()\n cpp_file += \"}\\n\"\n return cpp_file", "def generate_skeleton(self, ci, qname):\n symbol_table = ci.epv.symbol_table\n cls = ci.co\n\n\n # Skeleton (in Chapel)\n self.pkg_chpl_skel.gen(ir.Import('.'.join(symbol_table.prefix)))\n\n self.pkg_chpl_skel.new_def('use sidl;')\n objname = '.'.join(ci.epv.symbol_table.prefix+[ci.epv.name]) + '_Impl'\n\n self.pkg_chpl_skel.new_def('extern record %s__object { var d_data: opaque; };'\n %qname)#,objname))\n self.pkg_chpl_skel.new_def('extern proc %s__createObject('%qname+\n 'd_data: int, '+\n 'out ex: sidl_BaseInterface__object)'+\n ': %s__object;'%qname)\n self.pkg_chpl_skel.new_def(ci.chpl_skel)\n\n\n # Skeleton (in C)\n cskel = ci.chpl_skel.cstub\n cskel._name = qname+'_Skel'\n cskel.gen(ir.Import('stdint'))\n cskel.gen(ir.Import('stdio'))\n cskel.gen(ir.Import(cskel._name))\n cskel.gen(ir.Import(qname+'_IOR'))\n cskel.gen(ir.Fn_defn([], ir.pt_void, qname+'__call_load', [],\n [ir.Comment(\"FIXME: [ir.Stmt(ir.Call('_load', []))\")], ''))\n\n # set_epv ... 
Setup the entry-point vectors (EPV)s\n #\n # there are 2*3 types of EPVs:\n # epv: regular methods\n # sepv: static methods\n # pre_(s)epv: pre-hooks\n # post_(s)epv: post-hooks\n epv_t = ci.epv.get_ir()\n sepv_t = ci.epv.get_sepv_ir()\n pre_epv_t = ci.epv.get_pre_epv_ir()\n pre_sepv_t = ci.epv.get_pre_sepv_ir()\n post_epv_t = ci.epv.get_post_epv_ir()\n post_sepv_t = ci.epv.get_post_sepv_ir()\n cskel.gen(ir.Fn_decl([], ir.pt_void, 'ctor', [], ''))\n cskel.gen(ir.Fn_decl([], ir.pt_void, 'dtor', [], ''))\n\n epv_init = []\n sepv_init = []\n for m in builtins+cls.get_methods():\n fname = m[2][1] + m[2][2]\n attrs = sidlir.method_method_attrs(m)\n static = member_chk(sidlir.static, attrs)\n def entry(stmts, epv_t, table, field, pointer):\n stmts.append(ir.Set_struct_item_stmt(epv_t, ir.Deref(table), field, pointer))\n\n if static: entry(sepv_init, sepv_t, 'sepv', 'f_'+fname, '%s_%s_skel'%(qname, fname))\n else: entry(epv_init, epv_t, 'epv', 'f_'+fname, '%s_%s_skel'%(qname, fname))\n\n builtin_names = ['_ctor', '_ctor2', '_dtor']\n with_hooks = member_chk(ir.hooks, attrs)\n if fname not in builtin_names and with_hooks:\n if static: entry(sepv_init, pre_sepv_t, 'pre_sepv', 'f_%s_pre'%fname, 'NULL')\n else: entry(epv_init, pre_epv_t, 'pre_epv', 'f_%s_pre'%fname, 'NULL')\n if static: entry(sepv_init, post_sepv_t, 'post_sepv', 'f_%s_post'%fname, 'NULL')\n else: entry(epv_init, post_epv_t, 'post_epv', 'f_%s_post'%fname, 'NULL')\n\n pkgname = '_'.join(ci.epv.symbol_table.prefix)\n\n dummyargv = '''\n char* argv[] = { \n babel_program_name,\n \"-nl\", /* number of locales */\n \"\",\n \"-v\", /* verbose chapel runtime */\n NULL\n };\n argv[2] = getenv(\"SLURM_NTASKS\");\n if (argv[2] == NULL) {\n fprintf(stdout, \"**ERROR: please set the SLURM_NTASKS environment variable\\\\n\"\n \" to the desired number of Chapel locales.\");\n argv[2] = \"0\";\n }\n int ignored = setenv(\"GASNET_BACKTRACE\", \"1\", 1);\n'''\n cskel.genh(ir.Import('stdlib'))\n cskel.pre_def('extern int chpl_init_library(int argc, char* argv[]);')\n cskel.pre_def('// You can set this to argv[0] in main() to get better debugging output')\n cskel.pre_def('char* __attribute__((weak)) babel_program_name = \"BRAID_LIBRARY\";')\n # These are now called by chpl_init_library -> chpl_gen_init\n #cskel.pre_def('extern void chpl__init_chpl__Program(int, const char*);')\n #cskel.pre_def('extern void chpl__init_%s_Impl(int, const char*);'%pkgname)\n init_code = [dummyargv,\n 'int locale_id = chpl_init_library(4, argv)',\n # 'chpl__init_chpl__Program(__LINE__, __FILE__)',\n # 'chpl__init_%s_Impl(__LINE__, __FILE__)'%pkgname\n ]\n init_code = map(lambda x: (ir.stmt, x), init_code)\n epv_init.extend(init_code)\n sepv_init.extend(init_code)\n\n cskel.gen(ir.Fn_defn(\n [], ir.pt_void, qname+'__set_epv',\n [ir.Arg([], ir.out, epv_t, 'epv'),\n ir.Arg([], ir.out, pre_epv_t, 'pre_epv'),\n ir.Arg([], ir.out, post_epv_t, 'post_epv')],\n epv_init, ''))\n\n if sepv_t:\n cskel.gen(ir.Fn_defn(\n [], ir.pt_void, qname+'__set_sepv',\n [ir.Arg([], ir.out, sepv_t, 'sepv'),\n ir.Arg([], ir.out, pre_sepv_t, 'pre_sepv'),\n ir.Arg([], ir.out, post_sepv_t, 'post_sepv')],\n sepv_init, ''))\n\n # C Skel\n for code in cskel.optional:\n cskel.new_global_def(code)\n cskel.write()", "def generate(self, context=None):\r\n outputfile = self.__get_output_filename()\r\n # For output type 'hcr', write the binary repository file\r\n if self.output_obj.type == 'hcr':\r\n self.logger.info(\"Generating binary repository to '%s'\" % outputfile)\r\n writer = HcrWriter()\r\n repo = 
self.output_obj.get_hcr_repository()\r\n data = writer.get_repository_bindata(repo)\r\n f = context.create_file(outputfile, mode='wb')\r\n #f = open(outputfile,'wb')\r\n try: f.write(data)\r\n finally: f.close()\r\n elif self.output_obj.type == 'header':\r\n self.logger.info(\"Generating header file to '%s'\" % outputfile)\r\n writer = HeaderWriter(outputfile, self.output_obj)\r\n writer.write(context)\r\n elif self.output_obj.type == None:\r\n # The HCRML file contains no <output> element, so no output should\r\n # be generated\r\n pass", "def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()", "def test_render_templates():\n water_mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/water.xyz\", \"xyz\"))\n if not water_mol.name:\n water_mol.name = \"data/water.xyz\"\n\n main([\"-g\", \"/tmp/foo.ADF.in\"])\n main([\"/tmp/foo.ADF.in\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.in\").read().strip(),\n \"\"\"TITLE data/water.xyz\n\nCHARGE 0 0\n\nNumber of atoms\n 3\n\nATOMS Cartesian\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\"\"\",\n )\n\n main([\"-g\", \"/tmp/test.GAMESS.inp\"])\n main([\"/tmp/test.GAMESS.inp\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.inp\").read(),\n \"\"\" $CONTRL COORD=CART UNITS=ANGS $END\n\n $DATA\ndata/water.xyz\nC1\nO 8.0 0.0584027061 0.0584027059 0.0000000000\nH 1.0 1.0096135406 -0.0680162466 0.0000000000\nH 1.0 -0.0680162466 1.0096135407 0.0000000000\n $END\n\n\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/hello.GAMESSUK.inp\"])\n main([\"/tmp/hello.GAMESSUK.inp\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.inp\").read(), water_mol.to_string(\"gukin\"))\n\n main([\"-g\", \"/tmp/hello.world.Gaussian.gjf\"])\n main([\"/tmp/hello.world.Gaussian.gjf\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.gjf\").read(),\n \"\"\"#Put Keywords Here, check Charge and Multiplicity.\n\n data/water.xyz\n\n0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\n\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.Jaguar.in\"])\n main([\"/tmp/bar.Jaguar.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"jin\"))\n\n main([\"-g\", \"/tmp/foo.Molpro.inp\"])\n main([\"/tmp/foo.Molpro.inp\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.inp\").read(), water_mol.to_string(\"mp\"))\n\n main([\"-g\", \"/tmp/example.MOPAC.mop\"])\n main([\"/tmp/example.MOPAC.mop\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.mop\").read(),\n \"\"\"CHARGE=0 MS=0.0\ndata/water.xyz\n\nO 0.05840 1 0.05840 1 0.00000 1\nH 1.00961 1 -0.06802 1 0.00000 1\nH -0.06802 1 1.00961 1 0.00000 1\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.MPQC.in\"])\n main([\"/tmp/bar.MPQC.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"mpqcin\"))\n\n main([\"-g\", \"/tmp/foo.NWChem.nw\"])\n main([\"/tmp/foo.NWChem.nw\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.nw\").read(),\n \"\"\"start molecule\n\ntitle data/water.xyz\n\ngeometry units angstroms print xyz autosym\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nend\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/example.ORCA.inp\"])\n main([\"/tmp/example.ORCA.inp\", \"data/water.xyz\"])\n assert_equals(\n 
open(\"data/water.inp\").read(),\n \"\"\"# data/water.xyz\n! Opt\n\n* xyz 0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\n*\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.Psi.dat\"])\n main([\"/tmp/bar.Psi.dat\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.dat\").read(),\n \"\"\"# data/water.xyz\n\nmolecule {\n0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nunits angstrom\n}\n\noptimize('scf')\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/example.QChem.in\"])\n main([\"/tmp/example.QChem.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"qcin\"))\n\n main([\"-g\", \"/tmp/foo.ZINDO.input\"])\n main([\"/tmp/foo.ZINDO.input\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.input\").read(), water_mol.to_string(\"zin\"))", "def generate_input_files(elevation_folder_path, template_input_file_path):\n import pathlib\n json_dict = get_inputs_from_file(template_input_file_path)\n\n path_to_match = pathlib.Path(elevation_folder_path)\n\n for heightfile in path_to_match.glob(\"*.npy\"):\n dot_index = str(heightfile).rfind('.')\n filename_base = str(heightfile)[:dot_index]\n opt_output_filename = filename_base + \".out\"\n opt_input_filename = filename_base + \".json\"\n\n localdict = json_dict.copy()\n\n localdict[\"output_file\"] = opt_output_filename\n localdict[\"elevation_file\"] = str(heightfile)\n\n dump_json_dict(out_dict=localdict, filename=opt_input_filename)", "def main(temp_dir, extensions, template):\n env = load_env(template_dir=temp_dir)\n if not template:\n # Get all the templates and return a dict with enumerated \n # templates names\n ext = extensions if extensions else []\n template_dict = get_templates(env, extensions=ext)\n # Echo the content of the template directory by enumerating \n # the templates and a simple list join\n temp_list = list()\n for x in template_dict.items():\n num = str(x[0])\n # Remove whitespace, underscores and capitalize words\n temp_name = x[1].strip().replace(\"_\", \" \").title()\n temp_string = \"{}. 
{}\".format(num, temp_name)\n temp_list.append(temp_string)\n click.echo(\"\\n\".join(temp_list))\n # Prompt the user to give the number of the template\n temp_num = click.prompt(\n \"Choose a templeta by entering the number of the template.\",\n type=int\n )\n # Get the template from the template dictionary\n template = template_dict.get(temp_num)\n # Get the variables\n temp_vars = get_vars(template, env)\n # Crate a dict with variables and let the user input the variables\n vars_to_render = dict()\n for var in temp_vars:\n user_var = click.prompt(\"{}?\".format(var.capitalize()))\n vars_to_render[var] = user_var\n # Get the template\n temp = env.get_template(template)\n # Render the template\n click.echo(temp.render(vars_to_render))", "def _generate_from_template(self, name, path, context):\n template = self._templates.get_template(name)\n with open(path, 'w') as f:\n f.write(template.render(context))", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def generate(request, response):\n for file_descriptor in request.proto_file:\n LOGGER.info('Processing \"{}\".', file_descriptor.name)\n if file_descriptor.name not in request.file_to_generate:\n LOGGER.info(\n 'File \"{}\" is not supposed to be processed, skipping.',\n file_descriptor.name\n )\n continue\n\n target_dir = path.dirname(path.normpath(\n file_descriptor.name\n ))\n filename, _ = path.splitext(path.basename(file_descriptor.name))\n\n pb_module = filename + '_pb2'\n content = MESSAGES_TEMPLATE.render(\n PB_MODULE=pb_module,\n FILE_DESCRIPTOR=file_descriptor\n )\n\n target_path = path.join(target_dir, filename + '.py')\n\n output_file = response.file.add()\n output_file.name = target_path\n output_file.content = content\n\n LOGGER.info('Writing file \"{}\".', target_path)", "def unit_test_obj_mk_obj_template(p, rname, inputs):\n # define name and arguments\n name = 'obj_mk_obj_template'\n arg_names = ['night_name', 'files']\n arg_types = [str, list]\n\n # get arguments\n args = get_args(p, name, rname, inputs, arg_names, arg_types)\n return args, name", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n 
contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def GenerateBindings(filename, path):\n name = filename[0:-4]\n if name.startswith('a_'):\n name = name[2:]\n\n input = path\n output_header = os.path.join(os.path.dirname(path), '%s.h' % name)\n output_impl = os.path.join(os.path.dirname(path), '%s.cpp' % name)\n\n write_bindings.WriteBindings(input, output_header, output_impl)", "def generate_wrapper(self):\n\n # If there is an input file, parse it\n if self.package_info_path is not None:\n info_parser = PackageInfoParser(self.package_info_path,\n self.source_root)\n info_parser.parse()\n self.package_info = info_parser.package_info\n else:\n pass\n\n # Generate a header collection\n self.collect_source_hpp_files()\n\n # Attempt to assign source paths to each class, assuming the containing \n # file name is the class name\n for eachModule in self.package_info.module_info:\n for eachClass in eachModule.class_info:\n for eachPath in self.package_info.source_hpp_files:\n base = ntpath.basename(eachPath)\n if eachClass.name == base.split('.')[0]:\n eachClass.source_file_full_path = eachPath\n if eachClass.source_file is None:\n eachClass.source_file = base\n\n # Attempt to automatically generate template args for each class\n for eachModule in self.package_info.module_info:\n info_generator = CppInfoHelper(eachModule)\n for eachClass in eachModule.class_info:\n info_generator.expand_templates(eachClass, \"class\")\n\n # Generate the header collection\n header_collection_path = self.generate_header_collection()\n\n # Parse the header collection\n self.parse_header_collection(header_collection_path)\n\n # Update the Class and Free Function Info from the parsed code\n self.update_class_info()\n self.update_free_function_info()\n self.update_enum_info()\n\n # Write the modules\n for eachModule in self.package_info.module_info:\n module_writer = CppModuleWrapperWriter(self.global_ns,\n self.source_ns,\n eachModule,\n self.get_wrapper_template(),\n self.wrapper_root)\n module_writer.write()", "def make_header_files():\n os.makedirs(DATA_DIR) if not os.path.exists(DATA_DIR) else None\n from dkistdataratemodel.units import frame\n from dkist_data_model.generator.dataproducts.visp import CalibratedVISP\n\n \"\"\"\n Generate VISP\n \"\"\"\n visp = CalibratedVISP(end_condition=20*frame)\n\n visp_files = visp.to_fits(\"sp_5_labelled\",\n path_template=os.path.join(DATA_DIR, 'visp_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"visp.zip\"), \"w\") as myzip:\n for fname in visp_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)\n\n \"\"\"\n Generate VTF\n \"\"\"\n from dkist_data_model.generator.dataproducts.vtf import CalibratedVTF\n vtf = CalibratedVTF(end_condition=96*frame)\n\n vtf_files = vtf.to_fits(\"5d_test\",\n path_template=os.path.join(DATA_DIR, 'vtf_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"vtf.zip\"), \"w\") as myzip:\n for fname in vtf_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)", "def main(\n files: List[Path] = typer.Argument(default=None, dir_okay=False, exists=True),\n template: Optional[str] = typer.Option(\n None, '--template', help='Name of template file'\n ),\n logo: Optional[str] = typer.Option(None, '--logo', help='Name of logo file'),\n logo_width: Optional[str] = typer.Option(\n None, '--logo-width', help='Logo width (default 35mm)'\n ),\n highlight_style: Optional[str] = typer.Option(None, '--highlight-style',\n help='Specify coloring style to be used in 
highlighting source code'),\n syntax_definition: Optional[str] = typer.Option(None, '--syntax-definition',\n help='Specify a directory which contains syntax definition files'),\n no_toc: bool = typer.Option(\n False, '--no-toc', help='table of contents in PDF document'\n ),\n no_number_sections: bool = typer.Option(False, '--no-number-sections', help='no section numbering'),\n\n no_titlepage: bool = typer.Option(False, '--no-titlepage', help='title in PDF document'),\n tex_file: bool = typer.Option(\n False, '--tex', help='create TeX file instead of PDF document'\n ),\n email: Optional[str] = typer.Option(None, '--email', help='Author email'),\n company: Optional[str] = typer.Option(None, '--company', help='Name of company'),\n department: Optional[str] = typer.Option(\n None, '--department', help='Name of department'\n ),\n confidential: bool = typer.Option(\n False, '--confidential', help='indicate confidential'\n ),\n debug: bool = typer.Option(False, '--debug', help='turns debugging on'),\n pdf_engine: str = typer.Option(\n 'xelatex',\n '--pdf-engine',\n help='Specify pdf engine, one of lualatex, xelatex or tectonic ',\n ),\n _version: bool = typer.Option(\n None, '-V', '--version', callback=version_callback, help='Show version and exit'\n ),\n):\n\n if not files:\n typer.echo('Error: Must specify at least one .md file.')\n raise typer.Abort()\n\n mdfiles: List[str] = [str(md) for md in files]\n\n template = template or os.environ.get('MD2PDF_TEMPLATE')\n if template is None:\n print('No template specified')\n sys.exit(1)\n\n email = email or os.environ.get('MD2PDF_AUTHOR_EMAIL')\n footer_center = ''\n\n # command line overwrites `MD2PDF_PDF_ENGINE`. if both are not given\n # then `xelatex` is the default\n pdf_engine = pdf_engine or os.environ.get('MD2PDF_PDF_ENGINE') or 'xelatex'\n # check that pdf-engine is one of the following\n if pdf_engine not in ['xelatex', 'lualatex', 'tectonic']:\n print('--pdf-engine must be one of \"xelatex\", \"lualatex\", \"tectonic\"')\n sys.exit(1)\n\n ext = '.pdf'\n if tex_file:\n ext = '.tex'\n\n if len(mdfiles) == 1:\n toml_file = os.path.splitext(mdfiles[0])[0] + '.toml'\n\n if os.path.exists(toml_file):\n print(f'TOML file {toml_file} found')\n parsed_toml = toml.load(toml_file)\n default_val = parsed_toml.get('default')\n if default_val is None:\n print(f'No file names found in {toml_file}')\n else:\n mdfiles = default_val.get('files')\n\n for mdf in mdfiles:\n print(f'Compiling {mdf}')\n\n main_mdfile = os.path.realpath(mdfiles[0])\n\n outfile = Path(main_mdfile).stem + ext\n\n year = date.today().year\n\n company = company or os.environ.get('MD2PDF_COMPANY')\n department = department or os.environ.get('MD2PDF_DEPARTMENT')\n\n if company:\n if confidential:\n footer_center = f'© Copyright {year} {company}'\n else:\n footer_center = f'{year} {company}'\n\n pdcmd = PandocCmd(outfile)\n pdcmd.append(f'--template={template}')\n pdcmd.append(f'--pdf-engine={pdf_engine}')\n\n pdcmd.set_v('footer-center', footer_center)\n pdcmd.set_v('company', company)\n pdcmd.set_v('department', department)\n\n syntax_definition = syntax_definition or os.environ.get('MD2PDF_SYNTAX_DEFINITION_DIR')\n if syntax_definition is not None:\n add_syntax_definition(pdcmd, syntax_definition)\n\n pdcmd.append('--highlight-style')\n highlight_style = highlight_style or os.environ.get('MD2PDF_HIGHLIGHT_STYLE')\n if highlight_style is None:\n pdcmd.append('pygments')\n else:\n check_highlight_style(highlight_style)\n pdcmd.append(highlight_style)\n\n if not no_number_sections:\n 
pdcmd.append('--number-sections')\n\n if no_titlepage:\n pdcmd.set_m('titlepage', 'false')\n\n logo = logo or os.environ.get('MD2PDF_LOGO')\n pdcmd.set_v('logo', logo)\n\n logo_width = logo_width or os.environ.get('MD2PDF_LOGO_WIDTH')\n pdcmd.set_v('logo-width', logo_width)\n\n pdcmd.set_m('email', email)\n\n if not no_toc:\n pdcmd.append('--toc')\n\n pdcmd.extend(mdfiles)\n\n if debug:\n print(' '.join(pdcmd.pandoc))\n\n\n pdcmd.run()", "def generate_thrift_wrappers(maxfile_name, dirs, targets):\n with common.change_working_directory(dirs['scratch']):\n run('thrift', '--gen', 'cpp', maxfile_name + '.thrift')\n\n for target in targets:\n run('thrift', '--gen', target, maxfile_name + '.thrift')\n\n target_dir = os.path.join(dirs['client'], target)\n subprocess.call(['cp', '-rf', 'gen-' + target, target_dir])", "def main():\n\targs = getArgs()\n\tid_question = args.id_question\n\tlang = args.language\n\tdir_cp = None\n\twith open('config.json') as json_file:\n\t\tconfig_data = json.load(json_file)\n\t\tdir_cp = config_data['dir_cp']\n\n\t\n\t\n\t# sample_io = GetData(args.id_question).get_uri_io_sample()\n\ttemplate = FileUtil(id_question, dir_cp['path'], lang)\n\ttemplate.write_template()\n\t# print(sample_io)", "def generate_template():\n date = input(\"What day is the template for? dd/mm/yy: \")\n day = input(\"What day of the week is this?: \")\n if \"/\" not in date:\n date_components = [date[0:2], date[2:4], date[4:]]\n else:\n date_components = date.split(\"/\")\n file_name = \"{}{}{}.table\".format(date_components[0], date_components[1], date_components[2])\n try:\n template_file = open(\"templates/{}_template.table\".format(day), \"r\")\n except IOError:\n print(\"source template not found\")\n else:\n source_lines = []\n output_file = open(file_name, \"w\")\n for line in template_file:\n source_lines.append(line)\n output_file.write(line)\n print(\"template generated successfully for: {} on date: {}\".format(day, date))", "def gen_qsys_file(target, custom_components, sys_clock_rate_hz, template, working_dir):\n create_tcl_system_file(target, custom_components,\n sys_clock_rate_hz, template, working_dir)\n gen_qsys_file_from_tcl(target.system_name + \".tcl\", working_dir)", "def cli(template, outfile):\n try:\n project = Project.from_directory()\n except InvalidProjectError as e:\n raise click.UsageError(str(e))\n jenv = get_jinja_env()\n if outfile is not None:\n if len(template) != 1:\n raise click.UsageError(\n \"--outfile may only be used with a single template argument\"\n )\n print(project.render_template(template[0], jenv), end=\"\", file=outfile)\n else:\n for tmplt in template:\n project.write_template(tmplt, jenv)", "def GenerateCode(self):\n print \"Generating code...\"\n for type in self.getObjectTypes():\n generator = __import__(\"codegen.Cpp\" + type, globals(), locals(), [''])\n print \"Generating code for objects of type: %s\" % type\n generator.GenerateCode(self)", "def generate(self, api):\n for namespace in api.namespaces.values():\n # One module per namespace is created. 
The module takes the name\n # of the namespace.\n with self.output_to_relative_path('{}.py'.format(namespace.name)):\n self._generate_namespace_module(namespace)", "def generate_ext_stub(cls):\n # Qualified name (C Version)\n qname = '_'.join(symbol_table.prefix+[cls.name])\n self.exts.append(qname)\n\n if self.config.verbose:\n import sys\n mod_name = '.'.join(symbol_table.prefix[1:]+[cls.name])\n sys.stdout.write('\\r'+' '*80)\n sys.stdout.write('\\rgenerating glue code for %s'%mod_name)\n sys.stdout.flush()\n\n # Consolidate all methods, defined and inherited\n cls.scan_methods()\n \n # chpl_defs = ChapelScope(chpl_stub)\n ci = self.ClassInfo(cls)\n\n # if self.server:\n # ci.impl = self.pkg_impl\n\n ci.stub.new_def(babel.externals(cls.get_scoped_id()))\n ci.stub.new_def(babel.builtin_stub_functions(cls.get_scoped_id()))\n \n has_contracts = ior_template.generateContractChecks(cls)\n self.gen_default_methods(cls, has_contracts, ci)\n\n #print qname, map(lambda x: x[2][1]+x[2][2], cls.all_methods)\n for method in cls.all_methods:\n (Method, Type, Name, Attrs, Args, \n Except, From, Requires, Ensures, DocComment) = method\n ci.epv.add_method((method, Type, Name, Attrs, \n babel.drop_rarray_ext_args(Args),\n Except, From, Requires, Ensures, DocComment))\n\n # all the methods for which we would generate a server impl\n impl_methods = babel.builtins+cls.get_methods()\n impl_methods_names = [sidlir.method_method_name(m) for m in impl_methods]\n\n # client\n for method in cls.all_methods:\n has_impl = sidlir.method_method_name(method) in impl_methods_names\n self.generate_client_method(symbol_table, method, ci, has_impl)\n\n if self.server:\n class_methods = filter(sidlir.is_not_static, impl_methods)\n static_methods = filter(sidlir.is_static, impl_methods)\n\n # # Class\n # ci.impl.new_def(gen_doc_comment(cls.doc_comment, chpl_stub)+\n # 'class %s_Impl {'%qname)\n # splicer = '.'.join(cls.qualified_name+['Impl'])\n # ci.impl.new_def('// DO-NOT-DELETE splicer.begin(%s)'%splicer)\n # ci.impl.new_def('// DO-NOT-DELETE splicer.end(%s)'%splicer)\n # for method in class_methods: \n # self.generate_server_method(symbol_table, method, ci)\n\n # ci.impl.new_def('} // class %s_Impl'%qname)\n # ci.impl.new_def('')\n # ci.impl.new_def('')\n\n # # Static\n # if static_methods:\n # ci.impl.new_def('// all static member functions of '+qname)\n # ci.impl.new_def(gen_doc_comment(cls.doc_comment, chpl_stub)+\n # '// FIXME: chpl allows only one module per library //'+\n # ' module %s_static_Impl {'%qname)\n\n # for method in static_methods:\n # self.generate_server_method(symbol_table, method, ci)\n\n # ci.impl.new_def('//} // module %s_static_Impl'%qname)\n # ci.impl.new_def('')\n # ci.impl.new_def('')\n\n\n # # Chapel Stub (client-side Chapel bindings)\n # self.generate_chpl_stub(chpl_stub, qname, ci)\n \n # # Because of Chapel's implicit (filename-based) modules it\n # # is important for the Chapel stub to be one file, but we\n # # generate separate files for the cstubs\n # self.pkg_chpl_stub.new_def(chpl_stub)\n\n # Stub (in C), the order of these definitions is somewhat sensitive\n ci.stub.genh_top(ir.Import(qname+'_IOR'))\n ci.stub.gen(ir.Import(ci.stub._name))\n\n pkg_name = '_'.join(symbol_table.prefix)\n ci.stub.gen(ir.Import(pkg_name))\n ci.stub.write()\n\n # IOR\n ior_template.generate_ior(ci, with_ior_c=self.server, _braid_config=self.config )\n ci.ior.write()\n\n # Skeleton\n if self.server:\n self.generate_skeleton(ci, qname)\n\n # Convenience header\n ext_h = CFile(qname)\n 
ext_h.genh(ir.Import(qname+'_IOR'))\n ext_h.genh(ir.Import(qname+'_Stub'))\n ext_h.write()\n\n # Makefile\n self.classes.append(qname)", "def main(args=None):\n if args is None:\n parser = create_parser()\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n input_module = input_mapping[args.input_reader]\n output_module = output_mapping[args.output_format]\n\n templates = []\n # Load templates from external folder if set.\n if args.template_folder:\n templates += read_templates(os.path.abspath(args.template_folder))\n\n # Load internal templates, if not disabled.\n if not args.exclude_built_in_templates:\n templates += read_templates()\n output = []\n for f in args.input_files:\n res = extract_data(f.name, templates=templates, input_module=input_module)\n if res:\n logger.info(res)\n output.append(res)\n if args.copy:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.copyfile(f.name, join(args.copy, filename))\n if args.move:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.move(f.name, join(args.move, filename))\n f.close()\n\n if output_module is not None:\n output_module.write_to_file(output, args.output_name, args.output_date_format)", "def output_ida_script(self):\n with open('ida_script.py', 'w') as f:\n f.write('import idaapi\\n')\n f.write('import idautils\\n')\n for source_addr, dst_addr in self.ida_xrefs:\n #print 'idaapi.add_cref(0x%x, 0x%x, fl_CF)\\n' % (source_addr, dst_addr)\n f.write('idaapi.add_cref(0x%x, 0x%x, fl_CF)\\n' % (source_addr, dst_addr))\n\n for source_addr, comment, function_name in self.ida_comments:\n #print 'idc.MakeComm(0x%x, %s%s)\\n' % (source_addr, comment, function_name)\n f.write('idc.MakeComm(0x%x, \"%s%s\")\\n' % (source_addr, comment, function_name))", "def create_template_ini_file():\n if not os.path.isfile(API_KEYS_LOCATION):\n with open(API_KEYS_LOCATION, 'w') as f:\n f.write('[openai]\\n')\n f.write('organization_id=\\n')\n f.write('secret_key=\\n')\n\n print('OpenAI API config file created at {}'.format(API_KEYS_LOCATION))\n print('Please edit it and add your organization ID and secret key')\n print('If you do not yet have an organization ID and secret key, you\\n'\n 'need to register for OpenAI Codex: \\n'\n 'https://openai.com/blog/openai-codex/')\n sys.exit(1)", "def generate_srv(pkg, files, out_dir, search_path):\n msg_context = MsgContext.create_default()\n for f in files:\n f = os.path.abspath(f)\n infile = os.path.basename(f)\n full_type = genmsg.gentools.compute_full_type_name(pkg, infile)\n spec = genmsg.msg_loader.load_srv_from_file(msg_context, f, full_type)\n generate_srv_from_spec(msg_context, spec, search_path, out_dir, pkg, f)", "def test_create_namespaced_template(self):\n pass", "def test_8_template(install_test_files, data_dir):\n fc_dir = os.path.join(data_dir, os.pardir, \"100326_FC6107FAAXX\")\n with make_workdir():\n cl = [\"bcbio_nextgen.py\", \"-w\", \"template\", \"--only-metadata\",\n \"freebayes-variant\",\n os.path.join(fc_dir, \"100326.csv\"),\n os.path.join(fc_dir, \"7_100326_FC6107FAAXX_1_fastq.txt\"),\n os.path.join(fc_dir, \"7_100326_FC6107FAAXX_2_fastq.txt\"),\n os.path.join(fc_dir, \"8_100326_FC6107FAAXX.bam\")]\n subprocess.check_call(cl)", "def _generate_info(gen: CodeGenerator, namespace: str,\n service: ProtoService) -> 
None:\n service_id = f'0x{pw_rpc.ids.calculate(service.proto_path()):08x}'\n info = f'struct {RPC_NAMESPACE.lstrip(\":\")}::internal::MethodInfo'\n\n for method in service.methods():\n gen.line('template <>')\n gen.line(f'{info}<{namespace}::pw_rpc::{gen.name()}::'\n f'{service.name()}::{method.name()}> {{')\n\n with gen.indent():\n gen.line(f'static constexpr uint32_t kServiceId = {service_id};')\n gen.line(f'static constexpr uint32_t kMethodId = '\n f'0x{pw_rpc.ids.calculate(method.name()):08x};')\n gen.line(f'static constexpr {RPC_NAMESPACE}::MethodType kType = '\n f'{method.type().cc_enum()};')\n gen.line()\n\n gen.line('template <typename ServiceImpl>')\n gen.line('static constexpr auto Function() {')\n\n with gen.indent():\n gen.line(f'return &ServiceImpl::{method.name()};')\n\n gen.line('}')\n\n gen.method_info_specialization(method)\n\n gen.line('};')\n gen.line()", "def _generate_generic_licence_header_template() -> str:\n return GENERIC_LICENCE_HEADER_TEMPLATE.format(\n licence_identifier=configuration.get_value(ConfigurationVariable.FILE_LICENCE_IDENTIFIER),\n author=\"${owner}\",\n date=\"${years}\",\n )", "def test_create_processed_template_for_all_namespaces_v1(self):\n pass", "def generate_combinations(self, handler_input, template_name):\n # type: (HandlerInput, str) -> Iterator[str]\n locale = handler_input.request_envelope.request.locale\n language, country = split_locale(locale=locale)\n if not language and not country:\n yield template_name\n else:\n yield os.path.join(template_name, language, country)\n yield os.path.join(template_name, (language + \"_\" + country))\n yield os.path.join(template_name, language)\n yield (template_name + \"_\" + language + \"_\" + country)\n yield (template_name + \"_\" + language)\n yield template_name", "def create_swig_interface(self, path: pathlib.Path) -> str:\n module_name = path.with_suffix('').name\n swig_interface = SWIG_INTERFACE_TEMPLATE_HPP.format(\n module_name=module_name, include_path=path)\n _LOG.debug('SWIG interface: \"\"\"%s\"\"\"', swig_interface)\n return swig_interface", "def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' 
% tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()", "def genLibData(self):\n import mush\n tsMain = string.Template(mush.libGenMain)\n tsIfAltId = string.Template(mush.libGenIfAltId)\n #--Data Records\n for id in ('lib_action','lib_actionCount'):\n glob = self.getRecord('GLOB',id,Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n setAllCode = 'begin lib_setAllGS\\n'\n setNoneCode = 'begin lib_setNoneGS\\n'\n for libId in self.libList:\n (srcId,altId) = self.libMap[libId]\n srcBook = self.srcBooks.get(srcId)[0]\n if not srcBook:\n print '%s: Missing source: %s' % (libId,srcId)\n continue\n #--Global\n glob = self.getRecord('GLOB',libId+'G',Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n #--Script\n scriptId = libId+'LS'\n script = self.getRecord('SCPT',scriptId,Scpt)\n scriptCode = tsMain.substitute(\n libId=libId, srcId=srcId, ifAltId=(\n (altId and tsIfAltId.substitute(libId=libId,altId=altId)) or ''))\n script.setCode(scriptCode)\n script.setChanged()\n #--Book\n srcBook.load(unpack=True)\n book = self.getRecord('BOOK',libId,Book)\n book.model = srcBook.model\n book.title = srcBook.title\n book.icon = srcBook.icon\n book.text = srcBook.text\n book.script = scriptId\n book.setChanged()\n #--Set Scripts\n setAllCode += 'set %sG to 1\\n' % (libId,)\n setNoneCode += 'set %sG to 0\\n' % (libId,)\n #--Set scripts\n for id,code in (('lib_setAllGS',setAllCode),('lib_setNoneGS',setNoneCode)):\n code += ';--Done\\nstopScript %s\\nend\\n' % (id,)\n script = self.getRecord('SCPT',id,Scpt)\n script.setCode(code)\n script.setChanged()", "def __write_file_from_template(self, file, template, macros):\n create_dir(file)\n with open(file, 'a') as db, open(template, 'r') as template:\n for line in template:\n db.write(self.__expand_macros(line, macros))", "def write_template_body2(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n basin = template_filename.split('/')[-1].split('_')[1].replace('.php', '')\n template_file = open(template_filename, 'a')\n template_file.write('domains.push({\\n')\n 
template_file.write(' displayName: \"All\",\\n')\n template_file.write(' name: \"'+basin+'\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('variables.push({\\n')\n template_file.write(' displayName: \"Mean\",\\n')\n template_file.write(' name: \"<?php echo $LeadMean_name; ?>\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_AL.php\",\\n')\n template_file.write(' displayName: \"Atlantic\",\\n')\n template_file.write(' name: \"'+template_type+'_AL\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_CP.php\",\\n')\n template_file.write(' displayName: \"Central Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_CP\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_EP.php\",\\n')\n template_file.write(' displayName: \"Eastern Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_EP\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_WP.php\",\\n')\n template_file.write(' displayName: \"Western Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_WP\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Initialize the page\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//function for keyboard controls\\n')\n template_file.write('document.onkeydown = keys;\\n')\n template_file.write('\\n')\n template_file.write(\n '//Decare object containing data about the currently displayed map\\n'\n )\n template_file.write('imageObj = {};\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('initialize();\\n')\n template_file.write('\\n')\n template_file.write(\n '//Format initialized run date & return in requested format\\n'\n )\n template_file.write('function formatDate(offset,format){\\n')\n template_file.write(' var newdate = String(cycle);\\n')\n template_file.write(' var yyyy = newdate.slice(0,4)\\n')\n template_file.write(' var mm = newdate.slice(4,6);\\n')\n template_file.write(' var dd = newdate.slice(6,8);\\n')\n template_file.write(' var hh = newdate.slice(8,10);\\n')\n template_file.write(\n ' var curdate = new Date(yyyy,parseInt(mm)-1,dd,hh)\\n'\n )\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write(' //Offset by run\\n')\n template_file.write(\n ' var newOffset = curdate.getHours() + offset;\\n'\n )\n template_file.write(' curdate.setHours(newOffset);\\n')\n template_file.write('\\n')\n template_file.write(\n ' var yy = String(curdate.getFullYear()).slice(2,4);\\n'\n )\n template_file.write(' yyyy = curdate.getFullYear();\\n')\n template_file.write(' mm = curdate.getMonth()+1;\\n')\n template_file.write(' dd = curdate.getDate();\\n')\n template_file.write(' if(dd < 10){dd = \"0\" + dd;}\\n')\n template_file.write(' hh = curdate.getHours();\\n')\n template_file.write(' if(hh < 10){hh = \"0\" + hh;}\\n')\n 
template_file.write('\\n')\n template_file.write(' var wkday = curdate.getDay();\\n')\n template_file.write(\n ' var day_str = [\"Sun\", \"Mon\", \"Tue\", \"Wed\", '\n +'\"Thu\", \"Fri\", \"Sat\"];\\n'\n )\n template_file.write('\\n')\n template_file.write(' //Return in requested format\\n')\n template_file.write(\" if(format == 'valid'){\\n\")\n template_file.write('//06Z Thu 03/22/18 (90 h)\\n')\n template_file.write(\n 'var txt = hh + \"Z \" + day_str[wkday] + \" \" + '\n +'mm + \"/\" + dd + \"/\" + yy;\\n'\n )\n template_file.write(' return txt;\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('function initialize(){\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Set image object based on default variables\\n'\n )\n template_file.write(' imageObj = {\\n')\n template_file.write(\n ' variable: \"<?php echo $LeadMean_name; ?>\",\\n'\n )\n template_file.write(' domain: \"'+basin+'\"\\n')\n template_file.write(' };\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change domain based on passed argument, if any\\n'\n )\n template_file.write(' var passed_domain = \"\";\\n')\n template_file.write(' if(passed_domain!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_domain,domains)>=0){\\n'\n )\n template_file.write(\n ' imageObj.domain = passed_domain;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change variable based on passed argument, if any\\n'\n )\n template_file.write(' var passed_variable = \"\";\\n')\n template_file.write(' if(passed_variable!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_variable,variables)>=0){\\n'\n )\n template_file.write(\n ' imageObj.variable = passed_variable;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Populate forecast hour and dprog/dt arrays for this '\n +'run and frame\\n'\n )\n template_file.write(\" populateMenu('variable');\\n\")\n template_file.write(\" populateMenu('domain');\\n\")\n template_file.write(\" populateMenu('maptype')\\n\")\n template_file.write('\\n')\n template_file.write(' //Populate the frames arrays\\n')\n template_file.write(' frames = [];\\n')\n template_file.write(\n ' for(i=minFrame;i<=maxFrame;i=i+incrementFrame)'\n +'{frames.push(i);}\\n'\n )\n template_file.write('\\n')\n template_file.write(\n ' //Predefine empty array for preloading images\\n'\n )\n template_file.write(' for(i=0; i<variables.length; i++){\\n')\n template_file.write(' variables[i].images = [];\\n')\n template_file.write(' variables[i].loaded = [];\\n')\n template_file.write(' variables[i].dprog = [];\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(' //Preload images and display map\\n')\n template_file.write(' preload(imageObj);\\n')\n template_file.write(' showImage();\\n')\n template_file.write('\\n')\n template_file.write(' //Update mobile display for swiping\\n')\n template_file.write(' updateMobile();\\n')\n template_file.write('\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('var xInit = null;\\n')\n template_file.write('var yInit = null;\\n')\n template_file.write('var xPos = null;\\n')\n template_file.write('var yPos = null;\\n')\n template_file.write('\\n')\n template_file.write('</script>\\n')\n template_file.write('\\n')\n 
template_file.write('</body>\\n')\n template_file.write('</html>\\n')\n template_file.close()", "def update():\n if Project.use_templates:\n defaults = _project_defaults()\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)", "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def generate_stubs(self):\n # type: (Generator) -> str\n return render_to_string(\n self.backend, \"stubs.py\", {\n \"classes\": self._classes,\n \"module\": self.module_name\n })", "def build_model_multi(self):\n\t\n\t\tif len(self.template) < 1: raise Exception('except: needs multiple templates '+str(self.template))\n\t\tif len(self.target) != 1: raise Exception('except: needs only one target '+str(self.template))\n\t\n\t\tprint 'preparing modeller scripts'\n\t\t#---variables passed to modeller via settings-homology.py\n\t\tvars_to_modeller = {\n\t\t\t'pdblist':self.template,\n\t\t\t'target_seq':self.target[0][0],\n\t\t\t'n_models':self.settings['n_models'],\n\t\t\t}\n\t\n\t\t#---write a settings file for the modeller script\n\t\twith open(self.rootdir+'settings-homology.py','w') as fp:\n\t\t\tfp.write('#!/usr/bin/python\\n\\n')\n\t\t\tfor var in vars_to_modeller.keys():\n\t\t\t\tval = '\\''+str(vars_to_modeller[var])+'\\'' \\\n\t\t\t\t\tif type(vars_to_modeller[var]) == str else vars_to_modeller[var]\n\t\t\t\tfp.write(var+' = '+str(val)+'\\n')\n\t\t\t\n\t\t#---write an ali file with the target\n\t\tfasta_linelen = 50\n\t\twith open(self.rootdir+self.target[0][0]+'.ali','w') as fp:\n\t\t\tfp.write('>P1;'+self.target[0][0]+'\\n')\n\t\t\tfp.write('sequence:'+self.target[0][0]+':::::::0.00:0.00\\n')\n\t\t\tseq = self.target[0][1]\n\t\t\tchopped = [seq[j*fasta_linelen:(j+1)*fasta_linelen] for j in range(len(seq)/fasta_linelen+1)]\n\t\t\tchopped = [i for i in chopped if len(i) > 0]\n\t\t\tfor i,seg in enumerate(chopped): fp.write(seg+('\\n' if i < len(chopped)-1 else '*\\n'))\n\t\t\n\t\tprint 'running modeller'\n\t\tcmd = [gmxpaths['modeller'],'script-multi.py']\n\t\tcall(cmd,logfile='log-modeller-script-multi',cwd=self.rootdir)", "def test_main():\n for template in templates:\n main([\"-g\", template])\n\n # One at a time\n for xyz_file in example_xyz_files:\n main([template, xyz_file])\n\n # All at once\n main([template] + list(example_xyz_files))\n\n # Allow use of template in the parent directory\n with cd(\"data\"):\n main([\"../pnictogen/repo/ADF.in\", \"water-dimer.xyz\"])", "def generate_msg(pkg, files, out_dir, search_path):\n msg_context = MsgContext.create_default()\n for f in files:\n f = os.path.abspath(f)\n infile = os.path.basename(f)\n full_type = genmsg.gentools.compute_full_type_name(pkg, infile)\n spec = genmsg.msg_loader.load_msg_from_file(msg_context, f, full_type)\n generate_msg_from_spec(msg_context, spec, 
search_path, out_dir, pkg)", "def create_template(api_url, project_id, username, token, update_flag,\n validation_messages, json_files, content_files, scope, csv_flag, input_list):\n try:\n # template loader log folder exists check\n log_path = '/opt/core/cache/tmp/templateloader_logs/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n timestamp = datetime.datetime.fromtimestamp(\n time.time()).strftime('%Y%m%d%H%M%S')\n log_filename = 'templateloader_' + timestamp\n my_file = open(log_path + log_filename, \"a\")\n\n # Print and write the log messages\n for message in validation_messages:\n my_file.write(\"%s\\n\" % message)\n\n success_templates = 0\n\n for metadata, content in zip(json_files, content_files):\n # Metadata Read\n json_file = open(metadata, 'r')\n file_name = list(metadata.split(\"/\"))\n file_name = file_name[-1]\n req_body = json.dumps(json_file.read()).encode('utf-8')\n req_body = json.loads(req_body)\n json_file.close()\n\n req_body = json.loads(req_body)\n\n if csv_flag:\n if input_list and req_body.get(\"name\") not in input_list:\n continue\n # Content Read\n if os.path.isfile(content):\n content_datafile = open(content, 'r')\n content_value = json.dumps(content_datafile.read()).encode('utf-8')\n content_value = json.loads(content_value)\n content_datafile.close()\n req_body[\"content_files\"] = dict(content=dict(content=content_value, name=content.split('/')[-1]))\n else:\n req_body[\"content_files\"] = get_content_files(content)\n # Checks for files\n files_directory = os.path.abspath(\n os.path.join(content, os.pardir)) + \"/files\"\n if os.path.exists(files_directory):\n dependencies = list()\n for script_file_path in find_files(files_directory, '*'):\n script_file_name = os.path.basename(script_file_path)\n script_file_obj = open(script_file_path, 'r')\n script_file_value = script_file_obj.read()\n script_file_obj.close()\n dependencies.append({\"content\": script_file_value, \"name\": script_file_name})\n req_body[\"content_files\"][\"files\"] = dependencies\n\n dependencies_directory = os.path.abspath(os.path.join(content, 'modules'))\n if os.path.exists(dependencies_directory):\n dependencies = list()\n for elem in os.listdir(dependencies_directory):\n module_path = os.path.join(dependencies_directory, elem)\n if not os.path.isdir(module_path):\n continue\n dependencies.append({\"type\": \"module\", \"name\": elem,\n \"content_files\": get_content_files(module_path)})\n if dependencies:\n req_body['dependencies'] = dependencies\n if scope != 'default':\n req_body['scope'] = scope\n\n req_body = json.dumps(req_body).encode('utf-8')\n\n url = \"%s%s/%s\" % (api_url, project_id, 'templates')\n http_client = httplib2.Http()\n headers = {\"X-Auth-User\": username, \"X-Auth-Token\": token}\n\n # call the Create Template API\n resp, content = http_client.request(\n url, method=\"POST\", body=req_body, headers=headers)\n content = json.loads(content)\n\n if resp[\"status\"] == \"200\":\n success_templates += 1\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\",\n content[\"status\"],\n content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n elif resp[\"status\"] == \"400\" and update_flag:\n template_id = None\n url = \"%s%s/%s\" % (api_url, project_id, 'templates')\n list_resp, list_content = http_client.request(\n url, method=\"GET\", headers=headers)\n list_content = json.loads(list_content)\n if list_resp[\"status\"] == \"200\":\n template_list = list_content['data']['templates']\n for template in template_list:\n if 
template['name'] == json.loads(req_body)['name']:\n # call the Update Template API\n template_id = template[\"id\"]\n url = \"%s%s/%s/%s\" % (api_url, project_id,\n 'templates', template_id)\n update_resp, update_content = \\\n http_client.request(url, method=\"PUT\",\n body=req_body,\n headers=headers)\n update_content = json.loads(update_content)\n log_msg = \"%s%s%s - %s\" % (\n file_name[:-5], \" ==> status:\",\n update_content[\"status\"],\n update_content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n if update_resp[\"status\"] == \"200\":\n success_templates += 1\n break\n if not template_id:\n temp_url = \"%s%s/%s?is_temp=true\" % (api_url, project_id, 'templates')\n list_temp_resp, list_temp_content = http_client.request(\n temp_url, method=\"GET\", headers=headers)\n list_temp_content = json.loads(list_temp_content)\n if list_temp_resp[\"status\"] == \"200\":\n temp_template_list = list_temp_content['data']['templates']\n for template in temp_template_list:\n if template['name'] == json.loads(req_body)['name']:\n # call the Update Template API\n template_id = template[\"id\"]\n url = \"%s%s/%s/%s\" % (api_url, project_id,\n 'templates', template_id)\n update_resp, update_content = \\\n http_client.request(url, method=\"PUT\",\n body=req_body,\n headers=headers)\n update_content = json.loads(update_content)\n log_msg = \"%s%s%s - %s\" % (\n file_name[:-5], \" ==> status:\",\n update_content[\"status\"],\n update_content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n if update_resp[\"status\"] == \"200\":\n success_templates += 1\n break\n if not template_id:\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\",\n content[\"status\"],\n content[\"message\"])\n sys.stderr.write(\"%s\\n\" % log_msg)\n my_file.write(\"%s\\n\" % log_msg)\n else:\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\",\n content[\"status\"],\n content[\"message\"])\n sys.stderr.write(\"%s\\n\" % log_msg)\n my_file.write(\"%s\\n\" % log_msg)\n if not csv_flag:\n total_templates = len(json_files)\n failed_templates = total_templates - success_templates\n else:\n total_templates = len(input_list)\n failed_templates = total_templates - success_templates\n sys.stdout.write('Total templates: ' + str(total_templates) + \"\\n\")\n sys.stdout.write(\"Success Templates: \" + str(success_templates) + \"\\n\")\n sys.stderr.write(\"Failed Templates: \" + str(failed_templates) + \"\\n\")\n\n my_file.write('Total templates: ' + str(total_templates) + \"\\n\")\n my_file.write(\"Failed Templates: \" + str(failed_templates) + \"\\n\")\n my_file.close()\n\n except Exception as e:\n sys.stdout.write(e.message)\n exit(1)", "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))", "def __init__(self, template_path, jinja_args=None):\n\n # Short description for the template class.\n self.desc = 'Generic'\n\n self.template_filename = os.path.basename(template_path)\n\n m = re.match(r'([^.]*).*\\.([^.]*)$', self.template_filename)\n if not m:\n raise Exception(\n 'Error matching template filename: %s' % self.template_filename)\n\n self.schema_object_type = m.group(1).lower()\n self.extension = m.group(2)\n\n default_jinja_args = {\n 'loader': jinja2.FileSystemLoader(os.path.dirname(template_path)),\n 'undefined': jinja2.StrictUndefined,\n 'trim_blocks': True,\n 'lstrip_blocks': 
True,\n 'extensions': ['jinja2.ext.do', 'jinja2.ext.loopcontrols'],\n 'line_statement_prefix': '%%',\n 'line_comment_prefix': '##'\n }\n\n if jinja_args:\n default_jinja_args.update(jinja_args)\n\n self.jinja_env = jinja2.Environment(**default_jinja_args)\n\n self.jinja_env.globals.update({\n 'template_filename': self.template_filename,\n 'error': error,\n 'full_id': full_id,\n 'quote': json.dumps,\n 'is_object': is_object,\n 'is_field': is_field,\n 'is_map': is_map,\n 'has_visibility': has_visibility,\n 'hasattr': hasattr,\n 'json_schema': json_schema,\n 'data_as_json': data_as_json,\n 'camelize': inflection.camelize,\n 'dasherize': inflection.dasherize,\n 'humanize': inflection.humanize,\n 'underscore': inflection.underscore,\n 'regex_replace': regex_replace,\n 'print': print,\n 'map_key': map_key,\n 'map_value': map_value,\n 'get_enum_dependencies': get_enum_dependencies,\n 'get_struct_dependencies': get_struct_dependencies,\n 'get_dependencies': get_dependencies,\n 'get_direct_dependencies': get_direct_dependencies,\n 'get_nested_enums': get_nested_enums,\n 'get_nested_structs': get_nested_structs,\n 'get_all_files': get_all_files,\n 'get_all_structs': get_all_structs,\n 'get_all_enums': get_all_enums,\n 'get_all_typespaces': get_all_typespaces,\n 'get_all_traits': get_all_traits,\n 'get_all_commands': get_all_commands,\n 'get_all_command_responses': get_all_command_responses,\n 'get_all_events': get_all_events,\n 'get_all_interfaces': get_all_interfaces,\n 'get_all_resources': get_all_resources,\n 'type_url_prefix': TYPE_URL_PREFIX,\n 'get_object_type': get_object_type,\n 'get_object_type_url': get_object_type_url,\n 'get_idl_type': idl_type,\n })\n\n self.jinja_env.tests.update({\n 'array': is_array,\n 'command': is_command,\n 'command_response': is_command_response,\n 'common': is_common,\n 'duration': is_duration,\n 'event': is_event,\n 'field': is_field,\n 'nullable': is_nullable,\n 'map': is_map,\n 'object': is_object,\n 'standard': is_standard,\n 'protobuf': is_protobuf,\n 'wdl': is_wdl,\n 'resource_id': is_resource_id,\n 'resource_name': is_resource_name,\n 'timestamp': is_timestamp,\n 'writable': is_writable,\n 'false': lambda x: not x,\n 'struct': is_struct,\n 'oneof': is_oneof,\n 'enum': is_enum,\n 'trait': is_trait,\n 'typespace': is_typespace,\n 'vendor': is_vendor,\n })\n\n self.jinja_env.filters.update({\n 'all': all,\n 'any': any,\n 'camelize': inflection.camelize,\n 'chain': itertools.chain,\n 'dasherize': inflection.dasherize,\n 'humanize': inflection.humanize,\n 'max': max,\n 'min': min,\n 'underscore': inflection.underscore,\n 'unique': unique,\n })", "def generate_loader(mode, symbols, definition, linker):\n if \"vanilla\" == mode:\n loader_content = generate_loader_vanilla()\n elif \"dlfcn\" == mode:\n loader_content = generate_loader_dlfcn(symbols, linker)\n else:\n loader_content = generate_loader_hash(symbols)\n ret = template_loader % (definition, loader_content)\n if \"maximum\" != mode:\n ret += template_und_symbols\n return ret", "def create_initial_templates_document() -> Dict[str, Any]:\n return {\n 'schema-version': 'v1', 'document-version': '',\n 'gateway-templates': [], 'service-templates': [],\n }", "def create_init_files(self, app_label, model_names, models):\n model_name_slugs = [\"%s_views\" % (self.camel_to_slug(model_name)) for model_name in model_names]\n model_names_dict = {self.camel_to_slug(model.__name__): self.camel_to_slug(self.model_name_plural(model)) for\n model in models}\n for folder_name in [\"views\", \"urls\"]:\n file_path = 
\"%s/%s/__init__.py\" % (app_label, folder_name)\n template_path = \"django_baker/__init__%s\" % folder_name\n self.create_file_from_template(file_path, template_path, {\"app_label\": app_label,\n \"model_name_slugs\": model_name_slugs,\n \"model_names_dict\": model_names_dict\n })", "def writeTemplate (templatefile, option):\r\n\t\r\n\ttemplate = open(templatefile, 'r')\r\n\ttemplatefile.read()\r\n\r\n\tif option == \"Manifest\":\r\n\t\t\"\"\"Escribe el contenido del archivo Manifest.xml\"\"\"\r\n\t\twriteTemplate = open(\"Manifest.xml\", 'w')\r\n\t\twriteTemplate.write(\"\"\"\r\n<application name=\"{app_name}\"\r\n\tversion=\"{app_version}\"\r\n\tlicenses=\"{app_licenses}\">\r\n\r\n\t<software>\r\n\t\t<sys use:software=\"{soft_shared}\"> <!--Git, SFS, etc.-->\r\n\t\t\r\n\t\t<app code=\"{app_code}\">\r\n\t\t<app author=\"{app_author}\">\r\n\t</software>\r\n</application>\r\n\"\"\".format(app_name = aname, app_versionv = aversion, app_licenses = alicenses, soft_shared = sshared, app_code = acode, app_author = aauthor))\r\n\telif option == \"SharedSystem\":\r\n\t\tpass\r\n\telif option == \"CopyProject\":\r\n\t\t\"\"\"Realiza una copia del proyecto\"\"\"\r\n\t\tpass\r\n\telse:\r\n\t\tprint \"ERROR\"", "def generate_script_impl(f):\n\n # Open `impl Script` section.\n f.write(\"\"\"impl Script {\n\"\"\")\n\n # Generate impl of `inner_full_name`.\n f.write(\"\"\"\n #[inline]\n pub(crate) fn inner_full_name(self) -> &'static str {\n match self {\n Script::Unknown => \"Unknown\",\n Script::Common => \"Common\",\n Script::Inherited => \"Inherited\",\n\"\"\")\n for script in script_list:\n f.write(\" Script::%s => \\\"%s\\\",\\n\" % (longforms[script], longforms[script]))\n f.write(\"\"\" }\n }\n\"\"\")\n\n # Generate impl of `inner_from_full_name`.\n f.write(\"\"\"\n #[inline]\n pub(crate) fn inner_from_full_name(input: &str) -> Option<Self> {\n match input {\n \"Unknown\" => Some(Script::Unknown),\n \"Common\" => Some(Script::Common),\n \"Inherited\" => Some(Script::Inherited),\n\"\"\")\n for script in script_list:\n f.write(\" \\\"%s\\\" => Some(Script::%s),\\n\" % (longforms[script], longforms[script]))\n f.write(\" _ => None,\\n\" )\n f.write(\"\"\" }\n }\n\"\"\")\n\n # Generate impl of `inner_short_name`\n f.write(\"\"\"\n #[inline]\n pub(crate) fn inner_short_name(self) -> &'static str {\n match self {\n Script::Unknown => \"\",\n Script::Common => \"Zyyy\",\n Script::Inherited => \"Zinh\",\n\"\"\")\n for script in script_list:\n f.write(\" Script::%s => \\\"%s\\\",\\n\" % (longforms[script], script))\n f.write(\"\"\" }\n }\n\"\"\")\n\n # Generate impl of `inner_from_short_name`\n f.write(\"\"\"\n #[inline]\n pub(crate) fn inner_from_short_name(input: &str) -> Option<Self> {\n match input {\n \"Zyyy\" => Some(Script::Common),\n \"Zinh\" => Some(Script::Inherited),\n\"\"\")\n for script in script_list:\n f.write(\" \\\"%s\\\" => Some(Script::%s),\\n\" % (script, longforms[script]))\n f.write(\"\"\" _ => None,\\n\"\"\")\n f.write(\"\"\" }\n }\n\"\"\")\n\n # Generate impl of `for_integer`\n f.write(\"\"\"\n #[inline]\n pub(crate) fn for_integer(value: u8) -> Self {\n match value {\n\"\"\")\n for (i, script) in enumerate(script_list):\n f.write(\" %s => Script::%s,\\n\" % (i, longforms[script]))\n f.write(\"\"\" _ => unreachable!(),\n }\n }\n\"\"\")\n\n # Close `impl Script` section\n f.write(\"\"\"\n}\n\"\"\")", "def generate(module_name, module_path, target_dir):\n if not (Path(module_path) / 'builtins.stub.py').exists():\n copy(Path(__file__).parent.parent / 'stubs/builtins.stub.py', 
module_path)\n build_swift_wrappers_module(module_name, module_path, target_dir)", "def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))", "def create_key(template, outtype=('nii.gz',), annotation_classes=None):\n\n if template is None or not template:\n raise ValueError('Template must be a valid format string')\n return template, outtype, annotation_classes", "def create_key(template, outtype=('nii.gz',), annotation_classes=None):\n\n if template is None or not template:\n raise ValueError('Template must be a valid format string')\n return template, outtype, annotation_classes", "def generate_registry_operations(data):\n data = render_to_string(TEMPLATE_VC, {\"data\": data})\n open(os.path.join(PLUGINSPACE_TD_VC_DIR, FILENAME_VC), \"w+\").write(data)\n return", "def generate(self, specs, experiment = None, dirname = 'target'):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n if not os.path.exists(dirname + '/app'):\n os.makedirs(dirname + '/app')\n if not os.path.exists(dirname + '/src'):\n os.makedirs(dirname + '/src')\n if not (experiment is None):\n experiment.install(self)\n self._generate_model(specs, experiment, dirname + '/app/Main.hs')\n generate_cabal_file_impl(self, dirname + '/modeling-project.cabal')\n generate_stack_file_impl(self, dirname + '/stack.yaml')\n generate_license_file_impl(dirname + '/LICENSE.txt')\n generate_readme_file_impl(dirname + '/README.md')\n generate_setup_file_impl(dirname + '/Setup.hs')\n generate_lib_file_impl(dirname + '/src/Lib.hs')", "def GenPy(mod,fname):\n f = open(fname, 'w')\n title = \"\"\"#\n# This file is generated automatically\n# Author:IAN\n# http://www.iknot.org\n\"\"\"\n f.write(title)\n for i in mod.__dict__.keys():\n s = \"def \" + i + \"():\" + \"\\n\"\n f.write(s)\n s = \" return\"\n f.write(s + \"\\n\")\n f.close()\n kcs_ui.message_noconfirm('py file saved to:%s'%(fname))", "def generate(random, pid, autogen_tools, n):\n\n #Get a random build path\n generator_path = autogen_tools.get_directory(__file__)\n\n rendered_template_path = path.join(generator_path, \"encrypted\")\n\n key = \"xor_20134113\"\n flag = \"flag_\" + sha1((str(n) + key).encode('utf-8')).hexdigest()\n text = xor(\"You accessed all my secrets :(. But the juicy diary entries are in another castle! Here is the flag though: \" + flag, random.randint(0x1,0xff))\n\n with codecs.open(rendered_template_path, 'w', \"utf-8\") as out_file:\n out_file.write(text)\n\n encrypted_link = autogen_tools.generate_resource_link(pid, \"encrypted\", title=\"encrypted\")\n source_link = autogen_tools.generate_resource_link(pid, \"diary.py\", static=True, title=\"script\")\n\n return {\n \"resource_files\": {\n \"public\": [\n (rendered_template_path, \"encrypted\"),\n ],\n },\n \"static_files\": {\n \"public\": [\n (path.join(generator_path,\"diary.py\"), \"diary.py\")\n ]\n },\n \"problem_updates\": {\n \"description\": \"<p>A friend of yours has been using this %s to encrypt his diary. Being the nosy person you are, you must take a look! 
Can you decrypt it?</p><p>%s</p>\" % (source_link, encrypted_link)\n }\n }", "def _build_interpreter_definition(config_map, template_file_path) -> str:\n target_template_file_path = os.path.splitext(template_file_path)\n LOG.info('Generating Zeppelin Interpreter file for \"%s\"', template_file_path)\n\n if len(target_template_file_path) > 1 and target_template_file_path[1] == '.j2':\n file_loader = jinja2.FileSystemLoader(os.path.dirname(template_file_path))\n j2_env = jinja2.Environment(loader=file_loader)\n template = j2_env.get_template(os.path.basename(template_file_path))\n\n output = template.render(**config_map)\n\n out_fh = tempfile.NamedTemporaryFile()\n out_fh.write(output.encode())\n out_fh.flush()\n shutil.copy(out_fh.name, target_template_file_path[0])\n LOG.info('Zeppelin Interpreter file \"%s\" generated', target_template_file_path[0])\n else:\n LOG.error('Skipping \"%s\" Zeppelin Interpreter generation as it does not end with \".j2\"',\n template_file_path)", "def WriteAeroDynTemplate(TurbDict,TmplDir,ModlDir,AeroDir,WrDir,\n verbose=0):\n \n TurbName = TurbDict['TurbName']\n if verbose:\n sys.stdout.write('\\nWriting AeroDyn v13 template' + \\\n ' for turbine {:s}...'.format(TurbName))\n \n # define path to base template and output filename\n fpath_temp = os.path.join(TmplDir,'Template_AD.ipt')\n fpath_out = os.path.join(WrDir,TurbName + '_AD_template.ipt')\n \n # get list of keys to skip (they depend on wind file)\n version, FastFlag = 7, 0\n windfile_keys = GetWindfileKeys(version,FastFlag)\n \n # open template file and file to write to\n with open(fpath_temp,'r') as f_temp:\n with open(fpath_out,'w') as f_write:\n \n # read each line in template file\n for r_line in f_temp:\n \n # default to copying without modification\n w_line = r_line\n \n # if line has a write-able field\n if ('{:' in r_line):\n \n # get fieldname, format for value, and remaining string\n field = r_line.split()[1]\n value_format = r_line.split(field)[0]\n comment = r_line.split(field)[-1]\n \n # check if comment line\n if ('ADCmnt' in field):\n w_line = TurbDict[field] + '\\n'\n \n # if foilnames, print them all with path to AeroDir\n elif (field == 'FoilNm'):\n FoilNames = TurbDict[field]\n \n # print first foilname manually\n FoilPath = os.path.join(AeroDir,FoilNames[0])\n w_line = field.join([value_format.format(FoilPath),\n comment])\n f_write.write(w_line) \n \n # loop through remaining airfoils\n for i_line in range(1,len(TurbDict['FoilNm'])):\n FoilPath = os.path.join(AeroDir,FoilNames[i_line])\n f_write.write('\\\"{:s}\\\"\\n'.format(FoilPath))\n w_line = ''\n \n # if AeroDyn schedule, print it\n elif (field == 'ADSched'): \n \n # loop through remaining airfoils\n for i_line in range(0,len(TurbDict['ADSched'])):\n w_line = value_format.format( \\\n *TurbDict['ADSched'][i_line])\n f_write.write(w_line + '\\n')\n w_line = ''\n \n # if key is not to be skipped\n elif (field not in windfile_keys):\n value = TurbDict[field]\n w_line = field.join([value_format.format(value),\n comment])\n \n f_write.write(w_line)\n \n if verbose:\n print('done.')\n \n return", "def input_template(template, fields):\n editor = os.environ.get('EDITOR', '/usr/bin/vim')\n with tempfile.NamedTemporaryFile('w+t') as ofile:\n ofile.write(template % fields)\n ofile.flush()\n user_command = '%s %s' % (editor, ofile.name)\n if os.system(user_command) != 0:\n raise Error('Error acquiring user input (command was %r).' 
% user_command)\n with open(ofile.name, 'r') as ifile:\n filled_template = ifile.read()\n\n fields = dict(parse_template(filled_template))\n return fields", "def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk = hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n \n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)", "def test_create_namespaced_processed_template(self):\n pass" ]
[ "0.60924524", "0.5882521", "0.5875414", "0.5807833", "0.57298577", "0.5695035", "0.56720126", "0.55950695", "0.55856544", "0.55830556", "0.557712", "0.55616045", "0.555261", "0.5524907", "0.5512638", "0.5499624", "0.5496154", "0.5461171", "0.54406464", "0.54035246", "0.5400476", "0.5395825", "0.5393488", "0.53826606", "0.5379185", "0.5377172", "0.5363132", "0.53524625", "0.5345551", "0.5330658", "0.5325719", "0.5325719", "0.53010553", "0.5292497", "0.52735037", "0.5257242", "0.5257106", "0.52502304", "0.5238334", "0.5236914", "0.5221825", "0.5219236", "0.52138233", "0.5213244", "0.5194212", "0.51823545", "0.5179593", "0.51786125", "0.5171996", "0.5167472", "0.5160696", "0.51582545", "0.51549053", "0.51466084", "0.5141879", "0.51352483", "0.5132552", "0.51249766", "0.5113866", "0.50988734", "0.50923556", "0.5082632", "0.50766873", "0.50747323", "0.506957", "0.5062388", "0.50584584", "0.5055643", "0.5049419", "0.50404185", "0.5033317", "0.5030198", "0.50232923", "0.50228673", "0.5021361", "0.5021161", "0.5017094", "0.500552", "0.4998534", "0.49984944", "0.49931708", "0.49921817", "0.4989444", "0.4984765", "0.4977345", "0.4976832", "0.4974347", "0.49695307", "0.49683785", "0.4966262", "0.4966262", "0.49610922", "0.4960562", "0.4960088", "0.4959084", "0.495261", "0.49523446", "0.49512678", "0.49506453", "0.494689" ]
0.6245206
0
add ranking to each node using google pagerank algorithm
def add_pagerank(self): query = ''' MATCH (c1:)-[r:INTERACTS]->(c2:) RETURN c1.name, c2.name, r.weight AS weight ''' ig = IGraph.TupleList(self.graph.run(query), weights=True) pg = ig.pagerank() pgvs = [] for p in zip(ig.vs, pg): print(p) pgvs.append({"name": p[0]["name"], "pg": p[1]}) write_clusters_query = ''' UNWIND {nodes} AS n MATCH (c:) WHERE c.name = n.name SET c.pagerank = n.pg ''' self.graph.run(write_clusters_query, nodes=pgvs)
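For reference, a minimal self-contained sketch of the ranking step the record above implements: compute PageRank over a weighted graph and write the score back onto each node. This is an illustration only; the networkx library, the function name add_pagerank_attribute, and the sample edge list are assumptions for the sketch and are not part of the dataset record, which uses igraph plus a Neo4j graph instead.

```python
# Illustrative sketch, not the record's implementation: rank nodes with PageRank
# and attach the score as a node attribute (assumed library: networkx).
import networkx as nx

def add_pagerank_attribute(edges):
    """Compute PageRank on a weighted directed graph and store it on each node."""
    graph = nx.DiGraph()
    graph.add_weighted_edges_from(edges)  # edges: iterable of (source, target, weight)
    # Damping factor 0.85 mirrors the usual Google PageRank setting.
    scores = nx.pagerank(graph, alpha=0.85, weight="weight")
    nx.set_node_attributes(graph, scores, name="pagerank")
    return graph

if __name__ == "__main__":
    # Hypothetical toy edge list, used only to show the call pattern.
    g = add_pagerank_attribute([("a", "b", 1.0), ("b", "c", 2.0), ("c", "a", 1.0)])
    for node, data in g.nodes(data=True):
        print(node, round(data["pagerank"], 4))
```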
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_rank(self):\n self.__rank += 1", "def rank():\n return 0", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def __rank__(self) -> int:", "def calculate_page_rank(self, iterations=5):\n # clear out the current page rank tables\n self.con.execute('drop table if exists pagerank')\n self.con.execute('create table pagerank(urlid primary key,score)')\n\n # initialize every url with a page rank of 1\n for (urlid,) in self.con.execute('select rowid from urllist'):\n self.con.execute('insert into pagerank(urlid,score) values (%d,1.0)' % urlid)\n self.dbcommit()\n\n for i in range(iterations):\n # Need multiple iterations, as the page ranks of pages linked to this\n # one will be consistently updated on each iteration\n print(\"Iteration %d\" % i)\n for (urlid,) in self.con.execute('select rowid from urllist'):\n # Default page rank\n page_rank = 0.15\n\n # Loop through all the pages that link to this one\n for (linker,) in self.con.execute('select distinct fromid from link where toid=%d'\n % urlid):\n # Get the page rank of the linker\n linkingpr = self.con.execute('select score from pagerank where urlid=%d'\n % linker).fetchone()[0]\n\n # Get the total number of links from the linker\n linkingcount = self.con.execute('select count(*) from link where fromid=%d'\n % linker).fetchone()[0]\n # add to page rank, accounting for the link count\n page_rank += 0.85 * (linkingpr / linkingcount)\n self.con.execute('update pagerank set score=%f where urlid=%d'\n % (page_rank, urlid))\n self.dbcommit()", "def pagerank(self, limit=20):\r\n\t\tfor urlid in self.url_ids:\r\n\t\t\tself.all_scores[urlid] = 1.0\r\n\r\n\t\tfor i in range(limit):\r\n\t\t\tfor urlid in self.url_ids:\r\n\t\t\t\tscore = self.all_scores[urlid]\r\n\t\t\t\tfor fromid in self.from_ids[urlid]:\r\n\t\t\t\t\tscore += self.all_scores[fromid] / \\\r\n\t\t\t\t\t\t\t (len(self.from_ids[fromid])+len(self.to_ids[fromid]))\r\n\t\t\t\tscore *= 0.85\r\n\t\t\t\tscore += 0.15\r\n\t\t\t\tself.all_scores[urlid] = score\r\n\t\tself.save_pr()", "def pagerank(self, alpha=0.85):\n try:\n self.logger.info('正在计算网络的PageRank值 ...')\n return self.order_dict(nx.pagerank(self.G, alpha=alpha), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))", "def pagerank(self):\n\n raise NotImplementedError", "def update(self, rank):\n # calculate MR and MRR\n self.mr += rank\n self.mrr += 1 / rank\n # calculate Hits@k\n if rank <= 1:\n self.hits1 += 1\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 3:\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 5:\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 10:\n self.hits10 += 1", "def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n newrank = dict()\n\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n repeat = True\n\n while repeat:\n\n for page in pagerank:\n\n summation = 0\n\n links = get_links(corpus, page)\n\n if not links:\n for p in corpus:\n summation += pagerank[p] / len(corpus)\n\n for link in links:\n summation += pagerank[link] / len(corpus[link])\n\n newrank[page] = (1 - damping_factor) / len(corpus) + damping_factor * summation\n\n repeat = False\n\n for page in pagerank:\n if abs(newrank[page] - pagerank[page]) > 0.001:\n repeat = True\n\n pagerank[page] = newrank[page]\n\n return pagerank", "def run_pagerank(tag_table, 
unique_tags, targetNum):\n id2tag = {i: tag for i, tag in enumerate(unique_tags)}\n tag2id = {tag: i for i, tag in id2tag.items()}\n\n co_occurence = dict()\n for tag_list in tag_table:\n indices = [tag2id[tag] for tag in tag_list]\n for pair in combinations(indices, 2):\n co_occurence[pair] = co_occurence.get(pair, 0) + 1\n\n nodes = range(len(unique_tags))\n edges = [(pair[0], pair[1], weight) for pair, weight in co_occurence.items()]\n G = nx.Graph()\n G.add_nodes_from(nodes)\n G.add_weighted_edges_from(edges)\n pr = nx.pagerank(G, weight='weight')\n\n top_indices, top_scores = zip(*sorted(pr.items(), key=operator.itemgetter(1), reverse=True)[:targetNum])\n topTags = [id2tag[i] for i in top_indices]\n return topTags", "def _update_ranks(sample_count):\n raise NotImplementedError", "def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n new_pagerank = dict()\n repeat = True\n\n # Assigning each page a rank of 1 / N, where N is the total number of pages in the corpus.\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n # Repeatedly calculate new rank values based on all of the current rank values\n while repeat:\n for page in corpus:\n\n # Probability that we followed a link from a page i to current page.\n followed = 0.0\n for linked_page in linked_pages(corpus, page):\n followed += pagerank[linked_page] / number_of_links(corpus, linked_page)\n\n new_pagerank[page] = (1 - damping_factor) / len(corpus) + damping_factor * followed\n\n repeat = False\n\n # Repeat the process if new PageRank value changes by more than 0.001\n for page in pagerank:\n if not isclose(pagerank[page], new_pagerank[page], abs_tol=0.001):\n repeat = True\n\n # Assigning new values to the previous ones\n pagerank[page] = new_pagerank[page]\n\n # Sorting pagerank by keys\n pagerank = dict(sorted(pagerank.items()))\n\n return pagerank", "def rank_transform(self):\n sorted_targets = sorted(self.genomes, key=lambda item: item.fitness)\n for index, target in enumerate(sorted_targets):\n target.fitness = index/len(sorted_targets) - 0.5", "def pagerank(matrix, bias, d=0.85):\n n = matrix.shape[0]\n rank = 0\n new_rank = np.array([1.0 / n] * n)\n for i in range(0,200):\n print \"iteration: \"+str(i)\n rank = new_rank\n new_rank = np.array([(1.0-d)/n] * n) + d * np.dot(matrix, rank)\n# new_rank = (1.0-d) * bias + d * np.dot(matrix, rank)\n # new_rank = [(((1.0-d) / n) +\n # d * sum((rank[i] * link) for i, link in enumerate(row)))\n # for row in matrix]\n if(has_converged(rank, new_rank)):\n break\n return new_rank", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def _graph_ranks(avranks, names, p_values, cd=None, cdmethod=None, lowv=None, highv=None, highlight=None,\n width=6, textspace=1, reverse=False, filename=None, labels=False, **kwargs):\n width = float(width)\n textspace = float(textspace)\n \n def lloc(_list, n):\n \"\"\"\n List location in list of list structure.\n Enable the use of negative locations:\n -1 is the last element, -2 second last...\n \"\"\"\n if n < 0:\n return len(_list[0]) + n\n return n\n \n def nth(_list, n):\n \"\"\"\n Returns only nth elemnt in a list.\n \"\"\"\n n = lloc(_list, n)\n return [a[n] for a in _list]\n\n def mxrange(lr):\n \"\"\"\n Multiple xranges. 
Can be used to traverse matrices.\n This function is very slow due to unknown number of\n parameters.\n >>> mxrange([3,5])\n [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]\n >>> mxrange([[3,5,1],[9,0,-3]])\n [(3, 9), (3, 6), (3, 3), (4, 9), (4, 6), (4, 3)]\n \"\"\"\n if len(lr):\n yield ()\n else:\n # it can work with single numbers\n index = lr[0]\n if isinstance(index, int):\n index = [index]\n for a in range(*index):\n for b in mxrange(lr[1:]):\n yield tuple([a] + list(b))\n\n sums = avranks\n\n nnames = names\n ssums = sums\n\n if lowv is None:\n lowv = min(1, int(math.floor(min(ssums))))\n if highv is None:\n highv = max(len(avranks), int(math.ceil(max(ssums))))\n\n cline = 0.4\n\n k = len(sums)\n\n linesblank = 0\n scalewidth = width - 2 * textspace\n\n def rankpos(rank):\n if not reverse:\n a = rank - lowv\n else:\n a = highv - rank\n return textspace + scalewidth / (highv - lowv) * a\n\n distanceh = 0.25\n\n cline += distanceh\n\n # calculate height needed height of an image\n minnotsignificant = max(2 * 0.2, linesblank)\n height = cline + ((k + 1) / 2) * 0.2 + minnotsignificant\n\n fig = plt.figure(figsize=(width, height*1.05))\n fig.set_facecolor('white')\n ax = fig.add_axes([0, 0, 1, 1]) # reverse y axis\n ax.set_axis_off()\n\n hf = 1. / height # height factor\n wf = 1. / width\n\n def hfl(_list):\n return [a * hf for a in _list]\n\n def wfl(_list):\n return [a * wf for a in _list]\n\n # Upper left corner is (0,0).\n ax.plot([0, 1], [0, 1], c=\"w\")\n ax.set_xlim(0, 1)\n ax.set_ylim(1, 0)\n\n def line(l, color='k', **kwargs):\n \"\"\"\n Input is a list of pairs of points.\n \"\"\"\n ax.plot(wfl(nth(l, 0)), hfl(nth(l, 1)), color=color, **kwargs)\n\n def text(x, y, s, *args, **kwargs):\n ax.text(wf * x, hf * y, s, *args, **kwargs)\n\n line([(textspace, cline), (width - textspace, cline)], linewidth=2)\n\n bigtick = 0.3\n smalltick = 0.15\n linewidth = 2.0\n linewidth_sign = 4.0\n\n tick = None\n for a in list(np.arange(lowv, highv, 0.5)) + [highv]:\n tick = smalltick\n if a == int(a):\n tick = bigtick\n line([(rankpos(a), cline - tick / 2),\n (rankpos(a), cline)],\n linewidth=2)\n\n for a in range(lowv, highv + 1):\n text(rankpos(a), cline - tick / 2 - 0.05, str(a),\n ha=\"center\", va=\"bottom\", size=16)\n\n k = len(ssums)\n\n def filter_names(name):\n return name\n\n space_between_names = 0.24\n\n for i in range(math.ceil(k / 2)):\n chei = cline + minnotsignificant + i * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + 0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"right\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18, color='red')\n else:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18)\n\n for i in range(math.ceil(k / 2), k):\n chei = cline + minnotsignificant + (k - i - 1) * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + scalewidth - 
0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"left\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18, color='red')\n else:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18)\n start = cline + 0.2\n side = -0.02\n height = 0.1\n\n # draw no significant lines\n # get the cliques\n cliques = _form_cliques(p_values, nnames)\n achieved_half = False\n print(nnames)\n for clq in cliques:\n if len(clq) == 1:\n continue\n print(clq)\n min_idx = np.array(clq).min()\n max_idx = np.array(clq).max()\n if min_idx >= len(nnames) / 2 and achieved_half == False:\n start = cline + 0.25\n achieved_half = True\n line([(rankpos(ssums[min_idx]) - side, start),\n (rankpos(ssums[max_idx]) + side, start)],\n linewidth=linewidth_sign)\n start += height", "def mpi_rank(self, new_value):", "def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r", "def iterate_pagerank(corpus, damping_factor):\n\n PageRank = dict()\n accuracy = 0.001\n\n # initialise the rank of each page with 1 / N\n\n N = len(corpus)\n\n for page in corpus:\n PageRank[page] = 1 / N\n\n # for each page, use the PageRank formula to calculate the ranks\n\n while True:\n\n count = 0\n\n for page in corpus:\n\n new_rank = (1 - damping_factor) / N\n change = 0\n\n for new_page in corpus:\n\n if page in corpus[new_page]:\n NumLinks = len(corpus[new_page])\n change = change + (PageRank[new_page] / NumLinks)\n\n change = damping_factor * change\n new_rank += change\n\n if abs(PageRank[page] - new_rank) < accuracy:\n count += 1\n\n PageRank[page] = new_rank\n\n if count == N:\n break\n\n return PageRank", "def competitionRanking(groups, setRank):\n rank = 1\n for k, g in groups:\n cnt = 0\n for item in g:\n setRank(item, rank)\n cnt += 1\n rank += cnt", "def denseRanking(groups, setRank):\n rank = 1\n for k, g in groups:\n for item in g:\n setRank(item, rank)\n rank += 1", "def getPageRank(elistPath, alpha, maxiter, tolerance):\n\n adjGraph = AdjGraph(elistPath, separator=\" \")\n graph = adjGraph.SNAPGraph\n\n preference_vector = []\n for node in graph.Nodes():\n id = node.GetId()\n if (id % 4) == 0:\n preference_vector.append(id)\n\n pageRank, convIter, time = biasedPageRank(\n adjGraph, preference_vector=preference_vector, alpha=alpha,\n max_iterations=maxiter, tolerance=tolerance)\n\n writeCentrality(\"pagerank.txt\", pageRank)\n return pageRank, convIter, time", "def calculate_PageRank(outlinks):\n\n\t# Damping factor\n\td = 0.85\n\n\t# size of the matrix\n\tsize = outlinks.shape[0]\n\n\t# list to hold page ranks\n\tpage_ranks = [1 for i in range(size)]\n\n\t# Calculating the out degree of each node and storing in a list\n\tout_degrees = []\n\tfor i in range(size):\n\t\tsums = 0\n\t\tfor j in range(size):\n\t\t\tsums += outlinks[i][j]\n\t\tout_degrees.append(sums)\n\n\t#print(out_degrees)\n\n\tprint('Initial page ranks:')\n\tprint(page_ranks)\n\n\tfor _ in range(100):\n\t\tfor j in range(size):\n\t\t\ttemp = 0\n\t\t\tfor i in range(size):\n\t\t\t\tif outlinks[i][j] == 1:\n\t\t\t\t\ttemp += page_ranks[i] / out_degrees[i]\n\t\t\ttemp *= d\n\t\t\ttemp += (1-d)\n\t\t\tpage_ranks[j] = round(temp, 4)\n\n\treturn page_ranks", "def _add_ranks(standings, key):\n prev_key = None\n current_rank = 0\n for i, team in enumerate(standings, start=1):\n this_key = key(team)\n if this_key != prev_key:\n current_rank = 
i\n prev_key = this_key\n team.rank = current_rank", "def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:", "def __ranking_function(self, doc, query_tokens):", "def increment_node_index(self):\n self.node_index += 1", "def rank(ctx, path, metric, revision, limit, desc, threshold, wrap):\n config = ctx.obj[\"CONFIG\"]\n\n if not exists(config):\n handle_no_cache(ctx)\n\n from wily.commands.rank import rank\n\n logger.debug(f\"Running rank on {path} for metric {metric} and revision {revision}\")\n rank(\n config=config,\n path=path,\n metric=metric,\n revision_index=revision,\n limit=limit,\n threshold=threshold,\n descending=desc,\n wrap=wrap,\n )", "def set_rank(self,rank):\n self._rank = rank", "def rankPages(pages): \n nPages = len(pages)\n transitionMatrix = createTransMatrix(pages)\n rankSteps = [[1 / nPages] * nPages]\n for i in range(0,10):\n p = rankSteps[-1] * transitionMatrix\n rankSteps.append(np.squeeze(np.asarray(p)))\n return rankSteps", "def assignRanks(self):\r\n\t\trank = 0\r\n\t\tscores = list(self._playerScores)\r\n\t\tscores.reverse()\r\n\t\tfor playerScore in scores:\r\n\t\t\tif not playerScore.has(NOT_MET) or not playerScore.value(NOT_MET):\r\n\t\t\t\trank += 1\r\n\t\t\t\tplayerScore.set(RANK, smallText(BugUtil.colorText(u\"%d\" % rank, ScoreOpt.getRankColor())))\r\n\t\tif rank > 0:\r\n\t\t\tself._anyHas[RANK] = True", "def rank(self, value: int) -> None:\n self._rank = value", "def plot_pagerank(net, label, outpath):\n _, pagerank_values = networkit_util.get_pagerank(net, label, outpath)\n unique_value, unique_cnt = np.unique(pagerank_values, return_counts=True)\n unique_cumcnt = np.cumsum(unique_cnt) / sum(unique_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(unique_value, unique_cumcnt, 'r.')\n # ax.set_title('Cumulative distribution of pagerank of nodes')\n ax.set_xlabel('pagerank value v')\n ax.set_ylabel('p(x <= v)')\n plt.savefig(outpath + label + \"-pagerank-distribution.eps\")", "def findRelativeRanks(nums):\n compare_lst = copy.deepcopy(nums)\n compare_lst.sort(reverse=True)\n for i in nums:\n compare_index = compare_lst.index(i)\n nums_index = nums.index(i)\n if compare_index > 2:\n nums[nums_index] = str(compare_index + 1)\n elif compare_index == 0:\n nums[nums_index] = 'Gold Medal'\n elif compare_index == 1:\n nums[nums_index] = 'Silver Medal'\n else:\n nums[nums_index] = 'Bronze Medal'\n return nums", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def add_rank(self, input_name, name=None):\n return self._build_op('Rank', [input_name], name=name)", "def increment_rank(self, rank, result):\n if result not in VALID_RESULTS:\n raise ValueError('Invalid result: %s' % result)\n self.counts[rank][result] += 1\n self.total_items += 1", "def rank(self, rank):\n self._rank = rank", "def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n\n #Modifying the corpus, to account the fact that,\n #\"A page that has no links at all should be interpreted as having one link for every page in the corpus\"\n modif_corpus = copy.deepcopy(corpus)\n for pg in modif_corpus.keys():\n if len(modif_corpus[pg]) == 0:\n modif_corpus[pg] = list(corpus.keys())\n\n #Assigning each page a rank of 1 / N, where N is the total number of pages in the corpus\n for pg in modif_corpus.keys():\n pagerank[pg] = 1/len(modif_corpus.keys())\n\n convergence_check = False\n while not convergence_check:\n old_pagerank = copy.deepcopy(pagerank)\n\n for page in pagerank.keys():\n sigma = 0\n 
for pg in pagerank.keys():\n if page in modif_corpus[pg]: #Finding all the pages that link to 'page'\n sigma += pagerank[pg]/len(modif_corpus[pg])\n \n pagerank[page] = (1-damping_factor)/len(modif_corpus.keys()) + damping_factor*sigma\n\n #Making sure the new values differ more than 0.001\n convergence_check = True\n for pg in modif_corpus.keys():\n if abs(pagerank[pg] - old_pagerank[pg]) > 0.001:\n convergence_check = False\n break\n\n return pagerank", "async def handle_rank_ups(self, user: discord.User, brawler: str):\n\n brawler_data = await self.get_player_stat(\n user, 'brawlers', is_iter=True, substat=brawler)\n\n pb = brawler_data['pb']\n rank = brawler_data['rank']\n\n rank_as_per_pb = self.get_rank(pb)\n\n if rank_as_per_pb <= rank:\n return False\n\n await self.update_player_stat(\n user, 'brawlers', rank_as_per_pb, brawler, 'rank')\n\n rank_up_tokens = self.RANKS[str(rank)][\"PrimaryLvlUpRewardCount\"]\n\n token_doubler = await self.get_player_stat(user, 'token_doubler')\n\n upd_td = token_doubler - rank_up_tokens\n if upd_td < 0:\n upd_td = 0\n\n if token_doubler > rank_up_tokens:\n rank_up_tokens *= 2\n else:\n rank_up_tokens += token_doubler\n\n rank_up_starpoints = self.RANKS[str(rank)][\"SecondaryLvlUpRewardCount\"]\n\n await self.update_player_stat(\n user, 'tokens', rank_up_tokens, add_self=True)\n await self.update_player_stat(\n user, 'starpoints', rank_up_starpoints, add_self=True)\n await self.update_player_stat(\n user, 'token_doubler', upd_td)\n\n embed = discord.Embed(\n color=EMBED_COLOR,\n title=f\"Brawler Rank Up! {rank} → {rank_as_per_pb}\"\n )\n embed.set_author(name=user.name, icon_url=user.avatar_url)\n embed.add_field(\n name=\"Brawler\", value=f\"{brawler_emojis[brawler]} {brawler}\")\n embed.add_field(\n name=\"Tokens\", value=f\"{emojis['token']} {rank_up_tokens}\")\n if rank_up_starpoints:\n embed.add_field(\n name=\"Star Points\",\n value=f\"{emojis['starpoints']} {rank_up_starpoints}\"\n )\n if token_doubler > 0:\n embed.add_field(\n name=\"Token Doubler\",\n value=f\"{emojis['tokendoubler']} x{upd_td} remaining!\",\n inline=False\n )\n return embed", "def approximate_PageRank_weighted(G,\n ref_nodes,\n iterations: int = 100000,\n alpha: float = 0.15,\n rho: float = 1.0e-6): \n \n #print(\"Uses the weighted Andersen Chung and Lang (ACL) Algorithm.\")\n n = G.adjacency_matrix.shape[0]\n (length,xids,values) = aclpagerank_weighted_cpp(n,G.ai,G.aj,G.adjacency_matrix.data,alpha,rho,\n ref_nodes,iterations)\n #p = np.zeros(n)\n #p[xids] = values\n\n return xids, values", "def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0", "def update_node_count(self, node, add_to_count):\r\n current_score = 0\r\n count_string = self.parser.getAttribute(node, 'gravityNodes')\r\n if count_string:\r\n current_score = int(count_string)\r\n\r\n new_score = current_score + add_to_count\r\n self.parser.setAttribute(node, \"gravityNodes\", str(new_score))", "def pytextrank_get_rank(doc):\n rank = {}\n for p in doc._.phrases:\n rank[p] = [p.rank,p.count]\n return rank", "def RPR(G, alpha=0.15, beta=0):\n #set default variables\n weight = None\n res = {} #stores results\n eligible_node = G.nodes()\n personalization = dict.fromkeys(G, beta)\n\n for u in G.nodes():\n personalization[u] = 1 - beta\n pagerank_scores = nx.pagerank_scipy(G, alpha, personalization, weight=weight)\n\n for v, w in six.iteritems(pagerank_scores):\n if w > 0 and u != v and v in eligible_node:\n res[(u, v)] = 0.0\n res[(u, v)] += w\n\n return res", "def pagerank(dict_prefs, nitems, 
eps_search=20):\n prefs_mat=np.zeros((nitems,nitems))\n for k,v in dict_prefs.items():\n if v==0:\n continue\n elif v>0:\n prefs_mat[k[1],k[0]]+=v\n else:\n prefs_mat[k[0],k[1]]-=v\n prefs_mat_orig=prefs_mat.copy()\n eps_grid=list(.5**np.logspace(0,1,eps_search))\n best=-10^5\n best_order=None\n \n for eps in eps_grid:\n prefs_mat=prefs_mat_orig.copy()\n for i in range(nitems):\n prefs_mat[:,i]+=eps\n tot=np.sum(prefs_mat[:,i])\n prefs_mat[:,i]=prefs_mat[:,i]/tot\n\n \n pr=np.ones((nitems,1))/nitems\n for i in range(30):\n pr=prefs_mat.dot(pr)\n lst_pagerank=list(np.argsort(pr.reshape(-1)))\n score_this_order=eval_ordering(lst_pagerank,dict_prefs)\n if score_this_order>best:\n best=score_this_order\n best_order=deepcopy(lst_pagerank)\n return best_order", "def compute_pagerank(urls, inlinks, outlinks, b=.85, iters=20):\n ###TODO\n pagerank = defaultdict(lambda: 1.0)\n N = len(urls)\n for url in urls:\n pagerank[url]\n for i in range(0, iters):\n for url in urls:\n result_sum = 0.0\n for link in inlinks[url]:\n if len(outlinks[link]) is not 0:\n result_sum += (pagerank[link] / len(outlinks[link]))\n pagerank[url] = (1/N) * (1-b) + (b * result_sum)\n return pagerank\n pass", "def vis_rank_list(self, output, vis_label, num_vis=100, rank_sort=\"ascending\", label_sort=\"ascending\", max_rank=5,\n actmap=False):\n assert rank_sort in ['ascending', 'descending'], \"{} not match [ascending, descending]\".format(rank_sort)\n\n query_indices = np.argsort(self.all_ap)\n if rank_sort == 'descending': query_indices = query_indices[::-1]\n\n query_indices = query_indices[:num_vis]\n self.save_rank_result(query_indices, output, max_rank, vis_label, label_sort, actmap)", "def set_level(self):\n queue = []\n for node in self.node:\n if distance.euclidean(node.location, para.base) < node.com_ran:\n node.level = 1\n queue.append(node.id)\n while queue:\n for neighbor_id in self.node[queue[0]].neighbor:\n if not self.node[neighbor_id].level:\n self.node[neighbor_id].level = self.node[queue[0]].level + 1\n queue.append(neighbor_id)\n queue.pop(0)", "def rank_hist(self):\n self.ranks = {}\n for card in self.cards:\n self.ranks[card.rank] = self.ranks.get(card.rank, 0) + 1", "def update_node_count(self, node, add_to_count):\n current_score = 0\n count_string = self.parser.getAttribute(node, 'gravityNodes')\n if count_string:\n current_score = int(count_string)\n\n new_score = current_score + add_to_count\n self.parser.setAttribute(node, \"gravityNodes\", str(new_score))", "def PageRank(start):\n probs = {}\n probs[start] = 1\n num_page_rank_iterations = 3\n maximum = 25\n\n PageRankProbs = PageRankHelper(start,\n probs,\n num_page_rank_iterations)\n\n PageRankProbs = zip(PageRankProbs.iterkeys(),\n PageRankProbs.itervalues())\n\n PageRankProbs = [(node,score) for node, score in PageRankProbs\\\n if node not in out_edges.get(start) and node != start]\n PageRankProbs.sort(key=lambda x:x[1],reverse=True)\n\n return dict(PageRankProbs[:maximum])", "def _insort(self, node):\n lo = 0\n hi = len(self._pool)\n f_score = node.get_f_score()\n while lo < hi:\n mid = (lo+hi)//2\n if f_score < self._pool[mid].get_f_score(): hi = mid\n else: lo = mid + 1\n self._pool.insert(lo, node)", "def rank_and_assign(self, cutoff_matrix_element):\r\n\r\n L0 = (qt.liouvillian(self.rotating_frame_hamiltonian, self.jump_ops))\r\n \r\n relevance_table = [[self.calculate_first_order_correction(cutoff_matrix_element,L0,ket_index=n,bra_index=m) for m in range(self.dim)] for n in range(self.dim)]\r\n relevance_table = 
np.asarray(relevance_table)\r\n \r\n number_of_transitions = int(self.dim*(self.dim-1)/2)\r\n transition_rank = [None for i in range(number_of_transitions)]\r\n # This loop ranks drive terms according to relevance \r\n for rank in range(number_of_transitions):\r\n max_ranked_indices = np.where(relevance_table == relevance_table.max())\r\n indices = [max_ranked_indices[0][0], max_ranked_indices[1][0]]\r\n transition_rank[rank] = [relevance_table.max(), indices]\r\n relevance_table[indices[0]][indices[1]] = relevance_table[indices[1]][indices[0]] = 0\r\n \r\n # This graphical algorithm assigns an integer to each eigenstate of the Hamiltonian based on the ranking from above\r\n integer_list = [None for i in range(self.dim)]\r\n # START ALGORITHM\r\n # initialize first term into a graph\r\n first_index = transition_rank[0][1]\r\n graph_list = [[first_index[0],first_index[1]]]\r\n integer_list[max(first_index)] = 1\r\n integer_list[min(first_index)] = 0\r\n # assign subsequent terms\r\n for i in range(1,number_of_transitions):\r\n # if no more non-zero relevance parameters, then break \r\n if transition_rank[i][0] == 0.0:\r\n break\r\n else:\r\n index = transition_rank[i][1]\r\n # scenario (i) neither states have been incorporated into the graph \r\n if integer_list[index[0]]==integer_list[index[1]]==None: \r\n integer_list[max(index)] = 1\r\n integer_list[min(index)] = 0\r\n # place them in a new graph\r\n graph_list.append([index[0],index[1]])\r\n # scenario (ii) one of the states has been incorporated, but not the other\r\n elif integer_list[index[0]]==None:\r\n if index[0] > index[1]:\r\n integer_list[index[0]] = integer_list[index[1]] + 1\r\n else:\r\n integer_list[index[0]] = integer_list[index[1]] - 1\r\n # find which graph component to put the state in (the component the other state is in)\r\n for k,graph in enumerate(graph_list):\r\n if index[1] in graph:\r\n # place that state in that graph component\r\n graph_list[k].append(index[0]) \r\n break\r\n elif integer_list[index[1]]==None:\r\n if index[0] > index[1]:\r\n integer_list[index[1]] = integer_list[index[0]] - 1\r\n else:\r\n integer_list[index[1]] = integer_list[index[0]] + 1\r\n for k,graph in enumerate(graph_list):\r\n if index[0] in graph:\r\n graph_list[k].append(index[1])\r\n break\r\n # scenario (iii) both states have already been incorporated in the graph\r\n else:\r\n # find the graph components where these states have been placed\r\n for k,graph in enumerate(graph_list):\r\n overlap = list(set(index) & set(graph))\r\n # subscenario: the states are in the same graph component, hence a cycle, so nothing can do\r\n if (len(overlap) == 2):\r\n break\r\n # subscenario: the states are in two disjoint graph components\r\n elif (len(overlap) == 1):\r\n fixed_index = overlap[0]\r\n shift_index = list(set(index) - set(graph))[0]\r\n old_integer = integer_list[shift_index]\r\n if shift_index > fixed_index:\r\n new_integer = integer_list[fixed_index] + 1\r\n else:\r\n new_integer = integer_list[fixed_index] - 1\r\n shift_amount = new_integer - old_integer\r\n # merge one graph component into the other\r\n for j,graph2 in enumerate(graph_list):\r\n if shift_index in graph2:\r\n for m,index2 in enumerate(graph2):\r\n integer_list[index2] = integer_list[index2] + shift_amount\r\n graph_list[k] = graph_list[k] + graph2\r\n graph_list.pop(j)\r\n break\r\n break\r\n else:\r\n continue\r\n continue\r\n # Just in case, if a state was not assigned an integer due to not participating in dynamics, set its integer to 0\r\n for i,integer in 
enumerate(integer_list):\r\n if integer == None:\r\n integer_list[i] = 0\r\n ## END algorithm\r\n return transition_rank, integer_list", "def update_player_rank(dct, player_name, place, rank):\n dct.get(player_name)[place - 1] = rank", "def rank_teams_of_curr_run(curr_score, curr_ranking):\n for place in curr_ranking:\n curr_place = get_key_with_max_value(curr_score)\n curr_ranking[place] = curr_ranking[place].__add__([curr_place])\n curr_score.pop(curr_place)\n return curr_ranking", "def learning_to_rank(self):\n self.error_throw('rank')\n\n instance = Instance(self.table_name)\n instance.addTable(Table_LTR(instance,False,'',''))\n if self.import_method == 'mysql': instance = self.mysql_handle(instance)\n elif self.import_method == 'csv': instance = self.csv_handle(instance)\n\n self.rank_learning(instance)\n\n self.rank_method = methods_of_ranking[1] # = 'learn_to_rank'", "def create_text_rank(self):\n # filtered_tokens = self.filter_pos() #if use, replace 2 self.lemma_tokens below\n vocab = self.create_vocab(self.lemma_tokens)\n token_windows = self.create_token_windows(self.lemma_tokens)\n graph = self.create_matrix(vocab, token_windows)\n text_rank = np.array([1] * len(vocab))\n previous_tr = 0\n d = 0.85\n min_difference = 1e-5\n for epoch in range(10):\n text_rank = (1 - d) + d * np.dot(graph, text_rank)\n if abs(previous_tr - sum(text_rank)) < min_difference:\n break\n else:\n previous_tr = sum(text_rank)\n node_weight = {}\n for word in vocab:\n node_weight[word] = text_rank[vocab[word]]\n return node_weight", "def page_rank(self):\n print(\"Generating the matrix...\")\n G = self.graph\n p = FOLLOW\n column_sum = np.sum(G, axis=0, dtype=np.float64)\n \n n = self.graph.shape[0]\n\n D = sps.lil_matrix((n, n))\n D.setdiag(np.divide(1.,column_sum, where=column_sum != 0, out=np.zeros_like(column_sum)).reshape(-1, 1))\n self.diagonal = D\n print(\"created diagonal\")\n e = np.ones((n, 1))\n I = sps.eye(n)\n x = sps.linalg.spsolve((I - p*G*D), e)\n x = x/np.sum(x)\n\n self.page_rank = x\n\n return x", "def _addReferees(self):\n a_ranks = self.division.GetGroupsRanks(['A'])\n b_ranks = self.division.GetGroupsRanks(['B'])\n c_ranks = self.division.GetGroupsRanks(['C'])\n d_ranks = self.division.GetGroupsRanks(['D'])\n\n self._GroupAddReferees('7th', [d_ranks[1]])\n self._GroupAddReferees('5th', [d_ranks[2]])\n\n self._GroupAddReferees('3rd',[d_ranks[0]])\n self._GroupAddReferees('final',[c_ranks[2]])", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def add_biport(self, node, biport):", "def rank(self):\r\n\t\trank = self.n % 13\r\n\t\treturn rank", "def number_nodes(tree):\n def list_of_nodes(tree):\n \"\"\"Return a list of internal nodes in postorder traversal\n\n @param HuffmanNode tree: a tree to traverse\n @rtype: list\n\n >>> t = HuffmanNode(None, HuffmanNode(6), HuffmanNode(7))\n >>> list_of_nodes(t) == [t]\n True\n >>> t = HuffmanNode(None, HuffmanNode(8), HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)))\n >>> list_of_nodes(t) == [HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)), HuffmanNode(None, HuffmanNode(8), HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)))]\n True\n \"\"\"\n list_ = []\n if tree.left.is_leaf() and tree.right.is_leaf():\n list_.append(tree)\n return list_\n elif tree.left.is_leaf():\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return 
list_\n elif tree.right.is_leaf():\n list_.extend(list_of_nodes(tree.left))\n list_.append(tree)\n return list_\n else:\n list_.extend(list_of_nodes(tree.left))\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return list_\n\n internal_nodes = list_of_nodes(tree)\n for i in range(len(internal_nodes)):\n node = internal_nodes[i]\n node.number = i", "def iterate_pagerank(corpus, damping_factor):\n # Set initial values to choosing a page randomly\n corpus_length = len(corpus)\n prev_iterated_page_rank = defaultdict(lambda: 1/corpus_length)\n max_abs_difference = inf\n while max_abs_difference > 0.001:\n max_iter_diff = -inf\n next_iterated_page_rank = defaultdict(lambda: (1 - damping_factor) / corpus_length)\n for prev_page in corpus:\n if not corpus[prev_page]:\n print(\"hi\")\n for next_page in corpus:\n next_iterated_page_rank[next_page] += prev_iterated_page_rank[prev_page] * 1/len(corpus)\n else:\n print(\"hi2\")\n for next_page in corpus[prev_page]:\n next_iterated_page_rank[next_page] += damping_factor * prev_iterated_page_rank[prev_page]/len(corpus[prev_page])\n\n for prev_prob, next_prob in zip(prev_iterated_page_rank.values(), next_iterated_page_rank.values()):\n max_iter_diff = max(max_iter_diff, abs(next_prob-prev_prob))\n max_abs_difference = min(max_abs_difference, max_iter_diff)\n\n prev_iterated_page_rank = next_iterated_page_rank.copy()\n assert abs(sum(prev_iterated_page_rank.values())-1) < 10**-2\n assert abs(sum(next_iterated_page_rank.values()) - 1) < 10**-2\n return prev_iterated_page_rank", "def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist", "def update(self, old, new):\n i = self.rank[old] # change value at index i\n del self.rank[old]\n self.heap[i] = new\n self.rank[new] = i\n if old < new: # maintain heap order\n self.down(i)\n else:\n self.up(i)", "def update(self, old, new):\n i = self.rank[old] # change value at index i\n del self.rank[old]\n self.heap[i] = new\n self.rank[new] = i\n if old < new: # maintain heap order\n self.down(i)\n else:\n self.up(i)", "def diversified_ranking(self):\n self.error_throw('rank')\n instance = Instance(self.table_name)\n instance.addTable(Table(instance,False,'','')) # 'False'->transformed '',''->no describe yet\n if self.import_method == 'mysql': instance = self.mysql_handle(instance)\n elif self.import_method == 'csv': instance = self.csv_handle(instance)\n\n self.rank_partial(instance)\n\n self.rank_method = methods_of_ranking[3] # = 'diversified_ranking'", "def iteratehelper(links, ranks, contributer, count):\n \n ranks=ranks.cache()\n links=links.cache()\n contributer=contributer.cache()\n \n for iteration in range(count):\n print('Running %d iteration' %(iteration))\n contribs = links.join(ranks).flatMap(lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))\n \n ranks = contribs.reduceByKey(lambda x,y:x+y).mapValues(lambda rank: rank * 0.85 + 0.15)\n \n ranks.take(1)\n\n return ranks", 
"def rank(self,file_name, how_to_rank='total'):\r\n rank_dic={'total': lambda x:int(x[1])+int(x[2])+int(x[3]),\r\n 'gold' : lambda x: int(x[1]),\r\n 'weight': lambda x:3*int(x[1])+2*int(x[2])+int(x[3])}\r\n\r\n file=open(file_name,'r')\r\n total_list=[]\r\n for word in file:\r\n total_list.append(tuple(w for w in word.split(sep=' ') if w is not '\\n'))\r\n\r\n namelist=[name[0] for name in total_list]\r\n ranks=[rank for rank in list(map(rank_dic[how_to_rank], total_list))]\r\n\r\n newlist = [(namelist[i], ranks[i]) for i in range(len(total_list))]\r\n newlist.sort(key=lambda x:x[1],reverse=True)\r\n string_res=\"\"\r\n for i in range(len(newlist)):\r\n string_res+=str(newlist[i][0])+\": \"+str(newlist[i][1])+\"\\n\"\r\n\r\n file.close()\r\n return string_res", "def next(self, delta=1):\n return Prufer.unrank(self.rank + delta, self.nodes)", "def update(self):\r\n debug.write(\"[SourceRPG] Updating all ranked positions\", 1)\r\n database.execute(\"SELECT steamid FROM Player ORDER BY level DESC,xp DESC\")\r\n results = database.cursor.fetchall()\r\n self.ranks = []\r\n for index, steamid in enumerate(results):\r\n debug.write(\"Rank: %s Steamid: %s\" % (index, steamid), 5)\r\n self.ranks.append(steamid[0])\r\n debug.write(\"[SourceRPG] All ranked positions updated\", 1)", "def ranking_by_address_in_ascending_order(filename):\r\n in_address = sort_by_address(filename) #[['201808',1744',희우정로길','5955','69941'],['201808','1743','휘경로길','10483',143839']]\r\n address_dict = {}\r\n for i in in_address:\r\n if i[2] not in address_dict:\r\n address_dict[i[2]] = int(i[-1])\r\n else:\r\n address_dict[i[2]] += int(i[-1]) #주소별로 데이터합산 후 딕셔너리 형태로 저장\r\n \r\n address_dict_items = sorted(address_dict.items()) #데이터정렬후 튜플 in 리스트로 저장\r\n y = []\r\n for i in address_dict_items:\r\n y.append(list(i)) #튜플 in 리스트 형태를 리스트 in 리스트 형태로 변환\r\n for i in range(len(y)):\r\n k = y[i][0]\r\n y[i][0] = y[i][1]\r\n y[i][1] = k #리스트 안에 있는 리스트의 순서를 바꿈 (ex. 
[행당로,3] -> [3,행당로])\r\n rank = []\r\n count = 0\r\n for i in range(len(y)):\r\n s = 0\r\n x = y[i][0]\r\n for j in range(len(y)):\r\n z = y[j][0]\r\n if x>z:\r\n s += 1\r\n rank.append([y[i][1],s+1])\r\n count += 1\r\n if count == len(y):\r\n break #같은 크기의 데이터에 대해 같은 순위를 부여한다\r\n result_list = []\r\n for i in range(len(rank)):\r\n result_list.append([str(rank[i][1])+'위',rank[i][0]])\r\n return result_list", "def order_players_by_initial_rank(self):\n pass", "def rank(userid, args):\r\n testUserid = userid\r\n if len(args):\r\n testUserid = es.getuserid(str(args))\r\n if not es.exists('userid', testUserid):\r\n testUserid = userid\r\n player = players[testUserid]\r\n tokens = {}\r\n tokens['name'] = player['name']\r\n tokens['level'] = player['level']\r\n tokens['xp'] = player['xp']\r\n tokens['nextxp'] = (player['level'] - 1) * int(xpIncrement) + int(startXp)\r\n tokens['credits'] = player['credits']\r\n tokens['rank'] = ranks.getRank(player['steamid'])\r\n tokens['total'] = len( ranks )\r\n for tellUserid in es.getUseridList():\r\n tell(tellUserid, 'rank', tokens)", "def ranked_list_gen(infr, use_refresh=True):\n infr.print('============================', color='white')\n infr.print('--- RANKED LIST LOOP ---', color='white')\n n_prioritized = infr.refresh_candidate_edges()\n if n_prioritized == 0:\n infr.print('RANKING ALGO FOUND NO NEW EDGES')\n return\n if use_refresh:\n infr.refresh.clear()\n yield from infr._inner_priority_gen(use_refresh)", "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def page_rank(arr,num,alpha):\n for i in range(arr.shape[0]):\n count_non = np.count_nonzero(arr[i])\n if count_non == 0:\n arr[i] += 1/(np.count(arr[i]))\n else:\n arr[i] = arr[i] * (1/count_non)\n x_0 = np.array([1] + [0 for _ in range(arr.shape[1] - 1)])\n arr = arr * (1 - alpha)\n arr = arr + (alpha/arr.shape[0])\n arr = np.round(arr,2)\n init_state = np.round(x_0.dot(arr),2)\n for _ in range(num):\n init_state = np.round(init_state,2).dot(arr)\n return init_state", "def get_rank(self) -> int:\r\n return self.rank", "def test_rank(self):\n self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)\n self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)", "def rank(args):\n logger = logging.getLogger(\"GACM\")\n logger.info('Checking the data files...')\n for data_path in args.train_dirs + args.dev_dirs + args.test_dirs + args.label_dirs:\n assert os.path.exists(data_path), '{} file does not exist.'.format(data_path)\n dataset = Dataset(args, train_dirs=args.train_dirs, dev_dirs=args.dev_dirs, test_dirs=args.test_dirs, label_dirs=args.label_dirs)\n logger.info('Initialize the model...')\n model = Agent(args, len(dataset.qid_query), len(dataset.uid_url), len(dataset.vid_vtype))\n logger.info('model.global_step: {}'.format(model.global_step))\n assert args.load_model > -1\n logger.info('Restoring the model...')\n model.load_model(model_dir=args.load_dir, model_prefix=args.algo, global_step=args.load_model, load_optimizer=False)\n logger.info('Start computing NDCG@k for ranking performance (cheat)')\n label_batches = dataset.gen_mini_batches('label', 1, shuffle=False)\n trunc_levels = [1, 3, 5, 10]\n ndcgs_version1, ndcgs_version2 = model.ndcg(label_batches, dataset)\n for trunc_level in trunc_levels:\n ndcg_version1, ndcg_version2 = ndcgs_version1[trunc_level], ndcgs_version2[trunc_level]\n logger.info(\"NDCG@{}: {}, {}\".format(trunc_level, ndcg_version1, ndcg_version2))\n logger.info('【{}, {}】'.format(args.load_model, 
args.minimum_occurrence))\n logger.info('Done with model testing!')", "def reciprocal_rank(ranking, references, atk=None):\n for k, prediction in enumerate(ranking[:atk], 1):\n if prediction in references:\n return 1.0 / k\n return 0.0", "def rank(self, value):\n for key, logger in self._loggers.items():\n logger._rank = value", "def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:\n self.current_rank += current_order_by_value != self.previous_value\n return self.current_rank", "def rank(self) -> tskit.Rank:\n return combinatorics.RankTree.from_tsk_tree(self).rank()", "def move_up(g,k): # g: graph; k: coefficient\n for i,_ in path(g): #i: node address\n if (i%k)!=0:\n move_up_node(g,i,k)", "def rerank_candidates(s, pred2sub_rank, all_predictions, rerank_top=20):\n predicted_smiles = []\n model_input = []\n for (predict_smi, label), _ in Counter(all_predictions).most_common(rerank_top):\n if predict_smi == s:\n continue\n features = get_all_features(\n get_all_ranking_info(pred2sub_rank[predict_smi]))\n predicted_smiles.append((predict_smi, label))\n model_input.append(features)\n\n model = RankingModel()\n model.load_state_dict(torch.load('./models/ranker/rank_model.pt', map_location='cpu'))\n model.eval()\n\n test_loader = DataLoader(RankingTestDataset(\n model_input), batch_size=1000, shuffle=False, num_workers=2)\n ranking_scores = []\n for data in test_loader:\n outputs = model(data)[0]\n ranking_scores.extend(outputs.detach().cpu().numpy())\n\n assert len(predicted_smiles) == len(ranking_scores)\n pred_smi2score = {k: v[1]\n for k, v in zip(predicted_smiles, ranking_scores)}\n return pred_smi2score", "def rank(self, value):\n for logger in self._loggers:\n logger._rank = value", "def get_ranked_points(zpoints, dsq):\n pos_map = calc_positions(zpoints, dsq)\n rpoints = calc_ranked_points(pos_map, dsq)\n return rpoints", "def accession_up(node: PhyloNode, rank: str, target: Optional[PhyloNode]):\n\n # if projection target exists, project to target\n if target is not None:\n target.reads += node.reads\n node.reads.clear()\n # if node is of target rank, set as target\n elif node.rank == rank:\n target = node\n\n # propagate\n for child in node.children:\n accession_up(child, rank, target)", "def PageRankHelper(start, probs, numIterations, alpha=0.5):\n if numIterations <= 0:\n return probs\n else:\n ProbsPropagated = {}\n\n # with probability 1-alpha, we teleport back to the start\n # node\n ProbsPropagated[start] = 1 - alpha\n \n # Propagate the previous probabilities\n for node, prob in probs.iteritems():\n forwards = list(out_edges.get(node, set()))\n backwards = list(in_edges.get(node, set()))\n\n\n # With probability alpha, we move to a follwer\n # And each node distributes its current probability\n # equally to its neighbours.\n\n ProbtoPropagate = alpha * prob / (len(forwards)+len(backwards))\n\n for neighbour in (forwards+backwards):\n if not ProbsPropagated.has_key(neighbour):\n ProbsPropagated[neighbour] = 0\n\n ProbsPropagated[neighbour] += ProbtoPropagate\n\n return PageRankHelper(start, ProbsPropagated, numIterations-1, alpha)", "def compute_node_positions(self):\n pass", "def rank(self, urlids, wordids):\r\n\t\tself.score()\r\n\t\treturn sorted(self.scores.items(), key=lambda v:v[1], reverse=self.rev)", "def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n 
li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)", "def updateNode(self, result):\n self.visits += 1\n self.wins += result", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def get_rank(self):\r\n return self.rank" ]
[ "0.6782681", "0.6664084", "0.6492858", "0.64766073", "0.6452059", "0.6438518", "0.64112085", "0.6404493", "0.62282807", "0.6224711", "0.62225074", "0.61820155", "0.6177658", "0.61345315", "0.61322975", "0.6032352", "0.6016183", "0.59991485", "0.59905785", "0.5985397", "0.5966015", "0.59427786", "0.5938758", "0.5916292", "0.59072614", "0.5894973", "0.5890533", "0.58685136", "0.58652604", "0.58532816", "0.5831604", "0.58201176", "0.5800913", "0.57949543", "0.57734174", "0.5735634", "0.5716176", "0.57154524", "0.5702164", "0.56814736", "0.56795776", "0.56790996", "0.56780106", "0.56719375", "0.56692195", "0.56631553", "0.56570584", "0.5654552", "0.56533664", "0.5652589", "0.56488806", "0.5636199", "0.5627043", "0.5605251", "0.56013703", "0.55938375", "0.5592971", "0.5592531", "0.5560809", "0.5559882", "0.555519", "0.55364573", "0.553454", "0.551595", "0.55081266", "0.55061984", "0.5494205", "0.5488834", "0.5488834", "0.5487778", "0.54861236", "0.547911", "0.5476658", "0.5473615", "0.547056", "0.54649305", "0.5463135", "0.5461687", "0.54566246", "0.5449863", "0.54493797", "0.5447838", "0.5444527", "0.54409903", "0.5427008", "0.5417001", "0.54119384", "0.54094523", "0.54060143", "0.54045844", "0.54005647", "0.5389484", "0.5388325", "0.53833735", "0.53815967", "0.5378005", "0.5376992", "0.53768015", "0.53768015", "0.53486925" ]
0.75397
0
add community membership to each node using walktrap algorithm implemented in igraph
def add_communites(self):
    query = '''
    MATCH (c1)-[r:INTERACTS]->(c2)
    RETURN c1.name, c2.name, r.weight AS weight
    '''
    ig = IGraph.TupleList(self.graph.run(query), weights=True)

    clusters = IGraph.community_walktrap(ig, weights="weight").as_clustering()

    nodes = [{"name": node["name"]} for node in ig.vs]
    for node in nodes:
        idx = ig.vs.find(name=node["name"]).index
        node["community"] = clusters.membership[idx]

    write_clusters_query = '''
    UNWIND {nodes} AS n
    MATCH (c) WHERE c.name = n.name
    SET c.community = toInt(n.community)
    '''

    self.graph.run(write_clusters_query, nodes=nodes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in enumerate(communities)\n if node in c][0]\n graph.graph['modularity'] = nx.algorithms.community.quality\\\n .modularity(nx.Graph(graph),\n communities)", "def add_adj_nodes(self):\n\n for x, row in enumerate(self.grid):\n for y, cell in enumerate(row):\n if x-1 >= 0:\n cell.above = self.grid[x-1][y]\n if y+1 < len(self.grid[0]):\n cell.right = self.grid[x][y+1]\n if x+1 < len(self.grid):\n cell.below = self.grid[x+1][y]\n if y-1 >= 0:\n cell.left = self.grid[x][y-1]", "def forward(self, nodes):\n sec_level_conlved = []\n\n for node in nodes:\n\n first_neighs = list(self.user_to_users_social_adjacency[int(node)])\n\n sec_neighs = []\n for neigh_node in first_neighs:\n sec_neighs.append(self.user_to_users_social_adjacency[int(neigh_node)])\n\n sec_neighs_aggregate_to_first_neighs_feats = self.aggregator.forward(first_neighs, sec_neighs, self.userFeatsUVFlag, False)\n\n # self_feats_first = self.uv_updated_features(torch.LongTensor(first_neighs).cpu().numpy()).to(self.device)\n self_feats_first = self.user_embeddings.weight[first_neighs]\n self_feats_first = self_feats_first\n\n first_neighs_sec_neighs_feats = torch.cat([self_feats_first, sec_neighs_aggregate_to_first_neighs_feats], dim=1)\n\n first_neighs_sec_neighs_feats = F.relu(self.w1(first_neighs_sec_neighs_feats))\n first_neighs_sec_neighs_feats = F.relu(self.w2(first_neighs_sec_neighs_feats))\n\n sec_level_conlved.append(first_neighs_sec_neighs_feats)\n\n parentnodes_convolved_with_sec_level_convolves = self.aggregator.forward(nodes, sec_level_conlved, self.userFeatsUVFlag, True)\n\n nodes_self_features = self.uv_updated_features(torch.LongTensor(nodes.cpu().numpy())).to(self.device)\n nodes_self_features = nodes_self_features.t() #TODO\n\n convolved = torch.cat([nodes_self_features, parentnodes_convolved_with_sec_level_convolves], dim=1)\n convolved = F.relu(self.w_cnvlvd(convolved))\n\n return convolved", "def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())", "def assign_louvain_communities(\n reddit_graph: nx.Graph,\n wiki_graph: nx.Graph = None,\n reddit_edge_weight: str = \"count\",\n others_threshold: int = 2,\n louvain_resolution_reddit: float = 1,\n) -> Union[nx.Graph, Tuple[nx.Graph, nx.Graph]]:\n reddit_dendrogram = community.generate_dendrogram(\n reddit_graph, weight=reddit_edge_weight, resolution=louvain_resolution_reddit\n )\n if wiki_graph:\n wiki_dendrogram = community.generate_dendrogram(\n wiki_graph,\n )\n\n # Iterate over reddit nodes to assign communities\n for node in reddit_graph:\n # Iterate over all levels of the dendrogram\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n reddit_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n if wiki_graph:\n # Also add the community from the other graph to allow comparing\n # Again, iterate over all levels in the dendrogram\n for level in 
range(len(wiki_dendrogram) - 1):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n\n try:\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n\n except:\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{level}\"\n ] = f\"L{level}-NONE\"\n if wiki_graph:\n for node in wiki_graph:\n for level in range(\n len(wiki_dendrogram) - 1,\n ):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n wiki_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n # Also add the community from the other graph to allow comparing\n\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n try:\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n wiki_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n except:\n wiki_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{level}\"\n ] = f\"L{level}-NONE\"\n\n return (\n (reddit_graph, reddit_dendrogram, wiki_graph, wiki_dendrogram)\n if wiki_graph\n else (reddit_graph, reddit_dendrogram)\n )", "def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity", "def 
wsngraph():\n G = nx.Graph()\n G.add_node(1)\n G.add_node(2)\n G.add_node(3)\n G.add_node(4)\n G.add_node(5)\n G.add_node(6)\n G.add_node(7)\n G.add_node(8)\n G.add_node(9)\n G.add_node(10)\n G.add_node(11)\n G.add_node(12)\n G.add_edge(1,3,weight=1)\n G.add_edge(1,2,weight=6)\n G.add_edge(1,12,weight=16)\n G.add_edge(2,11,weight=12)\n G.add_edge(2,6,weight=10)\n G.add_edge(2,5,weight=11)\n G.add_edge(3,4,weight=10)\n G.add_edge(3,7,weight=11)\n G.add_edge(3,8,weight=14)\n G.add_edge(3,9,weight=11)\n G.add_edge(4,7,weight=9)\n G.add_edge(5,6,weight=7)\n G.add_edge(5,9,weight=12)\n G.add_edge(6,9,weight=9)\n G.add_edge(7,10,weight=10)\n G.add_edge(8,10,weight=2)\n G.add_edge(8,11,weight=11)\n G.add_edge(8,9,weight=12)\n G.add_edge(9,11,weight=8)\n G.add_edge(10,12,weight=3)\n G.pos={}\n G.pos[1]=(6,4)\n G.pos[2]=(-1,3.7)\n G.pos[3]=(4.7,3.5)\n G.pos[4]=(5.3,3.2)\n G.pos[5]=(0,3)\n G.pos[6]=(1.4,3.4)\n G.pos[7]=(5,2.6)\n G.pos[8]=(4.7,0)\n G.pos[9]=(1.4,2.4)\n G.pos[10]=(5.2,0.5)\n G.pos[11]=(1.3,0)\n G.pos[12]=(6,2.4)\n elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] > 8]\n esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <= 8]\n nx.draw_networkx_nodes(G,G.pos,node_color='w')\n nx.draw_networkx_edges(G,G.pos,elarge,width=3,edge_color='r',alpha=0.3)\n nx.draw_networkx_edges(G,G.pos,esmall,width=1,edge_color='b',alpha=0.3)\n nx.draw_networkx_labels(G,G.pos)\n ax=plt.gca()\n ax.axison = False\n label = {} \n for (u,v) in G.edges():\n d = G.get_edge_data(u,v)\n label[(u,v)]=d['weight']\n edge_label=nx.draw_networkx_edge_labels(G,G.pos,edge_labels=label)\n\n return(G)", "def detection_algorithm(G, edge_weight):\n Gc = G.copy()\n set_node_attributes(Gc, attr_name='k-index')\n seed_node2communities = {}\n\n from operator import itemgetter\n while Gc.number_of_nodes() > 0:\n seed_node = max(list(Gc.nodes(data='k-index')), key=itemgetter(1))[0]\n nodes_in_community, modularity = find_local_community(Gc, seed_node=seed_node, weight=edge_weight)\n seed_node2communities[seed_node] = (nodes_in_community, modularity)\n Gc.remove_nodes_from(nodes_in_community)\n return seed_node2communities", "def additive_phylogeny(matrix, n, G):\n new_node = n\n\n def additive_recur_helper(matrix, n, G):\n\n nonlocal new_node\n\n if n == 2:\n print(\"d add_edge (%s,%s):%s\" % (0, 1, matrix[0, 1]))\n G.add_edge(0, 1, weight=matrix[0, 1])\n return\n\n limblen = limblength(n - 1, matrix)\n i, k = find_i_k(matrix, n - 1, limblen)\n x = matrix[i, n - 1] - limblen\n\n print(\"n=%s limblen=%s i=%s k=%s x=%s\" % (n, limblen, i, k, x))\n\n additive_recur_helper(matrix[0 : n - 1, 0 : n - 1], n - 1, G)\n\n v = node_at_distance(G, i, k, x, matrix[i, k], new_node)\n if v == new_node:\n new_node += 1\n\n print(\"node_at_distance %s from %s is %s\" % (x, i, v))\n\n print(\"e add_edge (%s,%s):%s\" % (v, n - 1, limblen))\n G.add_edge(v, n - 1, weight=limblen)\n\n # draw graph if small\n if len(G) < 30:\n global plot_cnt\n pos = nx.kamada_kawai_layout(G)\n labels = nx.get_edge_attributes(G, \"weight\")\n nx.draw(G, pos, with_labels=True)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.draw()\n plt.savefig(\"Graph\" + str(plot_cnt) + \".png\", format=\"PNG\")\n plt.clf()\n plot_cnt += 1\n\n return\n\n additive_recur_helper(matrix, n, G)\n\n return", "def make_communities(community_side, communities_per_side):\n community_size = community_side * community_side\n communities = []\n seed_node = 0\n for i in range(communities_per_side):\n for j in range(communities_per_side):\n community = []\n 
for k in range(community_side):\n for z in range(community_side):\n _id = (\n communities_per_side * community_size * i\n + community_side * j\n + z\n + k * (communities_per_side * community_side)\n )\n # print(f\"{_id} \", end=\"\")\n community.append(_id)\n # print(\"- \", end=\"\")\n communities.append(community)\n #print()\n return communities", "def getUpstream(node, distance, pInteractions):\n rpInteractions = reverseInteractions(pInteractions)\n seenNodes = set([node])\n borderNodes = [node]\n frontierNodes = []\n for dist in range(distance):\n while len(borderNodes) > 0:\n currNode = borderNodes.pop()\n if currNode in rpInteractions:\n for i in rpInteractions[currNode].keys():\n if i not in seenNodes:\n seenNodes.update([i])\n frontierNodes.append(i)\n borderNodes = deepcopy(frontierNodes)\n frontierNodes = list()\n return(seenNodes)", "def __decorate_nodes(nodes, space):\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)", "def getMutationPathways(node, gPathway, distance = [2, 1], include = None):\n rpInteractions = reverseInteractions(gPathway.interactions)\n if include == None:\n include = set(gPathway.nodes.keys())\n upPathway = Pathway({node : gPathway.nodes[node]}, {})\n downPathway = Pathway({node : gPathway.nodes[node]}, {})\n seenUp = set([node])\n seenDown = set([node])\n unresolvedUp = [node]\n unresolvedDown = [node]\n for d in range(distance[0]): \n ## Up-\n frontierUp = []\n while len(unresolvedUp) > 0:\n currNode = unresolvedUp.pop()\n ## Add complex as upstream for seed node\n if currNode == node:\n if currNode in gPathway.interactions:\n for target in gPathway.interactions[currNode].keys():\n if gPathway.interactions[currNode][target] == \"component>\":\n seenUp.update([target])\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[currNode] = {}\n upPathway.interactions[currNode][target] = \"component>\"\n unresolvedUp.append(target)\n ## Add upstream\n if currNode in gPathway.rinteractions:\n for target in gPathway.rinteractions[currNode].keys():\n if target not in seenUp:\n seenUp.update([target])\n if gPathway.nodes[target] == \"protein\":\n if target in include:\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[target] = {}\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n frontierUp.append(target)\n elif gPathway.nodes[target] == \"complex\":\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[target] = {}\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedUp.append(target)\n else:\n if target not in upPathway.interactions:\n upPathway.interactions[target] = {}\n if currNode not in upPathway.interactions[target]:\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedUp = deepcopy(frontierUp)\n for d in range(distance[1]):\n ## Down-\n frontierDown = []\n while len(unresolvedDown) > 0:\n currNode = unresolvedDown.pop()\n ## Add downstream\n if currNode in gPathway.interactions:\n for target in gPathway.interactions[currNode].keys():\n if target not in seenDown:\n seenDown.update([target])\n if gPathway.nodes[target] == \"protein\":\n if target in include:\n downPathway.nodes[target] = gPathway.nodes[target]\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n 
downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n frontierDown.append(target)\n elif gPathway.nodes[target] == \"complex\":\n downPathway.nodes[target] = gPathway.nodes[target]\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n unresolvedDown.append(target)\n else:\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n if target not in downPathway.interactions[currNode]:\n downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n ## Add upstream for non-seed node\n # if currNode != node:\n # if currNode in gPathway.rinteractions:\n # for target in gPathway.rinteractions[currNode].keys():\n # if target not in seenDown:\n # seenDown.update([target])\n # if gPathway.nodes[target] == \"protein\":\n # if target in include:\n # downPathway.nodes[target] = gPathway.nodes[target]\n # downPathway.interactions[target] = {}\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n # elif gPathway.nodes[target] == \"complex\":\n # downPathway.nodes[target] = gPathway.nodes[target]\n # downPathway.interactions[target] = {}\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n # unresolvedDown.append(target)\n # else:\n # if target not in downPathway.interactions:\n # downPathway.interactions[target] = {}\n # if currNode not in downPathway.interactions[target]:\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedDown = deepcopy(frontierDown)\n return(upPathway, downPathway)", "def transition(self):\n for node in self.net.nodes():\n if node not in self.evidence:\n self.update_node(node)", "def sum_product(nodes, edges, node_potentials, edge_potentials):\n marginals = {}\n messages = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n \n def send_message(j, i, grand_children_of_i):\n \"\"\"\n Send messages from node j to node i, i.e. 
summing over all xj\n \n Input\n -----\n j: Source node (to be summed over)\n i: Destination node\n grand_children_of_i: All neighboring nodes except node i (sources of messages).\n \"\"\"\n messages[(j,i)] = {xi: 0 for xi in node_potentials[i]}\n \n incoming_messages = {xj: 1 for xj in node_potentials[j]} # Default to be 1 for leaf nodes (no effect)\n if len(grand_children_of_i) != 0: # Only deal with this case because at leaf node, no messages to be collected\n for xj in node_potentials[j]:\n for grand_child in grand_children_of_i:\n incoming_messages[xj] *= messages[(grand_child, j)][xj]\n for xj in node_potentials[j]:\n for xi in node_potentials[i]:\n messages[(j,i)][xi] += node_potentials[j][xj] * edge_potentials[(j,i)][xj][xi] * incoming_messages[xj]\n \n \n def collect_messages(j, i):\n \"\"\"\n Collect messages from node j to node i\n \"\"\"\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n for k in j_neighbors_except_i: # No effect when j_neighbors_except_i is empty []\n collect_messages(k, j)\n send_message(j, i, j_neighbors_except_i)\n \n def distribute_messages(i, j):\n \"\"\"\n Distribute messages from node i to node j\n \"\"\"\n i_neighbors_except_j = [k for k in edges[i] if k != j]\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n send_message(i, j, i_neighbors_except_j)\n for k in j_neighbors_except_i:\n distribute_messages(j, k)\n \n def compute_marginal(i):\n marginals[i] = node_potentials[i]\n for x in marginals[i]:\n for neighbor_node in edges[i]:\n marginals[i][x] *= messages[(neighbor_node, i)][x]\n \n # Renormalize\n normalization_const = np.array(list(marginals[i].values())).sum()\n for x in marginals[i]:\n marginals[i][x] /= normalization_const\n \n \n root_node = list(nodes)[0]\n for node in edges[root_node]:\n collect_messages(node, root_node)\n for node in edges[root_node]:\n distribute_messages(root_node, node)\n for node in nodes:\n compute_marginal(node)\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return marginals", "def _position_nodes(g, partition, **kwargs):\n\n communities = dict()\n for node, community in partition.items():\n try:\n communities[community] += [node]\n except KeyError:\n communities[community] = [node]\n\n pos = dict()\n for ci, nodes in communities.items():\n subgraph = g.subgraph(nodes)\n pos_subgraph = nx.spring_layout(subgraph,k=10,iterations=20)\n #pos_subgraph = nx.spring_layout(subgraph, **kwargs)\n pos.update(pos_subgraph)\n\n return pos", "def metis(W, levels, rid=None):\n # Function written by M. 
Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents", "def addNeighbor(self, neighbor):", "def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to 
correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0", "def distributePheromones(self, graph):\n pheromoneUpdate = 100.0 / self.fitness\n previousBin = 0\n for newBin, item in self.route:\n graph.graph[previousBin, item, newBin] += pheromoneUpdate\n previousBin = newBin", "def _community_detection(self, kg: KG) -> None:\n nx_graph = nx.Graph()\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n nx_graph.add_node(str(vertex), vertex=vertex)\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n # Neighbors are predicates\n for pred in kg.get_neighbors(vertex):\n for obj in kg.get_neighbors(pred):\n nx_graph.add_edge(\n str(vertex), str(obj), name=str(pred)\n )\n\n # Create a dictionary that maps the URI on a community\n partition = community.best_partition(\n nx_graph, resolution=self.resolution\n )\n self.labels_per_community = defaultdict(list)\n\n self.communities = {}\n vertices = nx.get_node_attributes(nx_graph, \"vertex\")\n for node in partition:\n if node in vertices:\n self.communities[vertices[node]] = partition[node]\n\n for node in self.communities:\n self.labels_per_community[self.communities[node]].append(node)", "def construct_network_from_neighbours_list(related_characters: list):\n graph = nx.Graph()\n for edge in related_characters:\n sentiment = edge[1]\n color = ''\n if sentiment == 'Positive':\n color = 'g'\n elif sentiment == 'Negative':\n color = 'r'\n elif sentiment == 'Neutral':\n color = 'k'\n # graph.add_node(edge[0][0], popularity=\n graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])\n\n return graph", "def make_walk_node(self, g):\r\n start = len(self.walk)\r\n self.walk.append(g)\r\n g.visited += 1\r\n self.add_loop(start, g)\r\n\r\n i = start\r\n while i < len(self.walk):\r\n node = self.walk[i]\r\n unused = self.find_unused_connection(node)\r\n if unused is None:\r\n i += 2\r\n continue\r\n i += self.add_loop(i, node)\r\n i += 2", "def cluster_connectivity(G, weight='weight'):\n\t# 1) indexing the edges by community\n\tsum_edges_dic = { com : {} for com in range(G.nb_communities)}\n\tfor node1, node2 in G.edges():\n\t\tcomm1 = G.nodes[node1]['community']\n\t\tcomm2 = G.nodes[node2]['community']\n\t\tif comm2 not in sum_edges_dic[comm1]:\n\t\t\tsum_edges_dic[comm1][comm2] = 0\n\t\t\tsum_edges_dic[comm2][comm1] = 0\n\t\telse:\n\t\t\tif weight is None:\n\t\t\t\tsum_edges_dic[comm1][comm2] += 1\n\t\t\t\tsum_edges_dic[comm2][comm1] += 1\n\t\t\telse:\t\n\t\t\t\tsum_edges_dic[comm1][comm2] += G.edges[node1, node2][weight]\n\t\t\t\tsum_edges_dic[comm2][comm1] += G.edges[node1, node2][weight]\n\tc_connectivity = {}\n\t# 2) computing the connectivity\n\tfor com in sum_edges_dic:\n\t\tin_out_edges = sum(sum_edges_dic[com].values())\n\t\tc_connectivity[com] = round(- np.log2(sum_edges_dic[com][com] / in_out_edges),3) \n\treturn 
c_connectivity", "def extend_labeled_graph(graph):\n la = community.best_partition(graph)\n nx.set_node_attributes(graph, la, 'community')\n nodes = graph.nodes(data=True)\n # nx.write_graphml(graph,'./data_2/clean_data/comm_graph.graphml')\n\n a = list(set(list(la.values())))\n temp = {}\n for comm in a:\n temp[comm] = [k for k, v in la.items() if v == comm]\n\n s = sorted(temp, key=lambda k: len(temp[k]), reverse=True)[:10]\n comm_size = {}\n for key in s:\n if key in temp:\n comm_size[key] = temp[key]\n\n dict_leaning_amount = {}\n for comm, ids in comm_size.items():\n count_r = 0\n for node in ids:\n if graph.node[node]['leaning'] == 'R':\n count_r += 1\n dict_leaning_amount[comm] = count_r\n sort_lean = sorted(dict_leaning_amount.items(), key=operator.itemgetter(1), reverse=True)\n top_3 = [k for k, v in sort_lean][0:3]\n\n extendible_nodes = []\n for comm in top_3:\n nodes = temp[comm]\n for node in nodes:\n if graph.node[node]['leaning'] == 'Unknown':\n extendible_nodes.append(node)\n\n original_graph = create_labeled_subgraph(graph)\n extendible_nodes.extend(list(create_labeled_subgraph(graph).nodes()))\n extendible_node_Set = set(extendible_nodes)\n\n extended_graph = nx.subgraph(graph, list(extendible_node_Set))\n return original_graph, extended_graph", "def _general_link(clusters, i, j, method):\n for k in range(len(clusters)):\n if k != i and k != j:\n if method.__name__ == \"ward_update\":\n new_distance = method(clusters[i,k], clusters[j,k], k)\n else:\n new_distance = method(clusters[i,k], clusters[j,k])\n clusters[i,k] = new_distance\n clusters[k,i] = new_distance\n return clusters", "def peel_clusters(self, plot_step=0):\n\n def peel_edge(cluster, vertex):\n \"\"\"\n :param cluster current active cluster\n :param vertex pendant vertex of the edge to be peeled\n\n Recursive function which peels a branch of the tree if the input vertex is a pendant vertex\n\n If there is only one neighbor of the input vertex that is in the same cluster, this vertex is a pendant vertex and can be peeled. 
The function calls itself on the other vertex of the edge leaf.\n \"\"\"\n plot = True if self.plot and plot_step else False\n num_connect = 0\n\n for wind in self.graph.wind:\n (NV, NE) = vertex.neighbors[wind]\n if NE.support == 2:\n new_cluster = find_cluster_root(NV.cluster)\n if new_cluster is cluster and not NE.peeled:\n num_connect += 1\n edge, new_vertex = NE, NV\n if num_connect > 1:\n break\n if num_connect == 1:\n edge.peeled = True\n if vertex.state:\n edge.state = not edge.state\n edge.matching = True\n vertex.state = False\n new_vertex.state = not new_vertex.state\n if plot:\n self.uf_plot.plot_edge_step(edge, \"match\")\n self.uf_plot.plot_strip_step_anyon(vertex)\n self.uf_plot.plot_strip_step_anyon(new_vertex)\n else:\n if plot:\n self.uf_plot.plot_edge_step(edge, \"peel\")\n peel_edge(cluster, new_vertex)\n\n for vertex in self.graph.V.values():\n if vertex.cluster is not None:\n cluster = find_cluster_root(vertex.cluster)\n peel_edge(cluster, vertex)\n\n if self.plot and not plot_step:\n self.uf_plot.plot_removed(self.graph, \"Peeling completed.\")", "def run_adding_edges(self):\n indices = np.where(self.X==0)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def complex_network_mapping(graph):\n vect = []\n\n n = nx.number_of_nodes(graph)\n e = nx.number_of_edges(graph)\n print n, e\n\n# adj = nx.adjacency_matrix(graph).toarray()\n# adj_bin = np.where(adj > 0, 1., 0.)\n# adj_conn = 1 - adj\n adj_bin = nx.adjacency_matrix(graph).toarray()\n adj_bin = np.array(adj_bin, dtype=np.float)\n\n # Node Betweenness binary\n bt_bin = nx.betweenness_centrality(graph).values()\n avg_btb = np.mean(bt_bin)\n vect.append(avg_btb)\n\n # Edge betweenness\n ebt = np.array(nx.edge_betweenness_centrality(graph).values())\n vect.append(np.mean(ebt))\n\n # Eigen vector centrality binary\n evc_bin = eigenvector_centrality_und(adj_bin)\n avg_evcb = np.mean(evc_bin)\n vect.append(avg_evcb)\n\n # Flow coefficient\n _, flow_bin, _ = flow_coef_bd(adj_bin)\n avg_flow = np.mean(flow_bin)\n vect.append(avg_flow)\n\n # Kcoreness centrality\n kcor_bin, _ = kcoreness_centrality_bu(adj_bin)\n avg_kcor = np.mean(kcor_bin)\n vect.append(avg_kcor)\n\n # Degree assortivity\n dac = nx.degree_assortativity_coefficient(graph)\n vect.append(dac)\n\n # Page rank centrality\n# pgr_wei = pagerank_centrality(adj_bin, d=0.85)\n# avg_pgr = np.mean(pgr_wei)\n# vect.append(avg_pgr)\n\n # Rich club coefficient\n# rcc = nx.rich_club_coefficient(graph).values()\n# avg_rcc = np.mean(rcc)\n# vect.append(avg_rcc)\n\n # Transitivity\n tr = nx.transitivity(graph)\n vect.append(tr)\n\n # average clustering\n avg_clst = nx.average_clustering(graph)\n vect.append(avg_clst)\n\n glb_ef = efficiency_bin(adj_bin)\n vect.append(glb_ef)\n\n return vect", "def bgll(self, graph, node_count, min_mod, max_pass):\n\n #the belonging of the node\n bl = [i for i in range(node_count)]\n #the node's weight in community\n _in = [0.0] * node_count\n #the node's weight in graph\n _tot = []\n #total weight of a node, just a copy of _tot\n k = []\n #the total weight of the graph\n m = []\n\n #inital the in-param\n network = [[0.0] * node_count for n in range(node_count)]\n for node, tag, weight in graph:\n network[node][tag] = weight\n for node in network:\n k.append(sum(node))\n _tot = k[:]\n m = sum(k)\n #inital the in-param\n\n def modularity():\n \"\"\"\n This function mainly computes the modularity of the network\n Return:\n mod->the modularity 
value\n \"\"\"\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q\n\n def modularity_gain(n, c, dnodecomm):\n \"\"\"\n This function mainly computes the modularity gain of a node moving\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n Return:\n gain->modularity gain\n \"\"\"\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m\n\n def neigh_comm(n):\n \"\"\"\n This function mainly computes the weight between the node and it's neighbour community\n Param:\n n->node id\n Return:\n nc->the map of the weight between the node and it's neighbour community\n nc=>{cid,weight}\n \"\"\"\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc\n\n def insert(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of insert the node into community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] += k[n]\n _in[c] += 2 * dnodecomm + network[n][n]\n bl[n] = c\n\n def remove(n, c, dnodecomm):\n \"\"\"\n This function mainly get the effect of remove the node off community\n Param:\n n->node id\n c->community id\n dnodecomm->the weight between the node and the community\n \"\"\"\n\n _tot[c] -= k[n]\n _in[c] -= 2 * dnodecomm + network[n][n]\n bl[n] = -1\n\n def detect():\n \"\"\"\n This function mainly detect the community of the graph.\n \"\"\"\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod\n\n detect()\n return bl", "def mark_nodes_as_members_of_threat_zone(self):\n\n for y in range(self.top_left_y, self.top_left_y + self.height):\n for x in range(self.top_left_x, self.top_left_x + self.width):\n main_node = self.main_graph[y][x]\n main_node.threat_zones.add(self)\n\n self.nodes.append(main_node)", "def move_up(g,k): # g: graph; k: coefficient\n for i,_ in path(g): #i: node address\n if (i%k)!=0:\n move_up_node(g,i,k)", "def apply(H, x, g=lambda v, e: np.sum(v[list(e)])):\n new_x = np.zeros(H.num_nodes)\n for edge in H.edges.members():\n edge = list(edge)\n # ordered permutations\n for shift in range(len(edge)):\n new_x[edge[shift]] += g(x, edge[shift + 1 :] + edge[:shift])\n return new_x", "def make_crosswalks(street_network, sidewalk_network):\n\n intersection_nodes = street_network.nodes.get_intersection_nodes()\n # intersection_nodes = [street_network.nodes.get(nid) for nid in intersection_node_ids]\n\n # Create sidewalk nodes for each intersection node and overwrite the adjacency information\n for intersection_node in intersection_nodes:\n try:\n adj_street_nodes = street_network.get_adjacent_nodes(intersection_node)\n adj_street_nodes = sort_nodes(intersection_node, 
adj_street_nodes)\n v_curr = intersection_node.vector()\n\n if len(adj_street_nodes) == 3:\n # Take care of the case where len(adj_nodes) == 3.\n # Identify the largest angle that are formed by three segments\n # Make a dummy node between two vectors that form the largest angle\n # Using the four nodes (3 original nodes and a dummy node), create crosswalk nodes\n vectors = [intersection_node.vector_to(adj_street_node, normalize=True) for adj_street_node in adj_street_nodes]\n angles = [math.acos(np.dot(vectors[i - 1], vectors[i])) for i in range(3)]\n idx = np.argmax(angles)\n vec_idx = (idx + 1) % 3\n dummy_vector = - vectors[vec_idx] * distance_to_sidewalk\n inverse_vec = - vectors[vec_idx]\n # dummy_vector = inverse_vec * latlng_offset_size(vectors[vec_idx][1], vectors[vec_idx][0],\n # vector=inverse_vec,\n # distance=distance_to_sidewalk)\n dummy_coordinate_vector = v_curr + dummy_vector\n dummy_node = Node(None, dummy_coordinate_vector[0], dummy_coordinate_vector[1])\n adj_street_nodes.insert(idx, dummy_node)\n\n # Create crosswalk nodes and add a cross walk to the data structure\n try:\n crosswalk_nodes = make_crosswalk_nodes(intersection_node, adj_street_nodes)\n except ValueError:\n raise\n\n crosswalk_node_ids = [node.id for node in crosswalk_nodes]\n crosswalk_node_ids.append(crosswalk_node_ids[0])\n # crosswalk = Sidewalk(None, crosswalk_node_ids, \"crosswalk\")\n\n # Add nodes to the network\n for crosswalk_node in crosswalk_nodes:\n sidewalk_network.add_node(crosswalk_node)\n sidewalk_network.nodes.crosswalk_node_ids.append(crosswalk_node.id)\n\n # Add crosswalks to the network\n crosswalk_node_id_pairs = window(crosswalk_node_ids, 2)\n for node_id_pair in crosswalk_node_id_pairs:\n n1 = sidewalk_network.nodes.get(node_id_pair[0])\n n2 = sidewalk_network.nodes.get(node_id_pair[1])\n if len(n1.get_way_ids()) == 1 and len(n2.get_way_ids()) == 1:\n crosswalk = Sidewalk(None, list(node_id_pair), \"footway\")\n else:\n crosswalk = Sidewalk(None, list(node_id_pair), \"crosswalk\")\n sidewalk_network.add_way(crosswalk)\n\n # Connect the crosswalk nodes with correct sidewalk nodes\n connect_crosswalk_nodes(sidewalk_network, crosswalk_node_ids)\n except ValueError:\n log.exception(\"ValueError in make_sidewalks, so skipping...\")\n continue\n return", "def fL():\n for n in b.allNodes():\n n.autoplace()", "def addInLink(source, target):\n if inlinkGraph.has_key(source):\n # if target not in inlinkGraph[source]:# uncomment to remove repetitives\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = inlinkGraphDegree[source] + 1\n else:\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = 1", "def nsga_replacement(random, population, parents, offspring, args):\n survivors = []\n combined = list(population)\n combined.extend(offspring)\n \n # Perform the non-dominated sorting to determine the fronts.\n fronts = []\n pop = set(range(len(combined)))\n while len(pop) > 0:\n front = []\n for p in pop:\n dominated = False\n for q in pop:\n if combined[p] < combined[q]:\n dominated = True\n break\n if not dominated:\n front.append(p)\n fronts.append([dict(individual=combined[f], index=f) for f in front])\n pop = pop - set(front)\n \n # Go through each front and add all the elements until doing so\n # would put you above the population limit. At that point, fall\n # back to the crowding distance to determine who to put into the\n # next population. 
Individuals with higher crowding distances\n # (i.e., more distance between neighbors) are preferred.\n for i, front in enumerate(fronts):\n if len(survivors) + len(front) > len(population):\n # Determine the crowding distance.\n distance = [0 for _ in range(len(combined))]\n individuals = list(front)\n num_individuals = len(individuals)\n num_objectives = len(individuals[0]['individual'].fitness)\n for obj in range(num_objectives):\n individuals.sort(key=lambda x: x['individual'].fitness[obj])\n distance[individuals[0]['index']] = float('inf')\n distance[individuals[-1]['index']] = float('inf')\n for i in range(1, num_individuals-1):\n distance[individuals[i]['index']] = (distance[individuals[i]['index']] + \n (individuals[i+1]['individual'].fitness[obj] - \n individuals[i-1]['individual'].fitness[obj]))\n \n crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]\n crowd.sort(key=lambda x: x['dist'], reverse=True)\n last_rank = [combined[c['index']] for c in crowd]\n r = 0\n num_added = 0\n num_left_to_add = len(population) - len(survivors)\n while r < len(last_rank) and num_added < num_left_to_add:\n if last_rank[r] not in survivors:\n survivors.append(last_rank[r])\n num_added += 1\n r += 1\n # If we've filled out our survivor list, then stop.\n # Otherwise, process the next front in the list.\n if len(survivors) == len(population):\n break\n else:\n for f in front:\n if f['individual'] not in survivors:\n survivors.append(f['individual'])\n return survivors", "def jupiteNetwork(): \n \n # Build a graph for large clos topo\n tor_cut, aggr_cut, spine_cut = 512, 256, 256\n #2048, 4096, 4096 #32*4*2, 64*2, 16*2\n switches, edges = tp.jupiter_topo(\n tor_cut=tor_cut, aggr_cut=aggr_cut, spine_cut=spine_cut\n )\n \n G = build_graph(edges)\n external_edges = []\n for node in G.nodes():\n if 'sh' in node:\n G.add_edge(node, 'e1')\n \n \"\"\"\n paths = list(nx.all_shortest_paths(G, 'tor385', 'tor1'))\n #print(paths)\n #eee\n paths = list(nx.all_shortest_paths(G, 'tor129', 'tor257'))\n #print(paths)\n paths = list(nx.all_shortest_paths(G, 'tor257', 'tor385'))\n #print(paths)\n #eee\n \"\"\"\n switch_nodes, hnodes, tors, anodes, snodes = tp.getJupiternNodes(\n tors_num=tor_cut, aggr_num=aggr_cut, spine_num=spine_cut\n )\n print('**** is_connected(G)', nx.is_connected(G))\n print('**** number of components', nx.number_connected_components(G))\n \n tors = tors[0:512] + ['e1']\n\n # Get the routing path of all nodes\n table_file_name = '../outputs/jupiter_routing_table_anodes_cut4.txt'\n\n if((os.path.isfile(table_file_name)) == False):\n table = all_routing(G, tors, table_file_name)\n else:\n json_data = open(table_file_name).read()\n table = json.loads(json_data)\n \n seeds, polys = cf.get_seeds_table_jupiter(switch_nodes + ['e1']) #\n \n return G, tors, edges, table, seeds, polys, anodes", "def generate_adjacents(node):\n # Makes a dictionary where keys are current upper token positions and\n # values are the list of positions attainable from one slide move\n slide_moves = {}\n for key, value in node.boardstate.items():\n if value.isupper() and value != \"B\":\n slide_moves[key] = get_slide_moves(key, node.boardstate)\n\n # Append list of swing moves to get all moves\n moves_dict = {}\n #relevant_pieces = [name ]\n for key in slide_moves:\n all_moves = set(slide_moves[key] + get_swing_moves(key, slide_moves))\n moves_dict[key] = list(all_moves)\n\n # Convert from dictionary to list of list of tuples of the form:\n #[[(curr_move, next_move)...]...] 
where each tokens moves occupy a list\n moves_list = []\n for curr, news in moves_dict.items():\n moves_list.append([(curr, new) for new in news])\n\n # Get all combinations of moves and for each combo construct a new board state\n adjacent_states = []\n turns = list(product(*moves_list))\n\n for turn in turns:\n new_board = apply_turn(node, turn)\n if new_board:\n adjacent_states.append((turn, new_board))\n return adjacent_states", "def make_connected(self):\r\n if nx.is_connected(self.g): return\r\n import random\r\n cc = list( nx.connected_components(self.g) )\r\n nodes = [random.sample(cluster,1)[0] for cluster in cc]\r\n for n1,n2 in zip(nodes[:-1],nodes[1:]):\r\n self.factors.append(factor(var=[n1,n2],card=self.cardVec[[n1,n2]],val=scipy.ones(4)))\r\n self.update()", "def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def island_migration(self):\n for y in self.island_map:\n for cell in y:\n cell.migration()\n\n for y in self.island_map:\n for cell in y:\n for animal in cell.population:\n animal.has_moved = False", "def merge_nodes(G,nodes, new_node, attr_dict=None, **attr):\n \n G.add_node(new_node, distToCancer=0, classCell=\"cancerCluster\") # Add the 'merged' node\n \n for n1,n2,data in G.edges(data=True):\n # For all edges related to one of the nodes to merge,\n # make an edge going to or coming from the `new gene`.\n if n1 in nodes:\n G.add_edge(new_node,n2,data)\n elif n2 in nodes:\n G.add_edge(n1,new_node,data)\n \n for n in nodes: # remove the merged nodes\n if(G.has_node(n)):\n G.remove_node(n)", "def nsga_replacement(random, population, parents, offspring, args):\r\n survivors = []\r\n combined = list(population)\r\n combined.extend(offspring)\r\n \r\n # Perform the non-dominated sorting to determine the fronts.\r\n fronts = []\r\n pop = set(range(len(combined)))\r\n while len(pop) > 0:\r\n front = []\r\n for p in pop:\r\n dominated = False\r\n for q in pop:\r\n if combined[p] < combined[q]:\r\n dominated = True\r\n break\r\n if not dominated:\r\n front.append(p)\r\n fronts.append([dict(individual=combined[f], index=f) for f in front])\r\n pop = pop - set(front)\r\n \r\n # Go through each front and add all the elements until doing so\r\n # would put you above the population limit. At that point, fall\r\n # back to the crowding distance to determine who to put into the\r\n # next population. 
Individuals with higher crowding distances\r\n # (i.e., more distance between neighbors) are preferred.\r\n for i, front in enumerate(fronts):\r\n if len(survivors) + len(front) > len(population):\r\n # Determine the crowding distance.\r\n distance = [0 for _ in range(len(combined))]\r\n individuals = list(front)\r\n num_individuals = len(individuals)\r\n num_objectives = len(individuals[0]['individual'].fitness)\r\n for obj in range(num_objectives):\r\n individuals.sort(key=lambda x: x['individual'].fitness[obj])\r\n distance[individuals[0]['index']] = float('inf')\r\n distance[individuals[-1]['index']] = float('inf')\r\n for i in range(1, num_individuals-1):\r\n distance[individuals[i]['index']] = (distance[individuals[i]['index']] + \r\n (individuals[i+1]['individual'].fitness[obj] - \r\n individuals[i-1]['individual'].fitness[obj]))\r\n \r\n crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]\r\n crowd.sort(key=lambda x: x['dist'], reverse=True)\r\n last_rank = [combined[c['index']] for c in crowd]\r\n r = 0\r\n num_added = 0\r\n num_left_to_add = len(population) - len(survivors)\r\n while r < len(last_rank) and num_added < num_left_to_add:\r\n if last_rank[r] not in survivors:\r\n survivors.append(last_rank[r])\r\n num_added += 1\r\n r += 1\r\n # If we've filled out our survivor list, then stop.\r\n # Otherwise, process the next front in the list.\r\n if len(survivors) == len(population):\r\n break\r\n else:\r\n for f in front:\r\n if f['individual'] not in survivors:\r\n survivors.append(f['individual'])\r\n return survivors", "def add_loop(self, index, node):\r\n index += 1\r\n i = index\r\n while True:\r\n unused = self.find_unused_connection(node)\r\n if unused is None:\r\n break\r\n segment = node.connections[unused]\r\n self.walk.insert(i, segment)\r\n i += 1\r\n node.visited += 1\r\n segment.visited += 1\r\n node = self.other_node_for_segment(node, segment)\r\n self.walk.insert(i, node)\r\n i += 1\r\n return i - index", "def back_prop(nodes_in_path, playout_result):\n for temp_node in nodes_in_path:\n temp_node.visited += 1\n if str(playout_result) == str(temp_node.side):\n temp_node.winning += 1", "def _refine_matrix_with_additional_connections(self):\n new_graph = self.graph.copy()\n for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):\n if self.graph.node[node][\"type\"] == \"hashtag\":\n for neighbour in self.graph.neighbors(node):\n if self.graph.node[neighbour][\"type\"] == \"username\":\n for other_node in self.graph.neighbors(neighbour):\n if self.graph.node[other_node][\"type\"] == \"hashtag\" \\\n and not self.graph.has_edge(node, other_node) \\\n and not node == other_node:\n new_graph.add_edge(node, other_node)\n self.graph = new_graph", "def insert_communities_to_log(log, partition, graph):\n communities = create_community_dict(partition, graph)\n\n for trace in log:\n for event in trace:\n if event['concept:name'] in communities.keys():\n event['community'] = communities[event['concept:name']]\n return log", "def attach_nodes_to_person_graph(user_id, amount_nodes_to_add=3):\n for i in range(amount_nodes_to_add):\n graph_interface.Person.add_skill(user_id, get_random_noun())\n graph_interface.Person.add_resource(user_id, get_random_noun())\n graph_interface.Person.add_interest(user_id, get_random_noun())", "def __step(self, G):\n new_infected_node_set = self.infected_node_set.copy()\n #look for new infections\n for node in self.infected_node_set:\n #try to infect neighbors\n for neighbor in G.neighbors(node):\n if random() < 
self.p:\n new_infected_node_set.add(neighbor)\n\n #look for recuperations\n for node in self.infected_node_set:\n #try to recuperate\n if random() < self.q:\n new_infected_node_set.remove(node)\n #set new infected nodes\n self.infected_node_set = new_infected_node_set", "def set_neighbor(self):\n for node in self.node:\n for other in self.node:\n if other.id != node.id and distance.euclidean(node.location, other.location) <= node.com_ran:\n node.neighbor.append(other.id)", "def create_network(self, community_detection, wt_steps, n_clust, network_from, neighbors, top):\n \n if network_from == 'top_n':\n sort_by_scores = []\n\n for pair, score in scores_update.items():\n sort_by_scores.append([pair[0], pair[1], score[2]])\n top_n = sorted(sort_by_scores, reverse=False, key=lambda x: x[2])[:top]\n\n # Convert from distance to similarity for edge\n for score in top_n: \n c = 1/(1 + score[2])\n score[2] = c\n\n flat = [tuple(pair) for pair in top_n]\n\n elif network_from == 'knn': \n flat = []\n projection_knn = nearest_neighbors(neighbors=neighbors)\n\n for projection, knn in projection_knn.items():\n for n in knn:\n flat.append((projection, n[0], abs(n[3]))) # p1, p2, score\n\n clusters = {}\n g = Graph.TupleList(flat, weights=True)\n\n if community_detection == 'walktrap':\n try:\n wt = Graph.community_walktrap(g, weights='weight', steps=wt_steps)\n cluster_dendrogram = wt.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n elif community_detection == 'betweenness':\n try:\n ebs = Graph.community_edge_betweenness(g, weights='weight', directed=True)\n cluster_dendrogram = ebs.as_clustering(n_clust)\n except:\n self.show_cluster_fail()\n\n for community, projection in enumerate(cluster_dendrogram.subgraphs()):\n clusters[community] = projection.vs['name']\n\n #convert node IDs back to ints\n for cluster, nodes in clusters.items():\n clusters[cluster] = sorted([int(node) for node in nodes])\n \n remove_outliers(clusters)\n\n clustered = []\n for cluster, nodes in clusters.items():\n for n in nodes:\n clustered.append(n)\n\n clusters['singles'] = [] # Add singles to clusters if not in top n scores\n clusters['removed'] = []\n \n for node in projection_2D:\n if node not in clustered and node not in drop:\n clusters['singles'].append(node)\n elif node in drop:\n clusters['removed'].append(node)\n \n G = nx.Graph()\n\n for pair in flat:\n G.add_edge(int(pair[0]), int(pair[1]), weight=pair[2])\n\n #if you want to see directionality in the networkx plot\n #G = nx.MultiDiGraph(G)\n\n #adds singles if not in top n scores\n for node_key in projection_2D:\n if node_key not in G.nodes:\n G.add_node(node_key)\n\n return flat, clusters, G", "def add_cell_and_edges(self,nodes,**kws): \n for a,b in circular_pairs(nodes):\n j=self.nodes_to_edge(a,b)\n if j is None:\n self.add_edge(nodes=[a,b])\n return self.add_cell(nodes=nodes,**kws)", "def go_or_grow(lgca):\n relevant = lgca.cell_density[lgca.nonborder] > 0\n coords = [a[relevant] for a in lgca.nonborder]\n n_m = lgca.nodes[..., :lgca.velocitychannels].sum(-1)\n n_r = lgca.nodes[..., lgca.velocitychannels:].sum(-1)\n M1 = np.minimum(n_m, lgca.restchannels - n_r)\n M2 = np.minimum(n_r, lgca.velocitychannels - n_m)\n for coord in zip(*coords):\n # node = lgca.nodes[coord]\n n = lgca.cell_density[coord]\n\n n_mxy = n_m[coord]\n n_rxy = n_r[coord]\n\n rho = n / lgca.K\n j_1 = npr.binomial(M1[coord], tanh_switch(rho, kappa=lgca.kappa, theta=lgca.theta))\n j_2 = npr.binomial(M2[coord], 1 - tanh_switch(rho, kappa=lgca.kappa, theta=lgca.theta))\n n_mxy += 
j_2 - j_1\n n_rxy += j_1 - j_2\n n_mxy -= npr.binomial(n_mxy * np.heaviside(n_mxy, 0), lgca.r_d)\n n_rxy -= npr.binomial(n_rxy * np.heaviside(n_rxy, 0), lgca.r_d)\n M = min([n_rxy, lgca.restchannels - n_rxy])\n n_rxy += npr.binomial(M * np.heaviside(M, 0), lgca.r_b)\n\n v_channels = [1] * n_mxy + [0] * (lgca.velocitychannels - n_mxy)\n v_channels = npr.permutation(v_channels)\n r_channels = np.zeros(lgca.restchannels)\n r_channels[:n_rxy] = 1\n node = np.hstack((v_channels, r_channels))\n lgca.nodes[coord] = node", "def metis_partition(G):\n partition_list = partition(G, 2)[1]\n for i in range(2):\n for username in partition_list[i]:\n G.add_node(username, cluster=i)\n \n return G", "def neighbors_iter(node, topology):\n return topology[node]", "def neighbors(node, topology):\n return [n for n in topology[node]]", "def move_up_node(g,i,k): # i: node address of null element, k: coefficient\n d = i%k*k #d: address of dependent node coindexed with i\n # co-index stored in 'ctag'\n g.nodes[d]['ctag'] = g.nodes[i]['ctag']\n h = g.nodes[d]['head'] # assumption: 'head' exists\n h_new = chose_head(g,h,d)\n g.nodes[d]['head'] = h_new\n rel = g.nodes[d]['rel']\n g.nodes[d]['rel'] = 'NP2P'\n g.add_arc(h_new,d)\n g.nodes[h]['deps'][rel].remove(d)", "def iter_nodes(self):", "def add_self_loops(graph):\n num_nodes = normalizations.compute_num_nodes(graph)\n senders = np.concatenate(\n (np.arange(num_nodes), np.asarray(graph.senders, dtype=np.int32)))\n receivers = np.concatenate(\n (np.arange(num_nodes), np.asarray(graph.receivers, dtype=np.int32)))\n\n return graph._replace(\n senders=senders,\n receivers=receivers,\n edges=np.ones_like(senders),\n n_edge=np.asarray([senders.shape[0]]))", "def run(self, infected_graph):\n pos = nx.spring_layout(infected_graph)\n points = np.zeros((len(pos), 2))\n i = 0\n for p in pos:\n points[i] = pos[p]\n i += 1\n \n hull = ConvexHull(points)\n nodes = list(pos)\n return [nodes[p] for p in hull.vertices]", "def collaple_node(self, offset):\n raise NotImplementedError", "def neighborJoining(distances):\n\n tree = {}\n\n while(len(distances.keys()) > 2):\n\n r = calcRs(distances)\n M = makeMMatrix(distances, r)\n\n smallest = 10000\n smallestKey = (\"\",\"\")\n\n #Find nearest neighbors\n for key in M.keys():\n for subkey in M[key].keys():\n if M[key][subkey] < smallest:\n smallest = M[key][subkey]\n smallestKey = (key, subkey)\n\n #Add new node and update distances to rest of tree\n newname = smallestKey[0] + \"-\" + smallestKey[1]\n distances[newname] = {}\n tree[smallestKey[0]] = {}\n tree[smallestKey[1]] = {}\n dij = distances[smallestKey[0]][smallestKey[1]]\n for key in M.keys():\n if key in smallestKey:\n continue\n distances[newname][key] = .5*(distances[smallestKey[0]][key] \\\n + distances[smallestKey[1]][key] - dij)\n distances[key][newname] = distances[newname][key]\n\n #Update distances to parents of node\n dik = (dij + r[smallestKey[0]] - r[smallestKey[1]])/2\n tree[smallestKey[0]][newname] = dik\n tree[smallestKey[1]][newname] = dij-dik\n detachDict(distances, smallestKey[0], smallestKey[1])\n\n #Connect final two nodes\n tree[distances.keys()[0]] = {}\n tree[distances.keys()[0]][distances[distances.keys()[0]].keys()[0]] =\\\n distances[distances.keys()[0]][distances[distances.keys()[0]].keys()[0]] \n return tree", "def compute_perm(parents):\n # Function written by M. 
Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L167\n\n # Order of last layer is random (chosen by the clustering algorithm).\n indices = []\n if len(parents) > 0:\n M_last = max(parents[-1]) + 1\n indices.append(list(range(M_last)))\n\n for parent in parents[::-1]:\n #print('parent: {}'.format(parent))\n\n # Fake nodes go after real ones.\n pool_singeltons = len(parent)\n\n indices_layer = []\n for i in indices[-1]:\n indices_node = list(np.where(parent == i)[0])\n assert 0 <= len(indices_node) <= 2\n #print('indices_node: {}'.format(indices_node))\n\n # Add a node to go with a singelton.\n if len(indices_node) == 1:\n indices_node.append(pool_singeltons)\n pool_singeltons += 1\n #print('new singelton: {}'.format(indices_node))\n # Add two nodes as children of a singelton in the parent.\n elif len(indices_node) == 0:\n indices_node.append(pool_singeltons+0)\n indices_node.append(pool_singeltons+1)\n pool_singeltons += 2\n #print('singelton childrens: {}'.format(indices_node))\n\n indices_layer.extend(indices_node)\n indices.append(indices_layer)\n\n # Sanity checks.\n for i,indices_layer in enumerate(indices):\n M = M_last*2**i\n # Reduction by 2 at each layer (binary tree).\n assert len(indices[0] == M)\n # The new ordering does not omit an indice.\n assert sorted(indices_layer) == list(range(M))\n\n return indices[::-1]", "def accession_up(node: PhyloNode, rank: str, target: Optional[PhyloNode]):\n\n # if projection target exists, project to target\n if target is not None:\n target.reads += node.reads\n node.reads.clear()\n # if node is of target rank, set as target\n elif node.rank == rank:\n target = node\n\n # propagate\n for child in node.children:\n accession_up(child, rank, target)", "def __assign_level(vertex: \"Vertex\", level, already_assigned: \"List[Vertex]\"):\n vertex.level = level\n already_assigned.append(vertex)\n for neighbour in vertex.neighbours:\n if neighbour not in already_assigned:\n __assign_level(neighbour, level + 1, already_assigned)", "def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)", "def simple_linkage(x):\n \n nodes = []\n edges = []\n for i in range(len(x)):\n node_attr ={\"lvl\":x[i]}\n nodes.append((i, node_attr))\n edges.append((i,i+1,{'weight':1}))\n edges.pop()\n \n g =nx.Graph()\n g.add_nodes_from(nodes) \n g.add_edges_from(edges) \n return g", "def update_cell_nodes(self):\n self.cells['nodes'] = -1\n\n for c in range(self.Ncells()):\n # consider two edges at a time, and find the common node\n for i,(ja,jb) in enumerate(circular_pairs(self.cell_to_edges(c))):\n for n in self.edges['nodes'][ja,:]: \n if n in self.edges['nodes'][jb]:\n self.cells['nodes'][c,i] = n\n break", "def sample_cond_on_subtree_nodes(new, tree, subtree_nodes, subtree_edges, subtree_adjlist):\n new_separators = {}\n new_cliques = set()\n old_cliques = set()\n subtree_order = len(subtree_nodes)\n #print(\"subtree nodes:\" + str(subtree_nodes))\n\n if subtree_order == 0:\n # If the tree, tree is empty (n isolated node),\n # add random neighbor.\n c = frozenset([new])\n new_cliques.add(c)\n #c2 = tree.nodes()[0] # nx 1.9\n c2 = list(tree.nodes())[0] # GraphTool\n #c2 = list(tree.nodes)[0] # nx 2.1\n tree.add_node(c, label=tuple([new]), color=\"red\")\n tree.add_edge(c, c2, label=tuple([]))\n\n sep = frozenset()\n #tree.fix_graph()\n trilearn.graph.junction_tree.randomize_at_sep(tree, sep)\n\n new_separators[sep] = [(c, c2)]\n # tree TODO: the actual value for the key is not needed.\n P = {c: 
np.exp(-tree.log_nu(sep))}\n return (old_cliques, new_cliques, new_separators, P, {c: 1.0})\n\n S = {c: set() for c in subtree_nodes}\n M = {c: set() for c in subtree_nodes}\n for c in S:\n for neig in subtree_adjlist[c]:\n #S[c] = S[c] | (c & neig)\n S[c] |= (c & neig)\n RM = {c: c - S[c] for c in S}\n C = {c: set() for c in subtree_nodes}\n P = {}\n N_S = {c: set() for c in subtree_nodes}\n\n sepCondition = {}\n for c in RM:\n sepCondition[c] = len({neig for neig in subtree_adjlist[c] if\n S[c] == neig & c}) > 0 or len(subtree_adjlist) == 1\n\n if sepCondition[c] is True:\n tmp = np.array(list(RM[c]))\n first_node = []\n if len(tmp) > 0:\n # Connect to one node\n first_ind = np.random.randint(len(tmp))\n first_node = tmp[[first_ind]]\n tmp = np.delete(tmp, first_ind)\n\n rest = set()\n if len(tmp) > 0:\n # Connect to the rest of the nodes if there are any left\n rest = aux.random_subset(tmp)\n M[c] = frozenset(rest | set(first_node))\n else:\n M[c] = frozenset(aux.random_subset(RM[c]))\n\n # Create the new cliques\n for clique in M:\n C[clique] = frozenset(M[clique] | S[clique] | {new})\n new_cliques.add(C[clique])\n\n # Get the neighbor set of each c which can be moved to C[c]\n for clique in subtree_nodes:\n N_S[clique] = {neig for neig in tree.neighbors(clique)\n if neig & clique <= C[clique] and neig not in subtree_nodes}\n\n # Add the new cliques\n #for c in subtree_nodes:\n # tree.add_node(C[c], label=str(tuple(C[c])), color=\"red\")\n tree.add_nodes_from([C[c] for c in subtree_nodes])\n\n # Construct and add the new edges between the new cliques,\n # replicating the subtree\n new_subtree_edges = []\n for e in subtree_edges:\n sep = C[e[0]] & C[e[1]]\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[e[0]], C[e[1]]))\n #new_subtree_edges.append((C[e[0]], C[e[1]]))\n tree.add_edge(C[e[0]], C[e[1]])\n # tree.add_edges_from(new_subtree_edges)\n\n # Connect cliques in the subtree to the new cliques\n for c in subtree_nodes:\n # Move the neighbors of a swallowed node to the swallowing node\n # Remove the swallowed node\n\n if C[c] - {new} == c:\n # If connecting to all nodes in a clique (the node should be replaces instead)\n for neig in tree.neighbors(c):\n if neig not in subtree_nodes:\n tree.add_edge(C[c], neig)#, label=lab)\n\n tree.remove_node(c)\n old_cliques.add(c)\n else: # If not connecting to every node in a clique\n sep = C[c] & c\n if not sep in new_separators:\n new_separators[sep] = []\n new_separators[sep].append((C[c], c))\n\n #print \"adding edge: \" + str((C[c], c))\n tree.add_edge(C[c], c)\n # Pick random subset of neighbors intersecting with subset of S U M\n\n N = aux.random_subset(N_S[c])\n #for neig in N:\n # tree.add_edge(C[c], neig)\n\n tree.add_edges_from([(C[c], neig) for neig in N])\n tree.remove_edges_from([(c, neig) for neig in N])\n\n # Compute probabilities\n N = {}\n for c in subtree_nodes:\n if sepCondition[c] is False:\n # Every internal node in c belongs to a separator\n P[c] = np.power(2.0, - len(RM[c]))\n #P[c] = Fraction(1, 2 ** len(RM[c]))\n if not len(c) + 1 == len(C[c]):\n N[c] = np.power(2.0, - len(N_S[c]))\n #N[c] = Fraction(1, 2 ** len(N_S[c]))\n else:\n N[c] = 1.0\n #N[c] = Fraction(1,1)\n else:\n P[c] = 1.0\n #P[c] = Fraction(1, 1)\n N[c] = 1.0\n #N[c] = Fraction(1, 1)\n if len(RM[c]) > 1:\n P[c] = 1.0 / len(RM[c])\n #P[c] = Fraction(1, len(RM[c]))\n P[c] *= np.power(2.0, - (len(RM[c]) - 1.0)) * len(M[c])\n #P[c] *= Fraction(len(M[c]), 2 ** (len(RM[c]) - 1.0)) \n if not len(c) + 1 == len(C[c]): # c 
not swallowed by C[c]\n #N[c] = np.power(2.0, - len(N_S[c]))\n N[c] = Fraction(1, 2 ** len(N_S[c]))\n\n # Remove the edges in tree\n tree.remove_edges_from(subtree_edges)\n # Todo: This will introduce a bug if we instead replace a node.\n return (old_cliques, new_cliques, new_separators, P, N)", "def __follow_node(node, tree_graph, seed_space, seed):\n\n def node_has_filter(x):\n \"\"\"\n Check if a node is a pattern node and has an object filter\n \"\"\"\n p_node = list(self.__plan_graph.objects(subject=x, predicate=AGORA.byPattern))\n try:\n p_node = p_node.pop()\n return 'filter_object' in self.__patterns[p_node] or 'filter_subject' in self.__patterns[p_node]\n except IndexError:\n return False\n\n try:\n # Get the sorted list of current node's successors\n nxt = sorted(list(self.__plan_graph.objects(node, AGORA.next)),\n key=lambda x: node_has_filter(x), reverse=True)\n\n # Per each successor...\n for n in nxt:\n if seed_space in self.__node_spaces[n]:\n node_patterns = self.__node_patterns.get(n, [])\n\n # In case the node is not a leaf, 'onProperty' tells which is the next link to follow\n try:\n link = list(self.__plan_graph.objects(subject=n, predicate=AGORA.onProperty)).pop()\n except IndexError:\n link = None\n\n filter_next_seeds = set([])\n next_seeds = set([])\n # If the current node is a pattern node, it must search for triples to yield\n for pattern in node_patterns:\n pattern_space = self.__patterns[pattern].get('space', None)\n if pattern_space != seed_space or seed in self.__subjects_to_ignore[pattern_space]:\n continue\n\n subject_filter = self.__patterns[pattern].get('filter_subject', None)\n if subject_filter is not None and seed != subject_filter:\n self.__subjects_to_ignore[pattern_space].add(seed)\n continue\n\n pattern_link = self.__patterns[pattern].get('property', None)\n\n # If pattern is of type '?s prop O'...\n if pattern_link is not None:\n if (seed, pattern_link) not in self.__fragment:\n obj_filter = self.__patterns[pattern].get('filter_object', None)\n if on_plink is not None:\n on_plink(pattern_link, [seed], pattern_space)\n\n seed_was_filtered = True\n try:\n for seed_object in list(\n __process_pattern_link_seed(seed, tree_graph, pattern_link)):\n __check_stop()\n quad = (pattern, seed, pattern_link, seed_object)\n if obj_filter is None or u''.join(seed_object).encode(\n 'utf-8') == u''.join(obj_filter.toPython()).encode('utf-8'):\n self.__fragment.add((seed, pattern_link))\n __put_triple_in_queue(quad)\n seed_was_filtered = False\n if isinstance(obj_filter, URIRef):\n filter_next_seeds.add(obj_filter)\n if obj_filter is not None and seed_was_filtered:\n self.__subjects_to_ignore[pattern_space].add(seed)\n except AttributeError as e:\n log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))\n\n # If pattern is of type '?s a Concept'...\n obj_type = self.__patterns[pattern].get('type', None)\n if obj_type is not None:\n check_type = self.__patterns[pattern].get('check', False)\n if on_type is not None:\n on_type(obj_type, [seed], pattern_space)\n\n __dereference_uri(tree_graph, seed)\n try:\n seed_objects = list(tree_graph.objects(subject=seed, predicate=link))\n for seed_object in seed_objects:\n type_triple = (pattern, seed_object, RDF.type, obj_type)\n # In some cases, it is necessary to verify the type of the seed\n if (seed_object, obj_type) not in self.__fragment:\n if check_type:\n __dereference_uri(tree_graph, seed_object)\n types = list(\n tree_graph.objects(subject=seed_object, predicate=RDF.type))\n if obj_type in 
types:\n self.__fragment.add((seed_object, obj_type))\n __put_triple_in_queue(type_triple)\n else:\n self.__subjects_to_ignore[pattern_space].add(seed_object)\n else:\n self.__fragment.add((seed_object, obj_type))\n __put_triple_in_queue(type_triple)\n except AttributeError as e:\n log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))\n\n # If the current node is not a leaf... go on finding seeds for the successors\n if link is not None and seed not in self.__subjects_to_ignore[seed_space]:\n if on_link is not None:\n on_link(link, [seed], seed_space)\n __process_link_seed(seed, tree_graph, link, next_seeds)\n\n if filter_next_seeds:\n next_seeds = set.intersection(next_seeds, filter_next_seeds)\n\n chs = list(chunks(list(next_seeds), min(len(next_seeds), max(1, workers / 2))))\n next_seeds.clear()\n try:\n while True:\n __check_stop()\n chunk = chs.pop()\n threads = []\n for s in chunk:\n try:\n workers_queue.put_nowait(s)\n future = pool.submit(__follow_node, n, tree_graph, seed_space, s)\n threads.append(future)\n except Queue.Full:\n # If all threads are busy...I'll do it myself\n __follow_node(n, tree_graph, seed_space, s)\n except Queue.Empty:\n pass\n\n wait(threads)\n [(workers_queue.get_nowait(), workers_queue.task_done()) for _ in threads]\n except (IndexError, KeyError):\n pass\n except Queue.Full:\n stop_event.set()\n except Exception as e:\n traceback.print_exc()\n log.error(e.message)\n return", "def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray], seeds: Union[np.ndarray, dict] = None) \\\n -> 'Propagation':\n adjacency = check_format(adjacency)\n n = adjacency.shape[0]\n index_seed, index_remain, labels_seed = self._instanciate_vars(adjacency, seeds)\n\n if self.node_order == 'random':\n np.random.shuffle(index_remain)\n elif self.node_order == 'decreasing':\n index = np.argsort(-adjacency.T.dot(np.ones(n))).astype(np.int32)\n index_remain = index[index_remain]\n elif self.node_order == 'increasing':\n index = np.argsort(adjacency.T.dot(np.ones(n))).astype(np.int32)\n index_remain = index[index_remain]\n\n labels = -np.ones(n, dtype=np.int32)\n labels[index_seed] = labels_seed\n labels_remain = np.zeros_like(index_remain, dtype=np.int32)\n\n indptr = adjacency.indptr.astype(np.int32)\n indices = adjacency.indices.astype(np.int32)\n if self.weighted:\n data = adjacency.data.astype(np.float32)\n else:\n data = np.ones(n, dtype=np.float32)\n\n t = 0\n while t < self.n_iter and not np.array_equal(labels_remain, labels[index_remain]):\n t += 1\n labels_remain = labels[index_remain].copy()\n labels = np.asarray(vote_update(indptr, indices, data, labels, index_remain))\n\n membership = membership_matrix(labels)\n membership = normalize(adjacency.dot(membership))\n\n self.labels_ = labels\n self.membership_ = membership\n\n return self", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = 
v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def traverse_up(node, node_callback):\n node_callback(node)\n for e in node.edges_out:\n traverse_up(e.dst, node_callback)", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def propagate_lineage(nodes):\n for name in nodes:\n # Called just to populate lineages, both way\n get_node_lineage(nodes, name, 'up', 'ancestors')\n get_node_lineage(nodes, name, 'down', 'descendants')", "def recurse(hp):\n global G\n nodes = G.nodes(data=True)\n p_insert = hp.p_insert if count_boxes() > hp.initial_boxes else 1.\n for node in nodes:\n try:\n if node[1][\"shape\"] is \"square\":\n if random.random() < p_insert:\n insert_motif(hp, id=node[0])\n except Exception as e:\n log('exception in recurse', e)", "def community_layout(g, partition):\n\n pos_communities = _position_communities(g, partition, scale=3.)\n\n pos_nodes = _position_nodes(g, partition, scale=1.)\n\n # combine positions\n pos = dict()\n for node in g.nodes():\n pos[node] = pos_communities[node] + pos_nodes[node]\n\n return pos", "def rigidNodesAdder(self):\n N = len(self.aircraftConnectedNodes[0])\n logger.debug('\\n'*20)\n # logger.debug(self.aircraftConnectedNodes[0])\n newConnectedNodes = []\n np.set_printoptions(0)\n for i in range(N):\n # Connects the wing nodes to one another\n wingNode = self.aircraftConnectedNodes[0][i][2]\n fuselageNode = self.aircraftConnectedNodes[0][i][3]\n newNodes = self.settings['wing' + str(i+1)]['FEM']['rigidNodes']\n newConnectedNodes.append(self.aircraftConnectedNodes[0][i])\n M = int(np.ceil(newNodes/2))\n for j in range(1,M+1):\n temp1 = copy.deepcopy(self.aircraftConnectedNodes[0][i])\n temp2 = copy.deepcopy(self.aircraftConnectedNodes[0][i])\n # self.aircraftNonRotatingNodes.append('wing_' + str(i+1) + '_')\n # logger.debug(self.aircraftNodesNames[i+1][int(wingNode+j)])\n # logger.debug(self.aircraftNodesNames[i+1][int(wingNode-j)])\n # logger.debug(self.aircraftNodesNames[i+1])\n if wingNode - M > 0:\n temp1[2] = wingNode + j\n temp2[2] = wingNode - j\n # logger.debug(temp1)\n # logger.debug(temp2)\n newConnectedNodes.append(temp1)\n newConnectedNodes.append(temp2)\n self.aircraftNonRotatingNodes.append(self.aircraftNodesNames[i+1][int(wingNode+j)])\n self.aircraftNonRotatingNodes.append(self.aircraftNodesNames[i+1][int(wingNode+j)])\n else:\n temp1[2] = wingNode + j\n temp2[2] = wingNode + j + M\n newConnectedNodes.append(temp1)\n newConnectedNodes.append(temp2)\n # Connects the fuselage nodes that are connected to a wing:\n for i in range(N):\n # Tests if it's a fuselage node\n # logger.debug(self.aircraftConnectedNodes[0][i])\n temp = self.aircraftConnectedNodes[0][i]\n if temp[1] == 0:\n newNodes = self.settings['fuselage']['FEM']['rigidNodes']\n M = int(np.ceil(newNodes)/2)\n fuselageNode = temp[3]\n wingNode = temp[2]\n\n for j in range(1,M+1):\n newNodes = self.settings['wing' + str(int(temp[0]))]['FEM']['rigidNodes']\n for k in 
range(newNodes):\n if fuselageNode - M > 0:\n # WARNING: move this bit of code only if you are sure\n # of what you are doing. This one can lead to unwanted\n # bugs!!\n # Wing uid\n # temp1[0] = 0\n # temp2[0] = 0\n \n # Fuselage uid\n # temp1[1] = 0\n # temp2[1] = 0\n # logger.debug(j)\n if wingNode - M > 0:\n temp1 = copy.deepcopy(temp)\n temp2 = copy.deepcopy(temp)\n temp1[2] = wingNode + k\n temp2[2] = wingNode - k\n temp1[3] = fuselageNode + j\n temp2[3] = fuselageNode - j\n newConnectedNodes.append(temp1)\n newConnectedNodes.append(temp2)\n else:\n temp1 = copy.deepcopy(temp)\n temp2 = copy.deepcopy(temp)\n temp1[2] = wingNode + k\n temp2[2] = wingNode + M + k\n temp1[3] = fuselageNode + j\n temp2[3] = fuselageNode - j\n newConnectedNodes.append(temp1)\n newConnectedNodes.append(temp2)\n\n else:\n pass\n # logger.debug(temp)\n # logger.debug(temp[3])\n for i in newConnectedNodes:\n logger.debug(i)\n \n logger.debug('\\n'*5)\n # logger.debug(newConnectedNodes[0])\n self.aircraftConnectedNodes = newConnectedNodes\n for i in self.aircraftConnectedNodes:\n logger.debug(i)\n # sys.exit()", "def transition(s, direction):\n new_pos = [sum(x) for x in zip(s, direction)] # sum up every element at same index of two lists\n if hit_wall(new_pos):\n return s\n else:\n return new_pos", "def update_neighbours(self, iteration, iterations, input_vector, bmu):\n\n t = iteration / iterations\n learning_rate = self.learning_rate(t)\n for node in self.codebook:\n influence = self.codebook.neighbourhood(node, bmu, t)\n node.update(learning_rate, influence, input_vector, bmu)", "def visit(self, node):", "def visit(self, node):", "def infomap_communities(G):\n name_map = {}\n name_map_inverted = {}\n for n in G.nodes():\n id_ = hash(n) % 100000\n name_map_inverted[id_] = n\n name_map[n] = id_\n \n infomapSimple = infomap.Infomap(\"--two-level\")\n network = infomapSimple.network()\n \n for n1, n2, data in G.edges(data=True):\n network.addLink(name_map[n1], name_map[n2], data['weight'] if 'weight' in data else 1)\n\n infomapSimple.run()\n\n return dict(\n (name_map_inverted[node.physicalId], node.moduleIndex())\n for node in infomapSimple.iterTree()\n if node.isLeaf()\n )", "def add_neighbors(node2feature, adj_dict):\n for key, val in adj_dict.items():\n node2feature[key]['neighbors'] = val\n \n return node2feature", "def coarsen(A, levels, self_connections=False):\n # Function written by M. 
Defferrard, taken (almost) verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L5\n graphs, parents = metis(A, levels)\n perms = compute_perm(parents)\n\n for i, A in enumerate(graphs):\n M, M = A.shape\n\n if not self_connections:\n A = A.tocoo()\n A.setdiag(0)\n\n if i < levels:\n A = perm_adjacency(A, perms[i])\n\n A = A.tocsr()\n A.eliminate_zeros()\n graphs[i] = A\n\n# Mnew, Mnew = A.shape\n# print('Layer {0}: M_{0} = |V| = {1} nodes ({2} added),'\n# '|E| = {3} edges'.format(i, Mnew, Mnew-M, A.nnz//2))\n\n\n return graphs, perms[0] if levels > 0 else None", "def communityGraph(graph):\n\n lapgr = nx.laplacian_matrix(graph)\n\n # Get the eigenvalues and eigenvectors of the Laplacian matrix\n evals, evec = np.linalg.eigh(lapgr.todense())\n\n fiedler = evec[1]\n results = []\n ## \"Fiedler\", fiedler\n median = np.median(fiedler, axis=1) # median of the second eigenvalue\n for i in range(0, fiedler.size): # divide the graph nodes into two\n if(fiedler[0, i] < median):\n results.append(0)\n else:\n results.append(1)\n return results, evals, evec", "def calculate_nr_of_upslope_cells(node_conn_mat, rows, cols, traps, steepest_spill_pairs, d4):\n\n # Retrieve the expanded connectivity matrix with traps as nodes\n node_conn_mat = expand_conn_mat(node_conn_mat, len(traps))\n conn_mat = reroute_trap_connections(node_conn_mat, rows, cols, traps, steepest_spill_pairs, d4)\n\n # The flow starts in the start_cells. These are the cells without flow leading in to them\n start_nodes = calculate_flow_origins(conn_mat, traps, rows, cols)\n flow_acc, one_or_trap_size = assign_initial_flow_acc(traps, start_nodes, rows, cols)\n _, next_nodes = conn_mat[start_nodes, :].nonzero()\n next_nodes = np.unique(next_nodes)\n\n current_nodes = next_nodes\n it = 0\n\n while len(current_nodes) > 0:\n print 'Iteration: ', it\n # Current nodes cannot be assigned flow without previous nodes having flow assigned\n previous_nodes, corr_current_index = conn_mat[:, current_nodes].nonzero()\n _, flow_to_each_current = np.unique(corr_current_index, return_counts=True)\n previous_nodes_with_flow = flow_acc[previous_nodes] > 0\n remove_indices = corr_current_index[previous_nodes_with_flow == False]\n keep_indices = np.setdiff1d(np.arange(0, len(current_nodes), 1), remove_indices)\n sorting_order = np.argsort(corr_current_index)\n previous_nodes = previous_nodes[sorting_order]\n assign_flow_indices = np.setdiff1d(current_nodes, current_nodes[remove_indices])\n\n # Calculate flow to current nodes having previous nodes with assigned flow\n splits = np.cumsum(flow_to_each_current)\n nodes_to_each_current = np.split(previous_nodes, splits)[:-1]\n flow_to_each_current = np.asarray([np.sum(flow_acc[el]) for el in nodes_to_each_current])\n flow_acc[current_nodes[keep_indices]] = flow_to_each_current[keep_indices]\n\n # Add one or the trap size\n flow_acc[assign_flow_indices] += one_or_trap_size[assign_flow_indices]\n\n it += 1\n if len(assign_flow_indices) > 0:\n _, next_nodes = conn_mat[assign_flow_indices, :].nonzero()\n next_nodes = np.unique(next_nodes)\n unassigned_current_nodes = current_nodes[remove_indices]\n current_nodes = np.union1d(next_nodes, unassigned_current_nodes)\n\n else:\n current_nodes = []\n\n # Map from trap nodes back to traps\n for i in range(len(traps)):\n trap = traps[i]\n flow_acc[trap] = flow_acc[rows * cols + i]\n\n flow_acc = flow_acc[:rows * cols]\n flow_acc = flow_acc.reshape(rows, cols)\n\n return flow_acc", "def connect_crosswalk_nodes(sidewalk_network, 
crosswalk_node_ids):\n # crosswalk_node_ids = crosswalk.get_node_ids()[:-1] # Crosswalk has a redundant node at the end.\n\n for crosswalk_node_id in crosswalk_node_ids[:-1]:\n try:\n # Get the intersection node and two nodes that created the intersection sidewalk node\n crosswalk_node = sidewalk_network.nodes.get(crosswalk_node_id)\n intersection_node, adjacent_street_node1, adjacent_street_node2 = crosswalk_node.parents\n\n # Connect sidewalk nodes created from adjacent_street_node1 and adjacent_street_node2\n # Get sidewalk nodes that are created from the street node, and\n # identify which one should be connected to crosswalk_node\n for adjacent_street_node in [adjacent_street_node1, adjacent_street_node2]:\n # Skip the dummy node\n if len(adjacent_street_node.get_way_ids()) == 0:\n continue\n\n # Create a vector from the intersection node to the adjacent street node.\n # Then also create a vector from the intersection node to the sidewalk node\n v_adjacent_street_node = intersection_node.vector_to(adjacent_street_node, normalize=True)\n shared_street_id = intersection_node.get_shared_way_ids(adjacent_street_node)[0]\n try:\n sidewalk_node_1_from_intersection, sidewalk_node_2_from_intersection = intersection_node.get_sidewalk_nodes(shared_street_id)\n except TypeError:\n # Todo: Issue #29. Sometimes shared_street_id does not exist in the intersection_node.\n log.exception(\"connect_crosswalk_nodes(): shared_street_id %s does not exist.\" % shared_street_id)\n continue\n v_sidewalk_node_1_from_intersection = intersection_node.vector_to(sidewalk_node_1_from_intersection, normalize=True)\n\n # Check which one of sidewalk_node_1_from_intersection and sidewalk_node_2_from_intersection are\n # on the same side of the road with crosswalk_node.\n # If the sign of the cross product from v_adjacent_street_node to v_crosswalk_node is same as\n # that of v_adjacent_street_node to v_sidewalk_node_1_from_intersection, then\n # sidewalk_node_1_from_intersection should be on the same side.\n # Otherwise, sidewalk_node_2_from_intersection should be on the same side with crosswalk_node.\n v_crosswalk_node = intersection_node.vector_to(crosswalk_node, normalize=True)\n if np.cross(v_adjacent_street_node, v_crosswalk_node) * np.cross(v_adjacent_street_node, v_sidewalk_node_1_from_intersection) > 0:\n node_to_swap = sidewalk_node_1_from_intersection\n else:\n node_to_swap = sidewalk_node_2_from_intersection\n\n sidewalk_network.swap_nodes(node_to_swap, crosswalk_node)\n except ValueError:\n log.exception(\"Error while connecting crosswalk nodes, so skipping...\")\n continue\n return", "def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n 
one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph", "def walk(self, community, now):\n self._get_or_create_timestamps(community).last_walk = now", "def update_pheromones(self, colony):\n\n # Decay all pheromone trails\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes)):\n self.pheromones[i][j] *=(1- self.decay)\n\n # Add to edge pheromones if edge was part of successful tour\n for ant in colony.ants:\n distance = self.get_path_distance(ant.path)\n if distance <= colony.min_distance:\n for i, j in ant.nodes_traversed():\n self.pheromones[i][j] += self.q / distance\n\n # Keep pheromone trails greater than or equal to 0.01, so nodes do not become\n # completely unviable choices.\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes)):\n self.pheromones[i][j] = max(self.pheromones[i][j], self.min_pheromone)", "def anchors_to_adjacency(set_path, n_proteomes, mailbox_reader):\n frame_list = []\n for idx in range(n_proteomes):\n with mailbox_reader(idx) as file_handle:\n frame_list.append(\n pd.read_csv(\n file_handle, sep=\"\\t\", index_col=0\n ).convert_dtypes()\n )\n nodes = pd.concat(\n frame_list,\n ignore_index=True,\n )\n del frame_list\n graph = nx.Graph()\n for unused_tuple, subframe in nodes.groupby(\n by=[\"syn.anchor.id\", \"syn.anchor.sub_id\"]\n ):\n ids = subframe[\"member_ids\"]\n n_ids = len(ids)\n graph.add_nodes_from(ids)\n if n_ids > 1:\n edges = combinations(ids, 2)\n graph.add_edges_from(edges, weight=n_ids)\n outpath = set_path / ANCHORS_FILE\n summarypath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_summary.tsv\"\n )\n histpath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_hist.tsv\"\n )\n components = [\n c\n for c in sorted(nx.connected_components(graph), key=len, reverse=True)\n if len(c) > 1\n ]\n fh = outpath.open(\"w\")\n fh.write(\"idx\\tcluster_id\\tsize\\tmembers\\n\")\n n_items = 0\n count_list = []\n hash_list = []\n id_list = []\n for i, comp in enumerate(components):\n component = np.sort(pd.Index(list(comp)).to_numpy())\n id_list.append(i)\n size = len(comp)\n count_list.append(size)\n hash_list.append(hash_array(component))\n for node in component:\n fh.write(f\"{n_items}\\t{i}\\t{size}\\t{node}\\n\")\n n_items += 1\n fh.close()\n n_clusts = len(count_list)\n del graph, components\n cluster_counts = pd.DataFrame({\"size\": count_list})\n largest_cluster = cluster_counts[\"size\"].max()\n cluster_hist = (\n pd.DataFrame(cluster_counts.value_counts()).sort_index().reset_index()\n )\n cluster_hist = cluster_hist.set_index(\"size\")\n cluster_hist = cluster_hist.rename(columns={0: \"n\"})\n cluster_hist[\"item_pct\"] = (\n cluster_hist[\"n\"] * cluster_hist.index * 100.0 / n_items\n )\n 
cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n cluster_hist[\"cluster_pct\"] = cluster_hist[\"n\"] * 100.0 / n_clusts\n cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n clusters = pd.DataFrame(\n {\"anchor.id\": id_list, \"count\": count_list, \"hash\": hash_list}\n )\n clusters.to_csv(summarypath, sep=\"\\t\")\n stats_dict = {\n \"in_anchor\": n_items,\n \"syn.anchors.n\": n_clusts,\n \"syn.anchors.largest\": largest_cluster,\n }\n return stats_dict", "def tangle(self):\n for x in range(self.width):\n for y in range(self.height):\n cell = self.grid[y][x]\n # connect to neighborhood\n for i in [x-1, x, x+1 if x+1 < self.width else -1]:\n for j in [y-1, y, y+1 if y+1 < self.height else -1]:\n self.g.add_edge(cell, self.grid[j][i])\n\n # remove self loops from graph\n self.g.remove_edges_from(self.g.selfloop_edges())", "def computesWingConnexions(self):\n logger.warning(\"!!! WARINING !!!!\")\n logger.warning(\"BE CERTAIN THAT CPACS FILE HAS THE HORIZONTAL TAIL\")\n logger.warning(\"DEFINED BEFORE THE VERTICAL TAIL!!!!\")\n if self.nWings <= 1:\n logger.info(\"No wing connexions needed\")\n elif self.nWings > 1 and self.nFuselage < 1:\n logger.error(\"Multiple wings with no fuselage! Mesh can not be computed\")\n sys.exit()\n else:\n ws_connextionsPoints = self.nWings + self.nFuselage - 1\n # if self.nFuselage > 0:\n # ws_connextionsPoints = self.nWings\n # else:\n # ws_connextionsPoints = self.nWings -1\n\n # ws: wings\n # d: distance\n # L: left, R: right, c: center\n connectedNodes = np.zeros((ws_connextionsPoints,5))\n for i in range(ws_connextionsPoints):\n \"\"\"\n for each wing there is 3 points that need to be taken into\n account:\n 1) Left tip position[0]\n 2) center [np.floor(nNodes/2))]\n WARNING: keep in mind that symmetric wings always have \n an odd number of points!\n 3) right tip position[-1]\n \"\"\"\n if self.nFuselage > 0:\n wingIndex = i+1\n else:\n wingIndex = i\n # logger.debug(\"Wing index = \"+str(wingIndex))\n # N = len(self.aircraftNodesPoints[i])\n currentWingNpoints = len(self.aircraftNodesPoints[wingIndex])\n if currentWingNpoints > 2:\n c = int(np.floor(len(self.aircraftNodesPoints[wingIndex])/2))\n else:\n c = 0\n # logger.debug(\"c = \"+str(c))\n ws_identifiers = np.empty((ws_connextionsPoints,5))\n\n for j in range(ws_connextionsPoints+1):\n logger.debug(\"wingIndex,j = \"+str(wingIndex)+\" \"+str(j))\n if wingIndex != j:\n # c: stands for current\n logger.debug(\"True with wingIndex,j = \"+str(wingIndex)+\" \"+str(j))\n # Computes the distances between all point\n dist_l = np.linalg.norm(self.aircraftNodesPoints[j] - self.aircraftNodesPoints[wingIndex][0], axis=1)\n if currentWingNpoints > 2:\n dist_c = np.linalg.norm(self.aircraftNodesPoints[j] - self.aircraftNodesPoints[wingIndex][c], axis=1)\n else:\n dist_c = dist_l\n dist_r = np.linalg.norm(self.aircraftNodesPoints[j] - self.aircraftNodesPoints[wingIndex][-1],axis=1)\n\n # Gets the index of the minimal distance between all point\n index_l = np.argmin(dist_l)\n if currentWingNpoints > 2:\n index_c = np.argmin(dist_c)\n else:\n index_c = index_l\n index_r = np.argmin(dist_r)\n indexes = np.array([index_l, index_c, index_r])\n\n # gets the minimal distance for each wing part\n minDist_l = dist_l[index_l]\n if currentWingNpoints > 2:\n minDist_c = dist_c[index_c]\n else:\n minDist_c = minDist_l\n minDist_r = dist_r[index_r]\n minDist = np.array([minDist_l,minDist_c,minDist_r])\n\n k = np.argmin(minDist)\n minimalDistance = minDist[k]\n m = indexes[k]\n r = 
len(self.aircraftNodesPoints[wingIndex])\n tab = np.array([0,c,r])\n k = tab[k]\n\n # minimalDistance: distance between both of these points\n identifier = np.array([wingIndex,j,k,m,minimalDistance])\n # logger.debug(\"Identifier \\n\"+str(identifier))\n if j < wingIndex:\n ws_identifiers[j] = identifier\n else:\n ws_identifiers[j-1] = identifier\n \n index2 = np.argmin(ws_identifiers[:,4])\n old = np.array([ws_identifiers[index2,2],\n ws_identifiers[index2,3],\n ws_identifiers[index2,0],\n ws_identifiers[index2,1],\n ws_identifiers[index2,4]])\n if old in connectedNodes and i > 1:\n logger.warning(\"Found a matching connexion\")\n logger.debug(old)\n logger.debug(ws_identifiers)\n ws_identifiers = np.delete(ws_identifiers,index2,0)\n logger.debug(ws_identifiers)\n index2 = np.argmin(ws_identifiers[:,4])\n connectedNodes[i] = ws_identifiers[index2]\n logger.debug(\"connected Nodes: \\n\"+str(connectedNodes[i]))\n # logger.debug(connectedNodes)\n \n self.aircraftConnectedNodes.append(connectedNodes)\n self.rigidNodesAdder()\n # sys.exit()", "def associate_successors(graph, node=\"\"):\n return {\n \"successors\": [\n {\n \"source\": node,\n \"target\": succ,\n \"edge_attribute\": graph.succ[node][succ][\"edge_attribute\"],\n }\n for succ in graph.succ[node]\n ]\n }", "def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 
'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm", "def test_node_sampling(weighted_graph_config_fixture):\n w_config = weighted_graph_config_fixture\n\n # Node 5 to node 4 has zero weight (zero transition probability)\n # Node 4 to node 5 has ten weight (high transition probability)\n edges = pd.DataFrame({'source_content_id': [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n 'destination_content_id': [5, 1, 0, 3, 4, 1, 2, 1, 3, 5, 3, 4],\n 'weight': [1, 2, 3, 4, 1, 2, 3, 4, 1, 10, 5, 0]}\n )\n wm = N2VModel()\n\n wm.create_graph(edges, w_config['weighted_graph'])\n\n wm.generate_walks(**w_config)\n\n wm.fit_model(**w_config, callbacks=EpochLogger())\n\n n_nodes = len(set(edges.source_content_id))\n n_transitions = n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks']\n\n res = np.array([np.array(list(zip(x, x[1:]))).ravel() for x in wm.node2vec.walks])\n walks = np.reshape(res, (n_transitions, 2))\n\n pairs = pd.DataFrame({'state1': walks[:, 0], 'state2': walks[:, 1]})\n counts = pairs.groupby('state1')['state2'].value_counts().unstack()\n counts = counts.replace(np.nan, 0)\n assert pairs.shape == (n_nodes * (w_config['walk_length'] - 1) * w_config['num_walks'], 2)\n assert counts.iloc[5][4] == 0\n assert counts.iloc[4][5] != 0\n assert len(set(edges['source_content_id']).union(\n set(edges['destination_content_id']))) == len(wm.model.wv.vocab.keys())" ]
[ "0.5967038", "0.5503575", "0.54628575", "0.5436496", "0.54320776", "0.54097146", "0.53649384", "0.53567284", "0.5344505", "0.53354657", "0.5303637", "0.5284366", "0.52824575", "0.52807456", "0.525839", "0.52522177", "0.52431047", "0.52199847", "0.5210583", "0.51947844", "0.5170885", "0.5151944", "0.5148783", "0.51455945", "0.5138633", "0.5134267", "0.51229393", "0.5100826", "0.5099169", "0.5097744", "0.5095899", "0.50816816", "0.5062293", "0.50552714", "0.50521916", "0.50427836", "0.5032907", "0.50304854", "0.5025601", "0.50048345", "0.49946532", "0.4988973", "0.49834293", "0.49805263", "0.49774614", "0.49772018", "0.49758777", "0.49753755", "0.49744555", "0.49639866", "0.49616536", "0.495194", "0.4939474", "0.4934251", "0.49286288", "0.49278805", "0.49161804", "0.49138144", "0.49117175", "0.49085316", "0.49037227", "0.48964867", "0.4881695", "0.48805055", "0.48774475", "0.4868934", "0.4867343", "0.48652032", "0.4860395", "0.48445943", "0.48442924", "0.48383528", "0.48373866", "0.4837043", "0.48367548", "0.48333552", "0.48326492", "0.48304048", "0.48232883", "0.48228827", "0.4821867", "0.4820816", "0.48207432", "0.48190928", "0.48190928", "0.47978294", "0.47968736", "0.4796041", "0.47960076", "0.4794929", "0.47909716", "0.47884527", "0.47858182", "0.47843274", "0.47820395", "0.4778557", "0.47768304", "0.47758606", "0.47717735", "0.4771384" ]
0.6357265
0
Advance the time reference by the given amount.
def advance_by(self, amount: float):
    if amount < 0:
        raise ValueError("cannot retreat time reference: amount {} < 0"
                         .format(amount))
    self.__delta += amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advance(self, amount):\n raise NotImplementedError()", "def advance(self, amount=1):\n self._current += amount\n if self._current - self._updateRate >= self._lastUpdated:\n self.redraw()\n # go to nearest multiple of updateRate less than current\n self._lastUpdated = (self._current // self._updateRate)*self._updateRate", "def advance(self, amount=1):\n raise NotImplementedError()", "def advance(self, amount):\n right_now = self.rightNow + amount\n self._sortCalls()\n while self.calls and self.calls[0].getTime() <= right_now:\n self.rightNow = self.calls[0].getTime()\n call = self.calls.pop(0)\n call.called = 1\n call.func(*call.args, **call.kw)\n self._sortCalls()\n self.rightNow = right_now", "def advance(self):\n self.amount = self._nextAmount", "def advance(self, amount=1):\n self._current += amount\n self.redraw()", "def advance(self, dt):\n self.workTill(self.currentTime + dt)", "def increment(self, amount):\n pass", "def advance(self, delta_t: float) -> None:\n pass", "def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)", "def advanceCompletely(self, amount):\n self.rightNow += amount\n self._sortCalls()\n while self.calls and self.calls[0].getTime() <= self.seconds():\n call = self.calls.pop(0)\n call.called = 1\n yield call.func(*call.args, **call.kw)\n self._sortCalls()", "def advance(self, time):\n raise \"use method advance of class ReactorNet\"\n #return _cantera.reactor_advance(self.__reactor_id, time)", "def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()", "def _advance(self):\n self._current += 1", "def advance_to(self, timestamp: float):\n now = self.__original_time()\n if timestamp < now:\n raise ValueError(\"cannot retreat time reference: \"\n \"target {} < now {}\"\n .format(timestamp, now))\n self.__delta = timestamp - now", "def _advance(self):\n self._prev, self._current = self._current, abs(self._prev - self._current)", "def advance(self):\n # Increment iteration counter\n self.currentIteration += 1\n if self._lastStep:\n # The timestep was adjusted to reach end in the previous call\n # So now the simulation is over\n self.isOver = True\n else:\n if self.currentIteration < self.iterMax:\n # Advance time for the iteration just ended\n self.tk = self.tkp1\n self.tkp1 = self.tk + self.timeStep\n\n # Adjust last timestep to reach self.end\n if self.tkp1 > self.end:\n self.timeStep = self.end - self.tk\n if self.timeStep <= self.tol:\n self.isOver = True\n else:\n self.tkp1 = self.end\n self._lastStep = True\n else:\n # iteration number is reached\n self.isOver = True\n\n self.time = self.tkp1", "def _advance(self):\n self._current += self._increment # Accessing the superclass's field", "def _advance(self):\t\t# override inherited version\n self._current *= self._base", "def advance():\n global angle_movement, bullet_distance, fire, time\n time += 1\n angle_movement += angle_step\n if angle_movement >= 360:\n angle_movement -= 360 # So angle doesn't get too large.\n elif angle_movement < 0:\n angle_movement += 360 # So angle doesn't get too small.", "def _advance(self):\n self._prev, self._current = self._current, self._prev + self._current", "def step(self, value):\n self.real_time += pd.DateOffset(**{self.time_unit: value})\n self.simu_time += value\n logger.debug(\"NEW TIME\")", "def advance(self):\n\n max_days = 
Calendar.months[self.__months - 1]\n if self.__months == 2 and Calendar.leapyear(self.__years):\n max_days += 1\n if self.__days == max_days:\n self.__days = 1\n if self.__months == 12:\n self.__months = 1\n self.__years += 1\n else:\n self.__months += 1\n else:\n self.__days += 1", "def increment_date(self, change_amount=None):\n if change_amount is None:\n change_amount = self._timestep_duration\n\n self._current_date += relativedelta(years=change_amount)\n self._current_timestep = self.timestep_from_date(self._current_date)", "def advance(self, dt):\n for i, p in enumerate(self.persons):\n p.advance(dt)\n self.handle_collisions()", "def increment(self):\n self._deltas += 1", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def advancePosition(self,time):\n velocity = self.getVelocity()\n return self.x + time*velocity", "def advance_time_delta(timedelta):\r\n assert(not utcnow.override_time is None)\r\n try:\r\n for dt in utcnow.override_time:\r\n dt += timedelta\r\n except TypeError:\r\n utcnow.override_time += timedelta", "def countdown(self, amt=1):\n pass", "def advance(self):\n try:\n self.day += 1\n except TypeError:\n try:\n self.month += 1\n self.day = 1\n except TypeError:\n self.year += 1\n self.month = 1\n self.day = 1", "def advance(self, time):\n return _cantera.reactornet_advance(self.__reactornet_id, time)", "def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))", "def advance(self, distance):\n self.cursor += distance", "def update(self, delta_time):\n self.total_time += delta_time", "def increment_number_served(self, amount):\n self.number_served += amount", "def add(self, amount):\n self.amount += amount", "def inc_cycles(self, cycles):\n self.cycle += cycles\n self.global_cycle += cycles", "def _advance(self):\n from math import sqrt\n self._current = sqrt(self._current)", "def advance_processing_time(self, advance_by):\n self._add(ProcessingTimeEvent(advance_by))\n return self", "def _add_time(time_to_add: int):\n store.time += time_to_add", "def increment_datetime(self):\n self.current_datetime += timedelta(seconds=self.step_size)", "def tick(self):\n\n if self.seconds != 59:\n self.seconds += 1\n else:\n self.seconds = 0\n\n if self.minutes != 59:\n self.minutes += 1\n else:\n self.minutes = 0\n\n if self.hours != 23:\n self.hours += 1\n else:\n self.hours = 0", "def __pass_time(self):\n self.hunger += 1\n self.boredom += 1", "def increase_time(self,s):\n self.days += 1\n if self.disease_status > 0:\n self.time_since_infection += 1\n if self.days == 365:\n self.increase_age(s)", "def advance(self, step=1):\n self.set_progress(self._step + step)", "def add(self, time):\n\n self.elapsed_time = self.elapsed_time + time", "def performAction(self, action):\n self.action = action\n self.t += self.dt \n self.step()", "def advance_ticks(self, ticks):\n raise NotImplementedError(\"advance_ticks() was not implemented in a subclass of TurnListItem.\")", "def advance_one(self):\n days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if self.is_leap_year():\n days_in_month[2] = 29\n if self.day == days_in_month[self.month]:\n if self.month==12:\n self.year+=1\n self.month=1\n else:\n self.month+=1\n self.day =1\n else:\n self.day+=1", "def step(self, dt):\n self.time_elapsed += dt\n self.project()", "def inc_tau_trans(self):\r\n self.num_tau_trans += 1", "def _advanceTime(self):\n\n now = time.time()\n dt = now - self.time\n dtIdeal = 1.0 / self.targetFPS\n\n if dt > dtIdeal * 
2:\n # Big jump forward. This may mean we're just starting out, or maybe our animation is\n # skipping badly. Jump immediately to the current time and don't look back.\n\n self.time = now\n animationDt = dt\n\n else:\n # We're approximately keeping up with our ideal frame rate. Advance our animation\n # clock by the ideal amount, and insert delays where necessary so we line up the\n # animation clock with the real-time clock.\n\n self.time += dtIdeal\n animationDt = dtIdeal\n if dt < dtIdeal:\n time.sleep(dtIdeal - dt)\n\n # Log frame rate\n\n self._fpsFrames += 1\n if self.showFPS and now > self._fpsTime + self._fpsLogPeriod:\n fps = self._fpsFrames / (now - self._fpsTime)\n self._fpsTime = now\n self._fpsFrames = 0\n sys.stderr.write(\"%7.2f FPS\\n\" % fps)\n\n return animationDt", "def step(self):\n \n if not self.instant_grow_back:\n self.amount = min([self.max_sugar, self.amount + 1])\n else:\n self.amount = self.max_sugar", "def recharge(self, amount):\n self.action.recharge(self.cardUid, amount)\n self.start()", "def go_to(self, time):\n half_dur = self.half_duration\n self.set_interval((time - half_dur, time + half_dur))", "def next_change(self, t=None):\n if t is None:\n t = self.engine.get_now()\n t -= self.initTime\n\n p = float(self.period) / POINTS_PER_CYCLE\n t2 = math.floor(t / p) * p + p\n\n t2 += self.initTime\n\n return t2", "def advance_time_mins(self, advance_mins=None, dataset_name='ds', time_name='time', comment=\"\"):\n\n if advance_mins == 0 or advance_mins is None or np.isnan(advance_mins):\n print('Not advancing time')\n return\n \n string = 'Advanced the time variable \"{}\" by {} minutes with user comment \"{}\"'.format(time_name, advance_mins, comment)\n self.add_comment('UWA', string, ds_name='ds', data_var=None)\n \n print(string)\n\n dataset = getattr(self, dataset_name)\n dataset = dataset.assign_coords({time_name: dataset[time_name] + np.timedelta64(advance_mins,'m')})\n setattr(self, dataset_name, dataset)", "def round_advance(self):\n\n\t\t# takes current round and advances it by one.\n\t\tif self.model.status <= 9:\n\t\t\t\n\t\t\tself.model.status += 1", "def res_plus_five(unit: ActiveUnit) -> None:\n unit.mod_res += 5", "def forward(self, amount):\n newX = self._x + round(amount * math.sin(math.radians(self._rotation)), 2)\n newY = self._y - round(amount * math.cos(math.radians(self._rotation)), 2)\n self.goto(newX, newY)", "def inc(self, amount=1):\n if amount < 0:\n raise ValueError('Counters can only be incremented by non-negative amounts.')\n self._shared_list.append((self._labels_args, ('inc', amount)))", "def incTurn(self):\n self.turnOn = (self.turnOn+1)%self.turns", "def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.controller.row = self.rps * value", "def move(self, dt):\n dt = dt", "def cool(self):\n self.t = self.t - 1", "def passTime(self, time: int) -> None:\n if self.delayed == True:\n self.delayed = None\n return\n\n if self.enabled == True:\n self.time -= time", "def __next_step(self, state) -> None:\n self.days += 1", "def increment_speed(self):\n self.speed += 0.0004", "def _advance(self, c=1):\n self._index += c", "def _increment_turn(self):\r\n\r\n self.turn_number += 1", "def tick(self):\n val = self.reset()\n self.seconds.append(val)\n self.total += val", "def pay(self, amt: float):\n self._money += amt", "def advance(self):\n self._current_inst += 1\n self._line = self._lines[self._current_inst].strip()", "def advance(self) -> None:\n pass", "def give_raise(self, amount=5000):\n \n self.annual_salary += 
amount", "def give_raise(self, amount=5000):\n\t\tself.annual_salary += amount", "def Incrpower(self, increment):\n self.power += increment", "def give_raise(self, amount=5000):\n self.salary += amount", "def contribute(k):\n global amount\n global _alarm\n amount = amount + k\n # remove the alarm\n if amount >= initial_amount * 0.3:\n _alarm = False", "def increment_steps(self):\n self.num_steps += 1", "def tick(self):\n if Clock.__repr__(self) == 'Clock(23, 59, 59)':\n Clock.tick(self)\n Calendar.advance(self)\n else:\n Clock.tick(self)", "def tick(self, dt):\n # Increment the timer.\n self.timer += dt\n\n # If the timer reached the end.\n if self.timer > Game.DELAY:\n # Reset the timer.\n self.timer = 0\n\n # Advance to the next phase.\n self.__advance()", "def Advance(self, *, forward: bool = True, amount: int = 1, extend: bool = False):\n i = self.Index\n if forward: i += amount\n else: i -= amount\n\n if i > self.Count:\n if extend:\n for _ in range(amount): self.Append('')\n else: i = self.Count\n elif i < 0: i = 0\n\n self.Index = i", "def increment_playback_time(self):\n increment = self.playbackGain * self.playbackTimeout / 1000\n if self.playbackTime + increment <= self.currentEndTime:\n self.playbackTime += increment\n pos = self.playbackTime / self.currentEndTime * self.timeSliderRange\n self.timeSlider.blockSignals(True)\n self.timeSlider.setValue(pos)\n self.timeSlider.blockSignals(False)\n self.playbackTimeChanged.emit()\n else:\n self.pause_animation()\n return", "def tick(self):\n self.count += 1", "def updateTimeStep(self, newDt):\n self.timeStep = newDt", "def advance(self):\n u, f, k, t = self.u, self.f, self.k, self.t\n dt = t[k + 1] - t[k]\n u_new = u[k] + dt * f(u[k], t[k], k)\n u_new = [(i > 0) * i for i in u_new]\n\n return u_new", "def testAdvance(self):\n events = []\n c = task.Clock()\n call = c.callLater(2, lambda: events.append(None))\n c.advance(1)\n self.assertEquals(events, [])\n c.advance(1)\n self.assertEquals(events, [None])\n self.failIf(call.active())", "def tick(self):\n self.delta = self.clock.tick(50) / 1000.0", "def tick(self):\n self.delta = self.clock.tick(50) / 1000.0", "def Step(self, t, dt):\n self.s += dt * self.dsdt + self.weighted_incoming_spikes / self.tau_s\n if self.ref_remaining <= 0:\n self.v += dt * self.dvdt\n else:\n self.v = 0.0\n self.ref_remaining -= dt\n if self.v >= 1.0:\n v0 = self.v_history[-1]\n v1 = self.v\n t0 = t\n tstar = t + dt * (1.0 - v0) / (v1 - v0)\n self.spikes.append(tstar)\n self.v = 1.0\n self.ref_remaining = self.tau_ref - (self.spikes[-1] - t)\n self.v_history.append(self.v)\n self.s_history.append(self.s)\n self.weighted_incoming_spikes = 0.0", "def increment(self, inc):\n self.done += inc", "def increment_amount(self, add_amount=1):\n new_amount = self.amount + add_amount\n if new_amount < self.min:\n new_amount = self.min\n if new_amount > self.max:\n new_amount = self.max\n self.amount = new_amount\n self.build_bar()", "def credit(self, title: str, minutes: float):\n self._screen_time += minutes", "def increment(cls, value):\r\n value.value += 1", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def GAME_TIME_ADVANCE(dt):", "def give_raise(self,amount=5000):\n self.salary += amount", "def increment(self, n=1):\n with self.current_counter.get_lock():\n self.current_counter.value += n" ]
[ "0.74948883", "0.7390248", "0.7373512", "0.72310776", "0.7102698", "0.70964295", "0.688938", "0.6812329", "0.6760769", "0.6728943", "0.64948493", "0.6393024", "0.6281656", "0.6251204", "0.619589", "0.61536574", "0.6126449", "0.60908896", "0.6087889", "0.6015486", "0.60153484", "0.6002451", "0.59907424", "0.5940177", "0.59203255", "0.591805", "0.5907898", "0.58963704", "0.58940864", "0.5880523", "0.5873661", "0.5834942", "0.5794246", "0.5766047", "0.5759371", "0.5745569", "0.5734473", "0.57312775", "0.5729656", "0.5722839", "0.57120705", "0.5684192", "0.56754893", "0.56537604", "0.56453776", "0.5637698", "0.5632789", "0.56072813", "0.560174", "0.55846345", "0.55731666", "0.55654883", "0.5550349", "0.5543924", "0.55414355", "0.55361676", "0.5508354", "0.5505426", "0.55038035", "0.5490959", "0.54871666", "0.5486177", "0.54812145", "0.54762137", "0.54724395", "0.54685795", "0.5455954", "0.5455924", "0.54478395", "0.5441495", "0.5437562", "0.54281056", "0.5421968", "0.5413819", "0.5401798", "0.5397863", "0.5395611", "0.5394963", "0.5386394", "0.53861666", "0.53713256", "0.5362389", "0.53495026", "0.5340966", "0.533379", "0.5323082", "0.53154624", "0.53092974", "0.530458", "0.52895695", "0.52895695", "0.5276764", "0.527429", "0.525764", "0.5235562", "0.52348304", "0.52272654", "0.5223953", "0.52211595", "0.5219809" ]
0.8371448
0
Advance the time reference so that now is the given timestamp.
def advance_to(self, timestamp: float):
    now = self.__original_time()
    if timestamp < now:
        raise ValueError("cannot retreat time reference: "
                         "target {} < now {}"
                         .format(timestamp, now))
    self.__delta = timestamp - now
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_time(self, new_time):\r\n self.when = new_time", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()", "def increment_datetime(self):\n self.current_datetime += timedelta(seconds=self.step_size)", "def now(self):\n return self._startTime + self.timeToOffset(self.currentTime, self._timeScale)", "def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)", "def update_time(self):\n pass # Do nothing", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def setTimepoint(self, tp):\n\t\tpass", "def advance(self, dt):\n self.workTill(self.currentTime + dt)", "def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_time_now(self, *args, **kwargs)", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_time_now(self, *args, **kwargs)", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_time_now(self, *args, **kwargs)", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def next_change(self, t=None):\n if t is None:\n t = self.engine.get_now()\n t -= self.initTime\n\n p = float(self.period) / POINTS_PER_CYCLE\n t2 = math.floor(t / p) * p + p\n\n t2 += self.initTime\n\n return t2", "def set_current_time(self, ttime):\n if not isinstance(ttime, Time):\n raise TypeError\n try:\n localtime = ttime.local_repr().split()\n timeSetCmd = 'date -s ' + localtime[3]\n #XXX: here seems a dirty quick way (os.system).\n os.system(timeSetCmd)\n yield WaitDBus(self.rtc.SetCurrentTime, int(ttime.value) )\n except Exception, ex:\n logger.exception(\"Exception : %s\", ex)\n raise", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_time_now(self, *args, **kwargs)", "def update_time(self, offset):\n offset = float(offset[1:])\n self.diff_since_last = offset - self.time_offset\n self.time_since_last += self.diff_since_last\n self.time_since_last_events += self.diff_since_last\n self.time_offset = offset", "def time_updated(self, time_updated):\n self._time_updated = time_updated", "def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time", 
"def _set_timestamp(self):\n d = datetime.now()\n self._time_stamp = \"{:>2} {} {} {:>2}:{:>02}\".format(\n d.day, MONTH_ABBREV[d.month], d.year, d.hour, d.minute)", "def _freeze_time(self, timestamp):\n now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)\n now_patch.start()\n self.addCleanup(now_patch.stop) # lint-amnesty, pylint: disable=no-member", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def set_time(self, time):\n self._time = time", "def tick(self):\r\n new_time = time.strftime('%H:%M:%S')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.config(text=self.time)\r\n self.after(200, self.tick)", "def _get_next_time(self, curr_time):\n return curr_time + self.time_dist.random()", "def setSubmitTime(t):", "def set_time_next_pps(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_time_next_pps(self, *args, **kwargs)", "def step(self, value):\n self.real_time += pd.DateOffset(**{self.time_unit: value})\n self.simu_time += value\n logger.debug(\"NEW TIME\")", "def set_time_next_pps(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_time_next_pps(self, *args, **kwargs)", "def current_time(cls) -> float:", "def set_imeastime(self, time):\n self.itime = time", "def reference_time(self):\n if hasattr(self, '_reference_time') is False:\n self._reference_time = self.midtime\n\n return self._reference_time", "def time_settime(currenttime):\r\n\r\n time_query_times.append((getruntime(), currenttime))", "def _next_update_time(self, seconds=10):\n now = get_aware_utc_now()\n next_update_time = now + datetime.timedelta(\n seconds=seconds)\n return next_update_time", "def advance_time_delta(timedelta):\r\n assert(not utcnow.override_time is None)\r\n try:\r\n for dt in utcnow.override_time:\r\n dt += timedelta\r\n except TypeError:\r\n utcnow.override_time += timedelta", "def _set_last_time(self, cur_time):\n self._last_time = cur_time", "def _set_last_time(self, cur_time):\n self._last_time = cur_time", "def set_time_next_pps(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_time_next_pps(self, *args, **kwargs)", "def set_time(self, timestamp):\n\n\t\tdata = pack(\"!bL\", 2, timestamp)\n\t\tself._send_message(\"TIME\", data)", "def _add_time(time_to_add: int):\n store.time += time_to_add", "def set_time_next_pps(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_time_next_pps(self, *args, **kwargs)", "def update_time(self, update_time):\n\n self._update_time = update_time", "def timestamp(self, timestamp: datetime):\r\n self._timestamp = timestamp", "def tick(self):\n\n if self.seconds != 59:\n self.seconds += 1\n else:\n self.seconds = 0\n\n if self.minutes != 59:\n self.minutes += 1\n else:\n self.minutes = 0\n\n if self.hours != 23:\n self.hours += 1\n else:\n self.hours = 0", "def setTime(self,time):\n self.time = time", "def report_now(self, registry=None, timestamp=None):\n timestamp = timestamp or int(round(self.clock.time()))\n super().report_now(registry, timestamp)", "def time_automation_listener(now):\n hass.async_add_job(action, {\n 'trigger': {\n 'platform': 'time',\n 'now': now,\n },\n })", "def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))", "def next_announcement_time(ref: Optional[datetime] = None) -> datetime:\n if ref is None:\n ref = ET.localize(datetime.now())\n else:\n ref = ref.astimezone(ET)\n for start, end, announce in WINDOWS:\n if _datetime(ref, *start) <= ref < _datetime(ref, 
*end):\n return _datetime(ref, *announce)\n raise RuntimeError('Could not arrive at next announcement time')", "def _clock_time(self):\n return self._shifted_time % (24*3600)", "def time_automation_listener(now):\n action()", "def mod_time(self, mod_time):\n\n self._mod_time = mod_time", "def mod_time(self, mod_time):\n\n self._mod_time = mod_time", "def tick(self):\n if self.display_seconds:\n new_time = time.strftime('%I:%M:%S %p')\n else:\n new_time = time.strftime('%I:%M:%S %p').lstrip('0')\n if new_time != self.time:\n self.time = new_time\n self.display_time = self.time\n self.config(text=self.display_time)\n self.after(200, self.tick)", "def time_to_now(self, **options):\n return self.time_to(self.now())", "def updateTimeStamp(self, ts):\n self.ga_timestamp = ts", "def tick(self):\n self.times.append(timeit.default_timer())", "def time(self, time: float) -> None:\n self._time = time", "def update(self, time=None):\n if self.realtime:\n return\n if time is None: # clock in externally-clocked mode, need valid time\n return\n self._time = time", "def _update_time_cursor(self):\n for line in self.timeLines:\n line.setValue(self.playbackTime)", "def update(self, delta_time):\n self.total_time += delta_time", "def _update_time(self, current=None, total=None):\n if current is None:\n current = self._current\n if total is None:\n total = self._total\n\n if self._last_time is None:\n self._last_time = datetime.datetime.now()\n self._remaining_time = \"?\"\n else:\n diff = datetime.datetime.now() - self._last_time\n self._last_time = datetime.datetime.now()\n diff = (diff.seconds * 1E6 + diff.microseconds) /\\\n (current - self._last_current) * (total - current) / 1E6\n self._last_current = current\n\n if diff > 3600:\n h = round(diff//3600)\n m = round((diff - h*3600)/60)\n self._remaining_time = \"{0:d}h {1:d}m\".format(int(h), int(m))\n elif diff > 60:\n m = round(diff // 60)\n s = round((diff - m * 60))\n self._remaining_time = \"{0:d}m {1:d}s\".format(int(m), int(s))\n else:\n self._remaining_time = \"{0:d}s\".format(int(round(diff)))", "def _shifted_time(self):\n return self.sim_time + self.options.time.start_clocktime", "def tick(self):\r\n if self.display_seconds:\r\n new_time = time.strftime('%H:%M:%S')\r\n else:\r\n new_time = time.strftime('%I:%M %p').lstrip('0')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.display_time = self.time\r\n self.config(text=self.display_time)\r\n self.after(200, self.tick)", "def ref_now():\n return as_datetime(datetime.datetime.now(), REF_TZ)", "def time_from_now(self, **options):\n return self.time_from(self.now())", "def update_stay_time(self):\n # It would not be better to simply self.stay_time = self.get_length() ??\n self.stay_time = self.get_length()", "def walk(self, community, now):\n self._get_or_create_timestamps(community).last_walk = now", "def offset(self, timedelta, function, parameters=None):\n datetime = self.current_time + timedelta\n self.schedule(datetime, function, parameters)\n return datetime", "def fake_time(cls, ignored):\n cls.FAKE_TIME += 2\n return cls.FAKE_TIME", "def set_time(self, set_time):\n\n self._set_time = set_time", "def __update_time(self, time: float):\n if self.__prev_time is None:\n self.__prev_time = time\n\n added_time = time - self.__prev_time\n assert added_time >= 0.0\n self.__prev_time = time\n\n self.__from_window_start += added_time\n\n while self.__from_window_start > self.__time_window:\n self.__processed_windows += 1\n self.__from_window_start -= self.__time_window\n del 
self.__counters[0]\n sketch = FullCounter()\n self.__counters.append(sketch)", "def tick(self):\n if Clock.__repr__(self) == 'Clock(23, 59, 59)':\n Clock.tick(self)\n Calendar.advance(self)\n else:\n Clock.tick(self)", "def ref_time(self) -> float:\n return ntp_to_system_time(self.ref_timestamp)", "def passTime(self, time: int) -> None:\n if self.delayed == True:\n self.delayed = None\n return\n\n if self.enabled == True:\n self.time -= time", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def advance(self, delta_t: float) -> None:\n pass", "def _event_time_changed(self, sender, obj, **kwargs):\n handle_event_time_update(obj)", "def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)", "def advance_processing_time(self, advance_by):\n self._add(ProcessingTimeEvent(advance_by))\n return self", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def _setCursorLocOnTimeLabel(self, waveform, t):\n self.tm.setTime(t)", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def next_freeze_time(ref: Optional[datetime] = None) -> datetime:\n if ref is None:\n ref = ET.localize(datetime.now())\n else:\n ref = ref.astimezone(ET)\n for start, end, announce in WINDOWS:\n if _datetime(ref, *start) <= ref < _datetime(ref, *end):\n return _datetime(ref, *end)\n raise RuntimeError('Could not arrive at next freeze time')" ]
[ "0.6708446", "0.6519798", "0.63856924", "0.63803643", "0.6373168", "0.63460845", "0.6326127", "0.62440217", "0.62124366", "0.6148152", "0.614451", "0.61350346", "0.6095682", "0.604849", "0.6047468", "0.60337085", "0.60114896", "0.6003276", "0.6003276", "0.6003276", "0.6003276", "0.6003276", "0.59980756", "0.59876317", "0.5980938", "0.5979058", "0.59748805", "0.59616286", "0.5951179", "0.5944869", "0.5940932", "0.5940932", "0.5908181", "0.5891541", "0.5889168", "0.5857611", "0.5853469", "0.5852931", "0.58481133", "0.5847542", "0.58450377", "0.5828154", "0.58239144", "0.5801864", "0.5793199", "0.5791169", "0.5791169", "0.5790131", "0.57862383", "0.57845265", "0.5761653", "0.5739256", "0.57299036", "0.57275146", "0.57231575", "0.57121754", "0.5705721", "0.5698831", "0.568286", "0.5665644", "0.56609875", "0.56459254", "0.56459254", "0.5637481", "0.56362325", "0.5632837", "0.56311864", "0.5611489", "0.5611128", "0.55882204", "0.5587358", "0.5583405", "0.5581345", "0.55737597", "0.5571824", "0.5566563", "0.5559133", "0.5556813", "0.55387604", "0.5519528", "0.5513966", "0.5508094", "0.5504149", "0.5489063", "0.5488111", "0.54836756", "0.5482205", "0.5482111", "0.5475792", "0.5466157", "0.5459596", "0.5457173", "0.54464316", "0.54464316", "0.54464316", "0.54464316", "0.54464316", "0.54464316", "0.54464316", "0.54461473" ]
0.7582537
0
Email the given document to the given email address.
def email_document(document, to, template='django_dms/email.txt', subject=''):
    # Start a new thread to email the document
    # This avoids a frozen screen while the email is being sent (particularly if the document is big).
    t = threading.Thread(target=_email_document, args=[document, to, template, subject])
    t.setDaemon(True)
    t.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def send_email(email: str, name: str, message, db: Session):\n msg = MIMEText(message)\n msg[\"Subject\"] = name\n msg[\"From\"] = \"[email protected]\"\n msg[\"To\"] = email\n with smtplib.SMTP(host=\"localhost\", port=8025) as s:\n try:\n s.sendmail(msg[\"From\"], [email], msg.as_string())\n logger.info(\"Recipient reached at {}\".format(email))\n except smtplib.SMTPRecipientsRefused:\n logger.error(\"Recipient refused at {}\".format(email))\n raise\n mark_person_emailed(db, email)", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def send_mail(self, address, title, message):\n pass", "def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email(self, email: str):\n\n self._email = email", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n 
message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def send(self, request, id, tribe_slug):\n tribe = get_object_or_404(Tribe, slug=tribe_slug)\n document = self.get_document(id, tribe_slug)\n \n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if form or not email:\n return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request))\n \n # NB: Temporarily disabling actual email sending for development\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n print \"Sending email to %s\" % email \n \n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))", "def send_mail(email):\n return email.send()", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_ebook(filename, from_email, to_addr, subject, smtp_server, smtp_username, smtp_password):\n mroot = MIMEMultipart('related')\n mroot['Subject'] = subject\n mroot['From'] = from_email\n mroot['To'] = to_addr\n with open(filename, 'rb') as f:\n m = MIMEBase('application', 'octet-stream')\n m.set_payload(open(filename, 'rb').read())\n encode_base64(m)\n m.add_header('Content-Disposition', 'attachment; filename=\"{0}\"'.format(filename))\n mroot.attach(m)\n smtp = smtplib.SMTP()\n smtp.connect(smtp_server)\n smtp.starttls()\n smtp.login(smtp_username, smtp_password)\n smtp.sendmail(from_email, to_addr, mroot.as_string())\n smtp.quit()", "def reply_to_email_address(self, val: EmailAddress):\n self._reply_to = val", "def send_email(self, email):\n\n if not isinstance(email, str):\n raise TypeError('type of email must be str not %s' % type(email))\n\n message = self.get_message(email)\n self.server.send_message(message)", "def email_address(self, email_address: \"str\"):\n self._attrs[\"emailAddress\"] = email_address", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', 
data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def send_email(email_body, email_to):\n html = f'{HTML_START}{email_body}{HTML_END}'\n try:\n SES_CLIENT.send_email(\n Source=os.environ['SENDER_EMAIL'],\n Destination={\n 'ToAddresses': [\n email_to\n ]\n },\n Message={\n 'Subject': {\n 'Data': 'Newest Music in Last 7 Days (Spotify)',\n },\n 'Body': {\n 'Html': {\n 'Data': html,\n }\n }\n }\n )\n\n except:\n traceback.print_exc()\n return False\n\n return True", "def email(self, email_id):\r\n return emails.Email(self, email_id)", "def from_email_address(self, val: EmailAddress):\n self._from_email = val", "def setEmail(self, email):\n self.email = email\n return self", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "def send(self, address_to, message, emailSubject = \"Automated Email\", attachmentFilePath = None):\r\n\t\tmail = self._createEmail(address_to, message, emailSubject)\r\n\t\tif attachmentFilePath != None:\r\n\t\t\tmail.attachment = self._createAttachment(attachmentFilePath)\r\n\t\tsg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\r\n\t\tresponse = sg.send(mail)\r\n\t\tif response.status_code == 202:\r\n\t\t\tprint(\"Email sent\")\r\n\t\telse:\r\n\t\t\tprint(\"Email not sent. Please check error codes below - \")\r\n\t\t\tprint(response.status_code)\r\n\t\t\tprint(response.headers)", "def recipient_email(self, recipient_email):\n\n self._recipient_email = recipient_email", "def email_address(self, email_address: str):\n if email_address is None:\n raise ValueError(\"Invalid value for `email_address`, must not be `None`\") # noqa: E501\n\n self._email_address = email_address", "def mark_email(args):\n cache.get_default().set_email(args.address, args.is_valid)\n print('{!r:} marked as {:s}valid.'.format(args.address, '' if args.is_valid else 'in'))", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def email_user(self, subject, message, from_email=None):\n\t\tsend_mail(subject, message, from_email, [self.email])", "def email(self):\r\n webbrowser.open(\"mailto: [email protected]\")", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def send_email(self, message):\n pass", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def eap_email():\n\n # skip authorization\n data = request.get_json()\n email_address = data['email_address']\n\n # email address validation\n if not check_valid_email_address(email_address):\n return json.dumps({\"success\": False}), 403\n\n # create object\n subject = '[EAP] - New Inquiry from %s' % email_address\n body = '''<html><head></head><body>%s</body></html>''' % EAP_INQUIRY_BODY.format(email_address)\n email = {\n 'email_from': settings.EMAIL_AUTHOR_PROTECTED,\n 'email_to': settings.EMAIL_AUTHOR_PROTECTED,\n 'subject': subject,\n 'body': body,\n 'cc': [],\n 'sent': False,\n 'num_failures': 0,\n 'errors': 
[]\n }\n\n # insert into mongodb\n email_conn = app.data.driver.db['email']\n email_conn.insert(email)\n\n return json.dumps({\"success\": True}), 201", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def emit(self, record):\r\n try:\r\n port = self.mailport\r\n if not port:\r\n port = smtplib.SMTP_PORT\r\n smtp = smtplib.SMTP(self.mailhost, port)\r\n smtp.login(self.username, self.password)\r\n msg = self.format(record)\r\n msg = \"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\nDate: %s\\r\\n\\r\\n%s\" % (\r\n self.fromaddr,\r\n ','.join(self.toaddrs),\r\n self.getSubject(record),\r\n formatdate(), msg)\r\n smtp.sendmail(self.fromaddr, self.toaddrs, msg)\r\n smtp.quit()\r\n except (KeyboardInterrupt, SystemExit):\r\n raise\r\n except:\r\n self.handleError(record)", "def send_email(email_dict, appointment_id):\n event_identifier = g_cal.send_invite_through_gcal(email_dict)\n models.Appointments.objects.filter(id=appointment_id).update(event_identifier=event_identifier)", "def send_ajax(self, request, id, tribe_slug):\n\n document = self.get_document(id, tribe_slug)\n\n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if not email and not form:\n form = EmailForm()\n \n if form:\n content = '<form class=\"ajax_update_email\" action=\"%s\" method=\"post\">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)])\n content += '%s<input type=\"submit\" value=\"Send\"/></form>' % form['email']\n return HttpResponse(content)\n \n print \"Sending email to %s\" % email\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n\n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponse('Email sent to %s' % email)", "def mail(note,\n sender,\n recipients,\n cc_recipients=[],\n attachments=[],\n subject = '',\n verbosity = 0):\n if verbosity > 1:\n msgb(\"SENDING EMAIL\")\n note = [x.rstrip() for x in note]\n body = '\\n'.join(note)\n att = []\n for attachment in attachments:\n att.append( (attachment, os.path.basename(attachment)) )\n try:\n _send_email(recipients,\n sender,\n subject,\n body,\n att,\n cc_recipients,\n verbosity)\n except:\n die(\"Sending email failed\")\n return 0", "def __send_email_to_user(self, template_name, email_subject, book, redirect, date=None):\r\n ctx = {\r\n 'date': datetime.now().date(),\r\n 'user': \"{} {}\".format(self.request.user.first_name, self.request.user.last_name),\r\n 'book': \"{} - {}\".format(book.title, book.author),\r\n 'profile_url': 
self.request.build_absolute_uri(reverse(redirect)),\r\n 'cons_res_date': date\r\n }\r\n\r\n html_content = render_to_string(\r\n 'users/emails/{}.html'.format(template_name), ctx)\r\n # Strip the html tag. So people can see the pure text at least.\r\n text_content = strip_tags(html_content)\r\n\r\n msg = EmailMultiAlternatives(\r\n email_subject, text_content, \"[email protected]\", [\r\n self.request.user.email])\r\n msg.attach_alternative(html_content, \"text/html\")\r\n msg.send()", "def sendEmail(self, address_book: List[str], subject: str, html: str, attachmentFpath: Optional[str] = None) -> None:\n msg = MIMEMultipart()\n msg['From'] = self.mailAddress\n msg['To'] = ','.join(address_book)\n msg['Subject'] = subject\n # msg.attach(MIMEText(body, 'plain'))\n msg.attach(MIMEText(html, 'html'))\n \n if not(attachmentFpath == None) and not(attachmentFpath == \"\"):\n fPath = cast(str, attachmentFpath)\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(fPath, \"rb\").read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition',\n 'attachment; filename=\"{0}\"'.format(os.path.basename(fPath)))\n msg.attach(part)\n \n text = msg.as_string()\n # Send the message via our SMTP server\n s = smtplib.SMTP(self.host, self.port)\n s.starttls()\n s.login(self.username, self.password)\n s.sendmail(self.mailAddress, address_book, text)\n s.quit()", "def customer_email(self, customer_email):\n self._customer_email = customer_email", "def _send(self, email_message): \n\t\tif not email_message.recipients(): \n\t\t\treturn False \n\t\trecipients = map(self._sanitize, email_message.recipients()) \n\t\tMsg = o.CreateItem(0)\n \t\tMsg.To = recipients\n\t\tMsg.Subject = 'subject'\n\t\tMsg.Body = 'text'\n\t\tself.connection.SaveChanges(0)\n\n\t\tMsg.Send()\n\t\treturn True", "def send_emails(emails, author, title):\n subject = 'New post by %s' % author.capitalize()\n message = '%s wrote a new post with the title: %s' % (author.capitalize(), title)\n print('Sending emails to ', emails)\n send_mails_count = send_mail(\n subject=subject,\n message=message,\n from_email=EMAIL_HOST_USER,\n recipient_list=emails\n )\n print('Successfully sent %s - letters' % send_mails_count)", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) > 64):\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `64`\") # noqa: E501\n\n self._email = email", "def email_user(self, subject, message, from_email=None, **kwargs):\n\t\tsend_mail(subject, message, from_email, [self.email], **kwargs)", "async def handle_email(self, email):\n\t\tif 'logdir' in self.log_settings:\n\t\t\tfilename = 'email_%s_%s.eml' % (datetime.datetime.utcnow().isoformat(), str(uuid.uuid4()))\n\t\t\twith open(str(Path(self.log_settings['logdir'], 'emails', filename).resolve()), 'wb') as f:\n\t\t\t\tf.write(email.email.as_bytes())\n\n\t\tawait self.log('You got mail!')", "def email(self, email):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "def email_user(self, subject: str, message: str, from_email: str = None) -> None:\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None, **kwargs):\r\n send_mail(subject, 
message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\r\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def client_email(self, client_email):\n\n self._client_email = client_email", "def send_email(self, email_from, email_to, message):\n logging.info(\"Attempting to send email from \" + email_from + \" to \" + email_to)\n self.conn.sendmail(email_from, email_to, message)\n logging.info(\"Email sent\")", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def set_reply_to(self, address):\n if not self.validate_email_address(address):\n raise Exception(\"Invalid email address '%s'\" % address)\n self._reply_to = address", "def admin_email(self, admin_email):\n\n self._admin_email = admin_email", "def onAboutLeoEmail(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(\"mailto:\" + self.email)\n except:\n g.es(\"not found: \" + self.email)", "def post(self):\n return send_email(request.args)", "def send_email(self, new_address):\n s = smtplib.SMTP('smtp.gmail.com:587')\n s.starttls()\n s.login(from_address, password)\n email = MIMEText(\"Received a request for ION-X information from:\\n{}\"\n .format(new_address))\n email['To'] = to_address\n email['From'] = from_address\n email['Subject'] = \"Website Request Received\"\n s.sendmail(from_address, to_address, email.as_string())\n s.quit()", "def sendTheDamnEmail(f):\n \n subject = f[\"subject\"].value\n toEmails = f[\"toEmail\"].value\n msg = f[\"msg\"].value\n \n #try:\n #mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n #mimeMsg['Subject'] = subject\n #mimeMsg['From'] = fromEmail\n #mimeMsg['To'] = toEmails\n \n mimeMsg = MIMEMultipart('alternative')\n mimeMsg['Subject'] = Header(subject, 'UTF-8').encode()\n mimeMsg['To'] = Header(toEmails, 'UTF-8').encode()\n mimeMsg['From'] = Header(fromEmail, 'UTF-8').encode()\n\t\n part1 = MIMEText(msg, 'plain', \"utf-8\")\n #part2 = MIMEText(msg, 'html') # If you want to send a fancy HTML email, use this one also\n\t\n mimeMsg.attach(part1)\n\n sendEmail.sendEmail(fromEmail, password, toEmails,\\\n smtp, port=port, msg=mimeMsg)\n\n if logPath!=\"null\":\n logger = logEmail.EmailLogger(logPath)\n stored = logger.storePost(ip, msg, toEmails)\n\tprint \"stored\"\n print \"success\"", "def from_email(self, from_email):\n\n self._from_email = from_email", "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n 
send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)" ]
[ "0.7192423", "0.62836236", "0.6040218", "0.6037075", "0.59470624", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.58806413", "0.58806413", "0.58301985", "0.5820901", "0.5814947", "0.5814947", "0.5814947", "0.5790432", "0.5716784", "0.5679747", "0.56669414", "0.5635135", "0.56337947", "0.56128144", "0.5592558", "0.5585707", "0.55786395", "0.55599797", "0.5555133", "0.5539705", "0.5533234", "0.5507434", "0.5504406", "0.54648834", "0.5455615", "0.5453999", "0.54361844", "0.54303235", "0.5427299", "0.54044837", "0.5396529", "0.5386192", "0.53808886", "0.5354288", "0.5353265", "0.5351801", "0.5340145", "0.5339383", "0.5339383", "0.5339383", "0.5339383", "0.5339383", "0.5339383", "0.5339383", "0.5339383", "0.5335449", "0.5329162", "0.53166825", "0.5315445", "0.53078514", "0.5293617", "0.5265408", "0.5264837", "0.52604455", "0.52521276", "0.5244252", "0.5242837", "0.5238955", "0.5228853", "0.52231497", "0.52231497", "0.52195144", "0.52162504", "0.5203086", "0.5200062", "0.5193608", "0.51919526", "0.51844573", "0.5163981", "0.51567733", "0.514906", "0.5145852", "0.51403433", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256", "0.51347256" ]
0.7277005
0
Helper function to email document in another thread.
def _email_document(document, to, template='django_dms/email.txt', subject=''):
    # TODO: A really cool system would delay sending the email for 10 seconds or so,
    # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)
    # Create the message
    message = EmailMessage(to=to, subject=subject)
    message.to = to
    message.subject = subject
    message.body = render_to_string(template, {'document': document})
    message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)

    # Send the message
    message.send()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()", "def send_async_email(self, msg):\n with app.app_context():\n result = mail.send(msg)\n print result", "def send_email(subject, sender, recipients, html_body):\n\n try:\n # Create a new SendGrid Mail object with the arguments given\n message = Mail(\n from_email=sender,\n to_emails=recipients,\n subject=subject,\n html_content=html_body)\n\n # We prepare a new Thread here to send the email in the background. This takes in the send_async_email\n # function as its target and runs the function with the parameters passed through args.\n Thread(target=send_async_email,\n args=(current_app._get_current_object(), message)).start()\n\n except Exception as e:\n print(e)\n # FIXME: should do some type of error handling here or allow error to bubble up", "def send_email(self,to, subj):\r\n\r\n \"\"\" Currently not implemented. \"\"\"\r\n print(to+'-'+subj)\r\n print(self.body)\r\n # Send the finalized email here.\r", "def create_email_job(app, db):\n from app.models import Lembrete\n lock = threading.Lock()\n\n def send_email():\n with lock:\n sp = datetime.now(tz=sao_paulo_tz)\n agora = datetime(\n year=sp.year,\n month=sp.month,\n day=sp.day,\n hour=sp.hour,\n minute=sp.minute\n )\n lembretes = Lembrete.query.filter(\n Lembrete.data_notificacao <= agora\n ).all()\n print('Enviando emails')\n if lembretes:\n for lembrete in lembretes:\n texto = lembrete.texto\n nome = ''\n veiculo = ''\n telefone = ''\n celular = ''\n tel_comercial = ''\n e_mail = ''\n if lembrete.cliente is not None:\n nome = lembrete.cliente.nome\n telefone = lembrete.cliente.telefone\n celular = lembrete.cliente.celular\n tel_comercial = lembrete.cliente.telefone_comercial\n e_mail = lembrete.cliente.email\n if lembrete.cliente is not None:\n veiculo = lembrete.veiculo.descricao()\n\n mensagem = \"\"\"\n Nome: {0}\n Telefone: {1}\n Celular: {2}\n Telefone Comercial: {3}\n E-mail: {4}\n Veículo: {5}\n Lembrete: {6}\n \"\"\".format(\n nome,\n telefone,\n celular,\n tel_comercial,\n e_mail,\n veiculo,\n texto\n )\n email = MIMEText(mensagem)\n\n me = app.config['EMAIL_ME']\n you = app.config['EMAIL_YOU']\n password = app.config['EMAIL_ME_PASSWORD']\n smtp = app.config['EMAIL_SMTP']\n smtp_port = app.config['EMAIL_SMTP_PORT']\n\n email['Subject'] = 'Lembrete: {0}|{1}'.format(\n nome, veiculo\n )\n email['From'] = me\n email['To'] = you\n\n s = smtplib.SMTP(smtp, smtp_port)\n s.ehlo()\n s.starttls()\n s.login(me, password)\n s.sendmail(me, [you], email.as_string())\n s.quit()\n # excluindo o lembrete\n db.session.delete(lembrete)\n db.session.commit()\n return send_email", "def mail():\n mail_server = 'localhost'\n mail_port = 1025\n CustomSMTPServer((mail_server, mail_port), None)\n asyncore.loop()", "def send_ajax(self, request, id, tribe_slug):\n\n document = self.get_document(id, tribe_slug)\n\n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if not email and not form:\n form = EmailForm()\n \n if form:\n content = '<form class=\"ajax_update_email\" action=\"%s\" method=\"post\">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)])\n content += '%s<input type=\"submit\" value=\"Send\"/></form>' % 
form['email']\n return HttpResponse(content)\n \n print \"Sending email to %s\" % email\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n\n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponse('Email sent to %s' % email)", "def send(self, request, id, tribe_slug):\n tribe = get_object_or_404(Tribe, slug=tribe_slug)\n document = self.get_document(id, tribe_slug)\n \n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if form or not email:\n return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request))\n \n # NB: Temporarily disabling actual email sending for development\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n print \"Sending email to %s\" % email \n \n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))", "def send_email(self, message):\n pass", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, '[email protected]',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def send_realtime_email(self,body_):\n import smtplib, ssl\n\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = self.fromaddr # Enter your address\n receiver_email = self.toaddr # Enter receiver address\n password = self.pswd\n message = f\"\"\"\\\nSubject: [Test] Twitter real time (half) hourly trending alert\n\n{body_}\"\"\"\n\n context = ssl.create_default_context()\n # send to multiple emails\n for receiver in receiver_email:\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver, message)\n \n print(f'Email successfully sent to {receiver}')", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def main():\n return render_template('doc.html', docid=queue.pop(0))", "def send_async_email(app, msg):\n\n # The function is called on a custom Thread, so we need to get the application context before sending a message.\n with 
app.app_context():\n\n # Instantiate the SendGridAPIClient with API key and send message\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n sg.send(msg)", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def emailNote(self, authenticationToken, parameters):\r\n pass", "def send_email_on_delay(template, context, subject, email):\n print(\"delay\")\n send_mail_from_template(template, context, subject, email)", "def async_mail_task(subject_or_message, to=None, template=None, **kwargs):\n to = to or kwargs.pop('recipients', [])\n msg = make_message(subject_or_message, to, template, **kwargs)\n with mail.connect() as connection:\n connection.send(msg)", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def quick_email(self, send_to, subject, body, style=None):\n message = Message(body, style=style)\n\n self.send_message(message, send_to, subject)", "def emailNote(self, authenticationToken, parameters):\r\n self.send_emailNote(authenticationToken, parameters)\r\n self.recv_emailNote()", "def post(self):\n return send_email(request.args)", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def runDocumentInBackground (self):\n self.runDocument(background = True)", "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def doctest_BackgroundWorkerThread_getTransactionNote():", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def send_mail(self, msg):\n mail_queue.put(msg)", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def send_mail(self, address, title, message):\n pass", "def fetch_my_mail(request):\n q = Queue(connection=conn)\n if not request.user.email:\n return HttpResponse(\"User must have email defined.\")\n logger.info(\"Queuing job in EmailAnalyzer\")\n email_analyzer = EmailAnalyzer(request.user)\n q.enqueue(email_analyzer.process)\n return HttpResponse(\"Job queued.\")", "def send_mail(email_queue_name, smtp_server_instance: SMTPServer = None):\n\trecord = EmailQueue.find(email_queue_name)\n\trecord.send(smtp_server_instance=smtp_server_instance)", "def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, subject, body)", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += 
self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def send_email_to_trial_user_with_link(\n to, context, from_email=settings.DEFAULT_FROM_EMAIL):\n template = EMAIL_DICT['parse_trial_user_resume']['template']\n subject = EMAIL_DICT['parse_trial_user_resume']['subject']\n return threadify(_send, to, context, subject, from_email, template)", "def send_lead_task(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_email(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "def send(self, recipient, template_path, context, subject, bcc_email=[]):\n\n body = self.email_render(template_path, context)\n self.send_email(recipient, subject, body, bcc_email)", "def test_send_mass_html_mail_reply_to(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n self.assertEqual(send_mass_html_mail__mock.call_count, 1)\n self.assertEqual(send_mass_html_mail__mock.call_args[1]['reply_to'],\n [\"Marie <[email protected]>\"])", "def emailAggregatorPage(from_addr, to_addr, subj, smtp_host, out_fn):\n # Begin building the email message.\n msg = MIMEMultipart()\n msg['To'] = to_addr\n msg['From'] = from_addr\n msg['Subject'] = subj\n msg.preamble = \"You need a MIME-aware mail reader.\\n\"\n msg.epilogue = \"\"\n\n # Generate a plain text alternative part.\n plain_text = \"\"\"\n This email contains entries from your subscribed feeds in HTML.\n \"\"\"\n part = MIMEText(plain_text, \"plain\", UNICODE_ENC)\n msg.attach(part)\n\n # Generate the aggregate HTML page, read in the HTML data, attach it\n # as another part of the email message.\n html_text = open(out_fn).read()\n part = MIMEText(html_text, \"html\", UNICODE_ENC)\n msg.attach(part)\n\n # Finally, send the whole thing off as an email message.\n print \"Sending email '%s' to '%s'\" % (subj, to_addr)\n s = smtplib.SMTP(smtp_host)\n s.sendmail(from_addr, to_addr, msg.as_string())\n s.close()", "def _feedback_email(email, body, kind, name='', reply_to = ''):\r\n Email.handler.add_to_queue(c.user if c.user_is_loggedin else None,\r\n None, [feedback], name, email,\r\n datetime.datetime.now(),\r\n request.ip, kind, body = body,\r\n reply_to = reply_to)", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n 
start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def _make_thread(self):\r\n pass", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "def process_thread(self):", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "def send_mail(self, subject):\r\n pass", "def doctest_BackgroundWorkerThread_forSite():", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()", "def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)", "def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)", "def new_reply(cls, thread, user, content):\n msg = cls.objects.create(thread=thread, sender=user, content=content)\n thread.userthread_set.exclude(user=user).update(deleted=False, unread=True)\n thread.userthread_set.filter(user=user).update(deleted=False, unread=False)\n message_sent.send(sender=cls, message=msg, thread=thread, reply=True)\n #for recip in thread.userthread_set.exclude(user=user):\n # send_newmessage_mail(msg, recip.user)\n return msg", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def doctest_BackgroundWorkerThread():", "def alert_for_pending_mails_1(request):\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>Beginning of alert_for_pending_mails_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tThread(target=alert_for_pending_mails_1_worker).start()\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>End of alert_for_pending_mails_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tresponse = {}\n\n\tresponse[\"info_to_contact\"] = \"Ok\"\n\n\treturn response", "def _message(self, recipient, connection, context=None):\n base_subject = '{{ event.calendar.course.name }} {{ 
event.title }}'\n if not self.event.get_documents(True):\n template_name = self.REQUEST_TEMPLATE\n subject = 'Got a {} study guide?'.format(base_subject)\n else:\n template_name = self.PUBLISH_TEMPLATE\n subject = '{} study guide'.format(base_subject)\n\n subject = Template(subject).render(context)\n body = get_template(template_name).render(context)\n\n return make_email_message(subject, body,\n make_display_email(\n self.sender_address,\n self.sender_name),\n recipient, connection)", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def email_ebook(filename, from_email, to_addr, subject, smtp_server, smtp_username, smtp_password):\n mroot = MIMEMultipart('related')\n mroot['Subject'] = subject\n mroot['From'] = from_email\n mroot['To'] = to_addr\n with open(filename, 'rb') as f:\n m = MIMEBase('application', 'octet-stream')\n m.set_payload(open(filename, 'rb').read())\n encode_base64(m)\n m.add_header('Content-Disposition', 'attachment; filename=\"{0}\"'.format(filename))\n mroot.attach(m)\n smtp = smtplib.SMTP()\n smtp.connect(smtp_server)\n smtp.starttls()\n smtp.login(smtp_username, smtp_password)\n smtp.sendmail(from_email, to_addr, mroot.as_string())\n smtp.quit()", "def send_reminder(self):\n pass", "def pass_message_to_main_thread_fn():\n\n pass", "def message_thread(self):\r\n return resource.MessageThread(self)", "def _send(message: Message, application: Flask) -> None:\n\n with application.app_context():\n mail.send(message)", "def email_members_old(request, course_prefix, course_suffix):\n error_msg=\"\"\n success_msg=\"\"\n form = EmailForm()\n if request.method == \"POST\":\n form = EmailForm(data=request.POST)\n if form.is_valid():\n sender = request.common_page_data['course'].title + ' Staff <[email protected]>'\n \n recipient_qset = User.objects.none() #get recipients in a QuerySet\n \n if form.cleaned_data['to'] == \"all\" :\n recipient_qset = request.common_page_data['course'].get_all_members()\n elif form.cleaned_data['to'] == \"students\" :\n recipient_qset = request.common_page_data['course'].get_all_students()\n elif form.cleaned_data['to'] == \"staff\" :\n recipient_qset = request.common_page_data['course'].get_all_course_admins()\n elif form.cleaned_data['to'] == \"myself\":\n recipient_qset = User.objects.filter(id=request.user.id)\n #pdb.set_trace()\n courses.email_members.tasks.email_with_celery.delay(\n form.cleaned_data['subject'],\n form.cleaned_data['message'],\n sender,\n recipient_qset.values_list('email',flat=True),\n course_title=request.common_page_data['course'].title,\n course_url=request.build_absolute_uri(reverse('courses.views.main', args=[course_prefix, course_suffix])))\n success_msg = \"Your email was successfully queued for sending\"\n #form = EmailForm()\n \n else:\n error_msg = \"Please fix the errors below:\"\n \n context = RequestContext(request)\n return render_to_response('email/email.html',\n {'form': form,\n 'error_msg': error_msg,\n 'success_msg': success_msg,\n 'course': request.common_page_data['course'],\n 'common_page_data': request.common_page_data},\n context_instance=context)", "def send_email(msg_body: str, run_path: str, experiment_name: str, run_number: 
int) -> None:\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login(\"[email protected]\", \"thisisnotimportant\")\n\n msg = MIMEMultipart()\n\n files = [f\"{run_path}/cumulative_reward_{experiment_name}_R{run_number}_plot.png\",\n f\"{run_path}/max_reward_{experiment_name}_R{run_number}_plot.png\"]\n\n msg_body += \"\\n\"\n msg_body += time.asctime(time.localtime(time.time()))\n msg.attach(MIMEText(msg_body))\n\n for f in files:\n with open(f, \"rb\") as fil:\n part = MIMEApplication(\n fil.read(),\n Name=basename(f)\n )\n # After the file is closed\n part['Content-Disposition'] = 'attachment; filename=\"%s\"' % basename(f)\n msg.attach(part)\n\n server.sendmail(\"[email protected]\", \"[email protected]\", msg.as_string())\n server.quit()", "def send_mail(email):\n return email.send()", "def _send_email_in_transaction():\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_mail(\n sender_name_email, recipient_email, email_subject,\n cleaned_plaintext_body, cleaned_html_body, bcc_admin,\n reply_to_id=reply_to_id)\n email_models.SentEmailModel.create(\n recipient_id, recipient_email, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())", "def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def send_email(subject, sender, recipients, text_body, html_body):\n\t\tmsg = Message(subject, sender=sender, recipients=recipients)\n\t\tmsg.body = text_body\n\t\tmsg.html = html_body\n\t\tmail.send(msg)", "def send_bulk_course_email(entry_id, _xmodule_instance_args):\r\n # Translators: This is a past-tense verb that is inserted into task progress messages as {action}.\r\n action_name = ugettext_noop('emailed')\r\n visit_fcn = perform_delegate_email_batches\r\n return run_main_task(entry_id, visit_fcn, action_name)", "def test_threading(self, mock_slack):\n from mail.management.commands.monitor_imap_folder import get_messages\n mock_slack.return_value.api_call.return_value = {'ok': True, 'ts': '1234'}\n get_messages(folder='INBOX', channel='#mailtest')\n\n # First it gets threaded...\n self.assertEqual(\n mock_slack.return_value.api_call.mock_calls[0],\n call(\n 'chat.postMessage',\n channel='#mailtest',\n text=('_Seb Bacon_ to _Seb Bacon - ebmdatalab '\n '<[email protected]>_\\n*adieu*\\n\\n'\n 'farewell reply'))\n )\n\n # ...then it gets posted to the top level\n self.assertEqual(\n mock_slack.return_value.api_call.mock_calls[1],\n call(\n 'chat.postMessage',\n channel='#mailtest',\n text=('_Seb Bacon_ to _Seb Bacon - ebmdatalab '\n '<[email protected]>_\\n*adieu*\\n\\n'\n 'farewell reply'),\n thread_ts=\"1538671906.000100\")\n )", "def send_mail_when_failed(self, body):\r\n pass", "def sendTheDamnEmail(f):\n \n subject = f[\"subject\"].value\n toEmails = f[\"toEmail\"].value\n msg = f[\"msg\"].value\n \n #try:\n #mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n #mimeMsg['Subject'] = subject\n #mimeMsg['From'] = fromEmail\n #mimeMsg['To'] = toEmails\n \n mimeMsg = MIMEMultipart('alternative')\n mimeMsg['Subject'] = Header(subject, 'UTF-8').encode()\n mimeMsg['To'] = Header(toEmails, 'UTF-8').encode()\n mimeMsg['From'] = Header(fromEmail, 'UTF-8').encode()\n\t\n part1 = MIMEText(msg, 'plain', \"utf-8\")\n #part2 = MIMEText(msg, 'html') # If you want 
to send a fancy HTML email, use this one also\n\t\n mimeMsg.attach(part1)\n\n sendEmail.sendEmail(fromEmail, password, toEmails,\\\n smtp, port=port, msg=mimeMsg)\n\n if logPath!=\"null\":\n logger = logEmail.EmailLogger(logPath)\n stored = logger.storePost(ip, msg, toEmails)\n\tprint \"stored\"\n print \"success\"", "def test_send_email_mailhog(self):\n\n pk = self.saved_pks[0]\n publisher = RssNotificationEmailPublisher(self.saved_pks, self.feedsource.pk)\n\n notification = RssNotification.objects.get(pk=pk)\n rendered_content = publisher.render_notification(notification)\n publisher.send_email(rendered_content, notification)", "def send_email(msg):\n\tprint(\"sendEmail: \" + msg)", "def send_mail_async(email_to,\n template,\n template_context=None,\n email_from=None,\n name_from=None,\n email_reply_to=None,\n attachments=None):\n if isinstance(email_to, string_types):\n email_to = [email_to]\n\n if email_to is None or not isinstance(email_to, (list, tuple)):\n raise ValueError(\"email_to is None or incompatible type!\")\n\n if template_context is None:\n template_context = {}\n\n email_from = email_from if email_from is not None else settings.get('email.from_email')\n name_from = name_from if name_from is not None else settings.get('email.from_name')\n email_reply_to = email_reply_to if email_reply_to is not None else email_from\n\n if attachments is None:\n attachments = []\n\n # render mail content\n template_context.update(settings.get('render_context', {}))\n template_path = \"email/{0}.multipart\".format(template)\n template = jinja_env.get_template(template_path)\n #we generate the module, which allows us the extract individual blocks from it\n #we capture those blocks of interest using the {% set ... %} syntax\n module = template.make_module(template_context)\n\n logger.info(\"Sending an email to: {}\\ntemplate: {}\\ntemplate_context: {}\\nsubject: {}\"\n .format(\"\".join(email_to), template, template_context, module.subject))\n\n message = {\n 'from_email': email_from,\n 'from_name': name_from,\n 'reply_to' : email_reply_to,\n 'subject': module.subject,\n 'html': module.html,\n 'text': module.text if module.text else None,\n 'to': email_to,\n 'attachments': attachments,\n }\n\n if not settings.providers['email.send']:\n logger.warning(\"No e-mail providers defined, aborting...\")\n return\n\n for params in settings.providers['email.send']:\n params['provider'](message)\n break", "def send(\r\n self,\r\n to = '', #list of email addresses - Required\r\n subject='None', #message's subject - Required\r\n message_text='None', #message body in plain text - Required\r\n message_html=None, #message body in html - Optional\r\n attachments=None, #list of truples [(filename, file_contents)] - Optional\r\n cc = None, #list of email addresses to CC message to\r\n bcc = None, #list of email addresses to BCC message to\r\n reply_to = None, #single email address to have replies send to\r\n ): \r\n if not isinstance(to, list):\r\n to = [to]\r\n\r\n try:\r\n if self.settings.private.email_server == 'gae':\r\n from google.appengine.api import mail\r\n #untested on GAE, but in theory should work\r\n #http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\r\n mail.send_mail(sender=self.settings.private.email_sender, to=to,\r\n subject=subject, body=message_text, html=message_html, attachments=attachments, cc = cc,\r\n bcc = bcc, reply_to = reply_to)\r\n else:\r\n\r\n msg = self.buildMIME(sender = self.settings.private.email_sender,\r\n recipients = to, subject = subject,\r\n 
message_text = message_text, message_html = message_html,\r\n attachments = attachments,\r\n cc = cc, bcc = bcc, reply_to = reply_to)\r\n #print 'message'+msg.as_string()\r\n #Build MIME body\r\n (host, port) = self.settings.mail.server.split(':')\r\n\r\n if self.settings.mail.ssl: \r\n try:\r\n server = smtplib.SMTP_SSL(host, port)\r\n except:\r\n # ERROR python <= 2.6\r\n pass\r\n else:\r\n server = smtplib.SMTP(host, port)\r\n\r\n if self.settings.mail.login:\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n if self.settings.mail.use_tls:\r\n try:\r\n server.starttls()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in STARTTLS\")\r\n except SMTPException:\r\n logger.info(\"Server does not support TLS\")\r\n\r\n except RuntimeError:\r\n logger.info(\"Python version does not support TLS (<= 2.6?)\")\r\n\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n (username, password) = self.settings.mail.login.split(':')\r\n try:\r\n server.login(username, password)\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in LOGIN\")\r\n\r\n except SMTPAuthenticationError:\r\n logger.info(\"Invalid username/password combination\")\r\n\r\n except SMTPException:\r\n logger.info(\"SMTP error in login\")\r\n\r\n try:\r\n server.sendmail(self.settings.private.email_sender, to, msg.as_string())\r\n server.quit()\r\n\r\n except SMTPRecipientsRefused:\r\n logger.info(\"All recipients were refused. Nobody got the mail.\")\r\n\r\n except SMTPHeloError:\r\n logger.info(\"The server didn't reply properly to the HELO greeting.\")\r\n\r\n except SMTPSenderRefused:\r\n logger.info(\"The server didn't accept the from_addr.\")\r\n\r\n except SMTPDataError:\r\n logger.info(\"The server replied with an unexpected error code (other than a refusal of a recipient).\")\r\n \r\n except Exception, e:\r\n return False\r\n return True", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def __send_email_to_user(self, template_name, email_subject, book, redirect, date=None):\r\n ctx = {\r\n 'date': datetime.now().date(),\r\n 'user': \"{} {}\".format(self.request.user.first_name, self.request.user.last_name),\r\n 'book': \"{} - {}\".format(book.title, book.author),\r\n 'profile_url': self.request.build_absolute_uri(reverse(redirect)),\r\n 'cons_res_date': date\r\n }\r\n\r\n html_content = render_to_string(\r\n 'users/emails/{}.html'.format(template_name), ctx)\r\n # Strip the html tag. 
So people can see the pure text at least.\r\n text_content = strip_tags(html_content)\r\n\r\n msg = EmailMultiAlternatives(\r\n email_subject, text_content, \"[email protected]\", [\r\n self.request.user.email])\r\n msg.attach_alternative(html_content, \"text/html\")\r\n msg.send()", "def recs():\n click.echo(\"Emailing recommendations to destination...\")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n message: str = recs_to_message(res, next_day)\n settings: Optional[Settings] = dio_dir.get_settings()\n assert settings is not None, \"Have to setup diogenes to get emails. Run `dio setupemail`\"\n send_message(message, today, settings)\n click.echo(\"Recommendations emailed!\")", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def receive(self, email):\n self.inbox += email", "def run():\n listen_active_email_channel()", "def doctest_BackgroundWorkerThread_run():", "def send_email(subject, sender, recipients, text_body, html_body):\n msg = Message(subject=subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n mail.send(msg)", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def email_comments(email_target, comments):\n\n smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # Connect to gmail stmp provider, 'smtp.gmail.com'\n smtpObj.ehlo() # Say \"hello\" to the server\n smtpObj.starttls() # Connect to port 587 (TLS encryption)\n\n from_addr = email()\n secret_password = email_password()\n smtpObj.login(config.emails['gmail'], secret_password) #Log in to access email\n # - Write message\n msg = '''Subject: Submission to my webpage\\n\n Hi! \\n\\n\n Thank you for submitting a message on my webpage. \\n\n I will try and get back to you.\\n\\n\\n\n\n --- Copy of submission ---\\n\\n\n Comments: '{}' '''.format(comments)\n # - Write message\n\n #Send the mail\n \n smtpObj.sendmail(from_addr=from_addr,\n to_addrs = email_target,\n msg = msg)", "def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )", "def send_msg(self, my_queue, my_msg):", "def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):\r\n # Get information from current task's request:\r\n task_id = subtask_status.task_id\r\n\r\n try:\r\n course_email = CourseEmail.objects.get(id=email_id)\r\n except CourseEmail.DoesNotExist as exc:\r\n log.exception(\"Task %s: could not find email id:%s to send.\", task_id, email_id)\r\n raise\r\n\r\n # Exclude optouts (if not a retry):\r\n # Note that we don't have to do the optout logic at all if this is a retry,\r\n # because we have presumably already performed the optout logic on the first\r\n # attempt. 
Anyone on the to_list on a retry has already passed the filter\r\n # that existed at that time, and we don't need to keep checking for changes\r\n # in the Optout list.\r\n if subtask_status.get_retry_count() == 0:\r\n to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)\r\n subtask_status.increment(skipped=num_optout)\r\n\r\n course_title = global_email_context['course_title']\r\n subject = \"[\" + course_title + \"] \" + course_email.subject\r\n from_addr = _get_source_address(course_email.course_id, course_title)\r\n\r\n course_email_template = CourseEmailTemplate.get_template()\r\n try:\r\n connection = get_connection()\r\n connection.open()\r\n\r\n # Define context values to use in all course emails:\r\n email_context = {'name': '', 'email': ''}\r\n email_context.update(global_email_context)\r\n\r\n while to_list:\r\n # Update context with user-specific values from the user at the end of the list.\r\n # At the end of processing this user, they will be popped off of the to_list.\r\n # That way, the to_list will always contain the recipients remaining to be emailed.\r\n # This is convenient for retries, which will need to send to those who haven't\r\n # yet been emailed, but not send to those who have already been sent to.\r\n current_recipient = to_list[-1]\r\n email = current_recipient['email']\r\n email_context['email'] = email\r\n email_context['name'] = current_recipient['profile__name']\r\n\r\n # Construct message content using templates and context:\r\n plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)\r\n html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)\r\n\r\n # Create email:\r\n email_msg = EmailMultiAlternatives(\r\n subject,\r\n plaintext_msg,\r\n from_addr,\r\n [email],\r\n connection=connection\r\n )\r\n email_msg.attach_alternative(html_msg, 'text/html')\r\n\r\n # Throttle if we have gotten the rate limiter. This is not very high-tech,\r\n # but if a task has been retried for rate-limiting reasons, then we sleep\r\n # for a period of time between all emails within this task. Choice of\r\n # the value depends on the number of workers that might be sending email in\r\n # parallel, and what the SES throttle rate is.\r\n if subtask_status.retried_nomax > 0:\r\n sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)\r\n\r\n try:\r\n log.debug('Email with id %s to be sent to %s', email_id, email)\r\n\r\n with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):\r\n connection.send_messages([email_msg])\r\n\r\n except SMTPDataError as exc:\r\n # According to SMTP spec, we'll retry error codes in the 4xx range. 
5xx range indicates hard failure.\r\n if exc.smtp_code >= 400 and exc.smtp_code < 500:\r\n # This will cause the outer handler to catch the exception and retry the entire task.\r\n raise exc\r\n else:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n except SINGLE_EMAIL_FAILURE_ERRORS as exc:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n else:\r\n dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])\r\n if settings.BULK_EMAIL_LOG_SENT_EMAILS:\r\n log.info('Email with id %s sent to %s', email_id, email)\r\n else:\r\n log.debug('Email with id %s sent to %s', email_id, email)\r\n subtask_status.increment(succeeded=1)\r\n\r\n # Pop the user that was emailed off the end of the list only once they have\r\n # successfully been processed. (That way, if there were a failure that\r\n # needed to be retried, the user is still on the list.)\r\n to_list.pop()\r\n\r\n except INFINITE_RETRY_ERRORS as exc:\r\n dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_nomax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_nomax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True\r\n )\r\n\r\n except LIMITED_RETRY_ERRORS as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # Errors caught are those that indicate a temporary condition that might succeed on retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n except BULK_EMAIL_FAILURE_ERRORS as exc:\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n num_pending = len(to_list)\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with \"fatal\" exception. %d emails unsent.',\r\n task_id, email_id, num_pending)\r\n # Update counters with progress to date, counting unsent emails as failures,\r\n # and set the state to FAILURE:\r\n subtask_status.increment(failed=num_pending, state=FAILURE)\r\n return subtask_status, exc\r\n\r\n except Exception as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # These are unexpected errors. 
Since they might be due to a temporary condition that might\r\n # succeed on retry, we give them a retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',\r\n task_id, email_id)\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n else:\r\n # All went well. Update counters with progress to date,\r\n # and set the state to SUCCESS:\r\n subtask_status.increment(state=SUCCESS)\r\n # Successful completion is marked by an exception value of None.\r\n return subtask_status, None\r\n finally:\r\n # Clean up at the end.\r\n connection.close()", "async def send(self):", "def emit(self, record):\r\n try:\r\n port = self.mailport\r\n if not port:\r\n port = smtplib.SMTP_PORT\r\n smtp = smtplib.SMTP(self.mailhost, port)\r\n smtp.login(self.username, self.password)\r\n msg = self.format(record)\r\n msg = \"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\nDate: %s\\r\\n\\r\\n%s\" % (\r\n self.fromaddr,\r\n ','.join(self.toaddrs),\r\n self.getSubject(record),\r\n formatdate(), msg)\r\n smtp.sendmail(self.fromaddr, self.toaddrs, msg)\r\n smtp.quit()\r\n except (KeyboardInterrupt, SystemExit):\r\n raise\r\n except:\r\n self.handleError(record)", "def send_email(self, recipient, subject, body, bcc_email):\n if bcc_email:\n msg = EmailMultiAlternatives(subject, subject, self.sender, recipient, bcc=bcc_email)\n else:\n msg = EmailMultiAlternatives(subject, subject, self.sender, recipient)\n\n msg.attach_alternative(body, \"text/html\")\n\n if self.file:\n for file in self.file:\n msg.attach_file(file)\n msg.send()", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def replyMessage(_email, _name):\n\n _mailer = app.config['MAIL_USERNAME']\n mesg = Message(\"Message Received\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[_email])\n mesg.body = f'''Hello {_name},\nThe message you sent to Randy has been received. 
\nRandy will contact you within 24 hours.\nThank you.\n\nRegards,\nRandy\n\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(mesg)\n return 'OK'", "def add_manualpost_email(request, submission_id=None, access_token=None):\n\n if request.method == 'POST':\n try:\n button_text = request.POST.get('submit', '')\n if button_text == 'Cancel':\n return redirect(\"submit/manual_post.html\")\n \n form = SubmissionEmailForm(request.POST)\n if form.is_valid():\n submission_pk = form.cleaned_data['submission_pk']\n message = form.cleaned_data['message']\n #in_reply_to = form.cleaned_data['in_reply_to']\n # create Message\n \n if form.cleaned_data['direction'] == 'incoming':\n msgtype = 'msgin'\n else:\n msgtype = 'msgout'\n \n submission, submission_email_event = (\n add_submission_email(request=request,\n remote_ip=request.META.get('REMOTE_ADDR', None),\n name = form.draft_name,\n rev=form.revision,\n submission_pk = submission_pk,\n message = message,\n by = request.user.person,\n msgtype = msgtype) )\n \n messages.success(request, 'Email added.')\n \n try:\n draft = Document.objects.get(name=submission.name)\n except Document.DoesNotExist:\n # Assume this is revision 00 - we'll do this later\n draft = None\n \n if (draft != None):\n e = AddedMessageEvent(type=\"added_message\", doc=draft)\n e.message = submission_email_event.submissionemailevent.message\n e.msgtype = submission_email_event.submissionemailevent.msgtype\n e.in_reply_to = submission_email_event.submissionemailevent.in_reply_to\n e.by = request.user.person\n e.desc = submission_email_event.desc\n e.time = submission_email_event.time\n e.save()\n \n return redirect(\"ietf.submit.views.manualpost\")\n except ValidationError as e:\n form = SubmissionEmailForm(request.POST)\n form._errors = {}\n form._errors[\"__all__\"] = form.error_class([\"There was a failure uploading your message. (%s)\" % e.message])\n else:\n initial = {\n }\n\n if (submission_id != None):\n submission = get_submission_or_404(submission_id, access_token)\n initial['name'] = \"{}-{}\".format(submission.name, submission.rev)\n initial['direction'] = 'incoming'\n initial['submission_pk'] = submission.pk\n else:\n initial['direction'] = 'incoming'\n \n form = SubmissionEmailForm(initial=initial)\n\n return render(request, 'submit/add_submit_email.html',dict(form=form))", "def send_msg(self, text):\n\n if self.__webex_flag__ == 1:\n self.__send_msg_by_webex__(text)\n\n if self.__webex_flag__ == 1:\n self.__send_msg_by_mail__(text)\n\n return", "def _text(self, fromwhom, number, text):\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(self._smsaddress, self._smspw)\n server.sendmail(str(fromwhom), '{}@vtext.com'.format(number),\n str(text))\n server.quit()", "def email(self):\r\n webbrowser.open(\"mailto: [email protected]\")" ]
[ "0.7538517", "0.6329317", "0.5854818", "0.57976854", "0.57880086", "0.57461154", "0.57316077", "0.5677812", "0.56468177", "0.56263226", "0.5602722", "0.5572509", "0.55519354", "0.554117", "0.55247223", "0.5500545", "0.546613", "0.5464859", "0.5459716", "0.5438996", "0.5438996", "0.54373986", "0.54294187", "0.5409059", "0.5405909", "0.53975236", "0.5389291", "0.5387345", "0.5372609", "0.5349537", "0.5340281", "0.53348434", "0.53325796", "0.5304631", "0.5292836", "0.5289167", "0.5286645", "0.5282648", "0.5278866", "0.52765465", "0.52755755", "0.5275339", "0.5268292", "0.52652943", "0.5253211", "0.52525115", "0.5245108", "0.52446663", "0.5242845", "0.5241461", "0.5237603", "0.5227862", "0.5216552", "0.52147985", "0.5209471", "0.520718", "0.51993304", "0.51966166", "0.51947814", "0.5192882", "0.51922786", "0.517298", "0.51568544", "0.5154326", "0.51480395", "0.51471806", "0.5146665", "0.51444066", "0.51433", "0.5130495", "0.51289916", "0.51171833", "0.5111379", "0.5108509", "0.5107929", "0.51077175", "0.51025695", "0.51008105", "0.50954044", "0.5094102", "0.50927955", "0.5091875", "0.50851023", "0.50830585", "0.5081731", "0.50700635", "0.50644815", "0.50604093", "0.50599885", "0.50596577", "0.5049279", "0.50487316", "0.5044817", "0.5044207", "0.5040452", "0.5039303", "0.5035962", "0.5029584", "0.5013288", "0.5006984" ]
0.662225
1
Get the list of url patterns for this view.
def get_urls(self): return patterns('', url(r'^$', self.list, name="%s_document_list" % self.name), url(r'^upload/$', self.new_upload, name="%s_document_upload" % self.name), url(r'^([^\/]+)/download/$', self.download, name="%s_document_download" % self.name), url(r'^([^\/]+)/send/$', self.send, name="%s_document_send" % self.name), url(r'^([^\/]+)/send/ajax/$', self.send_ajax, name="%s_document_send_ajax" % self.name), url(r'^([^\/]+)/detail/$', self.detail, name="%s_document_detail" % self.name), url(r'^([^\/]+)/view/$', self.view, name="%s_document_view" % self.name), url(r'^([^\/]+)/delete/$', self.delete, name="%s_document_delete" % self.name), url(r'^(?P<object_id>([^\/]+))/detail/(?P<direction>up|down|clear)vote/?$', self.vote, name="%s_document_vote" % self.name), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urlpatterns(self) -> list:\n raise NotImplementedError()", "def get_urlpatterns(cls):\n cls.validate_urlpattern_with_options()\n return map(lambda s: s.format(**cls.urlpattern_options), cls.urlpatterns)", "def get_urls(self):\n return patterns('')", "def urls(self):\n patterns = []\n for sitecomp in self.modules():\n patterns.append(sitecomp.urls)\n pass\n return patterns", "def list_patterns(self) -> localedata.LocaleDataDict:\n return self._data['list_patterns']", "def patterns(self):\n return self._pattern_reg", "def urls(self):\r\n urls = []\r\n\r\n for url_name in sorted(self.resources.keys()):\r\n\r\n resource = self.resources[url_name]\r\n urls.append(resource.as_url(\r\n api=self,\r\n name_prefix='-'.join(\r\n (self.prefix, self.str_version)).strip('-'),\r\n url_prefix=self.str_version\r\n ))\r\n\r\n return patterns(self.prefix, *urls)", "def getDjangoURLPatterns():\n\n patterns = [\n (r'gsoc/tasks/assignslots/assign$',\n r'soc.modules.gsoc.tasks.slot_assignment.assignSlots'),\n (r'gsoc/tasks/assignslots/program$',\n r'soc.modules.gsoc.tasks.slot_assignment.assignProgramSlots'),\n ]\n\n return patterns", "def urls(self):\n return self._list_urls()", "def urlpatterns(self):\n regex = r'^%s/' % self.label\n urls_module = '%s.urls' % self.name\n ns = self.label\n return [url(regex, include(urls_module, namespace=ns, app_name=ns))]", "def getDjangoURLPatterns():\n\n patterns = [\n (r'^tasks/role_conversion/update_references',\n 'soc.tasks.updates.role_conversion.updateReferences'),\n (r'^tasks/role_conversion/update_project_references',\n 'soc.tasks.updates.role_conversion.updateStudentProjectReferences'),\n (r'^tasks/role_conversion/update_proposal_references',\n 'soc.tasks.updates.role_conversion.updateStudentProposalReferences'),\n (r'^tasks/role_conversion/update_roles$',\n 'soc.tasks.updates.role_conversion.updateRoles'),\n (r'^tasks/role_conversion/update_mentors$',\n 'soc.tasks.updates.role_conversion.updateMentors'),\n (r'^tasks/role_conversion/update_org_admins$',\n 'soc.tasks.updates.role_conversion.updateOrgAdmins'),\n (r'^tasks/role_conversion/update_students$',\n 'soc.tasks.updates.role_conversion.updateStudents'),\n (r'^tasks/role_conversion/update_hosts$',\n 'soc.tasks.updates.role_conversion.updateHosts'),\n ]\n\n return patterns", "def pattern_name_list(self):\n return list(self._pattern_reg.keys())", "def urls(self):\n return lambda : self.config.urls(active_only=True)", "def registered_urls(self):\n from pkg_resources import iter_entry_points\n\n entries = ['Priority', 'EP Name', 'Module', 'Class']\n for ep in iter_entry_points('appurl.urls'):\n c = ep.load()\n entries.append([c.match_priority, ep.name, ep.module_name, c.__name__, ])\n\n return entries", "def all_urls(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/allUrls/')))", "def url_bases(self) -> List[str]:\n return self._url_module.url_bases", "def urls(self) -> list[str]:\r\n ...", "def get_urls():\r\n return []", "def static_routes(self):\n return self._static_routes", "def create_url_rules(self):\n return []", "def listPatterns(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.listPatterns(self, False)", "def get_view_endpoints(self):\n return []", "def base_urls(self):\n # Due to the way Django parses URLs, ``get_multiple`` won't work without\n # a trailing slash.\n return [\n url(r\"^(?P<resource_name>%s)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), 
name=\"api_dispatch_list\"),\n url(r\"^(?P<resource_name>%s)/schema%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name=\"api_get_schema\"),\n url(r\"^(?P<resource_name>%s)/set/(?P<slug_list>[\\w\\d_-]+)/$\" % self._meta.resource_name, self.wrap_view('get_multiple'), name=\"api_get_multiple\"),\n url(r\"^(?P<resource_name>%s)/(?P<slug>[\\w\\d_-]+)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]", "def routes(self):\n return self._routes", "def site_patterns(*args):\n pattern_list = args\n return [SiteRegexURLResolver('', pattern_list)]", "def load_url_pattern_names(self, patterns):\n URL_NAMES = []\n for pat in patterns:\n if pat.__class__.__name__ == 'RegexURLResolver':\n # load patterns from this RegexURLResolver\n self.load_url_pattern_names(pat.url_patterns)\n elif pat.__class__.__name__ == 'RegexURLPattern':\n # load name from this RegexURLPattern\n if pat.name is not None and pat.name not in URL_NAMES:\n URL_NAMES.append(pat.name)\n return URL_NAMES", "def discover_urls():\n urlpatterns = []\n\n for app in settings.INSTALLED_APPS:\n try:\n _temp = __import__(f'{app}.urls', globals(), locals(), ['urlpatterns'], 0)\n urlpatterns += _temp.urlpatterns\n\n except ModuleNotFoundError:\n pass\n\n return urlpatterns", "def patterns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"patterns\")", "def patterns(self: TokenMatcher) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n p = {\"label\": label, \"pattern\": pattern, \"type\": self.type}\n all_patterns.append(p)\n return all_patterns", "def djangoURLPatterns(self):\n patterns = [url(r'tasks/gsoc/surveys/send_reminder/spawn$',\n self.spawnRemindersForProjectSurvey,\n name='spawn_survey_reminders'),\n url(r'tasks/gsoc/surveys/send_reminder/send$',\n self.sendSurveyReminderForProject)]\n return patterns", "def get_urls(self):\r\n from django.conf.urls.defaults import patterns, url\r\n\r\n def wrap(view):\r\n def wrapper(*args, **kwargs):\r\n return self.admin_site.admin_view(view)(*args, **kwargs)\r\n return update_wrapper(wrapper, view)\r\n\r\n info = (self.model._meta.app_label, self.model._meta.module_name)\r\n\r\n return patterns(\"\",\r\n url(r\"^panel/$\",\r\n wrap(self.panel_view),\r\n name=\"insert_%s_%s_panel\" % info),\r\n url(r\"^list/$\",\r\n wrap(self.list_view),\r\n name=\"insert_%s_%s_list\" % info),\r\n url(r\"^add_minimal/$\",\r\n self.add_view,\r\n name=\"insert_%s_%s_add\" % info),\r\n url(r\"^(.+)/detail/$\",\r\n wrap(self.detail_view),\r\n name=\"insert_%s_%s_detail\" % info),\r\n url(r\"^(.+)/render/$\",\r\n wrap(self.render_view),\r\n name=\"insert_%s_%s_render\" % info),\r\n url(r\"^(.+)/delete/$\",\r\n wrap(self.delete_view),\r\n name=\"insert_%s_%s_delete\" % info),\r\n )", "def urls(self) -> str:\n return self._data['urls']", "def listtypes(self):\n\n pattern_types = [i for i in sorted(self._allowed_patterns.iterkeys())]\n\n return pattern_types", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls", "def getViews(self):\n return list(self.__views)", "def list(self):\n path = \"authSettings/exemptedUrls\"\n return self._session.get(path)", "def get_urls(self):\n return patterns('',\n 
#url(r'^$', self.new_upload, name=\"%s_document_list\" % self.name),\n url(r'^([^\\/]+)/edit/$', self.edit, name=\"%s_document_edit\" % self.name),\n url(r'^([\\w\\d-]+)/confirm/$', self.confirm, name=\"%s_document_confirm\" % self.name)\n )", "def get_pattern_types(self) -> List[PatternType]:\n return list(self.pattern_types)", "def base_urls(self):\n return [\n url(r\"^(?P<resource_name>%s)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name=\"api_dispatch_list\"),\n url(r\"^(?P<resource_name>%s)/schema%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name=\"api_get_schema\"),\n url(r\"^(?P<resource_name>%s)/set/(?P<pk_list>\\w[\\w;-]*)/$\" % self._meta.resource_name, self.wrap_view('get_multiple'), name=\"api_get_multiple\"),\n url(r\"^(?P<resource_name>%s)/(?P<pk>\\w[\\w-]*)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]", "def urls(self):\n if not self._urls:\n urls = []\n for host in self.hosts:\n # Must end without a slash\n urls.append('http://%(host)s:%(port)s%(path)s' % {\n 'host': host,\n 'port': self.port,\n 'path': self.path,\n })\n self._urls = urls\n return self._urls", "def getDjangoURLPatterns():\n\n patterns = [\n (r'^tasks/project_conversion/update_projects',\n 'soc.tasks.updates.project_conversion.updateProjects'),\n\n ]\n\n return patterns", "def getViews(self):\n return list(self.__views.keys())", "def getDjangoURLPatterns():\n\n patterns = [\n (r'^tasks/gci/task/bulk_create_tasks$',\n 'soc.modules.gci.tasks.bulk_create.bulkCreateTasks'),]\n\n return patterns", "def getRoutes(self):\n pass", "def get_urls(self):\n from django.conf.urls import patterns, url\n\n def wrap(view):\n def wrapper(*args, **kwargs):\n return self.admin_site.admin_view(view)(*args, **kwargs)\n return update_wrapper(wrapper, view)\n\n info = self.model._meta.app_label, self.model._meta.module_name\n\n urlpatterns = patterns(\n '',\n url(r'^(\\d+)/add/objectives/$',\n wrap(self.add_objectives),\n name='%s_%s_objectives' % info),\n url(r'^(\\d+)/regional_objective/(\\d+)/delete$',\n wrap(self.delete_regional_objective),\n name='%s_%s_delete_regional_objective' % info),\n url(r'^(\\d+)/summary/$',\n wrap(self.summary),\n name='%s_%s_summary' % info),\n url(r'^(\\d+)/summary/pre/$',\n wrap(self.pre_summary),\n name='%s_%s_pre_summary' % info),\n url(r'^(\\d+)/summary/day/$',\n wrap(self.day_summary),\n name='%s_%s_day_summary' % info),\n url(r'^(\\d+)/summary/post/$',\n wrap(self.post_summary),\n name='%s_%s_post_summary' % info),\n url(r'^(\\d+)/summary/pdf/$',\n wrap(self.pdf_summary),\n name='%s_%s_pdf_summary' % info),\n url(r'^(\\d+)/download/$',\n wrap(self.pdflatex),\n name='%s_%s_download' % info),\n url(r'^(\\d+)/export/$',\n wrap(self.pdflatex),\n name='%s_%s_export' % info),\n url(r'^(\\d+)/cbas/$',\n wrap(self.corporate_approve),\n name='%s_%s_corporate_approve' % info),\n url(r'^(\\d+)/endorsement/$',\n wrap(self.endorse),\n name='%s_%s_endorse' % info),\n url(r'^(\\d+)/endorsement/(\\d+)/delete$',\n wrap(self.delete_endorsement),\n name='%s_%s_delete_endorsement' % info),\n url(r'^(\\d+)/endorsement/officers$',\n wrap(self.endorsing_roles),\n name='%s_%s_endorsing_roles' % info),\n url(r'^(\\d+)/approval/$',\n wrap(self.approve),\n name='%s_%s_approve' % info),\n url(r'^(\\d+)/closure/$',\n wrap(self.close),\n name='%s_%s_close' % info),\n url(r'^(\\d+)/sitemap/$',\n wrap(self.sitemap),\n name='%s_%s_sitemap' % info),\n )\n\n return 
urlpatterns + super(PrescriptionAdmin, self).get_urls()", "def get_patterns(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-patterns\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"get_patterns\",\n keywords=kwargs,\n params=parameters\n )", "def get_urls(self, **kwargs):\n pass # pragma: no cover", "def patterns(self) -> List[AttributeRulerPatternType]:\n all_patterns = []\n for i in range(len(self.attrs)):\n p = {}\n p[\"patterns\"] = self.matcher.get(str(i))[1]\n p[\"attrs\"] = self._attrs_unnormed[i] # type: ignore\n p[\"index\"] = self.indices[i] # type: ignore\n all_patterns.append(p)\n return all_patterns # type: ignore[return-value]", "def views(self):\n return self._views", "def patterns(self) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self.fuzzy_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern.text, \"type\": \"fuzzy\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n for label, patterns in self.regex_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern, \"type\": \"regex\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n return all_patterns", "def urlpatterns(self):\n urlpatterns = [\n path(self.create_view.endpoint.url(), self.create_view.view(), name='create'),\n path(self.delete_view.endpoint.url(URL_PARAM), self.delete_view.view(), name='delete'),\n path(self.get_view.endpoint.url(URL_PARAM), self.get_view.view(), name='get'),\n path(self.get_or_create_view.endpoint.url(), self.get_or_create_view.view(), name='get_or_create'),\n path(self.list_view.endpoint.url(), self.list_view.view(), name='list'),\n path(self.update_view.endpoint.url(URL_PARAM), self.update_view.view(), name='update'),\n ]\n for method_view in self.method_views:\n # Pass the method view this ModelType's serializer class.\n method_view.model_serializer_cls = self.serializer.base_serializer_cls\n urlpatterns.append(\n path(method_view.endpoint.url(URL_PARAM), method_view.view(), name=method_view.name),\n )\n for static_method_view in self.static_method_views:\n static_method_view.model_type_cls = self.__class__\n urlpatterns.append(\n path(static_method_view.endpoint.url(), static_method_view.view(), name=static_method_view.name),\n )\n for property_view in self.property_views:\n urlpatterns.append(\n path(property_view.endpoint.url(URL_PARAM), property_view.view(), name=property_view.name),\n )\n return urlpatterns", "def get_flask_endpoints(self):\n urls = self.endpoints.keys()\n return urls", "def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]", "def get_uri_schemes(self):\n return list(sorted(self.backends.with_playlists.keys()))", "def url_assets(self):\n return self.assets(asset_type='URL')", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n 
return rules", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def filenamePatterns(self):\n return ['*.'+e for e in self.filenameExtensions]", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def get_layer_urls(self):\n urls = []\n\n if getattr(self, 'additional_domains'):\n map(urls.append, (domain for domain in self.additional_domains.split(\";\") if domain))\n\n return urls", "def urlset(self):\n return self._urlset", "def resource_id_patterns(self) -> Sequence[str]:\n return pulumi.get(self, \"resource_id_patterns\")", "def getInfixPatterns(self):\n return self.getOrDefault(\"infixPatterns\")", "def getURLs():", "def get_static_regexps():\n handlers = modules_util.module_yaml('default')['handlers']\n retval = set()\n\n for handler in handlers:\n if handler.GetHandlerType() == 'static_dir':\n retval.add('^' + handler.url + '/')\n elif handler.GetHandlerType() == 'static_files':\n retval.add('^' + handler.url + '$')\n\n return sorted(retval)", "def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers", "def get_patterns(self):\n params = {\n \"query\" : \"\"\n }\n\n resp_from_server = self.get_api_result(self.URL_RETURNS_MINI_PATTERNS, params)\n\n if resp_from_server.get('status'):\n return resp_from_server\n else:\n patterns_dicts = self.get_mini_patterns_dict(resp_from_server)\n\n return patterns_dicts", "def get_patterns(\n self, pipeline: str, label: str, key: str\n ) -> List[Pattern]:", "def list_routes(app):\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, rule))\n output.append(line)\n\n return sorted(output)", "def get_uri_schemes(self) -> list[UriScheme]:\n return sorted(self.backends.with_playlists.keys())", "def website_routing_rules(self) -> typing.Optional[typing.List[\"RoutingRule\"]]:\n return self._values.get('website_routing_rules')", "def urls(self):\n base_url = r'^{}/'.format(self.label)\n return SiteModuleURLResolver(base_url, self.get_urls(), module=self, app_name=self.label, namespace=self.label)", "def routes_available():\n return json.dumps(\n [\"%s\" % rule for rule in app.url_map.iter_rules()],\n indent=4,\n separators=(\",\", \": \"),\n )", "def get_endpoints(self, request):\n enumerator = self.endpoint_enumerator_class(\n self._gen.patterns, self._gen.urlconf, request=request)\n endpoints = enumerator.get_api_endpoints()\n view_paths = defaultdict(list)\n view_cls = {}\n for path, method, callback, decorators in reversed(endpoints):\n view = self.create_view(callback, method, request)\n path = self._gen.coerce_path(path, method, view)\n view_paths[path].append((method, view, decorators))\n view_cls[path] = callback.cls\n return {path: (view_cls[path], methods)\n for path, methods in view_paths.items()}", "def urlBars(self):\n urlBars = []\n for index in range(self.count()):\n urlBars.append(self.widget(index))\n return urlBars", "def list_routes():\n for rule in sorted(application.url_map.iter_rules(), key=lambda r: r.rule):\n print(\"{:10} {}\".format(\", \".join(rule.methods - set(['OPTIONS', 'HEAD'])), 
rule.rule))", "def get_urls(self) -> Dict[str, str]:\n return {}", "def endpoints(self):\n return self.settings[\"endpoints\"]", "def url_paths(self) -> Dict[str, str]:\n unformatted_paths = self._url_module.url_paths\n\n paths = {}\n for unformatted_path, handler in unformatted_paths.items():\n path = unformatted_path.format(\"\")\n paths[path] = handler\n\n return paths", "def getPossibleMatchesList(self):\n return [p for p in self._patterns if p.startswith(self._keyCode)]", "def list_routes():\n import urllib\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\n \"{:35s} {:35s} {}\".format(\n rule.endpoint,\n methods,\n url\n )\n )\n output.append(line)\n\n for line in sorted(output):\n print(line)", "def path_groups(self):\n return self._path_groups", "def ListUrlEntries(self):\n return [WprUrlEntry(request, self._http_archive[request])\n for request in self._http_archive.get_requests()]", "def getBookmarkableURLs(self):\n return getattr(CONFIG, 'zmi_bookmarkable_urls', True)", "def npatterns(self):\n return len(self.patterns)", "def getFrequentPatterns(self):\n return self.finalPatterns", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def get_uri_schemes(self) -> list[backend.UriScheme]:\n futures = [b.uri_schemes for b in self.backends]\n results = pykka.get_all(futures)\n uri_schemes = itertools.chain(*results)\n return sorted(uri_schemes)", "def get_urls(self):\r\n urls = super(ServeeAdminSite, self).get_urls()\r\n from django.conf.urls import patterns, url, include\r\n\r\n # Custom Views\r\n for path, view, name in self.custom_views:\r\n urls += patterns('',\r\n url(r'^%s$' % path, self.admin_view(view)),\r\n )\r\n\r\n # Inserts\r\n for insert_model_lookup, insert in self.insert_classes.iteritems():\r\n urls += patterns(\"\",\r\n (r\"^insert/%s/%s/\" % (insert.model._meta.app_label, insert.model._meta.module_name), include(insert.urls))\r\n )\r\n return urls", "def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)", "def _filter_url_list(self, regex_pattern: str) -> None:\n matcher = re.compile(regex_pattern)\n filtered_list = []\n if self.url_list:\n for url in self.url_list:\n if matcher.search(url.url):\n filtered_list.append(url)\n self.url_list = filtered_list", "def load_url_pattern_names(patterns, include_with_args=True):\n global URL_NAMES\n for pat in patterns:\n if pat.__class__.__name__ == 'RegexURLResolver': # load patterns from this RegexURLResolver\n load_url_pattern_names(pat.url_patterns, include_with_args)\n elif pat.__class__.__name__ == 'RegexURLPattern': # load name from this RegexURLPattern\n if pat.name is not None and pat.name not in URL_NAMES:\n if include_with_args or re.compile(pat.regex).groups == 0:\n URL_NAMES.append(pat.name)\n return URL_NAMES", "def get_pattern(self):\n pattern = list()\n for item in self.gradual_items:\n pattern.append(item.gradual_item.tolist())\n return pattern", "def get_product_urls(self, page):\n return self.__url_list(page)", "def get_urls(r):\n url_list = 
find_urls(r)\n url_list += find_tag_urls(r)\n return set(url_list)", "def filteredUrls(pattern, view, kwargs=None, name=None):\n results = [(pattern, view, kwargs, name)]\n tail = ''\n mtail = re.search('(/+\\+?\\\\*?\\??\\$?)$', pattern)\n if mtail:\n tail = mtail.group(1)\n pattern = pattern[:len(pattern) - len(tail)]\n for filter in ('/state/(?P<state>\\w+)',\n '/group/(?P<group>[^/]+)',\n '/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)',\n '/server/(?P<server>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)'):\n results += [(pattern + filter + tail, view, kwargs)]\n return results", "def get_links(self) -> List[str]:\n return self.__links", "def endpoints(self):\n return self[\"endpoints\"]", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def include_regexes(self) -> Optional[List[str]]:\n return pulumi.get(self, \"include_regexes\")" ]
[ "0.8027784", "0.79029524", "0.78296137", "0.7634182", "0.69524354", "0.6944128", "0.67643464", "0.6622399", "0.6592495", "0.6579387", "0.653932", "0.64818805", "0.6450613", "0.6444061", "0.6438293", "0.64050204", "0.6334296", "0.6327021", "0.63207394", "0.62634665", "0.6236464", "0.62105787", "0.62073463", "0.6197516", "0.6177923", "0.6176312", "0.6153545", "0.6152025", "0.6139003", "0.6134816", "0.61301506", "0.612044", "0.61150575", "0.6085081", "0.6078764", "0.60754097", "0.6040245", "0.6033365", "0.6018508", "0.60129577", "0.6002511", "0.6001353", "0.5994349", "0.5943414", "0.58685905", "0.58581686", "0.58491284", "0.5799974", "0.57935125", "0.57879645", "0.5778435", "0.57726085", "0.57622063", "0.57568926", "0.57122415", "0.5671434", "0.56578475", "0.56574583", "0.56560963", "0.5642009", "0.5629165", "0.5606653", "0.5605557", "0.5589418", "0.55677533", "0.5565255", "0.55628467", "0.554645", "0.55408645", "0.55331606", "0.5528339", "0.551944", "0.5511675", "0.5506314", "0.54851115", "0.54715", "0.5469348", "0.54634297", "0.54587704", "0.545845", "0.5455274", "0.5434976", "0.53956443", "0.5394836", "0.5382178", "0.5376494", "0.5372844", "0.5356842", "0.53439444", "0.53395283", "0.5316361", "0.5314834", "0.5286448", "0.52851886", "0.52623683", "0.52436614", "0.5229272", "0.52204156", "0.52127165", "0.5210167" ]
0.6526326
11
Send the specified document to the user's email address (HTML version).
def send(self, request, id, tribe_slug): tribe = get_object_or_404(Tribe, slug=tribe_slug) document = self.get_document(id, tribe_slug) form = self._set_user_email_address(request) email = self._get_user_email_address(request) if form or not email: return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request)) # NB: Temporarily disabling actual email sending for development #email_document(document, to=[email], subject='Document: %s' % document.title) print "Sending email to %s" % email # Send a signal to let everyone know about this document interaction document_interaction.send(sender=self, document=document, mode="sent", request=request, recipient=email) return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()", "def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()", "def email(self):\r\n webbrowser.open(\"mailto: [email protected]\")", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def __send_email_to_user(self, template_name, email_subject, book, redirect, date=None):\r\n ctx = {\r\n 'date': datetime.now().date(),\r\n 'user': \"{} {}\".format(self.request.user.first_name, self.request.user.last_name),\r\n 'book': \"{} - {}\".format(book.title, book.author),\r\n 'profile_url': self.request.build_absolute_uri(reverse(redirect)),\r\n 'cons_res_date': date\r\n }\r\n\r\n html_content = render_to_string(\r\n 'users/emails/{}.html'.format(template_name), ctx)\r\n # Strip the html tag. 
So people can see the pure text at least.\r\n text_content = strip_tags(html_content)\r\n\r\n msg = EmailMultiAlternatives(\r\n email_subject, text_content, \"[email protected]\", [\r\n self.request.user.email])\r\n msg.attach_alternative(html_content, \"text/html\")\r\n msg.send()", "def send_ajax(self, request, id, tribe_slug):\n\n document = self.get_document(id, tribe_slug)\n\n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if not email and not form:\n form = EmailForm()\n \n if form:\n content = '<form class=\"ajax_update_email\" action=\"%s\" method=\"post\">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)])\n content += '%s<input type=\"submit\" value=\"Send\"/></form>' % form['email']\n return HttpResponse(content)\n \n print \"Sending email to %s\" % email\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n\n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponse('Email sent to %s' % email)", "def send_email(subject, sender, recipients, text_body, html_body):\n\t\tmsg = Message(subject, sender=sender, recipients=recipients)\n\t\tmsg.body = text_body\n\t\tmsg.html = html_body\n\t\tmail.send(msg)", "def send_email(subject, sender, recipients, text_body, html_body):\n msg = Message(subject=subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n mail.send(msg)", "def email_out(row, html_file, subject, attach_folder, from_address):\n raw_html = open(html_file, 'r').read()\n html = raw_html.replace('$firstname', row['firstName'])\n html = html.replace('$surname', row['surname'])\n html = html.replace('$username', row['username'])\n html = html.replace('$password', row['newPassword'])\n html = html.replace('email', row['email'])\n attachments = []\n if os.path.exists(attach_folder):\n attachments = [os.path.join(os.getcwd(), attach_folder, fn) for fn in os.listdir(attach_folder)]\n o = Outlook()\n o.send(True, row['email'], subject, '', html, attachments=attachments, account_to_send_from=from_address)", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def send_text_to_user(user):", "def messageHtml(self,fileName,doc=None):\n if doc:\n fileName=os.path.join(doc,'doc',fileName)\n if fileName[0]=='/': fileName = 'file://'+fileName\n WebBrowser=self.get('WebBrowser')\n if WebBrowser==DEFAULT:\n if os.path.isfile(FIREFOX):\n os.system(\"%s %s &\"%(FIREFOX,fileName))\n else:\n webbrowser.open(fileName, 1)\n else:\n os.system(\"%s %s &\"%(WebBrowser,fileName))", "def mail_text(self, template_name, subject, send_to=None, user = None, **kwargs):\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n payload = self.render_lang(template_name, **kwargs)\n 
mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def mail_text(self, template_name, subject, send_to=None, user = None, **kwargs):\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n payload = self.render_lang(template_name, **kwargs)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def send_mail(email):\n return email.send()", "def send_html_email(email_subject,\n email_from, email_to,\n email_context, email_template):\n plain_email = loader.get_template(\"%s.txt\" % email_template)\n html_email = loader.get_template(\"%s.html\" % email_template)\n plain_content = plain_email.render(email_context)\n html_content = html_email.render(email_context)\n msg = EmailMultiAlternatives(\n email_subject,\n plain_content,\n email_from,\n email_to\n )\n msg.attach_alternative(html_content, \"text/html\")\n return msg.send()", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def send_mail(self, address, title, message):\n pass", "def send(self, recipient, template_path, context, subject, bcc_email=[]):\n\n body = self.email_render(template_path, context)\n self.send_email(recipient, subject, body, bcc_email)", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def _send(to, context, subject, from_email, template):\n body = render_to_string(template, context)\n msg = EmailMultiAlternatives(subject, body, from_email, to)\n msg.attach_alternative(body, \"text/html\")\n msg.send()", "def send_email(self, to, content):\r\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login(self.from_, self.password)\r\n server.sendmail(self.from_, to, content)\r\n speak(\"Email has been sent Succesfully!\")\r\n return \"None\"", "def send_email(self, message):\n pass", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def send_contact_us_receipt_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\",\n \"contact-us-receipt\", \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_html = render_template(\"main/contact-us-receipt/content.html\")\n msg = Message(\n f'[SetNow Support] Re: {data[\"subject\"]}',\n sender=\"[email protected]\",\n recipients=[data[\"email\"]],\n )\n 
msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)", "def send_contact_us_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\", \"contact-us\",\n \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_text = msg_text.format(**data)\n msg_html = render_template(\"main/contact-us/content.html\", **data)\n msg = Message(data[\"subject\"],\n sender=\"[email protected]\",\n recipients=[\"[email protected]\"])\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)", "def post(self):\n return send_email(request.args)", "def send_email():\n send_mail(\"You've got some problem.\", 'REPAIR IT', '[email protected]',\n ['[email protected]'], fail_silently=False,)", "def sendEmail(self, address_book: List[str], subject: str, html: str, attachmentFpath: Optional[str] = None) -> None:\n msg = MIMEMultipart()\n msg['From'] = self.mailAddress\n msg['To'] = ','.join(address_book)\n msg['Subject'] = subject\n # msg.attach(MIMEText(body, 'plain'))\n msg.attach(MIMEText(html, 'html'))\n \n if not(attachmentFpath == None) and not(attachmentFpath == \"\"):\n fPath = cast(str, attachmentFpath)\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(fPath, \"rb\").read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition',\n 'attachment; filename=\"{0}\"'.format(os.path.basename(fPath)))\n msg.attach(part)\n \n text = msg.as_string()\n # Send the message via our SMTP server\n s = smtplib.SMTP(self.host, self.port)\n s.starttls()\n s.login(self.username, self.password)\n s.sendmail(self.mailAddress, address_book, text)\n s.quit()", "def send_email(self, recipient, subject, body, bcc_email):\n if bcc_email:\n msg = EmailMultiAlternatives(subject, subject, self.sender, recipient, bcc=bcc_email)\n else:\n msg = EmailMultiAlternatives(subject, subject, self.sender, recipient)\n\n msg.attach_alternative(body, \"text/html\")\n\n if self.file:\n for file in self.file:\n msg.attach_file(file)\n msg.send()", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)", "def mail_send():\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n with open(f\"{report_file_path}/subject\", \"rb\") as subject_handler:\n subject = pickle.load(subject_handler)\n with open(f\"{report_file_path}/{'recipient'}\", \"rb\") as recipient_handler:\n recipient = pickle.load(recipient_handler)\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n try:\n if os.path.isfile(f\"{report_file_path}/mail_report.html\"):\n os.popen(\n f\"ssh -i {Common.get_config_value('build_server_pemfile')} \"\n f\"-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no\"\n f\" 
root@{Common.get_config_value('build_server_hostname')}\"\n f\" {Common.get_config_value('mail_script_location')}/\"\n f\"{Common.get_config_value('mail_script_name')} \"\n f\"{subject} {recipient}\"\n )\n Common.logger.info(\"Mail send successfully\")\n except Exception as ex:\n Common.logger.warning(f\"Mail sent failed due to exception: {ex}\")", "def send_mail(self, html):\n message = Message(\n From=self._config['mail']['address'], To=self._config['mail']['to'],\n Subject=self._config['mail']['subject']\n )\n message.Html = html\n return self.sender.send(message)", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def send(target: Owner, template: str, context: dict=None, session: SQLA_SESSION=None):\n wrapper = CURRENT_WRAPPER or DEFAULT_WRAPPER\n subject = wrapper.render(template, Layout.SUBJECT, target, context)\n body = wrapper.render(template, Layout.BODY, target, context)\n recipient = (owner_desc(target, True), target.email)\n send_mail(recipient, subject, body, copy_sysadmins=False, session=session)", "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def quick_email(self, send_to, subject, body, style=None):\n message = Message(body, style=style)\n\n self.send_message(message, send_to, subject)", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def _text(self, fromwhom, number, text):\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(self._smsaddress, self._smspw)\n server.sendmail(str(fromwhom), '{}@vtext.com'.format(number),\n str(text))\n server.quit()", "def onAboutLeoEmail(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(\"mailto:\" + self.email)\n except:\n g.es(\"not found: \" + self.email)", "def send_html_email(subject: str, email: str, template_name: str, context: dict) -> int:\n html_message = render_to_string(template_name, context)\n message = strip_tags(html_message)\n return send_mail(\n subject,\n message,\n settings.DEFAULT_FROM_EMAIL,\n [email],\n html_message=html_message,\n fail_silently=True,\n )", "def send_email_with_paper(request, id):\n paper = get_object_or_404(Paper, pk=id)\n if paper.status == '5completed' or paper.status == '3rejected':\n id_email = paper.user.email\n subject = paper.title\n body = render_to_string(\n 'papers/paper_file.html',\n {\n 'request': request,\n 'paper': paper\n }\n )\n\n send_mail(\n subject, body, settings.EMAIL_HOST_USER, [id_email],\n html_message=body, fail_silently=False\n )", "def send_html_email(self, email_from, email_to, subject, html_body):\n msg = mime_MIMEMultipart('alternative')\n msg['Subject'] = subject\n msg['From'] = email_from\n msg['To'] = email_to\n\n html_part = mime_MIMEText(html_body, 'html', _charset='utf-8')\n 
msg.attach(html_part)\n\n self.send_email(email_from, email_to, msg.as_string())", "def share_email(request, pk):\n template_var = base_template_vals(request)\n subject = 'Your friend shared an event with you on Dons Affairs!'\n from_email = '[email protected]'\n to = '[email protected]'\n to = request.POST[\"email_to\"] #default is sending to self '[email protected]'\n link = request.POST[\"abs_url\"]\n text_content = 'This is an important message.'\n text_content += 'Your friend shared an event link with you. ' + link\n html_content = '<p>Hi Dear,</p>' \n html_content += '<p>Your friend shared an exciting event with you on ' \n html_content += '<a href=\"http://mtk.im/usf\">Don\\'s Affairs</a>!</p>'\n html_content += '<p><a href=\"' + link + '\"> '\n html_content += 'Here is the link to the event.</a>' \n html_content += '<br>Feel free to check it out!</p>' + '<p><br>With love,'\n html_content += '<br>Don\\'s Affairs Team</p>'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n return redirect('index')", "def mail_template(self, template_name, send_to=None, user = None, event_title=\"\", **kwargs):\n barcamp = kwargs.get('barcamp')\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n if barcamp is not None:\n subject = barcamp.mail_templates['%s_subject' %template_name]\n tmpl = jinja2.Template(barcamp.mail_templates['%s_text' %template_name])\n kwargs['fullname'] = user.fullname\n payload = tmpl.render(**kwargs)\n payload = payload.replace('((fullname))', user.fullname)\n payload = payload.replace('((event_title))', event_title)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def sendEmail(recipient, content):\n server = smtplib.SMTP(\"[email protected]\", 587)\n server.ehlo()\n server.starttls()\n server.login(\"[email protected]\", \"password\")\n server.sendmail(\"[email protected]\", recipient, content)\n server.close()", "def contact():\n if request.method == 'POST':\n send_email()\n return \"\"", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n thisName = (\"%s\" % (result['Name']))\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n\n # prepare the custom email\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n emailPath = os.path.join(thisPath, \"emails/email_graph_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_graph.html\")\n dtChoice = mdb.getHHdtChoice(householdID)\n thisDate = dtChoice.strftime(\"%A, %-d %B\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = 
templateText.replace(\"[date]\", thisDate)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" [email protected] < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b [email protected] < ' + emailFilePath, shell=True)", "def mail_template(self, template_name, send_to=None, user = None, **kwargs):\n barcamp = kwargs.get('barcamp')\n if user is None:\n user = self.user\n if send_to is None:\n send_to = user.email\n if barcamp is not None:\n subject = barcamp.mail_templates['%s_subject' %template_name]\n tmpl = jinja2.Template(barcamp.mail_templates['%s_text' %template_name])\n kwargs['fullname'] = user.fullname\n payload = tmpl.render(**kwargs)\n payload = payload.replace('((fullname))', user.fullname)\n mailer = self.app.module_map['mail']\n mailer.mail(send_to, subject, payload)", "def send_email(self, recipients, html_data, assignee=None):\n\n msg = MIMEMultipart('alternative')\n# msg['Subject'] = \"Jira Alert - Stagnant Jiras %s\" % self.options.fl_project\n msg['Subject'] = \"Jira Alert - Stagnant Jiras\"\n msg['From'] = '[email protected]'\n if assignee:\n msg['To'] = assignee\n msg['Cc'] = ', '.join(recipients) # Assignee emails\n else:\n msg['To'] = ', '.join(recipients) # Main email\n \n html1 = \"<!DOCTYPE html><html><head><meta charset=\\\"utf-8\\\"/><title>HTML Reference</title></head><body>\"\n \n html2 = \"</body></html>\"\n \n final_message = \"%s%s%s\" % (html1, html_data, html2)\n html_message = MIMEText(final_message, 'html', _charset='utf-8')\n msg.attach(html_message)\n \n # Send the message via our own SMTP server.\n s = smtplib.SMTP('localhost')\n s.set_debuglevel(1)\n# s.sendmail('[email protected]', recipients, msg.as_string())\n s.sendmail('[email protected]', recipients, msg.as_string())\n s.quit()", "def send_email(email_body, email_to):\n html = f'{HTML_START}{email_body}{HTML_END}'\n try:\n SES_CLIENT.send_email(\n Source=os.environ['SENDER_EMAIL'],\n Destination={\n 'ToAddresses': [\n email_to\n ]\n },\n Message={\n 'Subject': {\n 'Data': 'Newest Music in Last 7 Days (Spotify)',\n },\n 'Body': {\n 'Html': {\n 'Data': html,\n }\n }\n }\n )\n\n except:\n traceback.print_exc()\n return False\n\n return True", "def send_user_email(user, subject, template_name, context=None):\n\n if context is None:\n context = {}\n\n context['user'] = user\n\n to = (user.email,)\n\n send(subject, to, template_name, context)", "def send_email(self,to, subj):\r\n\r\n \"\"\" Currently not implemented. 
\"\"\"\r\n print(to+'-'+subj)\r\n print(self.body)\r\n # Send the finalized email here.\r", "def send(self):\n logger.debug('Sending Email')\n self.mimepgp.send()", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "def send_email_to_trial_user_with_link(\n to, context, from_email=settings.DEFAULT_FROM_EMAIL):\n template = EMAIL_DICT['parse_trial_user_resume']['template']\n subject = EMAIL_DICT['parse_trial_user_resume']['subject']\n return threadify(_send, to, context, subject, from_email, template)", "def sendEmail (to,content):\n server = smtplib.SMTP('smtp.gmail.com',587)\n server.ehlo()\n server.starttls()\n server.login('[email protected]','your-password') #replace with your email and pass to send\n server.sendmail('[email protected]',to,content)\n server.close()", "def send(\r\n self,\r\n to = '', #list of email addresses - Required\r\n subject='None', #message's subject - Required\r\n message_text='None', #message body in plain text - Required\r\n message_html=None, #message body in html - Optional\r\n attachments=None, #list of truples [(filename, file_contents)] - Optional\r\n cc = None, #list of email addresses to CC message to\r\n bcc = None, #list of email addresses to BCC message to\r\n reply_to = None, #single email address to have replies send to\r\n ): \r\n if not isinstance(to, list):\r\n to = [to]\r\n\r\n try:\r\n if self.settings.private.email_server == 'gae':\r\n from google.appengine.api import mail\r\n #untested on GAE, but in theory should work\r\n #http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\r\n mail.send_mail(sender=self.settings.private.email_sender, to=to,\r\n subject=subject, body=message_text, html=message_html, attachments=attachments, cc = cc,\r\n bcc = bcc, reply_to = reply_to)\r\n else:\r\n\r\n msg = self.buildMIME(sender = self.settings.private.email_sender,\r\n recipients = to, subject = subject,\r\n message_text = message_text, message_html = message_html,\r\n attachments = attachments,\r\n cc = cc, bcc = bcc, reply_to = reply_to)\r\n #print 'message'+msg.as_string()\r\n #Build MIME body\r\n (host, port) = self.settings.mail.server.split(':')\r\n\r\n if self.settings.mail.ssl: \r\n try:\r\n server = smtplib.SMTP_SSL(host, port)\r\n except:\r\n # ERROR python <= 2.6\r\n pass\r\n else:\r\n server = smtplib.SMTP(host, port)\r\n\r\n if self.settings.mail.login:\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n if self.settings.mail.use_tls:\r\n try:\r\n server.starttls()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in STARTTLS\")\r\n except SMTPException:\r\n logger.info(\"Server does not support TLS\")\r\n\r\n except RuntimeError:\r\n logger.info(\"Python version does not support TLS (<= 2.6?)\")\r\n\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n (username, password) = self.settings.mail.login.split(':')\r\n try:\r\n server.login(username, password)\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in LOGIN\")\r\n\r\n except SMTPAuthenticationError:\r\n logger.info(\"Invalid username/password 
combination\")\r\n\r\n except SMTPException:\r\n logger.info(\"SMTP error in login\")\r\n\r\n try:\r\n server.sendmail(self.settings.private.email_sender, to, msg.as_string())\r\n server.quit()\r\n\r\n except SMTPRecipientsRefused:\r\n logger.info(\"All recipients were refused. Nobody got the mail.\")\r\n\r\n except SMTPHeloError:\r\n logger.info(\"The server didn't reply properly to the HELO greeting.\")\r\n\r\n except SMTPSenderRefused:\r\n logger.info(\"The server didn't accept the from_addr.\")\r\n\r\n except SMTPDataError:\r\n logger.info(\"The server replied with an unexpected error code (other than a refusal of a recipient).\")\r\n \r\n except Exception, e:\r\n return False\r\n return True", "def sendEmail(self):\r\n excel_file = ExcelConverter()\r\n attachementPath = excel_file.getFilePath(self.user.getUsername(), self.user.getPassword())\r\n subject = \"Fish Farm Data.\"\r\n body = Content(\"text/plain\", \"Attached below is a file containing your Fish Farm data.\")\r\n with open(attachementPath, 'rb') as fd:\r\n b64data = base64.b64encode(fd.read())\r\n attachment = Attachment()\r\n attachment.content = str(b64data,'utf-8')\r\n attachment.filename = \"Data\"\r\n self.message = Mail(self.sender, subject, self.recipient, body)\r\n self.message.add_attachment(attachment)\r\n response = self.sg.client.mail.send.post(request_body=self.message.get())\r\n print(\"Email sent\")", "def sendmail(sendername, senderemail, password, receivers, htmlfile, img, attach):\n import smtplib\n\n #Creating the email\n \n\n domain = senderemail.split('@')[1]\n if 'gmail' in domain.lower(): #Gmail SMTP\n smtpObj = smtplib.SMTP('smtp.gmail.com', 587)\n elif 'outlook' in domain.lower(): #Outlook SMTP\n smtpObj = smtplib.SMTP('smtp-mail.outlook.com', 587)\n elif 'yahoo' in domain.lower(): #Yahoo SMTP\n smtpObj = smtplib.SMTP('smtp.mail.yahoo.com', 587)\n else:\n print('Sorry I dont have your email SMTP setting.\\nBYE!')\n quit()\n\n smtpObj.starttls()\n try:\n smtpObj.login(senderemail, password)\n except smtplib.SMTPAuthenticationError:\n print('Authentication error!\\nWrong Email or Password.')\n quit()\n \n for user, email in receivers.items():\n msg = makeHTMLemail(sendername, senderemail, user, email, htmlfile, img, attach)\n smtpObj.send_message(msg)\n print('email sent to {}'.format(user))\n del msg\n smtpObj.quit()", "def send_mass_html_mail(datatuple, fail_silently=False, user=None, password=None, \n connection=None):\n connection = connection or get_connection(\n username=user, password=password, fail_silently=fail_silently)\n messages = []\n for subject, text, html, from_email, recipient in datatuple:\n message = EmailMultiAlternatives(subject, text, from_email, recipient)\n message.attach_alternative(html, 'text/html')\n messages.append(message)\n return connection.send_messages(messages)", "def email_ebook(filename, from_email, to_addr, subject, smtp_server, smtp_username, smtp_password):\n mroot = MIMEMultipart('related')\n mroot['Subject'] = subject\n mroot['From'] = from_email\n mroot['To'] = to_addr\n with open(filename, 'rb') as f:\n m = MIMEBase('application', 'octet-stream')\n m.set_payload(open(filename, 'rb').read())\n encode_base64(m)\n m.add_header('Content-Disposition', 'attachment; filename=\"{0}\"'.format(filename))\n mroot.attach(m)\n smtp = smtplib.SMTP()\n smtp.connect(smtp_server)\n smtp.starttls()\n smtp.login(smtp_username, smtp_password)\n smtp.sendmail(from_email, to_addr, mroot.as_string())\n smtp.quit()", "def email(self, instance):\r\n return mark_safe('<a 
href=\"mailto:{0}\">{1}</a>'.format(\r\n instance.user.email, instance.user.email,\r\n ))", "def send_emails(emails, author, title):\n subject = 'New post by %s' % author.capitalize()\n message = '%s wrote a new post with the title: %s' % (author.capitalize(), title)\n print('Sending emails to ', emails)\n send_mails_count = send_mail(\n subject=subject,\n message=message,\n from_email=EMAIL_HOST_USER,\n recipient_list=emails\n )\n print('Successfully sent %s - letters' % send_mails_count)", "def send_msg(self, text):\n\n if self.__webex_flag__ == 1:\n self.__send_msg_by_webex__(text)\n\n if self.__webex_flag__ == 1:\n self.__send_msg_by_mail__(text)\n\n return", "def send_email(subject, body, mail_to, reply_to=None):\n email_message = EmailMessage(\n subject=settings.EMAIL_SUBJECT.format(subject),\n body=body,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=mail_to,\n reply_to=reply_to,\n )\n email_message.content_subtype = 'html'\n email_message.send()", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)", "def send_email(email: str, name: str, message, db: Session):\n msg = MIMEText(message)\n msg[\"Subject\"] = name\n msg[\"From\"] = \"[email protected]\"\n msg[\"To\"] = email\n with smtplib.SMTP(host=\"localhost\", port=8025) as s:\n try:\n s.sendmail(msg[\"From\"], [email], msg.as_string())\n logger.info(\"Recipient reached at {}\".format(email))\n except smtplib.SMTPRecipientsRefused:\n logger.error(\"Recipient refused at {}\".format(email))\n raise\n mark_person_emailed(db, email)", "def send_mail(user, subject, message, from_email, recipient_list, fail_silently=False,\\\n auth_user=None, auth_password=None, connection=None, html_message=None):\n message = smart_text(gpg.sign(message))\n try:\n Profile.objects.get(user= user).pgp_gpg_public_key\n message = smart_text(gpg.encrypt(message))\n except:\n pass\n send_email_django(subject, message, from_email, recipient_list, fail_silently,\\\n auth_user, auth_password, connection, html_message)\n return True", "def mail(self, subject, body):\n message = MIMEText.MIMEText(body)\n message[\"Subject\"] = subject\n message[\"From\"] = self.wiki.config.mail_from\n message[\"To\"] = self.wiki.config.admin_mail\n try:\n server = smtplib.SMTP(self.wiki.config.smtp_server)\n server.sendmail(self.wiki.config.mail_from, self.wiki.config.admin_mail,\n message.as_string())\n server.close()\n except Exception:\n print(\"MAIL SEND FAILED! GODDAMIT! 
Was sending this mail:\")\n print(message)", "def send_mail(self, subject):\r\n pass", "def send_mail(to_emails, from_email, subject,\r\n text_template='mail/message.txt',\r\n data={}):\r\n text_content = render_to_string(text_template, data)\r\n msg = EmailMultiAlternatives(subject, text_content, from_email, to_emails)\r\n msg.send()", "def send_mail(\n sender: str, recipients: list, title: str, text: str = None, html_text: str = None,\n attachments: list = None) -> dict:\n msg = create_multipart_message(sender, recipients, title, text, html_text, attachments)\n ses_client = boto3.client('ses') # Use your settings here\n return ses_client.send_raw_email(\n Source=sender,\n Destinations=recipients,\n RawMessage={'Data': msg.as_string()}\n )", "def emailAggregatorPage(from_addr, to_addr, subj, smtp_host, out_fn):\n # Begin building the email message.\n msg = MIMEMultipart()\n msg['To'] = to_addr\n msg['From'] = from_addr\n msg['Subject'] = subj\n msg.preamble = \"You need a MIME-aware mail reader.\\n\"\n msg.epilogue = \"\"\n\n # Generate a plain text alternative part.\n plain_text = \"\"\"\n This email contains entries from your subscribed feeds in HTML.\n \"\"\"\n part = MIMEText(plain_text, \"plain\", UNICODE_ENC)\n msg.attach(part)\n\n # Generate the aggregate HTML page, read in the HTML data, attach it\n # as another part of the email message.\n html_text = open(out_fn).read()\n part = MIMEText(html_text, \"html\", UNICODE_ENC)\n msg.attach(part)\n\n # Finally, send the whole thing off as an email message.\n print \"Sending email '%s' to '%s'\" % (subj, to_addr)\n s = smtplib.SMTP(smtp_host)\n s.sendmail(from_addr, to_addr, msg.as_string())\n s.close()", "def sendTheDamnEmail(f):\n \n subject = f[\"subject\"].value\n toEmails = f[\"toEmail\"].value\n msg = f[\"msg\"].value\n \n #try:\n #mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n #mimeMsg['Subject'] = subject\n #mimeMsg['From'] = fromEmail\n #mimeMsg['To'] = toEmails\n \n mimeMsg = MIMEMultipart('alternative')\n mimeMsg['Subject'] = Header(subject, 'UTF-8').encode()\n mimeMsg['To'] = Header(toEmails, 'UTF-8').encode()\n mimeMsg['From'] = Header(fromEmail, 'UTF-8').encode()\n\t\n part1 = MIMEText(msg, 'plain', \"utf-8\")\n #part2 = MIMEText(msg, 'html') # If you want to send a fancy HTML email, use this one also\n\t\n mimeMsg.attach(part1)\n\n sendEmail.sendEmail(fromEmail, password, toEmails,\\\n smtp, port=port, msg=mimeMsg)\n\n if logPath!=\"null\":\n logger = logEmail.EmailLogger(logPath)\n stored = logger.storePost(ip, msg, toEmails)\n\tprint \"stored\"\n print \"success\"", "def sendmail(template, to, subject, headers=None, **kwargs):\n if not web.config.get('smtp_server'):\n # TODO: log warn message\n return\n\n html = render_template(template, **kwargs)\n\n # inline CSS to make mail clients happy\n html = pynliner.fromString(html)\n\n envelope = Envelope(\n from_addr=web.config.from_address,\n to_addr=to,\n subject=subject,\n html_body=html,\n headers=headers\n )\n\n server = web.config.smtp_server\n port = int(web.config.get('smtp_port', 25))\n username = web.config.smtp_username\n password = web.config.get('smtp_password')\n tls = web.config.get('smtp_starttls', False)\n\n result = envelope.send(\n host=server,\n port=port,\n login=username,\n password=password,\n tls=tls)\n return result", "def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, 
subject, body)", "def send_mail(to, subject, html, text, reply_to=None, sender=SENDER):\n\n from caravel.utils import principals\n\n # Verify that we are not sending spam to people.\n if not (isinstance(to, principals.Principal) and to.valid):\n raise ValueError(\"{!r} does not consented to email.\".format(to))\n\n # Verify that we are not sending spam from people.\n if reply_to:\n if not (isinstance(reply_to, principals.Principal) and reply_to.valid):\n raise ValueError(\"{!r} has not consented to send email.\"\n .format(reply_to))\n\n # Actually send the message to the user.\n _send_raw_mail(\n to=to.email,\n subject=subject,\n html=html,\n text=text,\n reply_to=reply_to.email if reply_to else None,\n sender=sender\n )", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, '[email protected]',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def send_email(to, subject, body, attachment=None):\n outlook = win32.Dispatch('outlook.application')\n new_mail = outlook.CreateItem(0)\n new_mail.Subject = subject\n new_mail.HTMLBody = body\n new_mail.To = to\n\n if attachment:\n new_mail.Attachments.Add(attachment)\n\n new_mail.Send()", "def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True", "def _send(self, email_message): \n\t\tif not email_message.recipients(): \n\t\t\treturn False \n\t\trecipients = map(self._sanitize, email_message.recipients()) \n\t\tMsg = o.CreateItem(0)\n \t\tMsg.To = recipients\n\t\tMsg.Subject = 'subject'\n\t\tMsg.Body = 'text'\n\t\tself.connection.SaveChanges(0)\n\n\t\tMsg.Send()\n\t\treturn True", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def position_applicants_send_email(id):\n if current_user.id is None:\n abort(403)\n else:\n form = ContactForm(request.form)\n if request.method == 'POST' and form.validate():\n position = db.session.query(Job).get(id)\n if position is None:\n abort(404)\n emails = [u.email for u in position.users]\n message = Message(subject=form.subject.data,\n sender='[email protected]',\n reply_to='[email protected]',\n recipients=[''],\n bcc=emails,\n body=form.text.data)\n mail.send(message)\n flash(\"Message was send.\", 'success')\n return redirect(url_for('organisations.view_applicants', id=id))\n return render_template('organisations/message_send_form.html', form=form)", "def send_email(msg):\n\tprint(\"sendEmail: \" + msg)", "def save(self, fail_silently=False):\n send_mail(fail_silently=fail_silently, **self.get_message_dict())", "def share_article(sharer, host, sender, reciever_email):\n message_subject = \"{} from Seal Team, shared an article with you via Authors Haven\".format(sharer)\n content = \"\"\"Hello, \\n\n {} via Authors Haven has shared an article with you.\n Kindly click the link below to read the article.\n {}\"\"\".format(sharer, host)\n \n response = 
send_mail(message_subject, content, sender, [reciever_email])\n return response", "def _send_email(\n recipient_id, sender_id, intent, email_subject, email_html_body,\n sender_email, bcc_admin=False, sender_name=None, reply_to_id=None):\n\n if sender_name is None:\n sender_name = EMAIL_SENDER_NAME.value\n\n _require_sender_id_is_valid(intent, sender_id)\n\n recipient_email = user_services.get_email_from_user_id(recipient_id)\n cleaned_html_body = html_cleaner.clean(email_html_body)\n if cleaned_html_body != email_html_body:\n log_new_error(\n 'Original email HTML body does not match cleaned HTML body:\\n'\n 'Original:\\n%s\\n\\nCleaned:\\n%s\\n' %\n (email_html_body, cleaned_html_body))\n return\n\n raw_plaintext_body = cleaned_html_body.replace('<br/>', '\\n').replace(\n '<br>', '\\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\\n<p>')\n cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)\n\n if email_models.SentEmailModel.check_duplicate_message(\n recipient_id, email_subject, cleaned_plaintext_body):\n log_new_error(\n 'Duplicate email:\\n'\n 'Details:\\n%s %s\\n%s\\n\\n' %\n (recipient_id, email_subject, cleaned_plaintext_body))\n return\n\n def _send_email_in_transaction():\n \"\"\"Sends the email to a single recipient.\"\"\"\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_mail(\n sender_name_email, recipient_email, email_subject,\n cleaned_plaintext_body, cleaned_html_body, bcc_admin,\n reply_to_id=reply_to_id)\n email_models.SentEmailModel.create(\n recipient_id, recipient_email, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())\n\n transaction_services.run_in_transaction(_send_email_in_transaction)", "def test_send_mass_html_mail_reply_to(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n self.assertEqual(send_mass_html_mail__mock.call_count, 1)\n self.assertEqual(send_mass_html_mail__mock.call_args[1]['reply_to'],\n [\"Marie <[email protected]>\"])", "def send_confirmation_email(user_pk):\n pass", "def send_email(to_address, from_address, subject, body):\n mail = \"\"\"echo \"From: %(from)s\\r\\nDate: $(date)\\r\\nSubject: %(subject)s\\r\\nMIME-Version: 1.0\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n%(body)s\" | ssmtp %(to)s\"\"\" % {\n \"to\": to_address,\n \"from\": from_address,\n \"subject\": subject,\n \"body\": body,\n }\n cmd(mail)", "def send_email(recipient, subject, body) -> None:\n port = 465\n smtp_server = \"smtp.gmail.com\"\n sender_email = user['username']\n password = user['password']\n\n message = MIMEMultipart()\n message['From'] = sender_email\n message['To'] = recipient\n message['Subject'] = subject\n body = MIMEText(body) \n message.attach(body)\n\n server = smtplib.SMTP_SSL(smtp_server, port)\n server.login(sender_email, password)\n server.sendmail(sender_email, recipient, message.as_string())\n server.quit()" ]
[ "0.7611227", "0.73684156", "0.67148703", "0.6527169", "0.6408137", "0.6307394", "0.6218779", "0.6209792", "0.61815774", "0.6166898", "0.616678", "0.6158147", "0.60088944", "0.5979466", "0.5952957", "0.5952957", "0.59498113", "0.59447414", "0.5928036", "0.59016263", "0.58767974", "0.58742714", "0.5842784", "0.5819987", "0.5819962", "0.58104074", "0.5804484", "0.5802932", "0.5774584", "0.57584894", "0.57219535", "0.5720447", "0.57174397", "0.5715434", "0.57100147", "0.5689573", "0.5682883", "0.5649216", "0.564693", "0.5645428", "0.5645428", "0.5615307", "0.5608889", "0.56056035", "0.5599903", "0.5581136", "0.5578362", "0.5577053", "0.5564063", "0.55485034", "0.5536574", "0.5535245", "0.55321336", "0.5521438", "0.55201817", "0.5511667", "0.549804", "0.54925764", "0.5489454", "0.54850924", "0.5481735", "0.54643077", "0.5462961", "0.5455927", "0.5442222", "0.5430995", "0.5430348", "0.5428917", "0.54282", "0.5423478", "0.54224783", "0.5417195", "0.54171085", "0.5413311", "0.54093343", "0.53979623", "0.53915316", "0.5381729", "0.53718746", "0.5370572", "0.5364018", "0.5363723", "0.53624153", "0.53575605", "0.53559345", "0.53528064", "0.53526366", "0.5348561", "0.53337115", "0.5332428", "0.53233397", "0.5322223", "0.53197217", "0.5318123", "0.53167117", "0.53165454", "0.5301111", "0.52988994", "0.5293082", "0.52902067" ]
0.6786847
2
Send the specified document to the user's email address (AJAX version).
def send_ajax(self, request, id, tribe_slug): document = self.get_document(id, tribe_slug) form = self._set_user_email_address(request) email = self._get_user_email_address(request) if not email and not form: form = EmailForm() if form: content = '<form class="ajax_update_email" action="%s" method="post">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)]) content += '%s<input type="submit" value="Send"/></form>' % form['email'] return HttpResponse(content) print "Sending email to %s" % email #email_document(document, to=[email], subject='Document: %s' % document.title) # Send a signal to let everyone know about this document interaction document_interaction.send(sender=self, document=document, mode="sent", request=request, recipient=email) return HttpResponse('Email sent to %s' % email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, request, id, tribe_slug):\n tribe = get_object_or_404(Tribe, slug=tribe_slug)\n document = self.get_document(id, tribe_slug)\n \n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if form or not email:\n return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request))\n \n # NB: Temporarily disabling actual email sending for development\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n print \"Sending email to %s\" % email \n \n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))", "def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()", "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()", "def post(self):\n return send_email(request.args)", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def save_form(self, request, form, change):\n\n document = form.instance\n self.send_notification_email(document, request, \n 'email/document_modified.txt.django')\n\n document = super(DocumentAdmin, self).save_form(request, form, change)\n document.uploader = request.user\n return document", "def contact():\n if request.method == 'POST':\n send_email()\n return \"\"", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def send_confirmation_email(user_pk):\n pass", "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def send_text_to_user(user):", "def send(self, email):\n client = 
self.clients[email.addressee]\n client.receive(email)", "def email(self):\r\n webbrowser.open(\"mailto: [email protected]\")", "def resend_email(self, userdict):\n return self.post('resend', userdict)", "def __send_email_to_user(self, template_name, email_subject, book, redirect, date=None):\r\n ctx = {\r\n 'date': datetime.now().date(),\r\n 'user': \"{} {}\".format(self.request.user.first_name, self.request.user.last_name),\r\n 'book': \"{} - {}\".format(book.title, book.author),\r\n 'profile_url': self.request.build_absolute_uri(reverse(redirect)),\r\n 'cons_res_date': date\r\n }\r\n\r\n html_content = render_to_string(\r\n 'users/emails/{}.html'.format(template_name), ctx)\r\n # Strip the html tag. So people can see the pure text at least.\r\n text_content = strip_tags(html_content)\r\n\r\n msg = EmailMultiAlternatives(\r\n email_subject, text_content, \"[email protected]\", [\r\n self.request.user.email])\r\n msg.attach_alternative(html_content, \"text/html\")\r\n msg.send()", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def post(self):\n data = request.get_json()\n user = actions.get_user_by_email(data['email'])\n html = '<p>Confirming your account will give you </p> <b>full access to Kwikker</b>'\n subject = 'Confirm your Kwikker account, ' + user['username']\n actions.send_email(data['email'], user['username'], user['password'], subject,\n '/confirm/', html, True)\n return \"\", 200\n pass", "def receive_email_view(request):\n save_inbound_email(request.POST, request.FILES)\n return HttpResponse(200)", "def toggle_jobmail(request):\n if request.is_ajax():\n if request.method == 'POST':\n request.user.jobmail = not request.user.jobmail\n request.user.save()\n\n return HttpResponse(status=200, content=json.dumps({'state': request.user.jobmail}))\n raise Http404", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def send_email(self, message):\n pass", "def send_mail(email):\n return email.send()", "def save(self, commit=False):\n mail_result = self.send_email()\n if mail_result:\n self.instance.is_admin_notified = True\n\n contact = super().save(commit=commit)\n\n return contact", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def position_applicants_send_email(id):\n if current_user.id is None:\n abort(403)\n else:\n form = ContactForm(request.form)\n if request.method == 'POST' and form.validate():\n position = db.session.query(Job).get(id)\n if position is None:\n abort(404)\n emails = [u.email for u in position.users]\n message = Message(subject=form.subject.data,\n sender='[email protected]',\n reply_to='[email protected]',\n recipients=[''],\n bcc=emails,\n body=form.text.data)\n mail.send(message)\n flash(\"Message was send.\", 'success')\n return redirect(url_for('organisations.view_applicants', id=id))\n return render_template('organisations/message_send_form.html', form=form)", "def _send(self, email_message): 
\n\t\tif not email_message.recipients(): \n\t\t\treturn False \n\t\trecipients = map(self._sanitize, email_message.recipients()) \n\t\tMsg = o.CreateItem(0)\n \t\tMsg.To = recipients\n\t\tMsg.Subject = 'subject'\n\t\tMsg.Body = 'text'\n\t\tself.connection.SaveChanges(0)\n\n\t\tMsg.Send()\n\t\treturn True", "def set_email():\n bid_fields = {'assertion':request.form['bid_assertion'],\n 'audience':settings.DOMAIN}\n headers = {'Content-type':'application/x-www-form-urlencoded'}\n h.disable_ssl_certificate_validation=True\n resp, content = h.request('https://browserid.org/verify',\n 'POST',\n body=urlencode(bid_fields),\n headers=headers)\n bid_data = json.loads(content)\n if bid_data['status'] == 'okay' and bid_data['email']:\n user = f.userFromBidOrNew(bid_data['email'])\n session['user_id'] = user.id\n session['user_email'] = user.email\n\n return redirect(url_for('main'))", "def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )", "def send_email(self,to, subj):\r\n\r\n \"\"\" Currently not implemented. \"\"\"\r\n print(to+'-'+subj)\r\n print(self.body)\r\n # Send the finalized email here.\r", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def newsletter():\n mongo.db.newsletter.insert_one(\n {\"email\": request.form.get(\"email\")})\n return jsonify(success=True)", "def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)", "def save(self, fail_silently=False):\n send_mail(fail_silently=fail_silently, **self.get_message_dict())", "def send_confirm_email(request,uid):\n user=models.UserProfile.objects.get(id=uid)\n current_site=get_current_site(request)\n email_subject='Activate Your Account'\n message=render_to_string('activate_account.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(uid)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return JsonResponse(\n {\n \"status\":\"The confirmation email has been sent.\",\n }\n )", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, '[email protected]',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def document_upload():\n form = 
SourceTextForm()\n if form.validate_on_submit():\n user = current_user\n\n doc = {}\n doc[\"file\"] = form.filename.data\n doc[\"author\"] = form.author.data\n doc[\"title\"] = form.title.data\n doc[\"language\"] = form.language.data\n\n params = {}\n params[\"email\"] = user.email\n params[\"new_page\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"PAGE_LIMIT\"]\n params[\"line_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"LINE_SIZE\"]\n params[\"early_cutoff\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"EARLY_CUTOFF\"]\n params[\"batch_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"BATCH_SIZE\"]\n params[\"tokenizer\"] = current_app.config[\"TOKENIZER\"].select(doc[\"language\"])\n params[\"resource\"] = create_book\n doc_uploader = DocumentUploader(params)\n \n could_upload = True\n try:\n doc_uploader.upload(doc)\n except Exception as e:\n traceback.print_exc()\n could_upload = False\n error_msg = \"Error uploading document. Please try again.\"\n flash(error_msg)\n\n if could_upload:\n success_msg = \"Document successfully uploaded.\"\n flash(success_msg)\n\n return render_template('content_management/document_upload.html', form=form)", "def send_owner_message(): \n data = order_obj.send_owner_message(request.forms)\n return data", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def send_email(email_dict, appointment_id):\n event_identifier = g_cal.send_invite_through_gcal(email_dict)\n models.Appointments.objects.filter(id=appointment_id).update(event_identifier=event_identifier)", "def send(cls, data):\n if settings.SENDINBLUE[\"API_KEY\"]:\n requests.request(\n \"POST\",\n cls.send_email_url,\n data=json.dumps(data),\n headers=cls.default_headers,\n )", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def share_email(request, pk):\n template_var = base_template_vals(request)\n subject = 'Your friend shared an event with you on Dons Affairs!'\n from_email = '[email protected]'\n to = '[email protected]'\n to = request.POST[\"email_to\"] #default is sending to self '[email protected]'\n link = request.POST[\"abs_url\"]\n text_content = 'This is an important message.'\n text_content += 'Your friend shared an event link with you. 
' + link\n html_content = '<p>Hi Dear,</p>' \n html_content += '<p>Your friend shared an exciting event with you on ' \n html_content += '<a href=\"http://mtk.im/usf\">Don\\'s Affairs</a>!</p>'\n html_content += '<p><a href=\"' + link + '\"> '\n html_content += 'Here is the link to the event.</a>' \n html_content += '<br>Feel free to check it out!</p>' + '<p><br>With love,'\n html_content += '<br>Don\\'s Affairs Team</p>'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n return redirect('index')", "def send_email_via_api(self, to, subject, message):\n\n return self.mail.send(to, subject, message)", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. {}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def send_email_to_trial_user_with_link(\n to, context, from_email=settings.DEFAULT_FROM_EMAIL):\n template = EMAIL_DICT['parse_trial_user_resume']['template']\n subject = EMAIL_DICT['parse_trial_user_resume']['subject']\n return threadify(_send, to, context, subject, from_email, template)", "def put(self, authorized_username):\n data = request.get_json()\n is_updated = actions.update_user_email(authorized_username, data['email'])\n if is_updated:\n return \"\", 200\n else:\n abort(404, message='Email already exists.')\n pass", "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def send_updating_email(request: Request, data: dict, action: str, email: str = None, updated_data: str = None) -> None:\n\n logger.info(msg=f'creating model subscriber for user id: {data[\"id\"]}')\n try:\n UserSubscriber.objects.create(owner=get_user_model().objects.get(id=data['id']))\n logger.info(msg=f'Subscriber for user {data[\"id\"]} has been created')\n send_telegram_message.delay(message=f'Subscriber for user {data[\"id\"]} has been created',\n group_type='success')\n except Exception as e:\n logger.warning(msg=f'Error on creating subscriber for user {data[\"id\"]}; {str(e)}')\n send_telegram_message.delay(message=f'Error on creating subscriber for user {data[\"id\"]}; {str(e)}',\n group_type='errors')\n logger.info(msg=f'creating web 
url for {action}')\n uid_data = _create_unique_uid(user_id=data['id'],\n updated_data=updated_data)\n url = _current_ip_port(is_secure=request.is_secure(),\n host=request.get_host(),\n url=f'/api/account/{action}/{uid_data[\"uid\"]}/{uid_data[\"user_id\"]}')\n logger.info(f'Move send email with action: {action} to celery task')\n request.data.pop('image', None)\n send_email_task.delay(data=data, action=action, url=url, request_data=request.data, email=email)\n if request.data.get('telegram_chat_id') is not None:\n send_telegram_message.delay(chat_id=request.data['telegram_chat_id'],\n message='This is your personal chat, you will receive '\n 'messages in this chat with new posts(if you have subscriptions).')", "def send(self, recipient, template_path, context, subject, bcc_email=[]):\n\n body = self.email_render(template_path, context)\n self.send_email(recipient, subject, body, bcc_email)", "def sendEmail(self):\r\n excel_file = ExcelConverter()\r\n attachementPath = excel_file.getFilePath(self.user.getUsername(), self.user.getPassword())\r\n subject = \"Fish Farm Data.\"\r\n body = Content(\"text/plain\", \"Attached below is a file containing your Fish Farm data.\")\r\n with open(attachementPath, 'rb') as fd:\r\n b64data = base64.b64encode(fd.read())\r\n attachment = Attachment()\r\n attachment.content = str(b64data,'utf-8')\r\n attachment.filename = \"Data\"\r\n self.message = Mail(self.sender, subject, self.recipient, body)\r\n self.message.add_attachment(attachment)\r\n response = self.sg.client.mail.send.post(request_body=self.message.get())\r\n print(\"Email sent\")", "def send_email_to_offending_user(subject, template):\n send_connect_email(subject=subject,\n template=template,\n recipient=logged_against,\n logged_against=logged_against,\n site=site,\n comments=comments)", "def send_mail(self, address, title, message):\n pass", "def send_email_key(request):\n if settings.EMAIL_VALIDATION == True:\n if request.user.email_isvalid:\n data = {\n 'email': request.user.email, \n 'action_type': 'key_not_sent', \n 'change_link': reverse('user_changeemail')\n }\n return render_to_response(\n 'authenticator/changeemail.html',\n RequestContext(request, data)\n )\n else:\n send_new_email_key(request.user)\n return validation_email_sent(request)\n else:\n raise Http404", "def send_email(email: str, name: str, message, db: Session):\n msg = MIMEText(message)\n msg[\"Subject\"] = name\n msg[\"From\"] = \"[email protected]\"\n msg[\"To\"] = email\n with smtplib.SMTP(host=\"localhost\", port=8025) as s:\n try:\n s.sendmail(msg[\"From\"], [email], msg.as_string())\n logger.info(\"Recipient reached at {}\".format(email))\n except smtplib.SMTPRecipientsRefused:\n logger.error(\"Recipient refused at {}\".format(email))\n raise\n mark_person_emailed(db, email)", "def execute_automatic_email(self, request, pk=None):\n try:\n retreat = Retreat.objects.get(pk=pk)\n except Exception:\n response_data = {\n 'detail': \"Retreat not found\"\n }\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n email = AutomaticEmail.objects.get(\n id=int(request.GET.get('email'))\n )\n except Exception:\n response_data = {\n 'detail': \"AutomaticEmail not found\"\n }\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Notify a user for every reserved seat\n emails = []\n for reservation in retreat.reservations.filter(is_active=True):\n if reservation.automatic_email_logs.filter(email=email):\n pass\n else:\n send_automatic_email(reservation.user, retreat, email)\n 
AutomaticEmailLog.objects.create(\n reservation=reservation,\n email=email\n )\n emails.append(reservation.user.email)\n\n response_data = {\n 'stop': True,\n 'emails': emails\n }\n return Response(response_data, status=status.HTTP_200_OK)", "def emailNote(self, authenticationToken, parameters):\r\n pass", "def contact_user(request, pk=None):\n # another way of checking if user is logged-in\n if not request.user.is_authenticated:\n return redirect('login')\n else:\n if request.method == 'GET':\n # identifying the sender and recipient of the message\n sender = User.objects.get(email=request.user.email)\n data = {'recipient': get_object_or_404(User, pk=pk)}\n contact_profile_form = ContactProfileForm(initial=data)\n else:\n contact_profile_form = ContactProfileForm(request.POST, request.FILES)\n if contact_profile_form.is_valid():\n sender = User.objects.get(email=request.user.email)\n contactuserpost = contact_profile_form.save(commit=False)\n contactuserpost.sender = request.user\n messages.success(request, 'Your message has been successfully sent!')\n contactuserpost.save() \n return redirect(reverse('all_users'))\n else:\n contact_profile_form = ContactProfileForm()\n return render(request, 'contactuserpost.html', {'contact_profile_form': contact_profile_form})", "def send_file(self, file_path) -> object:\n method = 'sendDocument?' + 'chat_id=' + str(self.__chat_id_response())\n try:\n files = {'document': open(file_path, 'rb')}\n return requests.post(self.api_url + method, files = files)\n except FileNotFoundError as fn_err:\n print(fn_err)\n sys.exit(1)\n except TimeoutError as tm_err:\n print(tm_err)\n sys.exit(1)", "def send_mail_to_user(user,\n template,\n template_context=None,\n delay=False,\n **kwargs):\n function = send_mail.delay if delay is True else send_mail\n\n if user.email and user.email_validated:\n return function(user.email, template, template_context=template_context, **kwargs)", "def do_POST(self): # noqa\n l = int(self.headers['Content-Length'])\n new_address = self.rfile.read(l).decode('utf-8')\n if check.match(new_address) is not None:\n logging.info(\"Forwarding {} to sales.\".format(new_address))\n Thread(target=self.send_email, args=(new_address, )).start()\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.send_header('Access-Control-Allow-Origin',\n 'http://numat-tech.com')\n self.end_headers()\n self.wfile.write(new_address.encode('utf-8'))\n else:\n logging.exception(\"Received malformed email: \" + new_address)\n self.send_response(500)", "def contact():\n if request.method == \"POST\":\n mongo.db.contact.insert_one(request.form.to_dict())\n\n return jsonify(success=True)\n\n return render_template(\"contact.html\", page_title=\"Contact Us\")", "def onAboutLeoEmail(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(\"mailto:\" + self.email)\n except:\n g.es(\"not found: \" + self.email)", "def send_email_with_paper(request, id):\n paper = get_object_or_404(Paper, pk=id)\n if paper.status == '5completed' or paper.status == '3rejected':\n id_email = paper.user.email\n subject = paper.title\n body = render_to_string(\n 'papers/paper_file.html',\n {\n 'request': request,\n 'paper': paper\n }\n )\n\n send_mail(\n subject, body, settings.EMAIL_HOST_USER, [id_email],\n html_message=body, fail_silently=False\n )", "def sendTheDamnEmail(f):\n \n subject = f[\"subject\"].value\n toEmails = f[\"toEmail\"].value\n msg = f[\"msg\"].value\n \n #try:\n #mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n #mimeMsg['Subject'] = 
subject\n #mimeMsg['From'] = fromEmail\n #mimeMsg['To'] = toEmails\n \n mimeMsg = MIMEMultipart('alternative')\n mimeMsg['Subject'] = Header(subject, 'UTF-8').encode()\n mimeMsg['To'] = Header(toEmails, 'UTF-8').encode()\n mimeMsg['From'] = Header(fromEmail, 'UTF-8').encode()\n\t\n part1 = MIMEText(msg, 'plain', \"utf-8\")\n #part2 = MIMEText(msg, 'html') # If you want to send a fancy HTML email, use this one also\n\t\n mimeMsg.attach(part1)\n\n sendEmail.sendEmail(fromEmail, password, toEmails,\\\n smtp, port=port, msg=mimeMsg)\n\n if logPath!=\"null\":\n logger = logEmail.EmailLogger(logPath)\n stored = logger.storePost(ip, msg, toEmails)\n\tprint \"stored\"\n print \"success\"", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def emit(self, record):\r\n try:\r\n port = self.mailport\r\n if not port:\r\n port = smtplib.SMTP_PORT\r\n smtp = smtplib.SMTP(self.mailhost, port)\r\n smtp.login(self.username, self.password)\r\n msg = self.format(record)\r\n msg = \"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\nDate: %s\\r\\n\\r\\n%s\" % (\r\n self.fromaddr,\r\n ','.join(self.toaddrs),\r\n self.getSubject(record),\r\n formatdate(), msg)\r\n smtp.sendmail(self.fromaddr, self.toaddrs, msg)\r\n smtp.quit()\r\n except (KeyboardInterrupt, SystemExit):\r\n raise\r\n except:\r\n self.handleError(record)", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def claim_email(request):\n email = request.POST.get('email', '')\n email_user = User.objects.filter(email=email)\n payload = {\n 'res': 'failed'\n }\n if email_user.exists() and \\\n not email_user[0].profile.send_mail:\n request.user.profile.add_email(email)\n payload['res'] = 'success'\n\n return payload", "def send(self):\n return get_current_sender().sendmail(self)", "def email_bibs():\n try:\n # get data sent by client\n client_data = request.get_json()\n print(' ')\n print('------- email_bibs route beginning -------')\n print(f\"recived: {client_data}\")\n\n # race is from databse, while client_data is from front end\n race = Race.query.get(client_data['race_id'])\n\n # create subject string for email\n subject = (f\"Bib Assingments for race at {race.location.name} \"\n f\"on {race.display_date()}\")\n\n # create message object\n msg = Message(\n subject,\n sender = ('New England Prep XC','[email protected]'),\n recipients=[client_data['recipients']]\n )\n\n # utilize message body provided by client\n msg.body = client_data['messageBody']\n\n # send message\n mail.send(msg)\n\n # Pass JSON_received to the frontend\n JSON_received = {'Status':'Received race'}\n return jsonify(JSON_received)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def post(self, request, *args, **kwargs):\n usuario=Usuario.objects.get(id=self.kwargs['pk'])\n if request.POST[\"esta_aprobado\"] == 'True':\n CorreoMail(\"Aprobado\",\"Usted fue apobado en el sistema, bienvenido!!\",usuario.user.email )\n return super(ActualizarUser, self).post(request, **kwargs)", "def post(self, request):\n\n try:\n eventoid = request.POST.get('id', '')\n correo = request.POST.get('correo', '')\n AsigStaff.objects.create(id_Evento = eventoid, email_staff = correo)\n print(\"Exito en la asignación de staff\")\n except:\n print(\"Error en la asignacion de staff\")\n\n \n return render(request, self.template, self.context)\n #return render(request, self.template, self.context)", "def contact():\n # request form 
data\n form = ContactForm(request.form)\n # Get user name if logged in\n user_name = None\n if session:\n user_name = session[\"user\"]\n # validate form\n if request.method == \"POST\" and form.validate():\n contact_name = form.name.data\n contact_email = form.email.data\n contact_message = form.message.data\n message = contact_get_message_string(\n user_name, contact_name, contact_email, contact_message)\n # Set server variable\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n # Put the SMTP connection in TLS (Transport Layer Security) mode\n server.starttls()\n # Attempt to log in to email server\n try:\n server.login(mail_username, mail_password)\n # Display flash message if there is is an exception\n except smtplib.SMTPException:\n flash(\n \"Could not log into email server, \" +\n \"please check configuration variables\",\n \"danger\")\n return render_template(\"contact.html\", form=form)\n else:\n # Set message variable\n msg = EmailMessage()\n msg[\"Subject\"] = \"New contact message from FreeFrom\"\n msg[\"From\"] = mail_username\n msg[\"To\"] = mail_username\n msg.set_content(message)\n # Attempt to send message\n try:\n server.send_message(msg)\n # Display flash message if there is is an exception\n except smtplib.SMTPException:\n flash(\n \"Contact email has not been succesfully sent, \" +\n \"please try again\",\n \"warning\")\n return render_template(\"contact.html\", form=form)\n # Display flash message if email is succesfully sent\n else:\n flash(\n \"Contact email has been succesfully sent\",\n \"success\")\n return render_template(\"home.html\")\n\n # If user is logged in, set email address field automatically\n if user_name:\n form.email.data = user_get(user_name)[\"email\"]\n return render_template(\"contact.html\", form=form)", "async def send_document(self, chat_id: typing.Union[base.Integer, base.String],\n document: typing.Union[base.InputFile, base.String],\n caption: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_to_message_id: typing.Union[base.Integer, None] = None,\n reply_markup: typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup,\n types.ReplyKeyboardRemove,\n types.ForceReply, None] = None) -> types.Message:\n reply_markup = prepare_arg(reply_markup)\n payload = generate_payload(**locals(), exclude=['document'])\n result = await self.send_file('document', api.Methods.SEND_DOCUMENT, document, payload)\n\n return types.Message(**result)", "def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def send_confirmation(self):\r\n c.user.email_validated = False\r\n c.user.confirmation_code = 
random_key(6)\r\n c.user._commit()\r\n emailer.confirmation_email(c.user)", "def send_mail(api_url, user, key, sender, receiver, subject, text, file_name):\n\n authorization = (user, key)\n data = {\n \"from\": sender,\n \"to\": receiver,\n \"subject\": subject,\n \"text\": text\n }\n try:\n return requests.post(api_url,\n auth=authorization,\n files=[(\"attachment\", (file_name, open(file_name, \"rb\").read()))],\n data=data\n )\n except Exception as ex:\n print(type(ex))\n print(ex)", "def send_email(self, email):\n\n if not isinstance(email, str):\n raise TypeError('type of email must be str not %s' % type(email))\n\n message = self.get_message(email)\n self.server.send_message(message)", "def post_change_email(self, data=None):\n return self.client.post(self.change_email_url, data)", "def send_new_email(user):\n token = user.get_token()\n message = Message(\n 'Verify Your New Email',\n sender='[email protected]',\n recipients=[user.temp_email])\n message.body = f\"The email address associated with your Storc \" \\\n f\"account has changed.\\n\\nTo verify your new email address, \" \\\n f\"please click the link below:\\n\\n\" \\\n f\"{url_for('users.new_email', token=token, _external=True)}\"\n mail.send(message)", "def email_user(self, subject, message, from_email=None):\n\t\tsend_mail(subject, message, from_email, [self.email])", "def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='[email protected]')", "def edit_document(self):\n self.check_editable()\n\n document = self.agenda_item.resolve_document()\n checkout_manager = getMultiAdapter((document, self.request),\n ICheckinCheckoutManager)\n response = JSONResponse(self.request)\n\n if not checkout_manager.is_checked_out_by_current_user() \\\n and not checkout_manager.is_checkout_allowed():\n response.remain().error(\n _(u'document_checkout_not_allowed',\n default=u'You are not allowed to checkout the document.'))\n else:\n url = document.checkout_and_get_office_connector_url()\n response.proceed().data(officeConnectorURL=url)\n\n return response.dump()", "def send_email(\n template_name: str,\n to: typing.Union[str, typing.List[str]],\n personalisation: dict = None,\n reference: str = None,\n staff_email: bool = None,\n retry_attempts: int = 2,\n spoolable_ctx: Context = None,\n):\n client = NotifyClient.shared_client()\n try:\n client.send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n )\n except APIError as e:\n should_retry = (\n # no retry without uWSGI spooler\n spoolable_ctx.spooled\n # no retry if run out of retry attempts\n and retry_attempts\n # retry only for \"service unavailable\" / \"internal error\" statuses\n and 500 <= e.status_code < 600\n # …unless it was caused by an invalid json response\n and not isinstance(e, InvalidResponse)\n )\n if should_retry:\n send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n retry_attempts=retry_attempts - 1,\n )\n else:\n raise e", "def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n 
self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])" ]
[ "0.7083903", "0.6592813", "0.6592162", "0.6246286", "0.5818268", "0.58028346", "0.5787325", "0.5658083", "0.56290865", "0.5581146", "0.55060256", "0.5494548", "0.5458515", "0.54515415", "0.544656", "0.5391331", "0.5326866", "0.53013676", "0.52664655", "0.5242208", "0.5226229", "0.5215729", "0.5182811", "0.51782763", "0.5169317", "0.51628906", "0.5138254", "0.5135522", "0.5119434", "0.51016647", "0.50927156", "0.5043762", "0.50417864", "0.50287163", "0.5023252", "0.5000956", "0.49826506", "0.49592188", "0.4954675", "0.4952347", "0.4951429", "0.49488184", "0.49471113", "0.49372762", "0.49364793", "0.49346095", "0.49329096", "0.49313322", "0.49310464", "0.4930073", "0.49181", "0.49155456", "0.4897765", "0.48923427", "0.48847494", "0.48835078", "0.48760512", "0.4874017", "0.48602146", "0.48510733", "0.48420277", "0.48295724", "0.482928", "0.48201084", "0.48065117", "0.48061347", "0.48053417", "0.4801521", "0.48003528", "0.47983626", "0.4798043", "0.47912133", "0.4787639", "0.47835356", "0.47794408", "0.47715136", "0.4767927", "0.47649795", "0.4763627", "0.4763627", "0.4763627", "0.4763627", "0.4763627", "0.4763627", "0.4763627", "0.4763627", "0.4763627", "0.4763627", "0.47552764", "0.47521958", "0.47518033", "0.4748426", "0.47476703", "0.4742805", "0.47427523", "0.4738505", "0.47380334", "0.47357774", "0.4727419", "0.4727419" ]
0.7920855
0
Gets a custom defined or default email address for the current user.
def _get_user_email_address(self, request): return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")", "def Email(self, default=None):\n return self.data.get('email', default)", "def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''", "def get_email(self):\n return self.email", "def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def get_email(self):\n return self._email", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email_address(self) -> str:\n return self._email_address", "def GetEmailAddress(user_id):\n user_id = user_id.strip()\n if '@' in user_id:\n email = user_id\n else:\n email = user_id + '@' + os.environ['AUTH_DOMAIN']\n\n if IsEmailValid(email):\n return email\n else:\n return None", "def log_useremail(self):\n return self.user.email", "def email(self, instance):\r\n return instance.user.email", "def get_user_email():\n if not is_authenticated() or not is_authenticated_CSC_user() or 'samlUserdata' not in session:\n return None\n\n csc_email = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('email', None), False)\n\n return csc_email[0] if csc_email else not_found('csc_email')\n return None", "def getEmail(self):\n return self.__email", "def get_default_email(self):\n email = '[email protected]'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email", "def getEmail(self):\n return self.email", "def get_default_email(self):\n email_address = None\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE client_company_ID = %s ' \\\n u'AND communication_type = \"email\" ' \\\n u'AND main = 1'\n\n data = (self.id,)\n\n c, conn = connection(self.schema)\n\n try:\n c.execute(sql, data)\n\n address = c.fetchone()\n if address is not None:\n email_address = address[0]\n\n finally:\n conn_close(c, conn)\n\n return email_address", "def service_account_email_address(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"service_account_email_address\")", "def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def get_email_for_nickname(cls, nickname):\n account = cls.get_account_for_nickname(nickname)\n if account is None:\n return None\n return account.email", "def getEmail(self):\n\t\treturn self.Email", "def email_address():\n hostname = socket.gethostname()\n if hostname == \"warpy\":\n email = \"[email protected]\"\n else:\n dummy_address = \"[email protected]\"\n email = os.environ.get(\"EMAIL\", dummy_address)\n if not email:\n # Environment variable exists but content is an empty string\n email = dummy_address\n return email", "def get_email(obj):\r\n return obj.user.email", "def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")", "def email(self):\n return self._dict.get('email')", "def get_user_email():\n email = input(\"Email address: \")\n menu.option_to_exit(email)\n try:\n if not is_valid_email(email):\n raise ValueError\n except ValueError:\n print(\"\\nOoops! That doesn't look like an email address.\\n\"\n \"Please try again.\\n\")\n return get_user_email()\n else:\n return email", "def get_user_email(username):\r\n return '{0}@test.com'.format(username)", "def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")", "def service_account_email_address(self) -> str:\n return pulumi.get(self, \"service_account_email_address\")", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self) -> str:\n return self._email", "def service_account_email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_account_email_address\")", "def get_email_address(user_id: UserID) -> str:\n email_address = db.session \\\n .query(DbUser.email_address) \\\n .filter_by(id=user_id) \\\n .scalar()\n\n if email_address is None:\n raise ValueError(\n f\"Unknown user ID '{user_id}' or user has no email address\"\n )\n\n return email_address", "def get(self):\n user_id = request.args.get('user_id')\n return get_email(user_id)", "def get_email(khoros_object, user_settings=None, user_id=None, login=None, first_name=None, last_name=None,\n allow_multiple=False, display_warnings=True):\n user_settings = process_user_settings(user_settings, user_id=user_id, login=login,\n first_name=first_name, last_name=last_name)\n where_clause = _get_where_clause_for_email(user_settings)\n return _get_user_identifier(khoros_object, 'email', where_clause, allow_multiple, display_warnings)", "def generate_email_address(self):\n return \"%s.%s@%s\" % (uuid.uuid4(), self.mailbox, \"mailosaur.io\")", "def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")", "def email(self):\n return 
'{}.{}@email.com'.format(self.fname,self.lname)", "def ldap_get_email(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n alias = result.get(\"alias\")[1]\n return alias\n\n return None", "def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email", "def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")", "def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")", "def get_email_of_user(auth0_id):\n _verify_auth0_id(auth0_id)\n return _get_email_of_user(\n auth0_id, token_redis_connection(), auth0_token(),\n current_app.config)", "def get_my_email():\n return check_output(['git', 'config', '--get',\n 'user.email']).strip().decode('utf-8')", "def _get_contact_email(app):\n return app[CONTACT_EMAIL_KEY]", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def customer_email(customer):\n return customer.get(\"email\")", "def email(self):\n return self.__email", "def get_nickname_for_email(cls, email, default=None):\n account = cls.get_account_for_email(email)\n if account is not None and account.nickname:\n return account.nickname\n if default is not None:\n return default\n return email.replace('@', '_')", "def customer_email(self):\n return self._customer_email", "def __email_for_user(self, username):\n user_3tuple = self.usermap.lookup_by_p4user(username)\n if not user_3tuple:\n return _('Unknown Perforce User <{}>').format(username)\n return \"<{0}>\".format(user_3tuple[p4gf_usermap.TUPLE_INDEX_EMAIL])", "def email_address() -> str:\n\n return os.environ.get(\"EMAIL_NOTIFICATION\", \"\")", "def management_account_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_email\")", "def email_for_default_sa(project):\n sa_structs = gcloud_json(['gcloud', 'iam',\n 'service-accounts', 'list',\n '--project', project])\n for sa_struct in sa_structs:\n if 'compute@developer' in sa_struct['email']:\n return sa_struct['email']\n Print.YL('Could not find compute default service account!')\n Print.YL('See `gcloud iam service-accounts list`.')\n sys.exit(1)", "def config_email_address() -> str:\n print(\"Email address not configured.\\nPlease enter your email: \")\n email = sys.stdin.readline().strip()\n\n return email", "def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")", "def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")", "def get_email(self, id_):\n\n query = self._db.User.select(self._db.User.c.id_ == id_)\n query = query.with_only_columns([self._db.User.c.email, ])\n\n record = query.execute().fetchone()\n return record[0]", "def create_user_email(user):\n if not user.is_authenticated:\n return False\n \n user.email = \"%s@%s\" % (user.username, settings.DEFAULT_EMAIL_HOST)\n user.save()\n \n return user.email", "def get_email():\n headers = request.headers\n token = headers['Authorization'].split()[1]\n return Token.objects(access_token=token).first().email", "def displayname(self):\n return self.email", "def get_full_name(self):\n\t\treturn self.email", "def get_short_name(self):\n # The user is identified by the email address\n return self.email", "def find_invited_user(email, default=None):\n\n User = apps.get_model(settings.AUTH_USER_MODEL)\n\n try:\n return User.objects.get(email=email)\n 
except User.DoesNotExist:\n return default", "def getEmail(self):\n return _libsbml.ModelCreator_getEmail(self)", "def business_owner_email(self):\n return self._business_owner_email", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def get_member_email(username=None, portal_membership=None):\n\n if portal_membership is None:\n portal = getSite()\n portal_membership = getToolByName(portal, 'portal_membership', None)\n if portal_membership is None:\n # unit test or non-CMF site\n return None\n\n if username is None:\n member = portal_membership.getAuthenticatedMember()\n else:\n member = portal_membership.getMemberById(username)\n if member is None:\n if username is not None and '@' in username:\n # Use case: explicitly adding a mailing list address\n # to the watchers.\n return username\n return None\n\n try:\n email = member.getProperty('email')\n except Unauthorized:\n # this will happen if CMFMember is installed and the email\n # property is protected via AT security\n email = member.getField('email').getAccessor(member)()\n return email", "def from_email_address(self):\n return self._from_email", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def get_primary_email(self):\n return self.associated_emails.get(is_primary_email=True)", "def email_user(user, template_path, from_address, context_dict):\n return email_list([user.email], template_path, from_address, context_dict)", "def get_assignee_email(self, assignee_id):\n response = self.http_call(\"{0}/users/{1}.json\".format(self.uri, assignee_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))[\"user\"][\"email\"]", "def elastic_cloud_email_address(self) -> str:\n return pulumi.get(self, \"elastic_cloud_email_address\")", "def business_email(self):\n return self._business_email", "def get_primary_email(lookup_value, lookup_type=\"id\"):\n lookup_type = _validate_lookup_type(lookup_type, 'email')\n user_data = core.get_data('people', lookup_value, lookup_type, return_json=True)\n primary_email = user_data['emails'][0]['value']\n return primary_email", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def 
mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def _set_user_email_address(self, request):\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email']\n else:\n return form", "def GetEmail(prompt):\n last_email_file_name = os.path.expanduser(\"~/.last_codereview_email_address\")\n last_email = \"\"\n if os.path.exists(last_email_file_name):\n try:\n last_email_file = open(last_email_file_name, \"r\")\n last_email = last_email_file.readline().strip(\"\\n\")\n last_email_file.close()\n prompt += \" [%s]\" % last_email\n except IOError, e:\n pass\n email = raw_input(prompt + \": \").strip()\n if email:\n try:\n last_email_file = open(last_email_file_name, \"w\")\n last_email_file.write(email)\n last_email_file.close()\n except IOError, e:\n pass\n else:\n email = last_email\n return email" ]
[ "0.79681355", "0.77469647", "0.7720694", "0.76566875", "0.7597106", "0.75681674", "0.7504296", "0.74986595", "0.74986595", "0.74986595", "0.74986595", "0.7492597", "0.7372196", "0.7372196", "0.7372196", "0.7372132", "0.73386735", "0.72807336", "0.72577626", "0.7211655", "0.72088623", "0.7208019", "0.7205832", "0.72001445", "0.7198179", "0.7190173", "0.71753037", "0.71753037", "0.71753037", "0.71753037", "0.71753037", "0.71753037", "0.71753037", "0.7154366", "0.71523464", "0.7137591", "0.71088105", "0.70823115", "0.70486134", "0.70485604", "0.7006021", "0.6993036", "0.6991589", "0.6987343", "0.6957957", "0.6957957", "0.6957957", "0.6957957", "0.69383657", "0.69329244", "0.69019556", "0.690184", "0.68631274", "0.6859159", "0.68591326", "0.68395793", "0.6812915", "0.67984754", "0.67544705", "0.67544705", "0.67496914", "0.6729735", "0.66958845", "0.6634633", "0.65761876", "0.6568352", "0.65443796", "0.6537156", "0.6494962", "0.64758444", "0.6468733", "0.6440013", "0.6423101", "0.6420478", "0.6420478", "0.64133435", "0.6411177", "0.6357005", "0.6354894", "0.6330674", "0.6326118", "0.63015705", "0.6295413", "0.62911725", "0.6274763", "0.6214206", "0.62141824", "0.61915165", "0.6189289", "0.6189289", "0.61776775", "0.61704004", "0.6160396", "0.6158219", "0.61576337", "0.6140353", "0.6139937", "0.6139937", "0.6138317", "0.6134788" ]
0.793554
1
If a new email address is posted, remember it.
def _set_user_email_address(self, request): if request.method == 'POST': form = EmailForm(request.POST) if form.is_valid(): request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email'] else: return form
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')", "def change_email(self, token):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n if data.get(\"user_id\") != self.id:\n return False\n new_email = data.get(\"new_email\")\n if new_email is None:\n return False\n # check to see if another user has this email\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = data.get(\"new_email\")\n db.session.add(self)\n return True", "def change_email(self, new_email):\n self.email = new_email\n print(f\"Email for {self.name} has been updated!\")\n return self.email", "def change_email(self, token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n if data.get('change_email') != self.id:\n return False\n new_email = data.get('new_email')\n if new_email is None:\n return False\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = new_email\n db.session.add(self)\n return True", "def test_user_logged_in_post_changes_email(self):\n form_data = {\n 'password': self.password,\n 'new_email': \"[email protected]\",\n 'new_email2': \"[email protected]\"\n }\n self.assertTrue(self.login())\n post_response = self.post_change_email(form_data)\n self.assertEqual(post_response.status_code, 302)\n self.assertRedirects(post_response, reverse('account:overview'))\n user = User.objects.get(pk=self.user.id)\n self.assertEqual(user.email, '[email protected]')", "def change_email(self, email):\n self.active = False\n self.other_email = email\n self.key = EmailManager.generate_key()\n self.save()\n\n send_change_email(self, email)\n return self.key", "def stash_data(self, form):\n get_adapter(self.request).stash_invite_email(\n self.request, form.cleaned_data[\"email\"]\n )", "def clean_email(self):\n existing = User.objects.filter(email__iexact=self.cleaned_data['email'])\n if existing.exists():\n raise forms.ValidationError(_(\"This email address is already in use. 
Please enter a different email \"\n \"address!\"))\n else:\n return self.cleaned_data['email']", "def restoreRequest(self, email):\n\t\tresult = self.db.request(\"getOne\", {\"email\": email});\n\t\tif result:\n\t\t\tnew_data = {};\n\t\t\tnew_data[\"field.utility\"] = makeHash(email + getTimeStamp() + result[\"password\"]);\n\n\t\t\tdata_prime = self.db.getData(result[\"_id\"], new_data);\n\n\t\t\tresult2 = self.db.request(\"update\", data_prime);\n\n\t\t\tif result2:\n\t\t\t\treturn \"Ok\";\n\t\t\telse:\n\t\t\t\treturn False;\n\n\t\telse:\n\t\t\treturn False;", "def post_change_email(self, data=None):\n return self.client.post(self.change_email_url, data)", "async def update_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been updated successfully..!\")\n else:\n await ctx.send(\"There is no email address configured, \"\n \"Please use add command to add one..!\")\n return", "def clean_email(self):\r\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\r\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))\r\n return self.cleaned_data['email']", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n except User.MultipleObjectsReturned:\n pass\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.')\n )", "def email_post(request):\n if request.user.is_authenticated:\n messages.error(request, _(\"You are already logged in.\"))\n return redirect(ta_settings.LOGIN_REDIRECT)\n\n form = EmailForm(request.POST)\n if not form.is_valid():\n messages.error(request, _(\"The email address was invalid. Please check the address and try again.\"))\n return redirect(ta_settings.LOGIN_URL)\n\n email = ta_settings.NORMALIZE_EMAIL(form.cleaned_data[\"email\"])\n if not email:\n # The user's normalization function has returned something falsy.\n messages.error(\n request, _(\"That email address is not allowed to authenticate. Please use an alternate address.\")\n )\n return redirect(ta_settings.LOGIN_URL)\n\n email_login_link(request, email, next_url=request.GET.get(\"next\", \"\"))\n\n messages.success(request, _(\"Login email sent! Please check your inbox and click on the link to be logged in.\"))\n return redirect(ta_settings.LOGIN_URL)", "def clean_email(self):\n try:\n user = User.objects.get(email__exact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. 
Please supply a different email address.\"))\n return self.cleaned_data['email']", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError('This email address is already in use. Please supply a different email address.')\n return self.cleaned_data['email']", "def mark_email(args):\n cache.get_default().set_email(args.address, args.is_valid)\n print('{!r:} marked as {:s}valid.'.format(args.address, '' if args.is_valid else 'in'))", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "async def add_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list.keys():\n await ctx.send(\n \"There is already an email address configured, \"\n \"Please use update command to update it..!\")\n return\n else:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been configured successfully..!\")", "def clean_email(self):\r\n email = self.cleaned_data.get(\"email\")\r\n\r\n if not email: \r\n return email\r\n\r\n if User.objects.filter(email__iexact=email).exclude(pk=self.instance.pk):\r\n raise forms.ValidationError(\"That e-mail is already used.\")\r\n else:\r\n return email", "def save(self, **kwargs):\n if CustomerModel.objects.filter(user__email=self.cleaned_data['email']).exists():\n logger.info('Subscription from {} dropped, email address exists.'.format(self.cleaned_data['email']))\n return self.instance\n # email is not assigned by the form probably because it is a related user object field\n self.instance.email = self.cleaned_data['email']\n if send_confirmation_email(self.request, self.instance):\n self.instance = super(SubscribeForm, self).save(**kwargs)\n return self.instance", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def post_duplicate_meetup(self):\n\n self.post_meetup()\n return self.post_meetup()", "def check_and_add(email, name):\n\n key = ndb.Key(AddressEntry, email)\n model 
= key.get()\n # we only have a problem if a model for the given email exists AND the name is different\n if not model is None:\n if model.name != name:\n jdict = model.to_json_dict()\n jdict[\"requested_name\"] = name\n return False, jdict\n\n model = AddressEntry(\n id=email,\n email=email,\n name=name\n )\n model.put()\n return True, model.to_json_dict()", "def _save_drafts(self):\n ##logging.info('SAVING: %s -> %s', self.email, self._drafts)\n memcache.set('user_drafts:' + self.email, self._drafts, 3600)", "def email_change(request):\n if request.method == \"POST\":\n form = EmailForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n return home(request, \"Email Changed Successfully\")\n \n else:\n form = EmailForm(instance=request.user)\n \n ctx = _make_context(request, \"email_form\", form)\n \n return TemplateResponse(request, \"users/index.html\", ctx)", "def handle_new_email(self):\n email = self.__email_handler.get_most_recent_email()\n message = self.__email_handler.get_email_body(email['id'])\n sender = self.__email_handler.get_email_sender(email['id'])\n\n if 'COMMAND' in message:\n\n if 'test' in message:\n print(f'{self.__source} Test Command Recieved')\n\n if 'new password' in message:\n print(f'{self.__source} New Password Command Recieved')\n # create new password\n # store new password\n # reset current password via screenscraper\n # notify users of change\n self.__email_handler.send(\n f'Password has been reset: {self.__password}')\n\n if 'get password' in message:\n print(f'{self.__source} Get Password Command Recieved')\n self.__email_handler.send('test', recipients=[sender])\n\n if 'add user' in message:\n print(f'{self.__source} Add User Command Recieved')\n self.__recipients.append(sender)\n # send email with message explaing email use\n\n if 'help' in message:\n print(f'{self.__source} Help Command Recieved')\n # send email with message explaing email use", "def opt_in(msg_hash):\r\n email, removed = Email.handler.opt_in(msg_hash)\r\n if email and removed:\r\n Email.handler.add_to_queue(None, None, [email], \"reddit.com\",\r\n datetime.datetime.now(g.tz),\r\n '127.0.0.1', Email.Kind.OPTIN)\r\n return email, removed", "def clean_email(self):\n return self.cleaned_data[\"email\"]", "def save(self, domain_override=None,\n subject_template_name='registration/password_reset_subject.txt',\n email_template_name='registration/password_reset_email.html',\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None,\n html_email_template_name=None):\n email = self.cleaned_data[\"email\"]\n User = get_user_model()\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n for user in active_users:\n subject = _('Flisol - Restore your password')\n # send_email(\n # subject,\n # [user.email],\n # email_template_name,\n # {\n # 'email': user.email,\n # 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n # 'user': user,\n # 'token': token_generator.make_token(user),\n # 'protocol': settings.PROTOCOL,\n # },\n # )", "async def change_email(self, new_email, password):\n data = {\"password\": password, \"emailAddress\": new_email}\n e = await self.request.request(url='https://accountsettings.roblox.com/v1/email', method='post', data=data)\n return e", "def create_new_mail(self):\n self.driver.get(consts.TEMP_MAIL)\n soup = BeautifulSoup(self.driver.page_source)\n self.mail = soup.find(id=\"email_id\").attrs[\"data-value\"]", "def validate_email(self, email):\n if email.data != current_user.email:\n user = 
User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. Please choose another email.')", "def confirm_email(self):\n self.active = True\n self.save()", "def test_user_logged_in_new_emails_dont_match(self):\n form_data = {\n 'password': self.password,\n 'new_email': '[email protected]',\n 'new_email2': '[email protected]'\n }\n self.assertTrue(self.login())\n post_response = self.post_change_email(form_data)\n self.assertEqual(post_response.status_code, 200)\n form = post_response.context.get('form')\n self.assertIsInstance(form, ChangeEmailForm)\n self.assertContains(post_response, 'New emails must match')\n user = User.objects.get(pk=self.user.id)\n self.assertEqual(user.email, self.email)\n self.assertNotEqual(user.email, '[email protected]')\n self.assertNotEqual(user.email, '[email protected]')", "def validation_email_sent(request):\n assert(settings.EMAIL_VALIDATION == True)\n logging.debug('')\n data = {\n 'email': request.user.email,\n 'change_email_url': reverse('user_changeemail'),\n 'action_type': 'validate'\n }\n return render_to_response('authenticator/changeemail.html', RequestContext(request, data))", "def post(self, name):\n user = User.find_user_by_name(name).first_or_404()\n form = UserForm()\n if user == current_user and form.validate_on_submit(): \n old_name = current_user.name\n if form.email.data != '': \n user.email = form.email.data\n user.name = form.name.data\n user.twitter_handle = form.twitter.data\n db.session.commit()\n\n key = make_template_fragment_key(\"user\", vary_on=[old_name])\n cache.delete(key)\n\n flash('Your edits are saved, thanks.', category = 'info')\n return redirect(url_for('.user', name=form.name.data))", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def add(self, item):\n if item.email and item.email not in [i.email for i in self.lst]:\n self.lst.append(item)\n else:\n print(\"WARN: Recipient not added because a recipient with that email address already exists: {}\", item)", "def post(self, request):\n if 'email' in self.request.POST:\n if (Person.objects.filter(user__email=self.request.POST['email'])\n .exists()):\n user = (Person.objects\n .filter(user__email=self.request.POST['email'])\n .last().user)\n return Response({'status': True, 'user_id': user.id})\n return Response({'status': False, 'user_id': 0})", "def opt_out(msg_hash):\r\n email, added = Email.handler.opt_out(msg_hash)\r\n if email and added:\r\n Email.handler.add_to_queue(None, None, [email], \"reddit.com\",\r\n datetime.datetime.now(g.tz),\r\n '127.0.0.1', Email.Kind.OPTOUT)\r\n return email, added", "def duplicates_existing_address(self, new_address):\n\n addresses = Address.objects.filter(customer=self.user)\n\n for address in addresses:\n match = True\n for field in self.cleaned_data:\n value = self.cleaned_data[field]\n address_value = getattr(address, field)\n if value != address_value:\n match = False\n break\n if match:\n self.set_most_current_address(address)\n return False\n\n else:\n return True", "def save(self, **kwargs):\n\t\tif (self.answer != \"\"):\n\t\t\tself.answered = True\n\t\t\tsuper().save(**kwargs)\n\t\t\tmail_subject = \"Your Question has been answered.\"\n\t\t\t# Do not break the following string or the email will get cut off\n\t\t\tmessage = f\"Hi {self.user.first_name},\\n\\n You're receiving this email because you asked a question about {self.hall.name}. 
The HonestHalls team has now answered your question! Revisit the {self.hall.name} hall page to see their response.\\n\\n\"\n\n\t\t\temail = EmailMessage(\n\t\t\t\tmail_subject, message, to=[self.user.email]\n\t\t\t)\n\t\t\temail.send()\n\t\telse:\n\t\t\tsuper().save(**kwargs)", "def _store_response_for_duplicates(self, message):\n\n key = (message.remote, message.mid)\n if key in self._recent_messages:\n self._recent_messages[key] = message", "def confirm_email(self, request, email_address):\n email_address.verified = True\n email_address.set_as_primary(conditional=True)\n email_address.save()\n\n u = get_user_model().objects.get(pk=email_address.user.id)\n u.is_active = True\n u.save()", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n qs = JOSReservation.objects.exclude(id=self.instance.id).filter(email=email)\n if len(qs) == 0:\n return email\n raise forms.ValidationError(\n ugettext(\"This email is already registered\"))", "def change_email_request(request):\r\n ## Make sure it checks for existing e-mail conflicts\r\n if not request.user.is_authenticated():\r\n raise Http404\r\n\r\n user = request.user\r\n\r\n if not user.check_password(request.POST['password']):\r\n return JsonResponse({\r\n \"success\": False,\r\n \"error\": _('Invalid password'),\r\n }) # TODO: this should be status code 400 # pylint: disable=fixme\r\n\r\n new_email = request.POST['new_email']\r\n try:\r\n validate_email(new_email)\r\n except ValidationError:\r\n return JsonResponse({\r\n \"success\": False,\r\n \"error\": _('Valid e-mail address required.'),\r\n }) # TODO: this should be status code 400 # pylint: disable=fixme\r\n\r\n if User.objects.filter(email=new_email).count() != 0:\r\n ## CRITICAL TODO: Handle case sensitivity for e-mails\r\n return JsonResponse({\r\n \"success\": False,\r\n \"error\": _('An account with this e-mail already exists.'),\r\n }) # TODO: this should be status code 400 # pylint: disable=fixme\r\n\r\n pec_list = PendingEmailChange.objects.filter(user=request.user)\r\n if len(pec_list) == 0:\r\n pec = PendingEmailChange()\r\n pec.user = user\r\n else:\r\n pec = pec_list[0]\r\n\r\n pec.new_email = request.POST['new_email']\r\n pec.activation_key = uuid.uuid4().hex\r\n pec.save()\r\n\r\n if pec.new_email == user.email:\r\n pec.delete()\r\n return JsonResponse({\r\n \"success\": False,\r\n \"error\": _('Old email is the same as the new email.'),\r\n }) # TODO: this should be status code 400 # pylint: disable=fixme\r\n\r\n context = {\r\n 'key': pec.activation_key,\r\n 'old_email': user.email,\r\n 'new_email': pec.new_email\r\n }\r\n\r\n subject = render_to_string('emails/email_change_subject.txt', context)\r\n subject = ''.join(subject.splitlines())\r\n\r\n message = render_to_string('emails/email_change.txt', context)\r\n\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n\r\n send_mail(subject, message, from_address, [pec.new_email])\r\n\r\n return JsonResponse({\"success\": True})", "def clean_email(self):\r\n email = self.cleaned_data[\"email\"]\r\n #The line below contains the only change, removing is_active=True\r\n self.users_cache = User.objects.filter(email__iexact=email)\r\n if not len(self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unknown'])\r\n if any((user.password == UNUSABLE_PASSWORD)\r\n for user in self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unusable'])\r\n return email", "def amended_address(self, amended_address):\n\n self._amended_address = 
amended_address", "def receive(self, email):\n self.inbox += email", "def changeemail(request, action='change'):\n logging.debug('')\n msg = request.GET.get('msg', None)\n extension_args = {}\n user_ = request.user\n\n if request.POST:\n if 'cancel' in request.POST:\n msg = _('your email was not changed')\n request.user.message_set.create(message=msg)\n return HttpResponseRedirect(get_next_url(request))\n form = forms.ChangeEmailForm(request.POST, user=user_)\n if form.is_valid():\n new_email = form.cleaned_data['email']\n if new_email != user_.email:\n if settings.EMAIL_VALIDATION == True:\n action = 'validate'\n else:\n action = 'done_novalidate'\n set_new_email(user_, new_email,nomessage=True)\n else:\n action = 'keep'\n else:\n form = forms.ChangeEmailForm(initial={'email': user_.email},\n user=user_)\n \n output = render_to_response(\n 'authenticator/changeemail.html',\n {\n 'form': form,\n 'email': user_.email,\n 'action_type': action,\n 'gravatar_faq_url': reverse('faq') + '#gravatar',\n 'change_email_url': reverse('user_changeemail'),\n 'msg': msg \n },\n context_instance=RequestContext(request)\n )\n\n if action == 'validate':\n set_email_validation_message(user_)\n\n return output", "def test_duplicate_email(self):\n self.duplicate_email = {'user': {\n \"username\": \"remmy\",\n \"email\": \"[email protected]\",\n \"password\": \"@Password123\"\n }}\n\n self.duplicate_email2 = {'user': {\n \"username\": \"remmyk\",\n \"email\": \"[email protected]\",\n \"password\": \"@Password123\"\n }\n }\n self.client.post(\n self.reg_url,\n self.duplicate_email,\n format=\"json\")\n response = self.client.post(\n self.reg_url,\n self.duplicate_email2,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"email provided is already in use\", response.content)", "def email(self, email: str):\n\n self._email = email", "def add_manualpost_email(request, submission_id=None, access_token=None):\n\n if request.method == 'POST':\n try:\n button_text = request.POST.get('submit', '')\n if button_text == 'Cancel':\n return redirect(\"submit/manual_post.html\")\n \n form = SubmissionEmailForm(request.POST)\n if form.is_valid():\n submission_pk = form.cleaned_data['submission_pk']\n message = form.cleaned_data['message']\n #in_reply_to = form.cleaned_data['in_reply_to']\n # create Message\n \n if form.cleaned_data['direction'] == 'incoming':\n msgtype = 'msgin'\n else:\n msgtype = 'msgout'\n \n submission, submission_email_event = (\n add_submission_email(request=request,\n remote_ip=request.META.get('REMOTE_ADDR', None),\n name = form.draft_name,\n rev=form.revision,\n submission_pk = submission_pk,\n message = message,\n by = request.user.person,\n msgtype = msgtype) )\n \n messages.success(request, 'Email added.')\n \n try:\n draft = Document.objects.get(name=submission.name)\n except Document.DoesNotExist:\n # Assume this is revision 00 - we'll do this later\n draft = None\n \n if (draft != None):\n e = AddedMessageEvent(type=\"added_message\", doc=draft)\n e.message = submission_email_event.submissionemailevent.message\n e.msgtype = submission_email_event.submissionemailevent.msgtype\n e.in_reply_to = submission_email_event.submissionemailevent.in_reply_to\n e.by = request.user.person\n e.desc = submission_email_event.desc\n e.time = submission_email_event.time\n e.save()\n \n return redirect(\"ietf.submit.views.manualpost\")\n except ValidationError as e:\n form = SubmissionEmailForm(request.POST)\n form._errors = {}\n form._errors[\"__all__\"] = 
form.error_class([\"There was a failure uploading your message. (%s)\" % e.message])\n else:\n initial = {\n }\n\n if (submission_id != None):\n submission = get_submission_or_404(submission_id, access_token)\n initial['name'] = \"{}-{}\".format(submission.name, submission.rev)\n initial['direction'] = 'incoming'\n initial['submission_pk'] = submission.pk\n else:\n initial['direction'] = 'incoming'\n \n form = SubmissionEmailForm(initial=initial)\n\n return render(request, 'submit/add_submit_email.html',dict(form=form))", "def add_submission_email(request, remote_ip, name, rev, submission_pk, message, by, msgtype):\n\n #in_reply_to = form.cleaned_data['in_reply_to']\n # create Message\n parts = pyzmail.parse.get_mail_parts(message)\n body=''\n for part in parts:\n if part.is_body == 'text/plain' and part.disposition == None:\n payload, used_charset = pyzmail.decode_text(part.get_payload(), part.charset, None)\n body = body + payload + '\\n'\n\n msg = submit_message_from_message(message, body, by)\n\n if (submission_pk != None):\n # Must exist - we're adding a message to an existing submission\n submission = Submission.objects.get(pk=submission_pk)\n else:\n # Must not exist\n submissions = Submission.objects.filter(name=name,rev=rev).exclude(state_id='cancel')\n if submissions.count() > 0:\n raise ValidationError(\"Submission {} already exists\".format(name))\n \n # create Submission using the name\n try:\n submission = Submission.objects.create(\n state_id=\"waiting-for-draft\",\n remote_ip=remote_ip,\n name=name,\n rev=rev,\n title=name,\n note=\"\",\n submission_date=datetime.date.today(),\n replaces=\"\",\n )\n from ietf.submit.utils import create_submission_event, docevent_from_submission\n desc = \"Submission created for rev {} in response to email\".format(rev)\n create_submission_event(request, \n submission,\n desc)\n docevent_from_submission(request,\n submission,\n desc)\n except Exception as e:\n log(\"Exception: %s\\n\" % e)\n raise\n\n if msgtype == 'msgin':\n rs = \"Received\"\n else:\n rs = \"Sent\"\n\n desc = \"{} message - manual post - {}-{}\".format(rs, name, rev)\n submission_email_event = SubmissionEmailEvent.objects.create(\n desc = desc,\n submission = submission,\n msgtype = msgtype,\n by = by,\n message = msg)\n #in_reply_to = in_reply_to\n\n save_submission_email_attachments(submission_email_event, parts)\n return submission, submission_email_event", "def testEmailAlreadyThere(self):\r\n res = self.app.post(\r\n '/signup_process',\r\n params={\r\n 'email': '[email protected]'\r\n }\r\n )\r\n self.assertIn('already signed up', res.body)", "def _pre_put_hook(self) -> None:\n super()._pre_put_hook()\n self.email_hash = self._generate_hash(\n self.recipient_id, self.subject, self.html_body)", "def _expire(self):\n del self.map.addr[self.name]\n self.map.notify(\"addrmap_expired\", *[self.name], **{})", "def on_update(self):\n\t\tfor email_account in frappe.get_all(\"Email Account\", filters={\"domain\": self.name}):\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\", email_account.name)\n\t\t\t\tfor attr in [\"email_server\", \"use_imap\", \"use_ssl\", \"use_tls\", \"attachment_limit\", \"smtp_server\", \"smtp_port\", \"use_ssl_for_outgoing\", \"append_emails_to_sent_folder\", \"incoming_port\"]:\n\t\t\t\t\temail_account.set(attr, self.get(attr, default=0))\n\t\t\t\temail_account.save()\n\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(_(\"Error has occurred in {0}\").format(email_account.name), raise_exception=e.__class__)", "def 
RememberDNS(IPAddr, Hostname, RecType):\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (Hostname == ''):\n\t\treturn\n\n\tif (not DNSRecord.has_key(IPAddr + \",\" + RecType)):\t\t#If we haven't seen this hostname for this IPAddr,\n\t\tDNSRecord[IPAddr + \",\" + RecType] = [ Hostname ]\t#make an array with just this hostname\n\telif not (Hostname in DNSRecord[IPAddr + \",\" + RecType] ):\t#If we _do_ have existing hostnames for this IP, but this new Hostname isn't one of them\n\t\tDNSRecord[IPAddr + \",\" + RecType].append(Hostname)\t#Add this Hostname to the list\n\n\tif not(HostIPs.has_key(Hostname)):\n\t\tif not(isFQDN(Hostname)):\t#We don't want to remember ips for names like \"www\", \"ns1.mydom\", \"localhost\", etc.\n\t\t\treturn\n\t\tHostIPs[Hostname] = [ ]\n\t#else:\n\t\t#Since we've found \"Hostname\" as a key, we don't need to check if it's an FQDN again, we already checked once.\n\n\tif not( IPAddr in HostIPs[Hostname] ):\t\t#If we haven't seen this IP address for this hostname,\n\t\tHostIPs[Hostname].append(IPAddr)\t#Remember this new IP address for this hostname.", "def test_user_not_logged_in_redirects_from_change_email(self):\n get_response = self.get_change_email()\n post_response = self.post_change_email()\n self.assertRedirects(get_response, self.login_url)\n self.assertRedirects(post_response, self.login_url)", "def form_valid(self, form):\n self.object = form.save()\n self.send_verify_email()\n return super().form_valid(form)", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n qs = User.objects.exclude(id=self.instance.id).filter(email=email)\n if len(qs) == 0:\n return email\n raise forms.ValidationError(\n ugettext(\"This email is already registered\"))", "def user_profile_setemail(token, email):\n users = database.get_users()\n for user in users:\n if user['email'] is email:\n raise error.InputError(description=\"This email is already taken\")\n u_id = database.get_current_user(token)\n user = database.get_user_data(u_id)\n user['email'] = email\n database.set_user_data(user)", "def clean_email(self):\n if getattr(self.instance, 'email', None):\n raise ValidationError(self.registered_error)\n return self.cleaned_data['email']", "def email(self):\n return self.__email", "def save(self, *args, **kwargs):\n if not self.identifier:\n if self.event is None:\n self.identifier = self.email.lower()\n else:\n self.identifier = \"%s@%s.event.pretix\" % (self.username.lower(), self.event.id)\n if not self.pk:\n self.identifier = self.identifier.lower()\n super().save(*args, **kwargs)", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n # Check if user exists already, error early\n if User.objects.filter(email=email).exists():\n LOGGER.debug(\"email already exists\", email=email)\n raise ValidationError(_(\"Email already exists\"))\n return email", "def is_duplicate_email(email):\n users = User.objects.filter(email=email).values()\n if len(users):\n return True\n return False", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(\"A user with that email already exists.\"))", "def recipient_email(self, recipient_email):\n\n self._recipient_email = recipient_email", "def clean_email(self):\n if self.data.get(\"selected_item\") != self.AGENT_ID:\n # resume normal invite flow\n return super().clean_email()\n\n email = self.cleaned_data[\"email\"]\n email = get_invitations_adapter().clean_email(email)\n 
try:\n self._agent_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n return super().clean_email()\n\n if self._agent_user.account_type != AccountType.agent_user.value:\n raise forms.ValidationError(\n _(\"An active non-agent user is using this e-mail address\")\n )\n if self._agent_user.organisations.filter(\n id=self.instance.organisation.id\n ).exists():\n raise forms.ValidationError(\n _(\"This agent is already active for this organisation\")\n )\n\n return email", "def test_update_user_endpoint_new_email(self):\n print(\"Generate a new email and check if email is not allocated\")\n email_id = Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"])\n kwargs = {'email_id': email_id, 'return_response_obj': True,\n 'url': self.test_args[\"relative_url_check_email\"]}\n response = self.test_check_email_endpoint(**kwargs)\n assert json.loads(response.text)[\"data\"][\"available\"] is True, \"Unable to generate a new email id\"\n\n print(\"Update email id\")\n response = self.test_update_user_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"", "def enter_email(self, email):\n self.selib.input_text(self.locator.email, email)", "def save(self):\n email = self.validated_data['email']\n name = self.validated_data['name']\n password = self.validated_data['password']\n\n email_query = models.Email.objects.filter(address=email)\n if email_query.exists():\n email_instance = email_query.get()\n\n # If the email is already verified, we send a duplicate\n # notification and exit.\n if email_instance.is_verified:\n logger.info(\n \"Not registering a new user because the email address %r \"\n \"is already verified.\",\n email_instance,\n )\n email_instance.send_duplicate_notification()\n\n return\n\n # If the email is not verified, we send a new verification\n # token to the address.\n logger.info(\n \"Not registering a new user because the email address %r \"\n \"already exists. Sending a new verification token instead.\"\n )\n verification = models.EmailVerification.objects.create(\n email=email_instance,\n )\n verification.send_email()\n\n return\n\n # The email doesn't exist, so we create a new user and email,\n # then send a verification token to the email.\n user = models.User.objects.create_user(name, password)\n email_instance = models.Email.objects.create(address=email, user=user)\n\n # The user's primary email is their only email. 
This is the only\n # time the primary email can be unverified.\n user.primary_email = email_instance\n user.save()\n\n logger.info(\n \"Registered new user %r with email address %r\",\n user,\n email_instance,\n )\n\n verification = models.EmailVerification.objects.create(\n email=email_instance,\n )\n verification.send_email()", "def clean_email_address(self):\n c_d = self.cleaned_data\n if User.objects.exclude(id=c_d['id']).filter(\n email=c_d['email_address']):\n raise forms.ValidationError(u'The email is already registered.')\n return c_d['email_address']", "def test_existing_email(self):\n response = self.client.post(\n self.reset_password_url, {\"email\": \"[email protected]\"}, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.data['detail'], \"Not found.\")", "def post_duplicate_question(self):\n\n self.post_question()\n return self.post_question()", "def validate_recipient_email(self, email):\n email_query = EmailAddress.objects.filter(\n email=email, is_verified=True\n )\n\n if not email_query.exists():\n raise serializers.ValidationError(\n ugettext(\"No Know Me user owns the provided email address.\")\n )\n\n self._recipient_email_inst = email_query.get()\n\n return email", "def confirm_email_change(request, key):\r\n try:\r\n try:\r\n pec = PendingEmailChange.objects.get(activation_key=key)\r\n except PendingEmailChange.DoesNotExist:\r\n response = render_to_response(\"invalid_email_key.html\", {})\r\n transaction.rollback()\r\n return response\r\n\r\n user = pec.user\r\n address_context = {\r\n 'old_email': user.email,\r\n 'new_email': pec.new_email\r\n }\r\n\r\n if len(User.objects.filter(email=pec.new_email)) != 0:\r\n response = render_to_response(\"email_exists.html\", {})\r\n transaction.rollback()\r\n return response\r\n\r\n subject = render_to_string('emails/email_change_subject.txt', address_context)\r\n subject = ''.join(subject.splitlines())\r\n message = render_to_string('emails/confirm_email_change.txt', address_context)\r\n up = UserProfile.objects.get(user=user)\r\n meta = up.get_meta()\r\n if 'old_emails' not in meta:\r\n meta['old_emails'] = []\r\n meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])\r\n up.set_meta(meta)\r\n up.save()\r\n # Send it to the old email...\r\n try:\r\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)\r\n except Exception:\r\n log.warning('Unable to send confirmation email to old address', exc_info=True)\r\n response = render_to_response(\"email_change_failed.html\", {'email': user.email})\r\n transaction.rollback()\r\n return response\r\n\r\n user.email = pec.new_email\r\n user.save()\r\n pec.delete()\r\n # And send it to the new email...\r\n try:\r\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)\r\n except Exception:\r\n log.warning('Unable to send confirmation email to new address', exc_info=True)\r\n response = render_to_response(\"email_change_failed.html\", {'email': pec.new_email})\r\n transaction.rollback()\r\n return response\r\n\r\n response = render_to_response(\"email_change_successful.html\", address_context)\r\n transaction.commit()\r\n return response\r\n except Exception:\r\n # If we get an unexpected exception, be sure to rollback the transaction\r\n transaction.rollback()\r\n raise", "def is_email_address_already_assigned(email_address: str) -> bool:\n return _do_users_matching_filter_exist(DbUser.email_address, email_address)", "def setEmail(self, email):\n self.email = email\n return self", "def 
_update_attendee_by_email(email, marketing, gdpr, name=None):\n\n attendee = Attendee.objects.get(email=email)\n attendee.date_signed = datetime.date.today()\n attendee.marketing = marketing\n attendee.gdpr = gdpr\n if name:\n attendee.name = name\n attendee.save()\n\n return attendee", "async def handle_email(self, email):\n\t\tif 'logdir' in self.log_settings:\n\t\t\tfilename = 'email_%s_%s.eml' % (datetime.datetime.utcnow().isoformat(), str(uuid.uuid4()))\n\t\t\twith open(str(Path(self.log_settings['logdir'], 'emails', filename).resolve()), 'wb') as f:\n\t\t\t\tf.write(email.email.as_bytes())\n\n\t\tawait self.log('You got mail!')", "def email_address(self, email_address: \"str\"):\n self._attrs[\"emailAddress\"] = email_address" ]
[ "0.65422535", "0.64260054", "0.63118654", "0.63064903", "0.60563016", "0.6038557", "0.6014613", "0.5898787", "0.58314365", "0.5781677", "0.577932", "0.5769288", "0.575497", "0.5719079", "0.57139456", "0.5708547", "0.5708547", "0.5700854", "0.5699276", "0.56723505", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56379414", "0.5627278", "0.5622241", "0.56210107", "0.56210107", "0.56210107", "0.56202716", "0.5619143", "0.55931324", "0.5564348", "0.5524306", "0.551634", "0.5512565", "0.5456366", "0.545083", "0.5435897", "0.5434686", "0.5430473", "0.54276687", "0.54252654", "0.54052424", "0.5400377", "0.5400377", "0.5396861", "0.53884435", "0.538779", "0.5349634", "0.5332724", "0.5328286", "0.5321071", "0.53190833", "0.5315497", "0.5306332", "0.5298069", "0.5280678", "0.5268922", "0.5267292", "0.52654344", "0.52545726", "0.52530277", "0.52515054", "0.52336544", "0.522959", "0.5224255", "0.5200759", "0.5197213", "0.5196407", "0.51957977", "0.518876", "0.51635116", "0.51510715", "0.51364243", "0.5130561", "0.51286674", "0.51258695", "0.5117222", "0.511178", "0.5089559", "0.50874686", "0.5085683", "0.508468", "0.50840884", "0.5075399", "0.5072665", "0.50719136", "0.50637144", "0.5063079", "0.5062774", "0.50592506", "0.5052448" ]
0.6590844
0
Get an item for the template, containing the document.
def _get_list_item(self, document):
    list_item = Item()
    list_item.document = document
    # TODO: call callables?
    list_item.display_fields = [self._prepare_field(document, field)
                                for field in self.list_display]
    return list_item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def getItemTemplate(self):\n self.update()\n if self.resource_item_template is None:\n return None\n\n return ResourceItem(self.resource_item_template, self.__user__)", "def get_item(self):\n return self.item", "def get_item(self):\n return self.item", "def get_item(self):\n raise NotImplementedError", "def GetItem(self):\r\n \r\n return self._item", "def GetTemplate(self, _page_data):\n return self.template", "def get_item(self, index: int) -> _T:\n return self.index_to_item[index]", "def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist", "def get_context(self):\n uuid = self.data.get('uuid', None)\n if uuid is None:\n return\n item = ploneapi.content.get(UID=uuid)\n return item", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def getItem(self) -> Optional[items.Item]:\n return None if self.__itemRef is None else self.__itemRef()", "def get_portal_item(portal_connection, item_name, item_type):\r\n search_item = portal_connection.content.search(\r\n query='title:{} AND owner:{}'.format(item_name, portal_connection.users.me.username),\r\n item_type=item_type\r\n )\r\n\r\n if not search_item:\r\n raise KeyError('unable to find {}'.format(item_name))\r\n\r\n item = search_item[0]\r\n item_id = item.id\r\n portal_item = portal_connection.content.get(item_id)\r\n\r\n if not portal_item:\r\n raise KeyError(\"unable to find an item with id of '{}'\".format(item_id))\r\n\r\n return portal_item", "def getItem(self):\n return self.getItem(0)", "def get_template(self, name):\n for template in self.templates:\n if template.name == name:\n assert isinstance(template, Template)\n return template\n return None", "def get_document(name):\n document = [d for d in documents if d.name == name]\n if len(document) > 0:\n return document[0]", "def get_document(self, docid):\n raise NotImplementedError", "def get(self, id):\n if id == 'body':\n return document.body\n else:\n return self.instances[id]", "def get_item(self, id: str, user: User) -> Optional[T]:", "def get_object(self):\n pk = self.kwargs.get('id')\n return get_object_or_404(Book, pk=pk)", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def getItem(self, itemID, no_html=False):\n data = self._client.Item.find(int(itemID))\n item = self.makeDict(data, no_html=no_html)\n return item", "def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()", "def _get_template_by_id(self, template_id):\n raise NotImplementedError()", "def get_template(self, name):\n matches = self.find_templates(name=name)\n if not matches:\n raise ImageNotFoundError('template with name {}'.format(name))\n if len(matches) > 1:\n raise MultipleItemsError('multiple templates with name {}'.format(name))\n return matches[0]", "def get_template(self ,template_name):\n\n found = False\n for template in self.templates:\n if template['name'] == template_name:\n found = True\n return template\n if not found:\n return None", "def get_item(self, index):\n if index == 0:\n raise IndexError(\"<{0}> Index start as 1\".format(type(self).__name__))\n index = self.get_index(index)\n res = self.get_item_type()()\n self.get_Item(index, res)\n return res", "def get_item(self, item_type):\n if item_type not in self._internal_type_mapping:\n return None\n else:\n return self._internal_type_mapping[item_type]", "def _get_doc(results, 
index):\n return results[index]", "def get(self, template_name):\n template = db.Template.find_one(template_name=template_name)\n\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n\n return self.make_response({'template': template})", "def get_item(self, call_number):\n return self.item_list.get(call_number)", "def get_item(self, option):\n selected_item = None\n items = [item for item in self.items if item.id == option]\n if len(items) > 0:\n selected_item = items[0]\n return selected_item", "def get_item(context, item):\n item_instance = context.get(\"reports\") if item == \"reports\" else context.get(f\"report/{context.uuid['report']}\")\n if item != \"reports\":\n item_instance = [\n report for report in item_instance[\"reports\"] if report[\"report_uuid\"] == context.uuid[\"report\"]\n ][0]\n if item == \"notification_destination\":\n return item_instance[\"notification_destinations\"][context.uuid[\"notification_destination\"]]\n if item != \"report\":\n item_instance = item_instance[\"subjects\"][context.uuid[\"subject\"]]\n if item != \"subject\":\n item_instance = item_instance[\"metrics\"][context.uuid[\"metric\"]]\n if item != \"metric\":\n item_instance = item_instance[\"sources\"][context.uuid[\"source\"]]\n return item_instance", "def _get(self, object='emailTemplate', path=None, params=None):\n if params is None:\n params = {}\n result = self.client.get(object=object, path=path, params=params)\n return result", "def get_item(self, name: str) -> Optional[Item]:\n item = self.filter_items(name, limit=1)\n return item[0] if item else None", "def obj_get(self, request=None, **kwargs):\n return Document(self.get_collection(request).find_one({\n \"_id\": ObjectId(kwargs.get(\"pk\"))\n }))", "def getitem(self, code):\n\n fetch = self._collection.find_one({'code':code})\n\n return fetch", "def get_template(self, name):\n return self.templates.get(name)", "def get_document(self):\n return self.document", "def get_one(collection: Collection, query: Dict[str, Any]):\n data = collection.find_one(query)\n if data is None:\n raise CannotFindItemInDatabase(query, data, collection.name)\n return data", "def get_item_with_id(self, uid):\n for item in self.get_items():\n if item.id == uid:\n return item\n\n return None", "def _get_item(self, cont, index):\n # make sure the given object is a container:\n if not isinstance(cont, collections.Container):\n raise Exception(\"'%s': not a container: cannot index '%s' in '%s'\"\n % (self.name, index, cont))\n\n # try and return the element. Even an exception may or may\n # not be specified (ex: CFN's GetAtt: AvailabilityZone):\n try:\n # NOTE: we can't just test with 'in' here as we may\n # be trying to index a list:\n return cont[index]\n except (IndexError, KeyError):\n # if not found; make sure it's not an exception:\n if index in self._exceptions:\n # just log the event and return the arg directly:\n LOG.warn(\"'%s': get exception applied for '%s'. 
Defaulting to\"\n \" '%s'.\", self.name, index, self._exceptions[index])\n return self._exceptions[index]\n else:\n # rock bottom:\n raise FunctionApplicationException(\n \"'%s': index '%s' missing from :'%s'\" % (\n self.name, index, cont\n )\n )", "def get(self, item):\r\n raise NotImplementedError", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def get_item(self, itemID, no_html=False, external_id=False, depth=1):\n data = self._client.Item.find(int(itemID))\n item = self.make_dict(data, no_html=no_html, external_id=external_id, depth=depth)\n return item", "def get_template(self, name, args):\n key = name, len(args)\n template = self.templates.get(key)\n if not template:\n raise mio.MIOException('Undefined template \"%s/%d\"' % (name, len(args)))\n return template", "def item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to view the item because\n # the item is private and was created by a different user, send a\n # 403\n elif item.private and current_user != item.user:\n abort(403)\n\n return render_template('item.html', item=item)", "def get(self, item):\n return self.graph._get(self.parent.handle,\n **{self.type: item})", "def __getitem__(self, item):\n return self.notes[item]", "def get_one(self, arg):\n if self.validator.assert_id(arg):\n return self.get_item(item_id=arg)\n\n elif arg == 'schema':\n return self.validator.schema\n\n else:\n pecan.abort(404)", "def get_item(self, item_id):\n if self._database:\n try:\n return self._database.retrieve(item_id)\n except PyragargaError:\n pass\n # TODO: Retry if it times out \n details_page = self._build_tree(\n self._session.get(KG_URL + DETAILS_SCRIPT,\n params={'id': item_id, 'filelist':1}\n ).content)\n item = self._parse_details_page(details_page, item_id)\n if self._database:\n self._database.store(item)\n self.logger.info('Received details for item %d' % item.kg_id)\n return item", "def get_type(self):\n\n return ebooklib.ITEM_DOCUMENT", "def get_document(obj):\n try:\n return ES.get(\n index=obj.get_index_name(), doc_type=obj.get_document_type(), id=obj.pk)\n except NotFoundError:\n raise DocumentNotFound(obj.get_index_name(), obj.pk)", "def get(self, itemId):\n\n tableRow = self.__queryTableRow(itemId)\n return self.__getItemFromTableRow(tableRow)", "def get(self, request, pk):\n return self.retrieve(request, pk)", "def get(self, _id):\n try:\n doc = self._db[_id]\n # For speed testing\n del self._db[_id]\n except KeyError:\n return None\n else:\n return self._parse_doc(doc)", "def __getitem__(self, item):\n return self._metadata[item]", "def get_template(self):\n return self.template", "def read_template(site_name, doc_name):\n siteid = _get_site_id(site_name)\n cur = conn.cursor(cursor_factory=pgx.RealDictCursor)\n querystring = 'select text from {} where site_id = %s and name = %s;'\n result = execute_query(querystring.format(TABLES[5]), (siteid, doc_name))\n row = cur.fetchone()\n result = row['text']\n return result", "def get_item_by_id(self, item_id):\n\n return self.api.items.get(item_id)['item']", "def getItem(self, nodeName):\n allItems = pQt.getAllItems(self.twTree)\n for item in allItems:\n if item.name == nodeName:\n return item", "def get(template_id):\n try:\n template = Template.objects.get(pk=template_id)\n except:\n raise CMException('template_get')\n\n if not template or template.state != template_states['active']:\n 
raise CMException('template_get')\n\n return template", "def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj", "def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj", "def _get(self, language):\n document = self._data.get(language)\n if not document:\n document = self._data[self.getDefaultLanguage()]\n return document", "def read_item(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n item = crud.item.get(db=db, id=id)\n if not item:\n raise HTTPException(status_code=404, detail='Item not found')\n if not crud.user.is_superuser(current_user) and (item.owner_id != current_user.id):\n raise HTTPException(status_code=400, detail='Not enough permissions')\n return item", "def _create_template( service):\n\n return DOCS.documents().create(body=template_page_setup).execute().get('documentId')", "def _get_item(self, item_name, item_type):\n\t\t# create local cache for performance optimizations. TODO: Rewrite functions that call this function\n\t\tif not self.item_list:\n\t\t\tself.item_list = self.pre_object_list\n\t\t\tself.item_cache = {}\n\t\t\tfor item in self.item_list:\n\t\t\t\tif not item.has_key('name'):\n\t\t\t\t\tcontinue\n\t\t\t\tname = item['name']\n\t\t\t\ttmp_item_type = (item['meta']['object_type'])\n\t\t\t\tif not self.item_cache.has_key( tmp_item_type ):\n\t\t\t\t\tself.item_cache[tmp_item_type] = {}\n\t\t\t\tself.item_cache[tmp_item_type][name] = item\n\t\ttry:\n\t\t\treturn self.item_cache[item_type][item_name]\n\t\texcept:\n\t\t\treturn None\n\t\tif self.item_cache[item_type].has_key(item_name):\n\t\t\treturn self.item_cache[item_type][item_name]\n\t\treturn None\n\t\tfor test_item in self.item_list: \n\t\t\t## Skip items without a name\n\t\t\tif not test_item.has_key('name'):\n\t\t\t\tcontinue\n\n\t\t\t## Make sure there isn't an infinite loop going on\n\t\t\ttry:\n\t\t\t\tif (test_item['name'] == item_name) and (test_item['meta']['object_type'] == item_type):\n\t\t\t\t\treturn test_item\n\t\t\texcept:\n\t\t\t\traise ParserError(\"Loop detected, exiting\", item=test_item)\n\t\t\t\n\t\t## If we make it this far, it means there is no matching item\n\t\treturn None", "def get_doc(item):\n __doc = {\"model\": __model_doc, \"fit\": __fit_doc}\n return __doc[item]", "def get_doc(ds, idx):\n rtn = ds[idx]\n if isinstance(rtn, dict):\n rtn = rtn['text']\n return rtn", "def __get__(self, instance, owner):\n if instance is None:\n # Document class being used rather than a document object\n return self\n\n # Get value from document instance if available\n return instance._data.get(self.name)", "def read_item(id):\n\n username = login_session.get('username', None)\n item = session.query(Item).filter_by(id=id).one()\n item_display = {'id': item.id, 'title': item.title, 'desc': item.desc}\n return render_template(\n 'read_item.html',\n item_display=item_display,\n username=username)", "def __getitem__(self, item):\n return self.elements[item]", "def GetItem(self,index):\r\n return self.itemId_item[self.gList.GetItemData(index)]", "def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)", "def find_item(name, currentRoom):\n for i in currentRoom.contents:\n if i.name == name:\n return i\n\n return None", "def _get(self, object_name='emailTemplate', path=None, params=None):\n if params is None:\n params = {}\n response = 
self.client.get(object_name=object_name, path=path, params=params)\n return response", "def get_template():\r\n try:\r\n return CourseEmailTemplate.objects.get()\r\n except CourseEmailTemplate.DoesNotExist:\r\n log.exception(\"Attempting to fetch a non-existent course email template\")\r\n raise", "def get_named_document(self, entity, name):\n view = self.db.view(\"%s/name\" % entity, include_docs=True)\n result = view[name]\n if len(result) != 1:\n raise ValueError(\"no such %s document '%s'\" % (entity, name))\n return result.rows[0].doc", "def get_doc(self, item_id, id_field=\"_id\", **kwargs):\r\n\r\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\r\n lg.setLevel(self.log_level)\r\n\r\n args = inspect.getargvalues(inspect.currentframe())[3]\r\n lg.debug(\"\\n****** Args *****:\\n%s\",\r\n pp.pformat(args))\r\n\r\n es = kwargs.get(\"es\",self.es)\r\n doc_type = kwargs.get(\"doc_type\", self.doc_type)\r\n if id_field == \"_id\":\r\n lg.debug(\"*** _id lookup: index: %s item_id: %s\",\r\n self.es_index,\r\n item_id)\r\n result = es.get(index=self.es_index,\r\n id=item_id)\r\n else:\r\n dsl = {\r\n \"query\": {\r\n \"constant_score\": {\r\n \"filter\": {\r\n \"term\": { id_field: item_id }\r\n }\r\n }\r\n }\r\n }\r\n lg.debug(\"*** id_field lookup: index: %s item_id: %s \\nDSL: %s\",\r\n self.es_index,\r\n item_id,\r\n pp.pformat(dsl))\r\n result = es.search(index=self.es_index,\r\n doc_type=doc_type,\r\n body=dsl)\r\n result = first(result.get(\"hits\",{}).get(\"hits\",[]))\r\n lg.debug(\"\\tresult:\\n%s\", pp.pformat(result))\r\n return result", "def document(self, document_id):\r\n return doc.Document(self, document_id)", "def get_item(self, item_id):\n for item in self.order_items:\n if item.get_itemId() == item_id:\n return item", "def find(self,item):\n sig = str(item)\n try:\n return self.index[sig]\n except:\n return None", "def get_document(self, *args, **kwargs):\n return self._documents_manager.get_document(*args, **kwargs)", "def GetEntityByItem(self,i):\n\t\treturn self.Space.Item(i)", "def get(self, item, default=None):\n return self._data.get(item, default)", "def get(self, item, default=None):\n\n return self._data.get(item, default)", "def get(self, idx):\n if idx in self._objects:\n return self._objects[idx]\n else:\n warning(\"%s not found\" % idx)\n return None", "def _item_from_single(provider, container, looking_for):\n try:\n return _item_from(container, provider.index)\n except _EXTRACTION_EXCEPTIONS:\n exceptions.raise_with_cause(\n exceptions.NotFound,\n \"Unable to find result %r, expected to be able to find it\"\n \" created by %s but was unable to perform successful\"\n \" extraction\" % (looking_for, provider))", "def __getitem__(self, url):\n try:\n record = self.es.get(index=self.index, doc_type=self.doc_type, id=url)['_source']['result']\n return record\n except elasticsearch.NotFoundError as e:\n raise KeyError(url + ' does not exist')", "def __getitem__(self, item):\n return self.__dict__[item]", "def get_item_detail(item_id):\n pass", "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "def get_object(id):", "def get_list_fs_item(self):\n if self.request.path[1:].rfind('/') > 1:\n list_id = self.request.path[1:self.request.path.rfind('/')]\n item_id = self.request.path[self.request.path.rfind('/')+1:]\n #self.response.out.write(list_id + \";\" +item_id)\n listspec = self.get_fs_list_spec(list_id)\n if listspec != None:\n lst = List(**listspec)\n for entry in listspec['entries']:\n if '_headline' in entry and 
self.get_slug(entry['_headline']) == item_id:\n page = self.get_page_from_entry(entry)\n return ModelAndView(view='list-item.html',\n model={\n 'list': lst,\n 'page': page,\n 'syntax_list': get_syntax_list([page])})\n raise NotFoundException", "def _item_from(container, index):\n if index is None:\n return container\n return container[index]", "def template(self):\n template_names = self.get_template_names()\n if template_names:\n return template_names[0]\n return None", "def get_db_item(self, key, item_key):\n return self.get_db_items(key).get(item_key)", "def fetch_search_document(self, index):\n assert self.pk, \"Object must have a primary key before being indexed.\"\n client = get_client()\n return client.get(\n index=index,\n doc_type=self.search_doc_type,\n id=self.pk\n )", "def get(self):\n return self._template_data()" ]
[ "0.6883214", "0.6577675", "0.6520346", "0.6520346", "0.63746023", "0.6368625", "0.6285564", "0.6244785", "0.61841923", "0.61830777", "0.6121958", "0.61106426", "0.60945314", "0.6082757", "0.60224175", "0.6003464", "0.59755844", "0.59741163", "0.59633183", "0.5937414", "0.5898076", "0.58897376", "0.588816", "0.5885328", "0.58560425", "0.58528525", "0.5830332", "0.58115524", "0.58081514", "0.5789596", "0.5777058", "0.5772504", "0.5771198", "0.5742202", "0.5740524", "0.5726278", "0.5715787", "0.57098514", "0.57066405", "0.56957906", "0.56865615", "0.56759846", "0.5673834", "0.56582105", "0.5637106", "0.5619862", "0.5613202", "0.5610342", "0.5605869", "0.56056833", "0.56037056", "0.5598578", "0.5593363", "0.5582511", "0.55762357", "0.5572899", "0.55658436", "0.55631715", "0.55564624", "0.5553865", "0.55525905", "0.5552381", "0.5548512", "0.5548512", "0.55370045", "0.5533461", "0.55310535", "0.5530969", "0.55297303", "0.5528637", "0.5523012", "0.5515882", "0.5514667", "0.5508564", "0.55068403", "0.55067265", "0.5505488", "0.55042636", "0.54989886", "0.5496293", "0.54782355", "0.5475078", "0.54748434", "0.5472905", "0.5471145", "0.5460496", "0.5440965", "0.5438929", "0.543115", "0.54271364", "0.5422097", "0.5413504", "0.54117006", "0.5397312", "0.5396385", "0.53954977", "0.5392153", "0.5387919", "0.53798914", "0.5378943" ]
0.55808055
54
Get the list of url patterns for this view.
def get_urls(self):
    return patterns('',
        #url(r'^$', self.new_upload, name="%s_document_list" % self.name),
        url(r'^([^\/]+)/edit/$', self.edit, name="%s_document_edit" % self.name),
        url(r'^([\w\d-]+)/confirm/$', self.confirm, name="%s_document_confirm" % self.name)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urlpatterns(self) -> list:\n raise NotImplementedError()", "def get_urlpatterns(cls):\n cls.validate_urlpattern_with_options()\n return map(lambda s: s.format(**cls.urlpattern_options), cls.urlpatterns)", "def get_urls(self):\n return patterns('')", "def urls(self):\n patterns = []\n for sitecomp in self.modules():\n patterns.append(sitecomp.urls)\n pass\n return patterns", "def list_patterns(self) -> localedata.LocaleDataDict:\n return self._data['list_patterns']", "def patterns(self):\n return self._pattern_reg", "def urls(self):\r\n urls = []\r\n\r\n for url_name in sorted(self.resources.keys()):\r\n\r\n resource = self.resources[url_name]\r\n urls.append(resource.as_url(\r\n api=self,\r\n name_prefix='-'.join(\r\n (self.prefix, self.str_version)).strip('-'),\r\n url_prefix=self.str_version\r\n ))\r\n\r\n return patterns(self.prefix, *urls)", "def getDjangoURLPatterns():\n\n patterns = [\n (r'gsoc/tasks/assignslots/assign$',\n r'soc.modules.gsoc.tasks.slot_assignment.assignSlots'),\n (r'gsoc/tasks/assignslots/program$',\n r'soc.modules.gsoc.tasks.slot_assignment.assignProgramSlots'),\n ]\n\n return patterns", "def urls(self):\n return self._list_urls()", "def urlpatterns(self):\n regex = r'^%s/' % self.label\n urls_module = '%s.urls' % self.name\n ns = self.label\n return [url(regex, include(urls_module, namespace=ns, app_name=ns))]", "def getDjangoURLPatterns():\n\n patterns = [\n (r'^tasks/role_conversion/update_references',\n 'soc.tasks.updates.role_conversion.updateReferences'),\n (r'^tasks/role_conversion/update_project_references',\n 'soc.tasks.updates.role_conversion.updateStudentProjectReferences'),\n (r'^tasks/role_conversion/update_proposal_references',\n 'soc.tasks.updates.role_conversion.updateStudentProposalReferences'),\n (r'^tasks/role_conversion/update_roles$',\n 'soc.tasks.updates.role_conversion.updateRoles'),\n (r'^tasks/role_conversion/update_mentors$',\n 'soc.tasks.updates.role_conversion.updateMentors'),\n (r'^tasks/role_conversion/update_org_admins$',\n 'soc.tasks.updates.role_conversion.updateOrgAdmins'),\n (r'^tasks/role_conversion/update_students$',\n 'soc.tasks.updates.role_conversion.updateStudents'),\n (r'^tasks/role_conversion/update_hosts$',\n 'soc.tasks.updates.role_conversion.updateHosts'),\n ]\n\n return patterns", "def get_urls(self):\n return patterns('',\n url(r'^$', self.list, name=\"%s_document_list\" % self.name),\n url(r'^upload/$', self.new_upload, name=\"%s_document_upload\" % self.name),\n url(r'^([^\\/]+)/download/$', self.download, name=\"%s_document_download\" % self.name),\n url(r'^([^\\/]+)/send/$', self.send, name=\"%s_document_send\" % self.name),\n url(r'^([^\\/]+)/send/ajax/$', self.send_ajax, name=\"%s_document_send_ajax\" % self.name),\n url(r'^([^\\/]+)/detail/$', self.detail, name=\"%s_document_detail\" % self.name),\n url(r'^([^\\/]+)/view/$', self.view, name=\"%s_document_view\" % self.name),\n url(r'^([^\\/]+)/delete/$', self.delete, name=\"%s_document_delete\" % self.name),\n url(r'^(?P<object_id>([^\\/]+))/detail/(?P<direction>up|down|clear)vote/?$', self.vote, name=\"%s_document_vote\" % self.name),\n )", "def pattern_name_list(self):\n return list(self._pattern_reg.keys())", "def urls(self):\n return lambda : self.config.urls(active_only=True)", "def registered_urls(self):\n from pkg_resources import iter_entry_points\n\n entries = ['Priority', 'EP Name', 'Module', 'Class']\n for ep in iter_entry_points('appurl.urls'):\n c = ep.load()\n entries.append([c.match_priority, ep.name, ep.module_name, c.__name__, ])\n\n 
return entries", "def all_urls(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/allUrls/')))", "def url_bases(self) -> List[str]:\n return self._url_module.url_bases", "def urls(self) -> list[str]:\r\n ...", "def get_urls():\r\n return []", "def static_routes(self):\n return self._static_routes", "def create_url_rules(self):\n return []", "def listPatterns(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.listPatterns(self, False)", "def get_view_endpoints(self):\n return []", "def base_urls(self):\n # Due to the way Django parses URLs, ``get_multiple`` won't work without\n # a trailing slash.\n return [\n url(r\"^(?P<resource_name>%s)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name=\"api_dispatch_list\"),\n url(r\"^(?P<resource_name>%s)/schema%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name=\"api_get_schema\"),\n url(r\"^(?P<resource_name>%s)/set/(?P<slug_list>[\\w\\d_-]+)/$\" % self._meta.resource_name, self.wrap_view('get_multiple'), name=\"api_get_multiple\"),\n url(r\"^(?P<resource_name>%s)/(?P<slug>[\\w\\d_-]+)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]", "def routes(self):\n return self._routes", "def site_patterns(*args):\n pattern_list = args\n return [SiteRegexURLResolver('', pattern_list)]", "def load_url_pattern_names(self, patterns):\n URL_NAMES = []\n for pat in patterns:\n if pat.__class__.__name__ == 'RegexURLResolver':\n # load patterns from this RegexURLResolver\n self.load_url_pattern_names(pat.url_patterns)\n elif pat.__class__.__name__ == 'RegexURLPattern':\n # load name from this RegexURLPattern\n if pat.name is not None and pat.name not in URL_NAMES:\n URL_NAMES.append(pat.name)\n return URL_NAMES", "def discover_urls():\n urlpatterns = []\n\n for app in settings.INSTALLED_APPS:\n try:\n _temp = __import__(f'{app}.urls', globals(), locals(), ['urlpatterns'], 0)\n urlpatterns += _temp.urlpatterns\n\n except ModuleNotFoundError:\n pass\n\n return urlpatterns", "def patterns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"patterns\")", "def patterns(self: TokenMatcher) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n p = {\"label\": label, \"pattern\": pattern, \"type\": self.type}\n all_patterns.append(p)\n return all_patterns", "def djangoURLPatterns(self):\n patterns = [url(r'tasks/gsoc/surveys/send_reminder/spawn$',\n self.spawnRemindersForProjectSurvey,\n name='spawn_survey_reminders'),\n url(r'tasks/gsoc/surveys/send_reminder/send$',\n self.sendSurveyReminderForProject)]\n return patterns", "def get_urls(self):\r\n from django.conf.urls.defaults import patterns, url\r\n\r\n def wrap(view):\r\n def wrapper(*args, **kwargs):\r\n return self.admin_site.admin_view(view)(*args, **kwargs)\r\n return update_wrapper(wrapper, view)\r\n\r\n info = (self.model._meta.app_label, self.model._meta.module_name)\r\n\r\n return patterns(\"\",\r\n url(r\"^panel/$\",\r\n wrap(self.panel_view),\r\n name=\"insert_%s_%s_panel\" % info),\r\n url(r\"^list/$\",\r\n wrap(self.list_view),\r\n name=\"insert_%s_%s_list\" % info),\r\n url(r\"^add_minimal/$\",\r\n self.add_view,\r\n name=\"insert_%s_%s_add\" % info),\r\n url(r\"^(.+)/detail/$\",\r\n wrap(self.detail_view),\r\n name=\"insert_%s_%s_detail\" % info),\r\n 
url(r\"^(.+)/render/$\",\r\n wrap(self.render_view),\r\n name=\"insert_%s_%s_render\" % info),\r\n url(r\"^(.+)/delete/$\",\r\n wrap(self.delete_view),\r\n name=\"insert_%s_%s_delete\" % info),\r\n )", "def urls(self) -> str:\n return self._data['urls']", "def listtypes(self):\n\n pattern_types = [i for i in sorted(self._allowed_patterns.iterkeys())]\n\n return pattern_types", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls", "def getViews(self):\n return list(self.__views)", "def list(self):\n path = \"authSettings/exemptedUrls\"\n return self._session.get(path)", "def get_pattern_types(self) -> List[PatternType]:\n return list(self.pattern_types)", "def base_urls(self):\n return [\n url(r\"^(?P<resource_name>%s)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name=\"api_dispatch_list\"),\n url(r\"^(?P<resource_name>%s)/schema%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name=\"api_get_schema\"),\n url(r\"^(?P<resource_name>%s)/set/(?P<pk_list>\\w[\\w;-]*)/$\" % self._meta.resource_name, self.wrap_view('get_multiple'), name=\"api_get_multiple\"),\n url(r\"^(?P<resource_name>%s)/(?P<pk>\\w[\\w-]*)%s$\" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]", "def urls(self):\n if not self._urls:\n urls = []\n for host in self.hosts:\n # Must end without a slash\n urls.append('http://%(host)s:%(port)s%(path)s' % {\n 'host': host,\n 'port': self.port,\n 'path': self.path,\n })\n self._urls = urls\n return self._urls", "def getDjangoURLPatterns():\n\n patterns = [\n (r'^tasks/project_conversion/update_projects',\n 'soc.tasks.updates.project_conversion.updateProjects'),\n\n ]\n\n return patterns", "def getViews(self):\n return list(self.__views.keys())", "def getDjangoURLPatterns():\n\n patterns = [\n (r'^tasks/gci/task/bulk_create_tasks$',\n 'soc.modules.gci.tasks.bulk_create.bulkCreateTasks'),]\n\n return patterns", "def getRoutes(self):\n pass", "def get_urls(self):\n from django.conf.urls import patterns, url\n\n def wrap(view):\n def wrapper(*args, **kwargs):\n return self.admin_site.admin_view(view)(*args, **kwargs)\n return update_wrapper(wrapper, view)\n\n info = self.model._meta.app_label, self.model._meta.module_name\n\n urlpatterns = patterns(\n '',\n url(r'^(\\d+)/add/objectives/$',\n wrap(self.add_objectives),\n name='%s_%s_objectives' % info),\n url(r'^(\\d+)/regional_objective/(\\d+)/delete$',\n wrap(self.delete_regional_objective),\n name='%s_%s_delete_regional_objective' % info),\n url(r'^(\\d+)/summary/$',\n wrap(self.summary),\n name='%s_%s_summary' % info),\n url(r'^(\\d+)/summary/pre/$',\n wrap(self.pre_summary),\n name='%s_%s_pre_summary' % info),\n url(r'^(\\d+)/summary/day/$',\n wrap(self.day_summary),\n name='%s_%s_day_summary' % info),\n url(r'^(\\d+)/summary/post/$',\n wrap(self.post_summary),\n name='%s_%s_post_summary' % info),\n url(r'^(\\d+)/summary/pdf/$',\n wrap(self.pdf_summary),\n name='%s_%s_pdf_summary' % info),\n url(r'^(\\d+)/download/$',\n wrap(self.pdflatex),\n name='%s_%s_download' % info),\n url(r'^(\\d+)/export/$',\n wrap(self.pdflatex),\n name='%s_%s_export' % info),\n url(r'^(\\d+)/cbas/$',\n wrap(self.corporate_approve),\n name='%s_%s_corporate_approve' % info),\n 
url(r'^(\\d+)/endorsement/$',\n wrap(self.endorse),\n name='%s_%s_endorse' % info),\n url(r'^(\\d+)/endorsement/(\\d+)/delete$',\n wrap(self.delete_endorsement),\n name='%s_%s_delete_endorsement' % info),\n url(r'^(\\d+)/endorsement/officers$',\n wrap(self.endorsing_roles),\n name='%s_%s_endorsing_roles' % info),\n url(r'^(\\d+)/approval/$',\n wrap(self.approve),\n name='%s_%s_approve' % info),\n url(r'^(\\d+)/closure/$',\n wrap(self.close),\n name='%s_%s_close' % info),\n url(r'^(\\d+)/sitemap/$',\n wrap(self.sitemap),\n name='%s_%s_sitemap' % info),\n )\n\n return urlpatterns + super(PrescriptionAdmin, self).get_urls()", "def get_patterns(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-patterns\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"get_patterns\",\n keywords=kwargs,\n params=parameters\n )", "def get_urls(self, **kwargs):\n pass # pragma: no cover", "def patterns(self) -> List[AttributeRulerPatternType]:\n all_patterns = []\n for i in range(len(self.attrs)):\n p = {}\n p[\"patterns\"] = self.matcher.get(str(i))[1]\n p[\"attrs\"] = self._attrs_unnormed[i] # type: ignore\n p[\"index\"] = self.indices[i] # type: ignore\n all_patterns.append(p)\n return all_patterns # type: ignore[return-value]", "def views(self):\n return self._views", "def patterns(self) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self.fuzzy_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern.text, \"type\": \"fuzzy\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n for label, patterns in self.regex_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern, \"type\": \"regex\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n return all_patterns", "def urlpatterns(self):\n urlpatterns = [\n path(self.create_view.endpoint.url(), self.create_view.view(), name='create'),\n path(self.delete_view.endpoint.url(URL_PARAM), self.delete_view.view(), name='delete'),\n path(self.get_view.endpoint.url(URL_PARAM), self.get_view.view(), name='get'),\n path(self.get_or_create_view.endpoint.url(), self.get_or_create_view.view(), name='get_or_create'),\n path(self.list_view.endpoint.url(), self.list_view.view(), name='list'),\n path(self.update_view.endpoint.url(URL_PARAM), self.update_view.view(), name='update'),\n ]\n for method_view in self.method_views:\n # Pass the method view this ModelType's serializer class.\n method_view.model_serializer_cls = self.serializer.base_serializer_cls\n urlpatterns.append(\n path(method_view.endpoint.url(URL_PARAM), method_view.view(), name=method_view.name),\n )\n for static_method_view in self.static_method_views:\n static_method_view.model_type_cls = self.__class__\n urlpatterns.append(\n path(static_method_view.endpoint.url(), static_method_view.view(), name=static_method_view.name),\n )\n for property_view in self.property_views:\n urlpatterns.append(\n path(property_view.endpoint.url(URL_PARAM), property_view.view(), name=property_view.name),\n )\n return urlpatterns", "def get_flask_endpoints(self):\n urls = self.endpoints.keys()\n return urls", "def 
get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]", "def get_uri_schemes(self):\n return list(sorted(self.backends.with_playlists.keys()))", "def url_assets(self):\n return self.assets(asset_type='URL')", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def filenamePatterns(self):\n return ['*.'+e for e in self.filenameExtensions]", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def get_layer_urls(self):\n urls = []\n\n if getattr(self, 'additional_domains'):\n map(urls.append, (domain for domain in self.additional_domains.split(\";\") if domain))\n\n return urls", "def urlset(self):\n return self._urlset", "def resource_id_patterns(self) -> Sequence[str]:\n return pulumi.get(self, \"resource_id_patterns\")", "def getInfixPatterns(self):\n return self.getOrDefault(\"infixPatterns\")", "def getURLs():", "def get_static_regexps():\n handlers = modules_util.module_yaml('default')['handlers']\n retval = set()\n\n for handler in handlers:\n if handler.GetHandlerType() == 'static_dir':\n retval.add('^' + handler.url + '/')\n elif handler.GetHandlerType() == 'static_files':\n retval.add('^' + handler.url + '$')\n\n return sorted(retval)", "def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers", "def get_patterns(self):\n params = {\n \"query\" : \"\"\n }\n\n resp_from_server = self.get_api_result(self.URL_RETURNS_MINI_PATTERNS, params)\n\n if resp_from_server.get('status'):\n return resp_from_server\n else:\n patterns_dicts = self.get_mini_patterns_dict(resp_from_server)\n\n return patterns_dicts", "def get_patterns(\n self, pipeline: str, label: str, key: str\n ) -> List[Pattern]:", "def list_routes(app):\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, rule))\n output.append(line)\n\n return sorted(output)", "def get_uri_schemes(self) -> list[UriScheme]:\n return sorted(self.backends.with_playlists.keys())", "def website_routing_rules(self) -> typing.Optional[typing.List[\"RoutingRule\"]]:\n return self._values.get('website_routing_rules')", "def urls(self):\n base_url = r'^{}/'.format(self.label)\n return SiteModuleURLResolver(base_url, self.get_urls(), module=self, app_name=self.label, namespace=self.label)", "def routes_available():\n return json.dumps(\n [\"%s\" % rule for rule in app.url_map.iter_rules()],\n indent=4,\n separators=(\",\", \": \"),\n )", "def get_endpoints(self, request):\n enumerator = self.endpoint_enumerator_class(\n self._gen.patterns, self._gen.urlconf, request=request)\n endpoints = enumerator.get_api_endpoints()\n view_paths = defaultdict(list)\n view_cls = {}\n for path, method, callback, decorators in reversed(endpoints):\n view = 
self.create_view(callback, method, request)\n path = self._gen.coerce_path(path, method, view)\n view_paths[path].append((method, view, decorators))\n view_cls[path] = callback.cls\n return {path: (view_cls[path], methods)\n for path, methods in view_paths.items()}", "def urlBars(self):\n urlBars = []\n for index in range(self.count()):\n urlBars.append(self.widget(index))\n return urlBars", "def list_routes():\n for rule in sorted(application.url_map.iter_rules(), key=lambda r: r.rule):\n print(\"{:10} {}\".format(\", \".join(rule.methods - set(['OPTIONS', 'HEAD'])), rule.rule))", "def get_urls(self) -> Dict[str, str]:\n return {}", "def endpoints(self):\n return self.settings[\"endpoints\"]", "def url_paths(self) -> Dict[str, str]:\n unformatted_paths = self._url_module.url_paths\n\n paths = {}\n for unformatted_path, handler in unformatted_paths.items():\n path = unformatted_path.format(\"\")\n paths[path] = handler\n\n return paths", "def getPossibleMatchesList(self):\n return [p for p in self._patterns if p.startswith(self._keyCode)]", "def list_routes():\n import urllib\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\n \"{:35s} {:35s} {}\".format(\n rule.endpoint,\n methods,\n url\n )\n )\n output.append(line)\n\n for line in sorted(output):\n print(line)", "def path_groups(self):\n return self._path_groups", "def ListUrlEntries(self):\n return [WprUrlEntry(request, self._http_archive[request])\n for request in self._http_archive.get_requests()]", "def getBookmarkableURLs(self):\n return getattr(CONFIG, 'zmi_bookmarkable_urls', True)", "def npatterns(self):\n return len(self.patterns)", "def getFrequentPatterns(self):\n return self.finalPatterns", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def get_uri_schemes(self) -> list[backend.UriScheme]:\n futures = [b.uri_schemes for b in self.backends]\n results = pykka.get_all(futures)\n uri_schemes = itertools.chain(*results)\n return sorted(uri_schemes)", "def get_urls(self):\r\n urls = super(ServeeAdminSite, self).get_urls()\r\n from django.conf.urls import patterns, url, include\r\n\r\n # Custom Views\r\n for path, view, name in self.custom_views:\r\n urls += patterns('',\r\n url(r'^%s$' % path, self.admin_view(view)),\r\n )\r\n\r\n # Inserts\r\n for insert_model_lookup, insert in self.insert_classes.iteritems():\r\n urls += patterns(\"\",\r\n (r\"^insert/%s/%s/\" % (insert.model._meta.app_label, insert.model._meta.module_name), include(insert.urls))\r\n )\r\n return urls", "def getUrlsList(self):\n\t\ttry:\n\t\t\tf = ur.urlopen(self.sitemap_url)\n\t\t\tres = f.readlines()\n\t\t\tfor d in res:\n\t\t\t data = re.findall('<loc>(https?:\\/\\/.+?)<\\/loc>',d)\n\t\t\t for i in data:\n\t\t\t\tself.urls.append(i)\n\t\texcept Exception as e:\n\t\t\tself.app.printflush(str(e))\n\t\t\tself.app.printflush(traceback.format_exc())\n\t\tself.fetched_count = len(self.urls)", "def _filter_url_list(self, regex_pattern: str) -> None:\n matcher = re.compile(regex_pattern)\n filtered_list = []\n if self.url_list:\n for url in self.url_list:\n if matcher.search(url.url):\n filtered_list.append(url)\n self.url_list = filtered_list", "def load_url_pattern_names(patterns, include_with_args=True):\n global URL_NAMES\n for pat in patterns:\n if pat.__class__.__name__ == 'RegexURLResolver': # load patterns from this 
RegexURLResolver\n load_url_pattern_names(pat.url_patterns, include_with_args)\n elif pat.__class__.__name__ == 'RegexURLPattern': # load name from this RegexURLPattern\n if pat.name is not None and pat.name not in URL_NAMES:\n if include_with_args or re.compile(pat.regex).groups == 0:\n URL_NAMES.append(pat.name)\n return URL_NAMES", "def get_pattern(self):\n pattern = list()\n for item in self.gradual_items:\n pattern.append(item.gradual_item.tolist())\n return pattern", "def get_product_urls(self, page):\n return self.__url_list(page)", "def get_urls(r):\n url_list = find_urls(r)\n url_list += find_tag_urls(r)\n return set(url_list)", "def filteredUrls(pattern, view, kwargs=None, name=None):\n results = [(pattern, view, kwargs, name)]\n tail = ''\n mtail = re.search('(/+\\+?\\\\*?\\??\\$?)$', pattern)\n if mtail:\n tail = mtail.group(1)\n pattern = pattern[:len(pattern) - len(tail)]\n for filter in ('/state/(?P<state>\\w+)',\n '/group/(?P<group>[^/]+)',\n '/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)',\n '/server/(?P<server>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)'):\n results += [(pattern + filter + tail, view, kwargs)]\n return results", "def get_links(self) -> List[str]:\n return self.__links", "def endpoints(self):\n return self[\"endpoints\"]", "def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]", "def include_regexes(self) -> Optional[List[str]]:\n return pulumi.get(self, \"include_regexes\")" ]
[ "0.8027784", "0.79029524", "0.78296137", "0.7634182", "0.69524354", "0.6944128", "0.67643464", "0.6622399", "0.6592495", "0.6579387", "0.653932", "0.6526326", "0.64818805", "0.6450613", "0.6444061", "0.6438293", "0.64050204", "0.6334296", "0.6327021", "0.63207394", "0.62634665", "0.6236464", "0.62105787", "0.62073463", "0.6197516", "0.6177923", "0.6176312", "0.6153545", "0.6152025", "0.6139003", "0.6134816", "0.61301506", "0.612044", "0.61150575", "0.6085081", "0.6078764", "0.60754097", "0.6033365", "0.6018508", "0.60129577", "0.6002511", "0.6001353", "0.5994349", "0.5943414", "0.58685905", "0.58581686", "0.58491284", "0.5799974", "0.57935125", "0.57879645", "0.5778435", "0.57726085", "0.57622063", "0.57568926", "0.57122415", "0.5671434", "0.56578475", "0.56574583", "0.56560963", "0.5642009", "0.5629165", "0.5606653", "0.5605557", "0.5589418", "0.55677533", "0.5565255", "0.55628467", "0.554645", "0.55408645", "0.55331606", "0.5528339", "0.551944", "0.5511675", "0.5506314", "0.54851115", "0.54715", "0.5469348", "0.54634297", "0.54587704", "0.545845", "0.5455274", "0.5434976", "0.53956443", "0.5394836", "0.5382178", "0.5376494", "0.5372844", "0.5356842", "0.53439444", "0.53395283", "0.5316361", "0.5314834", "0.5286448", "0.52851886", "0.52623683", "0.52436614", "0.5229272", "0.52204156", "0.52127165", "0.5210167" ]
0.6040245
37
Print list of instances with their attached volume id/size to console, ie
def list_ebss_by_instance():
  ec2 = u.create_ec2_resource()
  instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]
  sorted_instances = sorted(instances, key=itemgetter(0))
  for (seconds, instance) in sorted_instances:
    volumes = instance.volumes.all()
    volume_strs = []
    for v in volumes:
      volume_strs.append("%s (%s)"%(v.id, v.size))
    print("%s: %s" % (u.get_name(instance.tags), ','.join(volume_strs)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def volumes(self):", "def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass", "def show_instances():\n return get_instances()", "def describe_volumes(InstanceId=None, StackId=None, RaidArrayId=None, VolumeIds=None):\n pass", "def do_show(self, args):\n args = args.split()\n print(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n all_objs = storage.all()\n key = args[0] + '.' + args[1]\n if key in all_objs:\n print(all_objs[key])\n else:\n print(\"** no instance found **\")", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def do_show(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n print(dicti[\"{}.{}\".format(args[0], args[1])])\n else:\n print(\"** no instance found **\")", "def do_show(self, argv):\n argument_split = argv.split()\n aux = 0\n if len(argument_split) == 0:\n print(\"** class name missing **\")\n elif not argument_split[0] in self.__names:\n print(\"** class doesn't exist **\")\n elif len(argument_split) < 2:\n print(\"** instance id missing **\")\n elif argument_split[0] in self.__names:\n for key, obj in models.storage.all().items():\n if key == argument_split[0]+\".\"+argument_split[1]:\n aux = 1\n print(obj)\n if aux == 0:\n print(\"** no instance found **\")", "def do_show(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key not in storage.all():\n print(\"** no instance found **\")\n return\n print(storage.all()[key])", "def do_show(self, 
args):\n temp = args.split()\n\n if len(temp) == 0:\n print(\"** class name missing **\")\n return\n elif temp[0] not in self.myclasses:\n print(\"** class doesn't exist **\")\n return\n elif len(temp) < 2:\n print('** instance id missing **')\n return\n else:\n all_objs = storage.all()\n for i in all_objs.keys():\n if i == \"{}.{}\".format(temp[0], temp[1]):\n print(all_objs[i])\n return\n print('** no instance found **')", "def do_show(self, line):\n\n args = line.split()\n\n if not args:\n print(\"** class name missing **\")\n elif args[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n elif len(args) < 2:\n print(\"** instance id missing **\")\n else:\n key = args[0] + \".\" + args[1]\n dict_objects = storage.all()\n obj = dict_objects.get(key)\n if obj:\n print(obj)\n else:\n print(\"** no instance found **\")", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def show_vdcs(self):\n for v in self.vdcs:\n print v", "def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)", "def do_show(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if 
dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n print(dict_objs[key])\n else:\n print(\"** no instance found **\")", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def show(vol_path):\n name = \"qemu-img\"\n image = \"breqwatr/qemu-img:latest\"\n path = Path(vol_path)\n vol_abspath = path.absolute().__str__()\n run = f\"qemu-img info {vol_abspath}\"\n mount = f\"-v {vol_abspath}:{vol_abspath}\"\n cmd = f\"docker run --rm -it --name {name} {mount} {image} {run}\"\n shell(cmd)", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def show_volume(self, volume, check=True):\n cmd = 'cinder show ' + volume.id\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_SHOW_TIMEOUT, check=check)\n\n volume_table = output_parser.table(stdout)\n show_result = {key: value for key, value in volume_table['values']}\n\n if check:\n assert_that(show_result['id'], is_(volume.id))\n if volume.name:\n assert_that(show_result['name'], is_(volume.name))\n if volume.description:\n assert_that(show_result['description'],\n is_(volume.description))", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def populate_volumes(self):\n print \"Populating volumes info...\"\n volumes = self.get_all_volumes()\n for i in volumes:\n\n # handle associated instance's KEEP-tag\n associated_instance_id = i.attach_data.instance_id\n\n if associated_instance_id is None: # sometimes 
there is no attached instance\n instance_keep_tag = \"-------no-instance-found\"\n else:\n instance_keep_tag = Ins.spreadsheet[associated_instance_id]['KEEP_tag']\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n instance_KEEP_tag=instance_keep_tag,\n associated_instance_id=associated_instance_id,\n PROD_tag=self.is_production(i), attachment_state=i.attachment_state(),\n state=i.volume_state(), status=i.status, iops=i.iops, size=i.size,\n created=i.create_time, region=i.region.name)", "def print_vscsi_attributes(self,objects):\n print(\"\\n\")\n print((\"LocalPartitionID\".ljust(35),\":\",objects.LocalPartitionID.value()))\n print((\"VirtualSlotNumber\".ljust(35),\":\",objects.VirtualSlotNumber.value()))\n print((\"RequiredAdapter\".ljust(35),\":\",objects.RequiredAdapter.value()))\n print((\"RemoteLogicalPartitionID\".ljust(35),\":\",objects.RemoteLogicalPartitionID.value()))\n print((\"RemoteSlotNumber\".ljust(35),\":\",objects.RemoteSlotNumber.value()))", "def listPVs(self):\n for pv in self._pvlist:\n print pv", "def list(self, arguments):\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(\n 'NAME'.rjust(20),\n 'ADDRESS'.rjust(15),\n 'BOX'.rjust(35),\n 'VERSION'.rjust(12),\n 'PATH',\n ))\n for instance_name, instance in utils.instances().items():\n path = instance.get('path')\n if path and os.path.exists(path):\n self.activate(instance_name)\n mech_path = os.path.join(path, '.mech')\n if os.path.exists(mech_path):\n vmx = self.get_vmx(silent=True)\n if vmx:\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(wait=False, quiet=True, lookup=lookup)\n else:\n ip = colored.red(\"invalid\")\n if ip is None:\n ip = colored.yellow(\"poweroff\")\n elif not ip:\n ip = colored.green(\"running\")\n else:\n ip = colored.green(ip)\n else:\n ip = \"\"\n box_name = self.box_name or \"\"\n box_version = self.box_version or \"\"\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(\n colored.green(instance_name.rjust(20)),\n ip.rjust(15),\n box_name.rjust(35),\n box_version.rjust(12),\n path,\n ))", "def do_show(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.valid_classes:\n print(\"** class doesn't exist **\")\n return\n all_objs = storage.all()\n for objs_id in all_objs.keys():\n if objs_id == args[1] and args[0] in str(type(all_objs[objs_id])):\n print(all_objs[objs_id])\n return\n print(\"** no instance found **\")", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def show_info(self, group):\n print(group)\n for child in self.dict[group]:\n if child=='Version':\n print(child, ':', self(group, child))\n elif not self(group, child).shape:\n print(child, ':',self(group, child))\n else:\n print(child,': size', self(group, child).shape)", "def show(self,mode=0,level=0,ident=''):\n if self.locked : l='L'\n else : l=' '\n tmp= '%sd%-3d %s %-6s %-30s Vendor: %-10s %-10s Size: %10s' % \\\n (ident,self.idx,l,self.name,self.guid.strip()[-29:],self.vendor,self.model,printsz(self.size))\n if level>0:\n tmp+='\\n'+ident+' Paths:'\n for p in self.paths.values() : tmp+='\\n'+p.show(mode,level-1,ident+' ')\n 
tmp+='\\n'+ident+' Partitions:'\n for p in self.partitions.values() : tmp+='\\n'+p.show(mode,level-1,ident+' ')\n tmp+='\\n'\n return tmp", "def do_show(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in classes:\n if len(args) > 1:\n key = args[0] + \".\" + args[1] # args 0 is name, args 1 es id\n if key in models.storage.all():\n print(models.storage.all()[key])\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def print_max_sizes(self):\n print(\"max_sizes: %s\" % self.max_sizes)", "def do_show(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n elif len(args) < 2 and args[0] in self.class_dict:\n print(\"** instance id missing **\")\n return\n\n object_dict = storage.all()\n if args[0] in self.class_dict:\n for full_key in object_dict:\n key = full_key.split(\".\")\n if key[1] == args[1]:\n print(object_dict[full_key])\n return\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")", "def print_logs(self, shell=False):\n for l, v in self.logs(shell).items():\n print('\\n### Container ', l, ', id ', v.get('id', 'None'), ' ###\\n')\n for part in ['stdout', 'stderr']:\n print('##', part, '##')\n print(v[part])", "def main():\n dump(inventory(), fp=stdout, indent=4)", "def show(self, class_name, inst_id, stored_objects):\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n print(\"** no instance found **\")\n else:\n print(stored_objects[instance])", "def do_show(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n print(objects[args])", "def do_show(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n print(models.storage.all()[key])\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def do_show(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing 
**\")\n else:\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n else:\n print(storage.all()[obj])", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def show_vdcs_detail(self):\n for v in self.vdcs:\n print self.vdcs[v]", "def watch():\n\n try:\n headers = ('CONTAINER ID', 'NAME', 'CPU %', 'MEM USAGE / LIMIT',\n 'MEM %', 'NET I/O', 'BLOCK I/O', 'PIDS')\n column_width = 20\n for element in headers:\n print(element.ljust(column_width)),\n print('')\n\n for container in CLIENT.containers.list():\n column_width = 20\n stats = container.stats(stream=False)\n\n # Block I/O stats\n blkio = stats.get('blkio_stats').get('io_service_bytes_recursive')\n # in case blkio is empty --> IndexError: list index out of range\n if not blkio:\n blkio_read = '0'\n blkio_write = '0'\n else:\n blkio_read = size(blkio[0].get('value'), system=si)\n blkio_write = size(blkio[1].get('value'), system=si)\n\n # Network stats\n rx_stats = size(stats.get('networks').get('eth0').get('rx_bytes'), system=si)\n tx_stats = size(stats.get('networks').get('eth0').get('tx_bytes'), system=si)\n\n # Memory stats\n mem = stats.get('memory_stats')\n mem_usage = mem.get('stats').get('active_anon')\n mem_limit = mem.get('limit')\n mem_percent = (\"%.2f\"%((mem_usage / mem_limit)*100))\n\n # CPU stats\n # this is taken directly from docker CLIENT:\n # https://github.com/docker/docker/blob/28a7577a029780e4533faf3d057ec9f6c7a10948/api/CLIENT/stats.go#L309\n cpu_percent = 0.0\n cpu = stats.get('cpu_stats')\n pre_cpu = stats.get('precpu_stats')\n cpu_total = cpu.get('cpu_usage').get('total_usage')\n pre_cpu_total = pre_cpu.get('cpu_usage').get('total_usage')\n cpu_count = cpu.get('online_cpus')\n\n cpu_delta = cpu_total - pre_cpu_total\n system_delta = cpu.get('system_cpu_usage') - pre_cpu.get('system_cpu_usage')\n\n if system_delta > 0.0 and cpu_delta > 0.0:\n cpu_percent = (\"%.2f\"%(cpu_delta / system_delta * 100.0 * cpu_count))\n\n # container attributes\n attrs = [(str(container.short_id), str(container.name), str(cpu_percent),\n str(size((mem_usage), system=si) + \" / \" + size((mem_limit), system=si)),\n str(mem_percent), str(rx_stats + \" / \" + tx_stats),\n str(blkio_read + \" / \" + blkio_write),\n str(stats.get('pids_stats').get('current')))]\n\n for row in attrs:\n for element in 
row:\n print(element.ljust(column_width)),\n print('')\n\n except (docker.errors.NotFound, KeyError, AttributeError):\n print('No such container or container not running!')", "def do_show(self, arg):\n args = shlex.split(arg)\n \"\"\"get FileStorage.__objects\"\"\"\n stored_objects = models.storage.all()\n\n if self.basic_errs(args):\n '''check if instance exists'''\n instance = self.check_instance(args[0], args[1], stored_objects)\n if instance:\n print(stored_objects[instance])", "def do_show(self, line):\n list_line = line.split(' ')\n if line == \"\":\n print(\"** class name missing **\")\n elif list_line[0] not in HBNBCommand.classes.keys():\n print(\"** class doesn't exist **\")\n elif len(list_line) < 2:\n print(\"** instance id missing **\")\n elif list_line[0] + '.' + list_line[1] not in \\\n models.storage.all().keys():\n print(\"** no instance found **\")\n else:\n obj = models.storage.all().get(list_line[0] + '.' + list_line[1])\n print(obj)", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def print_inventory(self):\r\n for item in self._inventory:\r\n print(item, '\\n')", "def execute(self):\n instances = self._get_active_instances()\n if not instances:\n print(\"No running instances.\")\n else:\n output = \"\\nInstanceId\\t\\tName\\n\\n\"\n for instance in instances:\n name = self._get_instance_name(instance)\n instance_id = instance['InstanceId']\n output += f\"{instance_id}\\t{name}\\n\"\n\n print(output)", "def GetAllInstancesInfo(self, hvparams=None):\n data = []\n for file_name in os.listdir(self._ROOT_DIR):\n path = utils.PathJoin(self._ROOT_DIR, file_name)\n if self._IsDirLive(path):\n data.append((file_name, 0, 0, 0, 0, 0))\n return data", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def update_volumes():\n print 'do something useful here'", "def lsfbvol(self, args: str = \"\") -> List[str]:\n\n lsfbvol_cmd = f\"{self.base_cmd} lsfbvol {args}\"\n lsfbvol_out = runsub.cmd(lsfbvol_cmd)\n\n return lsfbvol_out", "def print_volume_metadata(self):\n\n def print_aligned(header, value=''):\n print(\"{:<26} {}\".format(header, value))\n\n print_aligned(\"Signature:\", self.signature)\n print_aligned(\"SerialNumber:\", hex(self.serial_number))\n print_aligned(\"SectorsPerCluster:\", \"{} (0x{:x} bytes)\".format(\n self.sectors_per_cluster,\n self.sectors_per_cluster * FATX_SECTOR_SIZE))\n print_aligned('RootDirFirstCluster:', str(self.root_dir_first_cluster))\n print(\"\")\n\n print_aligned(\"Calculated Offsets:\")\n print_aligned(\"PartitionOffset:\", \"0x{:x}\".format(self.offset))\n print_aligned(\"FatByteOffset:\", \"0x{:x} (+0x{:x})\".format(\n self.byte_offset_to_physical_offset(self.fat_byte_offset),\n 
self.fat_byte_offset))\n print_aligned(\"FileAreaByteOffset:\", \"0x{:x} (+0x{:x})\".format(\n self.byte_offset_to_physical_offset(self.file_area_byte_offset),\n self.file_area_byte_offset))\n print(\"\")", "def ListInv(self):\n print(\"\\n Store Inventory \\n\") \n for key, value in self.videos.items():\n print(key, value)", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def generateInfoVolumes(regions):\n print \"\\nWriting volumes info to output file %s\" % volumes_data_output_file\n with open(volumes_data_output_file, 'w') as f1:\n f1.write(\"VOLUMES\\n\")\n f1.write(\n \"Name\\tvolume_ID\\tKEEP-tag_of_volume\\tKEEP-tag_of_instance\\tproduction?\\tvolume_attachment_state\\tassociated_instance\\tinstance_state\\tsize\\tcreate_time\\tregion\\tzone\\tassociated_snapshot\\n\\n\")\n for r in regions:\n volumes = getVolumes(r)\n print \".\" # give some feedback to the user\n for v in volumes:\n f1.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,\n v.create_time, v.region.name, v.zone, v.snapshot_id))", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def inspect_storage_objects_for_debugging(k8s_ctx: str, dry_run: bool = False):\n cmd = f'kubectl --context={k8s_ctx} get pv,pvc -o=NAME'\n for storage_obj in run_commands([cmd], dry_run):\n cmd = f'kubectl --context={k8s_ctx} describe {storage_obj}'\n if dry_run:\n logging.debug(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n for line in p.stdout.decode().split('\\n'):\n if line.startswith(\"Status\") or line.startswith(\"Finalizers\"):\n logging.debug(f'{storage_obj} {line}')", "def list_instances(self):\n # list instances\n self._list_instances()", "def printHeap(self):\n print self.storeHeap.movies", "def print_vm_info(virtual_machine):\n config = virtual_machine.config\n print(\"Name : \", config.name)\n print(\"Template : \", config.template)\n print(\"Guest : \", config.guestFullName)\n print(\"Instance UUID : \", config.instanceUuid)\n print(\"Bios UUID : \", config.uuid)\n print(\"\")", "def help_show(self):\n print(\"print an instance based on the class name and id\")", "def print_songs(self):\n\t\tfor i,s in enumerate(self._songs):\n\t\t\tprint('{0}. 
{1}'.format(i, s.print_info()))", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def view_variants(context, variant_id):\n adapter = context.obj['adapter']\n\n results = []\n if variant_id is not None:\n results = adapter.find_variant({'display_name': variant_id})\n\n else:\n results = adapter.find_variants({})\n\n click.echo(pprint(results))", "def cli(count):\n\n if count:\n files = db.count_files()\n click.echo(\"Number of files on inventory: %s\" % files)\n else:\n archives = db.get_files()\n print \"ID - NAME - SIZE - CREATED\"\n for archive in archives:\n if archive.size:\n size = int(archive.size) / 1024.0 / 1024.0\n if format(size, '.2f') != '0.00':\n size = format(size, '.2f') + \" mb\"\n else:\n # Under 1 kb\n size = format(size * 1024 * 1024, '.0f') + \" bytes\"\n\n\n else:\n size = \"Unknown\"\n print \" %s - %s - %s - %s\" % (archive.id, archive.name, size, archive.created_at)", "def showSnapshots(self):\n from .utils import sp\n s = self.getSnapshots()\n ax = sp(len(s))\n for i, S in enumerate(s):\n ax[i].imshow(S)", "def ShowPipeStats(cmd_args=None):\n print \"Number of pipes: {: d}\".format(kern.globals.amountpipes)\n print \"Memory used by pipes: {:s}\".format(sizeof_fmt(int(kern.globals.amountpipekva)))\n print \"Max memory allowed for pipes: {:s}\".format(sizeof_fmt(int(kern.globals.maxpipekva)))", "def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()", "def print_size(self):\n return self.container['print_size']", "def showStatistics(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n stats = a.sys.net.lnx.device.DeviceUtils.getStatistics(self.name, self._log, deviceName) \n if stats:\n for key in stats:\n print \"%s: %s\" % (key, stats[key])", "def do_show(self, line):\n try:\n tokens = split(line)\n except ValueError:\n return None\n if len(tokens) < 1:\n print(\"** class name missing **\")\n else:\n objects = models.storage.all()\n cls = models.getmodel(tokens[0])\n if cls is None:\n print(\"** class doesn't exist **\")\n elif len(tokens) < 2:\n print(\"** instance id missing **\")\n elif \".\".join(tokens[:2]) not in objects:\n print(\"** no instance found **\")\n else:\n print(objects[\".\".join(tokens[:2])])", "def ls():\n # TODO: listing all availabe containers form sequence\n return", "def print(self):\n print('Name:', self.name)\n print('Camera:', self.camera)\n print('Memory:', self.memory)\n print('Ram:', self.ram)\n print('Price:', self.price)\n print('Image:', self.image)", "def print(cls, vas):\n print(vas)", "def print_instance_summary(instance, use_color='auto'):\n\n colorize_ = partial(colorize, use_color=use_color)\n\n name = colorize_(instance.name, \"yellow\")\n instance_type = instance.extra['gonzo_size']\n\n if instance.state == NodeState.RUNNING:\n status_colour = \"green\"\n else:\n status_colour = \"red\"\n\n instance_status = NodeState.tostring(instance.state)\n status = colorize_(instance_status, status_colour)\n\n if 'owner' in instance.extra['gonzo_tags']:\n owner = instance.extra['gonzo_tags']['owner']\n else:\n owner = \"---\"\n\n uptime = format_uptime(instance.extra['gonzo_created_time'])\n uptime = colorize_(uptime, \"blue\")\n\n availability_zone = instance.extra['gonzo_az']\n\n result_list = [\n name,\n instance_type,\n status,\n owner,\n uptime,\n availability_zone,\n ]\n return result_list", "def ls(cls):\n for vm in cls._vm_agents_for_host():\n with vm:\n running = vm.qemu.process_exists()\n\n if running:\n vm_mem 
= vm.qemu.proc().memory_full_info()\n\n expected_size = (\n vm.cfg[\"memory\"] * 1024 * 1024\n + vm.qemu.vm_expected_overhead * 1024 * 1024\n )\n\n log.info(\n \"online\",\n machine=vm.name,\n cores=vm.cfg[\"cores\"],\n memory_booked=\"{:,.0f}\".format(vm.cfg[\"memory\"]),\n memory_pss=\"{:,.0f}\".format(vm_mem.pss / MiB),\n memory_swap=\"{:,.0f}\".format(vm_mem.swap / MiB),\n )\n else:\n log.info(\"offline\", machine=vm.name)", "def show(self):\r\n for card in self.cards_list:\r\n print(card)", "def collect_subinstances(img_list, subinstance_size, clickable_size):\n num_files = len(img_list)\n step_size = subinstance_size // 2\n num_subinstances = 0\n for i in range(num_files):\n num_rows = int(np.ceil((2.0 * img_list[i].shape[0] / float(subinstance_size)) - 1.0))\n num_cols = int(np.ceil((2.0 * img_list[i].shape[1] / float(subinstance_size)) - 1.0))\n num_subinstances += num_rows * num_cols\n #print(str(i) + \"-th image: \" + \"num_rows = \" + str(num_rows) + \", num_cols = \" + str(num_cols) + \", #(sub_files) = \", str(num_subinstances))\n \n print(\"#(subinstances) =\", num_subinstances)\n \n expand = (subinstance_size // clickable_size)\n \n subinstance_list = [None] * expand * expand * num_subinstances\n subinstance_org_loc_list = [None] * expand * expand * num_subinstances\n \n patch_i = 0\n for i in range(num_files):\n num_rows = int(np.ceil((2.0 * img_list[i].shape[0] / float(subinstance_size)) - 1.0))\n num_cols = int(np.ceil((2.0 * img_list[i].shape[1] / float(subinstance_size)) - 1.0))\n for j in range(num_rows):\n for k in range(num_cols):\n for l in range(expand):\n for m in range(expand):\n try:\n subinstance_list[patch_i * expand * expand + l * expand + m ] \\\n = img_list[i][step_size * j + l * clickable_size : step_size * j + (l + 1) * clickable_size,\n step_size * k + m * clickable_size : step_size * k + (m + 1) * clickable_size]\n except IndexError:\n sys.stdout.write(\"ERROR: images are not normalized.\\n\")\n exit(0)\n subinstance_org_loc_list[patch_i * expand * expand + l * expand + m ] \\\n = (i, (step_size * j + l * clickable_size,\n step_size * k + m * clickable_size,\n clickable_size,\n clickable_size))\n patch_i += 1\n\n if patch_i != num_subinstances:\n sys.stdout.write(\"ERROR: the number of subinstances is not consistent.\\n\")\n exit(-1)\n return subinstance_list, subinstance_org_loc_list", "def volume(self):\n return {'lvad': self._v}", "def show_board(self):\n print(self.capacity_list)", "def nice(self):\n print(self.getName(), \":\", self.getLen())", "def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs", "def volume(self):\n return [node.volume for node in self]", "def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volumes_per_instance\")", "def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"volumes_per_instance\")", "def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:\n return 
pulumi.get(self, \"volumes_per_instance\")", "def show_linkages():\n appnames = tuple(linkages.keys())\n appcount = len(appnames)\n plural = (appcount == 1) and \"app\" or \"apps\"\n print(f\"LINKAGES ({appcount} {plural} total):\")\n \n for appname in appnames:\n LoaderCls = linkages[appname]\n qname = qualified_name(LoaderCls)\n instancedict = dict(LoaderCls.instances)\n instancecount = len(LoaderCls.instances)\n instanceplural = (instancecount == 1) and \"instance\" or \"instances\"\n string = pformat(instancedict, indent=4, width=consts.SEPARATOR_WIDTH)\n print()\n print(f\" «{appname}» ({qname}, {instancecount} {instanceplural}):\")\n print(f\"{string}\")", "def ShowAllVouchers(cmd_args=[], cmd_options={}):\n iv_hash_table = kern.globals.ivht_bucket\n num_buckets = sizeof(kern.globals.ivht_bucket) / sizeof(kern.globals.ivht_bucket[0])\n print GetIPCVoucherSummary.header\n for i in range(num_buckets):\n for v in IterateQueue(iv_hash_table[i], 'ipc_voucher_t', 'iv_hash_link'):\n print GetIPCVoucherSummary(v)", "def ListInstances(self, hvparams=None):\n return [name for name in os.listdir(self._ROOT_DIR)\n if self._IsDirLive(utils.PathJoin(self._ROOT_DIR, name))]", "def __mount_ebs_volume( self ):\n ebs_volume_size = self.instance_tag( 'ebs_volume_size' ) or '0'\n ebs_volume_size = int( ebs_volume_size )\n if ebs_volume_size:\n instance_name = self.instance_tag( 'Name' )\n cluster_ordinal = int( self.instance_tag( 'cluster_ordinal' ) )\n volume_name = '%s__%d' % (instance_name, cluster_ordinal)\n volume = EC2VolumeHelper( ec2=self.ec2,\n availability_zone=self.availability_zone,\n name=volume_name,\n size=ebs_volume_size,\n volume_type=\"gp2\" )\n # TODO: handle case where volume is already attached\n device_ext = '/dev/sdf'\n device = '/dev/xvdf'\n volume.attach( self.instance_id, device_ext )\n\n # Wait for inode to appear and make sure its a block device\n while True:\n try:\n assert stat.S_ISBLK( os.stat( device ).st_mode )\n break\n except OSError as e:\n if e.errno == errno.ENOENT:\n time.sleep( 1 )\n else:\n raise\n\n # Only format empty volumes\n volume_label = volume_label_hash( volume_name )\n if check_output( [ 'file', '-sL', device ] ).strip( ) == device + ': data':\n check_call( [ 'mkfs', '-t', 'ext4', device ] )\n check_call( [ 'e2label', device, volume_label ] )\n else:\n # If the volume is not empty, verify the file system label\n actual_label = check_output( [ 'e2label', device ] ).strip( )\n if actual_label != volume_label:\n raise AssertionError(\n \"Expected volume label '%s' (derived from '%s') but got '%s'\" %\n (volume_label, volume_name, actual_label) )\n current_mount_point = self.__mount_point( device )\n if current_mount_point is None:\n mkdir_p( self.persistent_dir )\n check_call( [ 'mount', device, self.persistent_dir ] )\n elif current_mount_point == self.persistent_dir:\n pass\n else:\n raise RuntimeError(\n \"Can't mount device %s on '%s' since it is already mounted on '%s'\" % (\n device, self.persistent_dir, current_mount_point) )\n else:\n # No persistent volume is attached and the root volume is off limits, so we will need\n # to place persistent data on the ephemeral volume.\n self.persistent_dir = self.ephemeral_dir", "def get_lun_storage_info(lun_id):\n host = ll_hosts.get_spm_host(config.HOSTS)\n host_ip = ll_hosts.get_host_ip(host)\n executor = rhevm_helpers.get_host_executor(\n host_ip, config.VDC_ROOT_PASSWORD\n )\n # Execute 'pvscan' to display the latest volume info\n storage_resources.pvscan(host)\n logger.info(\"Executing command 'pvs | grep 
%s'\", lun_id)\n status, output, err = executor.run_cmd(\n shlex.split(PVS_SHOW_LUN_INFO % lun_id)\n )\n if status:\n logger.info(\n \"Status was False executing 'pvs | grep %s'. Err: %s\",\n lun_id, err\n )\n return 0, 0\n\n # Format the output into the 6 expected display parameters (PV, VG,\n # Format, LV Attributes, Physical size and Physical free size)\n formatted_output = shlex.split(output)\n logger.info(\n \"The output received when running pvs on LUN id %s is: %s\"\n % (lun_id, formatted_output)\n )\n # The 2nd last displayed data output is needed - Physical size\n lun_size = formatted_output[-2]\n lun_size = lun_size.replace(\"g\", \"\")\n lun_free_space = formatted_output[-1]\n lun_free_space = lun_free_space.replace(\"g\", \"\")\n lun_size_bytes = float(lun_size) * config.GB\n logger.info(\"The LUN size in bytes is '%s'\", str(lun_size_bytes))\n lun_free_bytes = float(lun_free_space) * config.GB\n logger.info(\"The LUN free space in bytes is '%s'\", str(lun_free_bytes))\n\n return int(lun_size_bytes), int(lun_free_bytes)", "def print_list(self):\r\n pass", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def list_instances(self):\n try:\n out, err = utils.execute(\n 'sudo', 'vzlist', '--all', '--no-header', '--output', 'ctid')\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to list VZs')\n\n ctids = []\n for line in out.splitlines():\n ctid = line.split()[0]\n ctids.append(ctid)\n\n return ctids" ]
[ "0.66387", "0.65122116", "0.64756095", "0.6264545", "0.61685747", "0.61603457", "0.6125444", "0.61109924", "0.6072397", "0.60526633", "0.60198474", "0.59469944", "0.5940197", "0.5938797", "0.59375834", "0.5925745", "0.5900101", "0.5888648", "0.58722466", "0.5867416", "0.5822639", "0.5807083", "0.57890534", "0.578699", "0.5758984", "0.5737905", "0.57206404", "0.57115394", "0.5705024", "0.56919706", "0.5690328", "0.5687685", "0.5671659", "0.56571466", "0.5647764", "0.56439817", "0.5641415", "0.5631838", "0.56190306", "0.56152195", "0.5610031", "0.5581087", "0.5569096", "0.5568823", "0.5566665", "0.5563958", "0.5563182", "0.5561778", "0.55312663", "0.55254185", "0.5506632", "0.5500908", "0.54721975", "0.54660857", "0.54415876", "0.5440723", "0.5423535", "0.54095966", "0.5389724", "0.53886336", "0.53814965", "0.53734845", "0.536717", "0.53530246", "0.535166", "0.53507787", "0.53491104", "0.5340893", "0.53371805", "0.53355485", "0.53308254", "0.53110164", "0.52936715", "0.5273405", "0.52638173", "0.52588564", "0.52576554", "0.525757", "0.52451223", "0.52420616", "0.5240047", "0.5237884", "0.5232683", "0.52274215", "0.5221799", "0.5221367", "0.5217575", "0.5213659", "0.5211787", "0.51870257", "0.51870257", "0.51870257", "0.51802117", "0.51764065", "0.5170256", "0.5161797", "0.51601386", "0.5159998", "0.5154777", "0.51536703" ]
0.6720033
0
Grows EBS volume for given task.
def grow_ebs_for_task(task_fragment, target_size_gb):
  ec2 = u.create_ec2_resource()
  client = u.create_ec2_client()

  # todo: don't crash on missing/duplicate names
  instances = {u.get_name(i.tags): i for i in ec2.instances.all()}

  ec2 = u.create_ec2_resource()
  instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]
  sorted_instances = reversed(sorted(instances, key=itemgetter(0)))

  for (seconds, instance) in sorted_instances:
    task_name = u.get_name(instance.tags)
    hours_ago = (time.time()-seconds)/3600
    hours_ago+=8  # adjust for time being in UTC
    if task_fragment in task_name:
      print("Found instance %s launched %.1f hours ago" %( task_name, hours_ago))
      break

  print(instance.id)
  volumes = list(instance.volumes.all())
  assert len(volumes)==1, "Must have 1 volume"
  print("Growing %s to %s"%(volumes[0].id, target_size_gb))
  response = client.modify_volume(
      VolumeId=volumes[0].id,
      Size=target_size_gb,
  )
  assert u.is_good_response(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grow_volume(self, volume, growth, async=False):\n\n assert isinstance(volume, dict), \"volume configuration is invalid, 'dict' type expected\"\n assert volume.get('id'), \"volume.id can't be blank\"\n\n async_result = __node__['bollard'].apply_async('api.postgresql.grow-volume',\n args=(volume, growth),\n soft_timeout=(1 * 24) * 3600,\n hard_timeout=(1 * 24 + 1) * 3600,\n callbacks={'task.pull': grow_volume_callback})\n if async:\n return async_result.task_id\n else:\n return async_result.get()", "def extend_volume(self, volume, new_size):\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.extend(volume, new_size)\n return self._wait_available_volume(volume)", "def expand_volume(self, vol, new_size):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(vol)\n size_in_bytes = vipr_utils.to_bytes(str(new_size) + \"G\")\n\n try:\n self.volume_obj.expand(\n self.configuration.vipr_tenant +\n \"/\" +\n self.configuration.vipr_project +\n \"/\" +\n volume_name,\n size_in_bytes,\n True)\n except vipr_utils.SOSError as e:\n if e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR:\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + volume_name + \": expand failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : %s expand failed\") % volume_name)", "def extend_volume(self, volume, new_size):\n LOG.info('Extending volume: %(id)s New size: %(size)s GB',\n {'id': volume['id'], 'size': new_size})\n nfs_share = volume['provider_location']\n nms = self.share2nms[nfs_share]\n volume_path = self.remote_path(volume)\n if getattr(self.configuration,\n self.driver_prefix + '_sparsed_volumes'):\n self._create_sparsed_file(nms, volume_path, new_size)\n else:\n block_size_mb = 1\n block_count = ((new_size - volume['size']) * units.Gi /\n (block_size_mb * units.Mi))\n\n nms.appliance.execute(\n 'dd if=/dev/zero seek=%(seek)d of=%(path)s'\n ' bs=%(bs)dM count=%(count)d' % {\n 'seek': volume['size'] * units.Gi / block_size_mb,\n 'path': volume_path,\n 'bs': block_size_mb,\n 'count': block_count\n }\n )", "def perform_module_operation(self):\n size = self.module.params['size']\n state = self.module.params['state']\n new_name = self.module.params['new_name']\n vol_id = self.module.params['vol_id']\n vol_name = self.module.params['vol_name']\n sg_name = self.module.params['sg_name']\n cap_unit = self.module.params['cap_unit']\n new_sg_name = self.module.params['new_sg_name']\n\n if vol_name is not None and sg_name is None:\n self.show_error_exit(msg='Specify Storage group name along '\n 'with volume name')\n\n if size and cap_unit is None:\n cap_unit = 'GB'\n elif cap_unit and size is None:\n self.show_error_exit(msg='Parameters size and cap_unit are '\n 'required together')\n self.volume_id = vol_id\n\n vol = self.get_volume()\n\n existing_vol_size = 0\n if vol is not None:\n self.volume_id = vol['volumeId']\n vol_id = vol['volumeId']\n existing_vol_size = vol['cap_gb']\n\n changed = False\n\n # Call to create volume in storage group\n if state == 'present' and vol is None:\n if new_name:\n self.show_error_exit(msg=\"Invalid argument new_name \"\n \"while creating a volume\")\n if size is None:\n self.show_error_exit(msg='Size is required to create volume')\n vol_id = self.create_volume(vol_name, sg_name, size, cap_unit)\n changed = True\n\n if 
state == 'present' and vol and size:\n if size is None:\n self.show_error_exit(msg='Size is required to expand volume')\n # Convert the given size to GB\n if size is not None and size > 0:\n size = utils.get_size_in_gb(size, cap_unit)\n LOG.info('Existing Size: %s GB, Specified Size: %s GB',\n existing_vol_size, size)\n changed = self.expand_volume_helper(vol, size, existing_vol_size)\n\n if state == 'present' and vol and new_name is not None:\n if len(new_name.strip()) == 0:\n self.show_error_exit(msg=\"Please provide valid volume \"\n \"name.\")\n\n vol_name = vol['volume_identifier']\n if new_name != vol_name:\n LOG.info('Changing the name of volume %s to %s',\n vol_name, new_name)\n changed = self.rename_volume(vol_id, new_name) or changed\n\n if state == 'absent' and vol:\n LOG.info('Deleting volume %s ', vol_id)\n changed = self.delete_volume(vol_id) or changed\n\n if state == 'present' and vol and new_sg_name:\n vol_sg = vol['storageGroupId'][0]\n if vol_sg != new_sg_name:\n LOG.info('Moving volume from %s to %s', vol_sg, new_name)\n changed = self.move_volume_between_storage_groups(\n vol, sg_name, new_sg_name) or changed\n\n '''\n Finally update the module changed state and saving updated volume\n details\n '''\n self.u4v_conn.set_array_id(\n array_id=self.module.params['serial_no'])\n self.result[\"changed\"] = changed\n if state == 'present':\n self.result[\"volume_details\"] = self.get_volume()\n LOG.info(\"Closing unisphere connection %s\", self.u4v_conn)\n utils.close_connection(self.u4v_conn)\n LOG.info(\"Connection closed successfully\")\n self.module.exit_json(**self.result)", "def grow(self, size):\n # size of the instance\n if size is not None and (type(size) == int or size.isdigit()):\n size = { 'size': int(size) }\n else:\n # TODO : proper error\n raise Exception()\n\n if self.size > size['size']:\n # TODO : proper error\n raise Exception((\"This instance has a data storage volume of %d GB and cannot \" + \\\n \"be shrunk. 
(Tried to specify %d GB as new size.)\") % (self.size, size['size']))\n\n self.client.post(self.path+'/action', { 'resize': {'volume': size} })\n return True", "def extend_volume(self, volume, new_size):\n spdk_name = self._get_spdk_volume_name(volume.name)\n params = {'name': spdk_name, 'size': new_size * units.Gi}\n self._rpc_call('bdev_lvol_resize', params)", "def guest_grow_root_volume(self, userid, os_version):\n LOG.debug('Begin to punch grow partition commands to guest: %s',\n userid)\n linuxdist = self._dist_manager.get_linux_dist(os_version)()\n # get configuration commands\n config_cmds = linuxdist.get_extend_partition_cmds()\n # Creating tmp file with these cmds\n temp_folder = self._pathutils.get_guest_temp_path(userid)\n file_path = os.path.join(temp_folder, 'gpartvol.sh')\n LOG.debug('Creating file %s to contain root partition extension '\n 'commands' % file_path)\n with open(file_path, \"w\") as f:\n f.write(config_cmds)\n try:\n self._smtclient.punch_file(userid, file_path, \"X\")\n finally:\n LOG.debug('Removing the folder %s ', temp_folder)\n shutil.rmtree(temp_folder)", "def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)", "def resize_volume(self, delta_disk, vdisk_name):\n LOG.debug(\"Entering\")\n cmd = \"svctask expandvdisksize -size %s \" \\\n \"-unit b %s\" % (delta_disk, vdisk_name)\n\n output = self._svc_command(cmd)[0]\n LOG.debug(\"Exiting\")", "def volume_up(self) -> None:\n self.volume = min(self.volume + self.config.volume_step, 100)", "def GrowInstanceDisk(self, instance, disk, amount, wait_for_sync=None,\n reason=None):\n body = {\n \"amount\": amount,\n }\n\n _SetItemIf(body, wait_for_sync is not None, \"wait_for_sync\", wait_for_sync)\n\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_POST,\n (\"/%s/instances/%s/disk/%s/grow\" %\n (GANETI_RAPI_VERSION, instance, disk)),\n query, body)", "async def expand(self, job, id, options):\n pool = await self.middleware.call('pool.get_instance', id)\n if osc.IS_LINUX:\n if options.get('passphrase'):\n raise CallError('Passphrase should not be supplied for this platform.')\n # FIXME: We have issues in ZoL where when pool is created with partition uuids, we are unable\n # to expand pool where all pool related options error out saying I/O error\n # https://github.com/zfsonlinux/zfs/issues/9830\n raise CallError('Expand is not supported on this platform yet because of underlying ZFS issues.')\n else:\n if pool['encrypt']:\n if not pool['is_decrypted']:\n raise CallError('You can only expand decrypted pool')\n\n for error in (\n await self.middleware.call('pool.pool_lock_pre_check', pool, options['geli']['passphrase'])\n ).errors:\n raise CallError(error.errmsg)\n\n all_partitions = {p['name']: p for p in await self.middleware.call('disk.list_all_partitions')}\n\n try:\n if osc.IS_FREEBSD:\n sysctl.filter('kern.geom.debugflags')[0].value = 16\n geli_resize = []\n try:\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK':\n logger.debug('Not expanding vdev of type %r', vdev['type'])\n continue\n\n if vdev['status'] != 'ONLINE':\n logger.debug('Not expanding vdev that is %r', vdev['status'])\n continue\n\n part_data = all_partitions.get(vdev['device'])\n if not part_data:\n logger.debug('Unable to find partition data for %s', vdev['device'])\n\n partition_number = part_data['partition_number']\n if not partition_number:\n logger.debug('Could not parse partition number from %r', vdev['device'])\n continue\n\n assert 
part_data['disk'] == vdev['disk']\n\n if osc.IS_LINUX:\n await run(\n 'sgdisk', '-d', str(partition_number), '-n', f'{partition_number}:0:0',\n '-c', '2:', '-u', f'{partition_number}:{part_data[\"partition_uuid\"]}',\n '-t', f'{partition_number}:BF01', part_data['path']\n )\n await run('partprobe', os.path.join('/dev', part_data['disk']))\n else:\n await run('camcontrol', 'reprobe', vdev['disk'])\n await run('gpart', 'recover', vdev['disk'])\n await run('gpart', 'resize', '-i', str(partition_number), vdev['disk'])\n\n if osc.IS_FREEBSD and pool['encrypt']:\n geli_resize_cmd = (\n 'geli', 'resize', '-s', str(part_data['size']), vdev['device']\n )\n rollback_cmd = (\n 'gpart', 'resize', '-i', str(partition_number), '-s', str(part_data['size']), vdev['disk']\n )\n\n logger.warning('It will be obligatory to notify GELI that the provider has been resized: %r',\n join_commandline(geli_resize_cmd))\n logger.warning('Or to resize provider back: %r',\n join_commandline(rollback_cmd))\n geli_resize.append((geli_resize_cmd, rollback_cmd))\n finally:\n if osc.IS_FREEBSD and geli_resize:\n await self.__geli_resize(pool, geli_resize, options)\n finally:\n if osc.IS_FREEBSD:\n sysctl.filter('kern.geom.debugflags')[0].value = 0\n\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK' or vdev['status'] != 'ONLINE':\n continue\n\n await self.middleware.call('zfs.pool.online', pool['name'], vdev['guid'], True)", "def add_volume(self, size=100):\n tfvars_file = \"terraform.tfvars.json\"\n with open(os.path.join(self.cluster_path, tfvars_file)) as f:\n tfvars = json.load(f)\n\n cluster_id = tfvars['cluster_id']\n worker_pattern = f'{cluster_id}-worker*'\n logger.info(f'Worker pattern: {worker_pattern}')\n self.create_ebs_volumes(worker_pattern, size)", "def create_transfer_tasks(\n task_queue, src_layer_path, dest_layer_path, \n chunk_size=None, shape=Vec(2048, 2048, 64), \n fill_missing=False, translate=(0,0,0), \n bounds=None, mip=0, preserve_chunk_size=True\n ):\n shape = Vec(*shape)\n vol = CloudVolume(src_layer_path, mip=mip)\n translate = Vec(*translate) // vol.downsample_ratio\n \n if not chunk_size:\n chunk_size = vol.info['scales'][mip]['chunk_sizes'][0]\n chunk_size = Vec(*chunk_size)\n\n try:\n dvol = CloudVolume(dest_layer_path, mip=mip)\n except Exception: # no info file\n info = copy.deepcopy(vol.info)\n dvol = CloudVolume(dest_layer_path, info=info)\n dvol.commit_info()\n\n dvol.info['scales'] = dvol.info['scales'][:mip+1]\n dvol.info['scales'][mip]['chunk_sizes'] = [ chunk_size.tolist() ]\n dvol.commit_info()\n\n create_downsample_scales(dest_layer_path, \n mip=mip, ds_shape=shape, preserve_chunk_size=preserve_chunk_size)\n \n if bounds is None:\n bounds = vol.bounds.clone()\n else:\n bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)\n bounds = Bbox.clamp(bounds, vol.bounds)\n\n total = int(reduce(operator.mul, np.ceil(bounds.size3() / shape)))\n for startpt in tqdm(xyzrange( bounds.minpt, bounds.maxpt, shape ), desc=\"Inserting Transfer Tasks\", total=total):\n task = TransferTask(\n src_path=src_layer_path,\n dest_path=dest_layer_path,\n shape=shape.clone(),\n offset=startpt.clone(),\n fill_missing=fill_missing,\n translate=translate,\n mip=mip,\n )\n task_queue.insert(task)\n task_queue.wait('Uploading Transfer Tasks')\n\n job_details = {\n 'method': {\n 'task': 'TransferTask',\n 'src': src_layer_path,\n 'dest': dest_layer_path,\n 'shape': list(map(int, shape)),\n 'fill_missing': fill_missing,\n 'translate': list(map(int, translate)),\n 'bounds': [\n 
bounds.minpt.tolist(),\n bounds.maxpt.tolist()\n ],\n 'mip': mip,\n },\n 'by': OPERATOR_CONTACT,\n 'date': strftime('%Y-%m-%d %H:%M %Z'),\n }\n\n dvol = CloudVolume(dest_layer_path)\n dvol.provenance.sources = [ src_layer_path ]\n dvol.provenance.processing.append(job_details) \n dvol.commit_provenance()\n\n if vol.path.protocol != 'boss':\n vol.provenance.processing.append(job_details)\n vol.commit_provenance()", "def volume_up(self) -> None:\n newvolume = min(self._client.volume + 4, 100)\n self._client.set_volume(newvolume)", "def expand_volume_helper(self, vol, size_in_gb, existing_vol_size):\n vol_id = vol['volumeId']\n try:\n if size_in_gb < existing_vol_size:\n self.show_error_exit(msg='Current volume size {0} GB is '\n 'greater than {1} GB specified.'.\n format(existing_vol_size, size_in_gb))\n elif size_in_gb > existing_vol_size:\n if 'rdfGroupId' in vol:\n array_id = self.module.params['serial_no']\n array_details = self.common.get_array(array_id=array_id)\n if utils.parse_version(array_details['ucode'])\\\n < utils.parse_version(self.foxtail_version):\n msg = (\"Expansion of SRDF protected volume is\"\n \" supported from v5978.444.444 onward. Please\"\n \" upgrade the array for this support.\")\n self.show_error_exit(msg=msg)\n return self.srdf_volume_expansion(vol, size_in_gb,\n existing_vol_size)\n return self.expand_volume(vol_id, size_in_gb,\n existing_vol_size)\n\n LOG.info('Current volume size and specified volume size'\n ' are equal')\n return False\n except Exception as e:\n error_message = 'Expand volume %s failed with error: %s' \\\n % (vol_id, str(e))\n self.show_error_exit(msg=error_message)", "def asm_volume_puse(self, name):\n sql = '''select round(((TOTAL_MB-FREE_MB)/TOTAL_MB*100),2) \n from v$asm_diskgroup_stat where name = '{0}' '''.format(name)\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def resize_volume(self, size):\n curr_size = self.volume.size\n if size <= curr_size:\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\n \"than the current volume size of '%s'.\" % curr_size)\n body = {\"volume\": {\"size\": size}}\n self.manager.action(self, \"resize\", body=body)", "def volumes(self):", "def volume_increase():\n request_command(tv_command=TVCommand.volume_increase)", "def ensure_space(self,\n context: context.RequestContext,\n volume: objects.Volume) -> bool:\n\n # Check to see if the cache is actually limited.\n if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0:\n return True\n\n # Make sure that we can potentially fit the image in the cache\n # and bail out before evicting everything else to try and make\n # room for it.\n if (self.max_cache_size_gb != 0 and\n volume.size > self.max_cache_size_gb):\n return False\n\n # Assume the entries are ordered by most recently used to least used.\n entries = self.db.image_volume_cache_get_all(\n context,\n **self._get_query_filters(volume))\n\n current_count = len(entries)\n\n current_size = 0\n for entry in entries:\n current_size += entry['size']\n\n # Add values for the entry we intend to create.\n current_size += volume.size\n current_count += 1\n\n LOG.debug('Image-volume cache for %(service)s current_size (GB) = '\n '%(size_gb)s (max = %(max_gb)s), current count = %(count)s '\n '(max = %(max_count)s).',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'max_gb': self.max_cache_size_gb,\n 'count': current_count,\n 'max_count': self.max_cache_size_count})\n\n while (((current_size > self.max_cache_size_gb and\n 
self.max_cache_size_gb > 0)\n or (current_count > self.max_cache_size_count and\n self.max_cache_size_count > 0))\n and len(entries)):\n entry = entries.pop()\n LOG.debug('Reclaiming image-volume cache space; removing cache '\n 'entry %(entry)s.', {'entry': self._entry_to_str(entry)})\n self._delete_image_volume(context, entry)\n current_size -= entry['size']\n current_count -= 1\n LOG.debug('Image-volume cache for %(service)s new size (GB) = '\n '%(size_gb)s, new count = %(count)s.',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'count': current_count})\n\n # It is only possible to not free up enough gb, we will always be able\n # to free enough count. This is because 0 means unlimited which means\n # it is guaranteed to be >0 if limited, and we can always delete down\n # to 0.\n if self.max_cache_size_gb > 0:\n if current_size > self.max_cache_size_gb > 0:\n LOG.warning('Image-volume cache for %(service)s does '\n 'not have enough space (GB).',\n {'service': volume.service_topic_queue})\n return False\n\n return True", "def resize_volume(self, volumeObj, sizeInGb, bsize=1000):\n current_vol = self.get_volume_by_id(volumeObj.id)\n if current_vol.size_kb > (sizeInGb * bsize * bsize):\n raise RuntimeError(\n \"resize_volume() - New size needs to be bigger than: %d KBs\" % current_vol.size_kb)\n \n resizeDict = { 'sizeInGB' : str(sizeInGb) }\n response = self.conn.connection._do_post(\"{}/{}{}/{}\".format(\n self.conn.connection._api_url, \"instances/Volume::\", volumeObj.id, 'action/setVolumeSize'), json=resizeDict)\n return response", "def __mount_ebs_volume( self ):\n ebs_volume_size = self.instance_tag( 'ebs_volume_size' ) or '0'\n ebs_volume_size = int( ebs_volume_size )\n if ebs_volume_size:\n instance_name = self.instance_tag( 'Name' )\n cluster_ordinal = int( self.instance_tag( 'cluster_ordinal' ) )\n volume_name = '%s__%d' % (instance_name, cluster_ordinal)\n volume = EC2VolumeHelper( ec2=self.ec2,\n availability_zone=self.availability_zone,\n name=volume_name,\n size=ebs_volume_size,\n volume_type=\"gp2\" )\n # TODO: handle case where volume is already attached\n device_ext = '/dev/sdf'\n device = '/dev/xvdf'\n volume.attach( self.instance_id, device_ext )\n\n # Wait for inode to appear and make sure its a block device\n while True:\n try:\n assert stat.S_ISBLK( os.stat( device ).st_mode )\n break\n except OSError as e:\n if e.errno == errno.ENOENT:\n time.sleep( 1 )\n else:\n raise\n\n # Only format empty volumes\n volume_label = volume_label_hash( volume_name )\n if check_output( [ 'file', '-sL', device ] ).strip( ) == device + ': data':\n check_call( [ 'mkfs', '-t', 'ext4', device ] )\n check_call( [ 'e2label', device, volume_label ] )\n else:\n # If the volume is not empty, verify the file system label\n actual_label = check_output( [ 'e2label', device ] ).strip( )\n if actual_label != volume_label:\n raise AssertionError(\n \"Expected volume label '%s' (derived from '%s') but got '%s'\" %\n (volume_label, volume_name, actual_label) )\n current_mount_point = self.__mount_point( device )\n if current_mount_point is None:\n mkdir_p( self.persistent_dir )\n check_call( [ 'mount', device, self.persistent_dir ] )\n elif current_mount_point == self.persistent_dir:\n pass\n else:\n raise RuntimeError(\n \"Can't mount device %s on '%s' since it is already mounted on '%s'\" % (\n device, self.persistent_dir, current_mount_point) )\n else:\n # No persistent volume is attached and the root volume is off limits, so we will need\n # to place persistent data on the 
ephemeral volume.\n self.persistent_dir = self.ephemeral_dir", "def volume_up(self):\n self._remote.volume(int(self._volume * 60) + 2)", "def get_capacity():\n fs.get_capacity()", "def add(self, task, qhigh, qlow):\n try:\n qlen = self.tasks.qsize()\n if qlen > qhigh:\n print \"Throttling input, reached HWM:\", qhigh\n while qlen > qlow:\n delay = random.randint(1,10)\n time.sleep(delay)\n qlen = self.tasks.qsize()\n print \"Throttling released, down to LWM:\", qlow\n except NotImplementedError:\n # Skip on Mac OS X (WARNING - use on OS X in testing only, queue \n # size will max out at a paltry 32768 items)\n pass\n try:\n self.tasks.put(task)\n self.recordsProcessed += task.datalen\n except qFull:\n # While testing: we shouldn't hopefully end up here...\n print \"ERR: queue full\"\n sys.exit(-1)", "def manage_existing_get_size(self, volume, existing_ref):\n existing_vol_name = self._get_existing_vol_name(existing_ref)\n\n # The ZFSSA NFS driver only has one mounted share.\n local_share_mount = self._get_mount_point_for_share(\n self._mounted_shares[0])\n local_vol_path = os.path.join(local_share_mount, existing_vol_name)\n\n try:\n if os.path.isfile(local_vol_path):\n size = int(math.ceil(float(\n utils.get_file_size(local_vol_path)) / units.Gi))\n except (OSError, ValueError):\n err_msg = (_(\"Failed to get size of existing volume: %(vol). \"\n \"Volume Manage failed.\"), {'vol': existing_vol_name})\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)\n\n LOG.debug(\"Size volume: %(vol)s to be migrated is: %(size)s.\",\n {'vol': existing_vol_name, 'size': size})\n\n return size", "def disk_set(vm_hostname, size):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n\n current_size_gib = vm.dataset_obj['disk_size_gib']\n if size.startswith('+'):\n new_size_gib = current_size_gib + parse_size(size[1:], 'g')\n elif size.startswith('-'):\n new_size_gib = current_size_gib - parse_size(size[1:], 'g')\n else:\n new_size_gib = parse_size(size, 'g')\n\n if new_size_gib == vm.dataset_obj['disk_size_gib']:\n raise Warning('Disk size is the same.')\n\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_disk_set(new_size_gib)\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n vm.hypervisor.vm_set_disk_size_gib(vm, new_size_gib)\n\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n vm.dataset_obj['disk_size_gib'] = new_size_gib\n vm.dataset_obj.commit()", "def volume_up(self) -> None:\n if self.volume_level is None:\n return\n volume = round(self.volume_level * MAX_VOLUME)\n self._monoprice.set_volume(self._zone_id, min(volume + 1, MAX_VOLUME))", "def volume_up():\n sonos.set_relative_volume(10)\n return \"Ok\"", "def volume_up(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)", "def increase_volume(self) -> None:\n for _ in range(10):\n self.media.volume_up()\n self.system.notify(f\"Jarvis::Increased Volume: {self.media.get_volume()['volume']}%\")", "def _tag_volume():\n if dry:\n print('Would tag the new volume.')\n return True\n\n while True:\n # waiting for the volume to be up to tag it\n i = _fetch('vm')\n v = [x for x in i.volumes.all()]\n if len(v) == 0:\n # volumes should actually be already there once the IP is up\n time.sleep(1)\n else:\n for x in v:\n print('Tagging volume ' + x.id + '.')\n _tag_resource(x)\n break", "def 
_estimate_free(self):\n # Query the information we need for this task's channel and package.\n capacity_deferred = self.channel.total_capacity()\n open_tasks_deferred = self.channel.tasks(state=[task_states.OPEN])\n avg_delta_deferred = self.estimate_duration()\n deferreds = [capacity_deferred,\n open_tasks_deferred,\n avg_delta_deferred]\n results = yield defer.gatherResults(deferreds, consumeErrors=True)\n capacity, open_tasks, avg_delta = results\n # Ensure this task's channel has spare capacity for this task.\n open_weight = sum([task.weight for task in open_tasks])\n if open_weight >= capacity:\n # TODO: Evaluate all tasks in the channel and\n # determine when enough OPEN tasks will complete so that we can\n # get to OPEN.\n raise NotImplementedError('channel %d is at capacity' %\n self.channel_id)\n # A builder will pick up this task and start it within SLEEPTIME.\n # start_time is the maximum amount of time we expect to wait here.\n start_time = self.created + SLEEPTIME\n if avg_delta is None:\n defer.returnValue(None)\n est_completion = start_time + avg_delta\n defer.returnValue(est_completion)", "async def volume(\n self, ctx: commands.Context, volume: int = None\n ) -> Optional[float]:\n\n if volume is None:\n return ctx.voice_client.source.volume * 100\n\n ctx.voice_client.source.volume = volume / 100\n self.queue[ctx.guild.id].volume = volume / 100\n return ctx.voice_client.source.volume * 100", "def total_volume(self):", "def manage_existing_get_size(self, volume, existing_ref):\n\n target_vol_name = existing_ref['source-name']\n\n self.client_login()\n try:\n size = self.client.get_volume_size(target_vol_name)\n return size\n except stx_exception.RequestError as ex:\n LOG.exception(\"Error manage existing get volume size.\")\n raise exception.Invalid(ex)\n finally:\n self.client_logout()", "def __percentage_storage_saved(task):\n task_information = task.get_task_information()\n return int((1 - task_information.output_filesize / task_information.filesize) * 100)", "def __call__(self, task):\n self.put(task)\n return self.get()", "def update_fields(task):\n task['totalLength'] = int(task['totalLength'])\n task['completedLength'] = int(task['completedLength'])\n task['downloadSpeed'] = int(task['downloadSpeed'])\n task['eta'] = calculate_eta(task)\n\n if task['files']:\n # there might be multiple files for BT tasks, but we don't support BT\n path = task['files'][0]['path']\n if path:\n filename = os.path.relpath(path, task['dir'])\n task['filename'] = filename\n # the following fields are not needed and should not be exposed\n task.pop('files')\n task.pop('dir')\n\n return task", "def perform_module_operation(self):\n filesystem_name = self.module.params['filesystem_name']\n filesystem_id = self.module.params['filesystem_id']\n nas_server_name = self.module.params['nas_server_name']\n nas_server_id = self.module.params['nas_server_id']\n pool_name = self.module.params['pool_name']\n pool_id = self.module.params['pool_id']\n size = self.module.params['size']\n cap_unit = self.module.params['cap_unit']\n quota_config = self.module.params['quota_config']\n state = self.module.params['state']\n snap_schedule_name = self.module.params['snap_schedule_name']\n snap_schedule_id = self.module.params['snap_schedule_id']\n\n # result is a dictionary to contain end state and FileSystem details\n changed = False\n result = dict(\n changed=False,\n filesystem_details=None\n )\n\n to_modify_dict = None\n filesystem_details = None\n quota_config_obj = None\n\n self.validate_input_string()\n\n 
if size is not None and size == 0:\n self.module.fail_json(msg=\"Size can not be 0 (Zero)\")\n\n if size and not cap_unit:\n cap_unit = 'GB'\n\n if quota_config:\n if (quota_config['default_hard_limit'] is not None\n or quota_config['default_soft_limit'] is not None) and \\\n not quota_config['cap_unit']:\n quota_config['cap_unit'] = 'GB'\n\n if quota_config['grace_period'] is not None \\\n and quota_config['grace_period_unit'] is None:\n quota_config['grace_period_unit'] = 'days'\n\n if quota_config['grace_period'] is not None \\\n and quota_config['grace_period'] <= 0:\n self.module.fail_json(msg=\"Invalid grace_period provided. \"\n \"Must be greater than 0.\")\n\n if quota_config['default_soft_limit'] is not None \\\n and utils.is_size_negative(quota_config['default_soft_limit']):\n self.module.fail_json(msg=\"Invalid default_soft_limit provided. \"\n \"Must be greater than or equal to 0.\")\n\n if quota_config['default_hard_limit'] is not None \\\n and utils.is_size_negative(quota_config['default_hard_limit']):\n self.module.fail_json(msg=\"Invalid default_hard_limit provided. \"\n \"Must be greater than or equal to 0.\")\n\n if (cap_unit is not None) and not size:\n self.module.fail_json(msg=\"cap_unit can be specified along \"\n \"with size\")\n\n nas_server = None\n if nas_server_name or nas_server_id:\n nas_server = self.get_nas_server(\n name=nas_server_name, id=nas_server_id)\n\n obj_pool = None\n if pool_name or pool_id:\n obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id)\n\n obj_fs = None\n obj_fs = self.get_filesystem(name=filesystem_name,\n id=filesystem_id,\n obj_nas_server=nas_server)\n\n self.snap_sch_id = None\n if snap_schedule_name or snap_schedule_id:\n snap_schedule_params = {\n \"name\": snap_schedule_name,\n \"id\": snap_schedule_id\n }\n self.snap_sch_id = self.resolve_to_snapschedule_id(snap_schedule_params)\n elif snap_schedule_name == \"\" or snap_schedule_id == \"\":\n self.snap_sch_id = \"\"\n\n if obj_fs:\n filesystem_details = obj_fs._get_properties()\n filesystem_id = obj_fs.get_id()\n to_modify_dict = self.is_modify_required(obj_fs, cap_unit)\n LOG.info(\"From Mod Op, to_modify_dict: %s\", to_modify_dict)\n\n if state == 'present' and not filesystem_details:\n if not filesystem_name:\n msg_noname = \"FileSystem with id {0} is not found, unable to \" \\\n \"create a FileSystem without a valid \" \\\n \"filesystem_name\".format(filesystem_id)\n self.module.fail_json(msg=msg_noname)\n\n if not pool_name and not pool_id:\n self.module.fail_json(msg=\"pool_id or pool_name is required \"\n \"to create new filesystem\")\n if not size:\n self.module.fail_json(msg=\"Size is required to create\"\n \" a filesystem\")\n size = utils.get_size_bytes(size, cap_unit)\n\n obj_fs = self.create_filesystem(name=filesystem_name,\n obj_pool=obj_pool,\n obj_nas_server=nas_server,\n size=size)\n\n LOG.debug(\"Successfully created filesystem , %s\", obj_fs)\n filesystem_id = obj_fs.id\n filesystem_details = obj_fs._get_properties()\n to_modify_dict = self.is_modify_required(obj_fs, cap_unit)\n LOG.debug(\"Got filesystem id , %s\", filesystem_id)\n changed = True\n\n if state == 'present' and filesystem_details and to_modify_dict:\n self.modify_filesystem(update_dict=to_modify_dict, obj_fs=obj_fs)\n changed = True\n\n \"\"\"\n Set quota configuration\n \"\"\"\n if state == \"present\" and filesystem_details and quota_config:\n quota_config_obj = self.get_quota_config_details(obj_fs)\n\n if quota_config_obj is not None:\n is_quota_config_modified = 
self.modify_quota_config(\n quota_config_obj=quota_config_obj,\n quota_config_params=quota_config)\n\n if is_quota_config_modified:\n changed = True\n else:\n self.module.fail_json(msg=\"One or more operations related\"\n \" to this task failed because the\"\n \" new object created could not be fetched.\"\n \" Please rerun the task for expected result.\")\n\n if state == 'absent' and filesystem_details:\n changed = self.delete_filesystem(filesystem_id)\n filesystem_details = None\n\n if state == 'present' and filesystem_details:\n filesystem_details = self.get_filesystem_display_attributes(\n obj_fs=obj_fs)\n\n result['changed'] = changed\n result['filesystem_details'] = filesystem_details\n self.module.exit_json(**result)", "def derive_newrelic_volume(self):\n # read and write volume\n self.update_metric(\"newrelic/volume_reads\", self.sum_of([\"status/com_select\", \"status/qcache_hits\"]))\n self.update_metric(\"newrelic/volume_writes\", self.sum_of([\"status/com_insert\", \"status/com_insert_select\",\n \"status/com_update\", \"status/com_update_multi\",\n \"status/com_delete\", \"status/com_delete_multi\",\n \"status/com_replace\", \"status/com_replace_select\"]))", "def transferGEE(self):\n task_id = ee.data.newTaskId()[0]\n time = self.meta['properties']['system:time_start']\n \n self.meta['properties']['system:time_start'] = ee.Date(time).getInfo()['value']\n \n request = {\n 'id':'{collectionAsset}/{assetName}'.format(collectionAsset= self.meta['collectionAsset'],assetName =self.meta['assetName']),\n 'properties':self.meta['properties'],\n 'tilesets': [{'sources': self.sources}],\n 'pyramidingPolicy':self.meta['pyramidingPolicy'].upper(),\n 'bands':self.meta['bandNames']\n }\n ee.data.startIngestion(task_id, request, True)\n return task_id", "def disk():\n run(env.disk_usage_command % env)", "def get_drive_stat(self, table_row):\n statvfs = os.statvfs(table_row[2])\n bytes_in_gigabytes = 1024 ** 3\n total = statvfs.f_frsize * statvfs.f_blocks / bytes_in_gigabytes\n # free space for ordinary users (excl. reserved)\n free = statvfs.f_frsize * statvfs.f_bavail / bytes_in_gigabytes\n used = total - free\n for item in [total, free, used]:\n table_row.append(str(\"%.2f\" % item + \" GiB\"))\n return table_row", "def _update_total_ask(self, volume):\r\n self.total_ask += self.gox.base2float(volume)", "def execute(self):\n\n c = self.config\n regions = dict((x.name, x) for x in boto.ec2.regions(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key']))\n connect = regions[c['region']].connect(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key'])\n volume = connect.get_all_volumes([c['volume_id']])[0]\n volume.create_snapshot(c['volume_id'])\n snapshots = {}\n for x in connect.get_all_snapshots():\n if x.volume_id == c['volume_id']:\n snapshots.update({x.id: x.start_time})\n snapshots = sorted(snapshots.items(), key=lambda (k, v): (v, k), reverse=True)\n for i in range(int(c['keep']), len(snapshots)):\n connect.delete_snapshot(snapshots[i][0])", "def get_free_space(config, task):\n if 'host' in config:\n import paramiko\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n ssh.connect(\n config.get('host'),\n config.get('port', 22),\n config.get('user'),\n config.get('password', None),\n config.get('pkey', None),\n config.get('ssh_key_filepath'),\n timeout=5000,\n )\n except Exception as e:\n logger.error(\"Issue connecting to remote host. 
{}\", e)\n task.abort('Error with remote host.')\n if config['allotment'] != -1:\n stdin, stdout, stderr = ssh.exec_command(f\"du -s {config['path']} | cut -f 1\")\n else:\n stdin, stdout, stderr = ssh.exec_command(\n f\"df -k {config['path']} | tail -1 | tr -s ' ' | cut -d' ' -f4\"\n )\n outlines = stdout.readlines()\n resp = ''.join(outlines)\n ssh.close()\n try:\n if config['allotment'] != -1:\n free = int(config['allotment']) - ((int(resp.strip()) * 1024) / 1000000)\n else:\n free = int(resp.strip()) / 1000\n except ValueError:\n logger.error('Non-integer was returned when calculating disk usage.')\n task.abort('Error with remote host.')\n return free\n elif os.name == 'nt':\n import ctypes\n\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(\n ctypes.c_wchar_p(config['path']), None, None, ctypes.pointer(free_bytes)\n )\n return free_bytes.value / (1024 * 1024)\n else:\n stats = os.statvfs(config['path'])\n return (stats.f_bavail * stats.f_frsize) / (1024 * 1024)", "def total_volume(self):\n del self._total_volume", "def aliquot(self, vol, endless=False):\n vol = (vol if isinstance(vol, Q_) else\n to_units(vol, self.volume.units))\n assert vol.dimensionality == Q_(1, 'L').dimensionality, (\n \"Dimension of volume must be [length]**3\")\n if vol > self.volume and not self.endless:\n print \"Insufficient volume.\"\n else:\n quantities = self.quantities * vol/self.volume\n endless = endless if endless is not None else self.endless\n if not self.endless:\n self.volume -= vol\n self.quantities -= quantities\n return Sample(self.solutes, quantities, vol, endless)", "def _update_task(self, st_task):\n\n # Get the estimated hours or None\n rv = self._update_trac_ticket(st_task)\n est = rv['est_hours']\n if est:\n try:\n est = float(est) * 60 * 60\n except:\n est = None\n\n # Update the store\n db_id = self.db.update_task(src_id = st_task.id,\n name = st_task.name,\n tags = ','.join(st_task.tags),\n owner = self.users.get_trac_user(st_task.owner),\n created = st_task.created_at,\n updated = st_task.updated_at,\n time_worked = st_task.hours * 60 * 60,\n time_estimated = est,\n completed = (None,st_task.completed_on)\\\n [st_task.complete])\n\n return db_id", "def grow(self):\n self.mass *= 1.1", "def add(self, task):\n self._count += 1\n path = os.path.join(self._root, \"%d_%s\" % (self._count, task.guid))\n j.sal.fs.writeFile(path, self._serialize_task(task))", "def _update(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")", "def update_volumes():\n print 'do something useful here'", "def b_plus(self, task, q, details=None, **kwargs):\n assert(task.scheduling_parameter != None)\n assert(task.wcet >= 0)\n\n w = q * task.wcet\n\n if task.name == \"Task_20ms\":\n pass\n\n while True:\n # logging.debug(\"w: %d\", w)\n # logging.debug(\"e: %d\", q * task.wcet)\n s = self.get_largestCriticalSection(task,kwargs[\"task_results\"])\n # logging.debug(task.name+\" interferers \"+ str([i.name for i in task.get_resource_interferers()]))\n for ti in task.get_resource_interferers():\n assert(ti.scheduling_parameter != None)\n assert(ti.resource == task.resource)\n \n if self.priority_cmp(ti.scheduling_parameter, task.scheduling_parameter): # equal priority also interferes (FCFS)\n if isinstance(ti, model.LETTask):\n if s + q * task.wcet >= ti.in_event_model.offset:\n s += ti.wcet * ti.in_event_model.eta_plus(w)\n else:\n s += ti.wcet * ti.in_event_model.eta_plus(w)\n #print (\"Task: %s, s: %d, w: %d\" % ()\n # logging.debug(\"e: %s %d x %d\", 
ti.name, ti.wcet, ti.in_event_model.eta_plus(w))\n\n w_new = q * task.wcet + s\n # print (\"w_new: \", w_new)\n if w == w_new:\n assert(w >= q * task.wcet)\n if details is not None:\n details['q*WCET'] = str(q) + '*' + str(task.wcet) + '=' + str(q * task.wcet)\n for ti in task.get_resource_interferers():\n if self.priority_cmp(ti.scheduling_parameter, task.scheduling_parameter):\n if isinstance(ti, model.LETTask):\n if w > ti.in_event_model.offset:\n details[str(ti) + ':eta*WCET'] = str(ti.in_event_model.eta_plus(w)) + '*'\\\n + str(ti.wcet) + '=' + str(ti.wcet * ti.in_event_model.eta_plus(w))\n else:\n details[str(ti) + ':eta*WCET'] = str(ti.in_event_model.eta_plus(w)) + '*'\\\n + str(ti.wcet) + '=' + str(ti.wcet * ti.in_event_model.eta_plus(w))\n return w\n\n w = w_new", "def volup(self, raiseby=1):\n command + 'volup ' + str(raiseby)\n self.run_command(command)", "def put(self, task):\n self.put_idx += 1\n self.task_queue.put(task)", "def put(self, task):\n self.put_id += 1\n self.task_queue.put(task)", "def volume_up(self):\n if self.volume_level < 1:\n self.set_volume_level(min(1, self.volume_level + 0.1))", "def update_volume(self):\r\n\r\n # for the first cell\r\n self.cells[0].volume = self.cells[0].volume + \\\r\n self.inflow - self.flows[0]\r\n # for the intermediate cells\r\n for i in range(1, self.cells_number-1):\r\n self.cells[i].volume = self.cells[i].volume + \\\r\n self.flows[i-1]-self.flows[i]\r\n # for the last cells\r\n self.cells[-1].volume = self.cells[-1].volume + \\\r\n self.flows[-1] - self.outflow", "def calculate_demand(self):\r\n \r\n for cell in self.cells:\r\n cell.demand = min(cell.volume, self.max_volume) /self.interval\r\n self.demand = self.cells[-1].demand", "def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )", "def update_partition(self, event) -> None:\n self.min_width = 150 * len(self.partition)\n self.request_update()", "def get_best_volume_by_quota(volumes, filter_func=None, **kwargs):\n if hasattr(filter_func, '__call__'):\n volumes = [v for v in volumes if filter_func(v, **kwargs)]\n if not volumes:\n raise 
NidhoggException(\"No volume available.\")\n # use min() to get the volume with the smallest ratio\n return min(volumes)", "def bdev_rbd_resize(client, name, new_size):\n params = {\n 'name': name,\n 'new_size': new_size,\n }\n return client.call('bdev_rbd_resize', params)", "def resize(self, size):\n self.instance.resize_volume(size)\n self.size = size", "def task_stagnant(task):", "def cost_pressure_exchanger(blk):\n cost_by_flow_volume(\n blk,\n blk.costing_package.pressure_exchanger.cost,\n pyo.units.convert(\n blk.unit_model.low_pressure_side.properties_in[0].flow_vol,\n (pyo.units.meter**3 / pyo.units.hours),\n ),\n )", "def _grow(self):\n self.capacity *= self.factor\n temp = [None] * self.capacity\n for i in range(self.size):\n temp[i] = self.store[i]\n self.store = temp", "def grow_down_shared(self, cidx, amt):\n # split grow amount among number of clients\n per_amt = amt / (len(self.relative_sizes) - 1 - cidx)\n for idx in range(cidx + 1, len(self.relative_sizes)):\n self._grow(idx, per_amt)", "def volume_get(context, volume_id):\n return _volume_get(context, volume_id)", "def build_expand_volume_command(vol_info_dict, si):\n\n return_dict = None\n try:\n # First get all the node/disk combinations where the volume is not\n # present\n anl = []\n num_nodes = 0\n\n ondisk_storage = \"normal\"\n if \"compressed\" in vol_info_dict['bricks'][0]:\n ondisk_storage = \"compressed\"\n elif \"deduplicated\" in vol_info_dict['bricks'][0]:\n ondisk_storage = \"deduplicated\"\n\n anl, err = _get_allowable_node_list(si, vol_info_dict['name'])\n if err:\n raise Exception(err)\n\n cmd = 'gluster volume add-brick %s ' % vol_info_dict[\"name\"]\n\n repl_count = 0\n\n if 'replicate' in vol_info_dict['type'].lower():\n vol_type = \"replicated\"\n repl_count = int(vol_info_dict[\"replica_count\"])\n else:\n vol_type = \"distributed\"\n\n return_dict, err = build_create_or_expand_volume_command(\n cmd, si, anl, vol_type, ondisk_storage, repl_count, vol_info_dict[\"name\"])\n if err:\n raise Exception(err)\n\n if \"cmd\" in return_dict:\n return_dict[\"cmd\"] = return_dict[\"cmd\"] + \" force --xml\"\n except Exception, e:\n return None, 'Error building expand volume command: %s' % str(e)\n else:\n return return_dict, None", "def get_space_committed():\n reserved = jobtracker.query(\"SELECT SUM(size) FROM files \" \\\n \"WHERE status IN ('downloading', 'new', \" \\\n \"'retrying', 'failed')\", \\\n fetchone=True)\n if reserved is None:\n reserved = 0\n return reserved", "def by_volume(self, TP):\n try:\n vol = self._data_cache[TP]\n except:\n phases = self._phases\n chemicals = self._chemicals\n V = [i.V for i in chemicals]\n size = chemicals.size\n self._data_cache[TP] = \\\n vol = VolumetricFlowIndexer.from_data(\n SparseArray.from_rows([\n SparseVector.from_dict(VolumetricFlowDict(i.dct, TP, V, j, None, {}), size)\n for i, j in zip(self.data, self._phases)\n ]),\n phases, chemicals,\n False\n )\n return vol", "async def volume(self, ctx, vol=-1):\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n vol = int(vol)\n if self.user_in_channel(server_id, ctx.message.author) and vol <= 200 and vol >= 0:\n srv['volume'] = vol/100\n if srv['player']:\n srv['player'].volume = srv['volume']\n await ctx.bot.send_message(ctx.message.channel, self.format_volume_bar(srv['volume']))", "def test_extend_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': 
'',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.extended = {'name': '', 'size': '0',\n 'storageserver': ''}\n self.driver.extend_volume(volume, 12)\n expected = {'name': 'volume10', 'size': '2',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,'}\n self.assertDictMatch(expected, self.extended)", "def get_volume(cls) -> float:\n raise NotImplementedError", "def by_volume(self, TP):\n try:\n vol = self._data_cache['vol', TP]\n except:\n chemicals = self._chemicals\n V = [i.V for i in chemicals]\n phase = self._phase\n self._data_cache['vol', TP] = \\\n vol = ChemicalVolumetricFlowIndexer.from_data(\n SparseVector.from_dict(\n VolumetricFlowDict(self.data.dct, TP, V, None, phase, {}),\n chemicals.size\n ),\n phase, chemicals,\n False\n )\n return vol", "def put(self, task):\n self.async_vis.get_indices_ls.append(task.id)\n self.model.put(task)", "def volume_up(self):\n self._volume += settings.get(\"vlc\", \"volume\", \"step\")\n self._update_volume()\n # self.stdin_queue.put(\"volup\")", "def add_task(self, task): \n self.buffer = np.vstack((self.buffer, task))\n return self.buffer", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n data = connection_info['data']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n luns = [i.lun for i in data_disks]\n new_lun = 1\n # azure allow upto 16 extra datadisk, 1 os disk + 1 ephemeral disk\n # ephemeral disk will always be sdb for linux.\n for i in range(1, 16):\n if i not in luns:\n new_lun = i\n break\n else:\n msg = 'Can not attach volume, exist volume amount upto 16.'\n LOG.error(msg)\n raise nova_ex.NovaException(msg)\n disk = self.disks.get(CONF.azure.resource_group, data['disk_name'])\n managed_disk = dict(id=disk.id)\n data_disk = dict(lun=new_lun,\n name=data['disk_name'],\n managed_disk=managed_disk,\n create_option='attach')\n data_disks.append(data_disk)\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Attach Volume to Instance in Azure finish\"),\n instance=instance)", "def volume(self, v: int) -> None:\n # changed so it returns to the default volume\n if v > VOLUME_MAX:\n self._volume = VOLUME_DEFAULT\n elif v < VOLUME_MIN:\n self._volume = VOLUME_MIN\n else:\n self._volume = v", "def total_volume(self) -> int:\n return self.quantity * self.one_item_volume", "def calculate_task_volatile_size(**kwargs):\n task = kwargs.get(\"data\", {})\n memory_members = kwargs.get(\"resources\", {}).get(\"memory\", [])\n interleave_sets = task.get(\"Payload\").get(\"JsonBody\").get(\"InterleaveSets\", [])\n selected_members = []\n for interleave_set in interleave_sets:\n for pmem in memory_members:\n if PmemHelpers.compare_id(\n interleave_set.get(\"Memory\").get(\"@odata.id\"), pmem.get(\"@odata.id\")\n ):\n selected_members.append(pmem)\n # finding total capacity\n total_capacity = Mapper.get_single_attribute(\n selected_members,\n \"TotalCapacity\",\n MappingTable.summary.value,\n output_as_json=True,\n )\n total_capacity = total_capacity.get(\"TotalCapacity\", {}).get(\"Value\", 0)\n volatile_size = total_capacity\n # finding memory chunk size\n memory_chunk_size = Mapper.get_single_attribute(\n task, \"MemoryChunkSize\", MappingTable.tasks.value, output_as_json=True\n )\n memory_chunk_size = memory_chunk_size.get(\"MemoryChunkSize\", {}).get(\n \"Value\", None\n )\n if memory_chunk_size is not None:\n size = memory_chunk_size\n volatile_size = total_capacity - size\n else:\n # finding 
memory chunk size percentage\n memory_chunk_size_percentage = Mapper.get_single_attribute(\n task,\n \"MemoryChunkSizePercentage\",\n MappingTable.tasks.value,\n output_as_json=True,\n )\n memory_chunk_size_percentage = memory_chunk_size_percentage.get(\n \"MemoryChunkSizePercentage\", {}\n ).get(\"Value\", None)\n if memory_chunk_size_percentage is not None:\n size = total_capacity * memory_chunk_size_percentage / 100\n volatile_size = total_capacity - size\n # returning value in MiB\n return volatile_size * 1024", "def grow(self, seconds):\n for n in self.children:\n\n n.grow(seconds)\n\n energy_needed = Trunk.ENERGY_NEEDED_TO_GROW_ONE_SECOND * seconds\n\n #verify that this trunk has the energy needed to grow\n if self.energy >= energy_needed:\n\n #varify that this trunk has not reached its maximum length\n if self.current_length < self.max_length:\n\n self.energy -= energy_needed\n\n self.current_length += self.max_grow_unit_one_second * seconds\n\n if self.current_length > self.max_length and len(self.children) <= 0:\n\n self.current_length = self.max_length\n\n self.children.extend(Trunk.next_children(self.type, self.max_length))", "def create_new_volume(self, volumeInfo, change_name=True):\n size = volumeInfo.get(SVC_KEY_VDISK_CAPACITY)\n if (change_name):\n new_volume_name = self._get_new_volume_name(\n volumeInfo.get(SVC_KEY_VDISK_NAME))\n else:\n new_volume_name = volumeInfo.get(SVC_KEY_VDISK_NAME)\n if SVC_KEY_VOLUME_GROUP in volumeInfo:\n volumeGroup = volumeInfo.get(SVC_KEY_VOLUME_GROUP)\n elif self.dft_stg_pool:\n volumeGroup = self.dft_stg_pool\n else:\n volumeGroup = self.get_mdisk_grp_by_size(size)\n\n if volumeGroup is None:\n raise SVCNoSANStoragePoolException\n\n # iogrp parameter should not use name since it could be\n # customized. 
It is always safe to use iogrp 0.\n cmd = \"svctask mkvdisk -name %s -iogrp 0 -mdiskgrp %s \" \\\n \"-size %s -unit b\" % (new_volume_name, volumeGroup, size)\n\n output, err_output = self._svc_command(cmd)\n\n volume_uid = self.get_uid(new_volume_name)\n\n # Check if it got created\n if not volume_uid:\n # The SVC message of out of space is not really user friendly.\n # So, we will manully check whether the pool ran out of space\n free_capacity = self.get_mdisk_grp_size(volumeGroup)\n\n if float(size) > float(free_capacity):\n ex_args = {'pool_name': volumeGroup,\n 'size': size,\n 'free_capacity': free_capacity}\n raise SVCVolumeGroupOutOfSpace(**ex_args)\n if err_output:\n ex_args = {'new_volume_name': new_volume_name,\n 'err_output': err_output}\n raise SVCVolumeCreationFailed(**ex_args)\n else:\n # failed to create volume but with no error msg\n # really shouldn't hit this condition\n ex_args = {'cmd': cmd,\n 'e': _(\"No error available\")}\n raise SVCCommandException(**ex_args)\n\n return new_volume_name, volume_uid", "def test_08_migrate_vm_live_resize_volume(self):\n global vm\n global data_disk_1\n\n vol = self.helper.resize_volume(apiclient = self.apiclient, volume = data_disk_1, shrinkOk = False, maxiops = 15000)\n # Migrate all volumes and VMs\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def total_volume(bottle_size, pressure=DEFAULT_BOTTLE_PRESSURE):\n return bottle_size * pressure", "def _get_size(self, volume, app_inst=None, si_name=None, vol_name=None):\n policies = self._get_policies_for_resource(volume)\n si_name = si_name if si_name else policies['default_storage_name']\n vol_name = vol_name if vol_name else policies['default_volume_name']\n if not app_inst:\n vol_url = URL_TEMPLATES['ai_inst']().format(\n _get_name(volume['id']))\n app_inst = self._issue_api_request(vol_url)\n size = app_inst[\n 'storage_instances'][si_name]['volumes'][vol_name]['size']\n return size", "def _mount_gluster_vol(self, mount_path, ensure=False):\n self._execute('mkdir', '-p', mount_path)\n command = ['mount', '-t', 'glusterfs', self.gluster_manager.export,\n mount_path]\n self._do_mount(command, ensure)", "async def async_volume_up(self) -> None:\n await self._volumio.volume_up()", "async def volume(self, ctx: commands.Context, volume: int):\n if not 0 <= volume <= 100:\n raise InvalidVolume()\n\n player = ctx.bot.lavalink.player_manager.get(ctx.guild.id)\n \n await player.set_volume(volume)\n await ctx.send(f'Volume alterado para {volume}%.')", "def _create_volume(self):\n vol = {}\n vol['size'] = 1\n vol['availability_zone'] = 'test'\n return db.volume_create(self.context, vol)['id']", "def test_update_volume_stats_cached(self):\n self._fail_host_storage = True\n actual = self.driver.get_volume_stats(False)\n self.assertEqual('HGST', actual['vendor_name'])\n self.assertEqual('hgst', actual['storage_protocol'])\n self.assertEqual(90, actual['total_capacity_gb'])\n self.assertEqual(87, actual['free_capacity_gb'])\n self.assertEqual(0, actual['reserved_percentage'])", "def allowed_volumes(context, requested_volumes, size):\n project_id = context.project_id\n context = 
context.elevated()\n size = int(size)\n requested_gigabytes = requested_volumes * size\n used_volumes, used_gigabytes = db.volume_data_get_for_project(context,\n project_id)\n quota = get_project_quotas(context, project_id)\n allowed_volumes = _get_request_allotment(requested_volumes, used_volumes,\n quota['volumes'])\n allowed_gigabytes = _get_request_allotment(requested_gigabytes,\n used_gigabytes,\n quota['gigabytes'])\n allowed_volumes = min(allowed_volumes,\n int(allowed_gigabytes // size))\n return min(requested_volumes, allowed_volumes)", "def volume(self, volume_percent: int = None, device: str = None, **kwargs):\n device = self.get_device(device)\n if volume_percent is None:\n return device.volume_percent\n\n assert 0 <= volume_percent <= 100\n return self._put(\n API.VOLUME.value,\n volume_percent=volume_percent,\n device_id=device.id,\n check_202=True,\n **kwargs,\n )", "def test_least_busy_host_gets_volume(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_id1 = self._create_volume()\n volume1.create_volume(self.context, volume_id1)\n volume_id2 = self._create_volume()\n host = self.scheduler.driver.schedule_create_volume(self.context,\n volume_id2)\n self.assertEqual(host, 'host2')\n volume1.delete_volume(self.context, volume_id1)\n db.volume_destroy(self.context, volume_id2)\n volume1.kill()\n volume2.kill()" ]
[ "0.64236933", "0.6261913", "0.60468847", "0.587187", "0.5659941", "0.5560449", "0.53718984", "0.534629", "0.5339353", "0.53099704", "0.52602965", "0.5249011", "0.518899", "0.5125479", "0.50864977", "0.5082796", "0.5080378", "0.50407803", "0.50402933", "0.5031618", "0.5022762", "0.5007264", "0.49975425", "0.49558678", "0.49314073", "0.490756", "0.4846032", "0.48446414", "0.4841026", "0.4829933", "0.47881892", "0.47798368", "0.47795236", "0.47302505", "0.47232547", "0.47160947", "0.47156537", "0.46991917", "0.46976328", "0.46960807", "0.46931586", "0.4688799", "0.4662391", "0.4656912", "0.46504974", "0.4640914", "0.4625365", "0.46235147", "0.45906305", "0.4572263", "0.45695886", "0.45683116", "0.4560902", "0.45606098", "0.4552189", "0.45516154", "0.45506105", "0.4548121", "0.4547609", "0.45281893", "0.4520379", "0.4518305", "0.45104742", "0.44918856", "0.44897795", "0.4489005", "0.44887543", "0.4484405", "0.44751278", "0.4469567", "0.44689307", "0.44651", "0.4464762", "0.44642997", "0.4462862", "0.44614473", "0.4456811", "0.44509047", "0.44382545", "0.4438153", "0.44295612", "0.4420578", "0.4419796", "0.44095942", "0.4408972", "0.44079727", "0.44051912", "0.43960637", "0.4394124", "0.4393937", "0.4388437", "0.43768823", "0.4375729", "0.43707508", "0.43690044", "0.43669227", "0.43633035", "0.43470308", "0.43468502", "0.434496" ]
0.69317895
0
This class tests the PyTorchYolo object detector.
def get_pytorch_yolo(get_default_cifar10_subset): import cv2 import torch from pytorchyolo import models from pytorchyolo.utils.loss import compute_loss from art.estimators.object_detection.pytorch_yolo import PyTorchYolo model_path = "/tmp/PyTorch-YOLOv3/config/yolov3.cfg" weights_path = "/tmp/PyTorch-YOLOv3/weights/yolov3.weights" model = models.load_model(model_path=model_path, weights_path=weights_path) class YoloV3(torch.nn.Module): def __init__(self, model): super().__init__() self.model = model def forward(self, x, targets=None): if self.training: outputs = self.model(x) # loss is averaged over a batch. Thus, for patch generation use batch_size = 1 loss, loss_components = compute_loss(outputs, targets, self.model) loss_components_dict = {"loss_total": loss} return loss_components_dict else: return self.model(x) model = YoloV3(model) object_detector = PyTorchYolo( model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=("loss_total",) ) n_test = 10 (_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset x_test_cifar10 = x_test_cifar10[0:n_test] x_test = cv2.resize( x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC ).transpose((2, 0, 1)) x_test = np.expand_dims(x_test, axis=0) x_test = np.repeat(x_test, repeats=2, axis=0) # Create labels result = object_detector.predict(x=x_test) y_test = [ { "boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]), }, { "boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]), }, ] yield object_detector, x_test, y_test
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detection(input_path, output_path, yolo_model_path):\n detector = VideoObjectDetection()\n # this function sets the model type of the object detection instance you created to the YOLOv3 model\n detector.setModelTypeAsYOLOv3()\n # this function accepts a string that must be the path to the model file,\n # it must correspond to the model typeset for the object detection instance\n detector.setModelPath(yolo_model_path)\n # this function loads the model from the path given\n detector.loadModel()\n\n # the function performs object detection on a video file or video live-feed\n # after the model has been loaded into the instance that was created\n detector.detectCustomObjectsFromVideo(input_file_path=input_path, output_file_path=output_path,\n frames_per_second=20, log_progress=True)", "def yolo_test_file(self):\n # Detect objects\n annotatedImage, predictedObjects = self.detect_from_file(\n self.inputFile)\n # Show image\n if self.showImage:\n cv2.imshow('YOLO Detection', annotatedImage)\n cv2.waitKey(10)\n # Save annotated image\n if self.saveAnnotatedImage:\n cv2.imwrite(self.outputFile, annotatedImage)\n # Save the parameters of detected objects in xml format\n if self.saveAnnotatedXML:\n xmlFileName = os.path.join(\n self.textOutputFolder,\n self.outputFile.split('.')[0] + '.xml')\n self.save_xml(xmlFileName, predictedObjects)", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def yolo_test_video(self):\n # Open the input video, blocking call\n inputVideo = cv2.VideoCapture(self.inputFile)\n\t\t\n # Get infomration about the input video\n codec = int(inputVideo.get(cv2.CAP_PROP_FOURCC))\n fps = int(inputVideo.get(cv2.CAP_PROP_FPS))\n frameWidth = int(inputVideo.get(cv2.CAP_PROP_FRAME_WIDTH))\n frameHeight = int(inputVideo.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Open the output stream\n outputVideo = cv2.VideoWriter(self.outputFile,\n codec,\n fps,\n (frameWidth,frameHeight))\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n totalFrames = inputVideo.get(cv2.CAP_PROP_FRAME_COUNT)\n \t \n\tavgGrabTime = 0\n\tavgYoloTime = 0\n\tavgWriteTime = 0\n \n # For each frame in the video\n while True:\n \n startTime = time.time()\n \n # Calculate the time it takes to grab a frame\n startGrabTime = time.time()\n grabbed, frame = inputVideo.read()\n endGrabTime = time.time() \n\t avgGrabTime+=(endGrabTime-startGrabTime)\n\t \n\n if 
grabbed:\n\t\t\n # Calculate the time it takes to run YOLO pipeline \n\t\tstartYoloTime = time.time()\n annotatedFrame, predictedObjects = self.detect_from_image(frame)\n\t\tendYoloTime = time.time()\n\t\tavgYoloTime+= ( endYoloTime - startYoloTime)\n\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n \t\n\t\tcurrentTime = time.time()\n\t\telapsedTime = currentTime - startTime\n\t\tcurrentFPS = (1)/elapsedTime \n\t\t \t\n #cv2.rectangle(annotatedFrame, (0, 0), (30, 30), (0,0,0), -1)\n cv2.putText(\n annotatedFrame, 'FPS' + ': %.2f' % currentFPS,\n (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 2\n )\n\t\t\n # Calculate the time it takes to write an annotated frame to video\n\t\tstartWriteTime = time.time()\n outputVideo.write(annotatedFrame)\n\t\tendWriteTime = time.time()\n\t\tavgWriteTime +=(endWriteTime - startWriteTime)\n\t\n else:\n inputVideo.set(cv2.CAP_PROP_POS_FRAMES, frameIndex-1)\n cv2.waitKey(100)\n\n if frameIndex==totalFrames:\n break\n\t\t\n inputVideo.release()\n outputVideo.release()\n cv2.destroyAllWindows()\n \n avgGrabTime/=totalFrames\n avgYoloTime/=totalFrames\n avgWriteTime/=totalFrames\n\n if self.verbose:\n print ('Average time for extracting compressed video frame : %.3f' %avgGrabTime)\n print ('Average time for YOLO object detection : %.3f' %avgYoloTime )\n print ('Average time for writing frame to video : %.3f' %avgWriteTime)", "def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]", "def run_yolo_onpic(image_path):\n try:\n Image.open(image_path)\n # print('running detector on %s' % image_path)\n except:\n print('Cannot open image', image_path)\n return 0, 0, 0\n output_file = \"predictions_\" + os.path.basename(image_path)\n test_detector(b'cfg/coco.data', b'cfg/yolo.cfg', b'yolo.weights',\n image_path.encode('utf-8'), parameters.YOLO_THRES, 0.5, output_file.encode('utf-8'))\n w, h, o = read_bounding_boxes('bounding_boxes.txt')\n return w, h, o", "def run_detect(**kwargs):\n 
cmd = 'python yolov3/detect.py'\n pms_list = [\n 'image_folder', 'model_def', \n 'weights_path', 'class_path', \n 'conf_thres', 'nms_thres',\n 'batch_size', 'n_cpu', \n 'img_size', 'checkpoint_model'\n ]\n call_command(pms_list, cmd, kwargs)", "def object_detection(self):\r\n pass", "def yolo_test_db(self):\n # For each file in database\n for inputFileName in tqdm.tqdm(os.listdir(self.inputFolder)):\n # File path\n inputFile = os.path.join(self.inputFolder, inputFileName)\n # Detect object\n annotatedImage, predictedObjects = self.detect_from_file(\n inputFile)\n # Show image\n if self.showImage:\n cv2.imshow('YOLO Detection', annotatedImage)\n cv2.waitKey(1)\n # Save annotated image\n if self.saveAnnotatedImage:\n outputFileName = os.path.join(self.outputFolder, inputFileName)\n cv2.imwrite(outputFileName, annotatedImage)\n # Save the parameters of detected objects in xml format\n if self.saveAnnotatedXML:\n xmlFileName = os.path.join(\n\n self.textOutputFolder, fileName.split('.')[0] + '.xml')\n self.save_xml(xmlFileName, predictedObjects)", "def testVideoOnObjectDetection(testVideo1, testVideo2, label):\n \n this_dir = os.path.abspath(os.path.join(os.getcwd(), '../objectDetection/testing/'))\n \n print('****************************************************************************************************')\n print('getenv: ', os.getcwd())\n print(\"this_dir: \", this_dir)\n print('labelmap: ', os.path.abspath(os.path.join(this_dir, \"..\", \"training/labelmap.pbtxt\")))\n print('****************************************************************************************************')\n \n GRAPH_PATH = os.path.abspath(os.path.join(this_dir, \"..\", \"inference_graph/frozen_inference_graph.pb\"))\n LABEL_PATH = os.path.abspath(os.path.join(this_dir, \"..\", \"training/labelmap.pbtxt\"))\n \n video1 = cv2.VideoCapture(testVideo1)\n video2 = cv2.VideoCapture(testVideo2)\n \n coors = objectDetection.coordinates.coordinates()\n obj_detect = hand_detection.Object_Detection(coors, GRAPH_PATH, LABEL_PATH, video1, video2, Verbose=True)\n \n results = []\n \n while(video1.isOpened() and video2.isOpened()):\n output = obj_detect.Detect()\n if output is None: break\n else: results.append(output)\n \n cv2.destroyAllWindows()\n \n print(results)\n print([result for result in results])\n correct = CheckWrong([result[\"video1\"][\"classes\"] for result in results], label)\n \n assert correct == True\n \n return", "def enable_detector_yolo():\n global enabled_detector, enable_detection, detector, use_cuda\n if not enable_detection:\n enable_detection = True\n\n thresh = request.form[\"thresh\"]\n confidence = request.form['confidence']\n distance_check = request.form['tracker_dst']\n\n if thresh == '':\n thresh = float(0.25)\n\n if confidence == '':\n confidence = float(0.25)\n\n if distance_check == '':\n distance_check = float(350)\n\n print('Using thresh and conf {} {}'.format(thresh, confidence))\n detector = Yolo(confidence_param=confidence,\n thresh_param=thresh, use_cuda=use_cuda, distance_check=distance_check)\n if detector is not None:\n enabled_detector = \"Yolo4 tiny detector\"\n return render_settings_view()", "def test_predictor():", "def run_test(**kwargs):\n cmd = 'python yolov3/test.py'\n pms_list = [\n 'batch_size', 'model_def',\n 'data_config', 'weights_path',\n 'class_path', 'iou_thres',\n 'nms_thres', 'conf_thres',\n 'n_cpu', 'img_size'\n ]\n call_command(pms_list, cmd, kwargs)", "def main(\n image = None ,\n gpu = -1,\n weights_path= f\"{ Path(__file__).parent }/weights/yolov3.weights\",\n 
background = False\n):\n print( weights_path )\n my_path = Path( __file__ ).parent\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=int, default= gpu )\n parser.add_argument('--cfg', type=str, default=my_path/'config/yolov3_default.cfg')\n parser.add_argument('--ckpt', type=str,\n help='path to the checkpoint file')\n parser.add_argument('--weights_path', type=str,\n default= weights_path, help='path to weights file')\n parser.add_argument('--image', type=str , default= image )\n parser.add_argument('--background', type=bool,\n default= background , help='background(no-display mode. save \"./output.png\")')\n parser.add_argument('--detect_thresh', type=float,\n default= 0.5 , help='confidence threshold')\n args = parser.parse_args()\n\n with open(args.cfg, 'r') as f:\n cfg = yaml.load(f)\n\n imgsize = cfg['TEST']['IMGSIZE']\n model = YOLOv3(cfg['MODEL'])\n\n confthre = cfg['TEST']['CONFTHRE'] \n nmsthre = cfg['TEST']['NMSTHRE']\n\n if args.detect_thresh:\n confthre = args.detect_thresh\n\n\n\n img = imread( args.image )\n if img is None :\n print( \"load image failed\" )\n print( args.image )\n return\n\n img_raw = img.copy()[:, :, ::-1].transpose((2, 0, 1))\n img, info_img = preprocess(img, imgsize, jitter=0) # info = (h, w, nh, nw, dx, dy)\n img = np.transpose(img / 255., (2, 0, 1))\n img = torch.from_numpy(img).float().unsqueeze(0)\n\n if args.gpu >= 0:\n model.cuda(args.gpu)\n img = Variable(img.type(torch.cuda.FloatTensor))\n else:\n img = Variable(img.type(torch.FloatTensor))\n\n assert args.weights_path or args.ckpt, 'One of --weights_path and --ckpt must be specified'\n\n if args.weights_path:\n print(\"loading yolo weights %s\" % (args.weights_path))\n parse_yolo_weights(model, args.weights_path)\n elif args.ckpt:\n print(\"loading checkpoint %s\" % (args.ckpt))\n state = torch.load(args.ckpt)\n if 'model_state_dict' in state.keys():\n model.load_state_dict(state['model_state_dict'])\n else:\n model.load_state_dict(state)\n\n model.eval()\n\n\n with torch.no_grad():\n outputs1 = model(img)\n # np.save(\"output.npy\" , outputs.numpy() )\n # torch.save( outputs1 , \"outputs1.pt\" )\n out1 = torch.load( \"outputs1.pt\" )\n rere = torch.equal( outputs1 , out1 )\n outputs = postprocess(outputs1, 80, confthre, nmsthre)\n\n a = \"hoho\"\n\n\n if outputs[0] is None:\n print(\"No Objects Deteted!!\")\n return\n\n coco_class_names, coco_class_ids, coco_class_colors = get_coco_label_names()\n\n bboxes = list()\n classes = list()\n colors = list()\n\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in outputs[0]:\n\n cls_id = coco_class_ids[int(cls_pred)]\n print(int(x1), int(y1), int(x2), int(y2), float(conf), int(cls_pred))\n print('\\t+ Label: %s, Conf: %.5f' %\n (coco_class_names[cls_id], cls_conf.item()))\n box = yolobox2label([y1, x1, y2, x2], info_img)\n bboxes.append(box)\n classes.append(cls_id)\n colors.append(coco_class_colors[int(cls_pred)])\n\n # args.background = True\n\n if args.background:\n import matplotlib\n matplotlib.use('Agg')\n\n from utils.vis_bbox import vis_bbox\n\n vis_bbox(\n img_raw, bboxes, label=classes, label_names=coco_class_names,\n instance_colors=colors, linewidth=2)\n\n\n if args.background:\n output = Path( \"./output\" )\n output.mkdir( parents=True , exist_ok=True )\n now = datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n output /= f\"output-{now}.png\"\n plt.savefig( output )\n\n return str( output.absolute() )\n # return plt_to_qpixmap(plt.gca())\n else :\n plt.show()", "def test():\n\n # load image and adjust its format\n if 
MEMORY_CACHE:\n test_input = dataset[0]['file']\n oriImg = test_input.byte().permute((1, 2, 0)).numpy() # B,G,R order\n else:\n oriImg = cv2.imread(dataset[0]['file']) # B,G,R order\n test_input = torch.from_numpy(oriImg).permute((2, 0, 1)).float()\n \n # transfer data on GPU on demand\n if CUDA:\n test_input = test_input.cuda()\n\n # perform prediction\n net.eval()\n with torch.no_grad():\n result = net(test_input.unsqueeze(0))[0]\n\n print(result)\n\n # draw rectangles and its class\n img = cv2.cvtColor(oriImg, cv2.COLOR_BGR2RGB)\n for box, label, score in zip(result['boxes'], result['labels'], result['scores']):\n # if score > 0.5:\n if label < len(orig_labels):\n img = cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)\n img = cv2.putText(img, '{}: {:.0%}'.format(orig_labels[label], score), (box[0] + 5, box[3] - 5), cv2.FONT_HERSHEY_SIMPLEX, .7, (0, 255, 0), 2, cv2.LINE_AA)\n plt.imshow(img)\n plt.axis('off')\n plt.show()", "def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)", "def setup_class(cls):\n super().setup_class()\n cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n cls.headPoseEstimator = cls.faceEngine.createHeadPoseEstimator()\n cls.livenessEstimator = cls.faceEngine.createLivenessV1Estimator()\n cls.detection = cls.detector.detectOne(VLImage.load(filename=CLEAN_ONE_FACE))", "def predict_from_cv2(yolo, inputfilepath):\n\n print(\"call func of predict_from_cv2\")\n img = cv2.imread(inputfilepath)\n yolo_results = yolo.predict(img)\n for yolo_result in yolo_results:\n print(yolo_result.get_detect_result())", "def detect_video(yolo_v3_model, video_path, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # obtain VideoCapture object \n vid = cv2.VideoCapture(video_path)\n \n # obtain width, height and fps of video\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n\n # obtain video codec\n codec = cv2.VideoWriter_fourcc(*'XVID')\n \n # obtain output_path\n # output_path must be .mp4\n out = cv2.VideoWriter(output_path, codec, fps+1, (width, height)) \n\n # create list to store images\n images = []\n \n # variable to track frame\n frame = 0 \n \n while True:\n \n try:\n \n # grabs, decodes and returns the next video frame\n _, image = vid.read()\n \n # append original image to original_images list\n images.append(image[:])\n \n # increment frame\n frame += 1\n \n \n # if current frame is less than batch_frames\n if frame < batch_frames:\n \n # move to next frame \n continue\n \n # iterate over images in 
chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # convert original image to grayscale \n image = cv2.cvtColor(images[-batch_frames + x + 1], cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n \n # obtain concat frame if none exist\n if x == 0: \n \n concat_image = image[:]\n \n # concatenate subsequent frames to concat_image\n else:\n \n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n except:\n \n break\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n\n # list to store bboxes from respective scales\n pred_bbox = []\n\n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n\n # append to pred_bbox\n pred_bbox.append(pred_result)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n\n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, images[-1], train_input_size, score_threshold)\n\n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n\n # draw bbox on latest image in orignal_images\n image = draw_bbox(images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image frame to video path if path to save is given\n if output_path != '': out.write(image)\n \n # display image frame (i.e play video) if show is true \n if show:\n \n # show the image\n cv2.imshow('output', image)\n \n # if q key is presssed\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n \n # end session\n cv2.destroyAllWindows()\n \n # break out of while loop\n break\n \n # When everything done, release the capture\n vid.release()\n cv2.destroyAllWindows()", "def detect_image(yolo_v3_model, image_paths, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # create list to store images\n original_images = []\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # obtain original image\n original_image = cv2.imread(image_paths[x])\n \n # append original image to original_images list\n original_images.append(original_image[:])\n \n # convert original image to grayscale \n image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n\n # obtain concat frame if none exist\n if x == 0: \n\n concat_image = image[:]\n\n # concatenate subsequent frames to concat_image\n else:\n\n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated 
image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n \n # list to store bboxes from respective scales\n pred_bbox = []\n \n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_result_reshaped = tf.reshape(pred_result, (-1, tf.shape(pred_result)[-1]))\n \n # append to pred_bbox\n pred_bbox.append(pred_result_reshaped)\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n \n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, original_images[-1], train_input_size, score_threshold)\n \n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n \n # draw bbox on latest image in orignal_images\n image = draw_bbox(original_images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image if path to save is given\n if output_path != '': cv2.imwrite(output_path, image)\n \n # display image if show is true \n if show:\n \n # show the image\n cv2.imshow(\"predicted image\", image)\n \n # load and hold the image\n cv2.waitKey(0)\n \n # to close the window after the required kill value was provided\n cv2.destroyAllWindows()\n \n return image", "def run_yolo(net, image, coco_classes, save_image=False):\n\n global frame, classes\n # Give the configuration and weight files for the model and load the network using them.\n classes = coco_classes\n\n frame = cv2.imread(str(image))\n\n # Crop the frame\n # (y_min, y_max) (x_min, x_max)\n # frame = frame[300:1080, 200:1920] # Classifying people\n # frame = frame[0:500, 0:1920] # Classifying Cars\n\n # Stop the program if reached end of video\n if frame is None:\n return\n\n # Create a 4D blob from a frame.\n blob = cv2.dnn.blobFromImage(\n frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False\n )\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n\n # Remove the bounding boxes with low confidence\n postprocess(frame, outs, save_image)\n\n # Get the overall time for inference(t) and the timings for each of the layers(in layersTimes)\n t, _ = net.getPerfProfile()\n label = \"Inference time: %.2f ms\" % (t * 1000.0 / cv2.getTickFrequency())\n # cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n print(label)\n\n # Save image with all bounding boxes\n # utils.write_image(frame)", "def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # 
predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))", "def demo(sess, net, img_path):\n\n # Load the demo image\n once_time = 0\n\n im = cv2.imread(img_path)\n im = cv2.resize(im, (227, 227))\n # im = im[np.newaxis, :, :, :]\n t = time.time()\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n print('subtract consume time {}s'.format(time.time() - t))\n im = im_orig[np.newaxis, :, :, :]\n # print('>>>>>>>', im.shape[0], im.shape[1])\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n yaw, pitch, roll, yaw_raw, pitch_raw, roll_raw = net.test_image(sess, im)\n # yaw, pitch = net.test_image(sess, im)\n print(yaw, pitch, roll)\n # print(yaw_raw)\n # print(pitch_raw)\n # print(roll_raw)\n timer.toc()\n once_time = timer.total_time\n print('Detection took {:.3f}s'.format(timer.total_time))\n\n # cv2_vis(im, CLASSES[1], dets, result_file)\n return yaw, pitch, roll, once_time", "def enable_detector_yolo_full():\n global enabled_detector, enable_detection, detector, use_cuda\n if not enable_detection:\n enable_detection = True\n\n thresh = request.form[\"thresh\"]\n confidence = request.form['confidence']\n distance_check = request.form['tracker_dst']\n\n if thresh == '':\n thresh = float(0.3)\n\n if confidence == '':\n confidence = float(0.5)\n\n if distance_check == '':\n distance_check = float(350)\n\n yolo4_cfg = os.path.join(\n \"detectors/yolo_detector/weights/yolo4coco/yolo4.cfg\")\n yolo4_weights = os.path.join(\n \"detectors/yolo_detector/weights/yolo4coco/yolo4.weights\")\n labels = os.path.join(\n \"detectors/yolo_detector/weights/yolo-coco/coco.names\")\n\n detector = Yolo(config=yolo4_cfg, weights=yolo4_weights, labels=labels,\n confidence_param=confidence, thresh_param=thresh, use_cuda=use_cuda, distance_check=distance_check)\n if detector is not None:\n enabled_detector = \"Yolo4 detector\"\n return render_settings_view()", "def demo(net, data_dir, imgfile, out_dir):\n\n # Load the demo image\n im_file = os.path.join(data_dir, imgfile)\n im = cv2.imread(im_file)\n\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n scores = np.squeeze(scores)\n timer.toc()\n print ('Detection took {:.3f}s for '\n '{:d} object 
proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0.12\n NMS_THRESH = 0.3\n color_white = (0, 0, 0)\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 \n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n bbox = map(int, bbox)\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=4)\n cv2.putText(im, '%s %.3f' % (cls, score), (bbox[0], bbox[1] + 15),\n color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)\n return im", "def test():\n args = parse_args()\n\n devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0\n context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=True, device_id=devid)\n\n # logger\n args.outputs_dir = os.path.join(args.log_path,\n datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n rank_id = int(os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0\n args.logger = get_logger(args.outputs_dir, rank_id)\n\n context.reset_auto_parallel_context()\n parallel_mode = ParallelMode.STAND_ALONE\n context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=1)\n\n args.logger.info('Creating Network....')\n network = SolveOutput(YOLOV3DarkNet53(is_training=False))\n\n data_root = args.data_root\n ann_file = args.annFile\n\n args.logger.info(args.pretrained)\n if os.path.isfile(args.pretrained):\n param_dict = load_checkpoint(args.pretrained)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('yolo_network.'):\n param_dict_new[key[13:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n args.logger.info('load_model {} success'.format(args.pretrained))\n else:\n args.logger.info('{} not exists or not a pre-trained file'.format(args.pretrained))\n assert FileNotFoundError('{} not exists or not a pre-trained file'.format(args.pretrained))\n exit(1)\n\n config = ConfigYOLOV3DarkNet53()\n if args.testing_shape:\n config.test_img_shape = conver_testing_shape(args)\n\n ds, data_size = create_yolo_dataset(data_root, ann_file, is_training=False, batch_size=1,\n max_epoch=1, device_num=1, rank=rank_id, shuffle=False,\n config=config)\n\n args.logger.info('testing shape : {}'.format(config.test_img_shape))\n args.logger.info('totol {} images to eval'.format(data_size))\n\n network.set_train(False)\n # build attacker\n attack = DeepFool(network, num_classes=80, model_type='detection', reserve_ratio=0.9, bounds=(0, 1))\n input_shape = Tensor(tuple(config.test_img_shape), ms.float32)\n\n args.logger.info('Start inference....')\n batch_num = args.samples_num\n adv_example = []\n for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):\n if i >= batch_num:\n break\n image = data[\"image\"]\n image_shape = data[\"image_shape\"]\n\n gt_boxes, gt_logits = network(image, input_shape)\n gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()\n gt_labels = np.argmax(gt_logits, axis=2)\n\n adv_img = attack.generate((image.asnumpy(), image_shape.asnumpy()), (gt_boxes, gt_labels))\n adv_example.append(adv_img)\n 
np.save('adv_example.npy', adv_example)", "def setUp(self):\n \n self.DetectorObj = Detector(light_type, position, angle)\n\n self.detector_type = self.DetectorObj.detector_type\n self.psd = self.DetectorObj.psd\n self.intensity = self.DetectorObj.intensity\n self.database = self.DetectorObj.database\n self.position = self.DetectorObj.position\n self.angle = self.DetectorObj.angle\n self.linearity_curve = self.DetectorObj.linearity_curve\n self.FOV = self.DetectorObj.FOV\n \n pass", "def yolo_forward(net, LABELS, image, confidence_level, save_image=False):\n\n # initialize a list of colors to represent each possible class label\n np.random.seed(42)\n colors = np.random.randint(0, 255, size=(10000, 3),\n dtype='uint8')\n\n # grab image spatial dimensions\n (H, W) = image.shape[:2]\n\n # determine only the *output* layer names that we need from YOLO\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # construct a blob from the input image and then perform a forward\n # pass of the YOLO object detector, giving us our bounding boxes and\n # associated probabilities\n # also time it\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layer_outputs = net.forward(ln)\n end = time.time()\n\n # show timing information on YOLO\n print('[INFO] YOLO took {:.6f} seconds'.format(end - start))\n\n # initialize our lists of detected bounding boxes, confidences, and\n # class IDs, respectively\n boxes = []\n confidences = []\n class_ids = []\n\n # loop over each of the layer outputs\n for output in layer_outputs:\n # loop over each of the detections\n for detection in output:\n # extract the class ID and confidence (i.e., probability) of\n # the current object detection\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n\n # filter out weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > confidence_level:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype('int')\n\n # use the center (x, y)-coordinates to derive the top and\n # and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of bounding box coordinates, confidences,\n # and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n # apply non-maxima suppression to suppress weak, overlapping bounding\n # boxes\n # idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_level, threshold)\n\n print(class_ids)\n print(LABELS)\n # print(labels)\n\n labels = [LABELS[i] for i in class_ids]\n\n if save_image:\n yolo_save_img(image, class_ids, boxes, labels, confidences, colors, 'python_predictions.jpg')\n\n return class_ids, labels, boxes, confidences", "def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n #boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n #count = int(get_output_tensor(interpreter, 3))\n\n #results = []\n #for i in range(count):\n # if scores[i] >= 
threshold:\n # result = {\n # #'bounding_box': boxes[i],\n # 'class_id': classes[i],\n # 'score': scores[i]\n # }\n # results.append(result)\n \n \n #print(\"detection results:\\n\" + str(results))\n #return results\n return np.array([int(_class) for _class in classes]), np.array(scores)", "def detect(parser):\n cli_args = add_all_args(parser, DETECTION)\n detector = Detector(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n classes_file=cli_args.classes,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n )\n check_args = [\n item for item in [cli_args.image, cli_args.image_dir, cli_args.video] if item\n ]\n assert (\n len(check_args) == 1\n ), 'Expected --image or --image-dir or --video, got more than one'\n target_photos = []\n if cli_args.image:\n target_photos.append(get_abs_path(cli_args.image))\n if cli_args.image_dir:\n target_photos.extend(\n get_abs_path(cli_args.image_dir, image)\n for image in get_image_files(cli_args.image_dir)\n )\n if cli_args.image or cli_args.image_dir:\n detector.predict_photos(\n photos=target_photos,\n trained_weights=cli_args.weights,\n batch_size=cli_args.process_batch_size,\n workers=cli_args.workers,\n output_dir=cli_args.output_dir,\n )\n if cli_args.video:\n detector.detect_video(\n video=get_abs_path(cli_args.video, verify=True),\n trained_weights=get_abs_path(cli_args.weights, verify=True),\n codec=cli_args.codec,\n display=cli_args.display_vid,\n output_dir=cli_args.output_dir,\n )", "def __init__(self,\n yolo_masks,\n yolo_anchors,\n obj_threshold,\n nms_threshold,\n yolo_input_resolution):\n self.masks = yolo_masks\n self.anchors = yolo_anchors\n self.object_threshold = obj_threshold\n self.nms_threshold = nms_threshold\n self.input_resolution_yolo = yolo_input_resolution", "def test_basic(self):\n detect_objects(data={CLIP_IDS: []})", "def demo(net, image_name,num_class,save_ff):\r\n\r\n # Load the demo image\r\n #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\r\n im_file=image_name\r\n im = cv2.imread(im_file)\r\n\r\n # Detect all object classes and regress object bounds\r\n timer = Timer()\r\n timer.tic()\r\n #for zzz in range(100):\r\n scores, boxes = im_detect(net, im)\r\n timer.toc()\r\n print ('Detection took {:.3f}s for '\r\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\r\n\r\n # Visualize detections for each class\r\n CONF_THRESH = 0.35\r\n NMS_THRESH = 0.3\r\n thresh=CONF_THRESH\r\n for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]\r\n cls_ind += 1 # because we skipped background\r\n # cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\r\n # cls_scores = scores[:, cls_ind]\r\n # dets = np.hstack((cls_boxes,\r\n # cls_scores[:, np.newaxis])).astype(np.float32)\r\n inds = np.where(scores[:, cls_ind] > thresh)[0]\r\n cls_scores = scores[inds, cls_ind]\r\n if cfg.TEST.AGNOSTIC:\r\n cls_boxes = boxes[inds, 4:8]\r\n else:\r\n cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]\r\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\r\n .astype(np.float32, copy=False)\r\n keep = nms(dets, NMS_THRESH)\r\n dets = dets[keep, :]\r\n #vis_detections(im, cls, dets, thresh=CONF_THRESH)\r\n inds = np.where(dets[:, -1] >= thresh)[0]\r\n if len(inds) == 0:\r\n continue\r\n\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n for i in inds:\r\n bbox = dets[i, :4]\r\n score = dets[i, -1]\r\n print bbox,score,cls\r\n cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)\r\n 
#save_ff=\"/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg\"\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n cv2.imwrite(save_ff,im_tmp)\r\n #save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)\r", "def run(self):\n\n global image, preds, write_flag\n\n print(\"YOLOv4TrtThread: Loading the Engine...\")\n\n # build the YOLOv4 TensorRT model engine\n self.yolo_trt = TrtYOLO(model=self.model, input_shape=(288, 288), category_num=self.categories)\n\n \n print(\"YOLOv4TrtThread: start running...\")\n self.running = True\n while self.running:\n ret, frame = self.cam.read()\n\n if ret:\n results = self.yolo_trt.detect(frame, conf_th=self.conf_threshold)\n with self.condition:\n preds = results\n image = frame\n write_flag = True\n self.condition.notify()\n else:\n with self.condition:\n write_flag = False\n self.condition.notify()\n\n # delete the model after inference process\n del self.yolo_trt\n self.cam.release()\n\n print(\"YOLOv4TrtThread: stopped...\")", "def setup_class(cls):\n super().setup_class()\n cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_DEFAULT)\n cls.headPoseEstimator = cls.faceEngine.createHeadPoseEstimator()\n cls.image = VLImage.load(filename=ONE_FACE)\n cls.detection = TestHeadPose.detector.detectOne(cls.image, detect5Landmarks=True, detect68Landmarks=True)", "def predict_from_pil(yolo, inputfilepath):\n\n print(\"call func of predict_from_pil\")\n img = np.array(Image.open(inputfilepath))\n yolo_results = yolo.predict(img)\n for yolo_result in yolo_results:\n print(yolo_result.get_detect_result())", "def loop_and_detect(camera, trt_yolo, args, confidence_thresh, visual):\n fps = 0.0\n cumulative_frame_time = 0.0\n iterations = 0\n\n # endless loop when user provides single image/webcam\n while len(camera.imageNames) != 0:\n if args.activate_display and (cv2.getWindowProperty(WINDOW_NAME, 0) < 0):\n break\n img = camera.read()\n if img is None:\n break\n tic = time.time()\n boxes, confidences, classes = trt_yolo.detect(img, confidence_thresh)\n toc = time.time()\n if args.activate_display or args.write_images:\n img = visual.draw_bboxes(img, boxes, confidences, classes)\n img = show_fps(img, fps)\n if args.activate_display:\n cv2.imshow(WINDOW_NAME, img)\n\n frame_time = (toc - tic)\n cumulative_frame_time += frame_time\n curr_fps = 1.0 / frame_time\n # calculate an exponentially decaying average of fps number\n fps = curr_fps if fps == 0.0 else (fps * 0.95 + curr_fps * 0.05)\n if args.activate_display:\n key = cv2.waitKey(1)\n if key == 27: # ESC key: quit program\n break\n if args.write_images:\n path = os.path.join(args.image_output, os.path.basename(camera.currentImage))\n print(\"Image path: \", path)\n cv2.imwrite(path, img)\n print(\"FPS: {:3.2f} and {} Images left.\".format(fps, len(camera.imageNames)))\n append_coco(boxes, confidences, classes, camera)\n\n iterations += 1\n\n # Write coco json file when done\n coco_file = json.dumps(resultJson, cls=NpEncoder)\n f = open(args.result_json, \"w+\")\n f.write(coco_file)\n f.close()\n\n print(f\"Average FPS: {(1 / (cumulative_frame_time / iterations))}\")", "def demo(sess, net, image_name):\n # Load the demo image\n global CLASS_NAME\n global CHECK\n CHECK = 0\n # 读取的截图所在的位置\n # im_file = Cnn_path + \"data/VOCdevkit2007/VOC2007/JPEGImages/\" + image_name\n curpath = os.path.dirname(os.path.realpath(__file__))\n im_file = curpath + \"\\\\data\\\\VOCdevkit2007\\\\VOC2007\\\\JPEGImages\\\\\" + image_name\n im = cv2.imread(im_file)\n\n # Detect all 
object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n # score 阈值,最后画出候选框时需要,>thresh才会被画出\n CONF_THRESH = 0.5\n # 非极大值抑制的阈值,剔除重复候选框\n NMS_THRESH = 0.3\n # 利用enumerate函数,获得CLASSES中 类别的下标cls_ind和类别名cls\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n # 取出bbox ,score\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n # 将bbox,score 一起存入dets\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n # 进行非极大值抑制,得到抑制后的 dets\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # 画框\n vis_detections(im, cls, dets, thresh=CONF_THRESH)\n if CHECK == 0:\n CLASS_NAME = \"None\"\n # im = im[:, :, (2, 1, 0)]\n # fig, ax = plt.subplots()\n # ax.imshow(im, aspect='equal')\n # ax.set_title(\"None\",fontsize=10)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()\n # RES[INDS.__getitem__(image_name.split(\"_\")[0])][INDS.__getitem__(CLASS_NAME)]+=1\n # plt.savefig(\"./output/\"+CLASS_NAME+\"_\" + image_name)\n # plt.savefig(\"./output/\" + image_name)\n MAX_SCORE[0] = 0.0", "def run_yolo_indir(images_path):\n for filename in os.listdir(images_path):\n try:\n # print(filename)\n Image.open(os.path.join(images_path, filename))\n test_detector(b'cfg/voc.data', b'cfg/yolo.cfg', b'yolo.weights', os.path.join(\n images_path, filename).encode('utf-8'), parameters.YOLO_THRES, 0.5)\n w, h, o = read_bounding_boxes('bounding_boxes.txt')\n crop_all_bounding_boxes(o, filename, os.path.join, images_path)\n except:\n print('Cannot test image', filename)\n continue", "def test_detector(self, nexus_base):\n assert isinstance(nexus_base.detector, nx.NXdetector)", "def test_text_classifier_vaporise(self):\n pass", "def __init__(self):\n\t\tself.hog = cv2.HOGDescriptor()\n\t\tself.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())", "def yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n score_threshold=.2,\n nms_threshold=.3):\n yolo_outputs = yolo_outputs\n num_layers = len(yolo_outputs)\n max_per_image = 100\n anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]] # default setting\n input_shape = torch.Tensor([yolo_outputs[0].shape[2] * 32, yolo_outputs[0].shape[3] * 32]).type_as(yolo_outputs[0])\n input_shape = input_shape.cpu()\n boxes = []\n box_scores = []\n\n # output all the boxes and scores in two lists\n for l in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes.cpu())\n box_scores.append(_box_scores.cpu())\n # concatenate data based on batch size\n boxes = torch.cat(boxes, dim=1) # torch.Size([1, 10647, 4])\n box_scores = torch.cat(box_scores, dim=1) # torch.Size([1, 10647, num_classes])\n dets_ = []\n classes_ = []\n images_ = []\n for i in range(boxes.size(0)):\n mask = box_scores[i] >= score_threshold\n img_dets = []\n img_classes = []\n img_images = []\n for c in range(num_classes):\n # tf.boolean_mask(boxes, mask[:, c])\n class_boxes = boxes[i][mask[:, c]]\n if len(class_boxes) == 0:\n continue\n class_box_scores = box_scores[i][:, c][mask[:, c]]\n _, order = torch.sort(class_box_scores, 0, True)\n # do nms here.\n cls_dets = torch.cat((class_boxes, class_box_scores.view(-1, 1)), 1)\n cls_dets 
= cls_dets[order]\n keep = non_max_suppression(cls_dets.cpu().numpy(), nms_threshold)\n keep = torch.from_numpy(np.array(keep))\n cls_dets = cls_dets[keep.view(-1).long()]\n\n img_dets.append(cls_dets)\n img_classes.append(torch.ones(cls_dets.size(0)) * c)\n img_images.append(torch.ones(cls_dets.size(0)) * i)\n # Limit to max_per_image detections *over all classes*\n if len(img_dets) > 0:\n img_dets = torch.cat(img_dets, dim=0)\n img_classes = torch.cat(img_classes, dim=0)\n img_images = torch.cat(img_images, dim=0)\n\n if max_per_image > 0:\n if img_dets.size(0) > max_per_image:\n _, order = torch.sort(img_dets[:, 4], 0, True)\n keep = order[:max_per_image]\n img_dets = img_dets[keep]\n img_classes = img_classes[keep]\n img_images = img_images[keep]\n\n dets_.append(img_dets)\n classes_.append(img_classes)\n images_.append(img_images)\n\n if not dets_:\n return torch.Tensor(dets_), torch.Tensor(classes_), torch.Tensor(images_)\n dets_ = torch.cat(dets_, dim=0)\n images_ = torch.cat(images_, dim=0)\n classes_ = torch.cat(classes_, dim=0)\n\n return dets_, images_, classes_", "def yolo_show_img(image, class_ids, boxes, labels, confidences, colors):\n for i, box in enumerate(boxes):\n # extract the bounding box coordinates\n (x, y) = (box[0], box[1])\n (w, h) = (box[2], box[3])\n\n # draw a bounding box rectangle and label on the image\n color = [int(c) for c in colors[class_ids[i]]]\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 3)\n text = '{}: {:.4f}'.format(labels[i], confidences[i])\n print(text)\n\n font_scale = 1.3\n # set the rectangle background to white\n rectangle_bgr = color\n # set some text\n # get the width and height of the text box\n (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=1)[0]\n # set the text start position\n text_offset_x = x\n text_offset_y = y - 3 \n # make the coords of the box with a small padding of two pixels\n box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 10, text_offset_y - text_height - 10 ))\n cv2.rectangle(image, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n cv2.putText(image, text, (text_offset_x, text_offset_y), cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=font_scale, color=(255, 255, 255), thickness=2)\n\n cv2.imshow('yolo prediction', image)\n cv2.waitKey(0)", "def test_Image():\n assert Image(cur, \"Simple_Linear\").detect_image() == True\n assert Image(cur, \"Logistic_Linear\").detect_image() == False\n assert Image(cur, \"Simple_Linear\").date == \"2021-04-20\"\n assert Image(cur, \"Breslow-Day_Test\").source == \"Course BIOSTAT703 slide\"", "def _test_learner(self,\n num_channels: int,\n channel_display_groups: Any,\n num_classes: int = 5):\n\n with get_tmp_dir() as tmp_dir:\n class_config = ClassConfig(\n names=[f'class_{i}' for i in range(num_classes)])\n dataset_cfg = DatasetConfig(\n class_config=class_config,\n train_scenes=[\n make_scene(num_channels, num_classes, tmp_dir)\n for _ in range(2)\n ],\n validation_scenes=[\n make_scene(num_channels, num_classes, tmp_dir)\n for _ in range(2)\n ],\n test_scenes=[])\n data_cfg = ObjectDetectionGeoDataConfig(\n scene_dataset=dataset_cfg,\n window_opts=ObjectDetectionGeoDataWindowConfig(\n method=GeoDataWindowMethod.random, size=20, max_windows=8),\n class_names=class_config.names,\n class_colors=class_config.colors,\n plot_options=PlotOptions(\n channel_display_groups=channel_display_groups),\n num_workers=0)\n backend_cfg = PyTorchObjectDetectionConfig(\n data=data_cfg,\n 
model=ObjectDetectionModelConfig(\n backbone=Backbone.resnet18, pretrained=False),\n solver=SolverConfig(batch_sz=4, num_epochs=1),\n log_tensorboard=False)\n pipeline_cfg = ObjectDetectionConfig(\n root_uri=tmp_dir, dataset=dataset_cfg, backend=backend_cfg)\n pipeline_cfg.update()\n backend = backend_cfg.build(pipeline_cfg, tmp_dir)\n learner = backend.learner_cfg.build(tmp_dir, training=True)\n\n learner.plot_dataloaders()\n learner.train()\n learner.plot_predictions(split='valid')\n learner.save_model_bundle()\n\n learner = None\n backend.learner = None\n backend.load_model()\n\n pred_scene = dataset_cfg.validation_scenes[0].build(\n class_config, tmp_dir)\n _ = backend.predict_scene(pred_scene, chip_sz=100)", "def test(self, img_path):\n import cv2 \n\n self.load_data_test(path=img_path)\n self.C.horizontal_flips = False\n self.C.vertical_flips = False\n self.C.rotate_90 = False\n\n st = time.time()\n\n from .utils.data_generators import format_img_size\n from .utils.data_generators import format_img_channels\n from .utils.data_generators import format_img\n from .utils.data_generators import get_real_coordinates\n\n if self.cnn_name == 'vgg16' or self.cnn_name == 'vgg19':\n num_feature = 512\n else:\n num_feature = 1024 # any other convNet\n \n input_shape_img = (None, None, 3)\n input_shape_features = (None, None, num_feature)\n\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(self.C.num_roi, 4))\n feature_map_input = Input(shape=input_shape_features)\n\n # define the base network\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n\n # define the RPN, built on the base layers\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n rpn_layers = self.region_proposal_net(shared_layers, num_anchors)\n classifier = self.classifier(feature_map_input, \n self.cnn_model.classifier_layers, \n roi_input, \n self.C.num_roi, \n num_class=len(self.class_mapping), \n trainable=True)\n\n model_rpn = Model(img_input, rpn_layers)\n model_classifier_only = Model([feature_map_input, roi_input], classifier)\n model_classifier = Model([feature_map_input, roi_input], classifier)\n\n print('Loading weights from {}'.format(self.C.model_path))\n model_rpn.load_weights(self.C.model_path, by_name=True)\n model_classifier.load_weights(self.C.model_path, by_name=True)\n\n model_rpn.compile(optimizer='sgd', loss='mse')\n model_classifier.compile(optimizer='sgd', loss='mse')\n\n for i in range(len(self.test_images)):\n img = cv2.imread(self.test_images[i])\n X, ratio = format_img(img, self.C)\n X = np.transpose(X, (0, 2, 3, 1))\n\n # get the feature maps and output from the RPN\n [Y1, Y2, F] = model_rpn.predict(X)\n\n R = roi_helpers.rpn_to_roi(Y1, Y2, self.C, K.image_data_format(), overlap_thresh=0.7)\n\n # convert from (x1,y1,x2,y2) to (x,y,w,h)\n R[:, 2] -= R[:, 0]\n R[:, 3] -= R[:, 1]\n\n # apply the spatial pyramid pooling to the proposed regions\n bboxes = {}\n probs = {}\n\n for jk in range(R.shape[0] // self.C.num_roi+1):\n ROIs = np.expand_dims(R[self.C.num_roi*jk:self.C.num_roi*(jk+1), :], axis=0)\n if ROIs.shape[1] == 0:\n break\n\n if jk == R.shape[0] // self.C.num_roi:\n # pad R\n curr_shape = ROIs.shape\n target_shape = (curr_shape[0], self.C.num_roi, curr_shape[2])\n ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)\n ROIs_padded[:, :curr_shape[1], :] = ROIs\n ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]\n ROIs = ROIs_padded\n\n [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])\n\n for ii in range(P_cls.shape[1]):\n if 
np.max(P_cls[0, ii, :]) < self.C.bbox_threshold or \\\n np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):\n continue\n\n cls_name = self.class_mapping[np.argmax(P_cls[0, ii, :])]\n if cls_name not in bboxes:\n bboxes[cls_name] = []\n probs[cls_name] = []\n\n (x, y, w, h) = ROIs[0, ii, :]\n cls_num = np.argmax(P_cls[0, ii, :])\n try:\n (tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]\n tx /= C.class_regress_std[0]\n ty /= C.class_regress_std[1]\n tw /= C.class_regress_std[2]\n th /= C.class_regress_std[3]\n x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)\n except:\n pass\n \n bboxes[cls_name].append([self.C.stride*x, \n self.C.stride*y, \n self.C.stride*(x+w), \n self.C.stride*(y+h)])\n probs[cls_name].append(np.max(P_cls[0, ii, :]))\n\n all_detections = []\n\n for key in bboxes:\n bbox = np.array(bboxes[key])\n new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, \n np.array(probs[key]), overlap_thresh=0.5)\n \n for jk in range(new_boxes.shape[0]):\n (x1, y1, x2, y2) = new_boxes[jk,:]\n (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)\n\n cv2.rectangle(img,(real_x1, real_y1), \n (real_x2, real_y2), \n (int(self.class_to_color[key][0]), \n int(self.class_to_color[key][1]), \n int(self.class_to_color[key][2])),\n 2)\n\n textLabel = '%s: %.3f' % (key, new_probs[jk])\n all_detections.append((key, new_probs[jk]))\n\n (retval,baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n text_org = (real_x1+10, real_y1+20)\n\n cv2.rectangle(img, (text_org[0], text_org[1]+baseLine), \n (text_org[0]+retval[0]+10, text_org[1]-retval[1]-10), \n (0, 0, 0), 2)\n cv2.rectangle(img, (text_org[0],text_org[1]+baseLine), \n (text_org[0]+retval[0]+10, text_org[1]-retval[1]-10), \n (255, 255, 255), -1)\n cv2.putText(img, textLabel, text_org, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\n\n print('Elapsed time = {}'.format(time.time() - st))\n print(self.test_images[i], all_detections)\n if all_detections:\n cv2.imwrite(self.test_images_bbox[i], img)", "def draw_detections(self, img, yolo_results):\n\n _, height, _ = img.shape\n for yolo_result in yolo_results:\n class_index = yolo_result.class_index\n obj_name = yolo_result.obj_name\n x = yolo_result.x_min\n y = yolo_result.y_min\n w = yolo_result.width\n h = yolo_result.height\n\n offset = class_index * 123457 % self.meta.classes\n\n red = self._get_color(2, offset, self.meta.classes)\n green = self._get_color(1, offset, self.meta.classes)\n blue = self._get_color(0, offset, self.meta.classes)\n box_width = int(height * 0.006)\n cv2.rectangle(img, (int(x), int(y)), (int(x+w)+1, int(y+h)+1), (red, green, blue), box_width)\n cv2.putText(img, obj_name, (int(x) -1, int(y) -1), cv2.FONT_HERSHEY_PLAIN, 2, (red, green, blue), 2)\n\n return img", "def visualize_detection_examples(model: tf.keras.Model, dataset: tf.data.Dataset,\n index_to_category: tf.lookup.StaticHashTable,\n examples: int = 5) -> None:\n # Getting anchor shapes for latter use\n anchor_shapes = tf.convert_to_tensor(utils.ANCHORS_SHAPE)\n # Colormap for bounding boxes\n cmap = cm.get_cmap('hsv', 80)\n for image, path, output in dataset.take(examples):\n # Creates figure/axes\n fig, axes = plt.subplots(1, 2)\n fig.set_tight_layout(tight=0.1)\n fig.suptitle(path.numpy().decode('utf-8'))\n # Parses image dimensions\n image_height, image_width, image_depth = image.shape\n # Parses info from sparse outputs\n steps = range(0, output.values.shape[0], 6)\n bboxes = [\n denormalize_bbox_to_image_size(\n 
yolo.decode_from_yolo_format(\n output.values[i + 1: i + 5],\n output.indices[i][:2]\n ).numpy(),\n image_width,\n image_height\n ) for i in steps\n ]\n labels = [(tf.cast(output.indices[i + 5][2], dtype=tf.int32) - tf.cast(5 * tf.shape(anchor_shapes)[0],\n dtype=tf.int32)).numpy() for i in steps]\n objectnesses = [output.values[i].numpy() for i in steps]\n objects = [Object(*entry) for entry in zip(bboxes, labels, objectnesses)]\n # Plots all objects\n axes[0].imshow(image.numpy())\n for obj in objects:\n add_object_to_axes(axes[0], obj, index_to_category, cmap)\n # Plots detection results\n axes[1].imshow(image.numpy())\n # Gets all valid bboxes (one per cell)\n predicted = tf.squeeze(model(tf.expand_dims(yolo.preprocess_image(image), axis=0)))\n indices = tf.range(5 * tf.shape(anchor_shapes)[0], tf.shape(predicted)[2])\n probability = tf.gather(predicted, indices=indices, axis=-1)\n category = tf.cast(tf.argmax(probability, axis=-1), dtype=tf.int32)\n indices = tf.range(0, tf.shape(anchor_shapes)[0]) * 5\n objectness = tf.gather(predicted, indices=indices, axis=-1)\n anchors = tf.argmax(objectness, axis=-1)\n objects = [\n Object(\n bbox=denormalize_bbox_to_image_size(\n yolo.clip_bbox_to_image(yolo.decode_from_yolo_format(\n predicted[i, j, anchors[i, j] * 5 + 1: anchors[i, j] * 5 + 1 + 4],\n tf.convert_to_tensor([i, j])\n )).numpy(),\n image_width,\n image_height\n ),\n index=category[i, j],\n objectness=objectness[i, j, anchors[i, j]] * probability[i, j, category[i, j]]\n ) for i in range(7) for j in range(7)\n ]\n # Only objects with high certainty are considered\n detections = filter(lambda entry: entry.objectness > OBJECTNESS_THRESHOLD, objects)\n # Performs non-max suppression\n sorted_detections = sorted(detections, key=lambda entry: entry.objectness, reverse=True)\n included_detections = []\n excluded_detections = []\n while len(sorted_detections) > 0:\n # Top element is always a detection since is the highest confidence object\n root = sorted_detections[0]\n included_detections.append(root)\n # Filter out all elements from the same class having a high IoU with the top element\n suppression = [non_max_supression(root, entry) for entry in sorted_detections[1:]]\n excluded_detections.extend([entry for entry, suppressed in zip(sorted_detections[1:], suppression) if suppressed])\n sorted_detections = [entry for entry, suppressed in zip(sorted_detections[1:], suppression) if not suppressed]\n # Plots included detections\n for obj in included_detections:\n add_object_to_axes(axes[1], obj, index_to_category, cmap)\n # Plots excluded detections\n for obj in excluded_detections:\n add_deleted_object_to_axes(axes[1], obj)\n # Let the magic show!\n axes[0].axis('off')\n axes[1].axis('off')\n axes[1].set_xlim(axes[0].get_xlim())\n axes[1].set_ylim(axes[0].get_ylim())\n plt.show()", "def test_xray_classifier():\n model = X_ray_Classifier()\n assert type(model) == X_ray_Classifier", "def initiate_yolo_detect(images_path, save_to_path, detections_file='pickles/bounding_boxes.pickle'):\n for filename in os.listdir(images_path):\n bound_boxes = detect_objects_on_image(\n os.path.join(images_path, filename), detections_file)\n predictions_path = os.path.join(\n save_to_path, 'predictions_' + filename)\n print('predictions path', predictions_path)\n copy2('predictions_' + os.path.basename(image_directory) +\n '.png', predictions_path)", "def __init__(self,\n obj_threshold,\n nms_threshold,\n classes_num,\n anchors,\n masks,\n batch_size,\n yolo_input_resolution):\n self.masks = masks\n 
self.anchors = anchors\n self.object_threshold = obj_threshold\n self.batch_size = batch_size\n self.nms_threshold = nms_threshold\n self.classes_num = classes_num\n self.input_resolution_yolo = torch.tensor(yolo_input_resolution).to(torch.float16).cuda()\n\n self.grids = []\n yis = yolo_input_resolution[0] # create different for X and Y\n self.sizes = [yis//2//2//2//2//2, yis//2//2//2//2, yis//2//2//2] # create more flexible\n for size in self.sizes:\n col = np.tile(np.arange(0, size), size).reshape(-1, size)\n row = np.tile(np.arange(0, size).reshape(-1, 1), size)\n\n col = col.reshape(size, size, 1, 1).repeat(3, axis=-2)\n row = row.reshape(size, size, 1, 1).repeat(3, axis=-2)\n grid = np.concatenate((col, row), axis=-1)\n self.grids.append(torch.tensor(grid).to(torch.float16).cuda())\n\n self.sizes_cuda = [torch.tensor([size, size]).cuda() for size in self.sizes] # ???????\n self.number_two = torch.tensor(2).cuda()\n self.anchors_cuda = []\n self.image_dims = None\n for i_m, mask in enumerate(self.masks):\n anchor = torch.tensor([self.anchors[i_m][i] for i in mask]).to(torch.float16).cuda()\n anchor = anchor.reshape([3, 2])\n anchor = anchor / self.input_resolution_yolo\n self.anchors_cuda.append(anchor)\n #anchor = anchor.reshape([1, 1, 3, 2]) # reshape 1 1 3 2, because 3 anchors in every mask # CHANGE 3 to MASK LEN\n\n self.output_shapes = [(batch_size,\n self.sizes[i], self.sizes[i], \n len(self.masks[i]),\n classes_num + 5) for i in range(len(self.sizes))]\n\n self.batch_inds = []\n for i, mask in enumerate(self.masks):\n batch_inds_vector_len = len(mask) * self.sizes[i] * self.sizes[i]\n self.batch_inds.append(torch.tensor(np.arange(batch_size)).repeat_interleave(batch_inds_vector_len).cuda())", "async def async_inference_detector(model, img):\r\n cfg = model.cfg\r\n device = next(model.parameters()).device # model device\r\n # build the data pipeline\r\n test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]\r\n test_pipeline = Compose(test_pipeline)\r\n # prepare data\r\n data = dict(img=img)\r\n data = test_pipeline(data)\r\n data = scatter(collate([data], samples_per_gpu=1), [device])[0]\r\n\r\n # We don't restore `torch.is_grad_enabled()` value during concurrent\r\n # inference since execution can overlap\r\n torch.set_grad_enabled(False)\r\n result = await model.aforward_test(rescale=True, **data)\r\n return result", "def __init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_boxes_annotated\",\n detections_topic=\"/opendr/objects\", device=\"cuda\", backbone=\"darknet53\"):\n\n # Initialize the face detector\n self.object_detector = YOLOv3DetectorLearner(backbone=backbone, device=device)\n self.object_detector.download(path=\".\", verbose=True)\n self.object_detector.load(\"yolo_default\")\n self.class_names = self.object_detector.classes\n\n # Initialize OpenDR ROSBridge object\n self.bridge = ROSBridge()\n\n # setup communications\n if output_image_topic is not None:\n self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)\n else:\n self.image_publisher = None\n\n if detections_topic is not None:\n self.bbox_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=10)\n else:\n self.bbox_publisher = None\n\n rospy.Subscriber(input_image_topic, ROS_Image, self.callback)", "def __init__(self, yolo_input_resolution):\n self.yolo_input_resolution = yolo_input_resolution", "def test_text_classifier_test(self):\n pass", "def dog_detector( img_path, model, dog_model ):\n\n\n def 
model_predict( img_path, model, use_cuda=False ):\n '''\n Use pre-trained VGG-16 model to obtain index corresponding to \n predicted ImageNet class for image at specified path\n \n Args:\n img_path: path to an image\n model: trained classifier\n \n Returns:\n Index corresponding to VGG-16 model's prediction\n '''\n \n ## Load and pre-process an image from the given img_path\n ## Return the *index* of the predicted class for that image\n # Load in the image and convert to rgb\n img = Image.open(img_path).convert('RGB')\n \n # transforming image\n transform = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],#see https://pytorch.org/docs/stable/torchvision/models.html\n std=[0.229, 0.224, 0.225])])#for how they are obtained from the images \n img = transform(img)\n \n # move model to cuda if available\n if use_cuda:\n model = model.cuda()\n img = img.cuda()\n else:\n model = model.cpu()\n \n # Unsqueezing to add artificial dimension\n img = img.unsqueeze(0)\n \n # getting predictions, set model to evaluation\n model.eval()\n with torch.no_grad():\n output = model.forward(img)\n output = output.cpu()\n \n _, predicted_class_idx = torch.max(output, 1)\n \n return predicted_class_idx # predicted class index\n\n ## Detect Dogs\n dog_label = model_predict( img_path=img_path, model=dog_model )\n if dog_label in torch.LongTensor( np.arange(151, 269, 1) ):\n return True\n else:\n return False # true/false", "def test(one=True, training=True, detector_debug=False):\n total = 0\n imgs = []\n if one:\n\n print(\"Detecting:\")\n file_sign = \"./Data/Reglamentarias/STC-RG-3.jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=True, debug=detector_debug)\n s, th = d.detect()\n seg = Segmenter(s)\n\n seg.keypoints()\n seg.descriptors()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if training:\n print (\"Training\")\n for imagePath in paths.list_images(\"./training/PV/\"):\n print (imagePath)\n sign = cv2.imread(imagePath, 1)\n\n seg = Segmenter(sign)\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n if not one:\n for i in range(1, 90):\n # file = \"./Data/Preventivas/STC-PV-\"+str(i)+\".jpg\"\n file_sign = \"./Data/Reglamentarias/STC-RG-\" + str(i) + \".jpg\"\n # file = \"./Data/Mixtas/STC-MX-\"+ str(i) +\".jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=False)\n s, th = d.detect()\n if s is not None:\n total += 1\n imgs.append((i, s, th))\n\n print (\"Detected:\", str(total))\n\n for i in range(1, len(imgs)-1):\n seg = Segmenter(imgs[i][1])\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"img\"+str(imgs[i][0]), res)\n print (str(imgs[i][0]))\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def demo(net, image_name, classes):\n\n # Load pre-computed Selected Search object proposals\n # box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')\n test_mats_path = '/home/tanshen/fast-rcnn/data/kaggle/test_bbox'\n box_file = os.path.join(test_mats_path ,image_name + '_boxes.mat')\n obj_proposals = sio.loadmat(box_file)['boxes']\n\n # Load the demo image\n test_images_path = '/home/tanshen/fast-rcnn/data/kaggle/ImagesTest'\n # im_file = 
os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')\n im_file = os.path.join(test_images_path, image_name + '.jpg')\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im, obj_proposals)\n timer.toc()\n # print ('Detection took {:.3f}s for '\n # '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0\n NMS_THRESH = 0.3\n max_inds = 0\n max_score = 0.0\n for cls in classes:\n cls_ind = CLASSES.index(cls)\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n keep = np.where(cls_scores >= CONF_THRESH)[0]\n cls_boxes = cls_boxes[keep, :]\n cls_scores = cls_scores[keep]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # print 'All {} detections with p({} | box) >= {:.1f} in {}'.format(cls, cls,\n # CONF_THRESH, image_name)\n #if get_max!=[]: \n\n [ind,tmp]=get_max(im, cls, dets, thresh=CONF_THRESH)\n #print image_name,cls,tmp\n\n #vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)\n #print dets[:,-1]\n #print image_name,max_score\n file.writelines([image_name,'\\t',cls,'\\t',str(tmp),'\\n'])\n if(max_score<tmp):\n max_score=tmp\n cls_max=cls\n print image_name,cls_max,max_score", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n 
print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def demo(sess, net, image_name):\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n #im_file = os.path.join('/home/corgi/Lab/label/pos_frame/ACCV/training/000001/',image_name)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print (('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)", "def test_hog_cnn(video_location, output_location, frame_count):\n cap = cv2.VideoCapture(video_location)\n if not cap.isOpened():\n click.echo('cannot open this video', err=True)\n return\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n frame_count = min(frame_count, video_length)\n seperate = int(video_length / frame_count)\n frame_list = [None] * frame_count\n result_index = 0\n frame_index = 0\n while cap.isOpened():\n ret, frame = cap.read()\n if frame_index % seperate == 0 and result_index < frame_count:\n frame_list[result_index] = frame\n result_index = result_index + 1\n frame_index = frame_index + 1\n # if there is no more frames left\n if frame_index >= video_length:\n cap.release()\n break\n # if the frame_list is not filled\n if not result_index == frame_count:\n frame_list = frame_list[:result_index]\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n frame_list = [frame[:, :, ::-1] for frame in frame_list]\n\n # firstly use cnn\n print('start using cnn to detect faces')\n cnn_faces = [detect_faces_cnn(frame, index) for index, frame in enumerate(frame_list)]\n print('finish detecting faces in cnn')\n\n # use hog to detect\n print('start using hog to detect faces')\n arguments = [[frame, index] for index, frame in enumerate(frame_list)]\n with multiprocessing.Pool(processes=4) as pool:\n hog_faces = pool.starmap(detect_faces_hog, arguments)\n\n # draw cnn rectangles\n print('begin writing to files')\n index = 0\n for faces in cnn_faces:\n frame = frame_list[index][:,:,::-1]\n for top, right, bottom, left in faces:\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n index += 1\n index = 0\n for faces in hog_faces:\n frame = frame_list[index][:,:,::-1]\n for top, right, bottom, left in faces:\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)\n cv2.imwrite(os.path.join(output_location, '{}.png'.format(index)), frame)\n index += 1", "def test(self, X_src, Y_src):\n X_test,Y_test = self.prep_data(X_src,Y_src,shuffle = False, mode = \"test\")\n print(\"TEST: Images-\",X_test.shape, \"Labels-\", Y_test.shape )\n X_test = X_test[:,:,:,[0,2]] #extract channels with data\n\n #load weights\n ret = 
self.load_weights()\n if ret:\n #test the model on unseen data\n print(\"Starting testing\")\n pred = self.model.predict(X_test)\n with open(Y_src,\"w\") as file:\n for i in tqdm(range(len(pred))):\n file.write(str(pred[i,0])+'\\n')\n # rewriting last prediction to eqaute number of frames and predictions\n file.write(str(pred[-1,0])+'\\n')\n\n print(\"Saved prediction\")\n \n else:\n print(\"Test failed to complete with improper weights\")", "def demo(sess, net, color_image, depth_colormap):\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, color_image)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n CONF_THRESH = 0.7\n NMS_THRESH = 0.3\n dets_col = []\n cls_col = []\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n dets_col.append(dets)\n cls_col.append(cls)\n\n vis_detections(color_image, depth_colormap, cls_col, dets_col, thresh=CONF_THRESH)\n\n depth_col, bbox_col = calc_histogram(depth_image, cls_col, dets_col, thresh=CONF_THRESH)\n print(\"box depth:\", depth_col[0], \"sucker depth:\", depth_col[1])\n print(\"box bbox:\", bbox_col[0], \"sucker bbox\", bbox_col[1])", "def test_model(model, dataObj, index):\n\t(s,m,l), img = dataObj.__getitem__(index)\n\timg = img.float().unsqueeze(0)\n\t\n\tif next(model.parameters()).is_cuda:\n\t\toutput = model(img.cuda()) \n\telse:\n\t\toutput = model(img)\n\n\ts_pred,m_pred,l_pred = output[0].squeeze(0).cpu(), output[1].squeeze(0).cpu(), output[2].squeeze(0).cpu()\n\ts_pred = s_pred.detach().numpy()\n\tm_pred = m_pred.detach().numpy()\n\tl_pred = l_pred.detach().numpy()\n\n\timg = img.float().squeeze(0)\n\timg = img.permute(1,2,0)\n\n\tfor j in range(22):\n\t\tvisualize(img, s[j], m[j], l[j], s_pred[j], m_pred[j], l_pred[j])\n\t\tk = np.array(s[j])", "def __detect_objs(self):\n while True:\n # Wait for input images\n if (not self.__predict_start) or \\\n (self.__img is None):\n continue\n\n # Client for detection\n client = vision.ImageAnnotatorClient()\n\n # Encode image to binary\n _, img_buffer = cv2.imencode(\".jpg\", self.__img)\n img_bytes = img_buffer.tobytes()\n\n # Change to vision Image type\n image = vision.Image(content=img_bytes)\n # Detect Person\n self.__detect_info = client.object_localization(image=image,\n max_results=self.__max_results\n ).localized_object_annotations\n cv2.waitKey(30)", "def setUp(self) -> None:\n fake_image = numpy.zeros(100)\n fake_pose = numpy.eye(4)\n fake_intrinsic = numpy.eye(3)\n self.evaluator = Evaluator(fake_image, fake_pose, fake_intrinsic, 1.0)\n self.evaluator.setComparisionImage(fake_image, fake_pose)\n return super().setUp()", "def _analyze(self):\n frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n faces = self._face_detector(frame)\n try:\n landmarks = self._predictor(frame, faces[0])\n self.eye_left = Eye(frame, landmarks, 0, self.calibration)\n self.eye_right = Eye(frame, landmarks, 1, self.calibration)\n except IndexError:\n self.eye_left = None\n self.eye_right = None", "def test_predict():\n im = call_model(model_type=\"mosaic\", file_path=\"tests/flowers.jpg\")\n assert im.size == (640, 284)\n\n px_ctr_l_flower = im.getpixel((75, 170)) # 
the center of the flower on the left\n px_ctr_sky = im.getpixel((300, 30)) # near the center of the sky\n px_weeds = im.getpixel((240, 250)) # the brown weeds near the base of the 2nd flower\n\n # the center of the flower should be yellow (red + green but no blue)\n assert px_ctr_l_flower[0] > 150\n assert px_ctr_l_flower[1] > 150\n assert px_ctr_l_flower[2] < 50\n\n # the sky in this model is either blue or tan\n assert px_ctr_sky[0] > 125\n assert px_ctr_sky[1] > 125\n assert px_ctr_sky[2] > 125\n\n # the weeds should be dark\n assert px_weeds[0] < 50\n assert px_weeds[1] < 50\n assert px_weeds[2] < 50\n\n \"\"\"Test the candy model\"\"\"\n im = call_model(model_type=\"candy\", file_path=\"tests/flowers.jpg\")\n assert im.size == (640, 284)\n\n px_ctr_l_flower = im.getpixel((75, 170)) # the center of the flower on the left\n px_ctr_sky = im.getpixel((300, 30)) # near the center of the sky\n px_weeds = im.getpixel((240, 250)) # the brown weeds near the base of the 2nd flower\n\n # the center of the flower should be orange (red + green but no blue)\n assert px_ctr_l_flower[0] > 150\n assert px_ctr_l_flower[1] > 100\n assert px_ctr_l_flower[2] < 50\n\n # the sky in this model the sky is orange\n assert px_ctr_sky[0] > 200\n assert px_ctr_sky[1] > 150\n assert px_ctr_sky[2] > 100\n\n # the weeds should be dark with a tint of red\n assert px_weeds[0] < 100\n assert px_weeds[1] < 50\n assert px_weeds[2] < 50\n\n \"\"\"Test the rain princess model\"\"\"\n\n im = call_model(model_type=\"rain_princess\", file_path=\"tests/flowers.jpg\")\n assert im.size == (640, 284)\n\n px_ctr_l_flower = im.getpixel((75, 170)) # the center of the flower on the left\n px_ctr_sky = im.getpixel((300, 30)) # near the center of the sky\n px_weeds = im.getpixel((240, 250)) # the brown weeds near the base of the 2nd flower\n\n # the center of the flower should be yellow (red + green but no blue)\n assert px_ctr_l_flower[0] > 225\n assert px_ctr_l_flower[1] > 150\n assert px_ctr_l_flower[2] < 50\n\n # the sky in this model is dark blue\n assert px_ctr_sky[0] < 50\n assert px_ctr_sky[1] < 50\n assert px_ctr_sky[2] < 75\n assert px_ctr_sky[2] > px_ctr_sky[0] # the sky is more blue than red\n assert px_ctr_sky[2] > px_ctr_sky[1] # the sky is more blue than green\n\n # the weeds should be dark with a red tint\n assert 25 < px_weeds[0] < 75 # red tint\n assert px_weeds[1] < 50\n assert px_weeds[2] < 50\n\n \"\"\"Test the udnie model\"\"\"\n im = call_model(model_type=\"udnie\", file_path=\"tests/flowers.jpg\")\n assert im.size == (640, 284)\n px_ctr_l_flower = im.getpixel((75, 170)) # the center of the flower on the left\n px_ctr_sky = im.getpixel((300, 30)) # near the center of the sky\n px_weeds = im.getpixel((240, 250)) # the brown weeds near the base of the 2nd flower\n\n # the center of the flower should be brown\n assert px_ctr_l_flower[0] > 25\n assert px_ctr_l_flower[1] > 20\n assert px_ctr_l_flower[2] < 25\n assert px_ctr_l_flower[0] > px_ctr_l_flower[1] > px_ctr_l_flower[2] # mostly red, some green, tiny bit of blue\n\n # this pixel is milky, every value should be ~150\n assert 175 > px_ctr_sky[0] > 125\n assert 175 > px_ctr_sky[1] > 125\n assert 175 > px_ctr_sky[2] > 125\n\n # the weeds should be very dark brown\n assert px_weeds[0] < 25\n assert px_weeds[1] < 25\n assert px_weeds[2] < 25\n assert px_weeds[0] > px_weeds[1] > px_weeds[2]", "def yolo_show(image_path_list, batch_list):\n font = cv2.FONT_HERSHEY_SIMPLEX\n for img_path, batch in zip(image_path_list, batch_list):\n result_list = batch.tolist()\n img = 
cv2.imread(img_path)\n for result in result_list:\n cls = int(result[0])\n bbox = result[1:-1]\n score = result[-1]\n print('img_file:', img_path)\n print('cls:', cls)\n print('bbox:', bbox)\n c = ((int(bbox[0]) + int(bbox[2])) / 2, (int(bbox[1] + int(bbox[3])) / 2))\n cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 255), 1)\n cv2.putText(img, str(cls), (int(c[0]), int(c[1])), font, 1, (0, 0, 255), 1)\n result_name = img_path.split('/')[-1]\n cv2.imwrite(\"constant/results/\" + result_name, img)", "def test_net(args, dataset_name, proposal_file, output_dir, ind_range=None, gpu_id=0, early_stop=False):\n # print('test_net')\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n all_boxes = {}\n\n timers = defaultdict(Timer)\n \n \n\n\n if 'train' in dataset_name:\n if ind_range is not None:\n det_name = 'discovery_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'discovery.pkl'\n else:\n if ind_range is not None:\n det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n \n det_file = os.path.join(output_dir, det_name)\n if os.path.exists(det_file):\n print('the file', det_file, 'exists. I am loading detections from it...')\n return load_object(det_file)['all_boxes']\n\n for i, entry in enumerate(roidb):\n if early_stop and i > 10: break\n\n box_proposals = entry['boxes']\n if len(box_proposals) == 0:\n continue\n \n im = cv2.imread(entry['image'])\n # print(entry['image'])\n cls_boxes_i = im_detect_all(model, im, box_proposals, timers)\n\n all_boxes[entry['image']] = cls_boxes_i\n\n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n \n det_time = (timers['im_detect_bbox'].average_time)\n \n logger.info(('im_detect: range [{:d}, {:d}] of {:d}:{:d}/{:d} {:.3f}s (eta: {})').format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1, start_ind + num_images, det_time, eta))\n\n cfg_yaml = yaml.dump(cfg)\n\n save_object(\n dict(\n all_boxes=all_boxes,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n return all_boxes", "def testModel( self, classTest, classPred):", "def __detect_objects_with_given_sight_from_img_topic(self, labels, timeout):\n # Create the goal\n goalObjDetection = ObjectDetectionGoal()\n goalObjDetection.labels = labels\n goalObjDetection.moveHead = False\n # Outputs\n state, result = self.__detectObjects(goalObjDetection, timeout)\n return state, result", "def test(self, obj):\n pass", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = 
image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def do_stuff(self, net, meta):\n cv2_img = self.img_to_cv2(self.last_img)\n # Now we can use cv2 functions as the image is <type 'numpy.ndarray'>\n # rospy.loginfo(\"cv2_img: \" + str(type(cv2_img)))\n # Your OpenCV stuff\n # cv2_img = cv2.resize(cv2_img, (0,0), fx=0.25, fy=0.25) \n\n (rows,cols,channels) = cv2_img.shape\n # if cols > 60 and rows > 60 :\n # cv2.circle(cv2_img, (50,50), 10, 255)\n \n global x_old\n global no_meas_counter\n global est\n global cor\n global w\n global h\n \n\n r = darknet.detect(net, meta, cv2_img)\n # print(r)\n\n if not r:\n no_meas_counter += 1\n\n for i in r:\n if i[0].decode() == \"person\":\n x, y, w, h = i[2][0], i[2][1], i[2][2], i[2][3]\n xmin, ymin, xmax, ymax = darknet.convertBack(float(x), float(y), float(w), float(h))\n pt1 = (xmin, ymin)\n pt2 = (xmax, ymax)\n cv2.rectangle(cv2_img, pt1, pt2, (0, 255, 0), 2)\n cv2.putText(cv2_img, i[0].decode() + \" [\" + str(round(i[1] * 100, 2)) + \"]\", (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 0], 4)\n \n global mp\n mp = np.array([[np.float32(x)],[np.float32(y)]])\n cor = kalman.correct(mp)\n 
no_meas_counter = 0\n\t\t\n\n else:\n no_meas_counter += 1\n \n # x_old = x\n\n # cv2.imshow(\"cv2_img\", cv2_img)\n # k = cv2.waitKey(1)\n # if k == 27:\n # cv2.destroyAllWindows()\n # exit()\n\n if no_meas_counter < 30:\n est = kalman.predict()\n msg = PolygonStamped()\n msg.header.stamp = rospy.Time.now()\n # msg.polygon.points = [Point32(x=x, y=y), Point32(x=cols, y=rows), Point32(x=w, y=h)]\n msg.polygon.points = [Point32(x=est[0], y=est[1]), Point32(x=cols, y=rows), Point32(x=w, y=h)] \n self.pub_yolo_detection.publish(msg)\n\n # cv2.imshow(\"Image window\", cv2_img)\n # cv2.waitKey(3)\n\n self.pub_images(cv2_img)\n self.is_new_img = False", "def test_text_classifier_update_testing_samples(self):\n pass", "def test_function_kg(test_loader, model, settype, S, lk, bk, num_iters, epsilon, num_classes, topk):\r\n\r\n det_boxes = []\r\n det_labels = []\r\n det_scores = []\r\n true_boxes = []\r\n true_labels = []\r\n true_difficulties = []\r\n true_areas = []\r\n\r\n with torch.no_grad():\r\n for i, (images, targets) in enumerate(test_loader):\r\n\r\n # Move to default device.\r\n images = [im.to(device) for im in images]\r\n\r\n # Some (1021) images of COCO contain no objects at all. These are filtered out in the data loader, but\r\n # return an empty list, which raises an error in the model, so they are skipped.\r\n if len(images) == 0:\r\n continue\r\n\r\n prediction = model(images)\r\n\r\n for p in range(len(prediction)-1):\r\n true_boxes.append(targets[p]['boxes'].to(device))\r\n true_labels.append(targets[p]['labels'].to(device))\r\n\r\n if settype == 'voc':\r\n true_difficulties.append(targets[p]['difficulties'].to(device))\r\n # true_difficulties.append(torch.zeros(len(targets[p]['boxes'])).to(device))\r\n true_areas.append(torch.zeros(len(targets[p]['boxes'])).to(device))\r\n else:\r\n true_difficulties.append(torch.zeros(len(targets[p]['boxes'])).to(device))\r\n true_areas.append(targets[p]['areas'].to(device))\r\n\r\n boxes_temp = prediction[1][0]['boxes']\r\n labels_temp = prediction[1][0]['labels']\r\n scores_temp = prediction[1][0]['scores']\r\n\r\n new_predictions = torch.zeros((boxes_temp.shape[0], num_classes)).to(device)\r\n\r\n for l in range(new_predictions.shape[0]):\r\n label = labels_temp[l] - 1\r\n new_predictions[l, label] = scores_temp[l]\r\n\r\n p_hat = find_p_hat(boxes_temp, new_predictions, bk, lk, S, num_iters, epsilon)\r\n\r\n predk, boxk, labk, scok = find_top_k(p_hat, boxes_temp, topk)\r\n\r\n det_boxes.append(boxk)\r\n det_labels.append(labk)\r\n det_scores.append(scok)\r\n\r\n del prediction\r\n torch.cuda.empty_cache()\r\n\r\n return det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties, true_areas", "def test(self, X, y):\n\t\tself.test_X = X\n\t\tself.test_y = y\n\n\t\tclassifier = self.classifier.fit(self.X, self.y)\n\t\ty_pred = classifier.predict(X) \t\t\t# class prediction\n\t\ty_prob = classifier.predict_proba(X)\t# probability of each class\n\t\tself.test_metrics = ModelMetrics(classifier, y, y_pred, y_prob, 'holdout')", "def analyze_objects_detections_predictions(_context, config_path):\n\n import yaml\n\n import net.analysis\n import net.data\n import net.ml\n\n with open(config_path, encoding=\"utf-8\") as file:\n config = yaml.safe_load(file)\n\n ssd_model_configuration = config[\"vggish_model_configuration\"]\n\n network = net.ml.VGGishNetwork(\n model_configuration=ssd_model_configuration,\n categories_count=len(config[\"categories\"]))\n\n network.model.load_weights(config[\"model_checkpoint_path\"])\n\n 
validation_samples_loader = net.data.VOCSamplesDataLoader(\n data_directory=config[\"voc\"][\"data_directory\"],\n data_set_path=config[\"voc\"][\"validation_set_path\"],\n categories=config[\"categories\"],\n size_factor=config[\"size_factor\"])\n\n logger = net.utilities.get_logger(config[\"log_path\"])\n\n default_boxes_factory = net.ssd.DefaultBoxesFactory(model_configuration=ssd_model_configuration)\n\n thresholds_matching_data_map = net.analysis.MatchingDataComputer(\n samples_loader=validation_samples_loader,\n model=network,\n default_boxes_factory=default_boxes_factory,\n thresholds=[0, 0.5, 0.9],\n categories=config[\"categories\"]).get_thresholds_matched_data_map()\n\n net.analysis.log_precision_recall_analysis(\n logger=logger,\n thresholds_matching_data_map=thresholds_matching_data_map)\n\n net.analysis.log_mean_average_precision_analysis(\n logger=logger,\n thresholds_matching_data_map=thresholds_matching_data_map)\n\n net.analysis.log_performance_with_annotations_size_analysis(\n logger=logger,\n thresholds_matching_data_map=thresholds_matching_data_map)", "def carla_obj_det_test(\n split: str = \"large+medium+small\",\n epochs: int = 1,\n batch_size: int = 1,\n dataset_dir: str = None,\n preprocessing_fn: Callable = carla_obj_det_canonical_preprocessing,\n label_preprocessing_fn=carla_obj_det_label_preprocessing,\n cache_dataset: bool = True,\n framework: str = \"numpy\",\n shuffle_files: bool = False,\n **kwargs,\n):\n if \"class_ids\" in kwargs:\n raise ValueError(\n \"Filtering by class is not supported for the carla_obj_det_test dataset\"\n )\n if batch_size != 1:\n raise ValueError(\"carla_obj_det_test batch size must be set to 1\")\n\n if split == \"test\":\n split = \"large+medium+small\"\n\n modality = kwargs.pop(\"modality\", \"rgb\")\n if modality not in [\"rgb\", \"depth\", \"both\"]:\n raise ValueError(\n 'Unknown modality: {}. 
Must be one of \"rgb\", \"depth\", or \"both\"'.format(\n modality\n )\n )\n\n def rgb_fn(batch):\n return batch[:, 0]\n\n def depth_fn(batch):\n return batch[:, 1]\n\n def both_fn(batch):\n return np.concatenate((batch[:, 0], batch[:, 1]), axis=-1)\n\n func_dict = {\"rgb\": rgb_fn, \"depth\": depth_fn, \"both\": both_fn}\n mode_split_fn = func_dict[modality]\n preprocessing_fn = datasets.preprocessing_chain(mode_split_fn, preprocessing_fn)\n\n context = (\n carla_obj_det_multimodal_context\n if modality == \"both\"\n else carla_obj_det_single_modal_context\n )\n\n return datasets._generator_from_tfds(\n \"carla_obj_det_test:1.0.0\",\n split=split,\n batch_size=batch_size,\n epochs=epochs,\n dataset_dir=dataset_dir,\n preprocessing_fn=preprocessing_fn,\n label_preprocessing_fn=label_preprocessing_fn,\n cache_dataset=cache_dataset,\n framework=framework,\n shuffle_files=shuffle_files,\n context=context,\n as_supervised=False,\n supervised_xy_keys=(\"image\", (\"objects\", \"patch_metadata\")),\n **kwargs,\n )", "def object_detection(): # needs to be modified so definition can be called as part of main function\r\n green_lower = (29, 86, 6) # define the lower boundaries of the \"green\"\r\n green_upper = (64, 255, 255) # define the upper boundaries of the \"green\"\r\n pts = deque(maxlen=args[\"buffer\"]) # ball in the HSV color space, then initialize the list of tracked points\r\n\r\n if not args.get(\"video\", False): # if a video path was not supplied, grab the reference to the picam\r\n vs = VideoStream(usePiCamera=args[\"picamera\"] > 0).start()\r\n else: # otherwise, grab a reference to the video file\r\n vs = cv2.VideoCapture(args[\"video\"])\r\n time.sleep(2.0) # allow the camera or video file to warm up\r\n while True: # keep looping\r\n frame = vs.read() # grab the current frame\r\n frame = frame[1] if args.get(\"video\", False) else frame # handle the frame from VideoCapture or VideoStream\r\n if frame is None: # if viewing video and did not grab frame, then reached end of video\r\n break\r\n frame = imutils.resize(frame, width=600) # resize the frame\r\n blurred = cv2.GaussianBlur(frame, (11, 11), 0) # blur it\r\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) # and convert it to the HSV color space\r\n\r\n mask = cv2.inRange(hsv, green_lower, green_upper) # construct a mask for the color \"green\"\r\n mask = cv2.erode(mask, None, iterations=2) # then perform a series of erosions\r\n mask = cv2.dilate(mask, None, iterations=2) # and dilations to remove any small blobs left in the mask\r\n\r\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_SIMPLE) # find contours in the mask\r\n cnts = imutils.grab_contours(cnts)\r\n center = None # and initialize the current (x, y) center of the ball\r\n\r\n if len(cnts) > 0: # only proceed if at least one contour was found\r\n c = max(cnts, key=cv2.contourArea) # find the largest contour in the mask\r\n ((x, y), radius) = cv2.minEnclosingCircle(c) # then use it to compute minimum enclosing circle and centroid\r\n M = cv2.moments(c) # calculate moments\r\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"])) # use moment to find centroid in x,y\r\n if radius > 10: # only proceed if the radius meets a minimum size\r\n cv2.circle(frame, (int(x), int(y)), int(radius),\r\n (0, 255, 255), 2) # draw the circle\r\n cv2.circle(frame, center, 5, (0, 0, 255), -1) # draw the centroid\r\n object_tracking(int(x), int(y)) # update the list of tracked points\r\n\r\n pts.appendleft(center) # update the points 
queue\r\n for i in range(1, len(pts)): # loop over the set of tracked points\r\n if pts[i - 1] is None or pts[i] is None: # if either of the tracked points are None, ignore them\r\n continue\r\n thickness = int(np.sqrt(args[\"buffer\"] / float(i + 1)) * 2.5) # otherwise, compute thickness of line\r\n cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness) # draw the connecting lines\r\n\r\n cv2.imshow(\"Frame\", frame) # show the frame to our screen\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"): # if the 'q' key is pressed, stop the loop\r\n break\r\n\r\n if not args.get(\"video\", False): # if we are not using a video file, stop the camera video stream\r\n vs.stop()\r\n else: # otherwise, release the camera\r\n vs.release()\r\n cv2.destroyAllWindows() # close all windows\r", "def inference_detector(model,img:str):\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # build the data pipeline\n test_pipeline = cfg.test_pipeline\n test_pipeline = Compose(test_pipeline)\n\n if isinstance(img,str):\n img = cv2.imread(img)\n elif isinstance(img,np.ndarray):\n img = img\n elif isinstance(img,Image):\n #TODO:将PIL改为CV2\n pass\n else:\n raise TypeError('img must be a PIL.Image or str or np.ndarray, '\n 'but got {}'.format(type(img)))\n\n ori_h,ori_w,ori_c = img.shape\n\n # prepare data\n data = dict(img=img)\n data = test_pipeline(data)\n img_tensor = data['img'].unsqueeze(0).to(device)\n _,_,new_h,new_w = img_tensor.shape\n data_dict = dict(img=img_tensor)\n # forward the model\n with torch.no_grad():\n preds = model(data_dict,return_loss=False)\n pred_bbox_list,score_bbox_list = model.postprocess(preds)\n\n #pred_bbox_list(b,n,4,2) [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] for bbox model\n batch_pred_bbox = pred_bbox_list[0]\n score_bbox_list = score_bbox_list[0]\n w_scale = float(ori_w) / new_w\n h_scale = float(ori_h) / new_h\n\n if type(batch_pred_bbox)==np.ndarray:\n if len(batch_pred_bbox)!=0:\n ##bbox 情况,其4个点个数稳定\n batch_pred_bbox[:,:,0] *=w_scale\n batch_pred_bbox[:, :, 1] *= h_scale\n else:\n #polygon\n for polygon_array in batch_pred_bbox:\n polygon_array[:, 0] = np.clip(\n np.round(polygon_array[:, 0] / new_w * ori_w), 0, ori_w)\n polygon_array[:, 1] = np.clip(\n np.round(polygon_array[:, 1] / new_h * ori_h), 0, ori_h)\n\n return batch_pred_bbox,score_bbox_list", "def runTest(self):\n self.setUp()\n self.test_visuThreeD1()", "def setUp(self):\n self.batch_size = 1\n self.imagenet_inception_v3 = testproblems.imagenet_inception_v3(\n self.batch_size)", "def test_text_classifier_del_testing_samples(self):\n pass", "def main(args):\n\n print(now(), \"test_model.py main() running.\")\n\n test_log = \"clean_test_log.txt\"\n to_log_file(args, args.output, test_log)\n\n # Set device\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n ####################################################\n # Dataset\n if args.dataset.lower() == \"cifar10\":\n transform_train = get_transform(args.normalize, args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = torchvision.datasets.CIFAR10(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR10(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n elif args.dataset.lower() == \"cifar100\":\n transform_train = get_transform(args.normalize, 
args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n\n elif args.dataset.lower() == \"tinyimagenet_first\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"firsthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"firsthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_last\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"lasthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"lasthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_all\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"all\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"all\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n else:\n print(\"Dataset not yet implemented. 
Exiting from test_model.py.\")\n sys.exit()\n\n ####################################################\n\n ####################################################\n # Network and Optimizer\n net = get_model(args.model, args.dataset)\n\n # load model from path if a path is provided\n if args.model_path is not None:\n net = load_model_from_checkpoint(args.model, args.model_path, args.dataset)\n else:\n print(\"No model path provided, continuing test with untrained network.\")\n net = net.to(device)\n ####################################################\n\n ####################################################\n # Test Model\n training_acc = test(net, trainloader, device)\n natural_acc = test(net, testloader, device)\n print(now(), \" Training accuracy: \", training_acc)\n print(now(), \" Natural accuracy: \", natural_acc)\n stats = OrderedDict(\n [\n (\"model path\", args.model_path),\n (\"model\", args.model),\n (\"normalize\", args.normalize),\n (\"augment\", args.train_augment),\n (\"training_acc\", training_acc),\n (\"natural_acc\", natural_acc),\n ]\n )\n to_results_table(stats, args.output, \"clean_performance.csv\")\n ####################################################\n\n return", "def __init__(self, detector, detectors, nodename, viewname):\n\n self.detector_type = detector\n # set detect_state function to detector_type (e.g., light or motion)\n if detector == 'light':\n self.detect_state = self.detect_light\n if 'threshold' in detectors[detector]:\n self.threshold = detectors[detector]['threshold']\n else:\n self.threshold = 100 # 100 is a default for testing\n if 'min_frames' in detectors[detector]:\n self.min_frames = detectors[detector]['min_frames']\n else:\n self.min_frames = 5 # 5 is default\n # need to remember min_frames of state history to calculate state\n self.state_history_q = deque(maxlen=self.min_frames)\n\n elif detector == 'motion':\n self.detect_state = self.detect_motion\n self.moving_frames = 0\n self.still_frames = 0\n self.total_frames = 0\n if 'delta_threshold' in detectors[detector]:\n self.delta_threshold = detectors[detector]['delta_threshold']\n else:\n self.delta_threshold = 5 # 5 is a default for testing\n if 'min_area' in detectors[detector]:\n self.min_area = detectors[detector]['min_area']\n else:\n self.min_area = 3 # 3 is default percent of ROI\n if 'min_motion_frames' in detectors[detector]:\n self.min_motion_frames = detectors[detector]['min_motion_frames']\n else:\n self.min_motion_frames = 3 # 3 is default\n if 'min_still_frames' in detectors[detector]:\n self.min_still_frames = detectors[detector]['min_still_frames']\n else:\n self.min_still_frames = 3 # 3 is default\n self.min_frames = max(self.min_motion_frames, self.min_still_frames)\n if 'blur_kernel_size' in detectors[detector]:\n self.blur_kernel_size = detectors[detector]['blur_kernel_size']\n else:\n self.blur_kernel_size = 15 # 15 is default blur_kernel_size\n if 'print_still_frames' in detectors[detector]:\n self.print_still_frames = detectors[detector]['print_still_frames']\n else:\n self.print_still_frames = True # True is default print_still_frames\n\n if 'ROI' in detectors[detector]:\n self.roi_pct = literal_eval(detectors[detector]['ROI'])\n else:\n self.roi_pct = ((0, 0), (100, 100))\n if 'draw_roi' in detectors[detector]:\n self.draw_roi = literal_eval(detectors[detector]['draw_roi'])\n self.draw_color = self.draw_roi[0]\n self.draw_line_width = self.draw_roi[1]\n else:\n self.draw_roi = None\n # name of the ROI detector section\n if 'roi_name' in detectors[detector]:\n 
self.roi_name = detectors[detector]['roi_name']\n else:\n self.roi_name = ''\n # include ROI name in log events\n if 'log_roi_name' in detectors[detector]:\n self.log_roi_name = detectors[detector]['log_roi_name']\n else:\n self.log_roi_name = False\n # draw timestamp on image\n if 'draw_time' in detectors[detector]:\n self.draw_time = literal_eval(detectors[detector]['draw_time'])\n self.draw_time_color = self.draw_time[0]\n self.draw_time_width = self.draw_time[1]\n if 'draw_time_org' in detectors[detector]:\n self.draw_time_org = literal_eval(detectors[detector]['draw_time_org'])\n else:\n self.draw_time_org = (0, 0)\n if 'draw_time_fontScale' in detectors[detector]:\n self.draw_time_fontScale = detectors[detector]['draw_time_fontScale']\n else:\n self.draw_time_fontScale = 1\n else:\n self.draw_time = None\n send_frames = 'None Set'\n self.frame_count = 0\n # send_frames option can be 'continuous', 'detected event', 'none'\n if 'send_frames' in detectors[detector]:\n send_frames = detectors[detector]['send_frames']\n if not send_frames: # None was specified; send 0 frames\n self.frame_count = 0\n if 'detect' in send_frames:\n self.frame_count = 10 # detected events default; adjusted later\n elif 'continuous' in send_frames:\n self.frame_count = -1 # send continuous flag\n elif 'none' in send_frames: # don't send any frames\n self.frame_count = 0\n else:\n self.frame_count = -1 # send continuous flag\n # send_count option is an integer of how many frames to send if event\n if 'send_count' in detectors[detector]:\n self.send_count = detectors[detector]['send_count']\n else:\n self.send_count = 5 # default number of frames to send per event\n # send_test_images option: if True, send test images like ROI, Gray\n if 'send_test_images' in detectors[detector]:\n self.send_test_images = detectors[detector]['send_test_images']\n else:\n self.send_test_images = False # default is NOT to send test images\n\n # self.event_text is the text message for this detector that is\n # sent when the detector state changes\n # example: JeffOffice Window|light|dark\n # example: JeffOffice Window|light|lighted\n # self.event_text will have self.current_state appended when events are sent\n node_and_view = ' '.join([nodename, viewname]).strip()\n self.event_text = '|'.join([node_and_view, self.detector_type])\n\n # An event is a change of state (e.g., 'dark' to 'lighted')\n # Every detector is instantiated with all states = 'unknown'\n self.current_state = 'unknown'\n self.last_state = 'unknown'\n\n self.msg_image = np.zeros((2, 2), dtype=\"uint8\") # blank image tiny\n if self.send_test_images:\n # set the blank image wide enough to hold message of send_test_images\n self.msg_image = np.zeros((5, 320), dtype=\"uint8\") # blank image wide", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), 
model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def main(_argv):\n config = ConfigProto()\n config.gpu_options.allow_growth = True\n session = InteractiveSession(config=config)\n STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(\n FLAGS)\n\n input_size = FLAGS.size\n saved_model_loaded = tf.saved_model.load(\n FLAGS.weights, tags=[tag_constants.SERVING])\n yolo_classifier = YoloClassifier(saved_model_loaded, input_size)\n species_classifier = SpeciesClassifier(\"../best_model_inception.hdf5\")\n new_vid = True\n while True:\n # Poll for a new video and load it\n yolo_classifier.load_video(\"vid07-02-2020_08:29:08.avi\")\n yolo_classifier.classify()\n\n species_classifier.classify(\n yolo_classifier.get_crops(), yolo_classifier.confidence_arr)\n # Save information about species in DB\n yolo_classifier.clear_data()\n species_classifier.reset_pred_dict()", "def test_01_lighting(self):", "def test_text_classifier_create(self):\n pass", "def my_evaluate(original_image, img_id, annotations, min_score=0.4, max_overlap=0.3, top_k=200):\n\n # Transform\n image = normalize(to_tensor(resize(original_image)))\n\n # Move to default device\n image = image.to(device)\n\n # Forward prop.\n predicted_locs, predicted_scores = model(image.unsqueeze(0))\n\n # Detect objects in SSD output\n det_boxes, det_labels, det_scores = model.detect_objects(predicted_locs, predicted_scores, min_score=min_score,\n max_overlap=max_overlap, top_k=top_k)\n\n # Move detections to the CPU\n det_boxes = det_boxes[0].to('cpu')\n\n # Transform to original image dimensions\n original_dims = torch.FloatTensor(\n [original_image.width, original_image.height, original_image.width, original_image.height]).unsqueeze(0)\n det_boxes = det_boxes * original_dims\n\n # Decode class integer labels\n det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]\n det_scores = det_scores[0].to('cpu').tolist()\n\n if det_labels != ['background']:\n for i in range(det_boxes.size(0)):\n \n # For every detection, see whether it matches a ground truth\n match = False\n for j in range(len(annotations['boxes'])):\n if annotations['labels'][j] == -1: # this is being set when a ground truth already matched\n 
continue\n match = box_match(det_boxes[i], \n det_labels[i], \n torch.Tensor(annotations['boxes'][j]), \n rev_label_map[annotations['labels'][j]])\n if match:\n annotations['labels'][j] = -1\n break\n \n if match: # true positive if the detection is correct and matched a ground truth\n all_results['true_positive'] += 1\n else: # false positive if the detection did not match a ground truth\n all_results['false_positive'] += 1\n \n # After all detections were checked, let us see whether the detector missed something\n for label in annotations['labels']: \n if label == -1: # This is set after a detection matched this ground truth\n continue\n \n # false negative if we reach this line, since the ground truth object was not found\n all_results['false_negative'] += 1", "def constrained_lens_object_test():\n return # TODO", "def test_machine_learning():", "def test():\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', type=str, help='name of the model',\n default='model_new_o')\n parser.add_argument('-f', '--filename', type=str,\n help='name of the dataset (.h5 file)', default='./dataset.h5')\n parser.add_argument('-bs', '--batch-size', type=int,\n help='size of the batches of the training data', default=256)\n args = parser.parse_args()\n\n name = args.name\n filename = args.filename\n batch_size = args.batch_size\n\n out_channels = 400\n model_path = './model/' + name\n checkpoint_path = model_path + '/checkpoints'\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n for k, v in vars(args).items():\n print('{0} = \"{1}\"'.format(k, v))\n print('device = \"' + device + '\"')\n\n if not os.path.exists(checkpoint_path):\n print('Model parameters not found: ' + checkpoint_path)\n exit()\n\n # Dataset\n\n input_cols = ['camera', 'pos_x', 'pos_y', 'theta']\n target_cols = ['target_map']\n train_test_split = 11\n\n dataset = get_dataset(filename, device=device, augment=False,\n input_cols=input_cols, target_cols=target_cols)\n split_index = dataset.cumulative_sizes[train_test_split]\n\n # Model\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model = NN(in_channels=3, out_channels=out_channels).to(device)\n model.load_state_dict(torch.load(checkpoint_path + '/best.pth'))\n summary(model, (3, 64, 80), device=device)\n\n auc_function = MaskedAUROC()\n\n # Testing\n\n aucs = []\n for x, px, py, pt, y in dataset.batches(batch_size, start=split_index, shuffle=False):\n pose = torch.stack([px, py, pt], dim=-1).to(device)\n mask = y > -1\n\n preds = model(x)\n\n aucs.append(auc_function(preds, y, mask).cpu().numpy())\n\n auc = np.nanmean(aucs, axis=0).reshape(20, 20)\n auc = np.rot90(auc, 1)\n auc = np.fliplr(auc) * 100\n\n print('AUC: ' + str(auc.mean().item()))\n\n print(auc)\n\n rounded = (100 * coords).round(2).astype(int)\n fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(7, 5.8))\n sns.distplot(auc, bins=int(np.ceil(auc.max() - auc.min())),\n ax=ax[0], kde=False, rug=False, color='red', hist_kws={'rwidth': 0.75})\n sns.heatmap(auc, cmap='gray', annot=True, cbar_kws={'shrink': .8},\n vmin=50, vmax=100, linewidths=0, ax=ax[1])\n plt.yticks(.5 + np.arange(20), np.unique(rounded[:, 0])[::-1])\n plt.xticks(.5 + np.arange(20), np.unique(rounded[:, 1]))\n plt.xlabel('Y [cm]')\n plt.ylabel('X [cm]')\n plt.setp(ax[1].xaxis.get_majorticklabels(), rotation=0)\n plt.setp(ax[1].yaxis.get_majorticklabels(), rotation=0)\n plt.axis('equal')\n plt.tight_layout()\n plt.show()", "def test_text_classifier_curate(self):\n pass", "def yolo_eval(yolo_outputs,\n anchors,\n 
num_classes,\n image_shape,\n max_boxes=10,\n score_threshold=.6,\n iou_threshold=.5):\n for i in range(0,3):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[i], anchors[6-3*i:9-3*i], num_classes, i)\n if i==0:\n boxes, box_scores= _boxes, _box_scores\n else:\n boxes = K.concatenate([boxes,_boxes], axis=0)\n box_scores = K.concatenate([box_scores,_box_scores], axis=0)\n\n # Scale boxes back to original image shape.\n height = image_shape[0]\n width = image_shape[1]\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n for i in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, i])\n class_box_scores = tf.boolean_mask(box_scores[:, i], mask[:, i])\n # TODO: 13*13 + 26*26 + 52*52\n classes = K.constant(i, shape=(3549,), dtype='int32')\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.gather(classes, nms_index)\n if i==0:\n boxes_, scores_, classes_ = class_boxes, class_box_scores, classes\n else:\n boxes_ = K.concatenate([boxes_,class_boxes], axis=0)\n scores_ = K.concatenate([scores_,class_box_scores], axis=0)\n classes_ = K.concatenate([classes_,classes], axis=0)\n return boxes_, scores_, classes_", "def test_oss_sk_estimator():\n check_estimator(OneSidedSelection)", "def _run_test(self):\n scenes = [\n \"../images/uturn_scene.jpg\",\n \"../images/leftturn_scene.jpg\",\n \"../images/rightturn_scene.jpg\"\n ]\n\n for filename in scenes:\n scene_img = cv2.imread(os.path.join(self.file_path, filename), 0)\n pred = tm.predict(scene_img)\n print filename.split('/')[-1]\n print pred" ]
[ "0.6627789", "0.6447896", "0.6315877", "0.62284786", "0.6174531", "0.61566085", "0.6096233", "0.60372835", "0.58174103", "0.5799088", "0.5774036", "0.5773982", "0.57679653", "0.56752574", "0.5674558", "0.5668986", "0.5649171", "0.5648845", "0.5647306", "0.5644756", "0.56349933", "0.55926454", "0.5554171", "0.5548961", "0.5539405", "0.5535786", "0.5531372", "0.55205065", "0.5507268", "0.55062026", "0.5497991", "0.5483648", "0.5471467", "0.5460175", "0.5453206", "0.54353374", "0.5432541", "0.54285777", "0.5417035", "0.5415604", "0.5411879", "0.53736997", "0.5344873", "0.5344194", "0.5302502", "0.5285626", "0.5282746", "0.527298", "0.52724254", "0.5271492", "0.52410597", "0.52410465", "0.52303886", "0.5210306", "0.5196082", "0.51958925", "0.5195648", "0.51858616", "0.5185239", "0.5183784", "0.5167049", "0.5166852", "0.51574826", "0.51522285", "0.51456136", "0.512874", "0.51273274", "0.51233727", "0.5116325", "0.5113921", "0.5101015", "0.5092468", "0.50908124", "0.5049599", "0.50425357", "0.5040657", "0.50404066", "0.50339496", "0.5031536", "0.5020036", "0.5009486", "0.5006408", "0.49997297", "0.4996404", "0.49958962", "0.49954474", "0.49841732", "0.49787486", "0.49610847", "0.49604544", "0.4957684", "0.49565655", "0.49504498", "0.4948542", "0.4948368", "0.49296734", "0.49197483", "0.49177054", "0.49069965", "0.4903089" ]
0.7094565
0
Splits image into tiles by size of tile.
tile_w  tile width
tile_h  tile height
def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):
    x_axis = -1
    y_axis = -2
    arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]

    x_ntiles = (
        arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1
    )
    y_ntiles = (
        arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1
    )

    tiles = []

    # row
    for i in range(0, y_ntiles):
        # height of this tile
        ver_f = tile_h * i
        ver_t = ver_f + tile_h

        # col
        for j in range(0, x_ntiles):
            # width of this tile
            hor_f = tile_w * j
            hor_t = hor_f + tile_w

            tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)

            tiles.append(tile)

    tile_shape = [tile_h, tile_w]
    ntiles = dict(x=x_ntiles, y=y_ntiles)

    padding = dict(left=0, right=0, top=0, bottom=0)
    if arr_width % tile_w == 0:
        padding["right"] = 0
    else:
        padding["right"] = tile_w - (arr_width % tile_w)
    if arr_height % tile_h == 0:
        padding["bottom"] = 0
    else:
        padding["bottom"] = tile_h - (arr_height % tile_h)

    info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)
    return tiles, info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)", "def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight 
+ tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def __init__(self, width, height, tilesize = 256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = ( math.ceil( width / tilesize ), math.ceil( height / tilesize ) )\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.push( tiles )\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append( imagesize );\n\n while (imagesize[0] > tilesize or imageSize[1] > tilesize ):\n imagesize = (math.floor( imagesize[0] / 2 ), math.floor( imagesize[1] / 2) )\n tiles = ( math.ceil( imagesize[0] / tilesize ), math.ceil( imagesize[1] / tilesize ) )\n self.tierSizeInTiles.append( tiles )\n self.tierImageSize.append( imagesize )\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] + self.tileCountUpToTier[i-1]\n )", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = 
img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. 
(1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = 
os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def __init__(self, width, height, tilesize=256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n 
self.tierSizeInTiles.append(tiles)\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append(imagesize)\n\n while (imagesize[0] > tilesize or imagesize[1] > tilesize):\n imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))\n tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))\n self.tierSizeInTiles.append(tiles)\n self.tierImageSize.append(imagesize)\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +\n self.tileCountUpToTier[i-1]\n )", "def split_image_with_bboxes(bboxes, image, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n assert tiles % 2 == 0, \"Error in splitting images. Uneven number of images requested.\"\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n tile_start = (int(tile_height-new_height), int(tile_width-new_width))\n tile_end = (int(tile_height), int(tile_width))\n canvas[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]] = 1\n\n new_bboxes = []\n for bbox in bboxes:\n\n xmin, ymin, xmax, ymax = bbox\n\n # Overlap of image tile and bbox\n bbox_image = np.zeros_like(image)\n bbox_image[ymin:ymax, xmin:xmax] = 1\n\n overlap = np.logical_and(canvas, bbox_image)\n\n if np.sum(overlap) < 1:\n continue\n\n overlap_index = np.argwhere(overlap)\n\n overlap_xmin, overlap_ymin = overlap_index[0][1], overlap_index[0][0]\n overlap_xmax, overlap_ymax = overlap_index[-1][1]+1, overlap_index[-1][0]+1\n\n new_xmin = overlap_xmin - col * new_width\n new_ymin = overlap_ymin - row * new_height\n new_xmax = overlap_xmax - col * new_width\n new_ymax = overlap_ymax - row * new_height\n\n new_bbox = (new_xmin, new_ymin, new_xmax, new_ymax)\n\n new_bboxes.append(new_bbox)\n\n cropped_image = image[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]]\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = 
np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles", "def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target", "def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def _get_tile_info(img_shape, tile_shape, ambiguous_size=128):\n # * get normal tiling set\n tile_grid_top_left, _ = _get_patch_top_left_info(img_shape, tile_shape, tile_shape)\n tile_grid_bot_right = []\n for idx in list(range(tile_grid_top_left.shape[0])):\n tile_tl = tile_grid_top_left[idx][:2]\n tile_br = tile_tl + tile_shape\n axis_sel = tile_br > img_shape\n tile_br[axis_sel] = img_shape[axis_sel]\n tile_grid_bot_right.append(tile_br)\n tile_grid_bot_right = np.array(tile_grid_bot_right)\n tile_grid = np.stack([tile_grid_top_left, tile_grid_bot_right], axis=1)\n tile_grid_x = np.unique(tile_grid_top_left[:, 1])\n tile_grid_y = np.unique(tile_grid_top_left[:, 0])\n # * get tiling set to fix vertical and horizontal boundary between tiles\n # for sanity, expand at boundary `ambiguous_size` to both side vertical and horizontal\n stack_coord = lambda x: np.stack([x[0].flatten(), x[1].flatten()], axis=-1)\n tile_boundary_x_top_left = np.meshgrid(\n tile_grid_y, tile_grid_x[1:] - ambiguous_size\n )\n tile_boundary_x_bot_right = np.meshgrid(\n tile_grid_y + tile_shape[0], tile_grid_x[1:] + ambiguous_size\n )\n tile_boundary_x_top_left = stack_coord(tile_boundary_x_top_left)\n tile_boundary_x_bot_right = stack_coord(tile_boundary_x_bot_right)\n tile_boundary_x = np.stack(\n [tile_boundary_x_top_left, tile_boundary_x_bot_right], axis=1\n )\n #\n tile_boundary_y_top_left = np.meshgrid(\n tile_grid_y[1:] - ambiguous_size, tile_grid_x\n )\n tile_boundary_y_bot_right = np.meshgrid(\n tile_grid_y[1:] + ambiguous_size, tile_grid_x + tile_shape[1]\n )\n tile_boundary_y_top_left = stack_coord(tile_boundary_y_top_left)\n tile_boundary_y_bot_right = stack_coord(tile_boundary_y_bot_right)\n tile_boundary_y = np.stack(\n [tile_boundary_y_top_left, tile_boundary_y_bot_right], axis=1\n )\n tile_boundary = np.concatenate([tile_boundary_x, tile_boundary_y], axis=0)\n # * get tiling 
set to fix the intersection of 4 tiles\n tile_cross_top_left = np.meshgrid(\n tile_grid_y[1:] - 2 * ambiguous_size, tile_grid_x[1:] - 2 * ambiguous_size\n )\n tile_cross_bot_right = np.meshgrid(\n tile_grid_y[1:] + 2 * ambiguous_size, tile_grid_x[1:] + 2 * ambiguous_size\n )\n tile_cross_top_left = stack_coord(tile_cross_top_left)\n tile_cross_bot_right = stack_coord(tile_cross_bot_right)\n tile_cross = np.stack([tile_cross_top_left, tile_cross_bot_right], axis=1)\n return tile_grid, tile_boundary, tile_cross", "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory", "def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. 
Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out", "def split_tiles(module_data):\n raise NotImplementedError", "def __createTiles(self, length, width, height):\n\n rectangles = []\n centrePoints = []\n \n # Defines the dimensions required to fit all tiles\n totalHeight = length * height\n totalWidth = length * width\n \n # Go through all tiles\n y = length\n while y < totalHeight + length:\n\n x = length\n while x < totalWidth + length:\n # Creates a Rect object\n rectangle = pygame.Rect(x, y, length, length)\n rectangles.append(rectangle)\n\n # Calculates the tile's centre point.\n centrePoint = (math.floor(x + length/2), math.floor(y + length/2))\n centrePoints.append(centrePoint)\n\n x += length\n y += length\n\n return rectangles, centrePoints", "def slice(\n filename,\n number_tiles=None,\n col=None,\n row=None,\n save=True,\n DecompressionBombWarning=True,\n):\n if DecompressionBombWarning is False:\n Image.MAX_IMAGE_PIXELS = None\n\n im = Image.open(filename)\n im_w, im_h = im.size\n\n columns = 0\n rows = 0\n if number_tiles:\n validate_image(im, number_tiles)\n columns, rows = calc_columns_rows(number_tiles)\n else:\n validate_image_col_row(im, col, row)\n columns = col\n rows = row\n\n tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n tiles = []\n number = 1\n for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.\n for pos_x in range(0, im_w - columns, tile_w): # as above.\n area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n image = im.crop(area)\n position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)\n coords = (pos_x, pos_y)\n tile = Tile(image, number, position, coords)\n tiles.append(tile)\n number += 1\n if save:\n save_tiles(\n tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)\n )\n return tuple(tiles)", "def get_shape_for_tile_split(\n arr_height: int, arr_width: int, nchannels: int, 
tile_height: int, tile_width: int\n) -> list[int]:\n shape = [\n arr_height // tile_height,\n tile_height,\n arr_width // tile_width,\n tile_width,\n ]\n if nchannels > 1:\n shape.append(nchannels)\n return shape", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def __init__(self, width, height):\n self.w = width\n self.h = height\n self.cleanTiles = []\n self.tiles = [[False] * width for i in range(height)]\n self.cleaned = 0", "def createTiles():\n Renderer.Clear()\n map = []\n w, h = len(testmap[0]), len(testmap)\n x, y = 0, 0\n for row in testmap:\n for char in row:\n map.append(makeTile(char, x, y))\n x += 1\n y += 1\n x = 0\n\n return map, w, h", "def get_tile_info(file_info, img_info):\n all_tiles = []\n new_tiles = {}\n if img_info['invert_x']:\n xmin = img_info['width']*img_info['scale'] - img_info['viewer']['right']\n xmax = img_info['width']*img_info['scale'] - img_info['viewer']['left']\n else:\n xmin = img_info['viewer']['left']\n xmax = img_info['viewer']['right']\n if img_info['invert_y']:\n ymin = img_info['height']*img_info['scale'] - img_info['viewer']['bottom']\n ymax = img_info['height']*img_info['scale'] - img_info['viewer']['top']\n else:\n ymin = img_info['viewer']['top']\n ymax = img_info['viewer']['bottom']\n minCol = int(max(1,math.floor(xmin/file_info['tile_width'])))-1\n maxCol=int(min(img_info['columns'],math.ceil(xmax/file_info['tile_width'])))\n minRow = int(max(1,math.floor(ymin/file_info['tile_height'])))-1\n maxRow = int(min(img_info['rows'],math.ceil(ymax/file_info['tile_height'])))\n \n block_width = int(math.ceil(file_info['tile_width']/img_info['scale']))\n block_height = int(math.ceil(file_info['tile_height']/img_info['scale']))\n \n for row in range(minRow,maxRow):\n y0 = row*file_info['tile_height']\n yf = (row+1)*file_info['tile_height']\n y0_idx = int(y0/img_info['scale'])\n yf_idx = min(y0_idx + block_height, img_info['height'])\n for col in range(minCol,maxCol):\n all_tiles.append(str(col)+','+str(row))\n tile_idx = str(col)+','+str(row)\n if (tile_idx not in img_info['tiles'] or \n 'loaded' not in img_info['tiles'][tile_idx] or\n not img_info['tiles'][tile_idx]['loaded']):\n x0 = col*file_info['tile_width']\n xf = (col+1)*file_info['tile_width']\n x0_idx = int(x0/img_info['scale'])\n xf_idx = min(x0_idx+block_width, img_info['width'])\n tile_width = int((xf_idx-x0_idx)*img_info['scale'])\n tile_height = int((yf_idx-y0_idx)*img_info['scale'])\n new_filepath = get_tile_filename(\n file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx)\n tile = {\n 'idx': tile_idx,\n 'left': x0,\n 'right': xf,\n 'top': y0,\n 'bottom': yf,\n 'y0_idx': y0_idx,\n 'yf_idx': yf_idx,\n 'x0_idx': x0_idx,\n 'xf_idx': xf_idx,\n 'new_filepath': new_filepath,\n 'loaded': False,\n 'row': row,\n 'col': col,\n 'x': col*file_info['tile_width'],\n 'y': row*file_info['tile_height'],\n 'width': tile_width,\n 'height': tile_height\n }\n if img_info['invert_y']:\n tile['top'] = yf\n tile['bottom'] = y0\n if img_info['invert_x']:\n tile['left'] = xf\n tile['right'] = x0\n new_tiles[tile_idx] = tile\n print('viewer:', img_info['viewer'])\n print('new tiles', new_tiles.keys())\n return all_tiles, new_tiles", "def make_tiles_limits(im, n_splits, margin=0):\n \n if n_splits == 1:\n return [0, im.shape[1], 0, im.shape[0]]\n # number of splits per axis\n ax_splits = int(np.log2(n_splits))\n x_segments = 
split_range(im.shape[1], ax_splits)\n y_segments = split_range(im.shape[0], ax_splits)\n \n if margin > 0:\n x_segments = extend_indices(x_segments, margin=margin)\n y_segments = extend_indices(y_segments, margin=margin)\n \n # make combinations of [xmin, xmax, ymin, ymax] indices of tiles\n tiles_indices = []\n for xlim in x_segments:\n for ylim in y_segments:\n tiles_indices.append(xlim + ylim)\n return tiles_indices", "def split(self):\n sub_images = []\n\n for region in regionprops(self.cells):\n minr, minc, maxr, maxc = region.bbox\n sub_image = self.image_raw[max(0, minr - 10):maxr, max(0, minc - 10):maxc, :]\n\n sub_images.append(FQimage(data=sub_image))\n\n return sub_images", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n \n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n \n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n \n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n # colors default to 0 (i.e. black), alphas defaults to 1 (fully opaque i.e.\n # corresponding pixel fully visible in image))\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8') \n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype) \n\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n \n for i in range(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n \n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n \n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.ones(out_shape, dtype=dt)*255\n \n for tile_row in range(tile_shape[0]):\n for tile_col in range(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def split_image_with_bboxes_efficient(bboxes, image, bbox_size=50, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = 
width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n\n ymin = int(tile_height-new_height)\n ymax = int(tile_height)\n xmin = int(tile_width-new_width)\n xmax = int(tile_width)\n\n canvas[ymin:ymax, xmin:xmax] = 1\n\n query_bboxes = find_query_boxes(bboxes, xmin, xmax, ymin, ymax, bbox_size)\n\n new_bboxes = find_overlaps(canvas, query_bboxes, col, row, new_width, new_height)\n\n cropped_image = image[ymin:ymax, xmin:xmax]\n\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def split_images(x, y=None, size=(128, 128), num_part=4):\n x_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n x_imgs = x_patches.transform(x)\n # Check if number of channels is the same for grayscale\n if x.shape[-1] != x_imgs.shape[-1]:\n x_imgs = x_imgs[:, :, :, np.newaxis]\n\n if not y is None:\n y_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n y_imgs = y_patches.transform(y)\n\n # Check if number of channels is the same for grayscale\n if y.shape[-1] != y_imgs.shape[-1]:\n y_imgs = y_imgs[:, :, :, np.newaxis]\n\n return x_imgs, y_imgs\n\n return x_imgs", "def tile_size_2d(self):\n return 32.0, 32.0", "def smaller(self):\n w1, h1 = float(self.imwidth), float(self.imheight)\n w2, h2 = float(self.__huge_size), float(self.__huge_size)\n aspect_ratio1 = w1 / h1\n aspect_ratio2 = w2 / h2 # it equals to 1.0\n if aspect_ratio1 == aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(w2) # band length\n elif aspect_ratio1 > aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(w2 / aspect_ratio1)))\n k = h2 / w1 # compression ratio\n w = int(w2) # band length\n else: # aspect_ratio1 < aspect_ration2\n image = Image.new('RGB', (int(h2 * aspect_ratio1), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(h2 * aspect_ratio1) # band length\n i, j, n = 0, 1, round(0.5 + self.imheight / self.__band_width)\n while i < self.imheight:\n print('\\rOpening image: {j} from {n}'.format(j=j, n=n), end='')\n band = min(self.__band_width, self.imheight - i) # width of the tile band\n self.__tile[1][3] = band # set band width\n self.__tile[2] = self.__offset + self.imwidth * i * 3 # tile offset (3 bytes per pixel)\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, band) # set size of the tile band\n self.__image.tile = [self.__tile] # set tile\n cropped = self.__image.crop((0, 0, self.imwidth, band)) # crop tile band\n image.paste(cropped.resize((w, int(band * k) + 1), self.__filter), (0, int(i * k)))\n i += band\n j += 1\n print('\\r' + 30 * ' ' + '\\r', end='') # hide printed string\n return image", "def chunks(img, n):\n 
shape = img.shape\n imgs = []\n\n nx = int(n * (shape[1]/(shape[0] + shape[1])))\n ny = n - nx\n\n x = int(shape[0]/n)\n y = int(shape[0]/n)\n\n for i in range(nx - 1):\n line = []\n for j in range(ny - 1):\n line.append(img[y*j: y*(j+1), x*i: x*(i+1), ::])\n imgs.append(line)\n return imgs", "def stitch(dir_path, in_canels=1, choice=0):\n directory = dir_path\n array = [] # array used to create matrix\n\n p = re.compile(tiles_xy_re)\n q = re.compile(original_img_xy_re)\n\n sum_of_files = len(os.listdir(directory))\n tiles_horizontal_num = 0\n\n first = os.listdir(directory)[0] # we take a sample to extract\n # original image information such as height, width, type\n\n original = q.match(first)\n Original_width, Original_height = int(original.group(1)), int(\n original.group(2))\n im = Image.open(dir_path + '\\\\' + first)\n\n tile_h = np.array(im).shape[0]\n tile_w= np.array(im).shape[1]\n file_type = first.split(\".\")[-1]\n\n # creating array to merge all tiles to\n if choice == 2: # if we choose and\n output_array = np.ones((Original_height, Original_width, in_canels))\n else:\n output_array = np.zeros((Original_height, Original_width, in_canels))\n\n for filename in os.listdir(directory):\n\n xy = p.match(filename)\n x, y = int(xy.group(1)), int(xy.group(2)) # extracting x,y relative\n # to original img\n\n im = Image.open(dir_path + '\\\\' + filename)\n if choice == 0:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n elif choice == 1:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_or(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n elif choice == 2:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_and(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n\n array.append([x, y])\n\n if int(xy.group(1)) == 0:\n tiles_horizontal_num = tiles_horizontal_num + 1\n\n # converting array to image and saving image\n output_im = Image.fromarray(output_array.astype(np.uint8))\n file_name = \"original.\" + file_type\n path = dir_path + '\\\\' + file_name\n output_im.save(path)\n\n # array = sorted(array, key=lambda k: [k[0], k[1]])\n # numpy_array = np.array(array)\n # matrix = numpy_array.reshape(sum_of_files // tiles_horizontal_num,\n # tiles_horizontal_num, 2)", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 
'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output np ndarray to store the image\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n # colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # functionmapping\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = 
this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def image_shape_to_grids(height, width):\n out_height = tf.cast(height, tf.float32)\n out_width = tf.cast(width, tf.float32)\n x_range = tf.range(out_width, dtype=tf.float32)\n y_range = tf.range(out_height, dtype=tf.float32)\n x_grid, y_grid = tf.meshgrid(x_range, y_range, indexing='xy')\n return (y_grid, x_grid)", "def get_image_tiles_tensor(image, label, image_path, patch_width):\n tiles_before_reshape = tensorflow.extract_image_patches(\n tensorflow.expand_dims(image, dim=0), [1, patch_width, patch_width, 1],\n [1, patch_width, patch_width, 1], [1, 1, 1, 1], 'VALID')\n tiles = tensorflow.reshape(tiles_before_reshape, [-1, patch_width, patch_width, 1])\n\n labels = tensorflow.tile(tensorflow.expand_dims(label, dim=0), [tensorflow.shape(tiles)[0], 1])\n image_paths = tensorflow.tile(\n tensorflow.expand_dims(image_path, dim=0), [tensorflow.shape(tiles)[0], 1])\n\n return tiles, labels, image_paths", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\r\n scale_rows_to_unit_interval=True,\r\n output_pixel_vals=True):\r\n\r\n assert len(img_shape) == 2\r\n assert len(tile_shape) == 2\r\n assert len(tile_spacing) == 2\r\n\r\n # The expression below can be re-written in a more C style as\r\n # follows :\r\n #\r\n # out_shape = [0,0]\r\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\r\n # tile_spacing[0]\r\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\r\n # tile_spacing[1]\r\n out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp\r\n in zip(img_shape, tile_shape, tile_spacing)]\r\n\r\n if isinstance(X, tuple):\r\n assert len(X) == 4\r\n # Create an output numpy ndarray to store the image\r\n if output_pixel_vals:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype='uint8')\r\n else:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype=X.dtype)\r\n\r\n #colors default to 0, alpha defaults to 1 (opaque)\r\n if output_pixel_vals:\r\n channel_defaults = [0, 0, 0, 255]\r\n else:\r\n channel_defaults = [0., 0., 0., 1.]\r\n\r\n for i in xrange(4):\r\n if X[i] is None:\r\n # if channel is None, fill it with zeros of the correct\r\n # dtype\r\n dt = out_array.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array[:, :, i] = numpy.zeros(out_shape,\r\n dtype=dt) + channel_defaults[i]\r\n else:\r\n # use a recurrent call to compute the channel and store it\r\n # in the output\r\n out_array[:, :, i] = tile_raster_images(\r\n X[i], img_shape, tile_shape, tile_spacing,\r\n scale_rows_to_unit_interval, output_pixel_vals)\r\n return out_array\r\n\r\n else:\r\n # if we are dealing with only one channel\r\n H, W = img_shape\r\n Hs, Ws = tile_spacing\r\n\r\n # generate a matrix to store the output\r\n dt = X.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array = numpy.zeros(out_shape, dtype=dt)\r\n\r\n for tile_row in xrange(tile_shape[0]):\r\n for tile_col in xrange(tile_shape[1]):\r\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\r\n this_x = X[tile_row * tile_shape[1] + tile_col]\r\n if scale_rows_to_unit_interval:\r\n # if we should scale values to be between 0 and 1\r\n # do this by calling the `scale_to_unit_interval`\r\n # function\r\n this_img = scale_to_unit_interval(\r\n 
this_x.reshape(img_shape))\r\n else:\r\n this_img = this_x.reshape(img_shape)\r\n # add the slice to the corresponding position in the\r\n # output array\r\n c = 1\r\n if output_pixel_vals:\r\n c = 255\r\n out_array[\r\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\r\n tile_col * (W + Ws): tile_col * (W + Ws) + W\r\n ] = this_img * c\r\n return out_array", "def getNumTiles(self):\n return self.w * self.h", "def tile_iterator(im,\r\n blocksize = (64, 64),\r\n padsize = (64,64),\r\n mode = \"constant\",\r\n verbose = False):\r\n\r\n if not(im.ndim == len(blocksize) ==len(padsize)):\r\n raise ValueError(\"im.ndim (%s) != len(blocksize) (%s) != len(padsize) (%s)\"\r\n %(im.ndim , len(blocksize) , len(padsize)))\r\n\r\n subgrids = tuple([int(np.ceil(1.*n/b)) for n,b in zip(im.shape, blocksize)])\r\n\r\n\r\n #if the image dimension are not divible by the blocksize, pad it accordingly\r\n pad_mismatch = tuple([(s*b-n) for n,s, b in zip(im.shape,subgrids,blocksize)])\r\n\r\n if verbose:\r\n print(\"tile padding... \")\r\n\r\n im_pad = np.pad(im,[(p,p+pm) for pm,p in zip(pad_mismatch,padsize)], mode = mode)\r\n\r\n # iterates over cartesian product of subgrids\r\n for i,index in enumerate(product(*[range(sg) for sg in subgrids])):\r\n # the slices\r\n # if verbose:\r\n # print(\"tile %s/%s\"%(i+1,np.prod(subgrids)))\r\n\r\n # dest[s_output] is where we will write to\r\n s_input = tuple([slice(i*b,(i+1)*b) for i,b in zip(index, blocksize)])\r\n\r\n\r\n\r\n s_output = tuple([slice(p,-p-pm*(i==s-1)) for pm,p,i,s in zip(pad_mismatch,padsize, index, subgrids)])\r\n\r\n\r\n s_output = tuple([slice(p,b+p-pm*(i==s-1)) for b,pm,p,i,s in zip(blocksize,pad_mismatch,padsize, index, subgrids)])\r\n\r\n\r\n s_padinput = tuple([slice(i*b,(i+1)*b+2*p) for i,b,p in zip(index, blocksize, padsize)])\r\n padded_block = im_pad[s_padinput]\r\n\r\n\r\n\r\n yield padded_block, s_input, s_output", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. 
of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)", "def get_tile_image(imgs, tile_shape=None, result_img=None, margin_color=None):\n def get_tile_shape(img_num):\n x_num = 0\n y_num = int(math.sqrt(img_num))\n while x_num * y_num < img_num:\n x_num += 1\n return x_num, y_num\n\n if tile_shape is None:\n tile_shape = get_tile_shape(len(imgs))\n\n # get max tile size to which each image should be resized\n max_height, max_width = np.inf, np.inf\n for img in imgs:\n max_height = min([max_height, img.shape[0]])\n max_width = min([max_width, img.shape[1]])\n\n # resize and concatenate images\n for i, img in enumerate(imgs):\n h, w = img.shape[:2]\n h_scale, w_scale = max_height / h, max_width / w\n scale = min([h_scale, w_scale])\n h, w = int(scale * h), int(scale * w)\n img = cv2.resize(img, (w, h))\n img = centerize(img, (max_height, max_width, 3),\n margin_color=margin_color)\n imgs[i] = img\n return _tile_images(imgs, tile_shape, result_img,\n margin_color=margin_color)", "def load_tile_table(filename, width, height):\n\ttry: \n\t\ttile_table = []\n\t\timage = pygame.image.load(filename).convert()\n\texcept:\n\t\tprint(\"Could not load tileset:\", filename)\n\telse:\n\t\timage_width, image_height = image.get_size()\n\t\tfor tile_x in range(0, int(image_width/width)):\n\t\t\tline = []\n\t\t\ttile_table.append(line)\n\t\t\tfor tile_y in range(0, int(image_height/height)):\n\t\t\t\trect = (tile_x*width, tile_y*height, width, height)\n\t\t\t\tline.append(image.subsurface(rect))\n\treturn tile_table", "def __init__(self, width, height):\n self.clean_tiles = []\n self.width = width\n self.height = height", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in 
zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height", "def _create_tile(self, img, mask, x,y,w,h, mode=\"binary\", is_stroma=False):\n tile = img[y:y+h, x:x+w, :]\n tile_mask = mask[y:y+h, x:x+w, :]\n\n if mode == \"binary\":\n tile_mask = self._convert_rgb_to_binary_mask(tile_mask)\n # np.where(tile_mask>0,1,0) assign background as 1, tumor as 0\n tile_mask = np.where(tile_mask>0,0,1) \n\n elif mode == \"multiclass\":\n tile_mask = self._cvt_mask3d_to_mask2d(tile_mask, self.mapper, 0)\n tile_mask = tile_mask.astype('uint8')\n\n if is_stroma:\n tile_mask = np.zeros(tile_mask.shape, \"uint8\")\n\n return tile, tile_mask", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def splitH_unlifted(pixmap):\n h = pixmap.shape[0]\n if h % 2 == 1:\n h = h // 2\n return [pixmap[:h,:], pixmap[h+1:,:]]\n else:\n h = h // 2\n return [pixmap[:h,:], pixmap[h:,:]]", "def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles", "def 
choose_optimal_image_split(im, method='im_size', min_tile_size=360000):\n \n n_cores = os.cpu_count()\n # number of segmented objects, drop the background value\n n_obj = np.unique(im).size - 1\n \n if method == 'im_size':\n # avoid too many splits if image is not so big\n im_size = im.nbytes # slightly different from sys.getsizeof(im)\n # max power of 2\n max_i = int(np.log2(n_cores)) + 1\n n_splits = 1\n for i in range(1, max_i):\n new_split = 2**i\n if im_size / new_split >= min_tile_size:\n n_splits = new_split\n else:\n break\n elif method == 'naive':\n n_splits = n_cores\n \n return n_splits", "def test_unbounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = self.expected_tile_height\n\t\tself.expected_cols = self.expected_tile_width\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage without specifying dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def __init__(self, group, image, x, y, tile_size):\n\t\tsuper().__init__(group, image, x, y, tile_size)", "def build_grid(tiles, tile_size, grid_rows=None, grid_cols=None):\n if grid_rows is None or grid_cols is None:\n grid_rows = int(math.sqrt(len(tiles)))\n grid_cols = int(math.ceil(len(tiles) / grid_rows))\n\n grid = np.zeros(\n (grid_rows * tile_size[1], grid_cols * tile_size[0], 3), np.uint8)\n for tile_id, tile in enumerate(tiles):\n assert(tile.shape[0] == tile_size[1] and tile.shape[1] == tile_size[0])\n yy = int(tile_id / grid_cols)\n xx = tile_id % grid_cols\n grid[(yy * tile_size[1]):((yy + 1) * tile_size[1]),\n (xx * tile_size[0]):((xx + 1) * tile_size[0]), :] = tile\n return grid", "def arrange_tiles(self, layer):\n\n # número de tiles en 'x'\n width = self.width\n arranged_tiles = layer.arranged_tiles\n\n row = -1\n\n # convierte una lista en un diccionario\n for col, tile in enumerate(layer.tiles):\n # calcula la ubicación en dos dimensiones (fila y columna) de cada tile,\n # los tiles originalmente están ordenados en línea\n col %= width\n if col == 0:\n row += 1\n\n # excluye los tiles con id 0,\n # id 0 representa un espacio vacío en el tilemap\n if tile != 0:\n arranged_tiles[(row, col)] = tile\n\n # libera la memoria ocupada por la lista de tiles\n layer.tiles = None", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def __init__(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.numTiles = width*height\n\t\tself.tiles = []\n\t\tfor i in range(0, width):\n\t\t\tfor j in range(0, height):\n\t\t\t\tself.tiles.append(Tile(i, j))", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # 
convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def pack_images(images, rows, cols):\n shape = tf.shape(images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(images, [0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images", "def split(image):\n shred_width = 32\n image_width, image_height = image.size\n shreds = {}\n for x in range(0, image_width, shred_width):\n img = image.crop((x, 0, shred_width + x, image_height))\n s = Shred(img)\n shreds[x] = s\n return shreds", "def test_bounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = 5\n\t\tself.expected_cols = 4\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage with specific dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image, rows=self.expected_rows, cols=self.expected_cols)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')", "def pack_images(images, rows, cols):\n shape = tf.shape(input=images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(input=images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(a=images, perm=[0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images", "def tiffmatrixSplit(kv):\n filename, tiffmat = kv[0], kv[1]\n # Each image is 500x500\n kv_list = []\n if len(tiffmat) == 2500:\n num_matrices = 5**2\n split_size = 5\n elif len(tiffmat) == 5000:\n num_matrices = 10**2\n split_size = 10\n else:\n raise ValueError(\"TIFF file has dimensions other than 2500x2500 or 5000x5000\")\n all_matrices = []\n file_names = [filename + '-' + str(i) for i in np.arange(num_matrices)]\n big_rows = np.vsplit(tiffmat, split_size)\n for row in big_rows:\n all_matrices += np.hsplit(row, 5)\n return list(zip(file_names,all_matrices))", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def tiles_to_images(wfc_ns, tile_grid, tile_catalog, tile_size, visualize=False, partial=False, grid_count=None):\n new_img = np.zeros((tile_grid.shape[0] * tile_size, tile_grid.shape[1] * tile_size, wfc_ns.channels), dtype=np.int64)\n if partial and (len(tile_grid.shape) > 2):\n for i in 
range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n pixel_merge_list = []\n for k in range(tile_grid.shape[2]):\n tile = tile_grid[i,j,k]\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = None#[200, 0, 200]\n #print(tile)\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n pixel = [200, 0, 200]\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n else:\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile) and (WFC_NULL_VALUE != tile): # TODO: instead of -3, use MaskedArrays\n pixel = tile_catalog[tile][u,v]\n if not(pixel is None):\n pixel_merge_list.append(pixel)\n if len(pixel_merge_list) == 0:\n if 0 == (i + j) % 2:\n pixel_merge_list.append([255, 0, 255])\n else:\n pixel_merge_list.append([0, 172, 172])\n \n if len(pixel_merge_list) > 0:\n pixel_to_add = pixel_merge_list[0]\n if len(pixel_merge_list) > 1:\n pixel_to_add = [round(sum(x) / len(pixel_merge_list)) for x in zip(*pixel_merge_list)]\n try:\n while (len(pixel_to_add) < wfc_ns.channels):\n pixel_to_add.append(255)\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = pixel_to_add\n except TypeError as e:\n wfc_logger.warning(e)\n wfc_logger.warning(\"Tried to add {} from {}\".format(pixel_to_add, pixel_merge_list))\n else:\n for i in range(tile_grid.shape[0]):\n for j in range(tile_grid.shape[1]):\n tile = tile_grid[i,j]\n for u in range(wfc_ns.tile_size):\n for v in range(wfc_ns.tile_size):\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = [200, 0, 200]\n #print(f\"tile: {tile}\")\n if (visualize) and ((-1 == tile) or (-2 == tile)):\n if (-1 == tile):\n if 0 == (i + j) % 2:\n pixel = [255, 0, 255]\n if (-2 == tile):\n pixel = [0, 255, 255]\n else:\n if (WFC_PARTIAL_BLANK != tile):\n pixel = tile_catalog[tile][u,v]\n # Watch out for images with more than 3 channels!\n new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = np.resize(pixel, new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v].shape)\n logging.debug('Output image shape is', new_img.shape)\n return new_img", "def grid_images(im_paths, w=4, h=4, margin=0, scale=0.5):\n n = w * h\n if len(im_paths) > n:\n raise ValueError('Number of images ({}) does not conform to '\n 'matrix size {}x{}'.format(w, h, len(im_paths)))\n return\n# unscaled_imgs = [gray_to_map_and_flip(img=cv2.imread(fp)[..., 0], colormap='o', rotate=180) for fp in im_paths]\n unscaled_imgs = [cv2.imread(fp) for fp in im_paths]\n imgs = [cv2.resize(I, (int(I.shape[1] * scale), int(I.shape[0] * scale))) for I in unscaled_imgs]\n if any(i.shape != imgs[0].shape for i in imgs[1:]):\n raise ValueError('Not all images have the same shape')\n return\n img_h, img_w, img_c = imgs[0].shape\n m_x = 0\n m_y = 0\n if margin is not None:\n m_x = int(margin)\n m_y = m_x\n imgmatrix = np.zeros((int(img_h * h + m_y * (h - 1)),\n int(img_w * w + m_x * (w - 1)),\n img_c), dtype=np.uint8)\n imgmatrix.fill(255)\n positions = itertools.product(range(int(w)), range(int(h)))\n for (x_i, y_i), img in zip(positions, imgs):\n x = x_i * (img_w + m_x)\n y = y_i * (img_h + m_y)\n imgmatrix[y: y + img_h, x: x + img_w, :] = img\n return imgmatrix", "def build_filler_images(self):", "def generate_all_sizes():\n dimension = 
4\n h = 1\n while h <= dimension:\n w = 1\n while w <= dimension:\n for y in range(0, dimension, h):\n for x in range(0, dimension, w):\n yield FeatureSize(x / dimension,\n y / dimension,\n w / dimension,\n h / dimension)\n w = w * 2\n h = h * 2", "def getNumTiles(self):\n return self.height * self.width", "def get_tile_size(self, map_size = None, show_info = None):\n if not map_size: map_size = self.map_size\n w,h = self.img_size\n x_tiles,y_tiles = map_size\n\n tile_raw_w = w / x_tiles\n tile_raw_h = h / y_tiles\n\n if self.debug:\n print(f' ► Raw tile width: {tile_raw_w}\\n ► Raw tile height: {tile_raw_h}')\n\n tile_w = int(round(tile_raw_w))\n tile_h = int(round(tile_raw_h))\n\n if show_info:\n print(f' Image Size: {w} x {h} px\\n Tile Size: {tile_w} x {tile_h} px\\n Map Size: {x_tiles} x {y_tiles} tiles')\n\n error_w = tile_w - tile_raw_w\n error_h = tile_h - tile_raw_h\n print(f'\\n -=ERROR INFO=-\\n Tile Size Width Error: {round(error_w,4)} px \\n Tile Size Height Error: {round(error_h,4)} px \\n Total Width Rounding Error: {round(error_w * x_tiles,4)} px \\n Total Height Rounding Error: {round(error_h * y_tiles,4)} px\\n')\n\n return (tile_raw_w,tile_raw_h)", "def _tile_image(self, data):\n image = Image.open(BytesIO(data))\n return image.convert('RGBA')", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')", "def extract_walls(img_array,x_scale,y_scale,wall_height):\n\n wall_th = 2\n length = 0\n wall_list = []\n\n #check for horizontal walls first\n for row in range(img_array.shape[0]):\n for col in range(img_array.shape[1]):\n \n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if left_edge(sec):\n #check two steps to the right\n next_sec = img_array.astype(int)[row:row+2, col+1:col+3]\n next_next_sec = img_array.astype(int)[row:row+2, col+2:col+4]\n\n #if horizontal wall, get coordinates and count length\n if is_wall(next_sec) and not right_edge(next_next_sec): \n #record corner coordinates\n x = col +1\n y = row\n while is_wall(next_sec):\n #start counting length across, until right edge found\n length +=1\n col +=1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list \n new_wall = Wall(x*x_scale,y*y_scale,length*x_scale,wall_th*y_scale,wall_height)\n wall_list.append(new_wall)\n length = 0\n\n #check for vertical walls\n for col in range(img_array.shape[1]):\n for row in range(img_array.shape[0]):\n\n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if top_edge(sec): \n #check two steps below\n next_sec = img_array.astype(int)[row+1:row+3, col:col+2]\n next_next_sec = img_array.astype(int)[row+2:row+4, col:col+2]\n\n #if vertical wall, get coordinates and count length\n if is_wall(next_sec) and is_wall(next_next_sec):\n x = col\n y = row\n while is_wall(next_sec):\n #start counting length downwards, until bottom edge found\n length += 1\n row += 1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list\n new_wall = Wall(x*x_scale,y*y_scale,wall_th*x_scale,length*y_scale, wall_height)\n wall_list.append(new_wall)\n length = 0\n\n return wall_list", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def _convert_chunk_to_tiles(\n feature_data: 
np.array, loss_window_radius: int, window_radius: int\n) -> Tuple[np.array, np.array]:\n\n output_array = []\n col_index = []\n for _col in range(0, feature_data.shape[1], loss_window_radius * 2):\n col_index.append(min(_col, feature_data.shape[1] - window_radius * 2))\n output_array.append(feature_data[:, col_index[-1] : col_index[-1] + window_radius * 2, :])\n output_array = np.stack(output_array)\n output_array = np.reshape(\n output_array, (output_array.shape[0], output_array.shape[1], output_array.shape[2], feature_data.shape[-1])\n )\n\n col_index = np.array(col_index)\n\n return output_array, col_index", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n 
# Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4", "def get_tiles(self):\n\n tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles" ]
[ "0.76203066", "0.71610355", "0.69022644", "0.6677268", "0.6614609", "0.6609202", "0.6596505", "0.6574447", "0.6529179", "0.652334", "0.650743", "0.650743", "0.6493861", "0.6485636", "0.64644873", "0.6450046", "0.644942", "0.6383776", "0.6369156", "0.634634", "0.63084143", "0.6303704", "0.6262621", "0.62292117", "0.62207806", "0.6218618", "0.6204421", "0.6173811", "0.61385846", "0.61018085", "0.6076951", "0.60750395", "0.60674804", "0.60658497", "0.6041334", "0.60331964", "0.60298806", "0.6027999", "0.60100174", "0.5999131", "0.5994506", "0.59831095", "0.59787637", "0.5974916", "0.5951949", "0.5944011", "0.5941358", "0.5928414", "0.59234947", "0.59191704", "0.5917822", "0.59096587", "0.58995575", "0.5897591", "0.58869165", "0.5872089", "0.58599055", "0.58331794", "0.58268756", "0.5813925", "0.58004856", "0.578935", "0.5786165", "0.57856476", "0.57851446", "0.5781576", "0.57674956", "0.5757107", "0.57512414", "0.57149005", "0.57133627", "0.57132316", "0.5713165", "0.5704535", "0.56986415", "0.56910986", "0.5677013", "0.5661637", "0.56425846", "0.56322527", "0.563166", "0.56288195", "0.56288195", "0.5625313", "0.56184787", "0.56181616", "0.5615153", "0.5606748", "0.5606361", "0.5592302", "0.55905795", "0.5587068", "0.5581168", "0.5578418", "0.55759865", "0.55675906", "0.55671155", "0.5554739", "0.55544484", "0.55525887" ]
0.7902135
0
Splits an image into tiles by the number of tiles. x_ntiles: number of tiles horizontally; y_ntiles: number of tiles vertically
def split_image_into_number_of_tiles( arr: Image, x_ntiles: int, y_ntiles: int, overlap: int ): img_width, img_height = arr.shape[-1], arr.shape[-2] tile_w = img_width // x_ntiles tile_h = img_height // y_ntiles return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)
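For readers skimming the row above: the positive document derives a fixed tile size from the requested horizontal and vertical tile counts and then delegates to a size-based splitter. Below is a minimal, self-contained sketch of that idea; the name split_by_tile_count, the zero-overlap simplification, and the NumPy-only slicing are illustration-only assumptions and are not part of the dataset row.

import numpy as np

def split_by_tile_count(arr: np.ndarray, x_ntiles: int, y_ntiles: int):
    # Derive tile width/height from the requested tile counts, then slice
    # the last two axes row by row, column by column (zero overlap).
    img_h, img_w = arr.shape[-2], arr.shape[-1]
    tile_w = img_w // x_ntiles
    tile_h = img_h // y_ntiles
    tiles = []
    for i in range(y_ntiles):
        for j in range(x_ntiles):
            tiles.append(arr[..., i * tile_h:(i + 1) * tile_h,
                             j * tile_w:(j + 1) * tile_w])
    return tiles

# Example: split a 100 x 120 image into 4 tiles across and 2 tiles down.
img = np.zeros((100, 120), dtype=np.uint8)
tiles = split_by_tile_count(img, x_ntiles=4, y_ntiles=2)
assert len(tiles) == 8 and tiles[0].shape == (50, 30)

The actual document additionally threads an overlap parameter through to split_image_into_tiles_of_size and returns tile/padding metadata, which this sketch omits.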
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles", "def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info", "def make_tiles_limits(im, n_splits, margin=0):\n \n if n_splits == 1:\n return [0, im.shape[1], 0, im.shape[0]]\n # number of splits per axis\n ax_splits = int(np.log2(n_splits))\n x_segments = split_range(im.shape[1], ax_splits)\n y_segments = split_range(im.shape[0], ax_splits)\n \n if margin > 0:\n x_segments = extend_indices(x_segments, margin=margin)\n y_segments = extend_indices(y_segments, margin=margin)\n \n # make combinations of [xmin, xmax, ymin, ymax] indices of tiles\n tiles_indices = []\n for xlim in x_segments:\n for ylim in y_segments:\n tiles_indices.append(xlim + ylim)\n return tiles_indices", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def chunks(img, n):\n shape = img.shape\n imgs = []\n\n nx = int(n * (shape[1]/(shape[0] + shape[1])))\n ny = n - nx\n\n x = int(shape[0]/n)\n y = int(shape[0]/n)\n\n for i in range(nx - 1):\n line = []\n for j in range(ny - 1):\n line.append(img[y*j: y*(j+1), x*i: x*(i+1), ::])\n imgs.append(line)\n return imgs", "def image_to_tiles(img, tile_size):\n 
padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def split_tiles(module_data):\n raise NotImplementedError", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight + tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile", "def split_image_with_bboxes(bboxes, image, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n assert tiles % 2 == 0, \"Error in splitting images. 
Uneven number of images requested.\"\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n tile_start = (int(tile_height-new_height), int(tile_width-new_width))\n tile_end = (int(tile_height), int(tile_width))\n canvas[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]] = 1\n\n new_bboxes = []\n for bbox in bboxes:\n\n xmin, ymin, xmax, ymax = bbox\n\n # Overlap of image tile and bbox\n bbox_image = np.zeros_like(image)\n bbox_image[ymin:ymax, xmin:xmax] = 1\n\n overlap = np.logical_and(canvas, bbox_image)\n\n if np.sum(overlap) < 1:\n continue\n\n overlap_index = np.argwhere(overlap)\n\n overlap_xmin, overlap_ymin = overlap_index[0][1], overlap_index[0][0]\n overlap_xmax, overlap_ymax = overlap_index[-1][1]+1, overlap_index[-1][0]+1\n\n new_xmin = overlap_xmin - col * new_width\n new_ymin = overlap_ymin - row * new_height\n new_xmax = overlap_xmax - col * new_width\n new_ymax = overlap_ymax - row * new_height\n\n new_bbox = (new_xmin, new_ymin, new_xmax, new_ymax)\n\n new_bboxes.append(new_bbox)\n\n cropped_image = image[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]]\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def getNumTiles(self):\n return self.w * self.h", "def calcul_xy_array(img_x, img_y, tile_x, tile_y):\n array = []\n\n modu_x = img_x % tile_x\n modu_y = img_y % tile_y\n div_x = img_x // tile_x\n div_y = img_y // tile_y\n current_x = 0\n current_y = 0\n\n for i in range(div_y):\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n current_y += tile_y\n current_x = 0\n\n if modu_y:\n current_y = img_y - tile_y\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n\n return array", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} 
Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def getNumTiles(self):\n return self.height * self.width", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def get_tiles(self):\n\n tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, 
WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = 
np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def arrange_tiles(self, layer):\n\n # número de tiles en 'x'\n width = self.width\n arranged_tiles = layer.arranged_tiles\n\n row = -1\n\n # convierte una lista en un diccionario\n for col, tile in enumerate(layer.tiles):\n # calcula la ubicación en dos dimensiones (fila y columna) de cada tile,\n # los tiles originalmente están ordenados en línea\n col %= width\n if col == 0:\n row += 1\n\n # excluye los tiles con id 0,\n # id 0 representa un espacio vacío en el tilemap\n if tile != 0:\n arranged_tiles[(row, col)] = tile\n\n # libera la memoria ocupada por la lista de tiles\n layer.tiles = None", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def __createTiles(self, length, width, height):\n\n rectangles = []\n centrePoints = []\n \n # Defines the dimensions required to fit all tiles\n totalHeight = length * height\n totalWidth = length * width\n \n # Go through all tiles\n y = length\n while y < totalHeight + length:\n\n x = length\n while x < totalWidth + length:\n # Creates a Rect object\n rectangle = pygame.Rect(x, y, length, length)\n rectangles.append(rectangle)\n\n # Calculates the tile's centre point.\n centrePoint = (math.floor(x + length/2), math.floor(y + length/2))\n centrePoints.append(centrePoint)\n\n x += length\n y += length\n\n return rectangles, centrePoints", "def createTiles():\n Renderer.Clear()\n map = []\n w, h = len(testmap[0]), len(testmap)\n x, y = 0, 0\n for row in testmap:\n for char in row:\n map.append(makeTile(char, x, y))\n x += 1\n y += 1\n x = 0\n\n return map, w, h", "def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)", "def __init__(self, width, height, tilesize = 256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = ( math.ceil( width / tilesize ), math.ceil( height / tilesize ) )\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.push( tiles )\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append( imagesize );\n\n while (imagesize[0] > tilesize or imageSize[1] > tilesize ):\n imagesize = (math.floor( imagesize[0] / 2 ), math.floor( imagesize[1] / 2) )\n tiles = ( math.ceil( imagesize[0] / tilesize ), math.ceil( imagesize[1] / tilesize ) )\n self.tierSizeInTiles.append( tiles )\n self.tierImageSize.append( imagesize )\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the 
given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] + self.tileCountUpToTier[i-1]\n )", "def get_image_tiles_tensor(image, label, image_path, patch_width):\n tiles_before_reshape = tensorflow.extract_image_patches(\n tensorflow.expand_dims(image, dim=0), [1, patch_width, patch_width, 1],\n [1, patch_width, patch_width, 1], [1, 1, 1, 1], 'VALID')\n tiles = tensorflow.reshape(tiles_before_reshape, [-1, patch_width, patch_width, 1])\n\n labels = tensorflow.tile(tensorflow.expand_dims(label, dim=0), [tensorflow.shape(tiles)[0], 1])\n image_paths = tensorflow.tile(\n tensorflow.expand_dims(image_path, dim=0), [tensorflow.shape(tiles)[0], 1])\n\n return tiles, labels, image_paths", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def getNumTiles(self):\n return (self.width) * (self.height)", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def split(self):\n sub_images = []\n\n for region in regionprops(self.cells):\n minr, minc, maxr, maxc = region.bbox\n sub_image = self.image_raw[max(0, minr - 10):maxr, max(0, minc - 10):maxc, :]\n\n sub_images.append(FQimage(data=sub_image))\n\n return sub_images", "def get_shape_for_tile_split(\n arr_height: int, arr_width: int, nchannels: int, tile_height: int, tile_width: int\n) -> list[int]:\n shape = [\n arr_height // tile_height,\n tile_height,\n arr_width // tile_width,\n tile_width,\n ]\n if nchannels > 1:\n shape.append(nchannels)\n return shape", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def slice(\n filename,\n number_tiles=None,\n col=None,\n row=None,\n save=True,\n DecompressionBombWarning=True,\n):\n if DecompressionBombWarning is False:\n Image.MAX_IMAGE_PIXELS = None\n\n im = Image.open(filename)\n im_w, im_h = im.size\n\n columns = 0\n rows = 0\n if number_tiles:\n validate_image(im, number_tiles)\n columns, rows = calc_columns_rows(number_tiles)\n else:\n validate_image_col_row(im, col, row)\n columns = col\n rows = row\n\n tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n tiles = []\n number = 1\n for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.\n for pos_x in range(0, im_w - columns, tile_w): # as above.\n area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n image = im.crop(area)\n position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)\n coords = (pos_x, pos_y)\n tile = Tile(image, number, position, coords)\n tiles.append(tile)\n number += 1\n if save:\n save_tiles(\n tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)\n )\n return tuple(tiles)", "def split_images(x, y=None, size=(128, 128), num_part=4):\n x_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n x_imgs = x_patches.transform(x)\n # Check if number of channels is the same for grayscale\n if x.shape[-1] != x_imgs.shape[-1]:\n x_imgs = x_imgs[:, :, :, np.newaxis]\n\n if not y is None:\n y_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, 
random_state=0)\n y_imgs = y_patches.transform(y)\n\n # Check if number of channels is the same for grayscale\n if y.shape[-1] != y_imgs.shape[-1]:\n y_imgs = y_imgs[:, :, :, np.newaxis]\n\n return x_imgs, y_imgs\n\n return x_imgs", "def image_shape_to_grids(height, width):\n out_height = tf.cast(height, tf.float32)\n out_width = tf.cast(width, tf.float32)\n x_range = tf.range(out_width, dtype=tf.float32)\n y_range = tf.range(out_height, dtype=tf.float32)\n x_grid, y_grid = tf.meshgrid(x_range, y_range, indexing='xy')\n return (y_grid, x_grid)", "def _convert_chunk_to_tiles(\n feature_data: np.array, loss_window_radius: int, window_radius: int\n) -> Tuple[np.array, np.array]:\n\n output_array = []\n col_index = []\n for _col in range(0, feature_data.shape[1], loss_window_radius * 2):\n col_index.append(min(_col, feature_data.shape[1] - window_radius * 2))\n output_array.append(feature_data[:, col_index[-1] : col_index[-1] + window_radius * 2, :])\n output_array = np.stack(output_array)\n output_array = np.reshape(\n output_array, (output_array.shape[0], output_array.shape[1], output_array.shape[2], feature_data.shape[-1])\n )\n\n col_index = np.array(col_index)\n\n return output_array, col_index", "def split_image_with_bboxes_efficient(bboxes, image, bbox_size=50, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n\n ymin = int(tile_height-new_height)\n ymax = int(tile_height)\n xmin = int(tile_width-new_width)\n xmax = int(tile_width)\n\n canvas[ymin:ymax, xmin:xmax] = 1\n\n query_bboxes = find_query_boxes(bboxes, xmin, xmax, ymin, ymax, bbox_size)\n\n new_bboxes = find_overlaps(canvas, query_bboxes, col, row, new_width, new_height)\n\n cropped_image = image[ymin:ymax, xmin:xmax]\n\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, 
num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles", "def toXY(im, blocksize):\n blocks = img.reshape(\n img.shape[0]//blocksize, \n blocksize, \n img.shape[1]//blocksize, \n blocksize, 3).swapaxes(1, 2)\n return blocks", "def __init__(self, width, height, tilesize=256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.append(tiles)\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append(imagesize)\n\n while (imagesize[0] > tilesize or imagesize[1] > tilesize):\n imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))\n tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))\n self.tierSizeInTiles.append(tiles)\n self.tierImageSize.append(imagesize)\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +\n self.tileCountUpToTier[i-1]\n )", "def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))", "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def tileY(self,ntiles,mirror=False):\n ntiles = int(ntiles)\n print('Creating',ntiles,'horizontal tiles')\n print(' before:',self.U.shape)\n if mirror:\n # [0 1 2] --> [0 1 2 1 0 1 2 .. ]\n NYnew = (self.NY-1)*ntiles + 1\n Unew = np.zeros((3,self.N,NYnew,self.NZ))\n Tnew = np.zeros(( self.N,NYnew,self.NZ))\n Unew[:,:,:self.NY,:] = self.U[:,:,:self.NY,:]\n Tnew[ :,:self.NY,:] = self.T[ :,:self.NY,:]\n delta = self.NY - 1\n flipped = True\n for i in range(1,ntiles):\n if flipped:\n Unew[:,:,i*delta+1:(i+1)*delta+1,:] = self.U[:,:,delta-1::-1,:]\n Tnew[ :,i*delta+1:(i+1)*delta+1,:] = self.T[ :,delta-1::-1,:]\n else:\n Unew[:,:,i*delta+1:(i+1)*delta+1,:] = self.U[:,:,1:,:]\n Tnew[ :,i*delta+1:(i+1)*delta+1,:] = self.T[ :,1:,:]\n flipped = not flipped\n self.U = Unew\n self.T = Tnew\n else:\n # [0 1 2] --> [0 1 0 1 .. 
0 1 2]\n self.U = np.tile(self.U[:,:,:-1,:],(1,1,ntiles,1))\n self.T = np.tile(self.T[ :,:-1,:],( 1,ntiles,1))\n Uplane0 = np.zeros((3,self.N,1,self.NZ))\n Tplane0 = np.zeros(( self.N,1,self.NZ))\n Uplane0[:,:,0,:] = self.U[:,:,-1,:]\n Tplane0[ :,0,:] = self.T[ :,-1,:]\n self.U = np.concatenate((self.U,Uplane0),axis=1)\n self.T = np.concatenate((self.T,Tplane0),axis=1)\n print(' after :',self.U.shape)\n\n self.NY = NYnew\n assert( self.U.shape == (3,self.N,self.NY,self.NZ) )\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def lap_split_n(img, n):\n levels = []\n\n print(\"inside lap_split_n function \")\n\n for i in range(n):\n img, hi = lap_split(img)\n levels.append(hi)\n levels.append(img)\n return levels[::-1]", "def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))", "def _split_heads(self, x, is_picture):\n if is_picture is False:\n if len(x.shape) != 3:\n raise ValueError(\"x must have rank 3\")\n shape = x.shape\n return x.reshape(shape[0], shape[1], self.num_heads, shape[2]//self.num_heads).permute(0, 2, 1, 3).contiguous()\n else:\n if len(x.shape) != 5:\n raise ValueError(\"x must have rank 5\")\n shape = x.shape\n return x.reshape(shape[0], shape[1], shape[2], shape[3], self.num_heads, shape[4]//self.num_heads).permute(0, 4, 1, 2, 3, 5).contiguous()", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def create_grid(images, n_rows=4, n_cols=4):\n k = min(n_rows * n_cols, len(images))\n indices = [i for i in range(k)]\n return _create_grid(images, indices, n_rows, n_cols)", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor 
x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. (1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles", "def render_tiles(output):\n chunks = [output[i:i + 3] for i in range(0, len(output), 3)]\n max_i = max_j = 0\n for i, j, _ in chunks:\n max_i, max_j = max(i, max_i), max(j, max_j)\n\n matrix = [[None] * (max_j + 1) for _ in range(max_i + 1)]\n\n for i, j, tile_id in chunks:\n matrix[i][j] = draw_tile(tile_id)\n\n for i, row in enumerate(matrix):\n matrix[i] = \" \".join(row)\n return matrix", "def calculate_min_max_tiles(self):", "def immed_nbrs(tile):\n return [nbr for nbr in [TILES_BY_IDX.get(xy) for xy in xy_nbrs(tile['xyidx'])]\n if nbr is not None]", "def tiffmatrixSplit(kv):\n filename, tiffmat = kv[0], kv[1]\n # Each image is 500x500\n kv_list = []\n if len(tiffmat) == 2500:\n num_matrices = 5**2\n split_size = 5\n elif len(tiffmat) == 5000:\n num_matrices = 10**2\n split_size = 10\n else:\n raise ValueError(\"TIFF file has dimensions other than 2500x2500 or 5000x5000\")\n all_matrices = []\n file_names = [filename + '-' + str(i) for i in 
np.arange(num_matrices)]\n big_rows = np.vsplit(tiffmat, split_size)\n for row in big_rows:\n all_matrices += np.hsplit(row, 5)\n return list(zip(file_names,all_matrices))", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def getNumTiles(self):\n\t\treturn self.numTiles", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' 
)) # then navigate back up to the base directory", "def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3", "def calc_tiles(raster, tile_x, tile_y):\n \n #get coordinates of upper left corner\n x_upper_left = raster.transform[2]\n y_upper_left = raster.transform[5]\n #calculate width and height based on tile_x and tile_y\n x,y = x_upper_left + tile_x, y_upper_left - tile_y\n height, width = raster.index(x,y)\n \n #get cols and rows of raster band\n ncols, nrows = raster.meta['width'], raster.meta['height']\n #create offsets for window processing\n subsets = product(range(0, ncols, width), range(0, nrows, height))\n #create bounding_window to fill missing windows\n bounding_window = rio.windows.Window(col_off=0, row_off=0, width=ncols, height=nrows)\n \n #create windows\n for col_off, row_off in subsets:\n #yield windows with the given parameters\n window = rio.windows.Window(col_off=col_off, row_off=row_off, \n width=width, height=height).intersection(bounding_window)\n yield window", "def test_tiled_iterator_nogen(self):\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=0\n )\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # no overlap\n tile = next(tile_no_gen)\n img0 = self.test_data_1[65 : 2 * 65, 65 : 2 * 65]\n np.array_equal(tile, img0)\n\n # --- overlapping --- #\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=2\n )\n\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # 64/(2**2) = 16\n tile = next(tile_no_gen)\n img0 = self.test_data_1[16 : 16 + 65, 16 : 16 + 65]\n np.array_equal(tile, img0)", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n \n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n \n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n \n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n # colors default to 0 (i.e. 
black), alphas defaults to 1 (fully opaque i.e.\n # corresponding pixel fully visible in image))\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8') \n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype) \n\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n \n for i in range(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n \n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n \n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.ones(out_shape, dtype=dt)*255\n \n for tile_row in range(tile_shape[0]):\n for tile_col in range(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def stitch(dir_path, in_canels=1, choice=0):\n directory = dir_path\n array = [] # array used to create matrix\n\n p = re.compile(tiles_xy_re)\n q = re.compile(original_img_xy_re)\n\n sum_of_files = len(os.listdir(directory))\n tiles_horizontal_num = 0\n\n first = os.listdir(directory)[0] # we take a sample to extract\n # original image information such as height, width, type\n\n original = q.match(first)\n Original_width, Original_height = int(original.group(1)), int(\n original.group(2))\n im = Image.open(dir_path + '\\\\' + first)\n\n tile_h = np.array(im).shape[0]\n tile_w= np.array(im).shape[1]\n file_type = first.split(\".\")[-1]\n\n # creating array to merge all tiles to\n if choice == 2: # if we choose and\n output_array = np.ones((Original_height, Original_width, in_canels))\n else:\n output_array = np.zeros((Original_height, Original_width, in_canels))\n\n for filename in os.listdir(directory):\n\n xy = p.match(filename)\n x, y = int(xy.group(1)), int(xy.group(2)) # extracting x,y relative\n # to original img\n\n im = Image.open(dir_path + '\\\\' + filename)\n if choice == 0:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n elif choice == 1:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_or(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n elif choice == 2:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_and(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n\n array.append([x, y])\n\n if int(xy.group(1)) == 0:\n tiles_horizontal_num = tiles_horizontal_num + 1\n\n # converting array to image and saving image\n output_im = 
Image.fromarray(output_array.astype(np.uint8))\n file_name = \"original.\" + file_type\n path = dir_path + '\\\\' + file_name\n output_im.save(path)\n\n # array = sorted(array, key=lambda k: [k[0], k[1]])\n # numpy_array = np.array(array)\n # matrix = numpy_array.reshape(sum_of_files // tiles_horizontal_num,\n # tiles_horizontal_num, 2)", "def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4", "def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. 
Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def getNumTiles(self):\n return self.numTiles\n #raise NotImplementedError", "def tile_size_2d(self):\n return 32.0, 32.0", "def get_tile_info(file_info, img_info):\n all_tiles = []\n new_tiles = {}\n if img_info['invert_x']:\n xmin = img_info['width']*img_info['scale'] - img_info['viewer']['right']\n xmax = img_info['width']*img_info['scale'] - img_info['viewer']['left']\n else:\n xmin = img_info['viewer']['left']\n xmax = img_info['viewer']['right']\n if img_info['invert_y']:\n ymin = img_info['height']*img_info['scale'] - img_info['viewer']['bottom']\n ymax = img_info['height']*img_info['scale'] - img_info['viewer']['top']\n else:\n ymin = img_info['viewer']['top']\n ymax = img_info['viewer']['bottom']\n minCol = int(max(1,math.floor(xmin/file_info['tile_width'])))-1\n maxCol=int(min(img_info['columns'],math.ceil(xmax/file_info['tile_width'])))\n minRow = int(max(1,math.floor(ymin/file_info['tile_height'])))-1\n maxRow = int(min(img_info['rows'],math.ceil(ymax/file_info['tile_height'])))\n \n block_width = int(math.ceil(file_info['tile_width']/img_info['scale']))\n block_height = int(math.ceil(file_info['tile_height']/img_info['scale']))\n \n for row in range(minRow,maxRow):\n y0 = row*file_info['tile_height']\n yf = (row+1)*file_info['tile_height']\n y0_idx = int(y0/img_info['scale'])\n yf_idx = min(y0_idx + block_height, img_info['height'])\n for col in range(minCol,maxCol):\n all_tiles.append(str(col)+','+str(row))\n tile_idx = str(col)+','+str(row)\n if (tile_idx not in img_info['tiles'] or \n 'loaded' not in img_info['tiles'][tile_idx] or\n not img_info['tiles'][tile_idx]['loaded']):\n x0 = col*file_info['tile_width']\n xf = (col+1)*file_info['tile_width']\n x0_idx = int(x0/img_info['scale'])\n xf_idx = min(x0_idx+block_width, img_info['width'])\n tile_width = int((xf_idx-x0_idx)*img_info['scale'])\n tile_height = int((yf_idx-y0_idx)*img_info['scale'])\n new_filepath = get_tile_filename(\n file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx)\n tile = {\n 'idx': tile_idx,\n 'left': x0,\n 'right': xf,\n 'top': y0,\n 'bottom': yf,\n 'y0_idx': y0_idx,\n 'yf_idx': yf_idx,\n 'x0_idx': x0_idx,\n 'xf_idx': xf_idx,\n 'new_filepath': new_filepath,\n 'loaded': False,\n 'row': row,\n 'col': col,\n 'x': col*file_info['tile_width'],\n 'y': row*file_info['tile_height'],\n 'width': tile_width,\n 'height': tile_height\n 
}\n if img_info['invert_y']:\n tile['top'] = yf\n tile['bottom'] = y0\n if img_info['invert_x']:\n tile['left'] = xf\n tile['right'] = x0\n new_tiles[tile_idx] = tile\n print('viewer:', img_info['viewer'])\n print('new tiles', new_tiles.keys())\n return all_tiles, new_tiles", "def stack_tifs(tifseries, xsize, ysize):\n holster = np.empty((len(tifseries), ysize, xsize))\n for i, layer in enumerate(tifseries):\n lay = Image.open(layer)\n lis = list(lay.getdata())\n holster[i] = np.array(lis, np.uint16).reshape(512, 512)\n\n return holster", "def get_bboxes(tile_width, tile_height, n):\n numX = n\n numY = n\n diffX = (tile_width-1) / numX\n diffY = (tile_height-1) / numY\n\n squaremesh = np.mgrid[\n 0:tile_width-1:numX*1j, 0:tile_height-1:numY*1j].reshape(2, -1).T\n maxpt = squaremesh.max(axis=0)\n\n vtxs = []\n for pt in squaremesh:\n if np.any(pt == maxpt):\n continue\n vtxs.append((pt, pt + np.array([diffX, diffY])))\n return vtxs", "def tile_iterator(im,\r\n blocksize = (64, 64),\r\n padsize = (64,64),\r\n mode = \"constant\",\r\n verbose = False):\r\n\r\n if not(im.ndim == len(blocksize) ==len(padsize)):\r\n raise ValueError(\"im.ndim (%s) != len(blocksize) (%s) != len(padsize) (%s)\"\r\n %(im.ndim , len(blocksize) , len(padsize)))\r\n\r\n subgrids = tuple([int(np.ceil(1.*n/b)) for n,b in zip(im.shape, blocksize)])\r\n\r\n\r\n #if the image dimension are not divible by the blocksize, pad it accordingly\r\n pad_mismatch = tuple([(s*b-n) for n,s, b in zip(im.shape,subgrids,blocksize)])\r\n\r\n if verbose:\r\n print(\"tile padding... \")\r\n\r\n im_pad = np.pad(im,[(p,p+pm) for pm,p in zip(pad_mismatch,padsize)], mode = mode)\r\n\r\n # iterates over cartesian product of subgrids\r\n for i,index in enumerate(product(*[range(sg) for sg in subgrids])):\r\n # the slices\r\n # if verbose:\r\n # print(\"tile %s/%s\"%(i+1,np.prod(subgrids)))\r\n\r\n # dest[s_output] is where we will write to\r\n s_input = tuple([slice(i*b,(i+1)*b) for i,b in zip(index, blocksize)])\r\n\r\n\r\n\r\n s_output = tuple([slice(p,-p-pm*(i==s-1)) for pm,p,i,s in zip(pad_mismatch,padsize, index, subgrids)])\r\n\r\n\r\n s_output = tuple([slice(p,b+p-pm*(i==s-1)) for b,pm,p,i,s in zip(blocksize,pad_mismatch,padsize, index, subgrids)])\r\n\r\n\r\n s_padinput = tuple([slice(i*b,(i+1)*b+2*p) for i,b,p in zip(index, blocksize, padsize)])\r\n padded_block = im_pad[s_padinput]\r\n\r\n\r\n\r\n yield padded_block, s_input, s_output", "def _pixel_to_tile(x: float, y: float) -> Tuple[float, float]:\n xy = ffi.new(\"double[2]\", (x, y))\n lib.TCOD_sys_pixel_to_tile(xy, xy + 1)\n return xy[0], xy[1]", "def build_grid(tiles, tile_size, grid_rows=None, grid_cols=None):\n if grid_rows is None or grid_cols is None:\n grid_rows = int(math.sqrt(len(tiles)))\n grid_cols = int(math.ceil(len(tiles) / grid_rows))\n\n grid = np.zeros(\n (grid_rows * tile_size[1], grid_cols * tile_size[0], 3), np.uint8)\n for tile_id, tile in enumerate(tiles):\n assert(tile.shape[0] == tile_size[1] and tile.shape[1] == tile_size[0])\n yy = int(tile_id / grid_cols)\n xx = tile_id % grid_cols\n grid[(yy * tile_size[1]):((yy + 1) * tile_size[1]),\n (xx * tile_size[0]):((xx + 1) * tile_size[0]), :] = tile\n return grid", "def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\r\n 
scale_rows_to_unit_interval=True,\r\n output_pixel_vals=True):\r\n\r\n assert len(img_shape) == 2\r\n assert len(tile_shape) == 2\r\n assert len(tile_spacing) == 2\r\n\r\n # The expression below can be re-written in a more C style as\r\n # follows :\r\n #\r\n # out_shape = [0,0]\r\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\r\n # tile_spacing[0]\r\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\r\n # tile_spacing[1]\r\n out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp\r\n in zip(img_shape, tile_shape, tile_spacing)]\r\n\r\n if isinstance(X, tuple):\r\n assert len(X) == 4\r\n # Create an output numpy ndarray to store the image\r\n if output_pixel_vals:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype='uint8')\r\n else:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype=X.dtype)\r\n\r\n #colors default to 0, alpha defaults to 1 (opaque)\r\n if output_pixel_vals:\r\n channel_defaults = [0, 0, 0, 255]\r\n else:\r\n channel_defaults = [0., 0., 0., 1.]\r\n\r\n for i in xrange(4):\r\n if X[i] is None:\r\n # if channel is None, fill it with zeros of the correct\r\n # dtype\r\n dt = out_array.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array[:, :, i] = numpy.zeros(out_shape,\r\n dtype=dt) + channel_defaults[i]\r\n else:\r\n # use a recurrent call to compute the channel and store it\r\n # in the output\r\n out_array[:, :, i] = tile_raster_images(\r\n X[i], img_shape, tile_shape, tile_spacing,\r\n scale_rows_to_unit_interval, output_pixel_vals)\r\n return out_array\r\n\r\n else:\r\n # if we are dealing with only one channel\r\n H, W = img_shape\r\n Hs, Ws = tile_spacing\r\n\r\n # generate a matrix to store the output\r\n dt = X.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array = numpy.zeros(out_shape, dtype=dt)\r\n\r\n for tile_row in xrange(tile_shape[0]):\r\n for tile_col in xrange(tile_shape[1]):\r\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\r\n this_x = X[tile_row * tile_shape[1] + tile_col]\r\n if scale_rows_to_unit_interval:\r\n # if we should scale values to be between 0 and 1\r\n # do this by calling the `scale_to_unit_interval`\r\n # function\r\n this_img = scale_to_unit_interval(\r\n this_x.reshape(img_shape))\r\n else:\r\n this_img = this_x.reshape(img_shape)\r\n # add the slice to the corresponding position in the\r\n # output array\r\n c = 1\r\n if output_pixel_vals:\r\n c = 255\r\n out_array[\r\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\r\n tile_col * (W + Ws): tile_col * (W + Ws) + W\r\n ] = this_img * c\r\n return out_array", "def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def __init__(self, width, height):\n self.w = width\n self.h = height\n self.cleanTiles = []\n self.tiles = [[False] * width for i in range(height)]\n self.cleaned = 0", "def tiles(self, nums, row = 1, spaces = 0):\r\n # We add the (\" \" * 5) to align the rows\r\n # with odd number of values\r\n separator = (\"+---+\" + (\" \" * 5)) * row\r\n space = (\" \" * 5) * spaces\r\n\r\n tile = space + separator + space + \"\\n\"\r\n \r\n tile += space\r\n for i in nums:\r\n # We add the (\" \" * 5) to align the rows\r\n # with odd number of values\r\n tile += f\"| {i} |\" + (\" \" 
* 5)\r\n tile += space + \"\\n\"\r\n \r\n tile += space + separator + space + \"\\n\"\r\n \r\n return tile", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def grouped(pixels):\n _n = 3 #groups it by 3\n _grouped = [pixels[_i :_i + _n] for _i in range(0,len(pixels), _n)]\n return _grouped", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output np ndarray to store the image\n if output_pixel_vals:\n out_array = 
np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n # colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # functionmapping\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def iter_tiles(data_sampler, depth, merge=True):\n if merge is True:\n merge = _default_merge\n\n parents = defaultdict(dict)\n\n for node, c, increasing in iter_corners(max(depth, 1),\n bottom_only=merge):\n\n l, b = subsample(c[0], c[1], c[2], c[3], 256, increasing)\n img = data_sampler(l, b)\n\n for pth, img in _trickle_up(img, node, parents, merge, depth):\n yield pth, img", "def __init__(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.numTiles = width*height\n\t\tself.tiles = []\n\t\tfor i in range(0, width):\n\t\t\tfor j in range(0, height):\n\t\t\t\tself.tiles.append(Tile(i, j))", "def _pixel_to_tile(x: float, y: float) -> tuple[float, float] | None:\n if not lib.TCOD_ctx.engine:\n return None\n xy = ffi.new(\"double[2]\", (x, y))\n lib.TCOD_sys_pixel_to_tile(xy, xy + 1)\n return xy[0], xy[1]", "def __init__(self, group, image, x, y, tile_size):\n\t\tsuper().__init__(group, image, x, y, tile_size)", "def tiles_positions(self) -> Generator[TilePosition, None, None]:\r\n for i in range(self.width * self.height):\r\n yield TilePosition(i % self.width, i // self.width)", "def load_tile_table(filename, width, height):\n\ttry: \n\t\ttile_table = []\n\t\timage = pygame.image.load(filename).convert()\n\texcept:\n\t\tprint(\"Could not load tileset:\", filename)\n\telse:\n\t\timage_width, image_height = image.get_size()\n\t\tfor tile_x in range(0, int(image_width/width)):\n\t\t\tline = []\n\t\t\ttile_table.append(line)\n\t\t\tfor tile_y in range(0, int(image_height/height)):\n\t\t\t\trect = (tile_x*width, tile_y*height, width, height)\n\t\t\t\tline.append(image.subsurface(rect))\n\treturn tile_table", "def Split(cls, img):\n pix = img.load()\n\n # vertical cut\n vertical = []\n foundLetter = False\n for x in range(img.size[0]):\n 
inLetter = False\n for y in range(img.size[1]):\n if pix[x, y] == 0:\n inLetter = True\n break\n if not foundLetter and inLetter:\n foundLetter = True\n start = x\n if foundLetter and not inLetter:\n foundLetter = False\n end = x\n vertical.append((start, end))\n\n # horizontal cut\n def _findFistLine(pix, y_range, x_start, x_end):\n for y in y_range:\n for x in range(x_start, x_end):\n if pix[x, y] == 0:\n return y\n\n horizontal = []\n for i in vertical:\n start = _findFistLine(pix, range(img.size[1]), *i)\n end = _findFistLine(pix, reversed(range(img.size[1])), *i)\n horizontal.append((start, end))\n\n return [(vertical[i][0], horizontal[i][0], vertical[i][1], horizontal[i][1] + 1) for i in range(len(vertical))]" ]
[ "0.7743909", "0.7442206", "0.7223278", "0.6875224", "0.6805701", "0.6805701", "0.68043333", "0.6783657", "0.67152864", "0.6648476", "0.6550898", "0.6506808", "0.6499578", "0.644437", "0.64369965", "0.64329946", "0.6417711", "0.6391121", "0.6388546", "0.637921", "0.6316766", "0.63017416", "0.6299771", "0.6295904", "0.62767625", "0.6265029", "0.6232143", "0.62023723", "0.618484", "0.6164547", "0.61625075", "0.6155534", "0.6152099", "0.6148211", "0.61317015", "0.61278415", "0.61145335", "0.6097943", "0.60911834", "0.60817385", "0.6079502", "0.6070139", "0.6068163", "0.6052846", "0.6047751", "0.60388094", "0.6024736", "0.59879565", "0.5978726", "0.5955223", "0.5951077", "0.590159", "0.589859", "0.5891227", "0.5889109", "0.58886266", "0.5886475", "0.58763623", "0.5874852", "0.5869099", "0.5851927", "0.5850796", "0.58493215", "0.5840534", "0.5839733", "0.5826013", "0.58236617", "0.5798205", "0.57971275", "0.5793621", "0.57932734", "0.5793226", "0.5792273", "0.5774732", "0.5762961", "0.5757388", "0.57525647", "0.5743784", "0.5734286", "0.57258373", "0.57238907", "0.57130176", "0.57103217", "0.5709169", "0.56941414", "0.5693441", "0.56910616", "0.568983", "0.5685288", "0.5683665", "0.56675553", "0.5666032", "0.565534", "0.5654988", "0.5648264", "0.5627477", "0.5615284", "0.56025904", "0.56007326", "0.5594939" ]
0.8323475
0
Draw the contours over a blank array. The function cv2.drawContours overlays the contours on top of the bitwise array, which is not ideal if the bitwise array contains some small, noisy contours. Therefore, I created an empty array first and then used it as the base onto which the contours are drawn.
def draw_contours(self):
    contours, hierarchy = cv2.findContours(self.edged, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)
    edged = cv2.bitwise_not(self.edged)
    rgb = cv2.cvtColor(edged, cv2.COLOR_GRAY2RGB)
    temp_array = np.ones([rgb.shape[0], rgb.shape[1], rgb.shape[2]])
    contours_ = cv2.drawContours(temp_array, contours, -1, (0, 0, 0), thickness=1)
    ml_filename = 'MLOutputs/' + str(self.file_name) + 'Clusters' + str(self.clusters) + \
        'FilterSize' + str(self.filter_size) + 'Sigma' + str(self.sigma) + 'UniqueColours' + \
        str(self.unique_colours) + ".png"
    plt.imshow(contours_, cmap="gray")
    if not os.path.isfile(ml_filename):
        imsave(ml_filename, contours_, cmap="gray")
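A minimal, self-contained sketch of the "blank canvas" approach described in the query above — an illustration only, not the dataset author's implementation. The synthetic rectangle input and the output file name are assumptions made for this example.

```python
import cv2
import numpy as np

# Synthetic binary edge image: a white rectangle outline on a black background
# (assumption: stands in for the real `self.edged` array).
edged = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(edged, (20, 20), (80, 80), 255, thickness=2)

# OpenCV 4.x returns (contours, hierarchy); 3.x prepends the image.
contours, _ = cv2.findContours(edged, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)

# Blank white 3-channel canvas, analogous to the np.ones array in the document,
# so small noisy contours in the bitwise image do not carry over.
canvas = np.full((edged.shape[0], edged.shape[1], 3), 255, dtype=np.uint8)

# Draw all detected contours in black onto the blank canvas.
cv2.drawContours(canvas, contours, -1, (0, 0, 0), thickness=1)

cv2.imwrite("contours_on_blank.png", canvas)
```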
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_contours(self, image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n contoured = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n unique, counts = np.unique(img, return_counts=True)\n for uni in unique:\n if uni == 0:\n continue\n self.get_instance_contours(img, contoured, uni)\n\n cv.namedWindow('building contours', cv.WINDOW_NORMAL)\n cv.imshow('building contours', contoured)\n cv.waitKey(0)\n cv.destroyAllWindows()", "def make_mask(shape, contour):\n mask = np.zeros(shape, np.int32)\n cv2.drawContours(mask, [contour], 0, (255), -1)\n return mask", "def drawContours(img, cnt, color=(0, 255, 0), thickness=2):\n\tcv2.drawContours(img, cnt, -1, color, thickness)", "def fillContour( self, img, contours, i):\n cv2.drawContours(img, contours, i , self.WHITE, -1)", "def generate_contours(threshold):\n \tcnts = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n \tcnts = imutils.grab_contours(cnts)\n \treturn cnts", "def draw_contours(mat, contours, color=(0, 0, 255), thickness=1):\n cv2.drawContours(mat, contours, -1, color, thickness=thickness)", "def remove_border(contour, ary):\n # Use a rotated rectangle (should be a good approximation of a border).\n # If it's far from a right angle, it's probably two sides of a border and\n # we should use the bounding box instead.\n c_im = np.zeros(ary.shape)\n r = cv2.minAreaRect(contour)\n degs = r[2]\n if angle_from_right(degs) <= 10.0:\n box = cv2.boxPoints(r)\n box = np.int0(box)\n cv2.drawContours(c_im, [box], 0, 255, -1)\n cv2.drawContours(c_im, [box], 0, 0, 4)\n else:\n x1, y1, x2, y2 = cv2.boundingRect(contour)\n cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)\n cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)\n\n return np.minimum(c_im, ary)", "def contour_array(self, a, masked_values=None, head=None, **kwargs):\n return self.__cls.contour_array(a=a, masked_values=masked_values,\n head=head, **kwargs)", "def draw(self, binary, leftx, rightx):\n\t\tfilled = np.zeros_like(binary)\n\t\tploty = np.linspace(0, filled.shape[0] - 1, filled.shape[0])\n\t\t# Recast the x and y points into usable format for cv2.fillpoly()\n\t\tpts_left = np.array([np.transpose(np.vstack([leftx, ploty]))])\n\t\tpts_right = np.array([np.flipud(np.transpose(np.vstack([rightx, ploty])))])\n\t\tpts = np.hstack((pts_left, pts_right))\n\t\t# Draw the lane onto the warped blank image\n\t\tcv2.fillPoly(filled, np.int_([pts]), (0, 255, 0))\n\t\treturn filled", "def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:\r\n contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]\r\n mask = np.zeros(thresh.shape, np.uint8)\r\n good_contours = sorted(\r\n [cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],\r\n key=cv.contourArea,\r\n )\r\n\r\n setattr(self, \"contour1\", good_contours[0])\r\n setattr(\r\n self,\r\n \"contour2\",\r\n good_contours[1]\r\n if cv.pointPolygonTest(\r\n good_contours[1], tuple(good_contours[0][0][0]), False\r\n )\r\n < 0\r\n else good_contours[2],\r\n )\r\n\r\n cv.drawContours(mask, [self.contour1], 0, 255, -1)\r\n cv.drawContours(mask, [self.contour2], 0, 255, -1)\r\n\r\n return mask", "def fillHoles(img):\n out,contour,hierarchy = cv2.findContours(img,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)\n i=0\n for cnt in contour:\n cv2.drawContours(img,contour,i,255,-1)\n i+=1\n return img", "def detect_contours(self):\r\n (contours, _) = cv2.findContours(self.image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n return [DbugContour(cv_contour=contour) for 
contour in contours]", "def fillContour(img, cnt, color = (255,255,0)):\n\tcv2.drawContours(img, [cnt], 0, color, -1)", "def draw_contours(self, image, maskImg):\r\n # Required variables..\r\n x, y, width, height = 0, 0, 0, 0\r\n # Find contours..\r\n contours, hierarchy = cv2.findContours(image=maskImg, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE) # Playable Parameters..\r\n # Draw the contours..\r\n for contour in contours:\r\n # Calculate the area of the contour, so can remove unnecessary contours..\r\n area = cv2.contourArea(contour=contour)\r\n if area > 3000: # Playable adjustment..!! Found Good as 3000 for current light condition.. change this if light condition changes..\r\n # Draw the contours to the image -- actual frame..\r\n if self.debug_mode:\r\n cv2.drawContours(image=image, contours=contour, contourIdx=-1, color=(255, 255, 0), thickness=4)\r\n # Find the perimeter of the markers detected...\r\n perimeter = cv2.arcLength(curve=contour, closed=True)\r\n # Approximating/Finding the corners of the image from the obtained corners..\r\n approx_corners = cv2.approxPolyDP(curve=contour, epsilon=0.02 * perimeter, closed=True)\r\n # Find the bounding box rectangle for the approximated corners..\r\n x, y, width, height = cv2.boundingRect(approx_corners)\r\n # Return the values with which a rectangle can be drawn..\r\n return x, y, width, height", "def __find_contours(input, external_only):\r\n if(external_only):\r\n mode = cv2.RETR_EXTERNAL\r\n else:\r\n mode = cv2.RETR_LIST\r\n method = cv2.CHAIN_APPROX_SIMPLE\r\n _, contours, hierarchy = cv2.findContours(\r\n input, mode=mode, method=method)\r\n return contours", "def compute_contour_binary_masks(\n contour1: np.ndarray,\n contour2: np.ndarray,\n max_size: int = DEFAULT_MAX_CONTOUR_MASK_SIZE,\n) -> typing.Tuple[np.ndarray, np.ndarray]:\n points = np.concatenate([contour1, contour2], axis=0)\n offset = points.min(axis=0)\n points, contour1, contour2 = [v - offset for v in [points, contour1, contour2]]\n scale = min(max_size / points.max(axis=0).min(), 1)\n if scale < 1:\n points, contour1, contour2 = [v * scale for v in [points, contour1, contour2]]\n w, h = points.max(axis=0).astype(\"int32\")\n im1, im2 = [\n cv2.drawContours(\n np.zeros((h, w), dtype=\"uint8\"),\n contours=(box[np.newaxis]).round().astype(\"int32\"),\n color=255,\n thickness=-1,\n contourIdx=0,\n )\n > 0\n for box in [contour1, contour2]\n ]\n return im1, im2", "def drawContour(im,draw):\r\n img = im.filter(ImageFilter.BLUR)\r\n img = im.filter(ImageFilter.SMOOTH)\r\n img = cv.cvtColor(numpy.array(img), cv.COLOR_RGB2BGR)\r\n edges = cv.Canny(img,100,200)\r\n pos = numpy.nonzero(edges)\r\n pos2 = [(pos[0][i],pos[1][i]) for i in range(0,len(pos[0]))]\r\n pos3=[tuple(map(lambda x:int(round(x/32)),i)) for i in pos2]\r\n pos3 = [(i[1],i[0]) for i in pos3]\r\n for i in pos3:\r\n if pos3.count((i[0]+1,i[1]))>20 and i[0]<16 and i[1]<16:\r\n draw.line([(32*i[0],32*i[1]),(32*(i[0]+1),32*i[1])],fill=(0,0,0),width=5)\r\n if pos3.count((i[0],i[1]+1))>20 and i[0]<16 and i[1]<16:\r\n draw.line([(32*i[0],32*i[1]),(32*(i[0]),32*(i[1]+1))],fill=(0,0,0),width=5)", "def as_boolean_mask(self):\n bbox = self.bbox()\n zs = np.unique([c.image_z_position for c in self.contours])\n z_to_index = dict(zip(zs,range(len(zs))))\n\n # Get dimensions, initialize mask.\n nx,ny = np.diff(bbox[:2], axis=1).astype(int) + 1\n nx = int(nx); ny = int(ny)\n nz = int(zs.shape[0])\n mask = np.zeros((nx,ny,nz), dtype=np.bool)\n\n # We check if these points are enclosed within each contour \n # for 
a given slice. `test_points` is a list of image coordinate \n # points, offset by the bounding box.\n test_points = bbox[:2,0] + np.c_[ np.where(~mask[:,:,0]) ]\n\n # First we \"turn on\" pixels enclosed by inclusion contours.\n for contour in self.contours:\n if contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n # Create path object and test all pixels\n # within the contour's bounding box.\n path = mplpath.Path(contour_matrix, closed=True)\n contains_pts = path.contains_points(test_points)\n mask[:,:,zi] = contains_pts.reshape(mask.shape[:2])\n\n # Second, we \"turn off\" pixels enclosed by exclusion contours.\n for contour in self.contours:\n if not contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n path = mplpath.Path(contour_matrix, closed=True)\n not_contains_pts = ~path.contains_points(test_points)\n not_contains_pts = not_contains_pts.reshape(mask.shape[:2])\n mask[:,:,zi] = np.logical_and(mask[:,:,zi], not_contains_pts)\n\n # The first and second axes have to \n # be swapped because of the reshape.\n return mask.swapaxes(0,1), bbox[[1,0,2]]", "def draw_contours(self, contours, line_color=(0,0,0), line_thickness=1):\r\n cv2_contours = [c.contour for c in contours]\r\n cv2.drawContours(self.image, cv2_contours, -1, line_color, line_thickness) # -1 draw all contours in cv2_contours\r", "def __find_contours(input, external_only):\n if(external_only):\n mode = cv2.RETR_EXTERNAL\n else:\n mode = cv2.RETR_LIST\n method = cv2.CHAIN_APPROX_SIMPLE\n contours, hierarchy =cv2.findContours(input, mode=mode, method=method)\n return contours", "def emit_mask_contour(self):\r\n contours = find_contours(self.final_mask, 0.5)\r\n \r\n sig = [contours, self.fillContourButton.isChecked(), self.thicknessSpinBox.value(), self.invertMaskButton.isChecked()]\r\n \r\n self.signal_DMDcontour.emit(sig)", "def _draw_contour(self, img):\n if self.mask is None or self.contour_width == 0:\n return img\n\n mask = self._get_bolean_mask(self.mask) * 255\n contour = Image.fromarray(mask.astype(np.uint8))\n contour = contour.resize(img.size)\n contour = contour.filter(ImageFilter.FIND_EDGES)\n contour = np.array(contour)\n\n # make sure borders are not drawn before changing width\n contour[[0, -1], :] = 0\n contour[:, [0, -1]] = 0\n\n # use gaussian to change width, divide by 10 to give more resolution\n radius = self.contour_width / 10\n contour = Image.fromarray(contour)\n contour = contour.filter(ImageFilter.GaussianBlur(radius=radius))\n contour = np.array(contour) > 0\n contour = np.dstack((contour, contour, contour))\n\n # color the contour\n ret = np.array(img) * np.invert(contour)\n if self.contour_color != 'black':\n color = Image.new(img.mode, img.size, self.contour_color)\n ret += np.array(color) * contour\n\n return Image.fromarray(ret)", "def __find_contours(input, external_only):\n if (external_only):\n mode = cv2.RETR_EXTERNAL\n else:\n mode = cv2.RETR_LIST\n method = cv2.CHAIN_APPROX_SIMPLE\n contours, hierarchy = cv2.findContours(input, mode=mode, method=method)\n return contours", "def __find_contours(input, 
external_only):\n if(external_only):\n mode = cv2.RETR_EXTERNAL\n else:\n mode = cv2.RETR_LIST\n method = cv2.CHAIN_APPROX_SIMPLE\n im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)\n return contours", "def draw_contours(image, contours):\n assert isinstance(image, np.ndarray), 'Image should be a numpy array'\n x, y, w, h = 0, 0, 0, 0\n for contour in contours:\n area = get_contour_area(contour)\n if area > 10000:\n x, y, w, h = draw_rect_around_contour(image, contour)\n return x, y, w+x, h+y", "def compute_contour(ptr_array):\n hull = ConvexHull(ptr_array)\n contour_index = hull.vertices.tolist() # indices are ordered\n # contour_index = hull.simplices.flatten()\n # contour_index = list(set(contour_index))\n return contour_index", "def binary_contour_image_filter(*args, **kwargs):\n import itk\n instance = itk.BinaryContourImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def _get_watershed_areas(self, class_contours, class_mask, kernel_size=3, area_thresh=500):\n\n kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)\n\n # Since the watershed draws contours, we need to invert the predictions to\n # get the 'inside' blob portion. We also slightly compress the blob portion\n # so we can get a more defining border.\n inverted_contours = 255 - class_contours\n\n inverted_contours = cv2.erode(inverted_contours, kernel, iterations=1)\n # remove areas that are not part of the class mask\n inverted_contours[class_mask==0]= 0 # here ?\n\n return inverted_contours", "def register_cells(contours: List[np.array]) -> np.array:\n def create_cell(contour: np.array) -> np.array:\n center, radius = cv2.minEnclosingCircle(contour)\n v = (0, 0)\n weight = -1\n\n return np.array([center[0], center[1], v[0], v[1], radius, weight])\n\n areas = np.array([cv2.contourArea(contour) for contour in contours]).astype(\"float32\")\n blobs = np.array([create_cell(contour) for contour in contours]).astype(\"int32\")\n\n return blobs[areas >= 100].astype(\"float32\")", "def drawContour(img, cnt, color=(0, 255, 0), thickness=2):\n\tcv2.drawContours(img, [cnt], 0, color, thickness)", "def find_original_contours(self):\r\n\r\n # Convert to gray, threshold and invert the image. Also save a thresholded but non-inverted image copy\r\n gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\r\n self.thresh_invert = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,\r\n int(self.cell_size), 5)\r\n self.thresh_orig = 255 - self.thresh_invert\r\n\r\n # Find the contours of the image. 
Each contour should correspond to a cell\r\n orig_contours = cv2.findContours(self.thresh_invert, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n orig_contours = orig_contours[0] if len(orig_contours) == 2 else orig_contours[1]\r\n for block in orig_contours:\r\n area = cv2.contourArea(block)\r\n\r\n # If the contours are not too large, we draw them over the image to remove the digits in the grid\r\n if area < self.min_cell_size:\r\n cv2.drawContours(self.thresh_invert, [block], -1, (0, 0, 0), -1)", "def mark_contours(contour_arr, img, symmetry, _plot=False):\n marg, flag = 7, 0\n # fig, ax = plt.subplots()\n # ax.imshow(img, cmap=\"gray\")\n sub_images = [] # init array for pictures\n for contour in contour_arr:\n\n lower_dim = contour[:, 0]\n x, y = lower_dim[:, 0], lower_dim[:, 1]\n min_x, min_y, max_x, max_y = min(x), min(y), max(x), max(y)\n\n # remove small noise\n if (max_x - min_x) * (max_y - min_y) < MIN_IMG_SIZE:\n continue\n # avoid index error\n if min_x - marg < 0 or min_y - marg < 0 or max_y + marg > img.shape[0] or max_x + marg > img.shape[1]:\n marg = 0\n # crop only half cause image is symmetric:\n # TODO: address different types of symmetry as input from user\n if symmetry == HORIZONTAL:\n if max_x <= img.shape[1] // 2:\n sub_images.append(img[min_y:max_y, min_x:max_x])\n flag = 1\n else: # symmetry is around the X-axis\n if max_y <= img.shape[0] // 2:\n sub_images.append(img[min_y:max_y, min_x:max_x])\n flag = 1\n if _plot and flag:\n ax.plot([min_x - marg, max_x + marg, max_x + marg, min_x - marg, min_x - marg],\n [min_y - marg, min_y - marg, max_y + marg, max_y + marg, min_y - marg], c='r', linewidth=0.4)\n flag = 0\n if _plot:\n plt.savefig(\"found_subshapes2.jpg\")\n # plt.show()\n return sub_images", "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def __find_contours(input1, external_only):\n if(external_only):\n mode = cv2.RETR_EXTERNAL\n else:\n mode = cv2.RETR_LIST\n method = cv2.CHAIN_APPROX_SIMPLE\n contours, hierarchy = cv2.findContours(input1, cv2.RETR_TREE, method=method)\n return contours", "def take_contours(self):\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n ret, self.th = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n im2, contours, hierarchy = cv2.findContours(self.th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return [contour for contour in contours if 1000 < cv2.contourArea(contour) < 150000]", "def processVideoFrame(self):\n cv2.drawContours(\n self.VideoFrame, self.block_contours, -1, (255, 0, 255), 3)", "def find_contours(img_path, **kwargs):\n threshold = kwargs['threshold']\n image = cv2.imread(img_path)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]\n contours, _ = cv2.findContours(thresh.copy(),\n cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n\n contour_areas = [] # TODO: is there a faster way to keep the contors with n-largest areas?\n for i, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n contour_areas.append((i, area))\n\n rect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n cv2.drawContours(image,[box],0,(0,0,255),2)\n cv2.imwrite('rect_{}.png'.format(i), image)\n # Contours that cover the largest areas are whole pieces\n largest_contour_areas = 
nlargest(10, contour_areas, key=lambda x:x[1])\n #print(largest_contour_areas)\n\n for i, area in largest_contour_areas:\n print (i, area)\n cv2.drawContours(image, contours, i, (100, 155, 100), 3)\n cv2.imwrite('contor_{}.png'.format(i), image)\n return True", "def drawcntMap(orgimg,filteredimg,wThresh,hThresh):\r\n _, contour, _ = cv2.findContours(filteredimg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n cnt = cv2.drawContours(orgimg.copy(), contour, -1, (0, 0, 255), 2) # To draw filtered contours on original image\r\n\r\n digitCnts = [] # contours to be surrounded by bounding boxes\r\n\r\n for c in contour:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n if w >= wThresh and h >= hThresh and w*h <20000: # Length filters to reduce noise\r\n cv2.rectangle(cnt,(x,y),(x+w,y+h),[0,255,0],2)\r\n digitCnts.append(c)\r\n\r\n return cnt, digitCnts", "def mask(self):\n\n mask = np.zeros(shape=(self._info.height, self._info.width), dtype=np.uint8)\n\n self.draw(image=mask, color=constants.COLOR_WHITE_MONO)\n\n mask_with_border = np.pad(mask, 1, 'constant', constant_values=255)\n\n cv2.floodFill(image=mask,\n mask=mask_with_border,\n seedPoint=(int(self.middle_point[0]), int(self.middle_point[1])),\n newVal=constants.COLOR_WHITE_MONO)\n\n return mask", "def draw_contours(in_img, contours, colour=(255, 0, 0)):\n img = convert_when_colour(colour, in_img.copy())\n thickness = int(max(img.shape) / 150)\n img = cv2.drawContours(img, contours, -1, colour, thickness)\n return img", "def find_contours(mask):\n\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n return cnts", "def getContours(image, copyImage):\n contours, heirarchy = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n for contour in contours:\n area = cv.contourArea(contour)\n \n if area > 500.0:\n cv.drawContours(copyImage, contour, -1, (255,0,0),3)\n perimeter = cv.arcLength(contour, True)\n \n # Approximates to the nearest polygon\n approx = cv.approxPolyDP(contour,0.02*perimeter, True)\n objectCoordinates = len(approx)\n\n # Returns the x, y and height width of the polygon\n x, y, w, h = cv.boundingRect(approx)\n\n if objectCoordinates == 3:\n objectShape = \"Triangle\"\n elif objectCoordinates == 4:\n ratio = w / float(h)\n if ratio >= 0.95 and ratio <= 1.05:\n objectShape = \"Square\"\n else: objectShape = \"Rectangle\"\n else: objectShape = \"Circle\" \n\n \n\n # Draw rectangles around the images\n cv.rectangle(copyImage, (x,y), (x+w, y+h), (0,255,0), 2)\n cv.putText(copyImage, objectShape, (x + (w//2), y + (h//2)),cv.FONT_HERSHEY_COMPLEX, 0.5, (0,0,0))", "def preprocessing(self, img):\n [a, contours, c] = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours", "def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]", "def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, 
cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls = [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. \r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic", "def boundary_contour(self, time):\n ti = np.where(time == self.times)[0][0]\n image_mask = binary_erosion(binary_fill_holes(binary_dilation(self.masks[ti])))\n padded_mask = np.pad(image_mask, 1, 'constant', constant_values=0)\n c_out = find_contours(padded_mask, level=0.5, 
fully_connected=\"high\")\n x_cont = self.x[ti][np.floor(c_out[0][:, 0]).astype(int), np.floor(c_out[0][:, 1]).astype(int)]\n y_cont = self.y[ti][np.floor(c_out[0][:, 0]).astype(int), np.floor(c_out[0][:, 1]).astype(int)]\n ordered_coords = np.vstack([x_cont, y_cont])\n return ordered_coords", "def find_contours(thresh):\n thresh = thresh.astype(np.uint8)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n\n # filter outer contours\n filtered_cont = []\n for i in range(len(contours)):\n if hierarchy[0, i, 3] == NO_PARENT:\n filtered_cont.append(contours[i])\n\n return filtered_cont", "def drawShadedContours(xlist, ylist, zmatrix, levels):\n dislin.conshd(xlist, len(xlist), ylist, len(ylist), zmatrix, levels, len(levels))", "def generate_contours(edges, colorimg, img):\n cv2.destroyAllWindows()\n cv2.namedWindow(\"Contours\")\n lengthlimit = 20\n arealimit = 20\n\n # create trackbars for length and area filters\n cv2.createTrackbar(\"length\", \"Contours\", 0, 200, nothing)\n cv2.createTrackbar(\"area\", \"Contours\", 0, 2000, nothing)\n\n # find all possible contours in the image\n ret, thresh = cv2.threshold(edges, 127, 255, 0)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n while 1:\n cv2.imshow(\"Contours\", colorimg)\n\n # filter based on area and length\n lengthlimit = cv2.getTrackbarPos(\"length\", \"Contours\")\n arealimit = cv2.getTrackbarPos(\"area\", \"Contours\")\n\n for idx, contour in enumerate(contours):\n if len(contour) > lengthlimit:\n area = cv2.contourArea(contour)\n if area > arealimit:\n isconvex = cv2.isContourConvex(contour)\n circle = cv2.minEnclosingCircle(contour)\n cv2.drawContours(colorimg, contours, idx, (0, 255, 0), 2)\n else:\n cv2.drawContours(colorimg, contours, idx, (0, 0, 255), 2)\n else:\n cv2.drawContours(colorimg, contours, idx, (0, 0, 255), 2)\n\n k = cv2.waitKey(1) & 0xFF\n # print k\n if k == 27:\n exit()\n if k == 32:\n show_final(colorimg, contours, arealimit, lengthlimit)", "def binarize_array(numpy_array, threshold=200):\n for i in range(len(numpy_array)):\n for j in range(len(numpy_array[0])):\n if numpy_array[i][j] > threshold:\n numpy_array[i][j] = 255\n else:\n numpy_array[i][j] = 0\n return numpy_array", "def getContours(img,iteration):\n nP, IDrange = upDate(iteration)\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n retvalth, imgthreshold = cv2.threshold(imgray, 50, 255, cv2.THRESH_BINARY)\n kernel = np.ones((nP, nP), np.uint8)\n imgdilation = cv2.dilate(imgthreshold, kernel, iterations=2)\n contours = []\n # two vertion of cv2 for findcontours-> (old vertion): imgcontours, contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n #contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n if iteration == 2 :\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n elif iteration == 3:\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n ##imgcontours, contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n return contours", "def binarize_array(numpy_array, threshold=200):\n for i in range(len(numpy_array)):\n for j in range(len(numpy_array[0])):\n if numpy_array[i][j] > threshold:\n numpy_array[i][j] = 255\n else:\n numpy_array[i][j] = 0\n return 
numpy_array", "def binarize_array(numpy_array, threshold=200):\n for i in range(len(numpy_array)):\n for j in range(len(numpy_array[0])):\n if numpy_array[i][j] > threshold:\n numpy_array[i][j] = 255\n else:\n numpy_array[i][j] = 0\n return numpy_array", "def props_for_contours(contours, ary):\n c_info = []\n for c in contours:\n x, y, w, h = cv2.boundingRect(c)\n c_im = np.zeros(ary.shape)\n cv2.drawContours(c_im, [c], 0, 255, -1)\n c_info.append({\n 'x1': x,\n 'y1': y,\n 'x2': x + w - 1,\n 'y2': y + h - 1,\n 'sum': np.sum(ary * (c_im > 0)) / 255\n })\n return c_info", "def contours(self, image,debug=False):\n imgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n if debug: cv2.imwrite('debug_pics/gray_scale_contour.jpg',imgray) # cv2.imshow('gray_scale_contour',imgray)\n im2, contours, hierarchy = cv2.findContours(imgray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n return contours,hierarchy", "def display_contour(cnts, img):\n for c in cnts:\n cv2.drawContours(img, [c], -1, (0, 255, 0), 2)\n\n img = imutils.resize(img, width=1200)\n cv2.imshow(\"Image\", img)\n cv2.moveWindow('Image', 30, 30)\n cv2.waitKey(0)", "def draw_separate_position_patterns(img, found, contours):\n for i in found:\n qr_dc = img.copy()\n cv2.drawContours(qr_dc, contours, i, (0, 255, 0), 2)\n show(qr_dc)", "def find_contours(mask):\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)\n return contours", "def draw(self, **kwargs):\n\n Lons = numpy.ones(self.data.shape)*0.5\n Lats = numpy.ones(self.data.shape)*0.5\n for ix in range(self.ncols):\n for iy in range(self.nrows):\n Lons[iy,ix] = self.xllcorner+float(ix)*self.cellsize\n Lats[iy,ix] = self.yllcorner+float(iy)*self.cellsize\n ContourMin = numpy.min(numpy.where(self.data != self.nodata,self.data, 1000000))\n ContourMax = numpy.max(numpy.where(self.data != self.nodata,self.data, -1000000))*1.10\n if kwargs.has_key('contours'):\n if type( kwargs['contours'] ) == type( 1 ):\n Contours = numpy.arange(ContourMin, ContourMax, (ContourMax-ContourMin)/float( kwargs['contours']+1))\n else:\n Contours = kwargs['contours']\n else:\n Contours = numpy.arange(ContourMin, ContourMax, (ContourMax-ContourMin)/11.)\n if kwargs.has_key('cmap'):\n mycmap = kwargs['cmap']\n else:\n mycmap = 'jet'\n if kwargs.has_key('dmap'):\n dmap = max(0,min(4,kwargs['dmap']))\n else:\n dmap = 4\n # Lambert Conformal Conic map.\n if kwargs.has_key('res'):\n if kwargs['res']=='med':\n mapres='i'\n elif kwargs['res']=='hi':\n mapres='h'\n else:\n mapres = 'l'\n else:\n mapres = 'l'\n if mapres not in ('c','l','i','h'):\n mapres = 'l'\n m = Basemap(llcrnrlon=Lons[0,0], llcrnrlat=Lats[0,0], urcrnrlon=Lons[self.nrows-1,self.ncols-1], urcrnrlat=Lats[self.nrows-1,self.ncols-1],\n projection='lcc',lat_1=30.,lat_2=60.,lon_0=(Lons[0,0]+Lons[self.nrows-1,self.ncols-1])/2.,\n resolution =mapres,area_thresh=1000.)\n # create figure, add axes.\n fig=p.figure()\n ax = fig.add_axes([0.1,0.1,0.7,0.7])\n #make a filled contour plot.\n x, y = m( Lons , Lats)\n CS = m.contourf(x,y,self.data, Contours, cmap=p.get_cmap(mycmap))\n\tpos = ax.get_position()\n\tl, b, w, h = getattr(pos, 'bounds', pos)\n #l,b,w,h=ax.get_position()\n cax = p.axes([l+w+0.075, b, 0.05, h]) # setup colorbar axes\n p.colorbar(drawedges=True, cax=cax) # draw colorbar\n p.axes(ax) # make the original axes current again\n\n if kwargs.has_key('shapefiles'):\n for s in kwargs['shapefiles']:\n try:\n lw = s[3]\n except:\n lw = 0.5\n try:\n clr = s[4]\n except:\n clr='k'\n shp_info = apply(m.readshapefile, 
(s[0],s[1]),{'drawbounds':s[2], 'linewidth':lw, 'color':clr} )\n # draw coastlines, meridians and parallels.\n if dmap > 1:\n m.drawcoastlines()\n if dmap > 2:\n m.drawcountries()\n if dmap > 3:\n m.drawstates()\n if dmap > 0:\n m.drawparallels(p.arange(10,70,10),labels=[1,1,0,0])\n m.drawmeridians(p.arange(-100,0,10),labels=[0,0,0,1])\n if kwargs.has_key('title'):\n p.title(kwargs['title'])\n else:\n p.title(self.name.title())\n if kwargs.has_key('format'):\n fn = self.name+'.'+kwargs['format']\n if kwargs.has_key('dpi'):\n dots = kwargs['dpi']\n else:\n dots = 100\n try:\n p.savefig(fn,dpi=dots)\n except:\n print 'Error saving to format : ', kwargs['format']\n else:\n p.show()", "def binarize_array(numpy_array, threshold=200):\n import numpy\n for i in range(len(numpy_array)):\n for j in range(len(numpy_array[0])):\n if numpy_array[i][j] > threshold:\n numpy_array[i][j] = 255\n else:\n numpy_array[i][j] = 0\n return numpy_array", "def instance_contours(gti):\n # TODO: move to somewhere better\n import cv2\n\n rc_locs = np.where(gti > 0)\n grouped_cc_rcs = util.group_items(\n np.ascontiguousarray(np.vstack(rc_locs).T),\n gti[rc_locs], axis=0\n )\n\n def bounding_box(rcs):\n rc1 = rcs.min(axis=0)\n rc2 = rcs.max(axis=0)\n return rc1, rc2\n\n # slice out a bounding region around each instance, detect the contour and\n # then offset it back into image coordinates\n grouped_contours = {}\n for label, rcs in grouped_cc_rcs.items():\n rc1, rc2 = bounding_box(rcs)\n sl = (slice(rc1[0], rc2[0] + 2), slice(rc1[1], rc2[1] + 2))\n submask = (gti[sl] == label).astype(np.uint8)\n\n xy_offset = rc1[::-1]\n offset = xy_offset + [-2, -2]\n\n border = cv2.copyMakeBorder(submask, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=0 )\n _, contors, hierarchy = cv2.findContours(border, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE,\n offset=tuple(offset))\n \"\"\"\n offset = [0, 0]\n BGR_GREEN = (0, 255, 0)\n x = np.ascontiguousarray(util.ensure_alpha_channel(submask)[:, :, 0:3]).astype(np.uint8)\n draw_img = cv2.drawContours(\n image=x, contours=contors,\n contourIdx=-1, color=BGR_GREEN, thickness=2)\n \"\"\"\n # note when len(contours > 1, there is a hole in the building)\n # assert len(contors) == 1\n grouped_contours[label] = contors\n return grouped_contours", "def displayContours(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"contour\") == \"1\":\n needleNode = slicer.mrmlScene.GetNodeByID(modelNode.GetAttribute(\"needleID\"))\n if needleNode != None:\n if needleNode.GetDisplayVisibility()==1:\n modelNode.SetDisplayVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked)-1))\n d = modelNode.GetDisplayNode()\n d.SetSliceIntersectionVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked)-1))", "def voxelize_contours(self, contours,res,llc,voxSlice, direction):\n\t\t# generate each voxel slice\n\t\tvslices=[self.voxelize_single_contour(contour,res,llc,voxSlice, direction) for contour in contours]\n\t\tif len(vslices)>0:\n\t\t\tresult=vslices.pop(0)\n\t\t\tfor S in vslices:\n\t\t\t\tresult=numpy.logical_xor(result,S)\n\t\t\treturn result\n\t\telse:\n\t\t\treturn voxSlice", "def displayContours(self):\r\n # obsolete?\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n if modelNode.GetAttribute(\"contour\") == \"1\":\r\n needleNode = 
slicer.mrmlScene.GetNodeByID(modelNode.GetAttribute(\"needleID\"))\r\n if needleNode != None:\r\n if needleNode.GetDisplayVisibility() == 1:\r\n modelNode.SetDisplayVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked) - 1))\r\n d = modelNode.GetDisplayNode()\r\n d.SetSliceIntersectionVisibility(abs(int(slicer.modules.NeedleFinderWidget.displayContourButton.checked) - 1))", "def ccd_pixels(border=False, corners=False, full_frame=False):\n\n if border:\n edge = np.arange(127) # one less than pixel array size\n n_border = len(edge)\n\n min_corner = 0\n max_corner = 127\n\n min_edge = np.full(n_border, min_corner)\n max_edge = np.full(n_border, max_corner)\n\n # We'll go CCW - bottom, right, top, left:\n xpix = np.concatenate([edge, max_edge, [max_corner], np.flip(edge), min_edge])\n ypix = np.concatenate([min_edge, edge, [max_corner], max_edge, np.flip(edge)])\n\n xy = np.vstack([xpix, ypix]).T\n\n elif corners:\n raise NotImplementedError('still working on this')\n elif full_frame:\n xpix = np.arange(128)\n ypix = np.arange(128)\n\n xy = np.array(np.meshgrid(xpix, ypix)).T.reshape(-1, 2) # much faster than itertools\n else:\n raise Exception('You must set one of the arguments to True')\n\n return SimpleNamespace(x=xy[:, 0], y=xy[:, 1])", "def getContours( self,img ):\n if( len(img.shape) == 3):\n (_, contours, _) = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n else:\n (contours,_) = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours", "def draw_all_position_patterns(img, found, contours):\n draw_img = img.copy()\n for i in found:\n rect = cv2.minAreaRect(contours[i])\n box = np.int0(cv2.cv.BoxPoints(rect))\n cv2.drawContours(draw_img, [box], 0, (0, 0, 255), 2)\n show(draw_img)", "def remove_border(input_path, output_path):\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n os.makedirs(output_path)\n img_fn_list = get_images(input_path)\n epsilon = 0.0001\n for img_fn in img_fn_list:\n print('===============')\n print(img_fn)\n start = time.time()\n try:\n img_gray = cv2.imread(img_fn,cv2.IMREAD_GRAYSCALE)\n except:\n print(\"Error reading image {}!\".format(img_fn))\n continue\n h, w = img_gray.shape[:2]\n img_blank = np.ones(shape=[h, w], dtype=np.uint8)*255\n img_binary = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY)[1]\n _, contours, _ = cv2.findContours(img_binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n area = []\n for cnt in contours:\n approx = cv2.approxPolyDP(cnt,epsilon*cv2.arcLength(cnt,True),True)\n area .append(cv2.contourArea(cnt))\n # sort by contour area\n top_cnt_area = np.argsort(-1*np.array(area))\n # drawing has not been pre-processed\n # select the thrid largest contour which fit the drawing broader\n ind = top_cnt_area[2]\n approx = cv2.approxPolyDP(contours[ind],epsilon*cv2.arcLength(contours[ind],True),True)\n cv2.drawContours(img_blank, [approx], 0, (0), thickness = -1, lineType=8)\n # combine image with masks\n img_gray = cv2.bitwise_or(img_blank, img_gray)\n cv2.imwrite(os.path.join(output_path, os.path.basename(img_fn)), img_gray)", "def findContours(img, thresh=127, val=255):\n\tgray = grayscale(img)\n\tret, binary = cv2.threshold(gray, thresh, val, cv2.THRESH_BINARY_INV)\n\tbina, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\terrorCheckContour(contours)\n\treturn sort(contours)", "def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,\n min_height, max_height, solidity, max_vertex_count, 
min_vertex_count,\n min_ratio, max_ratio):\n output = []\n for contour in input_contours:\n x,y,w,h = cv2.boundingRect(contour)\n if (w < min_width or w > max_width):\n continue\n if (h < min_height or h > max_height):\n continue\n area = cv2.contourArea(contour)\n if (area < min_area):\n continue\n if (cv2.arcLength(contour, True) < min_perimeter):\n continue\n hull = cv2.convexHull(contour)\n solid = 100 * area / max(cv2.contourArea(hull),1)\n if (solid < solidity[0] or solid > solidity[1]):\n continue\n if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):\n continue\n ratio = (float)(w) / h\n if (ratio < min_ratio or ratio > max_ratio):\n continue\n output.append(contour)\n return output", "def get_contours(image):\n assert len(image.shape) == 2, 'Image should be binary'\n contour_image = image.copy()\n contour_image[contour_image == 1] = 255\n _, contours, _ = cv2.findContours(contour_image.astype('uint8'),\n cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n return contours", "def find_rects_white(image: np.ndarray) -> List[np.ndarray]:\n\n raise NotImplementedError()\n\n #gray = norm_color_test(image.copy())\n gray = cv.cvtColor(image.copy(), cv.COLOR_RGB2GRAY)\n gray = cv.GaussianBlur(gray, (5, 5), 0)\n #edged = cv.Canny(gray, 70, 180) # this numbers is hand picked guess from a few photos\n edged = auto_canny(gray) # this numbers is hand picked guess from a few photos\n\n # split to HSV, then pick up rouhly any white color zeroing out the rest\n hsv = cv.cvtColor(image.copy(), cv.COLOR_RGB2HSV)\n h,s,v = cv.split(hsv)\n h[h<145] = 0\n h[h>165] = 0\n #h = cv.GaussianBlur(h, (5, 5), 0)\n normed = cv.normalize(h, None, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1)\n kernel = cv.getStructuringElement(shape=cv.MORPH_ELLIPSE, ksize=(5,5))\n opened = cv.morphologyEx(normed, cv.MORPH_OPEN, kernel)\n\n # now find white regions contours\n whites = cv.findContours(opened, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)[0]\n whites.sort(key=cv.contourArea, reverse=True)\n whites = [cnt for cnt in whites if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n whiterects = []\n for i in whites:\n rect = cv.minAreaRect(i)\n w,h = rect[1]\n if w*h > 150: # 150px area, or rougly 12x12 pixels\n whiterects.append(rect)\n\n #cv.drawContours(image, whites, -1, COLORS[2 % len(COLORS)], 2)\n #cv.imshow('test', image)\n #cv.waitKey()\n\n whites = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), whiterects))\n\n\n\n #cv.imshow('test', edged)\n #cv.waitKey()\n\n contours = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n contours.sort(key=cv.contourArea, reverse=True)\n contours = [cnt for cnt in contours if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n rects = list(map(cv.minAreaRect, contours))\n boxes = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), rects))\n\n # filter non overlapping contours\n for i in reversed(range(len(boxes))):\n overlaps = False\n for wbox in whites:\n if wbox.overlaps(boxes[i]):\n overlaps = True\n break\n if not overlaps:\n boxes.pop(i)\n\n boxes = Rect.nms_merge(boxes)\n\n for i in range(len(contours)):\n #peri = cv.arcLength(contours[i], True)\n #approx = cv.approxPolyDP(contours[i], 0.02 * peri, True)\n rect = cv.minAreaRect(contours[i])\n box = cv.boxPoints(rect)\n box = np.int0(box)\n #cv.drawContours(image, [box], -1, COLORS[i % len(COLORS)], 2)\n #cv.putText(image, f'{i}: {cv.contourArea(contours[i])}px', (int(rect[0][0]), int(rect[0][1])), 
cv.FONT_HERSHEY_SIMPLEX, 0.6, COLORS[i % len(COLORS)], 1)\n\n #cv.drawContours(image, contours, -1, COLORS[1], 2)\n\n for b in boxes:\n cv.line(image, (int(b.xmin), int(b.ymin)), (int(b.xmax), int(b.ymin)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmin), int(b.ymax)), (int(b.xmax), int(b.ymax)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmin), int(b.ymin)), (int(b.xmin), int(b.ymax)), (0, 255, 255), 2)\n cv.line(image, (int(b.xmax), int(b.ymin)), (int(b.xmax), int(b.ymax)), (0, 255, 255), 2)\n\n stacked = np.hstack( (cv.cvtColor(edged, cv.COLOR_GRAY2RGB), cv.cvtColor(opened, cv.COLOR_GRAY2RGB), image))\n cv.namedWindow('test', 0)\n cv.imshow('test', stacked)\n cv.waitKey()\n\n cv.imwrite('dump.jpg', stacked)\n\n\n return boxes or list()", "def binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\n segmentation = [np.clip(i,0.0,i).tolist() for i in segmentation]\n polygons.append(segmentation)\n\n return polygons", "def drawOnImg(file1, imgBW, chosenDis,iteration):\n nP, IDrange = upDate(iteration)\n chosenContour = []\n x, y, w, h = cv2.boundingRect(shapesContours[len(chosenDis) - 1])\n cv2.rectangle(imgBW, (x - nP, y - nP), (x + w + nP, y + h + nP), (128, 0, 128), 4)\n chosenContour.append(shapesContours[len(chosenDis) - 1])\n file1.write(str(chosenDis[len(chosenDis) - 1]) + \" **\\n\")\n print(chosenDis[len(chosenDis) - 1], \"**\")\n\n file1.write(str(chosenDis[len(chosenDis) - 1]) + '\\n')\n print(chosenDis[len(chosenDis) - 1])", "def draw_contours(self, edged_image, image_cropped): \n # Find contours. Since we are only intersted in the list of contours we use a dummy variable for the second thing that is also returned (hierarchy)\n (contours, _) = cv2.findContours(edged_image.copy(), # using a copy of the image\n cv2.RETR_EXTERNAL, # contour retrieval mode. RETR_EXTERNAL has to do with how contours are structured hierarchically. It performs a hierarchical structuring, which means that internal structures are filtered out leaving only external contours. Hence, if there are contours inside the object we filter those out, and only focus on the contours that surrounds the object itself. \n cv2.CHAIN_APPROX_SIMPLE) # contours approximation method. This method only saves the most important boundary points of the contours rather than all of them in order to save memory. 
\n \n # Draw green contours around the letters on the cropped image\n image_letters = cv2.drawContours(image_cropped.copy(), # using a copy of the image\n contours, # list of contours\n -1, # draw all contours\n (0,255,0), # green following the BGR-color model of openCV\n 2) # thickness of contours\n \n # Save cropped image with contours\n cv2.imwrite(os.path.join(self.output_dir, f\"{self.input_image}_letters.jpg\"), image_letters)", "def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,\n min_height, max_height, solidity, max_vertex_count, min_vertex_count,\n min_ratio, max_ratio):\n output = []\n for contour in input_contours:\n x, y, w, h = cv2.boundingRect(contour)\n if (w < min_width or w > max_width):\n continue\n if (h < min_height or h > max_height):\n continue\n area = cv2.contourArea(contour)\n if (area < min_area):\n continue\n if (cv2.arcLength(contour, True) < min_perimeter):\n continue\n hull = cv2.convexHull(contour)\n solid = 100 * area / cv2.contourArea(hull)\n if (solid < solidity[0] or solid > solidity[1]):\n continue\n if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):\n continue\n ratio = (float)(w) / h\n if (ratio < min_ratio or ratio > max_ratio):\n continue\n output.append(contour)\n return output", "def get_instance_contours(img, contoured, instance):\n mask = np.zeros(img.shape, dtype=np.uint16)\n mask[img == instance] = 1\n ret, threshed = cv.threshold(mask, 0, 2 ** 16, cv.THRESH_BINARY)\n compressed = threshed.astype(np.uint8)\n contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n cv.drawContours(contoured, contours, -1, (randint(25, 255), randint(25, 255), randint(25, 255)), 3)\n img2 = contours = hierarchy = mask = None", "def get_contours(self):\n depth_image = capture_images((self.w,self.h))\n\n thresh = threshold_depth_image(depth_image, min_depth=config.min_depth, max_depth=config.max_depth, threshold=config.min_depth, kernel=self.kernel)\n\n # extract contours\n im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n if hierarchy!=None:\n hierarchy = hierarchy[0]\n return contours, hierarchy\n else:\n return [], []", "def contour_ice(dataarray):\n contours = dataarray.plot.contourf(vmin=0, vmax=100, levels=5)\n ice_json = json.loads(contourf_to_geojson(contourf=contours))\n print(ice_json['type'])\n # Remove land polygons\n rm_feats = [f for f in ice_json['features']\n if f['properties']['title'] == '<0.00 ']\n for rm in rm_feats:\n ice_json['features'].remove(rm)\n print([np.array(f['geometry']['coordinates']).shape\n for f in ice_json['features']])\n geodf_ice = gpd.GeoDataFrame.from_features(ice_json)\n return geodf_ice", "def analyzeGameBoard(image, debug=False):\r\n\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n image = cv2.GaussianBlur(image, (5, 5), 0)\r\n binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\r\n\r\n edges = cv2.Canny(binary, 1, 254)\r\n\r\n if debug:\r\n cv2.imshow(\"Image\", image)\r\n cv2.imshow(\"Binary\", binary)\r\n cv2.imshow(\"Edges\", edges)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n # Find contours based on edges\r\n contours = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n # Normalize format of contours between different versions of OpenCV\r\n contours = imutils.grab_contours(contours)\r\n\r\n # Find the contour with the largest area, which should be the game board\r\n board2 = max(contours, key=cv2.contourArea)\r\n 
#contours.remove(board2)\r\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\r\n contours = contours[1:]\r\n board = max(contours, key=cv2.contourArea)\r\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\r\n contours = contours[1:]\r\n #contours.remove(board)\r\n\r\n mask = np.zeros_like(binary)\r\n cv2.drawContours(mask, [board], 0, 255, -1)\r\n out = np.full_like(binary, 255)\r\n out[mask == 255] = binary[mask == 255]\r\n\r\n for contour in contours:\r\n mask = np.zeros_like(binary)\r\n cv2.drawContours(mask, [contour], 0, 255, -1)\r\n out[mask == 255] = 255\r\n if debug:\r\n cv2.imshow('t', mask)\r\n cv2.imshow('h', out)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n if debug:\r\n cv2.imshow('Original', binary)\r\n cv2.imshow('Mask', mask)\r\n cv2.imshow('Output', out)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n boardEdges = cv2.Canny(out, 1, 254)\r\n\r\n lines = cv2.HoughLines(boardEdges, 2, np.pi / 90, 100)\r\n\r\n lines = mergeLines(lines)\r\n vLines, hLines = findExtremeLines(lines)\r\n lines = vLines + hLines\r\n\r\n if debug:\r\n for line in lines:\r\n for rho, theta in line:\r\n a = np.cos(theta)\r\n b = np.sin(theta)\r\n x0 = a * rho\r\n y0 = b * rho\r\n x1 = int(x0 + 1000 * (-b))\r\n y1 = int(y0 + 1000 * (a))\r\n x2 = int(x0 - 1000 * (-b))\r\n y2 = int(y0 - 1000 * (a))\r\n cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)\r\n cv2.imshow(\"i\", image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n # Remove the game board from the image\r\n binary[out == 0] = 255\r\n\r\n if debug:\r\n cv2.imshow('mask', mask)\r\n cv2.imshow('out', out)\r\n cv2.imshow('binary', binary)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n tlPoint, trPoint, blPoint, brPoint = getAllIntersections(vLines, hLines)\r\n upperMiddle = int((tlPoint[0] + trPoint[0]) / 2)\r\n middleLeft = int((tlPoint[1] + blPoint[1]) / 2)\r\n middleRight = int((trPoint[1] + brPoint[1]) / 2)\r\n lowerMiddle = int((blPoint[0] + brPoint[0]) / 2)\r\n\r\n yMax = binary.shape[0] - 1\r\n xMax = binary.shape[1] - 1\r\n\r\n spaces = np.empty((3, 3), dtype=object)\r\n\r\n if debug:\r\n image[tlPoint[0], tlPoint[1]] = 255\r\n image[trPoint[0], trPoint[1]] = 255\r\n image[blPoint[0], blPoint[1]] = 255\r\n image[brPoint[0], brPoint[1]] = 255\r\n cv2.imshow('h', image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n spaces[0][0] = binary[0:tlPoint[0], 0:tlPoint[1]]\r\n spaces[0][1] = binary[0:upperMiddle, tlPoint[1]:trPoint[1]]\r\n spaces[0][2] = binary[0:trPoint[0], trPoint[1]:xMax]\r\n spaces[1][0] = binary[tlPoint[0]:blPoint[0], 0:middleLeft]\r\n spaces[1][1] = binary[upperMiddle:lowerMiddle, middleLeft:middleRight]\r\n spaces[1][2] = binary[trPoint[0]:brPoint[0], middleRight:xMax]\r\n spaces[2][0] = binary[blPoint[0]:yMax, 0:blPoint[1]]\r\n spaces[2][1] = binary[lowerMiddle:yMax, blPoint[1]:brPoint[1]]\r\n spaces[2][2] = binary[brPoint[0]:yMax, brPoint[1]:xMax]\r\n\r\n gameState = np.full((3, 3), ' ')\r\n\r\n for i in range(3):\r\n for j in range(3):\r\n gameState[i][j] = analyzeSpace(spaces[i][j], debug)\r\n\r\n return gameState", "def binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(\n binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = 
measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour.ravel().tolist()\n # after padding and subtracting 1 we may\n # get -0.5 points in our segmentation\n segmentation = [0 if i < 0 else i for i in segmentation]\n polygons.append(segmentation)\n return polygons", "def fun_contours(self, params):\n shape_coeffs = params[:self.num_shape_params]\n blendshape_end = self.num_shape_params + self.numObservations * self.num_blendshape_params\n blendshape_coeffs = params[self.num_shape_params:blendshape_end].reshape((self.numObservations, self.num_blendshape_params))\n trans_mats = params[blendshape_end:].reshape((self.numObservations, 7))\n\n vertices3d = self.vertices3d\n vertices3d_from_mesh = np.zeros_like(vertices3d)\n vertices3d_inner, vertices3d_right, vertices3d_left = self.transform_meshes(shape_coeffs, blendshape_coeffs, trans_mats)\n\n inner_idx = 0\n for idx in range(vertices3d.shape[0]):\n lm_idx = idx % 66\n obs_num = int(np.floor(idx/66))\n\n if lm_idx in self.contour_lms_list[0]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_right[obs_num])\n elif lm_idx in self.contour_lms_list[1]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_left[obs_num])\n else:\n vertices3d_from_mesh[idx] = vertices3d_inner[obs_num][inner_idx]\n inner_idx += 1\n if inner_idx == 50:\n inner_idx = 0\n\n return (vertices3d_from_mesh - vertices3d).ravel()", "def del_big_areas(img):\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n # ret, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 101, 3) \n implt(gray, 'gray')\n \n gray2 = gray.copy()\n mask = np.zeros(gray.shape,np.uint8)\n \n im2, contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n \n for cnt in contours:\n if (200 < cv2.contourArea(cnt) < 5000):\n cv2.drawContours(img,[cnt],0,(0,255,0),2)\n cv2.drawContours(mask,[cnt],0,255,-1)\n \n implt(mask)\n implt(img)", "def get_building_contour(current_building_mask):\n ret, threshed = cv.threshold(current_building_mask, 0, 2 ** 16, cv.THRESH_BINARY)\n compressed = threshed.astype(np.uint8)\n current_building_contour, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n return current_building_contour, hierarchy", "def generate_text_mask(shape, textboxes):\n if textboxes is None or len(textboxes) == 0:\n return np.zeros(shape).astype(np.uint8)\n \n mask = np.zeros(shape)\n for (xtl, ytl, xbr, ybr) in textboxes:\n pts = np.array(((xtl, ytl), (xtl, ybr), (xbr, ybr), (xbr, ytl)))\n cv2.fillConvexPoly(mask, pts, True)\n return mask.astype(np.uint8)", "def test_by_ref_non_contiguous(self):\n self.init()\n corners = self.ff64_2[::2,::2]\n assert not corners.flags['OWNDATA']\n set_to_zero_by_ref(corners)\n assert np.all(self.ff64_2 == np.array([[0,1,0],[3,4,5],[0,7,0]]))", "def detect_and_draw_contours(frame, thresh, meas_last, meas_now, min_area = 0, max_area = 10000, ellipses = False, directors = False):\n # Detect contours and draw them based on specified area thresholds\n img, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n final = frame.copy()\n\n i = 0\n meas_last = meas_now.copy()\n del meas_now[:]\n director = 0. 
\n rx = ry = 0.\n cx = cy = 0.\n\n fname_min_enc_C = \"min_enc_C.dat\"\n f_min_enc_C = open(fname_min_enc_C,'a+')\n R_min_enc_C = x_min_enc_C = y_min_enc_C = 0.\n \n while i < len(contours):\n area = cv2.contourArea(contours[i])\n if area < min_area or area > max_area:\n del contours[i]\n else:\n\n cv2.drawContours(final, contours, i, (0,0,255), 1)\n # add ellipse here\n if ( ellipses ):\n ellipse = cv2.fitEllipse(contours[i])\n cv2.ellipse(final,ellipse,(0,255,0),2)\n M = cv2.moments(contours[i])\n\n # here is the ouput showing minEnclosingCircle, which should\n # basically give a long-axis measurement of any given ellipse\n (x_min_enc_C, y_min_enc_C), R_min_enc_C = cv2.minEnclosingCircle(contours[i]) \n f_min_enc_C.write(\"%e %e %e\\n\" %(x_min_enc_C,y_min_enc_C,R_min_enc_C))\n\n if M['m00'] != 0:\n cx = M['m10']/M['m00']\n cy = M['m01']/M['m00']\n if ( directors ):\n mu20 = M['m20']/M['m00'] - pow(cx,2)\n mu02 = M['m02']/M['m00'] - pow(cy,2)\n mu11 = M['m11']/M['m00'] - cx*cy\n else:\n \tcx = 0\n \tcy = 0\n\n if ( directors ):\n ry = 2*mu11\n rx = mu20-mu02\n if rx == 0:\n atan = 0.5*np.pi\n if ry < 0: atan *= -1 \n director = np.fmod(0.5*atan,2*np.pi) + np.pi\n else:\n director = np.fmod(0.5*np.arctan(ry/rx),2*np.pi) + np.pi\n if (rx < 0):\n director += np.pi/2.\n\n vsize = 10\n cv2.line(final,\n (int(cx - vsize*np.cos(director)), int(cy - vsize*np.sin(director))),\n (int(cx + vsize*np.cos(director)), int(cy + vsize*np.sin(director))), \n (255,0,0),2)\n meas_now.append([cx,cy,director])\n else: \n meas_now.append([cx,cy])\n\n i += 1\n\n f_min_enc_C.close()\n\n fname_ndist = \"ndist.dat\"\n f_ndist = open(fname_ndist,'a+')\n meas_now = np.array(meas_now)\n for i in range(len(meas_now)):\n for j in range(i+1,len(meas_now)):\n f_ndist.write(\"%e \\n\" % distance(meas_now[i,:-1],meas_now[j,:-1]))\n f_ndist.close()\n meas_now = list(meas_now)\n \n return final, contours, meas_last, meas_now", "def find_contours(vertical, horizontal):\n\tmask = vertical + horizontal\n\n\ttry:\n\t\t__, contours, __ = cv2.findContours(\n\t\t\tmask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n\t\t)\n\texcept ValueError:\n\t\t# for opencv backward compatibility\n\t\tcontours, __ = cv2.findContours(\n\t\t\tmask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n\t\t)\n\t# sort in reverse based on contour area and use first 10 contours\n\tcontours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n\n\tcont = []\n\tfor c in contours:\n\t\tc_poly = cv2.approxPolyDP(c, 3, True)\n\t\tx, y, w, h = cv2.boundingRect(c_poly)\n\t\tcont.append((x, y, w, h))\n\treturn cont", "def find_contours(dframe):\n #Blur the image.\n blurred = cv2.GaussianBlur(dframe, (11,11), 0)\n #if debug:\n #plt.imshow(fixColor(blurred))\n #plt.show()\n #Get a threshold image, numbers where tested for the given video.\n ret, tframe= cv2.threshold(blurred,50,255,cv2.THRESH_BINARY)\n (cnts, _) = cv2.findContours(tframe.copy(), cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n return cnts", "def process(self):\n contours, hierachy = cv.findContours(\n self.input_image,\n cv.RETR_LIST,\n cv.CHAIN_APPROX_SIMPLE,\n )\n contours = sorted(contours, key=cv.contourArea, reverse=True)\n target = None\n for c in contours:\n path = cv.arcLength(c, True)\n approx = cv.approxPolyDP(c, 0.1*path, True)\n if len(approx) == 4:\n # print(approx)\n target = approx\n break\n if target is not None:\n \"\"\"\n cv.drawContours(\n self.original_image,\n [target],\n 0,\n (255, 255, 255),\n 5\n )\n \"\"\"\n self.output_image = 
copy.deepcopy(self.original_image)\n for p in target:\n cv.circle(self.output_image, tuple(p[0]), 4, (255, 255, 255), 5)\n return (self.output_image, target)", "def region_of_interest(self,img):\r\n #defining a blank mask\r\n mask = np.zeros_like(img) \r\n #checking number of image channel(color/grayscale) and applying mask\r\n if len(img.shape) > 2:\r\n ignore_mask_color = (255,255,255)\r\n else:\r\n ignore_mask_color = 255\r\n #filling color to pixels inside the polygon \r\n cv2.fillPoly(mask, self.vertices_img, ignore_mask_color)\r\n #image where mask pixels are nonzero\r\n masked_image = cv2.bitwise_and(img, mask)\r\n #cv2.imshow('',masked_image)\r\n return masked_image", "def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))", "def sanitize_mask(orig_x, orig_y, mask):\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Draw contours:\n cv2.drawContours(mask, contours, 0, (0, 255, 0), 2)\n # Calculate image moments of the detected contour\n num_objects = (len(contours))\n #threshold\n threshold = 3\n\n center_list = []\n # print(num_objects)\n if num_objects > 1:\n for item in range(num_objects):\n M = cv2.moments(contours[item])\n try:\n center_x = round(M['m10'] / M['m00'])\n center_y = round(M['m01'] / M['m00'])\n center_list.append([center_y , center_x ])\n except:\n pass\n\n # initialize retmask\n retmask = mask\n if num_objects > 1:\n for x, y in center_list:\n if orig_x - threshold <= x <= orig_x + threshold and orig_y - threshold <= y <= orig_y + threshold:\n pass\n else:\n def dfs_removal(px , py, mask):\n R = len(mask)\n C = len(mask[0])\n if mask[px][py ] != 255: \n return\n mask[px][py] = 0\n if 0 <= px - 1 and mask[px - 1][py ] == 255: dfs_removal(px - 1 , py , mask)\n if px + 1 < R and mask[px + 1][py ] == 255: dfs_removal(px + 1 , py , mask)\n if 0 <= py - 1 and mask[px][py - 1] == 255: dfs_removal(px, py -1 , mask)\n if py + 1 < C and mask[px][py + 1] == 255: dfs_removal(px, py + 1 , mask)\n\n dfs_removal(x,y, mask)\n\n return retmask", "def sort_filtered_contours(self):\r\n\r\n # Get the contours again\r\n invert = 255 - self.thresh_invert\r\n real_contours = cv2.findContours(invert, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n real_contours = real_contours[0] if len(real_contours) == 2 else real_contours[1]\r\n\r\n # Make sure that they're within the correct range for size\r\n # If too small, it is probably noise; if too large, then should be things around the grid\r\n for i, c in enumerate(real_contours, 1):\r\n contour_area = cv2.contourArea(c)\r\n if self.min_cell_size < contour_area < self.max_cell_size:\r\n self.good_contours.append(c)\r\n\r\n # We assume a square board, so the number of rows/cols should be the square root of total contours/cells\r\n self.board_dimension = int(math.sqrt(len(self.good_contours)))\r\n\r\n # Sort the contours from top to bottom\r\n (half_sorted_contours, _) = contours.sort_contours(self.good_contours, method=\"top-to-bottom\")\r\n\r\n # We then sort each row from left to right\r\n row = []\r\n for i, c in enumerate(half_sorted_contours, 1):\r\n row.append(c)\r\n if i % self.board_dimension == 0:\r\n (full_sorted_contours, _) = contours.sort_contours(row, method=\"left-to-right\")\r\n self.game_board_contours.append(full_sorted_contours)\r\n 
row = []", "def bound_shapes(contours):\r\n\r\n contours_poly = [None]*len(contours)\r\n boundRect = [None]*len(contours)\r\n centers = [None]*len(contours)\r\n radius = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])\r\n \r\n return (contours_poly, boundRect, centers, radius)", "def fill_polygon(points, im_shape):\n im_cnt = np.zeros((im_shape[0],im_shape[1],1), np.uint8)\n cv.fillPoly(im_cnt, [points], (255,255))\n\n return im_cnt", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def get_bricks(self, contours):\n bricks = []\n for cnt in contours:\n epsilon = 0.04*cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,epsilon,True) \n \n if len(approx) >= 4:\n rect = cv2.minAreaRect(approx)\n area = cv2.contourArea(approx)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n if area > 600 and area < 4000:\n\n brick = Brick()\n area = np.int0(area)\n center = np.int0(rect[0])\n angle = np.int0(rect[2])\n\n brick.set_area(area)\n brick.set_center(center)\n brick.set_angle(angle)\n brick.set_box(box)\n\n bricks.append(brick)\n\n # elif len(approx) > 4:\n # (x,y),radius = cv2.minEnclosingCircle(cnt)\n # center = (int(x),int(y))\n # radius = int(radius)\n # area = radius*radius*math.pi\n\n # if area > 600 and area < 2000:\n\n # brick = Brick()\n # area = np.int0(area)\n \n # brick.set_area(area)\n # brick.set_center(center)\n # brick.set_radius(radius)\n\n # bricks.append(brick)\n\n \n \n return bricks", "def getCV(file1, type, imgOri,iteration):\n del s_arrays[:]\n del shapesContours[:]\n\n if type == 'shapes':\n spContours = getContours(imgOri,iteration)\n file1.write(str(len(spContours)) + '\\n')\n print(len(spContours))\n spContours = clean_Con(spContours)\n file1.write(str(len(spContours)) + '\\n')\n print(len(spContours))\n cv2.drawContours(imgOri, spContours, -1, (0, 255, 128), 5)\n # del s_arrays[:]\n # for each shape\n for cons in spContours:\n sampleComVector = []\n x, y, w, h = cv2.boundingRect(cons)\n cv2.rectangle(imgOri, (x, y), (x + w, y + h), (100, 100, 100), 1)\n\n # move the points to center\n for point in cons:\n sampleComVector.append(complex(point[0][0] - x, (point[0][1] - y)))\n # sampleComVectors store CV of all testees contours\n s_arrays.append(sampleComVector)\n # sampleContours store all testees contours, same order with sampleComVectors\n shapesContours.append(cons)\n\n elif type == 'temp':\n # Automatically find templete contour\n templetTrue = imgOri\n tpContour = getContours(templetTrue,iteration)\n for contour in tpContour:\n x, y, w, h = cv2.boundingRect(contour)\n #\n for point in contour:\n # -x and -y are to make left and upper boundry start from 0\n t_array.append(complex(point[0][0] - x, (point[0][1] - y)))", "def hex_contour(x, y, min_cnt=2, std=False, levels=2, smoothing=3,\n hkwargs={}, skwargs={}, ckwargs={}):\n # set the default scales on x and y axis to linear\n hkwargs.setdefault('xscale', 'linear') \n hkwargs.setdefault('yscale', 'linear')\n # hexbin the data to a mincnt of 0 (needed for sig_cont to look good),\n # visible = False so it does not draw\n f = pl.hexbin(x, y, mincnt=1, visible=False, **hkwargs)\n # NOTE: may have to change the mincnt if the C keyword is set...\n c = f.get_array() # get the number of points in each bin\n H = f.get_paths() # 
get the hexagon Path objects\n # if binning is linear hexbin uses the new convention\n if (hkwargs['xscale'] == 'linear') and (hkwargs['yscale'] == 'linear'):\n h = f.get_offsets() # the centers for each hex\n xs = h.T[0]\n ys = h.T[1]\n # if bining is non-linear it uses the old convention\n else:\n # get the centers of each hexagon\n xs = np.array([(0.5 * (i.get_extents().xmax - i.get_extents().xmin) + \\\n i.get_extents().xmin) for i in H]) \n ys = np.array([(0.5 * (i.get_extents().ymax - i.get_extents().ymin) + \\\n i.get_extents().ymin) for i in H])\n if std: # check if you are making standard deviation curves\n # convert the hexbin counts to fraction of the total number of points\n c1 = convert_to_stdev(c)\n # if you just indicate the number of contours you want\n if isinstance(levels, int):\n ckwargs['levels'] = [0.682689492, 0.954499736, 0.997300204,\n 0.99993666, 0.999999426697][:levels]\n else:\n ckwargs['levels'] = levels\n # set where the last contour goes and where outlier points begin\n cmin_std = max(ckwargs['levels'])\n # mask for all bins with less than the min_cnt or less than cmin_std\n idx = ((c1 > cmin_std) | (c <= min_cnt)) & (c > 0)\n # make Triangulation object using the centers of each of the hexbins\n # plot contours\n T = triangle_contour(xs, ys, c1, smoothing, ckwargs)\n else: # if you are making normal contours\n if isinstance(levels, int):\n # default to contours from min_cnt to just below the maximum value\n # in even steps.\n ckwargs['levels'] = np.linspace(min_cnt, c.max(), levels + 1)[:-1]\n else:\n ckwargs['levels'] = levels\n idx = (c <= min_cnt) & (c > 0) # mask for all bins with less than the min_cnt\n # plot contours\n T = triangle_contour(xs, ys, c, smoothing, ckwargs)\n xout = pl.array([]) # array to hold x scatter points\n yout = pl.array([]) # array to hold y scatter points\n # if binning is linear hexbin uses the new convention\n if (hkwargs['xscale'] == 'linear') and (hkwargs['yscale'] == 'linear'):\n out = h[idx] # only the hexagons with less than min_cnt\n H = H[0] # there is only hexagon one path now centered at 0,0\n ext = H.get_extents() # the extents of this one hexagon\n for o in out:\n # move data to current bin's center\n xnow = x - o[0] \n ynow = y - o[1]\n # get the index of the points in extent of the hexagon\n jdx = (xnow >= ext.xmin) & (xnow <= ext.xmax) & \\\n (ynow >= ext.ymin) & (ynow <= ext.ymax) \n # loop through each point in extent of the hexagon\n for p in zip(xnow[jdx], ynow[jdx]):\n # if the point is actual in the hexagon add it to the list of\n # outliers\n if H.contains_point(p):\n xout = np.append(xout, p[0] + o[0])\n yout = np.append(yout, p[1] + o[1])\n # if binning is non-linear it uses the old convention\n else:\n out = pl.array(H)[idx] # hexagons to be cut\n for o in out: # loop over Paths\n H.remove(o) # remove hexagons below cutoff\n ext = o.get_extents() # get the x,y extent of the hexagon\n # mask for data points only in the hexagon extent\n jdx = (x >= ext.xmin) & (x <= ext.xmax) & \\\n (y >= ext.ymin) & (y <= ext.ymax) \n for p in zip(x[jdx], y[jdx]): # loop over these points\n if o.contains_point(p): # check if point is in the hexagon\n xout = np.append(xout, p[0]) # if so append the x value\n yout = np.append(yout, p[1]) # if so append the y value\n # set default type and size for outlier points\n skwargs.setdefault('marker', '.')\n skwargs.setdefault('ms', 2)\n skwargs.setdefault('ls', 'None')\n P = pl.plot(xout, yout, **skwargs) # plot the outlier points\n return T, P" ]
[ "0.62755567", "0.62009114", "0.60225683", "0.5922091", "0.5880212", "0.58467317", "0.5801622", "0.5658485", "0.56459415", "0.56298447", "0.5628904", "0.5601753", "0.5587474", "0.55055207", "0.5471264", "0.5443541", "0.54303265", "0.54265594", "0.542418", "0.54153204", "0.54089355", "0.5408265", "0.5406317", "0.5390426", "0.537104", "0.5337075", "0.5326477", "0.53149754", "0.52943826", "0.5280703", "0.527552", "0.52748924", "0.5268262", "0.5253515", "0.52389634", "0.52307373", "0.52234197", "0.5200975", "0.51936656", "0.5189026", "0.5186277", "0.51830006", "0.51807207", "0.5150723", "0.5145967", "0.51315403", "0.51311946", "0.51263756", "0.5108092", "0.5107764", "0.5106679", "0.5068648", "0.5068648", "0.50668263", "0.5065341", "0.5062363", "0.50576496", "0.5038805", "0.50381577", "0.50361556", "0.502791", "0.5024034", "0.5021762", "0.50170714", "0.5006589", "0.49894303", "0.49806362", "0.49762022", "0.49645862", "0.4959684", "0.49504936", "0.49498367", "0.4948961", "0.4927788", "0.49200282", "0.4916984", "0.49082676", "0.4904986", "0.49004543", "0.48965004", "0.48931834", "0.48820478", "0.48797885", "0.48656082", "0.4860495", "0.48589933", "0.48558128", "0.4854167", "0.4851816", "0.4842862", "0.48392355", "0.48388708", "0.48374683", "0.48342052", "0.48298836", "0.4823284", "0.48191035", "0.48188958", "0.4815135", "0.48066127" ]
0.5654723
8
Return True if object is an instance that inherited from specified class, otherwise False. python3 -c 'print(__import__("my_module").my_function.__doc__)' python3 -c 'print(__import__("my_module").MyClass.my_function.__doc__)'
def inherits_from(obj, a_class): if issubclass(type(obj), a_class) and not type(obj) == a_class: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isclassinstance(object):\n if not hasattr(object, \"__class__\"):\n return False\n if isbuiltin(object.__class__):\n return False\n return True", "def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type not in inspect.getmro(object)", "def inherits_from(obj, a_class):\n\n if isinstance(obj, a_class) and type(obj) is not a_class:\n return True\n\n return False", "def inherits_from(obj, a_class):\n if type(obj) is not a_class:\n return(issubclass(type(obj), a_class))\n else:\n return False", "def inherits_from(obj, a_class):\n if type(obj) is not a_class and issubclass(type(obj), a_class):\n return True\n else:\n return False", "def inherits_from(obj, a_class):\n if issubclass(type(obj), a_class):\n if type(obj) is not a_class:\n return True\n return False", "def inherits_from(obj, a_class):\n if isinstance(type(obj), a_class) and type(obj) != a_class:\n return True\n return False", "def class_is(cls: Class) -> bool:\n pass", "def inherits_from(obj, a_class):\n\n if issubclass(type(obj), a_class) and type(obj) != a_class:\n return True\n return False", "def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))", "def hasDoc():\n print(\"这个是含有函数文档的函数\")", "def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True", "def inherits_from(obj, a_class):\n return(issubclass(type(obj), a_class) and type(obj) != a_class)", "def inherits_from(obj, a_class):\n if a_class == type(obj):\n return False\n return isinstance(obj, a_class)", "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')", "def inherits_from(obj, a_class):\n if type(obj) == a_class:\n return False\n return issubclass(type(obj), a_class)", "def predicate(obj):\n return inspect.isclass(obj) and issubclass(obj, MafColumnRecord)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def is_class_discoverable(_class, default_discoverability=False):\n return bool(getattr(_class, _get_discoverable_attribute(_class),\n default_discoverability))", "def inherits_from(obj, a_class):\n return issubclass(type(obj), a_class) and type(obj) != a_class", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class", "def inherits_from(obj, a_class):\n return ((issubclass(type(obj), a_class)) and type(obj) != a_class)", "def inherits_from(obj, a_class):\n\n return isinstance(obj, a_class) and type(obj) is not a_class", "def is_builtin_dataclass(_cls: Type[Any]) -> bool:\n import dataclasses\n\n return not hasattr(_cls, 
'__processed__') and dataclasses.is_dataclass(_cls)", "def doesmatch(TheClass):\n import sys \n\n if sys.version_info.major < 3:\n return None\n S = TheClass.__base__\n for meth_name in dir(TheClass):\n if not hasattr(S, meth_name):\n continue\n meth = getattr(TheClass, meth_name)\n if(callable(meth)):\n try:\n match = (inspect.signature(meth) == inspect.signature(getattr(S,meth_name)))\n #assert(match)\n if not match:\n print(meth_name, ' : does not match parent signature', inspect.signature(meth) , inspect.signature(getattr(S,meth_name)))\n except ValueError:\n pass", "def info(obj=None):\n if obj is None:\n print (\"Python keywords:\")\n import keyword\n for kwname in keyword.kwlist:\n print (\" \", kwname)\n print(\"Built in objects:\")\n for bi_object_name in sorted(__builtins__.keys()):\n bi_object = __builtins__[bi_object_name]\n if callable(bi_object):\n if type(bi_object) is types.ClassType:\n print(\" {} (class)\".format(bi_object.__name__))\n elif type(bi_object) is types.FunctionType:\n print(\" {} (function)\".format(bi_object.__name__))\n elif hasattr(obj, \"__doc__\") and obj.__doc__ is not None:\n print (\"Documentation for %s :\\n\" % (obj.__name__))\n print (obj.__doc__)\n elif type(obj) is types.ModuleType:\n pprint(dir(obj))\n elif type(obj) is types.ClassType:\n pprint(dir(obj))\n elif type(obj) is types.InstanceType:\n pprint(dir(obj))\n pprint(dir(obj.__class__))\n return \"\"", "def has_api(instance, T):\n rtn = False\n if instance is not None and T is not None:\n if inspect.isclass(instance):\n if hasattr(instance, \"__implements\"):\n if T in instance.__implements:\n rtn = True\n else:\n if hasattr(instance.__class__, \"__implements\"):\n if T in instance.__class__.__implements:\n rtn = True\n return rtn", "def inherits_from(child, parent_name):\n if inspect.isclass(child):\n if parent_name in [c.__name__ for c in inspect.getmro(child)[1:]]:\n return True\n return False", "def inherits_from(obj, a_class):\n return (isinstance(obj, a_class) and type(obj) != a_class)", "def _is_user_class(obj):\n type_dict = type(obj).__dict__\n is_user_class = '_pandas_type' in type_dict\n return is_user_class", "def determineIfInClassTree( testObj, searchObj ):\n if not INSP.isclass( searchObj ):\n return False\n \n allBases= INSP.getmro( searchObj )\n for aBase in allBases:\n if aBase is testObj:\n return True\n \n return False", "def ismemberdescriptor(object):\r\n return False", "def has_doc() -> None:", "def describe_func(obj, parent_class, module_name):\n\n try:\n name = obj.__name__\n except AttributeError:\n # Funny comtypes...\n return\n\n if name.startswith('_') and '__init__' not in name:\n return\n\n name = parent_class.name + '.' 
+ name\n\n docs = getdoc(obj)\n comments = getcomments(obj)\n\n if isfunction(obj):\n # in Py3 unbound methods have same type as functions.\n if isinstance(parent_class, Class):\n method = object_types.METHOD\n else:\n method = object_types.FUNCTION\n elif ismethod(obj):\n method = object_types.METHOD\n elif ismethoddescriptor(obj):\n method = object_types.METHOD_DESCRIPTOR\n\n if isinstance(obj, types.MethodType):\n method = object_types.INSTANCE_METHOD\n\n try:\n source_code = getsource(obj)\n except (IOError, TypeError):\n source_code = ''\n\n klass = Method(name, method)\n klass.docs = docs\n\n klass_module = getmodule(obj)\n if klass_module and klass_module.__name__ != module_name:\n klass.is_redundant = True\n\n if source_code:\n inspect_source(klass, obj, source_code)\n klass.number_lines = '%d' % len(source_code.split('\\n'))\n\n if isinstance(obj, staticmethod):\n klass.kind = method = object_types.STATIC_METHOD\n\n if is_classmethod(obj):\n klass.kind = method = object_types.CLASS_METHOD\n\n try:\n code = None\n if method in [object_types.METHOD, object_types.METHOD_DESCRIPTOR, object_types.INSTANCE_METHOD]:\n if isPython3():\n code = obj.__func__.__code__\n else:\n code = obj.im_func.func_code\n elif method == object_types.STATIC_METHOD:\n if isPython3():\n code = obj.__func__.__code__\n else:\n code = obj.im_func.func_code\n else:\n if isPython3():\n code = obj.__code__\n else:\n code = obj.func_code\n except AttributeError:\n code = None\n\n if code is not None:\n klass.firstlineno = '%d' % code.co_firstlineno\n\n parent_class.Add(klass)", "def is_inherited(self, fld: str) -> bool:\n return self.read_inheritance(self.get_obj_label(), fld)", "def is_boost_class(obj: Any) -> bool:\n return \"Boost.Python.class\" in str(type(obj))", "def __subclasshook__(cls, subclass: Type[Any]) -> bool:\n return (subclass in cls.__subclasses__() \n or denovo.unit.has_methods(\n item = subclass,\n methods = [\n 'add', 'subset', '__add__', '__iadd__', '__iter__', \n '__len__']))", "def test_method_docs(self):\n for func in dir(BaseModel):\n self.assertTrue(len(func.__doc__) > 0)", "def is_function_type(self, objtype):\n # return self.__cfuncptrt == type(objtype)\n return issubclass(objtype, self.__cfuncptrt)\n # return isinstance(objtype, self.__cfuncptrt)", "def isbuiltin(object):\n if inspect.isbuiltin(object):\n return True\n\n return getattr(object, '__module__', None) == 'builtins'", "def InheritsFrom(self,base_class,child_class):\n if self.CleanName(base_class) in child_class.split(\"(\")[-1]:\n return True\n else:\n return False", "def is_defined_in_xxx(xxx, cls):\n if not cls.parent:\n return False\n\n if not isinstance(cls.parent, namespace.namespace_t):\n return False\n\n if xxx != cls.parent.name:\n return False\n\n xxx_ns = cls.parent\n if not xxx_ns.parent:\n return False\n\n if not isinstance(xxx_ns.parent, namespace.namespace_t):\n return False\n\n if '::' != xxx_ns.parent.name:\n return False\n\n global_ns = xxx_ns.parent\n return None is global_ns.parent", "def isbuiltin(object):\r\n return isinstance(object, types.BuiltinFunctionType)", "def ismetaclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type in inspect.getmro(object)", "def __subclasshook__(cls, C):\n #print \"subclass check\",cls,C\n try:\n if implements(cls, C): return True\n except NotImplementedError:\n return False", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n 
return False", "def is_child_class(obj, classinfo):\n try:\n return issubclass(obj, classinfo)\n except TypeError:\n return None", "def __contains__(name):", "def has_func(cls, obj, *args):\n methods = dir(obj)\n matched = [x for x in args if x in methods]\n return len(matched) == len(args)", "def is_top_level_function(obj: Any) -> bool:\r\n return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)", "def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)", "def AllInternalsVisible(self) -> bool:", "def class_is_interesting(name: str):\n if name.startswith('org.chromium.'):\n return True\n return False", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def test_check_instance_explainer_functionality():\n type_error = 'The suppress_warning parameter should be a boolean.'\n inheritance_warning = (\n 'Every explainer object should inherit from fatf.utils.transparency.'\n 'explainers.Explainer abstract class.')\n\n class ClassPlain(object):\n pass\n\n class_plain = ClassPlain()\n\n class ClassInit(fute.Explainer):\n def __init__(self):\n pass\n\n class_init = ClassInit()\n\n class ClassExplainer1(object):\n def explain_instance(self):\n pass # pragma: no cover\n\n class_explainer_1 = ClassExplainer1()\n\n class ClassExplainer2(fute.Explainer):\n def explain_instance(self, x, y):\n pass # pragma: no cover\n\n class_explainer_2 = ClassExplainer2()\n\n class ClassExplainer3(object):\n def explain_instance(self, x):\n pass # pragma: no cover\n\n class_explainer_3 = ClassExplainer3()\n\n class ClassExplainer4(fute.Explainer):\n def explain_instance(self, x, y=3):\n pass # pragma: no cover\n\n class_explainer_4 = ClassExplainer4()\n\n class ClassExplainer5(object):\n def explain_instance(self, x, y=3, z=3):\n pass # pragma: no cover\n\n class_explainer_5 = ClassExplainer5()\n\n with pytest.raises(TypeError) as exinf:\n fute.check_instance_explainer_functionality(class_plain, 'False')\n assert str(exinf.value) == type_error\n with pytest.raises(TypeError) as exinf:\n fute.check_instance_explainer_functionality(ClassPlain, 'True')\n assert str(exinf.value) == type_error\n\n msg = \"The *{}* (explainer) class is missing 'explain_instance' method.\"\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_plain,\n False) is False\n assert len(warning) == 2\n assert msg.format('ClassPlain') == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(ClassPlain) is False\n assert len(warning) == 2\n assert msg.format('ClassPlain') == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_plain,\n True) is False\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n msg = (\"The 'explain_instance' method of the *{}* (explainer) class has \"\n 'incorrect number ({}) of the required parameters. It needs to '\n 'have exactly 1 required parameter(s). 
Try using optional '\n 'parameters if you require more functionality.')\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_init,\n False) is False\n assert len(warning) == 1\n assert msg.format('ClassInit', 0) == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(ClassInit) is False\n assert len(warning) == 1\n assert msg.format('ClassInit', 0) == str(warning[0].message)\n\n assert fute.check_instance_explainer_functionality(class_init,\n True) is False\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_1, False) is False\n assert len(warning) == 2\n assert msg.format('ClassExplainer1', 0) == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer1) is False\n assert len(warning) == 2\n assert msg.format('ClassExplainer1', 0) == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_1, True) is False\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_2, False) is False\n assert len(warning) == 1\n assert msg.format('ClassExplainer2', 2) == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_2) is False\n assert len(warning) == 1\n assert msg.format('ClassExplainer2', 2) == str(warning[0].message)\n\n assert fute.check_instance_explainer_functionality(class_explainer_2,\n True) is False\n\n #\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_3, False) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer3, True) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n #\n\n assert fute.check_instance_explainer_functionality(class_explainer_4,\n False) is True\n assert fute.check_instance_explainer_functionality(ClassExplainer4,\n True) is True\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_5, False) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer5, True) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)", "def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)", "def inheritdocstrings(cls):\n for name, func in vars(cls).items():\n if isinstance(func, types.FunctionType) and not func.__doc__:\n for parent in cls.__bases__:\n parfunc = getattr(parent, name, None)\n if parfunc and getattr(parfunc, '__doc__', None):\n func.__doc__ = parfunc.__doc__\n break\n return cls", "def is_object(space, w_obj):\n return space.wrap(space.is_object(w_obj))", "def has_docstring(func):\n\n 
return func.__doc__ is not None", "def inherits_doc():\n pass", "def subectIsSelf():", "def is_heritage_completion(self):\n current_line = self.get_current_line()\n\n match = re.match(r\"class\", current_line)\n if match:\n word_before = self.get_word_before()\n if word_before[-1] == \"(\":\n return True\n return False", "def exactly(base_cls):\n\n @meta\n def check(cls):\n return cls is base_cls\n\n return check", "def test_class_doc(self):\n self.assertTrue(\n len(Review.__doc__) > 10\n )", "def _isobject(self, name, exist):\r\n if exist in [2, 5]:\r\n return False\r\n cmd = \"isobject(%s)\" % name\r\n if not self._engine:\r\n msg = \"Session is not open\"\r\n raise Oct2PyError(msg)\r\n resp = self._engine.eval(cmd, silent=True).strip()\r\n return resp == \"ans = 1\"", "def test_method_docs(self):\n for func in dir(User):\n self.assertTrue(len(func.__doc__) > 0)", "def is_qualified(obj):\n return obj.is_qualified()", "def is_function(obj):\n if type(obj) is types.FunctionType:\n return True\n if not is_object(obj):\n return False\n if not hasattr(obj, '__class__'):\n return False\n module = obj.__class__.__module__\n name = obj.__class__.__name__\n return (module == '__builtin__' and\n name in ('function',\n 'builtin_function_or_method',\n 'instancemethod',\n 'method-wrapper'))", "def test_explainer_class(self):\n assert self.explainer.__class__.__bases__[0].__name__ == 'ABC'\n assert self.explainer.__class__.__name__ == 'Explainer'", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def __dominates__(component, other):\n # Refine in subclasses.\n return issubclass(component, other)", "def _is_command(obj, cli):\n if not inspect.isfunction(obj) or obj.__name__.startswith(\"_\"):\n return False\n return hasattr(obj, \"__module__\") and obj.__module__ == cli.__name__", "def applies(cls, obj):\n return type(obj) in cls.types", "def has_classname(self):\n return self.unpack_word(0x4A) > 0", "def test_doc_class(self):\n expected = 'Amenity class handles all application amenities'\n actual = Amenity.__doc__\n self.assertEqual(expected, actual)", "def isabstract(object):\r\n return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)", "def is_icecube_class(obj: Any) -> bool:\n classname = str(type(obj))\n return \"icecube.\" in classname", "def is_object(value, class_name):\n\n return isinstance(value, getattr(schema, class_name))", "def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()", "def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)", "def checkObjectInNameSpace(objectName):\n if objectName is None or not isinstance(objectName, basestring) or objectName == u\"\": return False\n if objectName in globals(): return True\n return objectName in dir(builtins)", "def issubclass(obj, cls):\r\n if isinstance(obj, Assert):\r\n obj = obj.obj\r\n return assert_(issubclass(obj, cls),\r\n 'not issubclass(%s, %s)' % (_repr(obj), _repr(cls)))", "def is_class_method(func):\n return inspect.ismethod(func) and inspect.isclass(func.__self__)", "def is_process_class(node):\n if isinstance(node, ClassDef):\n for b in node.bases:\n if isinstance(b, Name) and b.id == KW_PROCESS_DEF:\n return True\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "def 
use_inheritance_one():\r\n print(\"* use_inheritance_one() *\")\r\n print()\r\n\r\n class Robot:\r\n def __init__(self, name):\r\n # this is constructor\r\n # initializes object\r\n self.name = name\r\n\r\n def say_hi(self):\r\n print(\"Hi, I am \"+self.name)\r\n\r\n class PhysicianRobot(Robot):\r\n # this class inherit class Robot\r\n def say_hi(self):\r\n # if this method is not defined then base class method will get called\r\n print(\"Hi, I am not \"+self.name)\r\n\r\n robo_one = Robot(\"Marvin\")\r\n robo_two = PhysicianRobot(\"James\")\r\n # returns class name with object address\r\n print(robo_one)\r\n print(robo_two)\r\n # returns class name\r\n print(type(robo_one))\r\n print(type(robo_two))\r\n robo_two.say_hi()\r\n return", "def is_method(obj: Any) -> bool:\n return inspect.ismethod(obj) or \"Boost.Python.function\" in str(type(obj))", "def isinstancemethod(cls, obj):\n return _isinstancemethod(cls, obj)", "def callable(obj): # pylint: disable=redefined-builtin\n return bool(PyCallable_Check(py_object(obj)))", "def test_doctest(self):\n self.assertTrue(BaseModel.__doc__)\n self.assertTrue(BaseModel.__init__.__doc__)\n self.assertTrue(BaseModel.__str__.__doc__)\n self.assertTrue(BaseModel.save.__doc__)\n self.assertTrue(BaseModel.to_dict.__doc__)", "def test_BaseModel_cls_doc(self):\n self.assertIsNotNone(BaseModel.__doc__)", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False" ]
[ "0.6786016", "0.6415582", "0.6202909", "0.6114083", "0.60608363", "0.60413224", "0.6035273", "0.60166055", "0.59785694", "0.59465903", "0.5944888", "0.5933893", "0.59169173", "0.5910051", "0.59036976", "0.5892804", "0.5881093", "0.5877024", "0.5872548", "0.58636963", "0.5856867", "0.5849472", "0.58454883", "0.58454883", "0.5843207", "0.5843207", "0.58420175", "0.5835947", "0.5835699", "0.5835624", "0.5811119", "0.5803651", "0.58009976", "0.5795655", "0.5773814", "0.5769952", "0.57606447", "0.5758276", "0.5721183", "0.57055086", "0.5682639", "0.56707036", "0.56705916", "0.5651129", "0.5649765", "0.5642982", "0.5637157", "0.5635478", "0.5632334", "0.56288266", "0.5627856", "0.5625708", "0.5618127", "0.56166494", "0.5615663", "0.5605771", "0.5605253", "0.5605253", "0.55961066", "0.55833536", "0.5576829", "0.5569092", "0.55392665", "0.55244106", "0.5519892", "0.5516977", "0.55017734", "0.5498717", "0.54702085", "0.54690886", "0.5463475", "0.5461471", "0.5440727", "0.54383403", "0.54290044", "0.5423393", "0.54198337", "0.54190755", "0.5416828", "0.5409024", "0.53989553", "0.5391568", "0.53877914", "0.53873974", "0.53820646", "0.537469", "0.5371293", "0.5368492", "0.534991", "0.53481895", "0.5346945", "0.5332659", "0.5332125", "0.53288674", "0.5312532", "0.52845246", "0.5283958", "0.52828825", "0.5281354", "0.5281354" ]
0.59816
8
Compile the .qrc (Qt Designer resource files) into .py files.
def compile_resources(resourcename="images", location="."): res_count_ok = 0 res_count_ko = 0 for qrc_file in os.listdir(location): # Loop through directory if qrc_file.endswith(".qrc"): # We have a candidate file print(f"---> Found {qrc_file}") # get the filename without extension base_filename, _ = os.path.splitext(os.path.basename(qrc_file)) # Make the target name target_filename = f"{base_filename}_rc.py" # Run! result = subprocess.run(["pyrcc5", "-o", target_filename, f"{location}/{qrc_file}"], capture_output=True) if result.returncode == 0: print(f"[v] Resource compiled to {target_filename}") res_count_ok += 1 else: print(f"[e] An error occured {result}") res_count_ko += 1 return res_count_ok, res_count_ko
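A minimal usage sketch for the compile_resources helper above, assuming pyrcc5 (PyQt5's resource compiler) is on PATH and that a "resources" directory containing one or more .qrc files exists; the directory name is an assumption for illustration only.

import os          # required by compile_resources (os.listdir, os.path)
import subprocess  # required by compile_resources (subprocess.run of pyrcc5)

if __name__ == "__main__":
    # "resources" is a placeholder location; returns (ok_count, failed_count)
    compiled, failed = compile_resources(location="resources")
    print(f"compiled: {compiled}, failed: {failed}")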
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_qrc(path, name=\"resources\", prefix=\"icons\"):\n qrc = '<RCC>\\n\\t<qresource prefix=\"{}\">\\n'.format(prefix)\n for each in sorted(os.listdir(path)):\n qrc += \"\\t\\t<file>{0}</file>\\n\".format(each)\n qrc += \"\\t</qresource>\\n</RCC>\\n\"\n\n qrc_file = os.path.join(path, name + \".qrc\")\n with open(qrc_file, \"w\") as stream:\n stream.write(qrc)\n\n return qrc_file", "def generatePyFromUi(verbose=True):\n ui_files = glob.glob(\"*.ui\")\n\n for filename in ui_files:\n if os.path.exists(filename):\n if verbose:\n print(\"{} was converted\".format(filename))\n os.system(\"exec python3 -m PyQt5.uic.pyuic {0}.ui -o {0}.py -x\".format(filename.rstrip(\".ui\")))", "def convert_qrc_files(directory):\n for file in Path.iterdir(directory):\n if Path.is_dir(file):\n convert_ui_files(file)\n elif Path(file).suffix == \".qrc\":\n rc_file = Path(file)\n converted_file = directory / Path(\"rc_\" + PurePosixPath(file).stem + \".py\")\n command = \"call {} && pyside6-rcc \\\"{}\\\" -o \\\"{}\\\"\".format(ENVIRONMENT_LOCATION, rc_file, converted_file)\n subprocess.call(command, shell=True)", "def make_gui(filename):\n\n from subprocess import check_call\n import os\n\n cmd = os.path.normpath('C:\\Anaconda\\Scripts\\pyside-uic')\n infile = filename + '.ui'\n outfile = filename + '.py'\n\n call_args = [cmd, infile, '-o', outfile]\n print call_args\n\n check_call(call_args)", "def copy_qtgui_to_modules():\r\n\r\n pyside_filepath = PREFIX + '/PySide.json'\r\n pyqt4_filepath = PREFIX + '/PyQt4.json'\r\n pyside = read_json(pyside_filepath)\r\n pyqt4 = read_json(pyqt4_filepath)\r\n\r\n # When Qt4 was moved to Qt5, they split QtGui into QtGui, QtWidgets, and\r\n # QtPrintSupport.\r\n pyside['QtWidgets'] = pyside['QtGui']\r\n pyqt4['QtWidgets'] = pyqt4['QtGui']\r\n pyside['QtPrintSupport'] = pyside['QtGui']\r\n pyqt4['QtPrintSupport'] = pyqt4['QtGui']\r\n\r\n write_json(pyside, pyside_filepath)\r\n print('--> Copied QtGui to QtWidgets and QtPrintSupport for {0}'.format(\r\n os.path.basename(pyside_filepath)))\r\n write_json(pyqt4, pyqt4_filepath)\r\n print('--> Copied QtGui to QtWidgets and QtPrintSupport for {0}'.format(\r\n os.path.basename(pyqt4_filepath)))", "def qrc_to_py(filename: str,\n filepath: str = os.path.dirname(__file__),\n outputpath: str = os.path.dirname(__file__)) -> None:\n if isinstance(filename,str) and isinstance(filepath,str):\n if not ' ' in filename:\n if os.path.isfile(\"{}\\{}.qrc\".format(filepath,filename)):\n filepath = filepath\n filename = filename\n chk_py = os.path.isfile(\"{}\\{}.py\".format(filepath,filename))\n os.system(\"cd {0} & pyrcc5 {1}.qrc -o {1}.py\".format(filepath,filename))\n shutil.move(\"{}\\{}.py\".format(filepath,filename),\"{}\\{}.py\".format(outputpath,filename))\n\n if chk_py:\n print(\"File Converter Info: {}.py file updated.\".format(filename))\n else:\n print(\"File Converter Info: {}.py file created.\".format(filename))\n else:\n print(\"File Converter Alert: The {}.qrc file doesn't exist.\".format(filename))\n else:\n print(\"File Converter Error: The filename contains spaces.\")\n else:\n print(\"File Converter Error: Arguments are not string.\")", "def build_assets(self):\n theme = self.theme\n \n # ~ self.assets_dir = cwd + \"/CenterSide_Themes/\" + theme + \"/\"\n \n \n \n \n \n \n # ~ self.blank_langmssg = QPixmap(\"blank_langmssg.svg\")\n # ~ self.blank_thememssg = QPixmap(\"blank_thememssg.svg\")\n \n \n \n \n \n # ~ self.icon_info = QIcon(\"Icons/info.svg\")\n # ~ self.icon_intructions = QIcon(\"Icons/instructions.svg\")\n 
# ~ self.icon_internet = QIcon(\"Icons/internet.svg\")\n # ~ self.icon_invite = QIcon(\"Icons/invite.svg\")\n # ~ self.icon_languages = QIcon(\"Icons/languages.svg\")\n # ~ self.icon_local = QIcon(\"Icons/local.svg\")\n # ~ self.icon_message = QIcon(\"Icons/message.svg\")\n # ~ self.icon_name = QIcon(\"Icons/name.svg\")\n # ~ self.icon_options = QIcon(\"Icons/options.svg\")\n # ~ self.icon_palettes = QIcon(\"Icons/palettes.svg\")\n \n # ~ self.icon_quit = QIcon(\"Icons/quit.svg\")\n # ~ self.icon_refresh = QIcon(\"Icons/refresh.svg\")\n # ~ self.icon_shop = QIcon(\"Icons/shop.svg\")\n # ~ self.icon_soundon = QIcon(\"Icons/soundon.svg\")\n # ~ self.icon_soundoff = QIcon(\"Icons/soundoff.svg\")\n # ~ self.icon_vsAI = QIcon(\"Icons/vsAI.svg\")", "def task_prepare_build():\n\n import sys\n\n python_path = sys.executable.split(os.sep)\n venv_path = str(Path(os.sep.join(python_path[:-2])))\n\n def get_dst_path():\n import platform\n\n print(f\"Going on with {venv_path} as the virtual environment exclusively used for using pyinstaller.\")\n arch = platform.system()\n if arch == \"Windows\":\n return Path(venv_path) / \"Lib/site-packages/mad_gui/qt_designer/build/\"\n if arch in [\"Linux\", \"Darwin\"]:\n python_dirs = os.listdir(Path(venv_path) / \"lib/\")\n warnings.warn(\n f\"dodo.py: Assuming your python 3.7 installation is in {Path(venv_path)}/lib/{python_dirs[0]}\"\n )\n return Path(venv_path) / \"lib\" / python_dirs[0] / \"site-packages/mad_gui/qt_designer/build/\"\n raise ValueError(\"What operating system is this?!\")\n\n def set_up_paths():\n if not os.path.exists(get_dst_path().parent):\n raise FileNotFoundError(\n \"Apparently mad_gui is not installed in this environemnt. Use `pip install . ` to do so.\"\n )\n dst_path = get_dst_path()\n os.makedirs(dst_path, exist_ok=True)\n\n def convert_ui_to_py():\n dst_path = get_dst_path()\n ui_files = [file for file in os.listdir(dst_path.parent) if \".ui\" in file]\n print(\"\\n\")\n for file in ui_files:\n print(f\"Converting from: {dst_path.parent}{os.sep}{file}\")\n print(f\"To: {dst_path}{os.sep}{file.split('.')[0]}.py\\n\")\n os.popen(f\"pyside2-uic -o {dst_path}{os.sep}{file.split('.')[0]}.py {dst_path.parent}{os.sep}{file}\")\n\n print(\n \"Info: These conversion should take place in the virutal environment you are going to use with \"\n \"pyinstaller.\"\n )\n\n return {\n \"actions\": [set_up_paths, convert_ui_to_py],\n \"verbosity\": 2,\n }", "def __init__(self):\n super(QTUIProject, self).__init__()\n self.setupUi(self)\n self.assignWidgets()\n self.show()\n self.SlotsJsonName = \"Slots Assets Folder\"\n self.BingoJsonName = \"Bingo Assets Folder\"", "def compileUiFiles(directory, recurse=False):\n compileUiDir(directory, recurse, __pyName)", "def buildUI(self):\n\n if cmds.window(\"pyART_AddToCanvasWIN\", exists=True):\n cmds.deleteUI(\"pyART_AddToCanvasWIN\", wnd=True)\n\n # create the main window\n self.mainWin = QtWidgets.QMainWindow(self.pickerUI)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mainWin.setCentralWidget(self.mainWidget)\n\n # create the mainLayout\n self.layout = QtWidgets.QVBoxLayout(self.mainWidget)\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/animPicker.qss\")\n f = open(styleSheetFile, \"r\")\n self.style = f.read()\n f.close()\n\n self.mainWin.setStyleSheet(self.style)\n\n self.mainWin.setMinimumSize(QtCore.QSize(250, 400))\n self.mainWin.setMaximumSize(QtCore.QSize(250, 400))\n self.mainWin.resize(250, 400)\n\n 
# set qt object name\n self.mainWin.setObjectName(\"pyART_AddToCanvasWIN\")\n self.mainWin.setWindowTitle(\"Add Module To Canvas\")\n\n # label, listWidget, button\n label = QtWidgets.QLabel(\"Available Modules:\")\n label.setProperty(\"boldFont\", True)\n self.layout.addWidget(label)\n\n self.moduleList = QtWidgets.QListWidget()\n self.moduleList.setMaximumSize(230, 300)\n self.moduleList.setMinimumSize(230, 300)\n self.layout.addWidget(self.moduleList)\n\n # add modules to listWidget\n self.addModulesToList()\n\n # create add button\n button = QtWidgets.QPushButton(\"Add Selected To Canvas\")\n self.layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.clicked.connect(self.addSelectedToCanvas)\n\n # show ui\n self.mainWin.show()", "def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n # Window Sizes\n MainWindow.resize(1280, 600)\n MainWindow.setMinimumSize(QtCore.QSize(1280, 600))\n MainWindow.setMaximumSize(QtCore.QSize(1280, 600))\n # Main Window\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/logos/roc_Logo.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n MainWindow.setWindowIcon(icon)\n MainWindow.setIconSize(QtCore.QSize(10, 10))\n self.mainWindowBase = QtWidgets.QWidget(MainWindow)\n self.mainWindowBase.setStyleSheet(\"background: #2A2E37;\")\n self.mainWindowBase.setObjectName(\"mainWindowBase\")\n # Side Menu\n self.menu_Logo = QtWidgets.QFrame(self.mainWindowBase)\n self.menu_Logo.setGeometry(QtCore.QRect(0, 0, 120, 120))\n self.menu_Logo.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"image: url(:/logos/roc_Logo.png);\")\n self.menu_Logo.setObjectName(\"menu_Logo\")\n self.gridLayout = QtWidgets.QGridLayout(self.menu_Logo)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.menuFrame = QtWidgets.QFrame(self.mainWindowBase)\n self.menuFrame.setGeometry(QtCore.QRect(0, 0, 120, 600))\n self.menuFrame.setStyleSheet(\"background: #313640;\\n\"\n \"\")\n self.menuFrame.setObjectName(\"menuFrame\")\n self.gridLayout_3 = QtWidgets.QGridLayout(self.menuFrame)\n self.gridLayout_3.setObjectName(\"gridLayout_3\")\n self.menu_Line = QtWidgets.QFrame(self.mainWindowBase)\n self.menu_Line.setGeometry(QtCore.QRect(120, 0, 2, 600))\n self.menu_Line.setStyleSheet(\"background: rgba(41, 63, 71, 0.75);\")\n self.menu_Line.setFrameShadow(QtWidgets.QFrame.Raised)\n self.menu_Line.setFrameShape(QtWidgets.QFrame.VLine)\n self.menu_Line.setObjectName(\"menu_Line\")\n # QR Code Button\n self.qrcode_App_Button = QtWidgets.QCommandLinkButton(self.mainWindowBase)\n self.qrcode_App_Button.setGeometry(QtCore.QRect(5, 200, 110, 70))\n font = QtGui.QFont()\n font.setPointSize(7)\n self.qrcode_App_Button.setFont(font)\n self.qrcode_App_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.qrcode_App_Button.setToolTipDuration(3000)\n self.qrcode_App_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: white;\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/vectors/qrcode.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.qrcode_App_Button.setIcon(icon1)\n self.qrcode_App_Button.setIconSize(QtCore.QSize(45, 45))\n self.qrcode_App_Button.setObjectName(\"qrcode_App_Button\")\n self.qrcode_App_Button.clicked.connect(self.openQrApp)\n # Data Button\n self.data_Button = QtWidgets.QCommandLinkButton(self.mainWindowBase)\n self.data_Button.setGeometry(QtCore.QRect(5, 300, 110, 70))\n font = QtGui.QFont()\n font.setPointSize(7)\n self.data_Button.setFont(font)\n 
self.data_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.data_Button.setToolTipDuration(3000)\n self.data_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: white;\\n\"\n \"\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/vectors/data.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.data_Button.setIcon(icon2)\n self.data_Button.setIconSize(QtCore.QSize(45, 45))\n self.data_Button.setObjectName(\"data_Button\")\n self.data_Button.clicked.connect(InProgress)\n # Config Button\n self.config_Button = QtWidgets.QCommandLinkButton(self.mainWindowBase)\n self.config_Button.setGeometry(QtCore.QRect(5, 400, 110, 70))\n font = QtGui.QFont()\n font.setPointSize(7)\n self.config_Button.setFont(font)\n self.config_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.config_Button.setToolTipDuration(3000)\n self.config_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: white;\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/vectors/config.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.config_Button.setIcon(icon3)\n self.config_Button.setIconSize(QtCore.QSize(45, 45))\n self.config_Button.setObjectName(\"config_Button\")\n self.config_Button.clicked.connect(self.openRocConfig)\n # Docummentation Button\n self.docs_Button = QtWidgets.QCommandLinkButton(self.mainWindowBase)\n self.docs_Button.setGeometry(QtCore.QRect(1080, 15, 100, 50))\n self.docs_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.docs_Button.setToolTipDuration(3000)\n self.docs_Button.setStyleSheet(\"color: white;\\n\"\n \"background: rgba(41, 63, 71, 0.75);\")\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\":/vectors/docs.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.docs_Button.setIcon(icon4)\n self.docs_Button.setIconSize(QtCore.QSize(40, 32))\n self.docs_Button.setObjectName(\"docs_Button\")\n self.docs_Button.clicked.connect(lambda: self.mainWebActions(button='docs'))\n # About Button\n self.about_Button = QtWidgets.QCommandLinkButton(self.mainWindowBase)\n self.about_Button.setGeometry(QtCore.QRect(1180, 15, 91, 50))\n self.about_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.about_Button.setToolTipDuration(3000)\n self.about_Button.setStyleSheet(\"color: white;\\n\"\n \"background: rgba(41, 63, 71, 0.75);\")\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\":/vectors/info.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.about_Button.setIcon(icon5)\n self.about_Button.setIconSize(QtCore.QSize(30, 30))\n self.about_Button.setObjectName(\"about_Button\")\n self.about_Button.clicked.connect(self.openAboutApp)\n # Viewer Base Design\n self.viewer_Superior_Line = QtWidgets.QFrame(self.mainWindowBase)\n self.viewer_Superior_Line.setGeometry(QtCore.QRect(1061, 70, 220, 2))\n self.viewer_Superior_Line.setStyleSheet(\"background: #1DDED8;\")\n self.viewer_Superior_Line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.viewer_Superior_Line.setFrameShape(QtWidgets.QFrame.HLine)\n self.viewer_Superior_Line.setObjectName(\"viewer_Superior_Line\")\n self.inferior_Line_Base = QtWidgets.QFrame(self.mainWindowBase)\n self.inferior_Line_Base.setGeometry(QtCore.QRect(121, 460, 1280, 2))\n self.inferior_Line_Base.setStyleSheet(\"background: #1DDED8;\")\n self.inferior_Line_Base.setFrameShape(QtWidgets.QFrame.HLine)\n self.inferior_Line_Base.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.inferior_Line_Base.setObjectName(\"inferior_Line_Base\")\n 
self.lateral_Line_Base = QtWidgets.QFrame(self.mainWindowBase)\n self.lateral_Line_Base.setGeometry(QtCore.QRect(1061, 0, 2, 462))\n self.lateral_Line_Base.setStyleSheet(\"background: #1DDED8;\")\n self.lateral_Line_Base.setFrameShape(QtWidgets.QFrame.VLine)\n self.lateral_Line_Base.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.lateral_Line_Base.setObjectName(\"lateral_Line_Base\")\n self.robot_Viewer_Frame = QtWidgets.QFrame(self.mainWindowBase)\n self.robot_Viewer_Frame.setGeometry(QtCore.QRect(1063, 72, 218, 30))\n self.robot_Viewer_Frame.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\")\n self.robot_Viewer_Frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.robot_Viewer_Frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.robot_Viewer_Frame.setObjectName(\"robot_Viewer_Frame\")\n self.robot_Viewer_Label = QtWidgets.QLabel(self.robot_Viewer_Frame)\n self.robot_Viewer_Label.setGeometry(QtCore.QRect(60, 9, 131, 16))\n self.robot_Viewer_Label.setStyleSheet(\"background: transparent;\\n\"\n \"font: 10pt \\\"Khmer OS System\\\";\\n\"\n \"color: white;\")\n self.robot_Viewer_Label.setObjectName(\"robot_Viewer_Label\")\n self.robotViewerBase = QtWidgets.QFrame(self.mainWindowBase)\n self.robotViewerBase.setGeometry(QtCore.QRect(1063, 100, 216, 110))\n self.robotViewerBase.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.robotViewerBase.setFrameShadow(QtWidgets.QFrame.Raised)\n self.robotViewerBase.setObjectName(\"robotViewerBase\")\n self.robot_Viewer_Line = QtWidgets.QFrame(self.robotViewerBase)\n self.robot_Viewer_Line.setGeometry(QtCore.QRect(105, 15, 2, 80))\n self.robot_Viewer_Line.setStyleSheet(\"background: #1DDED8;\")\n self.robot_Viewer_Line.setFrameShape(QtWidgets.QFrame.VLine)\n self.robot_Viewer_Line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.robot_Viewer_Line.setObjectName(\"robot_Viewer_Line\")\n # Terminal Widget\n self.terminalWidget = QtWidgets.QTabWidget(self.mainWindowBase)\n self.terminalWidget.setGeometry(QtCore.QRect(121, 462, 1158, 139))\n self.terminalWidget.setMinimumSize(QtCore.QSize(1158, 139))\n self.terminalWidget.setMaximumSize(QtCore.QSize(1158, 139))\n self.terminalWidget.setTabPosition(QtWidgets.QTabWidget.South)\n self.terminalWidget.setIconSize(QtCore.QSize(10, 10))\n self.terminalWidget.setObjectName(\"terminalWidget\")\n self.urxvtWidget = QtWidgets.QWidget()\n self.urxvtWidget.setObjectName(\"urxvtWidget\")\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(\":/vectors/ShellWhite.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n # To add new tabs use the line above\n # self.terminalWidget.addTab(self.urxvtWidget, icon6, \"\")\n # Starts the Terminal within a New Tab\n self.terminalWidget.insertTab(0, embeddedTerminal(), icon6, \"urvxt\")\n # Robot TB1 Viwer Checkbox\n self.robot_TB1_Viewer = QtWidgets.QCheckBox(self.robotViewerBase)\n self.robot_TB1_Viewer.setGeometry(QtCore.QRect(15, 15, 80, 21))\n self.robot_TB1_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB1_Viewer.setToolTipDuration(3000)\n self.robot_TB1_Viewer.setStyleSheet(\"color: white;\")\n self.robot_TB1_Viewer.setObjectName(\"robot_TB1_Viewer\")\n # Robot TB2 Viwer Checkbox\n self.robot_TB2_Viewer = QtWidgets.QCheckBox(self.robotViewerBase)\n self.robot_TB2_Viewer.setGeometry(QtCore.QRect(15, 35, 81, 21))\n self.robot_TB2_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB2_Viewer.setToolTipDuration(3000)\n self.robot_TB2_Viewer.setStyleSheet(\"color: white;\")\n 
self.robot_TB2_Viewer.setObjectName(\"robot_TB2_Viewer\")\n # Robot TB3 Viwer Checkbox\n self.robot_TB3_Viewer = QtWidgets.QCheckBox(self.robotViewerBase)\n self.robot_TB3_Viewer.setGeometry(QtCore.QRect(15, 55, 81, 21))\n self.robot_TB3_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB3_Viewer.setToolTipDuration(3000)\n self.robot_TB3_Viewer.setStyleSheet(\"color: white;\")\n self.robot_TB3_Viewer.setObjectName(\"robot_TB3_Viewer\")\n # Robot TB4 Viwer Checkbox\n self.robot_TB4_Viewer = QtWidgets.QCheckBox(self.robotViewerBase)\n self.robot_TB4_Viewer.setGeometry(QtCore.QRect(15, 75, 81, 21))\n self.robot_TB4_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB4_Viewer.setToolTipDuration(3000)\n self.robot_TB4_Viewer.setStyleSheet(\"color: white;\")\n self.robot_TB4_Viewer.setObjectName(\"robot_TB4_Viewer\")\n # Robot TB1 Status Button\n self.robot_TB1_Status = QtWidgets.QPushButton(self.robotViewerBase)\n self.robot_TB1_Status.setGeometry(QtCore.QRect(119, 16, 90, 18))\n self.robot_TB1_Status.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB1_Status.setToolTipDuration(6000)\n self.robot_TB1_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n self.robot_TB1_Status.setObjectName(\"robot_TB1_Status\")\n self.robot_TB1_Status.clicked.connect(InProgress)\n # Robot TB2 Status Button\n self.robot_TB2_Status = QtWidgets.QPushButton(self.robotViewerBase)\n self.robot_TB2_Status.setGeometry(QtCore.QRect(120, 36, 90, 18))\n self.robot_TB2_Status.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB2_Status.setToolTipDuration(6000)\n self.robot_TB2_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n self.robot_TB2_Status.setObjectName(\"robot_TB2_Status\")\n self.robot_TB2_Status.clicked.connect(InProgress)\n # Robot TB3 Status Button\n self.robot_TB3_Status = QtWidgets.QPushButton(self.robotViewerBase)\n self.robot_TB3_Status.setGeometry(QtCore.QRect(120, 56, 90, 18))\n self.robot_TB3_Status.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB3_Status.setToolTipDuration(6000)\n self.robot_TB3_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n self.robot_TB3_Status.setObjectName(\"robot_TB3_Status\")\n self.robot_TB3_Status.clicked.connect(InProgress)\n # Robot TB4 Status Button\n self.robot_TB4_Status = QtWidgets.QPushButton(self.robotViewerBase)\n self.robot_TB4_Status.setGeometry(QtCore.QRect(120, 76, 90, 18))\n self.robot_TB4_Status.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB4_Status.setToolTipDuration(6000)\n self.robot_TB4_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n self.robot_TB4_Status.setObjectName(\"robot_TB4_Status\")\n self.robot_TB4_Status.clicked.connect(InProgress)\n # Robot TB1 Main Widget\n self.robot_TB1 = QtWidgets.QGroupBox(self.mainWindowBase)\n self.robot_TB1.setEnabled(True)\n self.robot_TB1.setGeometry(QtCore.QRect(150, 10, 439, 219))\n self.robot_TB1.setStyleSheet(\"\\n\"\n \"color: rgb(206, 255, 188);\")\n self.robot_TB1.setObjectName(\"robot_TB1\")\n self.robot_TB1.setHidden(True)\n self.options_TB1 = QtWidgets.QFrame(self.robot_TB1)\n self.options_TB1.setGeometry(QtCore.QRect(215, 21, 221, 196))\n self.options_TB1.setStyleSheet(\"\")\n self.options_TB1.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.options_TB1.setFrameShadow(QtWidgets.QFrame.Raised)\n 
self.options_TB1.setObjectName(\"options_TB1\")\n self.line_TB1_1 = QtWidgets.QFrame(self.options_TB1)\n self.line_TB1_1.setGeometry(QtCore.QRect(90, 11, 2, 80))\n self.line_TB1_1.setStyleSheet(\"background: #1DDED8;\")\n self.line_TB1_1.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB1_1.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB1_1.setObjectName(\"line_TB1_1\")\n # TB1 Settings and Configuration Button\n self.configure_TB1_Button = QtWidgets.QPushButton(self.options_TB1)\n self.configure_TB1_Button.setGeometry(QtCore.QRect(100, 70, 111, 18))\n self.configure_TB1_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.configure_TB1_Button.setToolTipDuration(3000)\n self.configure_TB1_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.configure_TB1_Button.setObjectName(\"configure_TB1_Button\")\n self.configure_TB1_Button.clicked.connect(self.openTB1Settings)\n # TB1 Logs Button\n self.logs_TB1_Button = QtWidgets.QPushButton(self.options_TB1)\n self.logs_TB1_Button.setGeometry(QtCore.QRect(100, 45, 111, 18))\n self.logs_TB1_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.logs_TB1_Button.setToolTipDuration(3000)\n self.logs_TB1_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.logs_TB1_Button.setObjectName(\"logs_TB1_Button\")\n # TB1 Floor Checkbox\n self.floor_TB1_Show = QtWidgets.QCheckBox(self.options_TB1)\n self.floor_TB1_Show.setGeometry(QtCore.QRect(10, 30, 61, 21))\n self.floor_TB1_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.floor_TB1_Show.setToolTipDuration(3000)\n self.floor_TB1_Show.setStyleSheet(\"color: white;\")\n self.floor_TB1_Show.setObjectName(\"floor_TB1_Show\")\n self.floor_TB1_Show.stateChanged.connect(self.floorTB1Checked)\n # TB1 Kinnect Checkbox\n self.kinect_TB1_Show = QtWidgets.QCheckBox(self.options_TB1)\n self.kinect_TB1_Show.setGeometry(QtCore.QRect(10, 50, 71, 21))\n self.kinect_TB1_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.kinect_TB1_Show.setToolTipDuration(3000)\n self.kinect_TB1_Show.setStyleSheet(\"color: white;\")\n self.kinect_TB1_Show.setObjectName(\"kinect_TB1_Show\")\n self.kinect_TB1_Show.stateChanged.connect(self.kinnectTB1Checked)\n # TB1 Gmapp Checkbox\n self.gmapp_TB1_Show = QtWidgets.QCheckBox(self.options_TB1)\n self.gmapp_TB1_Show.setGeometry(QtCore.QRect(10, 10, 71, 21))\n self.gmapp_TB1_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.gmapp_TB1_Show.setToolTipDuration(3000)\n self.gmapp_TB1_Show.setStyleSheet(\"color: white;\")\n self.gmapp_TB1_Show.setObjectName(\"gmapp_TB1_Show\")\n self.gmapp_TB1_Show.stateChanged.connect(self.gmappTB1Checked)\n # TB1 Camera Checkbox\n self.camera_TB1_Show = QtWidgets.QCheckBox(self.options_TB1)\n self.camera_TB1_Show.setGeometry(QtCore.QRect(10, 70, 71, 21))\n self.camera_TB1_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.camera_TB1_Show.setStyleSheet(\"color: white;\")\n self.camera_TB1_Show.setObjectName(\"camera_TB1_Show\")\n self.camera_TB1_Show.stateChanged.connect(self.cameraTB1Checked)\n # TB1 ON Radio Button\n self.on_TB1_Viewer = QtWidgets.QRadioButton(self.options_TB1)\n self.on_TB1_Viewer.setGeometry(QtCore.QRect(100, 10, 51, 21))\n self.on_TB1_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.on_TB1_Viewer.setToolTipDuration(3000)\n self.on_TB1_Viewer.setObjectName(\"on_TB1_Viewer\")\n 
self.on_TB1_Viewer.toggled.connect(self.startSubscribers)\n # TB1 OFF Radio Button\n self.off_TB1_Viewer = QtWidgets.QRadioButton(self.options_TB1)\n self.off_TB1_Viewer.setGeometry(QtCore.QRect(160, 10, 51, 21))\n self.off_TB1_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.off_TB1_Viewer.setToolTipDuration(3000)\n self.off_TB1_Viewer.setObjectName(\"off_TB1_Viewer\")\n self.off_TB1_Viewer.toggled.connect(self.killSubscribers)\n # TB1 Secondary Design\n self.line_TB1_2 = QtWidgets.QFrame(self.options_TB1)\n self.line_TB1_2.setGeometry(QtCore.QRect(100, 35, 110, 1))\n self.line_TB1_2.setStyleSheet(\"\")\n self.line_TB1_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_TB1_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB1_2.setObjectName(\"line_TB1_2\")\n self.reload_TB1 = QtWidgets.QPushButton(self.options_TB1)\n self.reload_TB1.setGeometry(QtCore.QRect(114, 162, 90, 20))\n self.reload_TB1.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reload_TB1.setToolTipDuration(3000)\n self.reload_TB1.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reload_TB1.setObjectName(\"reload_TB1\")\n self.line_TB1_4 = QtWidgets.QFrame(self.options_TB1)\n self.line_TB1_4.setGeometry(QtCore.QRect(209, 160, 2, 25))\n self.line_TB1_4.setStyleSheet(\"background: #313640;\")\n self.line_TB1_4.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB1_4.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB1_4.setObjectName(\"line_TB1_4\")\n self.line_TB1_3 = QtWidgets.QFrame(self.options_TB1)\n self.line_TB1_3.setGeometry(QtCore.QRect(10, 160, 2, 25))\n self.line_TB1_3.setStyleSheet(\"background: #313640;\")\n self.line_TB1_3.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB1_3.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB1_3.setObjectName(\"line_TB1_3\")\n # TB1 RESET Button\n self.reset_TB1 = QtWidgets.QPushButton(self.options_TB1)\n self.reset_TB1.setGeometry(QtCore.QRect(15, 162, 90, 20))\n self.reset_TB1.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reset_TB1.setToolTipDuration(3000)\n self.reset_TB1.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reset_TB1.setObjectName(\"reset_TB1\")\n self.valuesTB1Frame = QtWidgets.QFrame(self.options_TB1)\n self.valuesTB1Frame.setGeometry(QtCore.QRect(10, 100, 201, 51))\n self.valuesTB1Frame.setToolTipDuration(3000)\n self.valuesTB1Frame.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\")\n self.valuesTB1Frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.valuesTB1Frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.valuesTB1Frame.setObjectName(\"valuesTB1Frame\")\n # TB1 Robot Values\n self.x_TB1_Value = QtWidgets.QLCDNumber(self.valuesTB1Frame)\n self.x_TB1_Value.setGeometry(QtCore.QRect(30, 7, 31, 16))\n self.x_TB1_Value.setObjectName(\"x_TB1_Value\")\n self.x_TB1_Label = QtWidgets.QLabel(self.valuesTB1Frame)\n self.x_TB1_Label.setGeometry(QtCore.QRect(10, 7, 16, 16))\n self.x_TB1_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.x_TB1_Label.setObjectName(\"x_TB1_Label\")\n self.y_TB1_Label = QtWidgets.QLabel(self.valuesTB1Frame)\n self.y_TB1_Label.setGeometry(QtCore.QRect(10, 31, 16, 16))\n self.y_TB1_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.y_TB1_Label.setObjectName(\"y_TB1_Label\")\n self.y_TB1_Value = 
QtWidgets.QLCDNumber(self.valuesTB1Frame)\n self.y_TB1_Value.setGeometry(QtCore.QRect(30, 31, 31, 16))\n self.y_TB1_Value.setObjectName(\"y_TB1_Value\")\n self.velocity_TB1_Label = QtWidgets.QLabel(self.valuesTB1Frame)\n self.velocity_TB1_Label.setGeometry(QtCore.QRect(75, 7, 61, 16))\n self.velocity_TB1_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.velocity_TB1_Label.setObjectName(\"velocity_TB1_Label\")\n self.linear_TB1_Value = QtWidgets.QLCDNumber(self.valuesTB1Frame)\n self.linear_TB1_Value.setGeometry(QtCore.QRect(130, 7, 31, 16))\n self.linear_TB1_Value.setObjectName(\"linear_TB1_Value\")\n self.battery_TB1_Label = QtWidgets.QLabel(self.valuesTB1Frame)\n self.battery_TB1_Label.setGeometry(QtCore.QRect(75, 31, 61, 16))\n self.battery_TB1_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.battery_TB1_Label.setObjectName(\"battery_TB1_Label\")\n self.turtleBat_TB1_Value = QtWidgets.QLCDNumber(self.valuesTB1Frame)\n self.turtleBat_TB1_Value.setGeometry(QtCore.QRect(130, 30, 31, 16))\n self.turtleBat_TB1_Value.setObjectName(\"turtleBat_TB1_Value\")\n self.noteBat_TB1_Value = QtWidgets.QLCDNumber(self.valuesTB1Frame)\n self.noteBat_TB1_Value.setGeometry(QtCore.QRect(165, 30, 31, 16))\n self.noteBat_TB1_Value.setObjectName(\"noteBat_TB1_Value\")\n self.angular_TB1_Value = QtWidgets.QLCDNumber(self.valuesTB1Frame)\n self.angular_TB1_Value.setGeometry(QtCore.QRect(165, 7, 31, 16))\n self.angular_TB1_Value.setObjectName(\"angular_TB1_Value\")\n self.viewer_TB1 = QtWidgets.QTabWidget(self.robot_TB1)\n self.viewer_TB1.setGeometry(QtCore.QRect(0, 20, 221, 198))\n font = QtGui.QFont()\n font.setFamily(\"Tlwg Typist\")\n font.setPointSize(6)\n font.setBold(True)\n font.setWeight(75)\n self.viewer_TB1.setFont(font)\n self.viewer_TB1.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.viewer_TB1.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.viewer_TB1.setStyleSheet(\"color: white;\")\n self.viewer_TB1.setTabPosition(QtWidgets.QTabWidget.West)\n self.viewer_TB1.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.viewer_TB1.setElideMode(QtCore.Qt.ElideLeft)\n self.viewer_TB1.setObjectName(\"viewer_TB1\")\n # Kinnect TB1 Screen\n self.kinnect_TB1_Screen = QtWidgets.QWidget()\n self.kinnect_TB1_Screen.setObjectName(\"kinnect_TB1_Screen\")\n self.viewer_TB1.addTab(self.kinnect_TB1_Screen, \"\")\n # Camera TB1 Screen\n self.camera_TB1_Screen = QtWidgets.QWidget()\n self.camera_TB1_Screen.setObjectName(\"camera_TB1_Screen\")\n self.viewer_TB1.addTab(self.camera_TB1_Screen, \"\")\n # GMAPP TB1 Screen\n self.gmapp_TB1_Screen = QtWidgets.QWidget()\n self.gmapp_TB1_Screen.setObjectName(\"gmapp_TB1_Screen\")\n self.viewer_TB1.addTab(self.gmapp_TB1_Screen, \"\")\n # Floor TB1 Screen\n self.floor_TB1_Screen = QtWidgets.QWidget()\n self.floor_TB1_Screen.setObjectName(\"floor_TB1_Screen\")\n self.viewer_TB1.addTab(self.floor_TB1_Screen, \"\")\n self.robot_Selection_Frame = QtWidgets.QFrame(self.mainWindowBase)\n self.robot_Selection_Frame.setGeometry(QtCore.QRect(1063, 208, 218, 30))\n self.robot_Selection_Frame.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\")\n self.robot_Selection_Frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.robot_Selection_Frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.robot_Selection_Frame.setObjectName(\"robot_Selection_Frame\")\n self.robot_Selection_Label = QtWidgets.QLabel(self.robot_Selection_Frame)\n self.robot_Selection_Label.setGeometry(QtCore.QRect(52, 9, 131, 
16))\n self.robot_Selection_Label.setStyleSheet(\"background: transparent;\\n\"\n \"font: 10pt \\\"Khmer OS System\\\";\\n\"\n \"color: white;\")\n self.robot_Selection_Label.setObjectName(\"robot_Selection_Label\")\n self.robotSelectionBase = QtWidgets.QFrame(self.mainWindowBase)\n self.robotSelectionBase.setGeometry(QtCore.QRect(1063, 235, 216, 224))\n self.robotSelectionBase.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.robotSelectionBase.setFrameShadow(QtWidgets.QFrame.Raised)\n self.robotSelectionBase.setObjectName(\"robotSelectionBase\")\n self.robot_Selection_TypeLabel = QtWidgets.QLabel(self.robotSelectionBase)\n self.robot_Selection_TypeLabel.setGeometry(QtCore.QRect(20, 74, 91, 21))\n self.robot_Selection_TypeLabel.setStyleSheet(\"color: white;\")\n self.robot_Selection_TypeLabel.setObjectName(\"robot_Selection_TypeLabel\")\n self.robot_Selection_Type = QtWidgets.QComboBox(self.robotSelectionBase)\n self.robot_Selection_Type.setGeometry(QtCore.QRect(110, 72, 91, 23))\n self.robot_Selection_Type.setStyleSheet(\"color: white;\\n\"\n \"font: 8pt \\\"Sans Serif\\\";\")\n self.robot_Selection_Type.setObjectName(\"robot_Selection_Type\")\n self.robot_Selection_Type.addItem(\"\")\n self.robot_Selection_Type.addItem(\"\")\n self.robot_Selection_Type.addItem(\"\")\n self.robot_Selection_Role = QtWidgets.QComboBox(self.robotSelectionBase)\n self.robot_Selection_Role.setGeometry(QtCore.QRect(110, 100, 91, 23))\n self.robot_Selection_Role.setStyleSheet(\"color: white;\\n\"\n \"font: 8pt \\\"Sans Serif\\\";\")\n self.robot_Selection_Role.setObjectName(\"robot_Selection_Role\")\n self.robot_Selection_Role.addItem(\"\")\n self.robot_Selection_Role.addItem(\"\")\n self.robot_Selection_Role.addItem(\"\")\n self.robot_Selection_RoleLabel = QtWidgets.QLabel(self.robotSelectionBase)\n self.robot_Selection_RoleLabel.setGeometry(QtCore.QRect(20, 102, 91, 21))\n self.robot_Selection_RoleLabel.setStyleSheet(\"color: white;\")\n self.robot_Selection_RoleLabel.setObjectName(\"robot_Selection_RoleLabel\")\n self.robot_Selection_TaskLabel = QtWidgets.QLabel(self.robotSelectionBase)\n self.robot_Selection_TaskLabel.setGeometry(QtCore.QRect(20, 134, 91, 21))\n self.robot_Selection_TaskLabel.setStyleSheet(\"color: white;\")\n self.robot_Selection_TaskLabel.setObjectName(\"robot_Selection_TaskLabel\")\n self.robot_Selection_Task = QtWidgets.QComboBox(self.robotSelectionBase)\n self.robot_Selection_Task.setGeometry(QtCore.QRect(110, 132, 91, 23))\n self.robot_Selection_Task.setStyleSheet(\"color: white;\\n\"\n \"font: 8pt \\\"Sans Serif\\\";\")\n self.robot_Selection_Task.setObjectName(\"robot_Selection_Task\")\n self.robot_Selection_Task.addItem(\"\")\n self.robot_Selection_Task.addItem(\"\")\n self.robot_Selection_Task.addItem(\"\")\n self.robot_Selection_BehaviorLabel = QtWidgets.QLabel(self.robotSelectionBase)\n self.robot_Selection_BehaviorLabel.setGeometry(QtCore.QRect(20, 164, 91, 21))\n self.robot_Selection_BehaviorLabel.setStyleSheet(\"color: white;\")\n self.robot_Selection_BehaviorLabel.setObjectName(\"robot_Selection_BehaviorLabel\")\n self.robot_Selection_Behavior = QtWidgets.QComboBox(self.robotSelectionBase)\n self.robot_Selection_Behavior.setGeometry(QtCore.QRect(110, 162, 91, 23))\n self.robot_Selection_Behavior.setStyleSheet(\"color: white;\\n\"\n \"font: 8pt \\\"Sans Serif\\\";\")\n self.robot_Selection_Behavior.setObjectName(\"robot_Selection_Behavior\")\n self.robot_Selection_Behavior.addItem(\"\")\n self.robot_Selection_Behavior.addItem(\"\")\n 
self.robot_Selection_Behavior.addItem(\"\")\n self.robot_Selection_Experiment = QtWidgets.QComboBox(self.robotSelectionBase)\n self.robot_Selection_Experiment.setGeometry(QtCore.QRect(110, 190, 91, 23))\n self.robot_Selection_Experiment.setStyleSheet(\"color: white;\\n\"\n \"font: 8pt \\\"Sans Serif\\\";\")\n self.robot_Selection_Experiment.setObjectName(\"robot_Selection_Experiment\")\n self.robot_Selection_Experiment.addItem(\"\")\n self.robot_Selection_Experiment.addItem(\"\")\n self.robot_Selection_Experiment.addItem(\"\")\n self.robot_Selection_ExpLabel = QtWidgets.QLabel(self.robotSelectionBase)\n self.robot_Selection_ExpLabel.setGeometry(QtCore.QRect(20, 192, 91, 21))\n self.robot_Selection_ExpLabel.setStyleSheet(\"color: white;\")\n self.robot_Selection_ExpLabel.setObjectName(\"robot_Selection_ExpLabel\")\n # Set de Experiment Button\n self.set_Selection_Values = QtWidgets.QPushButton(self.robotSelectionBase)\n self.set_Selection_Values.setGeometry(QtCore.QRect(77, 8, 50, 22))\n self.set_Selection_Values.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.set_Selection_Values.setToolTipDuration(3000)\n self.set_Selection_Values.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n self.set_Selection_Values.setObjectName(\"set_Selection_Values\")\n self.set_Selection_Values.clicked.connect(lambda: self.setExperiment(robot='None', set='OK'))\n # Reset de Experiment Button\n self.reset_Selection_Values = QtWidgets.QPushButton(self.robotSelectionBase)\n self.reset_Selection_Values.setGeometry(QtCore.QRect(169, 8, 31, 22))\n self.reset_Selection_Values.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reset_Selection_Values.setToolTipDuration(3000)\n self.reset_Selection_Values.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reset_Selection_Values.setObjectName(\"reset_Selection_Values\")\n self.robot_Selection_InternalLine = QtWidgets.QFrame(self.robotSelectionBase)\n self.robot_Selection_InternalLine.setGeometry(QtCore.QRect(20, 35, 180, 3))\n self.robot_Selection_InternalLine.setFrameShape(QtWidgets.QFrame.HLine)\n self.robot_Selection_InternalLine.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.robot_Selection_InternalLine.setObjectName(\"robot_Selection_InternalLine\")\n self.robot_TB1_Selection = QtWidgets.QRadioButton(self.robotSelectionBase)\n self.robot_TB1_Selection.setGeometry(QtCore.QRect(25, 45, 40, 20))\n self.robot_TB1_Selection.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB1_Selection.setToolTipDuration(3000)\n self.robot_TB1_Selection.setStyleSheet(\"color: white;\")\n self.robot_TB1_Selection.setObjectName(\"robot_TB1_Selection\")\n self.robot_TB2_Selection = QtWidgets.QRadioButton(self.robotSelectionBase)\n self.robot_TB2_Selection.setGeometry(QtCore.QRect(70, 45, 40, 20))\n self.robot_TB2_Selection.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB2_Selection.setToolTipDuration(3000)\n self.robot_TB2_Selection.setStyleSheet(\"color: white;\")\n self.robot_TB2_Selection.setObjectName(\"robot_TB2_Selection\")\n self.robot_TB4_Selection = QtWidgets.QRadioButton(self.robotSelectionBase)\n self.robot_TB4_Selection.setGeometry(QtCore.QRect(165, 45, 40, 20))\n self.robot_TB4_Selection.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB4_Selection.setToolTipDuration(3000)\n self.robot_TB4_Selection.setStyleSheet(\"color: white;\")\n 
self.robot_TB4_Selection.setObjectName(\"robot_TB4_Selection\")\n self.robot_TB3_Selection = QtWidgets.QRadioButton(self.robotSelectionBase)\n self.robot_TB3_Selection.setGeometry(QtCore.QRect(120, 45, 41, 20))\n self.robot_TB3_Selection.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.robot_TB3_Selection.setToolTipDuration(3000)\n self.robot_TB3_Selection.setStyleSheet(\"color: white;\")\n self.robot_TB3_Selection.setObjectName(\"robot_TB3_Selection\")\n # Run the Experiment/Others Button\n self.run_Selection_Values = QtWidgets.QPushButton(self.robotSelectionBase)\n self.run_Selection_Values.setGeometry(QtCore.QRect(20, 8, 50, 22))\n self.run_Selection_Values.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.run_Selection_Values.setToolTipDuration(3000)\n self.run_Selection_Values.setStyleSheet(\"background-color: rgb(188, 255, 143);\\n\"\n \"color: rgb(22, 22, 22);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n self.run_Selection_Values.setObjectName(\"run_Selection_Values\")\n self.run_Selection_Values.clicked.connect(InProgress)\n # Down the Experiment/Others Button\n self.down_Selection_Values = QtWidgets.QPushButton(self.robotSelectionBase)\n self.down_Selection_Values.setGeometry(QtCore.QRect(135, 8, 31, 22))\n self.down_Selection_Values.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.down_Selection_Values.setToolTipDuration(3000)\n self.down_Selection_Values.setStyleSheet(\"color: rgb(0, 0, 0);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\\n\"\n \"background-color: rgb(107, 21, 18);\")\n self.down_Selection_Values.setObjectName(\"down_Selection_Values\")\n self.down_Selection_Values.clicked.connect(lambda: self.killExperiment(kill='yes'))\n # Selection Section Design\n self.selection_Superior_Line = QtWidgets.QFrame(self.mainWindowBase)\n self.selection_Superior_Line.setGeometry(QtCore.QRect(1061, 205, 220, 3))\n self.selection_Superior_Line.setStyleSheet(\"background: #1DDED8;\")\n self.selection_Superior_Line.setFrameShape(QtWidgets.QFrame.HLine)\n self.selection_Superior_Line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.selection_Superior_Line.setObjectName(\"selection_Superior_Line\")\n self.robot_TB2 = QtWidgets.QGroupBox(self.mainWindowBase)\n self.robot_TB2.setGeometry(QtCore.QRect(150, 235, 439, 219))\n self.robot_TB2.setStyleSheet(\"\\n\"\n \"color: rgb(206, 255, 188);\")\n self.robot_TB2.setObjectName(\"robot_TB2\")\n self.robot_TB2.setHidden(True)\n self.options_TB2 = QtWidgets.QFrame(self.robot_TB2)\n self.options_TB2.setGeometry(QtCore.QRect(215, 21, 221, 196))\n self.options_TB2.setStyleSheet(\"\")\n self.options_TB2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.options_TB2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.options_TB2.setObjectName(\"options_TB2\")\n self.line_TB2_2 = QtWidgets.QFrame(self.options_TB2)\n self.line_TB2_2.setGeometry(QtCore.QRect(90, 11, 2, 80))\n self.line_TB2_2.setStyleSheet(\"background: #1DDED8;\")\n self.line_TB2_2.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB2_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB2_2.setObjectName(\"line_TB2_2\")\n # TB2 Settings and Configurations Button\n self.configure_TB2_Button = QtWidgets.QPushButton(self.options_TB2)\n self.configure_TB2_Button.setGeometry(QtCore.QRect(100, 70, 111, 18))\n self.configure_TB2_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.configure_TB2_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.configure_TB2_Button.setObjectName(\"configure_TB2_Button\")\n 
self.configure_TB2_Button.clicked.connect(self.openTB2Settings)\n # TB2 Logs Button\n self.logs_TB2_Button = QtWidgets.QPushButton(self.options_TB2)\n self.logs_TB2_Button.setGeometry(QtCore.QRect(100, 45, 111, 18))\n self.logs_TB2_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.logs_TB2_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.logs_TB2_Button.setObjectName(\"logs_TB2_Button\")\n self.floor_TB2_Show = QtWidgets.QCheckBox(self.options_TB2)\n self.floor_TB2_Show.setGeometry(QtCore.QRect(10, 30, 61, 21))\n self.floor_TB2_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.floor_TB2_Show.setStyleSheet(\"color: white;\")\n self.floor_TB2_Show.setObjectName(\"floor_TB2_Show\")\n self.kinect_TB2_Show = QtWidgets.QCheckBox(self.options_TB2)\n self.kinect_TB2_Show.setGeometry(QtCore.QRect(10, 50, 71, 21))\n self.kinect_TB2_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.kinect_TB2_Show.setStyleSheet(\"color: white;\")\n self.kinect_TB2_Show.setObjectName(\"kinect_TB2_Show\")\n self.gmapp_TB2_Show = QtWidgets.QCheckBox(self.options_TB2)\n self.gmapp_TB2_Show.setGeometry(QtCore.QRect(10, 10, 71, 21))\n self.gmapp_TB2_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.gmapp_TB2_Show.setStyleSheet(\"color: white;\")\n self.gmapp_TB2_Show.setObjectName(\"gmapp_TB2_Show\")\n self.camera_TB2_Show = QtWidgets.QCheckBox(self.options_TB2)\n self.camera_TB2_Show.setGeometry(QtCore.QRect(10, 70, 71, 21))\n self.camera_TB2_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.camera_TB2_Show.setStyleSheet(\"color: white;\")\n self.camera_TB2_Show.setObjectName(\"camera_TB2_Show\")\n self.on_TB2_Viewer = QtWidgets.QRadioButton(self.options_TB2)\n self.on_TB2_Viewer.setGeometry(QtCore.QRect(100, 10, 51, 21))\n self.on_TB2_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.on_TB2_Viewer.setObjectName(\"on_TB2_Viewer\")\n self.off_TB2_Viewer = QtWidgets.QRadioButton(self.options_TB2)\n self.off_TB2_Viewer.setGeometry(QtCore.QRect(160, 10, 51, 21))\n self.off_TB2_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.off_TB2_Viewer.setObjectName(\"off_TB2_Viewer\")\n self.line_TB2_1 = QtWidgets.QFrame(self.options_TB2)\n self.line_TB2_1.setGeometry(QtCore.QRect(100, 35, 110, 1))\n self.line_TB2_1.setStyleSheet(\"\")\n self.line_TB2_1.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_TB2_1.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB2_1.setObjectName(\"line_TB2_1\")\n self.reload_TB2 = QtWidgets.QPushButton(self.options_TB2)\n self.reload_TB2.setGeometry(QtCore.QRect(114, 162, 90, 20))\n self.reload_TB2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reload_TB2.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reload_TB2.setObjectName(\"reload_TB2\")\n self.line_TB2_4 = QtWidgets.QFrame(self.options_TB2)\n self.line_TB2_4.setGeometry(QtCore.QRect(209, 160, 2, 25))\n self.line_TB2_4.setStyleSheet(\"background: #313640;\")\n self.line_TB2_4.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB2_4.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB2_4.setObjectName(\"line_TB2_4\")\n self.line_TB2_3 = QtWidgets.QFrame(self.options_TB2)\n self.line_TB2_3.setGeometry(QtCore.QRect(10, 160, 2, 25))\n self.line_TB2_3.setStyleSheet(\"background: #313640;\")\n self.line_TB2_3.setFrameShape(QtWidgets.QFrame.VLine)\n 
self.line_TB2_3.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB2_3.setObjectName(\"line_TB2_3\")\n self.reset_TB2 = QtWidgets.QPushButton(self.options_TB2)\n self.reset_TB2.setGeometry(QtCore.QRect(15, 162, 90, 20))\n self.reset_TB2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reset_TB2.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reset_TB2.setObjectName(\"reset_TB2\")\n self.valuesTB2Frame = QtWidgets.QFrame(self.options_TB2)\n self.valuesTB2Frame.setGeometry(QtCore.QRect(10, 100, 201, 51))\n self.valuesTB2Frame.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\")\n self.valuesTB2Frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.valuesTB2Frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.valuesTB2Frame.setObjectName(\"valuesTB2Frame\")\n self.x_TB2_Value = QtWidgets.QLCDNumber(self.valuesTB2Frame)\n self.x_TB2_Value.setGeometry(QtCore.QRect(30, 7, 31, 16))\n self.x_TB2_Value.setObjectName(\"x_TB2_Value\")\n self.x_TB2_Label = QtWidgets.QLabel(self.valuesTB2Frame)\n self.x_TB2_Label.setGeometry(QtCore.QRect(10, 7, 16, 16))\n self.x_TB2_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.x_TB2_Label.setObjectName(\"x_TB2_Label\")\n self.y_TB2_Label = QtWidgets.QLabel(self.valuesTB2Frame)\n self.y_TB2_Label.setGeometry(QtCore.QRect(10, 31, 16, 16))\n self.y_TB2_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.y_TB2_Label.setObjectName(\"y_TB2_Label\")\n self.y_TB2_Value = QtWidgets.QLCDNumber(self.valuesTB2Frame)\n self.y_TB2_Value.setGeometry(QtCore.QRect(30, 31, 31, 16))\n self.y_TB2_Value.setObjectName(\"y_TB2_Value\")\n self.velocity_TB2_Label = QtWidgets.QLabel(self.valuesTB2Frame)\n self.velocity_TB2_Label.setGeometry(QtCore.QRect(75, 7, 61, 16))\n self.velocity_TB2_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.velocity_TB2_Label.setObjectName(\"velocity_TB2_Label\")\n self.linear_TB2_Value = QtWidgets.QLCDNumber(self.valuesTB2Frame)\n self.linear_TB2_Value.setGeometry(QtCore.QRect(130, 7, 31, 16))\n self.linear_TB2_Value.setObjectName(\"linear_TB2_Value\")\n self.battery_TB2_Label = QtWidgets.QLabel(self.valuesTB2Frame)\n self.battery_TB2_Label.setGeometry(QtCore.QRect(75, 31, 61, 16))\n self.battery_TB2_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.battery_TB2_Label.setObjectName(\"battery_TB2_Label\")\n self.turtleBat_TB2_Value = QtWidgets.QLCDNumber(self.valuesTB2Frame)\n self.turtleBat_TB2_Value.setGeometry(QtCore.QRect(130, 30, 31, 16))\n self.turtleBat_TB2_Value.setObjectName(\"turtleBat_TB2_Value\")\n self.noteBat_TB2_Value = QtWidgets.QLCDNumber(self.valuesTB2Frame)\n self.noteBat_TB2_Value.setGeometry(QtCore.QRect(165, 30, 31, 16))\n self.noteBat_TB2_Value.setObjectName(\"noteBat_TB2_Value\")\n self.angular_TB2_Value = QtWidgets.QLCDNumber(self.valuesTB2Frame)\n self.angular_TB2_Value.setGeometry(QtCore.QRect(165, 7, 31, 16))\n self.angular_TB2_Value.setObjectName(\"angular_TB2_Value\")\n self.viewer_TB2 = QtWidgets.QTabWidget(self.robot_TB2)\n self.viewer_TB2.setGeometry(QtCore.QRect(0, 20, 221, 198))\n font = QtGui.QFont()\n font.setFamily(\"Tlwg Typist\")\n font.setPointSize(6)\n font.setBold(True)\n font.setWeight(75)\n self.viewer_TB2.setFont(font)\n self.viewer_TB2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n 
self.viewer_TB2.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.viewer_TB2.setStyleSheet(\"color: white;\")\n self.viewer_TB2.setTabPosition(QtWidgets.QTabWidget.West)\n self.viewer_TB2.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.viewer_TB2.setElideMode(QtCore.Qt.ElideLeft)\n self.viewer_TB2.setObjectName(\"viewer_TB2\")\n # Kinnect TB2 Screen\n self.kinnect_TB2_Screen = QtWidgets.QWidget()\n self.kinnect_TB2_Screen.setObjectName(\"kinnect_TB2_Screen\")\n self.viewer_TB2.addTab(self.kinnect_TB2_Screen, \"\")\n # Camera TB2 Screen\n self.camera_TB2_Screen = QtWidgets.QWidget()\n self.camera_TB2_Screen.setObjectName(\"camera_TB2_Screen\")\n self.viewer_TB2.addTab(self.camera_TB2_Screen, \"\")\n # GMAPP TB2 Screen\n self.gmapp_TB2_Screen = QtWidgets.QWidget()\n self.gmapp_TB2_Screen.setObjectName(\"gmapp_TB2_Screen\")\n self.viewer_TB2.addTab(self.gmapp_TB2_Screen, \"\")\n # FLOOR TB2 Screen\n self.floor_TB2_Screen = QtWidgets.QWidget()\n self.floor_TB2_Screen.setObjectName(\"floor_TB2_Screen\")\n self.viewer_TB2.addTab(self.floor_TB2_Screen, \"\")\n self.label = QtWidgets.QLabel(self.mainWindowBase)\n self.label.setGeometry(QtCore.QRect(25, 570, 71, 16))\n self.label.setToolTipDuration(3000)\n self.label.setStyleSheet(\"background: transparent;\\n\"\n \"color: white;\")\n self.label.setObjectName(\"label\")\n self.robot_TB3 = QtWidgets.QGroupBox(self.mainWindowBase)\n self.robot_TB3.setGeometry(QtCore.QRect(610, 10, 439, 219))\n self.robot_TB3.setStyleSheet(\"\\n\"\n \"color: rgb(206, 255, 188);\")\n self.robot_TB3.setObjectName(\"robot_TB3\")\n self.robot_TB3.setHidden(True)\n self.options_TB3 = QtWidgets.QFrame(self.robot_TB3)\n self.options_TB3.setGeometry(QtCore.QRect(215, 21, 221, 196))\n self.options_TB3.setStyleSheet(\"\")\n self.options_TB3.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.options_TB3.setFrameShadow(QtWidgets.QFrame.Raised)\n self.options_TB3.setObjectName(\"options_TB3\")\n self.line_TB3_1 = QtWidgets.QFrame(self.options_TB3)\n self.line_TB3_1.setGeometry(QtCore.QRect(90, 11, 2, 80))\n self.line_TB3_1.setStyleSheet(\"background: #1DDED8;\")\n self.line_TB3_1.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB3_1.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB3_1.setObjectName(\"line_TB3_1\")\n # TB3 Settings and Configurations Buton\n self.configure_TB3_Button = QtWidgets.QPushButton(self.options_TB3)\n self.configure_TB3_Button.setGeometry(QtCore.QRect(100, 70, 111, 18))\n self.configure_TB3_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.configure_TB3_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.configure_TB3_Button.setObjectName(\"configure_TB3_Button\")\n self.configure_TB3_Button.clicked.connect(self.openTB3Settings)\n # TB3 Logs Button\n self.logs_TB3_Button = QtWidgets.QPushButton(self.options_TB3)\n self.logs_TB3_Button.setGeometry(QtCore.QRect(100, 45, 111, 18))\n self.logs_TB3_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.logs_TB3_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.logs_TB3_Button.setObjectName(\"logs_TB3_Button\")\n self.floor_TB3_Show = QtWidgets.QCheckBox(self.options_TB3)\n self.floor_TB3_Show.setGeometry(QtCore.QRect(10, 30, 61, 21))\n self.floor_TB3_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.floor_TB3_Show.setStyleSheet(\"color: white;\")\n self.floor_TB3_Show.setObjectName(\"floor_TB3_Show\")\n self.kinect_TB3_Show = 
QtWidgets.QCheckBox(self.options_TB3)\n self.kinect_TB3_Show.setGeometry(QtCore.QRect(10, 50, 71, 21))\n self.kinect_TB3_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.kinect_TB3_Show.setStyleSheet(\"color: white;\")\n self.kinect_TB3_Show.setObjectName(\"kinect_TB3_Show\")\n self.gmapp_TB3_Show = QtWidgets.QCheckBox(self.options_TB3)\n self.gmapp_TB3_Show.setGeometry(QtCore.QRect(10, 10, 71, 21))\n self.gmapp_TB3_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.gmapp_TB3_Show.setStyleSheet(\"color: white;\")\n self.gmapp_TB3_Show.setObjectName(\"gmapp_TB3_Show\")\n self.camera_TB3_Show = QtWidgets.QCheckBox(self.options_TB3)\n self.camera_TB3_Show.setGeometry(QtCore.QRect(10, 70, 71, 21))\n self.camera_TB3_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.camera_TB3_Show.setStyleSheet(\"color: white;\")\n self.camera_TB3_Show.setObjectName(\"camera_TB3_Show\")\n self.on_TB3_Viewer = QtWidgets.QRadioButton(self.options_TB3)\n self.on_TB3_Viewer.setGeometry(QtCore.QRect(100, 10, 51, 21))\n self.on_TB3_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.on_TB3_Viewer.setObjectName(\"on_TB3_Viewer\")\n self.off_TB3_Viewer = QtWidgets.QRadioButton(self.options_TB3)\n self.off_TB3_Viewer.setGeometry(QtCore.QRect(160, 10, 51, 21))\n self.off_TB3_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.off_TB3_Viewer.setObjectName(\"off_TB3_Viewer\")\n self.line_TB3_2 = QtWidgets.QFrame(self.options_TB3)\n self.line_TB3_2.setGeometry(QtCore.QRect(100, 35, 110, 1))\n self.line_TB3_2.setStyleSheet(\"\")\n self.line_TB3_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_TB3_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB3_2.setObjectName(\"line_TB3_2\")\n self.reload_TB3 = QtWidgets.QPushButton(self.options_TB3)\n self.reload_TB3.setGeometry(QtCore.QRect(114, 162, 90, 20))\n self.reload_TB3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reload_TB3.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reload_TB3.setObjectName(\"reload_TB3\")\n self.line_TB3_4 = QtWidgets.QFrame(self.options_TB3)\n self.line_TB3_4.setGeometry(QtCore.QRect(209, 160, 2, 25))\n self.line_TB3_4.setStyleSheet(\"background: #313640;\")\n self.line_TB3_4.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB3_4.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB3_4.setObjectName(\"line_TB3_4\")\n self.line_TB3_3 = QtWidgets.QFrame(self.options_TB3)\n self.line_TB3_3.setGeometry(QtCore.QRect(10, 160, 2, 25))\n self.line_TB3_3.setStyleSheet(\"background: #313640;\")\n self.line_TB3_3.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB3_3.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB3_3.setObjectName(\"line_TB3_3\")\n self.reset_TB3 = QtWidgets.QPushButton(self.options_TB3)\n self.reset_TB3.setGeometry(QtCore.QRect(15, 162, 90, 20))\n self.reset_TB3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reset_TB3.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reset_TB3.setObjectName(\"reset_TB3\")\n self.valuesTB3Frame = QtWidgets.QFrame(self.options_TB3)\n self.valuesTB3Frame.setGeometry(QtCore.QRect(10, 100, 201, 51))\n self.valuesTB3Frame.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\")\n self.valuesTB3Frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.valuesTB3Frame.setFrameShadow(QtWidgets.QFrame.Raised)\n 
self.valuesTB3Frame.setObjectName(\"valuesTB3Frame\")\n self.x_TB3_Value = QtWidgets.QLCDNumber(self.valuesTB3Frame)\n self.x_TB3_Value.setGeometry(QtCore.QRect(30, 7, 31, 16))\n self.x_TB3_Value.setObjectName(\"x_TB3_Value\")\n self.x_TB3_Label = QtWidgets.QLabel(self.valuesTB3Frame)\n self.x_TB3_Label.setGeometry(QtCore.QRect(10, 7, 16, 16))\n self.x_TB3_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.x_TB3_Label.setObjectName(\"x_TB3_Label\")\n self.y_TB3_Label = QtWidgets.QLabel(self.valuesTB3Frame)\n self.y_TB3_Label.setGeometry(QtCore.QRect(10, 31, 16, 16))\n self.y_TB3_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.y_TB3_Label.setObjectName(\"y_TB3_Label\")\n self.y_TB3_Value = QtWidgets.QLCDNumber(self.valuesTB3Frame)\n self.y_TB3_Value.setGeometry(QtCore.QRect(30, 31, 31, 16))\n self.y_TB3_Value.setObjectName(\"y_TB3_Value\")\n self.velocity_TB3_Label = QtWidgets.QLabel(self.valuesTB3Frame)\n self.velocity_TB3_Label.setGeometry(QtCore.QRect(75, 7, 61, 16))\n self.velocity_TB3_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.velocity_TB3_Label.setObjectName(\"velocity_TB3_Label\")\n self.linear_TB3_Value = QtWidgets.QLCDNumber(self.valuesTB3Frame)\n self.linear_TB3_Value.setGeometry(QtCore.QRect(130, 7, 31, 16))\n self.linear_TB3_Value.setObjectName(\"linear_TB3_Value\")\n self.battery_TB3_Label = QtWidgets.QLabel(self.valuesTB3Frame)\n self.battery_TB3_Label.setGeometry(QtCore.QRect(75, 31, 61, 16))\n self.battery_TB3_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.battery_TB3_Label.setObjectName(\"battery_TB3_Label\")\n self.turtleBat_TB3_Value = QtWidgets.QLCDNumber(self.valuesTB3Frame)\n self.turtleBat_TB3_Value.setGeometry(QtCore.QRect(130, 30, 31, 16))\n self.turtleBat_TB3_Value.setObjectName(\"turtleBat_TB3_Value\")\n self.noteBat_TB3_Value = QtWidgets.QLCDNumber(self.valuesTB3Frame)\n self.noteBat_TB3_Value.setGeometry(QtCore.QRect(165, 30, 31, 16))\n self.noteBat_TB3_Value.setObjectName(\"noteBat_TB3_Value\")\n self.angular_TB3_Value = QtWidgets.QLCDNumber(self.valuesTB3Frame)\n self.angular_TB3_Value.setGeometry(QtCore.QRect(165, 7, 31, 16))\n self.angular_TB3_Value.setObjectName(\"angular_TB3_Value\")\n self.viewer_TB3 = QtWidgets.QTabWidget(self.robot_TB3)\n self.viewer_TB3.setGeometry(QtCore.QRect(0, 20, 221, 198))\n font = QtGui.QFont()\n font.setFamily(\"Tlwg Typist\")\n font.setPointSize(6)\n font.setBold(True)\n font.setWeight(75)\n self.viewer_TB3.setFont(font)\n self.viewer_TB3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.viewer_TB3.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.viewer_TB3.setStyleSheet(\"color: white;\")\n self.viewer_TB3.setTabPosition(QtWidgets.QTabWidget.West)\n self.viewer_TB3.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.viewer_TB3.setElideMode(QtCore.Qt.ElideLeft)\n self.viewer_TB3.setObjectName(\"viewer_TB3\")\n # Kinnect TB3 Screen\n self.kinnect_TB3_Screen = QtWidgets.QWidget()\n self.kinnect_TB3_Screen.setObjectName(\"kinnect_TB3_Screen\")\n self.viewer_TB3.addTab(self.kinnect_TB3_Screen, \"\")\n # Camera TB3 Screen\n self.camera_TB3_Screen = QtWidgets.QWidget()\n self.camera_TB3_Screen.setObjectName(\"camera_TB3_Screen\")\n self.viewer_TB3.addTab(self.camera_TB3_Screen, \"\")\n # GMAPP TB3 Screen\n self.gmapp_TB3_Screen = QtWidgets.QWidget()\n self.gmapp_TB3_Screen.setObjectName(\"gmapp_TB3_Screen\")\n 
self.viewer_TB3.addTab(self.gmapp_TB3_Screen, \"\")\n # Floor TB3 Screen\n self.floor_TB3_Screen = QtWidgets.QWidget()\n self.floor_TB3_Screen.setObjectName(\"floor_TB3_Screen\")\n self.viewer_TB3.addTab(self.floor_TB3_Screen, \"\")\n self.robot_TB4 = QtWidgets.QGroupBox(self.mainWindowBase)\n self.robot_TB4.setGeometry(QtCore.QRect(610, 235, 439, 219))\n self.robot_TB4.setStyleSheet(\"\\n\"\n \"color: rgb(206, 255, 188);\")\n self.robot_TB4.setObjectName(\"robot_TB4\")\n self.robot_TB4.setHidden(True)\n self.options_TB4 = QtWidgets.QFrame(self.robot_TB4)\n self.options_TB4.setGeometry(QtCore.QRect(215, 21, 221, 196))\n self.options_TB4.setStyleSheet(\"\")\n self.options_TB4.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.options_TB4.setFrameShadow(QtWidgets.QFrame.Raised)\n self.options_TB4.setObjectName(\"options_TB4\")\n self.line_TB4_1 = QtWidgets.QFrame(self.options_TB4)\n self.line_TB4_1.setGeometry(QtCore.QRect(90, 11, 2, 80))\n self.line_TB4_1.setStyleSheet(\"background: #1DDED8;\")\n self.line_TB4_1.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB4_1.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB4_1.setObjectName(\"line_TB4_1\")\n # TB4 Settings and Configurations Button\n self.configure_TB4_Button = QtWidgets.QPushButton(self.options_TB4)\n self.configure_TB4_Button.setGeometry(QtCore.QRect(100, 70, 111, 18))\n self.configure_TB4_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.configure_TB4_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.configure_TB4_Button.setObjectName(\"configure_TB4_Button\")\n self.configure_TB4_Button.clicked.connect(self.openTB4Settings)\n # TB4 Logs Button\n self.logs_TB4_Button = QtWidgets.QPushButton(self.options_TB4)\n self.logs_TB4_Button.setGeometry(QtCore.QRect(100, 45, 111, 18))\n self.logs_TB4_Button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.logs_TB4_Button.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\\n\"\n \"color: rgb(22, 22, 22)\")\n self.logs_TB4_Button.setObjectName(\"logs_TB4_Button\")\n self.floor_TB4_Show = QtWidgets.QCheckBox(self.options_TB4)\n self.floor_TB4_Show.setGeometry(QtCore.QRect(10, 30, 61, 21))\n self.floor_TB4_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.floor_TB4_Show.setStyleSheet(\"color: white;\")\n self.floor_TB4_Show.setObjectName(\"floor_TB4_Show\")\n self.kinect_TB4_Show = QtWidgets.QCheckBox(self.options_TB4)\n self.kinect_TB4_Show.setGeometry(QtCore.QRect(10, 50, 71, 21))\n self.kinect_TB4_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.kinect_TB4_Show.setStyleSheet(\"color: white;\")\n self.kinect_TB4_Show.setObjectName(\"kinect_TB4_Show\")\n self.gmapp_TB4_Show = QtWidgets.QCheckBox(self.options_TB4)\n self.gmapp_TB4_Show.setGeometry(QtCore.QRect(10, 10, 71, 21))\n self.gmapp_TB4_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.gmapp_TB4_Show.setStyleSheet(\"color: white;\")\n self.gmapp_TB4_Show.setObjectName(\"gmapp_TB4_Show\")\n self.camera_TB4_Show = QtWidgets.QCheckBox(self.options_TB4)\n self.camera_TB4_Show.setGeometry(QtCore.QRect(10, 70, 71, 21))\n self.camera_TB4_Show.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.camera_TB4_Show.setStyleSheet(\"color: white;\")\n self.camera_TB4_Show.setObjectName(\"camera_TB4_Show\")\n self.on_TB4_Viewer = QtWidgets.QRadioButton(self.options_TB4)\n self.on_TB4_Viewer.setGeometry(QtCore.QRect(100, 10, 51, 21))\n 
self.on_TB4_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.on_TB4_Viewer.setObjectName(\"on_TB4_Viewer\")\n self.off_TB4_Viewer = QtWidgets.QRadioButton(self.options_TB4)\n self.off_TB4_Viewer.setGeometry(QtCore.QRect(160, 10, 51, 21))\n self.off_TB4_Viewer.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.off_TB4_Viewer.setObjectName(\"off_TB4_Viewer\")\n self.line_TB4_2 = QtWidgets.QFrame(self.options_TB4)\n self.line_TB4_2.setGeometry(QtCore.QRect(100, 35, 110, 1))\n self.line_TB4_2.setStyleSheet(\"\")\n self.line_TB4_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_TB4_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB4_2.setObjectName(\"line_TB4_2\")\n self.reload_TB4 = QtWidgets.QPushButton(self.options_TB4)\n self.reload_TB4.setGeometry(QtCore.QRect(114, 162, 90, 20))\n self.reload_TB4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reload_TB4.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reload_TB4.setObjectName(\"reload_TB4\")\n self.line_TB4_4 = QtWidgets.QFrame(self.options_TB4)\n self.line_TB4_4.setGeometry(QtCore.QRect(209, 160, 2, 25))\n self.line_TB4_4.setStyleSheet(\"background: #313640;\")\n self.line_TB4_4.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB4_4.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB4_4.setObjectName(\"line_TB4_4\")\n self.line_TB4_5 = QtWidgets.QFrame(self.options_TB4)\n self.line_TB4_5.setGeometry(QtCore.QRect(10, 160, 2, 25))\n self.line_TB4_5.setStyleSheet(\"background: #313640;\")\n self.line_TB4_5.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_TB4_5.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_TB4_5.setObjectName(\"line_TB4_5\")\n self.reset_TB4 = QtWidgets.QPushButton(self.options_TB4)\n self.reset_TB4.setGeometry(QtCore.QRect(15, 162, 90, 20))\n self.reset_TB4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.reset_TB4.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 75 9pt \\\"Clean\\\";\\n\"\n \"background: rgba(25, 27, 33, 0.2);\")\n self.reset_TB4.setObjectName(\"reset_TB4\")\n self.valuesTB4Frame = QtWidgets.QFrame(self.options_TB4)\n self.valuesTB4Frame.setGeometry(QtCore.QRect(10, 100, 201, 51))\n self.valuesTB4Frame.setStyleSheet(\"background: rgba(29, 222, 216, 0.1);\")\n self.valuesTB4Frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.valuesTB4Frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.valuesTB4Frame.setObjectName(\"valuesTB4Frame\")\n self.x_TB4_Value = QtWidgets.QLCDNumber(self.valuesTB4Frame)\n self.x_TB4_Value.setGeometry(QtCore.QRect(30, 7, 31, 16))\n self.x_TB4_Value.setObjectName(\"x_TB4_Value\")\n self.x_TB4_Label = QtWidgets.QLabel(self.valuesTB4Frame)\n self.x_TB4_Label.setGeometry(QtCore.QRect(10, 7, 16, 16))\n self.x_TB4_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.x_TB4_Label.setObjectName(\"x_TB4_Label\")\n self.y_TB4_Label = QtWidgets.QLabel(self.valuesTB4Frame)\n self.y_TB4_Label.setGeometry(QtCore.QRect(10, 31, 16, 16))\n self.y_TB4_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.y_TB4_Label.setObjectName(\"y_TB4_Label\")\n self.y_TB4_Value = QtWidgets.QLCDNumber(self.valuesTB4Frame)\n self.y_TB4_Value.setGeometry(QtCore.QRect(30, 31, 31, 16))\n self.y_TB4_Value.setObjectName(\"y_TB4_Value\")\n self.velocity_TB4_Label = QtWidgets.QLabel(self.valuesTB4Frame)\n self.velocity_TB4_Label.setGeometry(QtCore.QRect(75, 7, 61, 16))\n 
self.velocity_TB4_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.velocity_TB4_Label.setObjectName(\"velocity_TB4_Label\")\n self.linear_TB4_Value = QtWidgets.QLCDNumber(self.valuesTB4Frame)\n self.linear_TB4_Value.setGeometry(QtCore.QRect(130, 7, 31, 16))\n self.linear_TB4_Value.setObjectName(\"linear_TB4_Value\")\n self.battery_TB4_Label = QtWidgets.QLabel(self.valuesTB4Frame)\n self.battery_TB4_Label.setGeometry(QtCore.QRect(75, 31, 61, 16))\n self.battery_TB4_Label.setStyleSheet(\"background: transparent;\\n\"\n \"color: rgb(206, 255, 188);\")\n self.battery_TB4_Label.setObjectName(\"battery_TB4_Label\")\n self.turtleBat_TB4_Value = QtWidgets.QLCDNumber(self.valuesTB4Frame)\n self.turtleBat_TB4_Value.setGeometry(QtCore.QRect(130, 30, 31, 16))\n self.turtleBat_TB4_Value.setObjectName(\"turtleBat_TB4_Value\")\n self.noteBat_TB4_Value = QtWidgets.QLCDNumber(self.valuesTB4Frame)\n self.noteBat_TB4_Value.setGeometry(QtCore.QRect(165, 30, 31, 16))\n self.noteBat_TB4_Value.setObjectName(\"noteBat_TB4_Value\")\n self.angular_TB4_Value = QtWidgets.QLCDNumber(self.valuesTB4Frame)\n self.angular_TB4_Value.setGeometry(QtCore.QRect(165, 7, 31, 16))\n self.angular_TB4_Value.setObjectName(\"angular_TB4_Value\")\n self.viewer_TB4 = QtWidgets.QTabWidget(self.robot_TB4)\n self.viewer_TB4.setGeometry(QtCore.QRect(0, 20, 221, 198))\n font = QtGui.QFont()\n font.setFamily(\"Tlwg Typist\")\n font.setPointSize(6)\n font.setBold(True)\n font.setWeight(75)\n self.viewer_TB4.setFont(font)\n self.viewer_TB4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.viewer_TB4.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.viewer_TB4.setStyleSheet(\"color: white;\")\n self.viewer_TB4.setTabPosition(QtWidgets.QTabWidget.West)\n self.viewer_TB4.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.viewer_TB4.setElideMode(QtCore.Qt.ElideLeft)\n self.viewer_TB4.setObjectName(\"viewer_TB4\")\n # Kinnect TB4 Screen\n self.kinnect_TB4_Screen = QtWidgets.QWidget()\n self.kinnect_TB4_Screen.setObjectName(\"kinnect_TB4_Screen\")\n self.viewer_TB4.addTab(self.kinnect_TB4_Screen, \"\")\n # Camera TB4 Screen\n self.camera_TB4_Screen = QtWidgets.QWidget()\n self.camera_TB4_Screen.setObjectName(\"camera_TB4_Screen\")\n self.viewer_TB4.addTab(self.camera_TB4_Screen, \"\")\n # GMAPP TB4 Screen\n self.gmapp_TB4_Screen = QtWidgets.QWidget()\n self.gmapp_TB4_Screen.setObjectName(\"gmapp_TB4_Screen\")\n self.viewer_TB4.addTab(self.gmapp_TB4_Screen, \"\")\n # Floor TB4 Screen\n self.floor_TB4_Screen = QtWidgets.QWidget()\n self.floor_TB4_Screen.setObjectName(\"floor_TB4_Screen\")\n self.viewer_TB4.addTab(self.floor_TB4_Screen, \"\")\n self.robot_TB1.raise_()\n self.menuFrame.raise_()\n self.menu_Logo.raise_()\n self.menu_Line.raise_()\n self.qrcode_App_Button.raise_()\n self.data_Button.raise_()\n self.config_Button.raise_()\n self.docs_Button.raise_()\n self.about_Button.raise_()\n self.viewer_Superior_Line.raise_()\n self.inferior_Line_Base.raise_()\n self.lateral_Line_Base.raise_()\n self.robot_Viewer_Frame.raise_()\n self.terminalWidget.raise_()\n self.robotViewerBase.raise_()\n self.robot_Selection_Frame.raise_()\n self.robotSelectionBase.raise_()\n self.selection_Superior_Line.raise_()\n self.robot_TB2.raise_()\n self.label.raise_()\n self.robot_TB3.raise_()\n self.robot_TB4.raise_()\n MainWindow.setCentralWidget(self.mainWindowBase)\n self.retranslateUi(MainWindow)\n self.viewer_TB1.setCurrentIndex(0)\n self.viewer_TB2.setCurrentIndex(3)\n self.viewer_TB3.setCurrentIndex(3)\n 
self.viewer_TB4.setCurrentIndex(3)\n # Shows the Robots Viewers and Change the Button State Style\n self.robot_TB1_Viewer.clicked['bool'].connect(self.robot_TB1.setVisible)\n self.robot_TB2_Viewer.clicked['bool'].connect(self.robot_TB2.setVisible)\n self.robot_TB3_Viewer.clicked['bool'].connect(self.robot_TB3.setVisible)\n self.robot_TB4_Viewer.clicked['bool'].connect(self.robot_TB4.setVisible)\n self.robot_TB1_Viewer.toggled['bool'].connect(lambda: self.buttonStatusChange(robot='tb1'))\n self.robot_TB1_Viewer.toggled['bool'].connect(lambda: self.robotsTerminals(robot='tb1'))\n self.robot_TB2_Viewer.toggled['bool'].connect(lambda: self.buttonStatusChange(robot='tb2'))\n self.robot_TB2_Viewer.toggled['bool'].connect(lambda: self.robotsTerminals(robot='tb2') )\n self.robot_TB3_Viewer.toggled['bool'].connect(lambda: self.buttonStatusChange(robot='tb3'))\n self.robot_TB3_Viewer.toggled['bool'].connect(lambda: self.robotsTerminals(robot='tb3'))\n self.robot_TB4_Viewer.toggled['bool'].connect(lambda: self.buttonStatusChange(robot='tb4'))\n self.robot_TB4_Viewer.toggled['bool'].connect(lambda: self.robotsTerminals(robot='tb4'))\n # Configuration Robot Selection\n self.robot_TB1_Selection.clicked.connect(lambda: self.setExperiment(robot='1', set='None'))\n self.robot_TB2_Selection.clicked.connect(lambda: self.setExperiment(robot='2', set='None'))\n self.robot_TB3_Selection.clicked.connect(lambda: self.setExperiment(robot='3', set='None'))\n self.robot_TB4_Selection.clicked.connect(lambda: self.setExperiment(robot='4', set='None'))\n # TB1 Reset Screen\n self.reset_TB1.clicked.connect(lambda: self.gmapp_TB1_Show.setChecked(False))\n self.reset_TB1.clicked.connect(lambda: self.floor_TB1_Show.setChecked(False))\n self.reset_TB1.clicked.connect(lambda: self.kinect_TB1_Show.setChecked(False))\n self.reset_TB1.clicked.connect(lambda: self.camera_TB1_Show.setChecked(False))\n self.reset_TB1.clicked.connect(lambda: self.linear_TB1_Value.display(0))\n self.reset_TB1.clicked.connect(lambda: self.angular_TB1_Value.display(0))\n self.reset_TB1.clicked.connect(lambda: self.x_TB1_Value.display(0))\n self.reset_TB1.clicked.connect(lambda: self.y_TB1_Value.display(0))\n self.reset_TB1.clicked.connect(lambda: self.turtleBat_TB1_Value.display(0))\n self.reset_TB1.clicked.connect(lambda: self.noteBat_TB1_Value.display(0))\n # TB2 Reset Screen\n self.reset_TB2.clicked.connect(lambda: self.gmapp_TB2_Show.setChecked(False))\n self.reset_TB2.clicked.connect(lambda: self.floor_TB2_Show.setChecked(False))\n self.reset_TB2.clicked.connect(lambda: self.kinect_TB2_Show.setChecked(False))\n self.reset_TB2.clicked.connect(lambda: self.camera_TB2_Show.setChecked(False))\n self.reset_TB2.clicked.connect(lambda: self.linear_TB2_Value.display(0))\n self.reset_TB2.clicked.connect(lambda: self.angular_TB2_Value.display(0))\n self.reset_TB2.clicked.connect(lambda: self.x_TB2_Value.display(0))\n self.reset_TB2.clicked.connect(lambda: self.y_TB2_Value.display(0))\n self.reset_TB2.clicked.connect(lambda: self.turtleBat_TB2_Value.display(0))\n self.reset_TB2.clicked.connect(lambda: self.noteBat_TB2_Value.display(0))\n # TB3 Reset Screen\n self.reset_TB3.clicked.connect(lambda: self.gmapp_TB3_Show.setChecked(False))\n self.reset_TB3.clicked.connect(lambda: self.floor_TB3_Show.setChecked(False))\n self.reset_TB3.clicked.connect(lambda: self.kinect_TB3_Show.setChecked(False))\n self.reset_TB3.clicked.connect(lambda: self.camera_TB3_Show.setChecked(False))\n self.reset_TB3.clicked.connect(lambda: self.linear_TB3_Value.display(0))\n 
self.reset_TB3.clicked.connect(lambda: self.angular_TB3_Value.display(0))\n self.reset_TB3.clicked.connect(lambda: self.x_TB3_Value.display(0))\n self.reset_TB3.clicked.connect(lambda: self.y_TB3_Value.display(0))\n self.reset_TB3.clicked.connect(lambda: self.turtleBat_TB3_Value.display(0))\n self.reset_TB3.clicked.connect(lambda: self.noteBat_TB3_Value.display(0))\n # TB4 Reset Screen\n self.reset_TB4.clicked.connect(lambda: self.gmapp_TB4_Show.setChecked(False))\n self.reset_TB4.clicked.connect(lambda: self.floor_TB4_Show.setChecked(False))\n self.reset_TB4.clicked.connect(lambda: self.kinect_TB4_Show.setChecked(False))\n self.reset_TB4.clicked.connect(lambda: self.camera_TB4_Show.setChecked(False))\n self.reset_TB4.clicked.connect(lambda: self.linear_TB4_Value.display(0))\n self.reset_TB4.clicked.connect(lambda: self.angular_TB4_Value.display(0))\n self.reset_TB4.clicked.connect(lambda: self.x_TB4_Value.display(0))\n self.reset_TB4.clicked.connect(lambda: self.y_TB4_Value.display(0))\n self.reset_TB4.clicked.connect(lambda: self.turtleBat_TB4_Value.display(0))\n self.reset_TB4.clicked.connect(lambda: self.noteBat_TB4_Value.display(0))\n # Configuration Reset Button\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB1_Selection.setAutoExclusive(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB1_Selection.setChecked(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB2_Selection.setAutoExclusive(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB2_Selection.setChecked(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB3_Selection.setAutoExclusive(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB3_Selection.setChecked(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB4_Selection.setAutoExclusive(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_TB4_Selection.setChecked(False))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_Selection_Type.setCurrentIndex(0))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_Selection_Role.setCurrentIndex(0))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_Selection_Task.setCurrentIndex(0))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_Selection_Behavior.setCurrentIndex(0))\n self.reset_Selection_Values.clicked.connect(lambda: self.robot_Selection_Experiment.setCurrentIndex(0))\n self.qrcode_App_Button.clicked.connect(MainWindow.show)\n self.data_Button.clicked.connect(MainWindow.show)\n self.logs_TB4_Button.clicked.connect(self.logs_TB4_Button.show)\n self.configure_TB4_Button.clicked.connect(self.configure_TB4_Button.show)\n self.logs_TB3_Button.clicked.connect(self.logs_TB3_Button.show)\n self.configure_TB3_Button.clicked['bool'].connect(self.configure_TB3_Button.show)\n self.logs_TB1_Button.clicked.connect(self.logs_TB1_Button.show)\n self.configure_TB1_Button.clicked.connect(self.configure_TB1_Button.show)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)", "def _setup_buttons(self):\n self.ui.pushButton_start.clicked.connect(self._start_stop)\n self.ui.pushButton_start.setIcon(QtGui.QIcon(\"res/record.png\"))\n self.ui.pushButton_visualize.clicked.connect(self._start_stop_visualizer)\n self.ui.lineEdit_pulse_freq.textChanged.connect(self._calculate_rollover)\n self.ui.lineEdit_analog_channels.textChanged.connect(self._calculate_datarate)\n 
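# changing the sample data type or analog sampling rate also re-estimates the data rate\n        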
self.ui.comboBox_dtype.currentIndexChanged.connect(self._calculate_datarate)\n self.ui.comboBox_analog_freq.currentIndexChanged.connect(self._calculate_datarate)\n self.ui.lineEdit_data_bits.textEdited.connect(self._dig_line_count_changed)\n self.ui.checkBox_ci.clicked.connect(self._ci_toggle)\n\n self.ui.checkBox_comm.stateChanged.connect(self._setup_communication)\n\n self.ui.tableWidget_labels.cellChanged.connect(self._label_changed)\n self.ui.tabWidget.currentChanged.connect(self._tab_changed)\n\n self.ui.comboBox_device.currentIndexChanged.connect(self._get_device_model)\n\n self.ui.actionQuit.triggered.connect(self.close)\n self.ui.actionLoad.triggered.connect(self.load_config)\n self.ui.actionSave.triggered.connect(self.save_config)\n self.ui.actionAbout.triggered.connect(self.about)\n\n # set some validators\n int_val = QtGui.QIntValidator()\n int_val.setRange(1, 32)\n self.ui.lineEdit_data_bits.setValidator(int_val)\n\n # float_val = QtGui.QDoubleValidator()\n # float_val.setRange(1000.0, 20000000.0)\n # self.ui.lineEdit_pulse_freq.setValidator(float_val)", "def test_qtdesigner():\n QtDesigner = pytest.importorskip(\"qtpy.QtDesigner\")\n\n assert QtDesigner.QAbstractExtensionFactory is not None\n assert QtDesigner.QAbstractExtensionManager is not None\n assert QtDesigner.QDesignerActionEditorInterface is not None\n assert QtDesigner.QDesignerContainerExtension is not None\n assert QtDesigner.QDesignerCustomWidgetCollectionInterface is not None\n assert QtDesigner.QDesignerCustomWidgetInterface is not None\n assert QtDesigner.QDesignerFormEditorInterface is not None\n assert QtDesigner.QDesignerFormWindowCursorInterface is not None\n assert QtDesigner.QDesignerFormWindowInterface is not None\n assert QtDesigner.QDesignerFormWindowManagerInterface is not None\n assert QtDesigner.QDesignerMemberSheetExtension is not None\n assert QtDesigner.QDesignerObjectInspectorInterface is not None\n assert QtDesigner.QDesignerPropertyEditorInterface is not None\n assert QtDesigner.QDesignerPropertySheetExtension is not None\n assert QtDesigner.QDesignerTaskMenuExtension is not None\n assert QtDesigner.QDesignerWidgetBoxInterface is not None\n assert QtDesigner.QExtensionFactory is not None\n assert QtDesigner.QExtensionManager is not None\n assert QtDesigner.QFormBuilder is not None", "def assignWidgets(self):\n self.buttonBox.accepted.connect(self.runAll)\n self.buttonBox.rejected.connect(self.exitSystem)\n self.FilesButton.clicked.connect(self.FileToRun)\n self.OutputButton.clicked.connect(self.OutPutLocation)", "def main():\n # Read Settings\n readSettings()\n\n # Initialize Qt\n app = QtWidgets.QApplication(sys.argv)\n\n # Set design and colors\n design.QDarkPalette().set_app(app)\n\n # Initialize and start Window\n window = startWindow()\n\n # Set Window Title\n window.setWindowTitle(__title__)\n\n # Set Window Icon\n window.setWindowIcon(QtGui.QIcon(iconFilePath))\n\n # Close Window on exit\n sys.exit(app.exec_())", "def initUI(self):\n startbtn = QPushButton(\"Start Recroding\", self)\n startbtn.move(30, 50)\n\n stopbtn = QPushButton(\"Stop Recording\", self)\n stopbtn.move(150, 50)\n\n initbtn = QPushButton(\"Initilize\", self)\n initbtn.move(30, 100)\n\n plotbtn = QPushButton(\"Plot\", self)\n plotbtn.move(150, 100)\n\n startbtn.clicked.connect(self.start_recording)\n stopbtn.clicked.connect(self.stop_recording)\n initbtn.clicked.connect(self.init_recording)\n plotbtn.clicked.connect(self.plot_signals)\n\n self.statusBar()\n self.statusBar().showMessage('Click Init')\n\n 
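# window geometry, title and icon\n        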
self.setGeometry(300, 300, 290, 150)\n self.setWindowTitle('Recorder 1.0')\n self.setWindowIcon(QIcon(\"./Static/Images/icon.jpg\"))\n self.show()", "def resource_path(self,relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\\\\Visual_Ressources\\\\\"+self.language+\"\\\\\") \n # \".\"\n # 'Content\\\\Back_End\\\\'\n return os.path.join(base_path, relative_path)", "def initialize_gui(self) -> None:\n # pymol.Qt provides the PyQt5 interface\n from PyQt5 import QtWidgets\n from PyQt5.uic import loadUi\n # from pymol.Qt.utils import loadUi\n\n # populate the QMainWindow from our *.ui file\n uifile = os.path.join(os.path.dirname(__file__), 'PyMOL-KVFinder-web-tools.ui')\n loadUi(uifile, self)\n\n # ScrollBars binded to QListWidgets in Descriptors\n scroll_bar_volume = QtWidgets.QScrollBar(self)\n self.volume_list.setVerticalScrollBar(scroll_bar_volume)\n scroll_bar_area = QtWidgets.QScrollBar(self)\n self.area_list.setVerticalScrollBar(scroll_bar_area)\n scroll_bar_residues = QtWidgets.QScrollBar(self)\n self.residues_list.setVerticalScrollBar(scroll_bar_residues)\n\n # about text\n self.about_text.setHtml(about_text)\n\n ########################\n ### Buttons Callback ###\n ########################\n\n # hook up QMainWindow buttons callbacks\n self.button_run.clicked.connect(self.run)\n self.button_exit.clicked.connect(self.close)\n self.button_restore.clicked.connect(self.restore)\n self.button_grid.clicked.connect(self.show_grid)\n \n # hook up Parameters button callbacks\n self.button_browse.clicked.connect(self.select_directory)\n self.refresh_input.clicked.connect(lambda: self.refresh(self.input))\n \n # hook up Search Space button callbacks\n # Box Adjustment\n self.button_draw_box.clicked.connect(self.set_box)\n self.button_delete_box.clicked.connect(self.delete_box)\n self.button_redraw_box.clicked.connect(self.redraw_box)\n self.button_box_adjustment_help.clicked.connect(self.box_adjustment_help)\n # Ligand Adjustment\n self.refresh_ligand.clicked.connect(lambda: self.refresh(self.ligand))\n\n # hook up methods to results tab\n # Jobs\n self.available_jobs.currentIndexChanged.connect(self.fill_job_information)\n self.button_show_job.clicked.connect(self.show_id)\n self.button_add_job_id.clicked.connect(self.add_id)\n # Visualization\n self.button_browse_results.clicked.connect(self.select_results_file)\n self.button_load_results.clicked.connect(self.load_results)\n self.volume_list.itemSelectionChanged.connect(lambda list1=self.volume_list, list2=self.area_list: self.show_cavities(list1, list2))\n self.area_list.itemSelectionChanged.connect(lambda list1=self.area_list, list2=self.volume_list: self.show_cavities(list1, list2))\n self.residues_list.itemSelectionChanged.connect(self.show_residues)", "def connect_signals(self):\n self.logger.debug(\"Connect signals\")\n # connect special menu action signals\n self.actionNew.triggered.connect(self.new_project)\n self.actionOpen.triggered.connect(self.open_project)\n self.actionClose.triggered.connect(self._parent.close_project)\n self.actionQuit.triggered.connect(self.close)\n self.actionSave.triggered.connect(self._parent.save_project)\n self.actionSettings.triggered.connect(self.settingsCtr.ui.exec)\n self.actionShowLog.triggered.connect(self.showLogRequested.emit)\n self.actionSetRights.triggered.connect(self._parent.do_setrights)\n self.actionInstall.triggered.connect(self.quickinstall)\n 
self.actionUpload.triggered.connect(self.upload)\n\n self.actionSaveAs.triggered.connect(self.not_working)\n self.actionRecent.triggered.connect(self.not_working)\n self.actionScheduler.triggered.connect(self.not_working)\n\n self.actionUninstall.triggered.connect(self._parent.show_quickuninstall)\n\n self.actionDeploy.triggered.connect(self.not_working)\n\n self.actionBundleCreation.triggered.connect(self.not_working)\n self.actionDepotManager.triggered.connect(self.not_working)\n self.actionStartWinst.triggered.connect(self.not_working)\n self.actionScriptEditor.triggered.connect(self.not_working)\n self.actionHelp.triggered.connect(self.not_working)\n self.actionSearchForUpdates.triggered.connect(self.not_working)\n self.actionShowChangeLog.triggered.connect(self.not_working)\n self.actionAbout.triggered.connect(self.not_working)\n\n # buttons\n self.btnSave.clicked.connect(self._parent.save_project)\n self.btnChangelogEdit.clicked.connect(self._parent.open_changelog_editor)\n self.btnShowScrStruct.clicked.connect(self._parent.show_script_structure)\n\n self.btnScrSetup.clicked.connect(lambda: self.select_script_dialog(\"setup\"))\n self.btnScrUninstall.clicked.connect(lambda: self.select_script_dialog(\"uninstall\"))\n self.btnScrUpdate.clicked.connect(lambda: self.select_script_dialog(\"update\"))\n self.btnScrAlways.clicked.connect(lambda: self.select_script_dialog(\"always\"))\n self.btnScrOnce.clicked.connect(lambda: self.select_script_dialog(\"once\"))\n self.btnScrCustom.clicked.connect(lambda: self.select_script_dialog(\"custom\"))\n self.btnScrUserLogin.clicked.connect(lambda: self.select_script_dialog(\"userlogin\"))\n self.btnScrSetupDel.clicked.connect(lambda: self.select_script_dialog(\"setup\", False))\n self.btnScrUninstallDel.clicked.connect(lambda: self.select_script_dialog(\"uninstall\", False))\n self.btnScrUpdateDel.clicked.connect(lambda: self.select_script_dialog(\"update\", False))\n self.btnScrAlwaysDel.clicked.connect(lambda: self.select_script_dialog(\"always\", False))\n self.btnScrOnceDel.clicked.connect(lambda: self.select_script_dialog(\"once\", False))\n self.btnScrCustomDel.clicked.connect(lambda: self.select_script_dialog(\"custom\", False))\n self.btnScrUserLoginDel.clicked.connect(lambda: self.select_script_dialog(\"userlogin\", False))\n self.btnScrSetupEdit.clicked.connect(self.not_working)\n self.btnScrUninstallEdit.clicked.connect(self.not_working)\n self.btnScrUpdateEdit.clicked.connect(self.not_working)\n self.btnScrAlwaysEdit.clicked.connect(self.not_working)\n self.btnScrOnceEdit.clicked.connect(self.not_working)\n self.btnScrCustomEdit.clicked.connect(self.not_working)\n self.btnScrUserLoginEdit.clicked.connect(self.not_working)\n\n self.btnBuild.clicked.connect(self._parent.build_project)\n self.btnInstall.clicked.connect(self._parent.do_install)\n self.btnInstSetup.clicked.connect(self._parent.do_installsetup)\n self.btnUninstall.clicked.connect(self._parent.do_uninstall)\n self.btnDevFolder.clicked.connect(self.open_project_folder)\n\n self.btnDepAdd.clicked.connect(self._parent.add_dependency)\n self.btnDepModify.clicked.connect(self.submit_dependencies)\n self.btnDepDelete.clicked.connect(lambda a: self._parent.remove_dependency(self.tblDependencies.selectionModel().currentIndex().row()))\n\n self.btnPropAdd.clicked.connect(self._parent.add_property)\n self.btnPropModify.clicked.connect(self.submit_properties)\n self.btnPropDelete.clicked.connect(lambda a: 
self._parent.remove_property(self.tblProperties.selectionModel().currentIndex().row()))\n self.btnPropRead.clicked.connect(self._parent.get_properties_from_scripts)\n\n self.tblProperties.setModel(self._parent.model_properties)\n self.tblDependencies.setModel(self._parent.model_dependencies)\n self.tblDependencies.selectionModel().selectionChanged.connect(self.update_dependency_fields)\n self.tblProperties.selectionModel().selectionChanged.connect(self.update_property_fields)\n\n self._parent.modelDataUpdated.connect(self.reset_datamapper_and_display)\n self._parent.msgSend.connect(self.set_statbar_text)\n self._parent.processingStarted.connect(self.splash.show)\n self._parent.processingEnded.connect(self.splash.close)\n self._parent.processingEnded.connect(self.set_button_state)", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['hares folder'] = widgets.ExtendedLineEdit(\n label='HARES uitvoerbestanden folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_hares_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Start lezen uitvoerbestanden')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def create_plugin_files(config: Config) -> Config:\n c4d_symbols_file = os.path.join(\n config.destination,\n \"res\",\n \"c4d_symbols.h\"\n )\n c4d_symbols_content = \"\"\"enum\n{\n\n};\"\"\"\n\n if not os.path.isfile(c4d_symbols_file):\n assert_directories(c4d_symbols_file, True)\n\n with open(c4d_symbols_file, \"w\") as f:\n f.write(c4d_symbols_content)\n\n c4d_strings_file = os.path.join(\n config.destination,\n \"res/strings_us\",\n \"c4d_strings.str\"\n )\n\n if not os.path.isfile(c4d_strings_file):\n assert_directories(c4d_strings_file, True)\n\n with open(c4d_strings_file, \"w\") as f:\n f.write(\"\")\n\n return config", "def OnBuildPNGs(self, e):\n if (not defaults.use_tex):\n msg = \"LaTeX is disabled in the defaults.py file. 
To use this functionality, change the\"\n msg += \" use_tex option to True and restart the GUI.\"\n ShowMessage(msg, kind='warn')\n return\n question = \"Quantity code formulas are displayed using PNG images, which need to be generated.\"\n question += \"\\n\\n\\nImages should only be generated if they do not already exist or\"\n question += \" the quantity codes have changed, e.g., more custom outputs have been added.\"\n question += \"\\n\\n\\nThis can take ~60 sec, do you want to proceed?\"\n proceed = AskYesNo(question, title='Generate LaTeX Formula Images?')\n if (not proceed): return\n\n question = \"Choose a path where the images will be saved. The default value from defaults.py is shown.\"\n path = AskText(question, default=defaults.quantity_code_image_path, title=\"Where to store images?\")\n if (path is None): return\n defaults.quantity_code_image_path = path # user overrode this quantity, remember for later\n\n question = \"If image files already exist, do you want to overwrite them?\"\n overwrite = AskYesNo(question, title='Overwrite Existing Files?')\n\n # call render routine and display a progress bar\n Nq = len(self.mainparent.nmlpanel.output_quantities.quantities)\n offsets = list(self.mainparent.nmlpanel.output_quantities.offsets.keys())\n\n P = ProgressBar(Nq)\n P(0)\n for i,Q in enumerate(self.mainparent.nmlpanel.output_quantities.quantities):\n if (Q.name in offsets): continue\n render_tex(Q.code, Q.tex, defaults.quantity_code_image_path, overwrite=overwrite)\n\n P(i+1) # update progress bar", "def initUI(self):\n\n grid = QGridLayout()\n grid.addWidget(self.from_currency_label, 0, 0, Qt.AlignRight)\n grid.addWidget(self.from_currency, 0, 1)\n grid.addWidget(self.to_currency_label, 0, 2, Qt.AlignRight)\n grid.addWidget(self.to_currency, 0, 3)\n grid.addWidget(self.from_amount_label, 1, 0)\n grid.addWidget(self.from_amount, 1, 1)\n grid.addWidget(self.to_amount_label, 1, 2)\n grid.addWidget(self.to_amount, 1, 3)\n\n grid.addWidget(self.from_calendar, 2, 0, 1, 2)\n grid.addWidget(self.to_calendar, 2, 2, 1, 2)\n\n grid.addWidget(self.rates_plot, 3, 0, 1, 4)\n grid.addWidget(self.graph_hint, 4, 0, 1, 4)\n\n self.rates_plot.showGrid(x=True, y=True)\n self.rates_plot.setLabel('left', 'Rate')\n self.rates_plot.setLabel('bottom', 'Days')\n self.legend = self.rates_plot.addLegend()\n\n self.setLayout(grid)\n self.setWindowTitle('Currency Converter - Assignment 1 - Arnaud Bourget - 2981151')\n\n self.from_currency.currentIndexChanged.connect(self.updateUI)\n self.to_currency.currentIndexChanged.connect(self.updateUI)\n self.from_amount.valueChanged.connect(self.fromAmountHandler)\n self.from_calendar.selectionChanged.connect(self.fromCalendarHandler)\n self.to_calendar.selectionChanged.connect(self.toCalendarHandler)\n\n self.show()", "def generate(options):\n interactive = options['i']\n if interactive:\n generate_interactive(options)\n else:\n generate_rcfile(vars(options['c']), options['rcfile'])", "def initUI(self):\n\n lbl_names = ['Название проекта', 'Версия', 'Директория', 'Описание', 'Автор', 'Почта', 'Дополнительные зависимости', 'Название ноды']\n param_list = ['motor_driver', '0.0.0', '/home/mitya/catkin_ws/src/', 'The motor_driver package', 'D. 
Potapov',\n '[email protected]', 'nav_msgs, geometry_msgs, tf, ', 'motor_driver_node']\n labels = []\n for name in lbl_names:\n labels.append(QLabel(name))\n for i, ph in zip(range(len(labels)), param_list):\n ed_line = QLineEdit()\n if i == 1:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([0-9\\.])*[0-9]$\")))\n elif i == 5:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([a-z0-9_-]+\\.)*[a-z0-9_-]+@[a-z0-9_-]+(\\.[a-z0-9_-]+)*\\.[a-z]{2,6}$\")))\n ed_line.setPlaceholderText(ph)\n if i != 0:\n ed_line.textEdited.connect(self.change_data)\n else:\n ed_line.textEdited.connect(self.change_pkg_name)\n self.full_ed_lines.append(ed_line)\n grid = QGridLayout()\n grid.setSpacing(5)\n for i in range(1, len(labels) + 1):\n for j in range(0, 2):\n if j == 0:\n grid.addWidget(labels[i - 1], i, j)\n else:\n grid.addWidget(self.full_ed_lines[i - 1], i, j)\n ch_dirButton = QPushButton(self)\n ch_dirButton.setIcon(QIcon('./icons/open_folder.png'))\n ch_dirButton.clicked.connect(self.ch_dirDialog)\n grid.addWidget(ch_dirButton, 3, 3)\n genButton = QPushButton(\"Сгенерировать\")\n genButton.clicked.connect(self.generate)\n grid.addWidget(genButton, len(labels) + 2, 1)\n self.setLayout(grid)\n self.setMinimumSize(700, 400)\n self.show()", "def launchUI():\n app = QtWidgets.QApplication(sys.argv)\n ui = ClientFileManager()\n ui.resize(1200, 650)\n ui.show()\n sys.exit(app.exec_())", "def initUI(self, width=1200, height=800):\n \n self.setWindowTitle('InterFits %s'%__version__) \n \n self.main_frame = QtGui.QWidget() \n self.setWindowIcon(QtGui.QIcon('lib/icon.gif')) \n #self.gen_gui = generateGui()\n \n # Create buttons/widgets\n self.but_open = QtGui.QPushButton(\"Open\")\n self.but_open.clicked.connect(self.onButOpen)\n self.but_plot = QtGui.QPushButton(\"Plot\")\n self.but_plot.clicked.connect(self.updatePlot)\n \n self.lab_info = QtGui.QLabel(\" \")\n \n self.axes_select, self.laxes_select = self.createSpinner(\"Axis\", self.updateAxes, 0, 1, 1)\n self.spin_ref_ant, self.lspin_ref_ant = self.createSpinner(\"Ant 1\", self.updateAxes, 1, 2, 1)\n self.spin_ref_ant2, self.lspin_ref_ant2 = self.createSpinner(\"Ant 2\", self.updateAxes, 1, 2, 1)\n \n self.plot_select = QtGui.QComboBox(self)\n self.plot_select.addItem(\"Single Baseline\")\n self.plot_select.addItem(\"Single Baseline: Dual Pol\")\n self.plot_select.addItem(\"Multi baseline: Autocorrs\")\n self.plot_select.addItem(\"Multi baseline: Amplitude\")\n self.plot_select.addItem(\"Multi baseline: Phase\")\n self.plot_select.addItem(\"Multi baseline: Delay spectrum\")\n self.plot_select.addItem(\"UV coverage\")\n self.plot_select.activated.connect(self.updateSpinners)\n \n \n self.scale_select = QtGui.QComboBox(self)\n self.scale_select.addItem(\"Power (linear)\")\n self.scale_select.addItem(\"Power (decibel)\")\n self.scale_select.addItem(\"Variance\")\n self.scale_select.addItem(\"Skew\")\n self.scale_select.addItem(\"Kurtosis\")\n self.lscale_select = QtGui.QLabel(\"Scale\")\n \n self.current_plot = \"\"\n \n self.axes_select = QtGui.QComboBox(self)\n for v in ['Stokes I','Stokes Q','Stokes U','Stokes V']:\n self.axes_select.addItem(v)\n \n \n # Create plots\n self.sp_fig, self.sp_ax = self.createBlankPlot()\n self.sp_canvas = FigureCanvas(self.sp_fig)\n self.mpl_toolbar = NavigationToolbar(self.sp_canvas, self.main_frame)\n \n # Widget layout\n layout = QtGui.QVBoxLayout()\n h_layout = QtGui.QHBoxLayout()\n h_layout.addWidget(self.plot_select)\n h_layout.addStretch(1)\n h_layout.addWidget(self.laxes_select)\n 
h_layout.addWidget(self.axes_select)\n h_layout.addWidget(self.lscale_select)\n h_layout.addWidget(self.scale_select)\n h_layout.addWidget(self.lspin_ref_ant)\n h_layout.addWidget(self.spin_ref_ant)\n h_layout.addWidget(self.lspin_ref_ant2)\n h_layout.addWidget(self.spin_ref_ant2)\n h_layout.addWidget(self.but_plot)\n layout.addLayout(h_layout)\n h_layout = QtGui.QHBoxLayout()\n h_layout.addWidget(self.sp_canvas)\n \n layout.addLayout(h_layout)\n h_layout = QtGui.QHBoxLayout()\n h_layout.addStretch(1)\n\n layout.addLayout(h_layout)\n layout.addWidget(self.mpl_toolbar)\n \n bbox = QtGui.QHBoxLayout()\n bbox.addWidget(self.lab_info)\n bbox.addStretch(1)\n bbox.addWidget(self.but_open)\n layout.addLayout(bbox)\n\n self.setLayout(layout) \n #textEdit = QtGui.QTextEdit()\n #self.setCentralWidget(textEdit)\n #self.setCentralWidget(sp_canvas)\n \n # Load file if command line argument is passed\n if self.filename != '':\n try:\n self.uv = InterFits(self.filename)\n #self.openSdFits(self.filename)\n self.onFileOpen()\n self.plot_single_baseline(1,1)\n self.updateSpinners()\n except:\n try:\n self.uv = InterFitsOriginal(self.filename)\n #self.openSdFits(self.filename)\n self.onFileOpen()\n self.plot_single_baseline(1,1)\n self.updateSpinners()\n except:\n print \"Error: cannot open %s\"%self.filename\n raise\n \n self.setGeometry(300, 300, width, height) \n self.show()\n \n def on_draw(event):\n \"\"\" Release event matplotlib \"\"\"\n #print \"DRAWN!\"\n #if self.current_plot == 'single':\n # ax = self.current_ax\n # self.updateFreqAxis(ax)\n \n def on_click(event):\n \"\"\"Enlarge or restore the selected axis.\"\"\"\n ax = event.inaxes\n \n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n axis.set_visible(False)\n \n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n 
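# redraw the canvas once the click has been handled\n            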
event.canvas.draw()\n \n #h1(\"Loading PyQt GUI\")\n self.fig_connect = self.sp_fig.canvas.mpl_connect('button_press_event', on_click)\n self.fig_connect2 = self.sp_fig.canvas.mpl_connect('draw_event', on_draw)", "def convert(self):\r\n status = self.Debug_checkbox.isChecked() \r\n status2 = self.checkBox_Exe_option.isChecked()\r\n status3 = self.radioOut_to_File.isChecked()\r\n combine = bin(status3) + bin(status2) + bin(status) \r\n spin_value = self.spinBox_indent.value()\r\n str_spinvalue = str(spin_value)\r\n line_edit_txt = self.lineEdit_Py_file_name.text()\r\n overwrite_y_n = QMessageBox.No\r\n exists = os.path.isfile(line_edit_txt) #returns true if line_edit_txt is present\r\n\r\n cmdStr = {'0b00b00b0' : 'pyuic5 ' + self.fileName + ' -i ' + str_spinvalue,\r\n '0b00b00b1' : 'pyuic5 -d -i ' + str_spinvalue +' ' + self.fileName,\r\n '0b00b10b0' : 'pyuic5 -x -i ' + str_spinvalue +' ' + self.fileName,\r\n '0b00b10b1' : 'pyuic5 -x -d -i ' + str_spinvalue +' ' + self.fileName,\r\n '0b10b00b0' : 'pyuic5 -i ' + str_spinvalue +' ' + self.fileName + ' -o ' + line_edit_txt,\r\n '0b10b00b1' : 'pyuic5 -d -i ' + str_spinvalue +' ' + self.fileName + ' -o ' + line_edit_txt,\r\n '0b10b10b0' : 'pyuic5 -x -i ' + str_spinvalue +' ' + self.fileName+ ' -o ' + line_edit_txt,\r\n '0b10b10b1' : 'pyuic5 -d -x -i ' + str_spinvalue +' ' + self.fileName+ ' -o ' + line_edit_txt,\r\n }\r\n cmdStr2 = cmdStr[combine] # get dictionary command string defined by user selection\r\n # If out to file selected --------------------\r\n if self.radioOut_to_File.isChecked() == True: # is write out to file selected\r\n # else if file is present -----------------\r\n if exists == False : # is file present? If true ask permission to overwrite\r\n output2 = subprocess.getoutput(cmdStr2) # write command string - write file \r\n self.plainTextEdit.insertPlainText( \"\\n\" + \"Created file : \" + line_edit_txt )\r\n else : \r\n overwrite_y_n = QMessageBox.question(self,'File Overwrite', \"File exist Overwrite ? 
\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\r\n if overwrite_y_n == QMessageBox.Yes :\r\n output2 = subprocess.getoutput(cmdStr2) # write command string - write file \r\n self.plainTextEdit.insertPlainText( output2 +\"\\n\" + \"File updated \" )\r\n else:\r\n self.plainTextEdit.insertPlainText( \"\\n\" + \"pyuic5 command not executed NO File updated \" )\r\n\r\n else:\r\n output2 = subprocess.getoutput(cmdStr2) # write command string - write file \r\n self.plainTextEdit.insertPlainText( output2 + \"output to screen \\n\" ) # show return message\r", "def initGui(self):\n self.sketchButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'sketch.svg'),\n text=self.tr('Sketch on map'),\n callback=self.sketchAction,\n parent=self.iface.mainWindow(),\n object_name='mSketchAction')\n self.penButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'pen.svg'),\n text=self.tr('Draw line on map'),\n callback=self.penAction,\n parent=self.iface.mainWindow(),\n object_name='mPenAction')\n self.canvasButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'canvas.svg'),\n text=self.tr('Color and width canvas'),\n callback=None,\n parent=self.iface.mainWindow())\n self.eraseButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'erase.svg'),\n text=self.tr('Erase sketches'),\n callback=self.eraseAction,\n parent=self.iface.mainWindow(),\n object_name='mEraseAction')\n self.removeButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'remove.svg'),\n text=self.tr('Remove all sketches'),\n callback=self.removeSketchesAction,\n parent=self.iface.mainWindow(),\n object_name='mRemoveAllSketches')\n self.noteButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'note.svg'),\n text=self.tr('Add text annotations to sketches'),\n callback=None,\n parent=self.iface.mainWindow(),\n object_name='mAddTextAnnotations')\n self.convertButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'toLayer.svg'),\n text=self.tr('Convert annotations to Memory Layer'),\n callback=self.toMemoryLayerAction,\n parent=self.iface.mainWindow(),\n object_name='mConvertAnnotationsToMemoryLayer')\n self.saveButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'inbox.svg'),\n text=self.tr('Save sketches to file'),\n callback=self.saveAction,\n parent=self.iface.mainWindow(),\n object_name='mSaveSketchesToFile')\n self.loadButton = self.add_action(\n path.join(self.plugin_dir, 'icons', 'outbox.svg'),\n text=self.tr('Load sketches from file'),\n callback=self.loadAction,\n parent=self.iface.mainWindow(),\n object_name='mLoadSketchesFromFile')\n self.canvasButton.setMenu(self.canvasMenu())\n self.noteButton.setCheckable(True)\n self.penButton.setCheckable(True)\n self.sketchButton.setCheckable(True)\n self.eraseButton.setCheckable(True)\n self.geoSketches = []\n self.dumLayer = QgsVectorLayer(\"Point?crs=EPSG:4326\", \"temporary_points\", \"memory\")\n self.pressed = None\n self.previousPoint = None\n self.previousMoved = None\n self.gestures = 0\n self.points = 0\n self.currentColor = QColor(\"#aa0000\")\n self.currentWidth = 5\n self.annotation = sketchNoteDialog(self.iface)\n self.annotatatedSketch = None\n self.sketchEnabled(None)\n self.iface.projectRead.connect(self.projectReadAction)\n self.iface.newProjectCreated.connect(self.newProjectCreatedAction)\n QgsProject.instance().legendLayersAdded.connect(self.notSavedProjectAction)", "def createButtons(self):\r\n buttonsPosition = {\r\n \"BROWSE\": (3, 2),\r\n \"CALCULATE\": (13, 2),\r\n \"SAVE\": (14, 
0),\r\n \"CLEAR\": (14, 1),\r\n \"DELETE\": (14, 2),\r\n \"PRINT\": (13, 3),\r\n \"GENERATE BILL\": (14, 3)\r\n }\r\n self.buttons = {}\r\n for widgetName, position in buttonsPosition.items():\r\n self.button = QPushButton(widgetName)\r\n\r\n self.button.setStyleSheet(\"\"\"\r\n QPushButton {\r\n\r\n background-color: #A8DBC5;\r\n font-family: arial;\r\n font-weight: bold;\r\n font-size: 12px;\r\n border-color: white;\r\n }\r\n QPushButton:hover {\r\n background-color: #DAE0E2;\r\n }\r\n \"\"\")\r\n self.grid.addWidget(self.button, position[0], position[1])\r\n self.buttons[widgetName] = self.button\r\n # Setting calendar icon\r\n self.buttons[\"BROWSE\"].setIcon(QIcon(\"calendarr.png\"))\r\n # Buttons Signals\r\n self.buttons[\"CLEAR\"].clicked.connect(self.clearAll)\r\n self.buttons[\"BROWSE\"].clicked.connect(self.calendar)\r\n self.buttons[\"CALCULATE\"].clicked.connect(self.calculate)\r\n self.buttons[\"PRINT\"].clicked.connect(self.printBill)", "def _initUI(self):\n\n vlayout = QtWidgets.QVBoxLayout()\n\n # Description\n #----------------------------------------------------------------\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel()\n label.setText('Locatie:')\n label.setFixedWidth(100)\n hlayout.addWidget(label)\n\n label = QtWidgets.QLabel()\n label.setText(self.name)\n hlayout.addWidget(label)\n hlayout.setSpacing(10)\n\n vlayout.addLayout(hlayout)\n\n # Exportnaam\n #----------------------------------------------------------------\n self.exportname = ParameterInputLine(label='Exportnaam:', labelwidth=100)\n self.exportname.LineEdit.setMinimumWidth(200)\n vlayout.addLayout(self.exportname.layout)\n\n # Exportdatabase\n #----------------------------------------------------------------\n self.exportpath = ExtendedLineEdit(label='SQLite-database:', labelwidth=100, browsebutton=True)\n self.exportpath.BrowseButton.clicked.connect(self._get_path_database)\n vlayout.addLayout(self.exportpath.layout)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n # Buttons\n #----------------------------------------------------------------\n hbox = QtWidgets.QHBoxLayout()\n hbox.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum))\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n hbox.addWidget(self.closebutton)\n # Add ok/close\n self.savebutton = QtWidgets.QPushButton('Opslaan')\n self.savebutton.clicked.connect(self._save)\n hbox.addWidget(self.savebutton)\n\n vlayout.addLayout(hbox)\n\n # Add layout to widget\n self.setLayout(vlayout)", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n self.input_elements['factor Tm Tp'] = widgets.ParameterInputLine(\n label='Factor Tm naar Tp:',\n labelwidth=labelwidth,\n unitlabel='(NVT: Tp aanwezig)' if 'Tp' in self.hydraulic_loads.columns else '',\n validator=QtGui.QDoubleValidator(0.01, 99.99, 20),\n )\n\n if 'Tp' in self.hydraulic_loads.columns or self.parent_tab.step != 'I1':\n self.input_elements['factor Tm Tp'].set_enabled(False)\n\n # Add line edit with browsebutton for Master template\n self.input_elements['mastertemplate'] = widgets.ExtendedLineEdit(\n label='Master template bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_master_template)\n )\n\n # Add line edit 
with browsebutton for depth file\n self.input_elements['depthfile'] = widgets.ExtendedLineEdit(\n label='Bathymetry bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_bathymetry_file)\n )\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['swanfolder'] = widgets.ExtendedLineEdit(\n label='SWAN uitvoer folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_swan_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Genereer invoer')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)", "def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n \n self.setWindowTitle('The Visual Climate Data Analysis Tools - (VCDAT)')\n layout = QtGui.QVBoxLayout()\n self.setLayout(layout)\n\n # Init Menu Widget\n self.menuWidget = QMenuWidget(self)\n\n # Init File Widget\n vsplitter = QtGui.QSplitter(QtCore.Qt.Vertical) \n fileWidget = QLabeledWidgetContainer(QCDATFileWidget(),\n 'FILE VARIABLES')\n vsplitter.addWidget(fileWidget)\n\n # Init Defined Variables Widget\n definedVar = QLabeledWidgetContainer(QDefinedVariable(),\n 'DEFINED VARIABLES')\n vsplitter.addWidget(definedVar)\n hsplitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n hsplitter.addWidget(vsplitter)\n\n # Init Var Plotting Widget\n varView = QLabeledWidgetContainer(QVariableView(),\n 'PLOTTING')\n hsplitter.addWidget(varView)\n hsplitter.setStretchFactor(1, 1)\n layout.addWidget(hsplitter)\n\n # Init guiController\n guiController = GuiController(fileWidget.getWidget(),\n definedVar.getWidget(),\n varView.getWidget())\n guiController.initTeachingCommands()\n self.guiController = guiController # So guicontroller doesn't get garbage collected\n\n # Connect signals between self & GuiController\n self.connect(self, QtCore.SIGNAL('setRecordCommands'),\n guiController.setRecordCommands)\n self.connect(self, QtCore.SIGNAL('viewTeachingCommands'),\n guiController.viewTeachingCommands)\n self.connect(self, 
QtCore.SIGNAL('closeTeachingCommands'),\n guiController.closeTeachingCommands) \n\n # Connect Signals between QVariableView & QDefinedVariable\n varView.connect(definedVar.getWidget(), QtCore.SIGNAL('selectDefinedVariableEvent'),\n varView.getWidget().selectDefinedVariableEvent)\n varView.connect(definedVar.getWidget(), QtCore.SIGNAL('setupDefinedVariableAxes'),\n varView.getWidget().setupDefinedVariableAxes)\n definedVar.connect(varView.getWidget(), QtCore.SIGNAL('plotPressed'),\n definedVar.getWidget().defineQuickplot)\n definedVar.connect(varView.getWidget(), QtCore.SIGNAL('defineVariable'),\n definedVar.getWidget().defineVariable)\n\n # Connect Signals between QFileWidget & QVariableView\n varView.connect(fileWidget.getWidget(), QtCore.SIGNAL('variableChanged'),\n varView.getWidget().setupDefinedVariableAxes)\n varView.connect(fileWidget.getWidget(), QtCore.SIGNAL('defineVariableEvent'),\n varView.getWidget().defineVariableEvent)", "def initUI(self):\n\n self.wid = RosGenWidget()\n self.setCentralWidget(self.wid)\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&Файл')\n editMenu = menubar.addMenu('&Редактирование')\n self.create_menu_par('Менеджер подписчиков и издателей', self.wid.show_manager, fileMenu, 'Ctrl+M')\n self.create_menu_par('Очистить', self.wid.clear_all_lines, editMenu, 'Ctrl+D')\n self.create_menu_par('Загрузить данные из...', self.wid.open_fileDialog, fileMenu, 'Ctrl+F')\n self.create_menu_par('Сохранить как...', self.wid.save_fileDialog, fileMenu, 'Ctrl+S')\n self.create_menu_par('Выход', self.exit_app, fileMenu, 'Esc')\n self.statusbar = self.statusBar()\n self.statusbar.showMessage('Ожидание данных')\n self.wid.msg2Statusbar[str].connect(self.statusbar.showMessage)\n self.setGeometry(600, 200, 700, 400)\n self.setWindowTitle('Генератор шаблонов ROS-приложения')\n self.show()", "def main():\n app = QApplication(sys.argv)\n app_window = Gui()\n app_window.show()\n sys.exit(app.exec_())", "def setup_ui(self):\n\t\t\n\t\t# CREATION DU LAYOUT\n\t\tself.layout = QtWidgets.QHBoxLayout(self) #le layout prend la fenetre principal en argument donc notre self\n\t\t\n\t\t# CREATION DES WIDGETS\n\t\tself.cbb_devisesFrom = QtWidgets.QComboBox() #combobox (liste deroulante) pour choisir la devise From\n\t\tself.spn_montant = QtWidgets.QSpinBox() #spinbox (zone affichage) du montant a convertir\n\t\tself.cbb_devisesTo = QtWidgets.QComboBox() #cbb pour choisir la devise To\n\t\tself.spn_montantConverti = QtWidgets.QSpinBox() #spn du montant converti\n\t\tself.btn_inverser = QtWidgets.QPushButton(\"Inverser devises\") #bouton pour inverser les devises\n\t\t\n\t\t# AJOUT AU LAYOUT\n\t\tself.layout.addWidget(self.cbb_devisesFrom)\n\t\tself.layout.addWidget(self.spn_montant)\n\t\tself.layout.addWidget(self.cbb_devisesTo)\n\t\tself.layout.addWidget(self.spn_montantConverti)\n\t\tself.layout.addWidget(self.btn_inverser)", "def return_folder_button_style_sheet():\n style_sheet_folder_button = '''\n QLabel {\n font: 12pt \"Verdana\";\n margin-left: 5px;\n background-color: transparent;\n }\n QPushButton {\n border: 0px solid lightgray;\n border-radius: 4px;\n color: transparent;\n background-color: transparent;\n min-width: 20px;\n icon-size: 20px;\n font: 12pt \"Verdana\";\n margin: 10px;\n padding: 6px;\n }\n QPushButton:pressed {\n background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n stop: 0 #dadbde, stop: 1 #f6f7fa);\n }\n '''\n return style_sheet_folder_button", "def setup_ui(self):\n self.resize(480, 480)\n self.setWindowFlags(self.windowFlags() | 
QtCore.Qt.FramelessWindowHint)\n\n self.setStyleSheet(\n \"\"\"\n QPushButton {\n border-style: outset;\n border-radius: 0px;\n padding: 6px;\n }\n QPushButton:hover {\n background-color: #cf7500;\n border-style: inset;\n }\n QPushButton:pressed {\n background-color: #ffa126;\n border-style: inset;\n }\n \"\"\"\n )\n\n self.main_layout = QtWidgets.QVBoxLayout(self)\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n\n self.sub_layout = QtWidgets.QHBoxLayout()\n\n self.widget = QtWidgets.QWidget(self)\n self.widget.setStyleSheet(\".QWidget{background-color: rgb(20, 20, 40);}\")\n\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)\n self.verticalLayout_2.setContentsMargins(9, 0, 0, 0)\n\n self.close_button = QtWidgets.QPushButton(self.widget)\n self.close_button.setMinimumSize(QtCore.QSize(35, 25))\n self.close_button.setIcon(QIcon(':/icon/icons/close.png'))\n self.close_button.setIconSize(QSize(16, 16))\n self.close_button.setMaximumSize(QtCore.QSize(35, 25))\n self.close_button.setStyleSheet(\"color: white;\\n\"\n \"font: 13pt \\\"Verdana\\\";\\n\"\n \"border-radius: 1px;\\n\"\n \"opacity: 200;\\n\")\n self.close_button.clicked.connect(self.close)\n self.verticalLayout_2.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.sub_layout1 = QtWidgets.QVBoxLayout()\n self.sub_layout1.setContentsMargins(-1, 15, -1, -1)\n\n self.label = QtWidgets.QLabel(self.widget)\n self.label.setMinimumSize(QtCore.QSize(80, 80))\n self.label.setMaximumSize(QtCore.QSize(80, 80))\n self.label.setStyleSheet(\"image: url(:/icon/icons/rocket_48x48.png);\")\n self.sub_layout1.addWidget(self.label, 0, QtCore.Qt.AlignHCenter)\n\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setContentsMargins(50, 35, 59, -1)\n\n self.label_2 = QtWidgets.QLabel(self.widget)\n self.label_2.setStyleSheet(\"color: rgb(231, 231, 231);\\n\"\n \"font: 15pt \\\"Verdana\\\";\")\n self.form_layout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)\n\n self.username_edit = QtWidgets.QLineEdit(self.widget)\n self.username_edit.setMinimumSize(QtCore.QSize(0, 40))\n self.username_edit.setStyleSheet(\"QLineEdit {\\n\"\n \"color: rgb(231, 231, 231);\\n\"\n \"font: 15pt \\\"Verdana\\\";\\n\"\n \"border: None;\\n\"\n \"border-bottom-color: white;\\n\"\n \"border-radius: 10px;\\n\"\n \"padding: 0 8px;\\n\"\n \"background: rgb(20, 20, 40);\\n\"\n \"selection-background-color: darkgray;\\n\"\n \"}\")\n self.username_edit.setFocus()\n self.username_edit.setPlaceholderText(\"用户名\")\n self.form_layout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.username_edit)\n\n self.label_3 = QtWidgets.QLabel(self.widget)\n self.form_layout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_3)\n\n self.password_edit = PasswordEdit(self.widget)\n self.password_edit.setPlaceholderText(\"密码\")\n self.password_edit.setMinimumSize(QtCore.QSize(0, 40))\n self.password_edit.setStyleSheet(\"QLineEdit {\\n\"\n \"color: rgb(231, 231, 231);\\n\"\n \"font: 15pt \\\"Verdana\\\";\\n\"\n \"border: None;\\n\"\n \"border-bottom-color: white;\\n\"\n \"border-radius: 10px;\\n\"\n \"padding: 0 8px;\\n\"\n \"background: rgb(20, 20, 40);\\n\"\n \"selection-background-color: darkgray;\\n\"\n \"}\")\n self.form_layout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.password_edit)\n self.password_edit.setEchoMode(QtWidgets.QLineEdit.Password)\n\n self.line = QtWidgets.QFrame(self.widget)\n self.line.setStyleSheet(\"border: 2px solid white;\")\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n 
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.form_layout.setWidget(1, QtWidgets.QFormLayout.SpanningRole, self.line)\n\n self.line_2 = QtWidgets.QFrame(self.widget)\n self.line_2.setStyleSheet(\"border: 2px solid white;\")\n self.line_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.form_layout.setWidget(5, QtWidgets.QFormLayout.SpanningRole, self.line_2)\n\n self.login_button = QtWidgets.QPushButton(self.widget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.login_button.sizePolicy().hasHeightForWidth())\n\n self.login_button.setSizePolicy(sizePolicy)\n self.login_button.setMinimumSize(QtCore.QSize(0, 60))\n self.login_button.setAutoFillBackground(False)\n self.login_button.setStyleSheet(\"color: rgb(231, 231, 231);\\n\"\n \"font: 17pt \\\"Verdana\\\";\\n\"\n \"border: 2px solid orange;\\n\"\n \"padding: 5px;\\n\"\n \"border-radius: 3px;\\n\"\n \"opacity: 200;\\n\"\n \"\")\n self.login_button.setAutoDefault(True)\n self.form_layout.setWidget(7, QtWidgets.QFormLayout.SpanningRole, self.login_button)\n\n self.register_button = QtWidgets.QPushButton(self.widget)\n self.register_button.setMinimumSize(QtCore.QSize(0, 60))\n self.register_button.setStyleSheet(\"color: rgb(231, 231, 231);\\n\"\n \"font: 17pt \\\"Verdana\\\";\\n\"\n \"border: 2px solid orange;\\n\"\n \"padding: 5px;\\n\"\n \"border-radius: 3px;\\n\"\n \"opacity: 200;\\n\"\n \"\")\n self.register_button.setDefault(False)\n self.register_button.setFlat(False)\n self.form_layout.setWidget(8, QtWidgets.QFormLayout.SpanningRole, self.register_button)\n\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.form_layout.setItem(6, QtWidgets.QFormLayout.SpanningRole, spacerItem)\n self.sub_layout1.addLayout(self.form_layout)\n\n spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.sub_layout1.addItem(spacerItem1)\n self.verticalLayout_2.addLayout(self.sub_layout1)\n\n self.sub_layout.addWidget(self.widget)\n self.sub_layout.setStretch(0, 1)\n self.main_layout.addLayout(self.sub_layout)\n\n self.retranslateUi()\n QtCore.QMetaObject.connectSlotsByName(self)", "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def initGui(self):\n\n self.action = QAction(QIcon(self.plugdir + '/icon.png'), 'MGB Water Balance', self.iface.mainWindow())\n self.action.triggered.connect(self.run)\n self.iface.addToolBarIcon(self.action)\n self.iface.addPluginToMenu('&IPH - Plugins', self.action)", "def build_UI(self):\n\n #Common local coordinates to change the UI positions\n common_x = 0\n common_y = 5\n\n #Create the Main Title\n self.titleFont = font.Font(family = FONTS[\"lucida grande\"], size = 30)\n self.title = cGUIf.get_TextLabel(self,\n \"File Converter App \",\n self.titleFont,\n 135 + common_x,\n 40 + common_y)\n\n #Add the \"Main Icon\"\n self.mainIcon = cGUIf.get_ImgLabel(self,\n self.__pics[\"main_icon\"],\n 280 + common_x,\n 125 + common_y)\n\n\n #Create a subtitle that says \"options\"\n self.subtitleFont = font.Font(family = FONTS[\"courier new\"], size = 22)\n self.subtitle = 
cGUIf.get_TextLabel(self,\n \"Options\",\n self.subtitleFont,\n 240 + common_x,\n 195 + common_y)\n\n #Create a label that says \"Image Conversion\"\n self.conversionFont = font.Font(family = FONTS[\"times new roman\"], size = 15)\n self.imageConversionLabel = cGUIf.get_TextLabel(self,\n \" Image\\n Conversion\",\n self.conversionFont,\n 60 + common_x,\n 285 + common_y)\n\n #Create a button for Image Conversion\n self.imageButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"image_frame\"),\n 190 + common_x,\n 270 + common_y)\n self.imageButton.configure(image = self.__pics[\"image_icon\"])\n\n #Create a label that says \"Audio Conversion\"\n self.audioConversionLabel = cGUIf.get_TextLabel(self,\n \" Audio\\n Conversion\",\n self.conversionFont,\n 440 + common_x,\n 285 + common_y)\n\n #Create a button for Audio Conversion\n self.audioButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"audio_frame\"),\n 340 + common_x,\n 270 + common_y)\n self.audioButton.configure(image = self.__pics[\"audio_icon\"])\n\n #Create a label that says \"Doc Conversion\"\n self.docConversionLabel = cGUIf.get_TextLabel(self,\n \" Doc\\n Conversion\",\n self.conversionFont,\n 60 + common_x,\n 410 + common_y)\n\n\n #Create a button for Doc Conversion\n self.docButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"doc_frame\"),\n 190 + common_x,\n 400 + common_y)\n self.docButton.configure(image = self.__pics[\"doc_icon\"])\n\n\n #Create a label that says \"Video Conversion\"\n self.videoConversionLabel = cGUIf.get_TextLabel(self,\n \" Video\\n Conversion\",\n self.conversionFont,\n 440 + common_x,\n 410 + common_y)\n\n #Create a button for Video Conversion\n self.videoButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"video_frame\"),\n 340 + common_x,\n 400 + common_y)\n self.videoButton.configure(image = self.__pics[\"video_icon\"])", "def __pyName(py_dir, py_file):\n return py_dir, \"Ui_{0}\".format(py_file)", "def setupUi(self, MainWindow):\n MainWindow.resize(500, 500)\n\n self.setMenu(MainWindow)\n\n\n widget = QtWidgets.QWidget()\n MainWindow.setCentralWidget(widget)\n \n\n grid = QtWidgets.QGridLayout()\n\n # label horizontally expanding!\n emptyLabel = QtWidgets.QLabel('')\n self.setPolicy(emptyLabel, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n\n # label horizontally and vertically expanding!\n labExpExp = QtWidgets.QLabel('')\n self.setPolicy(labExpExp, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n\n # label vertically expanding!\n labFixExp = QtWidgets.QLabel('')\n self.setPolicy(labFixExp, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)\n\n # Connect command line!\n self.refreshPorts = QtWidgets.QPushButton()\n self.refreshPorts.resize(32, 27)\n self.setFixedPolicy(self.refreshPorts)\n self.refreshPorts.setIcon(QtGui.QIcon('icons/refresh.png'))\n grid.addWidget(self.refreshPorts, 2, 0)\n \n self.ports = QtWidgets.QComboBox()\n self.ports.resize(247, 27)\n self.setPolicy(self.ports, QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed)\n grid.addWidget(self.ports, 2, 1)\n \n self.connectPort = QtWidgets.QPushButton('connect')\n self.setFixedPolicy(self.connectPort)\n grid.addWidget(self.connectPort, 2, 2)\n \n self.disconnectPort = QtWidgets.QPushButton('disconnect')\n self.setFixedPolicy(self.disconnectPort)\n grid.addWidget(self.disconnectPort, 2, 3)\n \n # graph widget!\n self.graph = Graph()\n self.setPolicy(self.graph.canvas, QtWidgets.QSizePolicy.Expanding, 
QtWidgets.QSizePolicy.Expanding)\n grid.addWidget(self.graph.canvas, 3, 0, 3, 2)\n \n # File field line!\n self.fileNameInput = QtWidgets.QLineEdit()\n self.fileNameInput.setText('./output.csv')\n self.setPolicy(self.fileNameInput, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n self.fileName = QtWidgets.QLabel('File name: ')\n self.setFixedPolicy(self.fileName)\n fileFieldLine = QtWidgets.QHBoxLayout()\n fileFieldLine.addWidget(self.fileName)\n fileFieldLine.addWidget(self.fileNameInput)\n fileFieldLine.addWidget(emptyLabel)\n\n # File command line! \n self.startWriteButton = QtWidgets.QPushButton('start recording')\n self.setFixedPolicy(self.startWriteButton)\n self.stopWriteButton = QtWidgets.QPushButton('stop recording')\n self.setFixedPolicy(self.stopWriteButton)\n self.stopWriteButton.setEnabled(False)\n self.openFileButton = QtWidgets.QPushButton('open file')\n self.setFixedPolicy(self.openFileButton)\n fileCommandLine = QtWidgets.QHBoxLayout()\n fileCommandLine.addWidget(self.startWriteButton)\n fileCommandLine.addWidget(self.stopWriteButton)\n fileCommandLine.addWidget(self.openFileButton)\n \n\n # time line field!\n timeLayout = QtWidgets.QHBoxLayout()\n self.time = QtWidgets.QLineEdit()\n self.setPolicy(self.time, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n self.timeLabel = QtWidgets.QLabel('time:')\n self.setFixedPolicy(self.timeLabel)\n timeLayout.addWidget(self.timeLabel)\n timeLayout.addWidget(self.time)\n\n # calculate data line field!\n calcDataLayout = QtWidgets.QHBoxLayout()\n self.calcData = QtWidgets.QLineEdit()\n self.setPolicy(self.calcData, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n self.calcDataLabel = QtWidgets.QLabel('data:')\n self.setFixedPolicy(self.calcDataLabel)\n calcDataLayout.addWidget(self.calcDataLabel)\n calcDataLayout.addWidget(self.calcData)\n\n # arduino analogRead() data!\n ardAnalogLayout = QtWidgets.QHBoxLayout()\n self.ardAnalog = QtWidgets.QLineEdit()\n self.setPolicy(self.ardAnalog, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n self.ardAnalogLabel = QtWidgets.QLabel('A5:')\n self.setFixedPolicy(self.ardAnalogLabel)\n ardAnalogLayout.addWidget(self.ardAnalogLabel)\n ardAnalogLayout.addWidget(self.ardAnalog)\n\n # raw data!\n rawDataLayout = QtWidgets.QHBoxLayout()\n self.rawData = QtWidgets.QLineEdit()\n self.setPolicy(self.rawData, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n self.rawDataLabel = QtWidgets.QLabel('Raw data:')\n self.setFixedPolicy(self.rawDataLabel)\n rawDataLayout.addWidget(self.rawDataLabel)\n rawDataLayout.addWidget(self.rawData)\n\n fileCommandLine.addWidget(emptyLabel)\n fileCommandLine.addLayout(timeLayout)\n fileCommandLine.addWidget(emptyLabel)\n fileCommandLine.addLayout(calcDataLayout)\n fileCommandLine.addWidget(emptyLabel)\n fileCommandLine.addLayout(ardAnalogLayout)\n fileCommandLine.addWidget(emptyLabel)\n fileCommandLine.addLayout(rawDataLayout)\n \n\n # Graph command line!\n graphCommandLine = QtWidgets.QHBoxLayout()\n self.startDraw = QtWidgets.QPushButton('start Drawing')\n self.setFixedPolicy(self.startDraw)\n self.stopDraw = QtWidgets.QPushButton('stop Drawing')\n self.setFixedPolicy(self.stopDraw)\n self.stopDraw.setEnabled(False)\n self.clear = QtWidgets.QPushButton('clear')\n self.setFixedPolicy(self.clear)\n graphCommandLine.addWidget(self.startDraw)\n graphCommandLine.addWidget(self.stopDraw)\n graphCommandLine.addWidget(self.clear)\n graphCommandLine.addWidget(emptyLabel)\n\n # Widgets below the graphs\n vLayout = 
QtWidgets.QVBoxLayout()\n vLayout.addLayout(fileFieldLine)\n vLayout.addLayout(fileCommandLine)\n vLayout.addLayout(graphCommandLine)\n vLayout.addWidget(labExpExp)\n grid.addLayout(vLayout, 6, 0, 2, 2)\n\n \n rightBlockLayout = QtWidgets.QVBoxLayout()\n\n self.captureZero = QtWidgets.QPushButton('detect 0')\n self.setPolicy(self.captureZero, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n\n self.captureCoef = QtWidgets.QPushButton('to measure the coefficient')\n self.setPolicy(self.captureCoef, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n\n # zero line field!\n zeroLineField = QtWidgets.QHBoxLayout()\n self.zeroKg = QtWidgets.QLineEdit()\n self.setPolicy(self.zeroKg, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n self.zeroLabel = QtWidgets.QLabel('zero:')\n self.setFixedPolicy(self.zeroLabel)\n zeroLineField.addWidget(self.zeroLabel)\n zeroLineField.addWidget(self.zeroKg)\n\n # coef line field!\n coefLineField = QtWidgets.QHBoxLayout()\n self.coef = QtWidgets.QLineEdit()\n self.setPolicy(self.coef, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n self.coefLabel = QtWidgets.QLabel('coef:')\n self.setFixedPolicy(self.coefLabel)\n coefLineField.addWidget(self.coefLabel)\n coefLineField.addWidget(self.coef)\n\n # weight line field!\n weightLineField = QtWidgets.QHBoxLayout()\n self.measureMass = QtWidgets.QLineEdit()\n self.setPolicy(self.measureMass, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\n self.weightLabel = QtWidgets.QLabel('weight:')\n self.setFixedPolicy(self.weightLabel)\n weightLineField.addWidget(self.weightLabel)\n weightLineField.addWidget(self.measureMass)\n \n rightBlockLayout.addWidget(self.captureZero)\n rightBlockLayout.addWidget(self.captureCoef)\n rightBlockLayout.addLayout(zeroLineField)\n rightBlockLayout.addLayout(coefLineField)\n rightBlockLayout.addLayout(weightLineField)\n rightBlockLayout.addWidget(labFixExp)\n\n grid.addLayout(rightBlockLayout, 3, 2, 3, 2)\n\n widget.setLayout(grid)\n\n QtCore.QMetaObject.connectSlotsByName(MainWindow)", "def _add_static_files(self, req):\n add_script(req, self._get_jqplot('jquery.jqplot'))\n add_stylesheet(req, 'common/js/jqPlot/jquery.jqplot.css')\n # excanvas is needed for IE8 support\n add_script(req, self._get_jqplot('excanvas.min'))\n add_script(req, self._get_jqplot('plugins/jqplot.dateAxisRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.highlighter'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasTextRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisTickRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisLabelRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.enhancedLegendRenderer'))", "def __setup_ui(self):\n self.pixel_label = QLabel(\"\", self)\n self.pixel_label.setFixedWidth(100)\n self.pixel_coords_label = QLabel(\"\", self)\n self.statusBar().addPermanentWidget(self.pixel_coords_label)\n self.statusBar().addPermanentWidget(self.pixel_label)\n\n self.current_fps_label = QLabel(\"\", self)\n self.statusBar().addPermanentWidget(self.current_fps_label)\n\n self.toolbar = self.addToolBar(\"default\")\n self.toolbar.setMovable(False)\n self.setContextMenuPolicy(Qt.NoContextMenu)\n\n exit_act = QAction(QIcon.fromTheme('exit'), 'Exit', self)\n exit_act.setShortcut('Ctrl+Q')\n exit_act.setStatusTip(\"Exit application\")\n exit_act.triggered.connect(self.app.quit)\n self.toolbar.addAction(exit_act)\n\n preferences_action = 
QAction(QIcon.fromTheme(\"preferences-desktop\"),\n \"Preferences\", self)\n preferences_action.setStatusTip(\"Open preferences dialog\")\n preferences_action.triggered.connect(self.open_preferences)\n self.toolbar.addAction(preferences_action)\n\n self.device_label = QLabel(\"Device:\")\n self.device_combo = QComboBox(self)\n self.device_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n # self.device_combo.setMinimumWidth(300)\n self.device_combo.activated[str].connect(self.on_device_selected)\n self.toolbar.addWidget(self.device_label)\n self.toolbar.addWidget(self.device_combo)\n\n self.format_label = QLabel(\"Format:\")\n self.format_combo = QComboBox(self)\n self.format_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.format_combo.setMinimumWidth(150)\n self.format_combo.activated[str].connect(self.on_format_selected)\n self.toolbar.addWidget(self.format_label)\n self.toolbar.addWidget(self.format_combo)\n\n self.resolution_label = QLabel(\"Resolution:\")\n self.resolution_combo = TcamComboBox(self, \"Select Resolution\")\n self.resolution_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.resolution_combo.activated[str].connect(self.on_resolution_selected)\n self.toolbar.addWidget(self.resolution_label)\n self.toolbar.addWidget(self.resolution_combo)\n\n self.fps_label = QLabel(\"FPS:\")\n self.fps_combo = TcamComboBox(self, \"Select FPS:\")\n self.fps_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.fps_combo.activated[str].connect(self.on_fps_selected)\n self.toolbar.addWidget(self.fps_label)\n self.toolbar.addWidget(self.fps_combo)\n\n self.save_image = QAction(\"Save Image\", self)\n self.save_image.setIcon(QIcon.fromTheme(\"insert-image\"))\n\n self.save_image.triggered.connect(self.save_image_action)\n self.toolbar.addAction(self.save_image)\n\n self.fit_to_win = QAction(\"Fit To Window\", self)\n self.fit_to_win.setIcon(QIcon.fromTheme(\"zoom-fit-best\"))\n\n self.fit_to_win.triggered.connect(self.fit_to_window)\n self.toolbar.addAction(self.fit_to_win)\n\n self.props_action = QAction(\"\", self)\n self.props_action.setText(\"Properties\")\n self.props_action.setVisible(False)\n self.props_action.triggered.connect(self.toggle_properties_dialog)\n self.toolbar.addAction(self.props_action)\n\n self.recording_action = QAction(\"\", self)\n self.recording_action.setIcon(QIcon.fromTheme(\"media-record\"))\n self.recording_action.setIconText(\"Start recording\")\n self.recording_action.setText(\"Start recording\")\n self.recording_action.triggered.connect(self.start_recording_video)\n self.toolbar.addAction(self.recording_action)\n\n self.set_device_menus_enabled(False)\n\n self.view = None", "def initGui(self):\n icon = QIcon(\":/plugins/DEMto3D/icons/demto3d.png\")\n text = self.tr(\"DEM 3D printing\")\n parent = self.iface.mainWindow()\n self.action = QAction(icon, text, parent)\n self.action.setObjectName(text)\n self.action.setStatusTip(text)\n self.action.triggered.connect(self.run)\n\n self.iface.addRasterToolBarIcon(self.action)\n self.iface.addPluginToRasterMenu(self.menu, self.action)", "def setupUi(self, Form: QtWidgets.QWidget) -> None:\n\n self.email = Path(\"modules\", \"user.txt\").read_text()\n Path(\"modules\", \"user.txt\").unlink()\n self.uid = id_(self.email)\n\n self.Form = Form\n Form.setObjectName(\"Form\")\n Form.setEnabled(True)\n Form.setFixedSize(522, 553)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/newPrefix/new.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n Form.setWindowIcon(icon)\n 
Form.setStyleSheet(\n \"*{\\n\"\n \"font-family:Calibri;\\n\"\n \"font-size:20px;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QFrame\\n\"\n \"{\\n\"\n \"background: rgba(0,0,0,0.8);\\n\"\n \"border-radius:15px\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton\\n\"\n \"{\\n\"\n \"\\n\"\n \"background:#2671a0;\\n\"\n \"border-radius:60px;\\n\"\n \"font-weight:bold;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QToolButton\\n\"\n \"{\\n\"\n \"\\n\"\n \"background:#2671a0;\\n\"\n \"border-radius:50px;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QLabel\\n\"\n \"{\\n\"\n \"color:white;\\n\"\n \"background:transparent;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton\\n\"\n \"{\\n\"\n \"color:white;\\n\"\n \"border-radius:15px;\\n\"\n \"}\\n\"\n \"\\n\"\n \"QLineEdit\\n\"\n \"{\\n\"\n \"background:transparent;\\n\"\n \"border:none;\\n\"\n \"color:white;\\n\"\n \"border-bottom:1px solid #717072;\\n\"\n \"}\"\n )\n self.frame = QtWidgets.QFrame(Form)\n self.frame.setGeometry(QtCore.QRect(10, 60, 501, 481))\n self.frame.setStyleSheet(\"\")\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.pushButton = QtWidgets.QPushButton(self.frame)\n self.pushButton.setGeometry(QtCore.QRect(80, 410, 331, 51))\n self.pushButton.setObjectName(\"pushButton\")\n\n self.pushButton.clicked.connect(self.log_out)\n\n self.tabWidget = QtWidgets.QTabWidget(self.frame)\n self.tabWidget.setGeometry(QtCore.QRect(0, 80, 501, 291))\n self.tabWidget.setStyleSheet(\n \"QTabWidget::pane{border-bottom: 0px;\\n\"\n \"background: rgba(0,0,0,0.8);\\n\"\n \"}\"\n )\n self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)\n self.tabWidget.setMovable(False)\n self.tabWidget.setObjectName(\"tabWidget\")\n\n # View password\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName(\"tab\")\n self.listWidget_2 = QtWidgets.QListWidget(self.tab)\n self.listWidget_2.setGeometry(QtCore.QRect(20, 30, 461, 151))\n self.listWidget_2.setStyleSheet(\"background-color:white;\")\n self.listWidget_2.setObjectName(\"listWidget_2\")\n self.lineEdit_7 = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_7.setGeometry(QtCore.QRect(30, 210, 261, 31))\n self.lineEdit_7.setStyleSheet(\n \"color:white;\\n\"\n \"\"\n )\n self.lineEdit_7.setObjectName(\"lineEdit_7\")\n self.pushButton_2 = QtWidgets.QPushButton(self.tab)\n self.pushButton_2.setGeometry(QtCore.QRect(330, 210, 151, 31))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.tabWidget.addTab(self.tab, \"\")\n\n self.pushButton_2.clicked.connect(self.view_password)\n\n # Store Password\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName(\"tab_2\")\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(598, 16, 208, 33))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_2.setGeometry(QtCore.QRect(200, 40, 241, 33))\n self.lineEdit_2.setStyleSheet(\"color:white;\")\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(90, 40, 91, 42))\n self.label.setStyleSheet(\"font-size:26px;\")\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(60, 110, 111, 42))\n self.label_2.setStyleSheet(\"font-size:26px;\")\n self.label_2.setObjectName(\"label_2\")\n self.lineEdit_3 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_3.setGeometry(QtCore.QRect(200, 110, 241, 33))\n 
self.lineEdit_3.setStyleSheet(\"color:white;\")\n self.lineEdit_3.setInputMask(\"\")\n self.lineEdit_3.setText(\"\")\n self.lineEdit_3.setEchoMode(QtWidgets.QLineEdit.Normal)\n self.lineEdit_3.setObjectName(\"lineEdit_3\")\n self.pushButton_8 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_8.setGeometry(QtCore.QRect(200, 190, 111, 41))\n self.pushButton_8.setObjectName(\"pushButton_8\")\n self.tabWidget.addTab(self.tab_2, \"\")\n\n self.pushButton_8.clicked.connect(self.store_password)\n\n # Update password\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName(\"tab_3\")\n self.lineEdit_4 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_4.setGeometry(QtCore.QRect(200, 40, 241, 33))\n self.lineEdit_4.setStyleSheet(\"color:white;\")\n self.lineEdit_4.setObjectName(\"lineEdit_4\")\n self.lineEdit_5 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_5.setGeometry(QtCore.QRect(200, 110, 241, 33))\n self.lineEdit_5.setStyleSheet(\"color:white;\")\n self.lineEdit_5.setObjectName(\"lineEdit_5\")\n self.label_3 = QtWidgets.QLabel(self.tab_3)\n self.label_3.setGeometry(QtCore.QRect(90, 40, 91, 42))\n self.label_3.setStyleSheet(\"font-size:26px;\")\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(self.tab_3)\n self.label_4.setGeometry(QtCore.QRect(60, 110, 111, 42))\n self.label_4.setStyleSheet(\"font-size:26px;\")\n self.label_4.setObjectName(\"label_4\")\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_3.setGeometry(QtCore.QRect(200, 190, 121, 41))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.tabWidget.addTab(self.tab_3, \"\")\n\n self.pushButton_3.clicked.connect(self.update_password)\n\n # Delete password\n self.tab_4 = QtWidgets.QWidget()\n self.tab_4.setObjectName(\"tab_4\")\n self.lineEdit_6 = QtWidgets.QLineEdit(self.tab_4)\n self.lineEdit_6.setGeometry(QtCore.QRect(200, 80, 231, 33))\n self.lineEdit_6.setStyleSheet(\"color:white;\")\n self.lineEdit_6.setObjectName(\"lineEdit_6\")\n self.label_5 = QtWidgets.QLabel(self.tab_4)\n self.label_5.setGeometry(QtCore.QRect(90, 80, 81, 42))\n self.label_5.setStyleSheet(\"font-size:26px;\")\n self.label_5.setObjectName(\"label_5\")\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_4)\n self.pushButton_4.setGeometry(QtCore.QRect(190, 190, 131, 41))\n self.pushButton_4.setObjectName(\"pushButton_4\")\n self.tabWidget.addTab(self.tab_4, \"\")\n\n self.pushButton_4.clicked.connect(self.delete_password)\n\n # Download sequence\n self.tab_5 = QtWidgets.QWidget()\n self.tab_5.setObjectName(\"tab_5\")\n self.pushButton_5 = QtWidgets.QPushButton(self.tab_5)\n self.pushButton_5.setGeometry(QtCore.QRect(130, 180, 241, 41))\n self.pushButton_5.setObjectName(\"pushButton_5\")\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_5)\n self.lineEdit_8.setGeometry(QtCore.QRect(100, 100, 311, 33))\n self.lineEdit_8.setStyleSheet(\"color:white;\")\n self.lineEdit_8.setEchoMode(QtWidgets.QLineEdit.Password)\n self.lineEdit_8.setObjectName(\"lineEdit_8\")\n self.label_6 = QtWidgets.QLabel(self.tab_5)\n self.label_6.setGeometry(QtCore.QRect(130, 30, 251, 42))\n self.label_6.setStyleSheet(\"font-size:26px;\")\n self.label_6.setObjectName(\"label_6\")\n self.tabWidget.addTab(self.tab_5, \"\")\n\n self.pushButton_5.clicked.connect(self.download_sequence)\n\n # Backup codes\n self.tab_6 = QtWidgets.QWidget()\n self.tab_6.setObjectName(\"tab_6\")\n self.pushButton_6 = QtWidgets.QPushButton(self.tab_6)\n self.pushButton_6.setGeometry(QtCore.QRect(280, 80, 191, 41))\n 
self.pushButton_6.setObjectName(\"pushButton_6\")\n self.listWidget = QtWidgets.QListWidget(self.tab_6)\n self.listWidget.setGeometry(QtCore.QRect(40, 60, 191, 151))\n self.listWidget.setStyleSheet(\"background-color:white;\")\n self.listWidget.setObjectName(\"listWidget\")\n self.pushButton_7 = QtWidgets.QPushButton(self.tab_6)\n self.pushButton_7.setGeometry(QtCore.QRect(280, 160, 191, 41))\n self.pushButton_7.setObjectName(\"pushButton_7\")\n self.tabWidget.addTab(self.tab_6, \"\")\n\n self.pushButton_6.clicked.connect(self.view_backup_codes)\n self.pushButton_7.clicked.connect(self.download_backup_codes)\n\n # Delete account\n self.tab_7 = QtWidgets.QWidget()\n self.tab_7.setObjectName(\"tab_7\")\n self.label_8 = QtWidgets.QLabel(self.tab_7)\n self.label_8.setGeometry(QtCore.QRect(130, 30, 251, 42))\n self.label_8.setStyleSheet(\"font-size:26px;\")\n self.label_8.setObjectName(\"label_8\")\n self.lineEdit_9 = QtWidgets.QLineEdit(self.tab_7)\n self.lineEdit_9.setGeometry(QtCore.QRect(100, 100, 311, 33))\n self.lineEdit_9.setStyleSheet(\"color:white;\")\n self.lineEdit_9.setEchoMode(QtWidgets.QLineEdit.Password)\n self.lineEdit_9.setObjectName(\"lineEdit_9\")\n self.pushButton_9 = QtWidgets.QPushButton(self.tab_7)\n self.pushButton_9.setGeometry(QtCore.QRect(190, 180, 131, 41))\n self.pushButton_9.setObjectName(\"pushButton_9\")\n self.tabWidget.addTab(self.tab_7, \"\")\n\n self.toolButton_2 = QtWidgets.QToolButton(Form)\n self.toolButton_2.setGeometry(QtCore.QRect(210, 10, 101, 101))\n self.toolButton_2.setStyleSheet(\"\")\n self.toolButton_2.setText(\"\")\n self.toolButton_2.setIcon(icon)\n self.toolButton_2.setIconSize(QtCore.QSize(45, 50))\n self.toolButton_2.setObjectName(\"toolButton_2\")\n\n self.retranslateUi(Form)\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(Form)", "def setup_gui(self):\n central_widget = QWidget(self)\n central_widget.setObjectName('central_widget')\n self.label = QLabel('Hello World')\n self.input_field = QLineEdit()\n change_button = QPushButton('Change text')\n close_button = QPushButton('close')\n quit_button = QPushButton('quit')\n central_layout = QVBoxLayout()\n button_layout = QHBoxLayout()\n central_layout.addWidget(self.label)\n central_layout.addWidget(self.input_field)\n # a separate layout to display buttons horizontal\n button_layout.addWidget(change_button)\n button_layout.addWidget(close_button)\n button_layout.addWidget(quit_button)\n central_layout.addLayout(button_layout)\n central_widget.setLayout(central_layout)\n self.setCentralWidget(central_widget)\n # create a system tray icon. 
Uncomment the second form, to have an\n # icon assigned, otherwise you will only be seeing an empty space in\n # system tray\n self.systemtrayicon = QSystemTrayIcon(self)\n self.systemtrayicon.show()\n # set a fancy icon\n self.systemtrayicon.setIcon(QIcon.fromTheme('help-browser'))\n change_button.clicked.connect(self.change_text)\n quit_button.clicked.connect(QApplication.instance().quit)\n close_button.clicked.connect(self.hide)\n # show main window, if the system tray icon was clicked\n self.systemtrayicon.activated.connect(self.icon_activated)", "def _load_ui(self):\n load_ui(\"about_dlg.ui\", self)\n\n self.title_label = self.findChild(QLabel, \"title_label\")\n self.title_label.setText(f\"Friendly Pics 2 v{__version__}\")\n\n self.runtime_env_label = self.findChild(QLabel, \"runtime_env_label\")\n if is_mac_app_bundle():\n self.runtime_env_label.setText(\"Running as MacOS app bundle\")\n elif is_pyinstaller_bundle():\n self.runtime_env_label.setText(\"Running as a pyinstaller binary\")\n else:\n self.runtime_env_label.setText(\"Running under conventional Python environment\")\n\n self.gui_settings_label = self.findChild(QLabel, \"gui_settings_label\")\n self.gui_settings_label.setText(f\"<b>GUI Settings:</b> {self._settings.fileName()}\")\n\n self.gui_settings_clear_button = self.findChild(QPushButton, \"gui_settings_clear_button\")\n self.gui_settings_clear_button.clicked.connect(self._clear_gui_settings)\n\n self.app_settings_label = self.findChild(QLabel, \"app_settings_label\")\n self.app_settings_label.setText(f\"<b>App Settings:</b> {self._app_settings.path}\")\n\n # Center the about box on the parent window\n parent_geom = self.parent().geometry()\n self.move(parent_geom.center() - self.rect().center())", "def main(args):\n root = tkinter.Tk()\n root.title(\"OGRE Editor\")\n app = App(root)\n for path in args:\n app.load_signal_file(path)\n root.mainloop()", "def resources(self):", "def resource_path(relative_path) :\n\n try :\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except :\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def connectSignals(self):\n QtCore.QObject.connect(self.ui.main_tabs, QtCore.SIGNAL(\"currentChanged(int)\"),\n self.onTabChanged)\n QtCore.QObject.connect(self.ui.main_edit, QtCore.SIGNAL(\"textChanged()\"),\n self.onTextChanged)\n\n QtCore.QObject.connect(self.ui.actionNew_File, QtCore.SIGNAL(\"triggered()\"),\n self.newFile)\n QtCore.QObject.connect(self.ui.actionOpen_File, QtCore.SIGNAL(\"triggered()\"),\n self.openFile)\n QtCore.QObject.connect(self.ui.actionSave, QtCore.SIGNAL(\"triggered()\"),\n self.saveFile)\n QtCore.QObject.connect(self.ui.actionPrint, QtCore.SIGNAL(\"triggered()\"),\n self.printFile)\n QtCore.QObject.connect(self.ui.actionExit, QtCore.SIGNAL(\"triggered()\"),\n lambda: self.quit('data/recent_files.txt'))\n\n QtCore.QObject.connect(self.ui.actionCopy, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.copy)\n QtCore.QObject.connect(self.ui.actionCut, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.cut)\n QtCore.QObject.connect(self.ui.actionPaste, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.paste)\n QtCore.QObject.connect(self.ui.actionRedo, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.redo)\n QtCore.QObject.connect(self.ui.actionUndo, QtCore.SIGNAL(\"triggered()\"),\n self.ui.main_edit.undo)\n QtCore.QObject.connect(self.ui.actionPreferences, QtCore.SIGNAL(\"triggered()\"),\n self.createPrefWindow)\n\n 
QtCore.QObject.connect(self.ui.actionAbout, QtCore.SIGNAL(\"triggered()\"),\n self.createAboutWindow)\n QtCore.QObject.connect(self.ui.actionHelp, QtCore.SIGNAL(\"triggered()\"),\n self.createHelpWindow)", "def createMirrorOfModule_UI(self):\n\n # copy the settings of the module\n self.copySettings()\n\n # get basename and classname\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n className = cmds.getAttr(networkNode + \".moduleType\")\n\n # launch a UI to get the name information\n self.mirrorWindow = QtWidgets.QMainWindow()\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/mainScheme.qss\")\n f = open(styleSheetFile, \"r\")\n style = f.read()\n f.close()\n\n self.mirrorWindow.setStyleSheet(style)\n\n # size policies\n mainSizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mirrorWindow.setCentralWidget(self.mainWidget)\n\n # set qt object name\n self.mirrorWindow.setObjectName(\"ART_createMirrorModuleUI\")\n self.mirrorWindow.setWindowTitle(\"Create Mirror Module\")\n\n # create the mainLayout for the rig creator UI\n self.mainLayout = QtWidgets.QVBoxLayout(self.mainWidget)\n self.mainLayout.setContentsMargins(0, 0, 0, 0)\n\n self.mirrorWindow.resize(300, 150)\n self.mirrorWindow.setSizePolicy(mainSizePolicy)\n self.mirrorWindow.setMinimumSize(QtCore.QSize(300, 150))\n self.mirrorWindow.setMaximumSize(QtCore.QSize(300, 150))\n\n # create the background image\n self.frame = QtWidgets.QFrame()\n self.mainLayout.addWidget(self.frame)\n\n # create the layout for the widgets\n self.widgetLayout = QtWidgets.QVBoxLayout(self.frame)\n\n # create the prefix pair of fields\n self.prefixForm = QtWidgets.QFormLayout()\n self.widgetLayout.addLayout(self.prefixForm)\n\n self.prefixLabel = QtWidgets.QLabel(\"Prefix: \")\n self.prefixForm.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.prefixLabel)\n\n self.prefix = QtWidgets.QLineEdit()\n self.prefixForm.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.prefix)\n\n # hookup signal/slot connection\n self.prefix.textChanged.connect(partial(self.updatePreview, baseName))\n\n # create the suffix pair of fields\n self.suffixForm = QtWidgets.QFormLayout()\n self.widgetLayout.addLayout(self.suffixForm)\n\n self.suffixLabel = QtWidgets.QLabel(\"Suffix: \")\n self.suffixForm.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.suffixLabel)\n\n self.suffix = QtWidgets.QLineEdit()\n self.suffixForm.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.suffix)\n\n # hookup signal/slot connection\n self.suffix.textChanged.connect(partial(self.updatePreview, baseName))\n\n # spacer\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.widgetLayout.addItem(spacerItem)\n\n # realtime preview of final module name\n self.previewForm = QtWidgets.QFormLayout()\n self.widgetLayout.addLayout(self.previewForm)\n self.previewLabel = QtWidgets.QLabel(\"Preview: \")\n self.previewName = QtWidgets.QLabel(baseName)\n self.previewName.setMinimumSize(QtCore.QSize(200, 20))\n self.previewName.setMaximumSize(QtCore.QSize(200, 20))\n self.previewName.setAlignment(QtCore.Qt.AlignHCenter)\n self.previewForm.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.previewLabel)\n self.previewForm.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.previewName)\n\n # set preview font\n font = QtGui.QFont()\n 
font.setPointSize(12)\n self.previewName.setFont(font)\n\n spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.widgetLayout.addItem(spacerItem1)\n\n # create button\n self.createButton = QtWidgets.QPushButton(\"CREATE\")\n self.createButton.setObjectName(\"blueButton\")\n self.widgetLayout.addWidget(self.createButton)\n self.createButton.setMinimumSize(QtCore.QSize(285, 40))\n self.createButton.setMaximumSize(QtCore.QSize(285, 40))\n self.createButton.setSizePolicy(mainSizePolicy)\n font = QtGui.QFont()\n font.setPointSize(12)\n self.createButton.setFont(font)\n\n # hookup signal/slot on create button\n self.createButton.clicked.connect(self.createMirrorModule)\n\n # show the window\n self.mirrorWindow.show()", "def init_gui(self):\n # This is the main layout.\n main_layout = QtGui.QVBoxLayout(self)\n\n # This is the start button.\n start_btn = QtGui.QPushButton('Start Turntable')\n start_btn.clicked.connect(self.init_turn)\n\n # This is the file browser button.\n brw_btn = QtGui.QPushButton('Browse')\n brw_btn.clicked.connect(self.select_dir)\n\n # This is the render settings drop down.\n self.setting_dropdown = QtGui.QComboBox()\n self.setting_dropdown.addItems(['Low','Medium','High','Show','Custom'])\n\n # These are the line edits.\n self.save_loc = QtGui.QLineEdit()\n self.start_frm_le = QtGui.QLineEdit()\n self.end_frm_le = QtGui.QLineEdit()\n\n # This is the checkbox for rendering wireframe.\n self.ren_cb = QtGui.QCheckBox('Wireframe')\n\n # This is the radio btn group.\n self.rad_grp = QtGui.QButtonGroup()\n rd_01 = QtGui.QRadioButton('Surface')\n rd_02 = QtGui.QRadioButton('Model')\n rd_01.setObjectName('surface')\n rd_02.setObjectName('model')\n self.rad_grp.addButton(rd_01)\n self.rad_grp.addButton(rd_02)\n\n discipline = tl.discipline_check()\n if discipline == 'surface':\n rd_01.toggle()\n else:\n rd_02.toggle()\n\n # These are labels.\n loc_lbl = QtGui.QLabel('Location:')\n start_frm_lbl = QtGui.QLabel('Start Frame:')\n end_frm_lbl = QtGui.QLabel('End Frame:')\n\n # These are the different layout variables\n h_box_01 = QtGui.QHBoxLayout()\n h_box_02 = QtGui.QHBoxLayout()\n h_box_03 = QtGui.QHBoxLayout()\n\n v_box_01 = QtGui.QVBoxLayout()\n\n # This adds the widgets to the layouts.\n v_box_01.addWidget(rd_01)\n v_box_01.addWidget(rd_02)\n\n h_box_01.addLayout(v_box_01)\n h_box_01.addWidget(self.ren_cb)\n h_box_01.addWidget(self.setting_dropdown)\n\n h_box_02.addWidget(loc_lbl)\n h_box_02.addWidget(self.save_loc)\n h_box_02.addWidget(brw_btn)\n\n h_box_03.addWidget(start_btn)\n h_box_03.addWidget(start_frm_lbl)\n h_box_03.addWidget(self.start_frm_le)\n h_box_03.addWidget(end_frm_lbl)\n h_box_03.addWidget(self.end_frm_le)\n\n # This adds the layouts to the window\n main_layout.addLayout(h_box_01)\n main_layout.addLayout(h_box_02)\n main_layout.addLayout(h_box_03)\n\n # This is the main window.\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('Turntable Tool')\n self.show()", "def connectSignals(self):\n QtCore.QObject.connect(self.button_ok, QtCore.SIGNAL(\"pressed()\"),\n lambda: self.okOptions(\"data/options.txt\"))\n QtCore.QObject.connect(self.button_apply, QtCore.SIGNAL(\"pressed()\"),\n lambda: self.applyOptions(\"data/options.txt\"))\n QtCore.QObject.connect(self.button_cancel, QtCore.SIGNAL(\"pressed()\"),\n self.close)", "def initGui(self):\n\n icon_path = ':/plugins/PluginChoucas/img/loaddata.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Chargement de données de montagne'),\n 
callback=self.run,\n parent=self.iface.mainWindow())\n \n icon_path = ':/plugins/PluginChoucas/img/loupe.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Recherche par mots sur clé'),\n callback=self.search,\n parent=self.iface.mainWindow())\n \n icon_path = ':/plugins/PluginChoucas/img/EmpriseZoneEtude.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Emprise ZE'),\n callback=self.loadZoneEtude,\n parent=self.iface.mainWindow())\n \n \n #icon_path = ':/plugins/PluginChoucas/img/landmark.png'\n #self.add_action(\n # icon_path,\n # text=self.tr(u'Afficher la description de l itinéraire'),\n # callback=self.displayDescription,\n # parent=self.iface.mainWindow())", "def GetResources(self):\n dirn = os.path.dirname(os.path.dirname(__file__))\n icon = os.path.join(dirn, 'resources', 'EnVis_ifc_open.svg')\n\n return {'Pixmap': icon,\n 'MenuText': QT_TRANSLATE_NOOP(\"EnVis_Import\",\n \"Import IFC file\"),\n 'ToolTip': QT_TRANSLATE_NOOP(\"EnVis_Import\",\n \"Import IFC Elements useful for energy calculations\")}", "def _setup_saving_ui(self):\n encoder_dict = Encoder.get_encoder_dict()\n form_layout = QFormLayout()\n\n layout = QVBoxLayout()\n layout.addLayout(form_layout)\n\n location_layout = QHBoxLayout()\n location_label = QLabel(\"Where to save images/videos:\", self)\n self.location_edit = QLineEdit(self)\n location_dialog_button = QPushButton(\"...\", self)\n location_dialog_button.clicked.connect(self.open_file_dialog)\n location_layout.addWidget(self.location_edit)\n location_layout.addWidget(location_dialog_button)\n\n # maintain descriptions as own labels\n # pyqt seems to loose the descriptions somewhere\n # when simple strings are used or the qlabel does not have self as owner\n form_layout.addRow(location_label,\n location_layout)\n\n self.image_type_combobox = QComboBox(self)\n for key, value in encoder_dict.items():\n if value.encoder_type == Encoder.MediaType.image:\n self.image_type_combobox.addItem(key)\n image_type_label = QLabel(\"Save images as:\")\n self.image_type_combobox.currentIndexChanged['QString'].connect(self.image_name_suffix_changed)\n\n form_layout.addRow(image_type_label,\n self.image_type_combobox)\n if self.enabled_video:\n self.video_type_combobox = QComboBox(self)\n for key, value in encoder_dict.items():\n if value.encoder_type == Encoder.MediaType.video:\n self.video_type_combobox.addItem(key)\n self.video_type_combobox.currentIndexChanged['QString'].connect(self.video_name_suffix_changed)\n\n video_type_label = QLabel(\"Save videos as:\", self)\n form_layout.addRow(video_type_label,\n self.video_type_combobox)\n\n image_name_groupbox = QGroupBox(\"Image File Names\")\n groupbox_layout = QFormLayout()\n image_name_groupbox.setLayout(groupbox_layout)\n\n self.image_name_preview = QLabel(\"<USER-PREFIX>-<SERIAL>-<FORMAT>-<TIMESTAMP>-<COUNTER>.png\")\n self.image_name_preview_description = QLabel(\"Images will be named like:\")\n groupbox_layout.addRow(self.image_name_preview_description,\n self.image_name_preview)\n\n self.image_name_prefix = QLineEdit()\n self.image_name_prefix.textChanged.connect(self.image_name_prefix_changed)\n self.image_name_prefix.setMaxLength(100)\n\n self.image_name_prefix_description = QLabel(\"User Prefix:\", self)\n groupbox_layout.addRow(self.image_name_prefix_description,\n self.image_name_prefix)\n\n self.image_name_serial = QCheckBox(self)\n self.image_name_serial.toggled.connect(self.image_name_properties_toggled)\n self.image_name_serial_description = QLabel(\"Include Serial:\")\n 
groupbox_layout.addRow(self.image_name_serial_description,\n self.image_name_serial)\n\n self.image_name_format = QCheckBox(self)\n self.image_name_format.toggled.connect(self.image_name_properties_toggled)\n\n self.image_name_format_description = QLabel(\"Include Format:\")\n groupbox_layout.addRow(self.image_name_format_description,\n self.image_name_format)\n\n self.image_name_counter = QCheckBox(self)\n self.image_name_counter.toggled.connect(self.image_name_properties_toggled)\n self.image_name_counter_description = QLabel(\"Include Counter:\")\n groupbox_layout.addRow(self.image_name_counter_description,\n self.image_name_counter)\n\n self.image_name_counter_box = QSpinBox(self)\n self.image_name_counter_box.setRange(1, 10)\n self.image_name_counter_box.valueChanged.connect(self.image_name_counter_changed)\n self.image_name_counter_box_description = QLabel(\"Counter Size:\")\n groupbox_layout.addRow(self.image_name_counter_box_description,\n self.image_name_counter_box)\n\n self.image_name_counter.toggled.connect(self.toggle_image_counter_box_availability)\n self.image_name_counter.toggled.connect(self.image_name_properties_toggled)\n\n self.image_name_timestamp = QCheckBox(self)\n self.image_name_timestamp.toggled.connect(self.image_name_properties_toggled)\n self.image_name_timestamp_description = QLabel(\"Include Timestamp:\")\n groupbox_layout.addRow(self.image_name_timestamp_description,\n self.image_name_timestamp)\n\n layout.addWidget(image_name_groupbox)\n\n video_groupbox = QGroupBox(\"Video File Names\")\n\n video_layout = QFormLayout()\n video_groupbox.setLayout(video_layout)\n\n self.video_name_preview = QLabel(\"<USER-PREFIX>-<SERIAL>-<FORMAT>-<TIMESTAMP>-<COUNTER>.png\")\n self.video_name_preview_description = QLabel(\"Videos will be named like:\")\n video_layout.addRow(self.video_name_preview_description,\n self.video_name_preview)\n\n self.video_name_prefix = QLineEdit()\n self.video_name_prefix.textChanged.connect(self.video_name_prefix_changed)\n self.video_name_prefix.setMaxLength(100)\n\n self.video_name_prefix_description = QLabel(\"User Prefix:\", self)\n video_layout.addRow(self.video_name_prefix_description,\n self.video_name_prefix)\n\n self.video_name_serial = QCheckBox(self)\n self.video_name_serial.toggled.connect(self.video_name_properties_toggled)\n self.video_name_serial_description = QLabel(\"Include Serial:\")\n video_layout.addRow(self.video_name_serial_description,\n self.video_name_serial)\n\n self.video_name_format = QCheckBox(self)\n self.video_name_format.toggled.connect(self.video_name_properties_toggled)\n\n self.video_name_format_description = QLabel(\"Include Format:\")\n video_layout.addRow(self.video_name_format_description,\n self.video_name_format)\n\n self.video_name_counter = QCheckBox(self)\n self.video_name_counter.toggled.connect(self.video_name_properties_toggled)\n self.video_name_counter_description = QLabel(\"Include Counter:\")\n video_layout.addRow(self.video_name_counter_description,\n self.video_name_counter)\n\n self.video_name_counter_box = QSpinBox(self)\n self.video_name_counter_box.setRange(1, 10)\n self.video_name_counter_box.valueChanged.connect(self.video_name_counter_changed)\n self.video_name_counter_box_description = QLabel(\"Counter Size:\")\n video_layout.addRow(self.video_name_counter_box_description,\n self.video_name_counter_box)\n\n self.video_name_counter.toggled.connect(self.toggle_video_counter_box_availability)\n self.video_name_counter.toggled.connect(self.video_name_properties_toggled)\n\n 
self.video_name_timestamp = QCheckBox(self)\n self.video_name_timestamp.toggled.connect(self.video_name_properties_toggled)\n self.video_name_timestamp_description = QLabel(\"Include Timestamp:\")\n video_layout.addRow(self.video_name_timestamp_description,\n self.video_name_timestamp)\n\n layout.addWidget(video_groupbox)\n\n self.saving_widget.setLayout(layout)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS \n base_path = sys._MEIPASS\n _BINARY_DIST = True\n #print sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def makeConnections(self):\n\t\tself.connect(self.addTextureToOcc_btn, QtCore.SIGNAL(\"clicked()\"), self.addTextureToOcc)\n\t\tself.connect(self.removeTextureToOcc_btn, QtCore.SIGNAL(\"clicked()\"), self.removeTextureToOcc)\n\t\tself.connect(self.addIrisToOcc_btn, QtCore.SIGNAL(\"clicked()\"), self.addIrisToOcc)\n\t\tself.connect(self.removeIrisToOcc_btn, QtCore.SIGNAL(\"clicked()\"), self.removeIrisToOcc)\n\t\tself.connect(self.addHideOcc_btn, QtCore.SIGNAL(\"clicked()\"), self.addHideForOcc)\n\t\tself.connect(self.removeHideOcc_btn, QtCore.SIGNAL(\"clicked()\"), self.removeHideForOcc)", "def initUI(self):\n language_help = _('''<h4>Language code.</h4>\n<p>This will be transmitted as part of the requst sent to the\nsites. As some sites only support one language, this is also used to\ndecide where to send the requests. Use a standard language code\nhere. Using invalid values or codes of unsupported languages will\nresult in no downloads. Do <em>not</em> use domain codes (E.g. use\n<code>zh</code> rather than <code>cn</code> for Chinese.)</p>''')\n self.setWindowTitle(_('Anki – Download audio'))\n self.setWindowIcon(QIcon(\":/icons/anki.png\"))\n layout = QVBoxLayout()\n self.setLayout(layout)\n edit_word_head = QLabel()\n kanji_et = _('''\\\n<h4>Requests to send to the download sites</h4>\n<p>In the split edit fields, set the kanji on the left, the\nkana on the right.</p>\n''')\n base_et = _('''\\\n<h4>Requests to send to the download sites</h4>\n<p>In split edit fields, set the expression (base) on the left, the\nreading (ruby) on the right.</p>\n''')\n single_et = _('''\\\n<h4>Requests to send to the download sites</h4>\n''')\n # Now decide which help text to show.\n # First, decide if we have any split fields.\n if any(f_data.split for f_data in self.field_data_list):\n if self.language_code and self.language_code.startswith('ja'):\n # Japanese\n edit_word_head.setText(kanji_et)\n else:\n # Chinese should not happen at the moment\n edit_word_head.setText(base_et)\n else:\n edit_word_head.setText(single_et)\n layout.addWidget(edit_word_head)\n self.create_data_rows(layout)\n line = QFrame(self)\n line.setFrameShape(QFrame.HLine)\n line.setFrameShadow(QFrame.Sunken)\n layout.addWidget(line)\n lcode_head = QLabel(_('''<h4>Language code</h4>'''))\n layout.addWidget(lcode_head)\n lang_hlayout = QHBoxLayout()\n lc_label = QLabel(_('Language code:'), self)\n lang_hlayout.addWidget(lc_label)\n lc_label.setToolTip(language_help)\n self.language_code_lineedit = QLineEdit(self)\n try:\n self.language_code_lineedit.setText(self.language_code)\n except:\n self.language_code_lineedit.setText(default_audio_language_code)\n lang_hlayout.addWidget(self.language_code_lineedit)\n self.language_code_lineedit.setToolTip(language_help)\n layout.addLayout(lang_hlayout)\n dialog_buttons = QDialogButtonBox(self)\n dialog_buttons.addButton(QDialogButtonBox.Cancel)\n 
dialog_buttons.addButton(QDialogButtonBox.Ok)\n dialog_buttons.accepted.connect(self.accept)\n dialog_buttons.rejected.connect(self.reject)\n layout.addWidget(dialog_buttons)", "def initGui(self):\n\n icon_path = ':/plugins/ActualizacionCatastral/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Act. Catastral'),\n callback=self.run,\n parent=self.iface.mainWindow())", "def js():\n with lcd(BASEDIR):\n js_ext = (\n 'submodules/jquery-cookie/src/jquery.cookie.js',\n 'submodules/jquery-treegrid/js/jquery.treegrid.js',\n 'submodules/bootstrap/dist/js/bootstrap.js',\n )\n js_own = (\n 'js/variables.js',\n 'js/bmf-autocomplete.js',\n 'js/bmf-calendar.js',\n 'js/bmf-editform.js',\n 'js/bmf-inlineform.js',\n 'js/bmf-buildform.js',\n 'js/menu.js',\n )\n\n local('cp submodules/bootstrap/dist/js/bootstrap.min.js djangobmf/static/djangobmf/js/')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.cookie.min.js submodules/jquery-cookie/src/jquery.cookie.js')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.treegrid.min.js submodules/jquery-treegrid/js/jquery.treegrid.js')\n\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_ext + js_own))\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/djangobmf.min.js djangobmf/static/djangobmf/js/djangobmf.js')\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_own))", "def setup_ui(self):\n\n self.setWindowTitle(\"PyDM Symbol Widget Editor\")\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.setContentsMargins(5, 5, 5, 5)\n vlayout.setSpacing(5)\n self.setLayout(vlayout)\n\n hlayout = QtWidgets.QHBoxLayout()\n hlayout.setContentsMargins(0, 0, 0, 0)\n hlayout.setSpacing(5)\n vlayout.addLayout(hlayout)\n\n # Creating the widgets for the buttons to add and\n # remove symbols\n list_frame = QtWidgets.QFrame(parent=self)\n list_frame.setMinimumHeight(300)\n list_frame.setMinimumWidth(300)\n list_frame.setLineWidth(1)\n list_frame.setFrameShadow(QtWidgets.QFrame.Raised)\n list_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n lf_layout = QtWidgets.QVBoxLayout()\n list_frame.setLayout(lf_layout)\n\n lf_btn_layout = QtWidgets.QHBoxLayout()\n lf_btn_layout.setContentsMargins(0, 0, 0, 0)\n lf_btn_layout.setSpacing(5)\n\n self.btn_add_symbol = QtWidgets.QPushButton(parent=self)\n self.btn_add_symbol.setAutoDefault(False)\n self.btn_add_symbol.setDefault(False)\n self.btn_add_symbol.setText(\"Add Symbol\")\n self.btn_add_symbol.clicked.connect(self.add_symbol)\n\n self.btn_del_symbol = QtWidgets.QPushButton(parent=self)\n self.btn_del_symbol.setAutoDefault(False)\n self.btn_del_symbol.setDefault(False)\n self.btn_del_symbol.setText(\"Remove Symbol\")\n self.btn_del_symbol.clicked.connect(self.del_symbol)\n\n lf_btn_layout.addWidget(self.btn_add_symbol)\n lf_btn_layout.addWidget(self.btn_del_symbol)\n\n lf_layout.addLayout(lf_btn_layout)\n\n # Table containing the state/filename pairs which\n # will display the different symbols\n self.tbl_symbols = QtWidgets.QTableWidget()\n self.tbl_symbols.setShowGrid(True)\n self.tbl_symbols.setCornerButtonEnabled(False)\n headers = [\"State\", \"File\"]\n self.tbl_symbols.setColumnCount(len(headers))\n self.tbl_symbols.setHorizontalHeaderLabels(headers)\n header = self.tbl_symbols.horizontalHeader()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)\n self.tbl_symbols.itemSelectionChanged.connect(self.load_from_list)\n 
self.tbl_symbols.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tbl_symbols.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)\n self.tbl_symbols.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tbl_symbols.verticalHeader().setVisible(False)\n lf_layout.addWidget(self.tbl_symbols)\n\n hlayout.addWidget(list_frame)\n\n # Buttons to save or cancel changes made\n buttons_layout = QtWidgets.QHBoxLayout()\n save_btn = QtWidgets.QPushButton(\"Save\", parent=self)\n save_btn.setAutoDefault(False)\n save_btn.setDefault(False)\n save_btn.clicked.connect(self.saveChanges)\n cancel_btn = QtWidgets.QPushButton(\"Cancel\", parent=self)\n cancel_btn.setAutoDefault(False)\n cancel_btn.setDefault(False)\n cancel_btn.clicked.connect(self.cancelChanges)\n buttons_layout.addStretch()\n buttons_layout.addWidget(cancel_btn)\n buttons_layout.addWidget(save_btn)\n\n vlayout.addLayout(buttons_layout)\n\n # Creating the widgets that we will use to compose the\n # symbol parameters\n self.frm_edit = QtWidgets.QFrame()\n self.frm_edit.setEnabled(False)\n self.frm_edit.setLineWidth(1)\n self.frm_edit.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frm_edit.setFrameShape(QtWidgets.QFrame.StyledPanel)\n\n frm_edit_layout = QtWidgets.QVBoxLayout()\n self.frm_edit.setLayout(frm_edit_layout)\n\n hlayout.addWidget(self.frm_edit)\n\n edit_name_layout = QtWidgets.QFormLayout()\n edit_name_layout.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)\n lbl_state = QtWidgets.QLabel(\"State:\")\n self.txt_state = QtWidgets.QLineEdit()\n self.txt_state.editingFinished.connect(self.state_changed)\n edit_name_layout.addRow(lbl_state, self.txt_state)\n lbl_file = QtWidgets.QLabel(\"File:\")\n self.txt_file = QtWidgets.QLineEdit()\n self.txt_file.textEdited.connect(self.file_changed)\n self.txt_file.returnPressed.connect(self.file_changed)\n edit_name_layout.addRow(lbl_file, self.txt_file)\n\n self.lbl_image = QtWidgets.QLabel()\n self.lbl_image.setWordWrap(True)\n self.lbl_image.setAlignment(Qt.AlignCenter)\n edit_name_layout.addRow(self.lbl_image)\n\n frm_edit_layout.addLayout(edit_name_layout)", "def configure(self):\n\n modules = self.project.pyqt_modules\n\n for name, b in self._buttons.items():\n b.explicitly_required = (name in modules)\n\n self._set_implicit_requirements()", "def connectButtons(self):\n\n self.window.buttonLogout.clicked.connect(\n self.logout)\n\n self.window.buttonAcquisition.clicked.connect(\n self.showAcquisitionWidget)\n \n self.window.buttonCalibInt.clicked.connect(\n self.showIntrinsicCalibrationWidget)\n\n self.window.buttonCalibExt.clicked.connect(\n self.showExtrinsicCalibrationWidget)\n\n self.window.buttonClean.clicked.connect(\n self.cleanWorkspace)\n\n self.window.buttonInspectionAnalyzer.clicked.connect(\n self.showInspctionAnalyzer)", "def return_global_style_sheet():\n style_sheet = '''\n QLabel {\n font: 12pt \"Verdana\";\n margin-left: 5px;\n background-color: transparent;\n }\n QPushButton {\n background-color: #3D3D3D;\n border-style: outset;\n border: 2px solid #555555;\n border-radius: 4px;\n min-width: 20px;\n icon-size: 20px;\n font: bold 12pt \"Verdana\";\n margin: 10px;\n padding:6px 6px;\n color: #FFFFFF;\n }\n QPushButton:pressed {\n border-style: inset;\n color: #3D3D3D;\n background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n stop: 0 #dadbde, stop: 1 #f6f7fa);\n }\n QPushButton:disabled {\n background-color: #878787;\n border-style: outset;\n border: 2px solid #989898;\n border-radius: 4px;\n min-width: 20px;\n 
icon-size: 20px;\n font: bold 12pt \"Verdana\";\n margin: 10px;\n padding:5px 5px;\n color: #a9a9a9;\n }\n\n QMainWindow {\n background-color: #dadbde;\n }\n QMainWindow::separator {\n background: #dadbde;\n width: 1px; /* when vertical */\n height: 1px; /* when horizontal */\n }\n QMainWindow::separator:hover {\n background: red;\n }\n\n QTabWidget::pane { /* The tab widget frame */\n border-top: 2px solid #dadbde;\n }\n\n QTabWidget::tab-bar {\n left: 5px; /* move to the right by 5px */\n }\n\n /* Style the tab using the tab sub-control. Note that\n it reads QTabBar _not_ QTabWidget */\n QTabBar {\n font: bold 12pt \"Verdana\";\n }\n\n /* Style the tab using the tab sub-control. Note that\n it reads QTabBar _not_ QTabWidget */\n QTabBar::tab {\n background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,\n stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);\n border: 2px solid #C4C4C3;\n border-bottom-color: #dadbde; /* same as the pane color */\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n padding: 6px;\n }\n\n QTabBar::tab:selected, QTabBar::tab:hover {\n background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n stop: 0 #fafafa, stop: 0.4 #f4f4f4,\n stop: 0.5 #e7e7e7, stop: 1.0 #fafafa);\n }\n\n QTabBar::tab:selected {\n border-color: #9B9B9B;\n border-bottom-color: #dadbde; /* same as pane color */\n }\n\n QTabBar::tab:!selected {\n margin-top: 2px; /* make non-selected tabs look smaller */\n }\n\n /* make use of negative margins for overlapping tabs */\n QTabBar::tab:selected {\n /* expand/overlap to the left and right by 4px */\n margin-left: -4px;\n margin-right: -4px;\n }\n\n QTabBar::tab:first:selected {\n margin-left: 0; /* the first selected tab has nothing to overlap with on the left */\n }\n\n QTabBar::tab:last:selected {\n margin-right: 0; /* the last selected tab has nothing to overlap with on the right */\n }\n\n QTabBar::tab:only-one {\n margin: 0; /* if there is only one tab, we don't want overlapping margins */\n }\n '''\n return style_sheet", "def onGenWxFBPyFormButtonClicked(self, widget):\n result = gui_generator.gen(src_filename=self.py_filename, parent=self)\n\n if result:\n msg = u'Python generate by <%s> was successful' % self.py_filename\n dlg_func.openMsgBox(title=u'EDITOR', prompt_text=msg)\n else:\n msg = u'Python migrate <%s> ended unsuccessfully' % self.py_filename\n dlg_func.openErrBox(title=u'EDITOR', prompt_text=msg)\n self.getGtkTopObject().close()", "def __init__(self, observers):\n super().__init__()\n # make main window\n self.mainWidget = QWidget()\n self.setCentralWidget(self.mainWidget)\n self.setWindowTitle(\"RC GUI\")\n\n # important for setting locations of QWidgets\n self.observers = observers\n\n self.initializeLayout()\n self.mainWidget.setLayout(self.layout)\n print(\"done RC GUI creation\")", "def main():\n\n # Parse the command line.\n parser = ArgumentParser(\n \"Generate Python extension modules for C/C++ libraries.\",\n fromfile_prefix_chars='@')\n\n parser.add_argument('specification',\n help=\"the name of the specification file [default stdin]\",\n metavar=\"FILE\", nargs='?')\n\n parser.add_argument('-a', dest='api_extract',\n help=\"the name of the QScintilla API file [default not generated]\",\n metavar=\"FILE\")\n\n parser.add_argument('--abi-version', dest='abi_version',\n help=\"the ABI version\", metavar=\"VERSION\")\n\n parser.add_argument('-B', dest='backstops', action='append',\n help=\"add <TAG> to the list of timeline backstops\",\n metavar=\"TAG\")\n\n parser.add_argument('-c', 
dest='sources_dir',\n help=\"the name of the code directory [default not generated]\",\n metavar=\"DIR\")\n\n parser.add_argument('-D', dest='py_debug', action='store_true',\n default=False,\n help=\"generate code for a debug build of Python\")\n\n parser.add_argument('-e', dest='exceptions', action='store_true',\n default=False,\n help=\"enable support for exceptions [default disabled]\")\n\n parser.add_argument('-f', dest='warnings_are_errors', action='store_true',\n default=False,\n help=\"warnings are handled as errors\")\n\n parser.add_argument('-g', dest='release_gil', action='store_true',\n default=False,\n help=\"always release and reacquire the GIL [default only when \"\n \"specified]\")\n\n parser.add_argument('-I', dest='include_dirs', action='append',\n help=\"add <DIR> to the list of directories to search when \"\n \"importing or including .sip files\",\n metavar=\"DIR\")\n\n parser.add_argument('-j', dest='parts', type=int, default=0,\n help=\"split the generated code into <FILES> files [default 1 per \"\n \"class]\",\n metavar=\"FILES\")\n\n parser.add_argument('-m', dest='xml_extract', help=SUPPRESS)\n\n parser.add_argument('-n', dest='sip_module',\n help=\"the fully qualified name of the sip module\",\n metavar=\"NAME\")\n\n parser.add_argument('-o', dest='docstrings', action='store_true',\n default=False,\n help=\"enable the automatic generation of docstrings [default \"\n \"disabled]\")\n\n parser.add_argument('-P', dest='protected_is_public', action='store_true',\n default=False,\n help=\"enable the protected/public hack [default disabled]\")\n\n parser.add_argument('-r', dest='tracing', action='store_true',\n default=False,\n help=\"generate code with tracing enabled [default disabled]\")\n\n parser.add_argument('-s', dest='source_suffix',\n help=\"the suffix to use for C or C++ source files [default \\\".c\\\" \"\n \"or \\\".cpp\\\"]\",\n metavar=\"SUFFIX\")\n\n parser.add_argument('-t', dest='tags', action='append',\n help=\"add <TAG> to the list of versions/platforms to generate \"\n \"code for\",\n metavar=\"TAG\")\n\n parser.add_argument('-w', dest='warnings', action='store_true',\n default=False, help=\"enable warning messages [default disabled]\")\n\n parser.add_argument('-x', dest='disabled_features', action='append',\n help=\"add <FEATURE> to the list of disabled features\",\n metavar=\"FEATURE\")\n\n parser.add_argument('-X', dest='extracts', action='append',\n help=\"add <ID:FILE> to the list of extracts to generate\",\n metavar=\"ID:FILE\")\n\n parser.add_argument('-y', dest='pyi_extract',\n help=\"the name of the .pyi stub file [default not generated]\",\n metavar=\"FILE\")\n\n args = parser.parse_args()\n\n # Configure the handling of warnings.\n if args.warnings:\n if args.warnings_are_errors:\n simplefilter('error', FutureWarning)\n simplefilter('error', UserWarning)\n else:\n # Note that we don't suppress FutureWarnings.\n simplefilter('ignore', UserWarning)\n\n try:\n sip5(args.specification, sip_module=args.sip_module,\n abi_version=args.abi_version, sources_dir=args.sources_dir,\n include_dirs=args.include_dirs, tags=args.tags,\n backstops=args.backstops,\n disabled_features=args.disabled_features,\n exceptions=args.exceptions, parts=args.parts,\n source_suffix=args.source_suffix, docstrings=args.docstrings,\n protected_is_public=args.protected_is_public,\n py_debug=args.py_debug, release_gil=args.release_gil,\n tracing=args.tracing, extracts=args.extracts,\n pyi_extract=args.pyi_extract, api_extract=args.api_extract,\n 
xml_extract=args.xml_extract)\n except Exception as e:\n handle_exception(e)\n\n return 0", "def _init_ui(self):\n self.setWindowTitle(\"HB Havens: resultaten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n self.setLayout(QtWidgets.QVBoxLayout())\n\n # Create figure\n self.figure = Figure(figsize=(4,4))\n self.ax = self.figure.add_subplot()\n\n self.ax.grid()\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)\n self.ax.tick_params(axis='y', color='0.75')\n self.ax.tick_params(axis='x', color='0.75')\n self.ax.set_aspect(1)\n\n # Add canvas\n self.canvas = FigureCanvasQTAgg(self.figure)\n\n # this is the Navigation widget\n # it takes the Canvas widget and a parent\n self.layout().addWidget(self.canvas)\n\n # Add location selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Locatie:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.location_combobox = QtWidgets.QComboBox()\n self.location_combobox.addItems(self.result_locations)\n self.location_combobox.setCurrentIndex(self.locid)\n self.location_combobox.currentIndexChanged.connect(self._set_location)\n hbox.addWidget(self.location_combobox)\n self.layout().addLayout(hbox)\n\n # Add parameter selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Parameter:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.parameter_combobox = QtWidgets.QComboBox()\n self.input_parameters = self.modelunctab.mainmodel.hydraulic_loads.result_columns[:]\n self.parameter_combobox.addItems(self.input_parameters)\n self.parameter_combobox.currentIndexChanged.connect(self._set_parameter)\n self.parameter_combobox.setCurrentIndex(0)\n self._set_parameter()\n self.figure.tight_layout()\n hbox.addWidget(self.parameter_combobox)\n self.layout().addLayout(hbox)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n self.layout().addWidget(line)\n\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n self.layout().addWidget(self.closebutton, 0, QtCore.Qt.AlignRight)\n\n self.layout().setSizeConstraint(QtWidgets.QLayout.SetFixedSize)", "def paintButtons(self):\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK)\n buttonOK = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"ok\"], self.showTooltip, self.removeTooltip)\n buttonOK.topleft = [770, 30]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.changeConfiguration)\n self.window.add_child(buttonOK)\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL)\n buttonCancel = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"cancel\"], self.showTooltip, self.removeTooltip)\n buttonCancel.topleft = [890, 30]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeConfiguration)\n self.window.add_child(buttonCancel)", "def setup_css(self):\n\t\t\n\t\t# style de notre interface\n\t\tself.setStyleSheet(\"\"\"\n background-color: rgb(30,30,30);\n color: rgb(240,240,240);\n \"\"\")\n\t\t\n\t\t# style de nos combobox\n\t\tstyle = \"\"\"\n QComboBox::down-arrow {\n image: none;\n border-width: 0px;\n }\n QComboBox::drop-down {\n border-width: 0px;\n }\n \"\"\"\n\t\tself.cbb_devisesFrom.setStyleSheet(style)\n\t\tself.cbb_devisesTo.setStyleSheet(style)\n\t\t\n\t\t# style de notre 
bouton\n\t\tself.btn_inverser.setStyleSheet(\"background-color: red\")", "def init_UI(self):\r\n\t\t# buttons\r\n\t\tself.dlg_btns.accepted.connect(self.on_ok_click)\r\n\t\tself.dlg_btns.rejected.connect(self.on_cancel_click)\r\n\t\tself.source_img_browse_btn.clicked.connect(self.on_source_img_browse_btn_click)\r\n\t\tself.sink_dir_browse_btn.clicked.connect(self.on_sink_dir_browse_btn_click)\r\n\r\n\t\t# layouts\r\n\t\tself.source_img_layout.addWidget(self.source_img_entry)\r\n\t\tself.source_img_layout.addWidget(self.source_img_browse_btn)\r\n\r\n\t\tself.sink_dir_layout.addWidget(self.sink_dir_entry)\r\n\t\tself.sink_dir_layout.addWidget(self.sink_dir_browse_btn)\r\n\r\n\t\tself.sink_db_name_layout.addWidget(self.sink_db_name_entry)\r\n\r\n\t\tself.form_layout.addRow(\"Image Array Path (.npy): \", self.source_img_layout)\r\n\t\t# if continuing an existing case\r\n\t\tif self.existing_case:\r\n\t\t\tself.source_db_browse_btn.clicked.connect(self.on_source_db_browse_btn_click)\r\n\t\t\tself.source_db_layout.addWidget(self.source_db_entry)\r\n\t\t\tself.source_db_layout.addWidget(self.source_db_browse_btn)\r\n\t\t\tself.form_layout.addRow(\"Source Database Path (.db): \", self.source_db_layout)\r\n\t\tself.form_layout.addRow(\"Sink Directory (folder): \", self.sink_dir_layout)\r\n\t\tself.form_layout.addRow(\"Sink Database Name: \", self.sink_db_name_layout)\r\n\r\n\t\tself.dlg_layout.addLayout(self.form_layout)\r\n\t\tself.dlg_layout.addWidget(self.dlg_btns)\r\n\r\n\t\t# set file dialog properties\r\n\t\tself.setLayout(self.dlg_layout)\r\n\t\tself.setGeometry(self.x, self.y, self.width, self.height)\r\n\t\tself.setWindowTitle(self.title)", "def initUI(self):\n\t\t#de button om het spel te beginnen\n\t\tself.playButton = QtGui.QPushButton(\"Play\", self)\n\t\tself.playButton.clicked.connect(self.playField)\n\t\tself.playButton.setStyleSheet(\"background-color: red;border-style: outset; border-radius:5px; font: bold 18px;\")\n\t\tself.playButton.move(210,100)\n\t\tself.playButton.resize(150, 50)\n\t\t\n\t\t#zakje chips\n\t\tself.chips = QtGui.QPixmap(\"chips.png\")\n\t\tself.lbl = QtGui.QLabel(self)\n\t\tself.lbl.setPixmap(self.chips)\n\t\tself.lbl.move(410, 300)\n\t\t\n\t\t#1 zak chips is nooit genoeg voor de heavy breathing cat\n\t\tself.chips2 = QtGui.QPixmap(\"chips.png\")\n\t\tself.lbl2 = QtGui.QLabel(self)\n\t\tself.lbl2.setPixmap(self.chips)\n\t\tself.lbl2.move(30, 30)\n\t\t\n\t\t#pikachu in een sailor pakje, want battleships\n\t\tself.pikachu = QtGui.QPixmap(\"pikachu.png\")\n\t\tself.lbl3 = QtGui.QLabel(self)\n\t\tself.lbl3.setPixmap(self.pikachu)\n\t\tself.lbl3.move(400,0)\n\t\t\n\t\t#een kat kan natuurlijk niet ontbreken\n\t\tself.kat = QtGui.QPixmap(\"hb2.jpeg\")\n\t\tself.lblKat = QtGui.QLabel(self)\n\t\tself.lblKat.setPixmap(self.kat)\n\t\tself.lblKat.move(0,230)\n\t\t\n\t\t#ons super vette logo #swek\n\t\tself.battlechipsPlaatje = QtGui.QPixmap(\"battleships.png\")\n\t\tself.lblbattle = QtGui.QLabel(self)\n\t\tself.lblbattle.setPixmap(self.battlechipsPlaatje)\n\t\tself.lblbattle.move(180,10)\n\t\t\n\t\t#le window\n\t\tself.setGeometry(150, 150, 600, 600)\n\t\tself.setWindowTitle(\"Menu\")\n\t\tself.setStyleSheet(\"background-color: #8e8f94\")\n\t\tself.show()", "def initGui(self):\n\n icon_path = ':/plugins/convertTo3D/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Convert to 3D'),\n callback=self.run,\n parent=self.iface.mainWindow())", "def register_dcc_resource_path(resources_path):\n\n pass", "def load_PyQt4_uic(finder, module):\n name, QtCore = 
_qt_implementation(module)\n dir = os.path.join(module.path[0], \"widget-plugins\")\n finder.IncludeFiles(dir, \"%s.uic.widget-plugins\" % name)\n finder.IncludeModule(\"%s.QtNetwork\" % name)\n try:\n finder.IncludeModule(\"%s.QtWebKit\" % name)\n except ImportError:\n pass", "def main():\r\n qApp = QtGui.QApplication(sys.argv)\r\n qApp.setApplicationName(\"Visualization Tool\")\r\n qApp.setOrganizationName(\"Marquette University Speechlab\")\r\n qApp.setWindowIcon(QtGui.QIcon(\":/vtIcon.ico\"))\r\n form = MainWindow()\r\n form.show()\r\n sys.exit(qApp.exec_())", "def __init_ui(self, list: List[DiagramView], start_button: StartButtonView):\n minimize_button = QtWidgets.QPushButton()\n minimize_button.setIcon(QIcon(SystemInfo.RESOURCES + 'images/buttons/minimize.svg'))\n minimize_button.setFixedSize(31, 31)\n minimize_button.clicked.connect(self.__minimize_on_click)\n\n horizontal_layout = QHBoxLayout()\n for diagram in list:\n horizontal_layout.addWidget(diagram)\n\n central_layout = QGridLayout()\n central_widget = QtWidgets.QWidget()\n central_widget.setLayout(horizontal_layout)\n central_layout.addWidget(central_widget, 1, 0, 1, -1)\n central_layout.addWidget(start_button, 0, 1)\n central_layout.addWidget(minimize_button, 0, 2)\n\n self.setLayout(central_layout)\n\n self.setWindowFlags(Qt.CustomizeWindowHint)\n self.showMaximized()", "def main():\r\n app = appdirs.AppDirs('Python Installer', 'Unicorn')\r\n try:\r\n os.makedirs(app.user_log_dir)\r\n except:\r\n pass\r\n\r\n pyversion = platform.python_version()\r\n pyarch = platform.architecture()[0]\r\n\r\n # log installed python version\r\n with open(os.path.join(app.user_log_dir, 'install.log'), 'a', encoding='utf-8') as fp:\r\n fp.write('Python {} ({}) installed.'.format(pyversion, pyarch))\r\n\r\n # log installed modules\r\n modules = freeze.freeze()\r\n module_str = ''\r\n for module in modules:\r\n module_str += '{}\\n'.format(module)\r\n \r\n with open(os.path.join(app.user_log_dir, 'modules-py{}-{}.log'.format(pyversion, pyarch)), 'w', encoding='utf-8') as fp:\r\n fp.write(module_str)\r\n\r\n app = QtGui.QApplication(sys.argv)\r\n\r\n hello = QtGui.QLabel(\"Python {} ({}) installed\".format(pyversion, pyarch))\r\n hello.show()\r\n hello.resize(250, 80)\r\n sys.exit(app.exec_())", "def createUI(self):\n\n q.getQItem(windowID, QtWidgets.QWidget)\n cmds.setParent(q.fullPath)\n\n # ################################################\n # Active Render Layer\n\n # cmds.separator(height=12, style='none')\n addFrameLayout(\n '%s_frameLayoutLayers' % windowID,\n 'Visible Render Layer', collapsable=False,\n labelVisible=False,\n marginHeight=0\n )\n\n addRowLayout(\n '%s_rowLayoutActiveRenderLayer' % windowID,\n 4,\n columnAlign4=('left', 'left', 'right', 'right'),\n columnAttach4=('left', 'both', 'right', 'right'),\n columnWidth4=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.775,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075\n )\n )\n\n\n addButton('%s_addNewLayer' % windowID, 'New', rsAddNewLayer,\n image='RS_create_layer', size=(21, 21))\n addOptionMenu('%s_selectActiveLayer' % windowID,\n 'Active Layer ', (), rsSelectActiveLayer)\n addButton('rsOpenRenderSetupWindow', 'Render Setup',\n rsOpenRenderSetupWindow, image='render_setup.png',\n size=(21, 21))\n addButton('rsOpenUnifiedRenderGlobals', 'Render Globals',\n rsOpenUnifiedRenderGlobals, image='render_setup.png',\n size=(21, 21))\n\n # ################################################\n # Work Render 
Layers\n\n cmds.setParent(q.fullPath)\n addFrameLayout('%s_frameLayoutLayersB' % windowID,\n 'Work Render Layer', collapsable=False,\n labelVisible=False, marginHeight=0)\n addRowLayout('%s_rowLayoutVisibleRenderLayer' % windowID, 3,\n columnAlign3=('left', 'left', 'right'),\n columnAttach3=('left', 'both', 'right'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.075, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.85,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075))\n\n cmds.separator()\n addOptionMenu('%s_selectVisibleLayer' % windowID,\n 'Visible Layer ', (), rsSelectVisibleLayer)\n cmds.separator()\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=12, style='none')\n\n # ################################################\n # Collections\n\n addFrameLayout('%s_frameLayout02' % windowID, 'Collections',\n labelVisible=False, marginHeight=0)\n\n addRowLayout(\n '%s_rowLayout02' % windowID,\n 6,\n columnAlign6=('left', 'left', 'left', 'left', 'left', 'left'),\n columnAttach6=('both', 'both', 'right', 'right', 'right', 'right'),\n columnWidth6=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.415,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n )\n )\n\n addButton('rsAddCollection', 'Add', rsAddCollection)\n addButton('rsRemoveCollection', 'Remove', rsRemoveCollection)\n addButton('rsSelectShapes', 'Select Shapes', rsSelectShapes,\n image='selectObject.png', size=(21, 21))\n addButton('rsRenameShader', 'Rename Shader', rsRenameShader,\n size=(21, 21), image='QR_rename.png')\n addButton('rsDuplicateShader', 'Duplicate Shader',\n duplicateShader, size=(21, 21), image='newPreset.png')\n addButton('rsRefreshUI', 'Refresh', rsRefreshUI, size=(21, 21),\n image='QR_refresh.png')\n\n # ###########################\n # Filter List\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout03' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.6, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.42))\n\n addTextField('%s_filterShaderList' % windowID, 'Search',\n rsFilterShaderList_off, rsFilterShaderList_off,\n window.updateUI)\n addOptionMenu('rsShaderGroups', '|', (), rsShaderGroups)\n\n # ###########################\n # The shaders scroll list\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout04' % windowID, 1, columnAlign1='both', columnAttach1='both', columnWidth1=WINDOW_WIDTH\n + 12)\n addTextScrollList('%s_ShaderScrollList' % windowID, (),\n rsShaderScrollList_doubleClick,\n rsShaderScrollList_onSelect,\n rsShaderScrollList_deleteKey)\n\n # Add popup menu:\n\n cmds.popupMenu('rsShaderScrollListPopupMenu',\n parent='%s_ShaderScrollList' % windowID,\n allowOptionBoxes=False, markingMenu=True,\n postMenuCommand=postMenuCommand)\n cmds.menuItem('%s_popupMenuItem02' % windowID,\n label='Duplicate Shader', command=duplicateShader)\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem04' % windowID,\n label='Graph Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem03' % windowID,\n label='Select Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem05' % windowID,\n label='Select Assigned Shapes')\n cmds.menuItem('%s_popupMenuItem06' % windowID,\n label='Select Assigned Transforms')\n\n # ##################################################\n # Arnold Property 
Overrides\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout20' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n style='none')\n\n addRowLayout('%s_rowLayout05' % windowID, 2,\n columnAlign2=('left', 'both'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_textArnoldPropertyOverridesLabel' % windowID,\n 'Apply Arnold Property Overrides', 'plainLabelFont')\n addCheckBox('rsArnoldPropertyOverridesCheckBox', '',\n rsArnoldPropertyOverridesCheckBox,\n rsArnoldPropertyOverridesCheckBox)\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n style='none')\n\n # Column Layout to toggle\n\n cmds.setParent('%s_columnLayout20' % windowID)\n cmds.columnLayout(\n '%s_columnLayout02' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n addCheckboxes('%s_columnLayout02' % windowID)\n cmds.columnLayout('%s_columnLayout02' % windowID, edit=True,\n visible=False)\n\n # #################################################\n # Shader Override\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout21' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n addRowLayout('%s_rowLayout06' % windowID, 2,\n columnAlign2=('left', 'right'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_shaderOverrideLabel' % windowID, 'Shader Override',\n 'plainLabelFont')\n addCheckBox('%s_shaderOverrideCheckbox' % windowID, '',\n rsShaderOverrideCheckbox, rsShaderOverrideCheckbox)\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n\n cmds.setParent('%s_columnLayout21' % windowID)\n cmds.columnLayout(\n '%s_columnLayout03' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('both', 4),\n adjustableColumn=True,\n rowSpacing=0,\n )\n cmds.setParent('%s_columnLayout03' % windowID)\n addOptionMenu('%s_optionMenu02' % windowID, 'Select: ', (),\n rsShaderOverridesMenu)\n\n global selectedShaderOverride\n\n # default selection\n\n selectedShaderOverride = SHADER_OVERRIDE_OPTIONS[0]['ui']\n cmds.columnLayout('%s_columnLayout03' % windowID, edit=True,\n visible=False)\n\n # #################################################\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=10, style='none')\n\n # #################################################\n # Extras\n\n addFrameLayout('%s_frameLayout50' % windowID, 'Extras',\n collapsable=True, marginHeight=0,\n labelVisible=False)\n\n # #################################################\n # Add & Assign Shader Groups\n\n addFrameLayout(\n '%s_frameLayout05' % windowID,\n 'Add & Assign Shader Groups',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=False,\n labelVisible=True,\n )\n\n # Add the renamer window\n\n self.gwCustomRenamer = CustomRenamer()\n self.gwCustomRenamer.createUI()\n\n # #################################################\n # AutoConnect\n\n 
cmds.setParent('%s_frameLayout50' % windowID)\n\n addFrameLayout(\n '%s_frameLayout03' % windowID,\n 'Adobe Connector',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout07', 3, columnAlign3=('left', 'left',\n 'left'), columnAttach3=('both', 'both', 'both'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addButton('updateConnections', '> Update Connections <',\n updateConnections)\n addButton('uvSnapshot', 'UV Snapshot', uvSnapshot)\n addButton('editTexture', 'Edit Texture', editTexture)\n\n # After Effects\n\n cmds.setParent('%s_frameLayout03' % windowID)\n addRowLayout('%s_rowLayout11' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.6))\n addText('%s_text90' % windowID, 'Send to After Effects:')\n addButton('makeCompButton', 'Send to After Effects', rsMakeComp)\n\n # #################################################\n # Render Setup /\n # Output settings\n\n cmds.setParent('%s_frameLayout50' % windowID)\n addFrameLayout(\n '%s_frameLayout04' % windowID,\n 'Output Settings',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout08' % windowID, 1,\n columnAlign1='center', columnAttach1='both',\n columnWidth1=WINDOW_WIDTH - FRAME_MARGIN * 2)\n addButton('%s_revealOutputDirectory' % windowID,\n 'Output path not set yet', rsRevealOutputDirectory)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout09' % windowID, 3,\n columnAlign3=('left', 'right', 'right'),\n columnAttach3=('left', 'right', 'right'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.8, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.14,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.06))\n\n addOptionMenu('%s_optionMenu05' % windowID, '', (),\n rsSelectOutputTemplate)\n addOptionMenu('%s_outputVersionMenu' % windowID, '', (),\n rsSelectOutputVersion)\n cmds.menuItem(label='v001')\n\n cmds.setParent('%s_rowLayout09' % windowID)\n addButton('%s_incrementOutputVersionButton' % windowID, '+1',\n rsIncrementOutputVersion, size=(21, 21))\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout10' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.7, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addOptionMenu('%s_optionMenu03' % windowID, 'Format:', (),\n rsOutputTemplatesMenu)\n addOptionMenu('%s_optionMenu06' % windowID, '', (),\n rsSetFPSMenu)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout12' % windowID, 4,\n columnAlign4=('right', 'left', 'right', 'left'),\n columnAttach4=('both', 'both', 'both', 'both'),\n columnWidth4=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.50, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.20,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15))\n\n addText('%s_setInFrameLabel' % windowID, 'In Frame ')\n addTextField('%s_setInFrame' % windowID, '', setInFrame,\n setInFrame, setInFrame)\n\n addText('%s_setOutFrameLabel' % windowID, 'Out Frame ')\n addTextField('%s_setOutFrame' % windowID, '', setOutFrame,\n setOutFrame, setOutFrame)", "def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))", "def initGui(self): 
# pylint:disable=invalid-name\n\n icon_path = os.path.join(self.plugin_dir, \"icons\")\n\n self.add_action(\n os.path.join(icon_path, \"icon.png\"),\n text=self.translate(\"Load Data\"),\n callback=self.run,\n parent=self.iface.mainWindow(),\n )\n\n # Plugin Dialog\n self.dlg.uListOptions.itemClicked.connect(self.show_selected_option)\n self.dlg.uListOptions.itemClicked.emit(self.dlg.uListOptions.item(0))\n\n model = QStandardItemModel()\n self.dlg.uCRSCombo.setModel(model)\n self.dlg.uCRSCombo.currentIndexChanged.connect(self.layer_crs_selected)\n\n self.dlg.uLabelWarning.setStyleSheet(\"color:red\")\n self.dlg.uWarningSettings.setStyleSheet(\"color:red\")\n\n self.add_item(title=\"ALL\", icon=os.path.join(icon_path, \"all.png\"))\n self.add_item(title=\"WFS\", icon=os.path.join(icon_path, \"wfs.png\"))\n self.add_item(title=\"WMTS\", icon=os.path.join(icon_path, \"wmts.png\"))\n self.add_item(title=\"Settings\", icon=os.path.join(icon_path, \"settings.png\"))\n self.add_item(title=\"Help\", icon=os.path.join(icon_path, \"help.png\"))\n self.add_item(title=\"About\", icon=os.path.join(icon_path, \"about.png\"))\n\n # set table model\n self.set_table_model_view()\n\n # set help html\n self.dlg.hHelpHtml.setOpenExternalLinks(True)\n help_file = os.path.join(self.plugin_dir, \"help.html\")\n with open(help_file, \"r\", encoding=\"utf-8\") as file:\n help_html = file.read()\n help_html.format(self.plugin_dir)\n self.dlg.hHelpHtml.setHtml(help_html.format(icon_path))\n\n # set about html\n self.dlg.hAboutHtml.setOpenExternalLinks(True)\n about_file = os.path.join(self.plugin_dir, \"about.html\")\n with open(about_file, \"r\", encoding=\"utf-8\") as file:\n about_html = file.read()\n about_html.format(self.plugin_dir)\n self.dlg.hAboutHtml.setHtml(about_html.format(icon_path))\n\n # populate settings\n # default data services to combo\n\n self.dlg.uComboBoxDomain.addItems(SER)\n\n # settings signals\n self.dlg.uBtnAddDomain.clicked.connect(self.add_new_domain)\n for entry in range(1, 11):\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(entry)).clicked.connect(\n self.save_domain\n )\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(entry)).clicked.connect(\n self.save_domain\n )\n self.load_settings()", "def connect(self):\r\n self.btn_start.connect(SIGNAL(\"clicked()\"), self.btn_project_clicked)\r\n self.btn_connect.connect(SIGNAL(\"clicked()\"), self.app.start_comm)", "def widgets(overwrite=True):\n install_nbextension(os.path.join(PKGPATH, 'static'),\n destination='molviz',\n overwrite=overwrite)", "def main():\n print(\n \"\"\"\n\n ##########################################################\n # #\n # #\n # Compiling Colocalized Cyano Datasets #\n # #\n # #\n ##########################################################\n\n \n \"\"\"\n )\n cyanoFiles = glob.glob(f\"{COLOCALIZED_DIR}*.csv\")\n makedir(COMPILED_DIR)\n dfCompiled = pd.DataFrame({})\n for cyanoFile in cyanoFiles:\n print(f\"Compiling {cyanoFile}\")\n data = unify(cyanoFile)\n if len(dfCompiled ) < 1:\n dfCompiled = data\n else:\n dfCompiled = pd.concat([dfCompiled, data], ignore_index=True) \n dfCompiled.to_csv(f\"{COMPILED_DIR}compiled.csv\", index=False)", "def jquery_ui_css():\n return static_file(\"jquery-ui.css\", root=os.path.join(BASEDIR, \"css\"))", "def init_ui(self):\n\n # create media player object\n self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)\n\n # create videowidget object\n self.videowidget = QVideoWidget()\n\n # create \"open video\" button\n self.openVideoBtn = QPushButton(' Open Video')\n 
self.openVideoBtn.clicked.connect(self.open_video)\n self.openVideoBtn.setIcon(self.style().standardIcon(QStyle.SP_DirOpenIcon))\n\n # create \"open annotation\" button\n self.openAnnotationBtn = QPushButton(' Open csv ( + video)')\n self.openAnnotationBtn.clicked.connect(self.open_annotation)\n self.openAnnotationBtn.setIcon(self.style().standardIcon(QStyle.SP_DialogOpenButton))\n\n # create save button\n self.saveBtn = QPushButton(' Save Annotation')\n self.saveBtn.clicked.connect(self.save_annotation)\n self.saveBtn.setIcon(self.style().standardIcon(QStyle.SP_DialogSaveButton))\n self.saveBtn.setEnabled(False)\n\n # create reset button\n self.resetBtn = QPushButton(\" Clear Annotation\")\n self.resetBtn.clicked.connect(self.clear_annotation)\n self.resetBtn.setIcon(self.style().standardIcon(QStyle.SP_TrashIcon))\n\n # create \"new file\" button\n self.newFileBtn = QPushButton(\" New File\")\n self.newFileBtn.clicked.connect(self.new_file)\n self.newFileBtn.setIcon(self.style().standardIcon(QStyle.SP_FileIcon))\n\n # create a help button\n self.HelpBtn = QPushButton(\" Help\")\n self.HelpBtn.clicked.connect(self.show_help)\n self.HelpBtn.setIcon(self.style().standardIcon(QStyle.SP_MessageBoxQuestion))\n\n # create button for playing\n self.playBtn = QPushButton()\n self.playBtn.setEnabled(False)\n self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.playBtn.clicked.connect(self.play_video)\n\n # create button for stop\n self.stopBtn = QPushButton()\n self.stopBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))\n self.stopBtn.clicked.connect(self.stop_video)\n\n # create checkbox for record\n self.recordLabel = QLabel(\"Record: \")\n self.recordLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)\n self.checkbox = QCheckBox()\n\n # Create radio buttons for view mode\n self.radioLabel = QLabel(\" x-axis range: \")\n self.zoomRadio = QRadioButton(\"Zoom\")\n self.zoomRadio.setChecked(True)\n self.wideRadio = QRadioButton(\"Wide\")\n self.wideRadio.setEnabled(False)\n\n # create video slider\n self.slider = QSlider(Qt.Horizontal)\n self.slider.setRange(0, 0)\n self.slider.sliderMoved.connect(self.set_position)\n\n # Creating a container that includes the videoplayer and the label that shows the value of the slider.\n self.container = QWidget()\n lay = QVBoxLayout(self.container)\n lay.setContentsMargins(0, 0, 0, 0)\n lay.addWidget(self.videowidget)\n\n # Create a label that shows the percentage of engagement.\n self.numLabel = QLabel(\"0\", self.container)\n self.numLabel.setFont(QFont('Times', 40))\n self.numLabel.setStyleSheet(\"background-color: white\")\n height = round(self.geometry.height()/15)\n width = round(self.geometry.width()/16)\n self.numLabel.setGeometry(QRect(80, 50, width , height))\n\n # Create combobox for Playback rate\n self.speedComboLabel = QLabel(\" | Playback rate: \")\n self.speedComboLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)\n self.speedCombo = QComboBox()\n #self.speedCombo.addItem(\"0.25\")\n self.speedCombo.addItem(\"0.5\")\n self.speedCombo.addItem(\"0.75\")\n self.speedCombo.addItem(\"1\")\n self.speedCombo.addItem(\"1.25\")\n self.speedCombo.addItem(\"1.5\")\n self.speedCombo.addItem(\"1.75\")\n #self.speedCombo.addItem(\"2\")\n self.speedCombo.setCurrentIndex(2)\n\n # Create label for video duration. It displays the duration of the video.\n self.durationLabel = QLabel(\"00:00\")\n self.durationLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)\n\n # Create a label for video length. 
It displays the length of the video.\n self.lengthLabel = QLabel(\"/ 00:00\")\n self.lengthLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)\n\n # create label for error handling\n self.errorLabel = QLabel()\n self.errorLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)\n\n # create spacers\n self.spacerItem1 = QSpacerItem(128, 17, QSizePolicy.Expanding, QSizePolicy.Minimum)\n self.spacerItem2 = QSpacerItem(128, 17, QSizePolicy.Expanding, QSizePolicy.Minimum)\n self.spacerItem3 = QSpacerItem(300, 0)", "def update_project(self):\n\n modules = self.project.pyqt_modules\n\n modules[:] = [name for name, b in self._buttons.items()\n if b.explicitly_required]", "def __init__(self) -> None:\n super().__init__('qt') # Initialize the base class.\n self.active = True\n self.consoleOnly = False # Console is separate from the log.\n self.iconimages: dict[str, Any] = {} # Keys are paths, values are Icons.\n self.globalFindDialog: Widget = None\n self.idleTimeClass = qt_idle_time.IdleTime\n self.insert_char_flag = False # A flag for eventFilter.\n self.mGuiName = 'qt'\n self.plainTextWidget = qt_text.PlainTextWrapper\n self.show_tips_flag = False # #2390: Can't be inited in reload_settings.\n self.styleSheetManagerClass = StyleSheetManager\n # Be aware of the systems native colors, fonts, etc.\n QtWidgets.QApplication.setDesktopSettingsAware(True)\n # Create objects...\n self.qtApp = QtWidgets.QApplication(sys.argv)\n self.reloadSettings()\n self.appIcon = self.getIconImage('leoapp32.png')\n\n # Define various classes key stokes.\n #@+<< define FKeys >>\n #@+node:ekr.20180419110303.1: *4* << define FKeys >>\n self.FKeys = [\n 'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12']\n # These do not generate keystrokes on MacOs.\n #@-<< define FKeys >>\n #@+<< define ignoreChars >>\n #@+node:ekr.20180419105250.1: *4* << define ignoreChars >>\n # Always ignore these characters\n self.ignoreChars = [\n # These are in ks.special characters.\n # They should *not* be ignored.\n # 'Left', 'Right', 'Up', 'Down',\n # 'Next', 'Prior',\n # 'Home', 'End',\n # 'Delete', 'Escape',\n # 'BackSpace', 'Linefeed', 'Return', 'Tab',\n # F-Keys are also ok.\n # 'F1','F2','F3','F4','F5','F6','F7','F8','F9','F10','F11','F12',\n 'KP_0', 'KP_1', 'KP_2', 'KP_3', 'KP_4', 'KP_5', 'KP_6', 'KP_7', 'KP_8', 'KP_9',\n 'KP_Multiply, KP_Separator,KP_Space, KP_Subtract, KP_Tab',\n 'KP_F1', 'KP_F2', 'KP_F3', 'KP_F4',\n # Keypad chars should be have been converted to other keys.\n # Users should just bind to the corresponding normal keys.\n 'KP_Add', 'KP_Decimal', 'KP_Divide', 'KP_Enter', 'KP_Equal',\n 'CapsLock', 'Caps_Lock',\n 'NumLock', 'Num_Lock',\n 'ScrollLock',\n 'Alt_L', 'Alt_R',\n 'Control_L', 'Control_R',\n 'Meta_L', 'Meta_R',\n 'Shift_L', 'Shift_R',\n 'Win_L', 'Win_R', # Clearly, these should never be generated.\n # These are real keys, but they don't mean anything.\n 'Break', 'Pause', 'Sys_Req',\n 'Begin', 'Clear', # Don't know what these are.\n ]\n #@-<< define ignoreChars >>\n #@+<< define specialChars >>\n #@+node:ekr.20180419081404.1: *4* << define specialChars >>\n # Keys whose names must never be inserted into text.\n self.specialChars = [\n # These are *not* special keys.\n # 'BackSpace', 'Linefeed', 'Return', 'Tab',\n 'Left', 'Right', 'Up', 'Down', # Arrow keys\n 'Next', 'Prior', # Page up/down keys.\n 'Home', 'End', # Home end keys.\n 'Delete', 'Escape', # Others.\n 'Enter', 'Insert', 'Ins', # These should only work if bound.\n 'Menu', # #901.\n 'PgUp', 'PgDn', # #868.\n ]\n 
#@-<< define specialChars >>\n # Put up the splash screen()\n if (g.app.use_splash_screen and\n not g.app.batchMode and\n not g.app.silentMode and\n not g.unitTesting\n ):\n self.splashScreen = self.createSplashScreen()\n # qtFrame.finishCreate does all the other work.\n self.frameFactory = qt_frame.TabbedFrameFactory()", "def setupActions(self):\n\n # File Menu --------------------------------------------------\n self.openAction = QtWidgets.QAction(\n # QtWidgets.QIcon(\":/images/open.png\"),\n \"&Open File\",\n self,\n shortcut=\"Ctrl+O\",\n statusTip=\"Open File\",\n triggered=self.openFile\n )\n\n self.openFolderAction = QtWidgets.QAction(\n # QtWidgets.QIcon(\":/images/open.png\"),\n \"Open Folder\",\n self,\n shortcut=\"Ctrl+Shift+O\",\n statusTip=\"Open Folder\",\n triggered=self.openFolder\n )\n\n self.saveAction = QtWidgets.QAction(\n # QtWidgets.QIcon(\":/images/save.png\"),\n \"&Save File\",\n self,\n shortcut=\"Ctrl+S\",\n statusTip=\"Save File\",\n triggered=self.saveFile\n )\n\n self.saveAsAction = QtWidgets.QAction(\n # QtWidgets.QIcon(\":/images/save.png\"),\n \"Save As File\",\n self,\n shortcut=\"Ctrl+Shift+S\",\n statusTip=\"Save File As...\",\n triggered=self.saveFileAs\n )\n\n self.quitAction = QtWidgets.QAction(\n # QtWidgets.QIcon(':/images/save.png'),\n \"&Quit\",\n self,\n shortcut=\"Ctrl+Q\",\n statusTip=\"Quit\",\n triggered=self.close\n )\n\n # Build Menu --------------------------------------------------\n\n self.buildHTMLAction = QtWidgets.QAction(\n \"Build &HTML\",\n self,\n shortcut=\"Ctrl+B\",\n statusTip=\"Build HTML\",\n triggered=self.buildHTML\n )\n\n self.buildPDFAction = QtWidgets.QAction(\n \"Build &PDF\",\n self,\n shortcut=\"Ctrl+Shift+B\",\n statusTip=\"Build PDF\",\n triggered=self.buildPDF\n )\n self.selectFontAction = QtWidgets.QAction(\n \"Select Font\", self, triggered=self.openFontDialog\n )", "def init_src(config):\n new_py = new_hark = None\n\n os.makedirs(str(config.project.python_src), exist_ok=True)\n\n py_init = config.project.python_src / \"__init__.py\"\n if not py_init.exists():\n with open(py_init, \"w\") as f:\n f.write(\"\")\n new_py = py_init\n\n if not config.project.hark_file.exists():\n with open(config.project.hark_file, \"w\") as f:\n main = 'fn main() {\\n print(\"Hello World!\");\\n}\\n'\n f.write(f\"// Something great begins here.\\n\\n\\n{main}\")\n new_hark = config.project.hark_file\n\n return new_py, new_hark", "def start_livecoding_gui(engine, project_path, main_file, live_qml=''):\n register_types()\n recursively_register_types(project_path)\n\n global reloader # necessary to make reloading work, prevents garbage collection\n reloader = PythonReloader(main_file)\n engine.rootContext().setContextProperty(PythonReloader.__name__, reloader)\n engine.rootContext().setContextProperty(\n 'userProjectPath', QUrl.fromLocalFile(project_path)\n )\n\n if live_qml:\n qml_main = live_qml\n engine.addImportPath(os.path.join(MODULE_PATH, '..'))\n else:\n qml_main = os.path.join(MODULE_PATH, 'live.qml')\n engine.load(qml_main)", "def configure_ui(self):\n self.setWindowIcon(self.MAIN_ICON)\n self.setWindowModality(Qt.ApplicationModal)", "def menu_design_a_gui_with_xrc(self, event=None):\n self.parentPanel.design_a_gui_with_xrc()", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\t# \"\"\" Get absolute path to resource, works for dev and 
for PyInstaller \"\"\"\n\t# base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n\t# return os.path.join(base_path, relative_path)" ]
[ "0.67200714", "0.65681183", "0.6396254", "0.6029277", "0.5712012", "0.5677451", "0.56494296", "0.5627389", "0.5508585", "0.5422793", "0.5350092", "0.530345", "0.52731526", "0.5244704", "0.5227191", "0.5177347", "0.5123454", "0.5108174", "0.5087578", "0.50874686", "0.5061226", "0.5041817", "0.50064653", "0.4959779", "0.49564698", "0.4906794", "0.4888389", "0.48807436", "0.48727572", "0.48649248", "0.4853727", "0.48483372", "0.48330376", "0.48042664", "0.47970745", "0.47960925", "0.47881383", "0.47813603", "0.4766537", "0.47636944", "0.4757032", "0.4754878", "0.47466487", "0.4745914", "0.4744158", "0.47433445", "0.47369096", "0.47366047", "0.47309938", "0.47276524", "0.4723373", "0.471596", "0.47105724", "0.47064495", "0.46864235", "0.4686175", "0.4683835", "0.46783486", "0.46747062", "0.4674353", "0.46720195", "0.46704608", "0.46698642", "0.466939", "0.4661679", "0.4655866", "0.4643963", "0.46398783", "0.4638947", "0.46375743", "0.46353295", "0.4635106", "0.46350005", "0.4629947", "0.46263346", "0.46220484", "0.46131513", "0.46065202", "0.46032056", "0.46015492", "0.4594991", "0.4589713", "0.4586797", "0.45842433", "0.45837075", "0.45789394", "0.4578919", "0.45771855", "0.45723745", "0.45682797", "0.45656666", "0.45628512", "0.45574448", "0.45541435", "0.454508", "0.45446652", "0.45443338", "0.45428842", "0.4539039", "0.45345756" ]
0.6065464
3
Generates an array of ppxf_util.gaussian emission lines to be used as gas templates in PPXF. Generally, these templates represent the instrumental line spread function (LSF) at the set of wavelengths of each emission line. In this case, pPXF will return the intrinsic (i.e. astrophysical) dispersion of the gas lines. Alternatively, one can input FWHM_gal=0, in which case the emission lines are delta functions and pPXF will return a dispersion which includes both the instrumental and the intrinsic dispersion. Additional lines can be easily added by editing the code of this procedure, which is meant as a template to be modified by users where needed. For accuracy, the ppxf_util.gaussians are integrated over the pixel boundaries. This can be changed by setting `pixel`=False. The [OI], [OIII] and [NII] doublets are fixed at the theoretical flux ratio ~3. The [OII] and [SII] doublets can be restricted to a physical range of ratios. The Balmer series can be fixed to the theoretically predicted decrement.
def emission_lines(logLam_temp, lamRange_gal, FWHM_gal, pixel=True, tie_balmer=False, limit_doublets=False, vacuum=False): if tie_balmer: # Balmer decrement for Case B recombination (T=1e4 K, ne=100 cm^-3) # Table 4.4 of Dopita & Sutherland 2003 https://www.amazon.com/dp/3540433627 # Balmer: Htheta Heta Hzeta Heps Hdelta Hgamma Hbeta Halpha wave = np.array([3797.90, 3835.39, 3889.05, 3970.07, 4101.76, 4340.47, 4861.33, 6562.80]) # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) ratios = np.array([0.0530, 0.0731, 0.105, 0.159, 0.259, 0.468, 1, 2.86]) ratios *= wave[-2]/wave # Account for varying pixel size in Angstrom emission_lines = gauss @ ratios line_names = ['Balmer'] w = (wave > lamRange_gal[0]) & (wave < lamRange_gal[1]) line_wave = np.mean(wave[w]) if np.any(w) else np.mean(wave) else: # Use fewer lines here, as the weak ones are difficult to measure # Balmer: Hdelta Hgamma Hbeta Halpha line_wave = [4101.76, 4340.47, 4861.33, 6562.80] # air wavelengths if vacuum: line_wave = ppxf_util.air_to_vac(line_wave) line_names = ['Hdelta', 'Hgamma', 'Hbeta', 'Halpha'] emission_lines = ppxf_util.gaussian(logLam_temp, line_wave, FWHM_gal, pixel) if limit_doublets: # The line ratio of this doublet lam3729/lam3726 is constrained by # atomic physics to lie in the range 0.28--1.47 (e.g. fig.5.8 of # Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/). # We model this doublet as a linear combination of two doublets with the # maximum and minimum ratios, to limit the ratio to the desired range. # -----[OII]----- wave = [3726.03, 3728.82] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[OII]3726_d1', '[OII]3726_d2'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) doublets = gauss @ [[1, 1], [0.28, 1.47]] # produces *two* doublets emission_lines = np.column_stack([emission_lines, doublets]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) # The line ratio of this doublet lam6716/lam6731 is constrained by # atomic physics to lie in the range 0.44--1.43 (e.g. fig.5.8 of # Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/). # We model this doublet as a linear combination of two doublets with the # maximum and minimum ratios, to limit the ratio to the desired range. 
# -----[SII]----- wave = [6716.47, 6730.85] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[SII]6731_d1', '[SII]6731_d2'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) doublets = gauss @ [[0.44, 1.43], [1, 1]] # produces *two* doublets emission_lines = np.column_stack([emission_lines, doublets]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) else: # Here the doublets are free to have any ratio # -----[OII]----- -----[SII]----- wave = [3726.03, 3728.82, 6716.47, 6730.85] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[OII]3726', '[OII]3729', '[SII]6716', '[SII]6731'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) emission_lines = np.column_stack([emission_lines, gauss]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[OIII]----- wave = [4958.92, 5006.84] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[OIII]5007_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[OI]----- wave = [6300.30, 6363.67] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.33] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[OI]6300_d') # single template for this doublet line_wave = np.append(line_wave, wave[0]) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[NII]----- wave = [6548.03, 6583.41] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[NII]6583_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) #added by anja to ppxf_util.emission_lines version # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[NI]----- wave = [5197.90, 5200.39] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.7] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[NI]5200_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) #---------------------- # Only include lines falling within the estimated fitted wavelength range. # w = (line_wave > lamRange_gal[0]) & (line_wave < lamRange_gal[1]) emission_lines = emission_lines[:, w] line_names = line_names[w] line_wave = line_wave[w] print('Emission lines included in gas templates:') print(line_names) return emission_lines, line_names, line_wave
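A minimal usage sketch for the emission_lines helper above, assuming numpy is imported as np and ppxf.ppxf_util as ppxf_util (both are required by the function body); the wavelength grid, fitted range and instrumental FWHM below are illustrative values, not taken from the source:

import numpy as np
from ppxf import ppxf_util  # assumed import; provides gaussian() and air_to_vac() used above

# Illustrative inputs (assumptions, not from the source):
# - logLam_temp: natural log of the log-rebinned template wavelengths, in Angstrom
# - lamRange_gal: estimated fitted wavelength range of the galaxy spectrum, in Angstrom
# - FWHM_gal: instrumental FWHM of the galaxy spectrum, in Angstrom
logLam_temp = np.linspace(np.log(3540.0), np.log(7400.0), 5000)
lamRange_gal = [3600.0, 7300.0]
FWHM_gal = 2.76

gas_templates, gas_names, line_wave = emission_lines(
    logLam_temp, lamRange_gal, FWHM_gal,
    pixel=True, tie_balmer=False, limit_doublets=False, vacuum=False)

# gas_templates has shape (logLam_temp.size, number_of_included_lines) and is
# typically stacked next to the stellar templates before calling ppxf.
print(gas_templates.shape, gas_names)

Keeping each fixed-ratio doublet in a single template, as the code above does, removes one free weight per doublet from the pPXF fit.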
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template", "def load_templates(fwhm=400, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=False, alf_template=False):\n \n if stars:\n # templates = glob.glob('%s/templates/Pickles_stars/ext/*dat' %(os.getenv('GRIZLI')))\n # templates = []\n # for t in 'obafgkmrw':\n # templates.extend( glob.glob('%s/templates/Pickles_stars/ext/uk%s*dat' %(os.getenv('THREEDHST'), t)))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-M*txt' %(os.getenv('THREEDHST'))))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-[LT]*txt' %(os.getenv('THREEDHST'))))\n # \n # #templates = glob.glob('/Users/brammer/Downloads/templates/spex*txt')\n # templates = glob.glob('bpgs/*ascii')\n # info = catIO.Table('bpgs/bpgs.info')\n # type = np.array([t[:2] for t in info['type']])\n # templates = []\n # for t in 'OBAFGKM':\n # test = type == '-%s' %(t)\n # so = np.argsort(info['type'][test])\n # templates.extend(info['file'][test][so])\n # \n # temp_list = OrderedDict()\n # for temp in templates:\n # #data = 
np.loadtxt('bpgs/'+temp, unpack=True)\n # data = np.loadtxt(temp, unpack=True)\n # #data[0] *= 1.e4 # spex\n # scl = np.interp(5500., data[0], data[1])\n # name = os.path.basename(temp)\n # #ix = info['file'] == temp\n # #name='%5s %s' %(info['type'][ix][0][1:], temp.split('.as')[0])\n # print(name)\n # temp_list[name] = utils.SpectrumTemplate(wave=data[0],\n # flux=data[1]/scl)\n \n # np.save('stars_bpgs.npy', [temp_list])\n \n \n # tall = np.load(os.path.join(os.getenv('GRIZLI'), \n # 'templates/stars.npy'))[0]\n # \n # return tall\n # \n # temp_list = OrderedDict()\n # for k in tall:\n # if k.startswith('uk'):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n # \n # for t in 'MLT':\n # for k in tall:\n # if k.startswith('spex-prism-'+t):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n \n #return temp_list\n templates = ['M6.5.txt', 'M8.0.txt', 'L1.0.txt', 'L3.5.txt', 'L6.0.txt', 'T2.0.txt', 'T6.0.txt', 'T7.5.txt']\n templates = ['stars/'+t for t in templates]\n else:\n ## Intermediate and very old\n # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat', \n # 'templates/cvd12_t11_solar_Chabrier.extend.skip10.dat'] \n templates = ['eazy_intermediate.dat', \n 'cvd12_t11_solar_Chabrier.dat']\n \n ## Post starburst\n #templates.append('templates/UltraVISTA/eazy_v1.1_sed9.dat')\n templates.append('post_starburst.dat')\n \n ## Very blue continuum\n #templates.append('templates/YoungSB/erb2010_continuum.dat')\n templates.append('erb2010_continuum.dat')\n \n ### Test new templates\n # templates = ['templates/erb2010_continuum.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_006.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_008.dat']\n \n if fsps_templates:\n #templates = ['templates/fsps/tweak_fsps_temp_kc13_12_0{0:02d}.dat'.format(i+1) for i in range(12)]\n templates = ['fsps/fsps_QSF_12_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(12)]\n #templates = ['fsps/fsps_QSF_7_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(7)]\n \n \n if alf_template:\n templates.append('alf_SSP.dat')\n \n if continuum_list is not None:\n templates = continuum_list\n \n temp_list = OrderedDict()\n for temp in templates:\n data = np.loadtxt(os.path.join(os.getenv('GRIZLI'), 'templates', temp), unpack=True)\n #scl = np.interp(5500., data[0], data[1])\n scl = 1.\n name = temp #os.path.basename(temp)\n temp_list[name] = SpectrumTemplate(wave=data[0], flux=data[1]/scl,\n name=name)\n \n temp_list[name].name = name\n \n if stars:\n return temp_list\n \n ### Emission lines:\n line_wavelengths, line_ratios = get_line_wavelengths()\n \n if line_complexes:\n #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']\n #line_list = ['Ha+SII', 'OIII+Hb', 'OII']\n line_list = ['Ha+NII+SII+SIII+He', 'OIII+Hb', 'OII+Ne', 'Lya+CIV']\n else:\n if full_line_list is None:\n line_list = DEFAULT_LINE_LIST\n else:\n line_list = full_line_list\n \n #line_list = ['Ha', 'SII']\n \n # Use FSPS grid for lines\n wave_grid = None\n # if fsps_templates:\n # wave_grid = data[0]\n # else:\n # wave_grid = None \n \n for li in line_list:\n scl = line_ratios[li]/np.sum(line_ratios[li])\n for i in range(len(scl)):\n line_i = SpectrumTemplate(wave=wave_grid, \n central_wave=line_wavelengths[li][i], \n flux=None, fwhm=fwhm, velocity=True)\n \n if i == 0:\n line_temp = line_i*scl[i]\n else:\n line_temp = line_temp + line_i*scl[i]\n \n name = 'line {0}'.format(li)\n line_temp.name = name\n temp_list[name] = line_temp\n \n return temp_list", "def array_templates(templates, max_R=5000):\n from grizli.utils_c.interp import 
interp_conserve_c\n \n wave = np.unique(np.hstack([templates[t].wave for t in templates]))\n clipsum, iter = 1, 0\n while (clipsum > 0) & (iter < 10):\n clip = np.gradient(wave)/wave < 1/max_R\n idx = np.arange(len(wave))[clip]\n wave[idx[::2]] = np.nan\n wave = wave[np.isfinite(wave)]\n iter += 1\n clipsum = clip.sum()\n #print(iter, clipsum)\n \n NTEMP = len(templates)\n flux_arr = np.zeros((NTEMP, len(wave)))\n \n for i, t in enumerate(templates):\n flux_arr[i,:] = interp_conserve_c(wave, templates[t].wave,\n templates[t].flux)\n \n is_line = np.array([t.startswith('line ') for t in templates])\n \n return wave, flux_arr, is_line", "def gen_gaussian_low(img, c_res, c=0.5, vx_size=1):\n\n # Input parsing\n assert (c_res > 0) and (c > 0) and (vx_size > 0)\n assert isinstance(img, np.ndarray) and (len(img.shape) == 3)\n\n # Initialization\n f_vx = c_res / vx_size\n ff_vx = min(img.shape) / (2. * np.pi * f_vx)\n sf_vx = ff_vx / math.sqrt(2. * math.log(1. / c))\n\n # Meshgrid generation\n nx, ny, nz = (img.shape[0] - 1) * .5, (img.shape[1] - 1) * .5, (img.shape[2] - 1) * .5\n if (nx % 1) == 0:\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx + 1, 1)))\n else:\n if nx < 1:\n arr_x = np.arange(0, 1)\n else:\n nx = math.ceil(nx)\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx, 1)))\n if (ny % 1) == 0:\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny + 1, 1)))\n else:\n if ny < 1:\n arr_y = np.arange(0, 1)\n else:\n ny = math.ceil(ny)\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny, 1)))\n if (nz % 1) == 0:\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz + 1, 1)))\n else:\n if nz < 1:\n arr_z = np.arange(0, 1)\n else:\n nz = math.ceil(nz)\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz, 1)))\n [X, Y, Z] = np.meshgrid(arr_x, arr_y, arr_z, indexing='ij')\n X = X.astype(np.float32, copy=False)\n Y = Y.astype(np.float32, copy=False)\n Z = Z.astype(np.float32, copy=False)\n R = np.sqrt(X * X + Y * Y + Z * Z)\n\n # Building\n return np.exp(-R / (2.*sf_vx*sf_vx))", "def grid_lineify(f, x_lim=(0.,256) ,y_lim=(0.,256), ntraj = 600,\n max_step = 3000, gamma = 0.02, dt = 9., e0 = 0.1,\n T = 0.1,\n e_thresh = 0.001, h = 2e-1, m = 3, bounce = False\n ):\n lines = []\n nx = int(np.sqrt(ntraj))\n x_starts, y_starts = np.meshgrid(np.linspace(x_lim[0],x_lim[1],nx),\n np.linspace(y_lim[0],y_lim[1],nx))\n x_starts = x_starts.flatten()\n y_starts = y_starts.flatten()\n for traj in range(len(x_starts)):\n x,y = x_starts[traj].item(), y_starts[traj].item()\n PE = f(x, y)\n v0 = np.sqrt(e0/m)\n vx,vy = np.random.normal(0,v0), np.random.normal(0,v0)\n line = []\n step = 0\n while step < max_step and np.sqrt(vx*vx+vy*vy) > e_thresh:\n PE = f(x, y)\n if (np.exp(-PE/.01) > np.random.random()):\n break\n # cdiff grad\n gx = ((f(x+h,y)-f(x-h,y))/(2*h)).item()\n gy = ((f(x,y+h)-f(x,y-h))/(2*h)).item()\n vx += 0.5*dt*(gx - gamma*vx + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n vy += 0.5*dt*(gy - gamma*vy + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n x += vx*dt\n y += vy*dt\n # Bounce off edges.\n if (bounce):\n if (x > x_lim[1]):\n x -= 2.0*np.abs(x-x_lim[1])\n vx *= -1\n if (x < x_lim[0]):\n x += 2.0*np.abs(x-x_lim[0])\n vx *= -1\n if (y > y_lim[1]):\n y -= 2.0*np.abs(y-y_lim[1])\n vy *= -1\n if (y < y_lim[0]):\n y += 2.0*np.abs(y-y_lim[0])\n vy *= -1\n else: # absorb\n if (x > x_lim[1]):\n break\n elif (x < x_lim[0]):\n break\n elif (y > y_lim[1]):\n break\n elif (y < y_lim[0]):\n break\n line.append([x,y])\n gx = 
((f(x+h,y)-f(x-h,y))/(2*h)).item()\n gy = ((f(x,y+h)-f(x,y-h))/(2*h)).item()\n vx += 0.5*dt*(gx - gamma*vx + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n vy += 0.5*dt*(gy - gamma*vy + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n step += 1\n lines.append(line)\n return lines", "def Build_Background_Template(numBGPhotons, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS = False,outputSize=300,angularSize=10.0):\r\n \r\n numPhotons = numBGPhotons\r\n numHigh = int(round(.32 *numPhotons))\r\n numLow = numPhotons-numHigh\r\n \r\n bgEventsX = []\r\n bgEventsY = []\r\n \r\n bgTemplate = bgTemplate *(1.0-flatLevel) + flatLevel\r\n# import matplotlib.pyplot as plt\r\n# plt.imshow(bgTemplate,'jet',vmin=0, vmax=1)\r\n# plt.colorbar()\r\n# plt.show()\r\n\r\n app=float(angularSize)/float(outputSize) # angle per pixel\r\n for i in range(numPhotons):\r\n x ,y = 0, 0\r\n while True:\r\n x,y = np.random.randint(0,high = len(bgTemplate)),np.random.randint(0,high = len(bgTemplate))\r\n if (np.random.ranf() < bgTemplate[y][x]):\r\n break\r\n # Shift and scale coordinates to output map and then compute PSF modification to the position.\r\n psfMod = PSF_Spread(PSFTableFront,PSFTableBack, HESS =HESS)\r\n dx = psfMod[0]*math.cos(psfMod[1]) # PSF shift in deg\r\n dy = psfMod[0]*math.sin(psfMod[1]) # PSF shift in deg\r\n \r\n bgEventsX.append((x-outputSize/2.0)*app + dx)\r\n bgEventsY.append((y-outputSize/2.0)*app + dy)\r\n \r\n return (bgEventsX, bgEventsY)", "def gen_new_phiw_div_phib_arr(N_PROCESSES, phiw_div_phib_arr_new, cond_GT, fcn_D, fcn_eta, z_div_L_arr, phiw_div_phib_arr, Pi_div_DLP_arr, weight, gp_arr, gm_arr, yt_arr, phi_yt_arr, ID_yt_arr, Ieta_yt_arr):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n membrane_geometry = cond_GT['membrane_geometry']\n \n Ny = size(yt_arr)\n # # Python allocate the name for phi_yt_arr[0], this is the same as reference value for C++ \" y= &x\"\n phi_arr_z0 = phi_yt_arr[0]\n Ieta_arr_z0= Ieta_yt_arr[0]\n ID_arr_z0 = ID_yt_arr[0]\n\n ind_z0 = 0 #z-index at inlet\n \n z0_div_L = 0. #z-coord at inlet\n \n r0_div_R = 0. #r-coord at the centerline of pipe\n rw_div_R = 1. 
#r-coord at the membrane wall\n \n vw_div_vw0_z0 = get_v_conv(rw_div_R, z0_div_L, Pi_div_DLP_arr[ind_z0], cond_GT, gp_arr[ind_z0], gm_arr[ind_z0])\n gen_phi_wrt_yt(z0_div_L, phiw_div_phib_arr[ind_z0]*phi_b, fcn_D, vw_div_vw0_z0, yt_arr, phi_arr_z0, cond_GT)\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, Ieta_arr_z0, fcn_eta, cond_GT)\n Ieta_arr_z0 /= Ieta_arr_z0[-1] # CHECK\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, ID_arr_z0, fcn_D, cond_GT)\n\n uZ_z0 = get_uZ_out(z0_div_L, cond_GT['k'], cond_GT['Bp'], cond_GT['Bm'], gp_arr[ind_z0], gm_arr[ind_z0])\n F2_0 = cal_F2_Z(vw_div_vw0_z0, ed, yt_arr, Ieta_arr_z0, ID_arr_z0, uZ_z0, membrane_geometry)\n\n Nz = size(z_div_L_arr)\n if (N_PROCESSES ==1):\n # when only single-processor is allocated\n for i in range(1, Nz):\n phiw_div_phib_arr_new[i] = process_at_zi(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\n else:\n # this uses multiprocessing packages\n import multiprocessing as mp\n \n pool = mp.Pool(N_PROCESSES)\n args_list = [(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\\\n for i in range(1, Nz)]\n phiw_div_phib_arr_new[1:] = pool.starmap(process_at_zi, args_list)\n pool.close()\n pool.join()\n\n cnt_EXCEED = 0 \n for i,x in enumerate(phiw_div_phib_arr_new):\n\n x = x*cond_GT['phi_bulk']\n if x > cond_GT['phi_freeze']:\n cnt_EXCEED += 1\n phiw_div_phib_arr_new[i] = cond_GT['phi_freeze']/cond_GT['phi_bulk'] # this prevent the accidently beyond the freezing concentration\n if(cnt_EXCEED>0):\n print('Warning: exceed phi_freeze %d times out of %d\\n'%(cnt_EXCEED, cond_GT['Nz']))\n\n FPI_operator(cond_GT['weight'], phiw_div_phib_arr, phiw_div_phib_arr_new, N_skip=1) # phiw(0) must be phib.\n\n return 0", "def fillSignalTemplates(opt):\n\n totalSig={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n data.AddFile(os.path.join(opt.input,opt.sig))\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel\n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n print '\\t',catName,categCut\n\n #signal modelling histograms\n histos=[]\n for name,pfix in [('sig_'+catName,''),('sig_%s_sigShape'%catName,'mix')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n wgtExpr='wgt*%f'%(SIGNALXSECS[opt.xangle]*opt.lumi)\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),\n '{0}*({1})'.format(wgtExpr,templCuts),\n 'goff')\n h=data.GetHistogram()\n histos.append( h.Clone(name) ) \n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalSig[icat]=h.Integral()\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n \n print '\\t total signal:',totalSig\n return totalSig,templates", "def make_alf_template():\n import alf.alf\n import fsps\n \n ssp = alf.alf.Alf()\n \n sp = fsps.StellarPopulation(zcontinuous=1)\n sp.params['logzsol'] = 0.2\n\n # Alf\n m = 
ssp.get_model(in_place=False, logage=0.96, zh=0.2, mgh=0.2)\n \n # FSPS\n w, spec = sp.get_spectrum(tage=10**0.96, peraa=True)\n \n # blue\n blue_norm = spec[w > 3600][0] / m[ssp.wave > 3600][0]\n red_norm = spec[w > 1.7e4][0] / m[ssp.wave > 1.7e4][0]\n \n templx = np.hstack([w[w < 3600], ssp.wave[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], w[w > 1.7e4]])\n temply = np.hstack([spec[w < 3600]/blue_norm, m[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], spec[w > 1.7e4]/red_norm])\n \n np.savetxt('alf_SSP.dat', np.array([templx, temply]).T, fmt='%.5e', header='wave flux\\nlogage = 0.96\\nzh=0.2\\nmgh=0.2\\nfsps: w < 3600, w > 1.7e4')", "def fillBackgroundTemplates(opt):\n\n totalBkg={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n for f in [os.path.join(opt.input,x) for x in os.listdir(opt.input) if 'Data13TeV' in x]:\n if 'MuonEG' in f : continue\n data.AddFile(f)\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel \n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n\n print '\\t',catName,categCut\n\n #background modelling histos\n histos=[]\n data_obs=None\n for name,pfix in [('bkg_'+catName,'mix'),('bkg_%s_bkgShape'%catName,'mixem')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),templCuts,'goff')\n h=data.GetHistogram()\n histos.append(h.Clone(name))\n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalBkg[icat]=h.Integral()\n if not opt.unblind :\n data_obs=h.Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n\n #observed data in this category if unblinding\n if opt.unblind:\n data.Draw('mmiss >> h({1},{2},{3})'.format(opt.nbins,opt.mMin,opt.mMax),categCut,'goff')\n data_obs=data.GetHistogram().Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n templates.append(data_obs)\n\n print '\\t total background:',totalBkg\n return totalBkg,templates", "def gvg(series, tmin=None, tmax=None, fill_method='linear', limit=8,\n output='mean', min_n_meas=2, min_n_years=8, year_offset='a'):\n return __gxg__(series, __mean_spring__, tmin=tmin, tmax=tmax,\n fill_method=fill_method, limit=limit, output=output,\n min_n_meas=min_n_meas, min_n_years=min_n_years,\n year_offset=year_offset)", "def gvg(series, tmin=None, tmax=None, fill_method='linear', limit=8,\n output='mean', min_n_meas=2, min_n_years=8, year_offset='a'):\n return __gxg__(series, __mean_spring__, tmin=tmin, tmax=tmax,\n fill_method=fill_method, limit=limit, output=output,\n min_n_meas=min_n_meas, min_n_years=min_n_years,\n year_offset=year_offset)", "def generate_random_linelist (teff,wv_bounds=(4500,5500),species_params=None,filepath=None):\n abund_offset_range = (-1,1)\n species_offset_range = (-1,1)\n ew_dist_width = 30\n ep_range = (0,12)\n loggf_range = (-6.0,0.5) \n \n theta = 5040.0/teff\n \n # # TODO: remove this calculation???\n # # # fix to a particular line which should be by the turnoff\n # # # Fe I 88.2 2.22 EP -4.2 loggf\n # loggf = 
-4.2\n # ep = 2.22\n # x_turnoff = abund_standard['Fe']['abundance']+loggf-theta*ep\n # x-x_turnoff = -5\n # \n # based on the model abundance used in the cog file\n xnorm = -6.5\n ynorm = -2.0\n \n # read in the parameters \n if species_params is None:\n species_params = _elements_params\n el_params = species_params.copy()\n for el,pars in _elements_params.items():\n el_params.setdefault(el,pars)\n \n\n coeffs, knots, centers, scales = np.array(cog_ppol_hf[\"coefficients\"]), np.array(cog_ppol_hf[\"knots\"]), np.array(cog_ppol_hf[\"centers\"]), np.array(cog_ppol_hf[\"scales\"])\n iqp = piecewise_polynomial.InvertiblePiecewiseQuadratic(coeffs, knots, centers=centers, scales=scales)\n iqp_deriv = iqp.deriv()\n \n # calc the linelist\n linelist = {}\n element_abund = {}\n for species,pars in list(species_params.items()):\n wvs = np.random.uniform(wv_bounds[0],wv_bounds[1],pars['n'])\n solar_abund_offset = np.random.uniform(*abund_offset_range)\n \n # get the abundance for this element, ignore species\n abund = abund_standard[species]['abundance']+solar_abund_offset\n element_abund.setdefault(abund_standard[species]['element'],abund) \n \n species_offset = np.random.uniform(*species_offset_range) \n species_abund = element_abund[abund_standard[species]['element']]+species_offset\n species_abund = np.repeat(species_abund,pars['n'])\n \n # generate the parameters for the lines\n spe_col = np.repeat(abund_standard.species_id(species),pars['n'])\n ew = np.random.exponential(ew_dist_width,pars['n'])\n ep = np.random.uniform(ep_range[0],ep_range[1],pars['n'])\n loggf = np.random.uniform(loggf_range[0],loggf_range[1],pars['n'])\n \n # calculate the line strengths from the COG\n #x = species_abund + loggf - theta*ep + xnorm\n logrw = np.log10(ew/wvs)\n x = iqp.inverse(logrw-ynorm)\n loggf = species_abund - x - theta*ep + xnorm\n\n # estimate the lorzentian and gaussian widths for this line\n lorz_width = estimate_lorentz_width(x, iqp_deriv)\n gauss_width = np.repeat(99.9,pars['n'])\n \n # add to the linelist\n linelist[species] = np.dstack((wvs,spe_col,ep,loggf,ew,gauss_width,lorz_width))[0]\n \n if filepath is not None:\n # save moog file\n f = open(filepath,'w')\n header = \"# Fake linelist created THIMBLES with teff {} # \"\n header += \"wvs species ep loggf ew gauss_width lorz_width # \"\n header += \"guassian and lorentzian widths are estimate\\n\"\n f.write(header.format(teff))\n \n fmt = \"{0:>9.5f} {1:>9.1f} {2:>9.2f} {3:>9.2f}\"+20*\" \"+\" {4:>9.2f}\"+10*\" \"\n fmt += \" {5:>9.2f} {6:>9.2f} FAKE_LINE\\n\"\n for species,ll in linelist.items():\n for row in ll:\n f.write(fmt.format(*row)) \n return linelist", "def pure_gabor():\n \n dots = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dot-images-coh1-2000ms-s02.p\", \"rb\" ), encoding='latin1') \n x = np.arange(-40, 41, 1)\n gaborx, gabory = make_gabors(x)\n centres = np.array([[200,200]])\n \n nf = dots.shape[2]\n nrf = centres.shape[0] # number of receptive fields\n ng = gaborx.shape[1] # number of gabors per receptive field\n \n # offsets (from RF centres) of subimages to multiply with kernels\n vw = int(np.floor(gabory.size/2))\n v_offsets = np.arange(-vw, vw+1)\n hw = int(np.floor(gaborx.shape[0]/2))\n h_offsets = np.arange(-hw, hw+1)\n \n result = np.zeros((nrf, ng, nf))\n for i in range(dots.shape[2]): \n for j in range(nrf): \n v_indices = v_offsets + centres[j,0]\n h_indices = h_offsets + centres[j,1]\n region = dots[v_indices[:,np.newaxis],h_indices,i]\n for k in range(ng): \n gabor = np.outer(gabory, gaborx[:,k])\n 
result[j,k,i] = np.sum(gabor * region)\n return result", "def get_iPTF16hgs(colorplt = False):\n z = 0.017\n ebv = 0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = pd.read_csv('../data/otherSN/iPTF16hgs/table1.txt', sep=\"\\t\")\n tb = tb.drop(columns=[\"Unnamed: 5\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Magnitude'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Magnitude'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Magnitude'].values])\n tb = tb.drop(columns=[\"Magnitude\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n t_max = 57691.59 # from the paper\n tb['tmax_of'] = tb['mjd'] - t_max\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n \"\"\"\n plt.errorbar(tb[\"tmax_rf\"].values[ixg], tb[\"mag\"].values[ixg], tb[\"emag\"].values[ixg], fmt=\".g\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixr], tb[\"mag\"].values[ixr], tb[\"emag\"].values[ixr], fmt=\".r\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixi], tb[\"mag\"].values[ixi], tb[\"emag\"].values[ixi], fmt=\".y\")\n \"\"\"\n tb = add_datecol(tb)\n tb = add_physcol(tb)\n #tb = tb.drop(columns=[\"datetime64\"])\n if colorplt==False:\n return tb\n else:\n #tb = tb[tb.mjd > 55352.5]\n #tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n 
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def generate_6D_Gaussian_bunch_matched(\n self, n_macroparticles, intensity, epsn_x, epsn_y, sigma_z=None, epsn_z=None\n ):\n if self.longitudinal_mode == 'linear':\n assert(sigma_z is not None)\n bunch = self.generate_6D_Gaussian_bunch(n_macroparticles, intensity,\n epsn_x, epsn_y, sigma_z)\n elif self.longitudinal_mode == \"non-linear\":\n epsx_geo = epsn_x / self.betagamma\n epsy_geo = epsn_y / self.betagamma\n\n injection_optics = self.transverse_map.get_injection_optics()\n\n bunch = generators.ParticleGenerator(\n macroparticlenumber=n_macroparticles,\n intensity=intensity,\n charge=self.charge,\n mass=self.mass,\n circumference=self.circumference,\n gamma=self.gamma,\n distribution_x=generators.gaussian2D(epsx_geo),\n alpha_x=injection_optics[\"alpha_x\"],\n beta_x=injection_optics[\"beta_x\"],\n D_x=injection_optics[\"D_x\"],\n distribution_y=generators.gaussian2D(epsy_geo),\n alpha_y=injection_optics[\"alpha_y\"],\n beta_y=injection_optics[\"beta_y\"],\n D_y=injection_optics[\"D_y\"],\n distribution_z=generators.RF_bucket_distribution(\n self.longitudinal_map.get_bucket(gamma=self.gamma),\n sigma_z=sigma_z,\n epsn_z=epsn_z,\n ),\n ).generate()\n else:\n raise ValueError('Unknown longitudinal mode!')\n\n return bunch", "def growth_curve(userinputs, filter, catalog):\n logging.info('Running growth curve analysis on {}'.format(catalog))\n # Load the photometry results from the catalog (that is returned by the phot\n # function)\n aper_st, flux_st = np.loadtxt(catalog, unpack=True, usecols=(0,3))\n\n #Growth curve is only done on the ref image so we get the filter from userinp.\n ref_filter = filter\n\n ratio_st = np.empty(len(aper_st))\n\n #number of apertures\n naper = 20\n\n # Calculate the number of stars, make sure it is an integer\n nstar = int(len(aper_st)/naper)\n logging.info('Number of stars used: {}'.format(nstar))\n aper_ind = naper - 1\n\n for k in range(nstar):\n\n for i in range(naper):\n\n ratio_st[i + k*naper] = flux_st[i + k*naper]/flux_st[aper_ind + k*naper]\n\n\n # Find median ratio at each aperture between all the stars and all the clusters\n med_st = np.empty(naper)\n\n for i in range(naper):\n\n med_st[i] = np.median(ratio_st[i::naper])\n\n\n # Plot growth curves\n logging.info('Creating Growth curve plots')\n fig = plt.figure(figsize = (7,7))\n\n aper_x = np.arange(naper) + 1\n\n for i in range(nstar):\n\n ratio_y = ratio_st[i*naper:(i + 1)*naper]\n plt.plot(aper_x, ratio_y, 'y-')\n plt.annotate(str(i + 1), xy=(8.0, ratio_y[7]),\n horizontalalignment='left', verticalalignment='top', fontsize=6)\n\n\n plt.plot(aper_x, med_st, 'r-' , linewidth=4.0)\n plt.hlines(0.5, 0, 20, color='black', linewidth=2, zorder=10)\n plt.vlines(4, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(5, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(6, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n\n plt.ylabel('Normalized Flux ' + ref_filter.upper())\n plt.xlabel('Radius (pix)')\n plt.xlim(1,20)\n plt.minorticks_on()\n\n fig.savefig(userinputs['OUTDIR'] + '/plots/plot_growth_curve_{}.pdf'.format(ref_filter))", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def 
generate_6D_Gaussian_bunch(\n self, n_macroparticles, intensity, epsn_x, epsn_y, sigma_z\n ):\n if self.longitudinal_mode == \"linear\":\n check_inside_bucket = lambda z, dp: np.array(len(z) * [True])\n Q_s = self.longitudinal_map.Q_s\n elif self.longitudinal_mode == \"non-linear\":\n bucket = self.longitudinal_map.get_bucket(\n gamma=self.gamma, mass=self.mass, charge=self.charge\n )\n check_inside_bucket = bucket.make_is_accepted(margin=0.05)\n Q_s = bucket.Q_s\n else:\n raise NotImplementedError(\"Something wrong with self.longitudinal_mode\")\n\n eta = self.longitudinal_map.alpha_array[0] - self.gamma ** -2\n beta_z = np.abs(eta) * self.circumference / 2.0 / np.pi / Q_s\n sigma_dp = sigma_z / beta_z\n epsx_geo = epsn_x / self.betagamma\n epsy_geo = epsn_y / self.betagamma\n\n injection_optics = self.transverse_map.get_injection_optics()\n\n bunch = generators.ParticleGenerator(\n macroparticlenumber=n_macroparticles,\n intensity=intensity,\n charge=self.charge,\n mass=self.mass,\n circumference=self.circumference,\n gamma=self.gamma,\n distribution_x=generators.gaussian2D(epsx_geo),\n alpha_x=injection_optics[\"alpha_x\"],\n beta_x=injection_optics[\"beta_x\"],\n D_x=injection_optics[\"D_x\"],\n distribution_y=generators.gaussian2D(epsy_geo),\n alpha_y=injection_optics[\"alpha_y\"],\n beta_y=injection_optics[\"beta_y\"],\n D_y=injection_optics[\"D_y\"],\n distribution_z=generators.cut_distribution(\n generators.gaussian2D_asymmetrical(sigma_u=sigma_z, sigma_up=sigma_dp),\n is_accepted=check_inside_bucket,\n ),\n ).generate()\n\n return bunch", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def make_egge(w,minZ,maxZ,ires=1,m=mz0):\n cmds = []\n # coefficients for the amplitudes\n cmds.append(\"A[1,0,1000000]\")\n cmds.append(\"B[1,0,1000000]\")\n cmds.append(\"C[10000.0,0,1000000]\")\n # amplitudes\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('g[8,0,100]')\n denom = '((x^2-m^2)^2+g^2*m^2)'\n cmds.append(\"expr::z_rbw('x^2/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_int('(x^2-m^2)/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_rad('1/(x^2+1)',x)\")\n # 
resolution model\n cmds += resolutions[ires]()\n [w.factory(cmd) for cmd in cmds]\n # sum-of-amplitudes pdf\n lshape = RooRealSumPdf('lshape','lshape',RooArgList(w.function('z_rad'),w.function('z_int'),w.function('z_rbw')),RooArgList(w.var('A'),w.var('B'),w.var('C')))\n getattr(w,'import')(lshape)\n # convolution\n pdf = w.pdf('lshape')\n if w.pdf('res'):\n w.var('x').setBins(10000,'cache')\n cmd = 'FCONV::sum(x,lshape,res)'\n w.factory(cmd)\n pdf = w.pdf('sum')\n return pdf, kFALSE", "def write_gdfs(self):\n for cat, gdf in self.inventory.gdfs.items():\n info = self.inventory.emission_infos[cat]\n for sub in self.inventory.substances:\n source_group = self.source_groups[(sub, cat)]\n if sub not in gdf.columns:\n continue\n\n mask_polygons = gdf.geom_type.isin([\"Polygon\", \"MultiPolygon\"])\n if any(mask_polygons):\n gdf_polygons = gdf.loc[mask_polygons]\n self._write_polygons(\n gdf_polygons.geometry, gdf_polygons[sub], info, source_group\n )\n\n mask_points = gdf.geom_type == \"Point\"\n if any(mask_points):\n gdf_points = gdf.loc[mask_points]\n self._add_points(\n gdf_points.geometry, gdf_points[sub], info, source_group\n )\n\n mask_lines = gdf.geom_type.isin([\"LineString\"])\n if any(mask_lines):\n gdf_lines = gdf.loc[mask_lines]\n self._write_lines(\n gdf_lines.geometry, gdf_lines[sub], info, source_group\n )\n\n mask_multilines = gdf.geom_type.isin([\"MultiLineString\"])\n if any(mask_multilines):\n gdf_multilines = gdf.loc[mask_multilines]\n # Split all the multilines into lines\n for shape, shape_emission in zip(\n gdf_multilines.geometry, gdf_multilines[sub]\n ):\n lenghts = np.array([line.length for line in shape.geoms])\n proprtions = lenghts / shape.length\n for line, prop in zip(shape.geoms, proprtions):\n self._write_line(\n line, shape_emission * prop, info, source_group\n )\n mask_missing = ~(\n mask_multilines | mask_lines | mask_points | mask_polygons\n )\n if any(mask_missing):\n raise NotImplementedError(\n f\"Shapes of type: '{gdf.loc[mask_missing].geom_type.unique()}'\"\n \" are not implemented.\"\n )\n\n # Write all the points as a singl batch\n pd.concat(self.points_dfs).to_csv(\n self.file_points, mode=\"a\", index=False,\n )", "def addGaussian(ax, ismulti):\n shape = (96, 288) #ax.shape[:2]\n intensity_noise = np.random.uniform(low=0, high=0.05)\n if ismulti:\n ax[:,:,0] = ax[:,:,0]*(1+ intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1]))\n else:\n ax[:,:,0] = ax[:,:,0] + intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1])\n return ax", "def dfluxes(wavelength, s, line1, line2, lowlow= 25, lowhigh=15, highlow=15, highhigh = 25, \n lmin=0, lmax=0, fmin=0, fmax=0,\n broad1=2.355, broad2=2.355, sus_line1=True, sus_line2=True,\n plot=True, verbose=True, plot_sus = False, fcal = True, \n fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1, \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line1-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line2+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n \n if np.nanmedian(f_spec) == np.nan: print(\" NO HAY DATOS.... 
todo son NANs!\")\n\n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n\n # We have to find some \"guess numbers\" for the Gaussian\n # Now guess_centre is line\n guess_centre1 = line1\n guess_centre2 = line2 \n guess_centre = (guess_centre1+guess_centre2)/2. \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n\n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n\n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = b*np.array(w_cont)+ a \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line1)\n mini = np.nanmin(min_w)\n guess_peak1 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n min_w = np.abs(np.array(w_spec)-line2)\n mini = np.nanmin(min_w)\n guess_peak2 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n\n # Search for beginning/end of emission line, choosing line +-10 \n # 28th Feb 2019: Check central value between low_limit and high_limit\n\n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n\n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if 
f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n low_limit = ws[sorted_by_flux[0]]\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n high_limit = ws[sorted_by_flux[0]] \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre1, guess_peak1, broad1/2.355, guess_centre2, guess_peak2, broad2/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(dgauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n\n\n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... just fix FWHM= (high_limit-low_limit)/2.5 )\n\n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1 or fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:\n if warnings: \n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1: \n print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2: \n print(\" Fitted center wavelength\", fit[3],\"is NOT in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n print(\" Fit failed!\")\n \n fit[0]=guess_centre1\n fit_error[0] = 0.000001\n fit[1]=guess_peak1\n fit_error[1] = 0.000001\n fit[2] = broad1/2.355\n fit_error[2] = 0.000001 \n fit[3]=guess_centre2\n fit_error[3] = 0.000001\n fit[4]=guess_peak2\n fit_error[4] = 0.000001\n fit[5] = broad2/2.355\n fit_error[5] = 0.000001\n else:\n if warnings: print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if warnings: print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n \n\n if warnings: \n print(\" Fit parameters = \", fit[0], fit[1], fit[2]) \n print(\" \", fit[3], fit[4], fit[5])\n if fit[2] == broad1/2.355 and warnings == True : \n print(\" WARNING: Fit 
in\",fit[0],\"failed! Using given centre wavelengths (cw), peaks at (cv) & sigmas=broad/2.355 given.\") # CHECK THIS \n\n gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2],fit[3], fit[4], fit[5])\n \n gaussian_1 = gauss(w_spec, fit[0], fit[1], fit[2])\n gaussian_2 = gauss(w_spec, fit[3], fit[4], fit[5])\n \n\n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. Width calculations # CHECK THIS , not well done for dfluxes !!!\n \n gaussian_flux_1 = gauss_flux(fit[1],fit[2])\n gaussian_flux_2 = gauss_flux(fit[4],fit[5]) \n gaussian_flux = gaussian_flux_1+ gaussian_flux_2 \n if warnings: \n print(\" Gaussian flux = \", gaussian_flux_1, \" + \",gaussian_flux_2,\" = \",gaussian_flux)\n print(\" Gaussian ratio = \", gaussian_flux_1/gaussian_flux_2)\n \n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n #Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"blue\", lw=2, alpha = 0.7)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre1, color='r', linestyle='-', alpha=0.5)\n plt.axvline(x=guess_centre2, color='r', linestyle='-', alpha=0.5)\n\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n # Plot Gaussians + cont\n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.5, lw=3) \n plt.plot(w_spec, gaussian_1+continuum, color=\"navy\",linestyle='--', alpha=0.8)\n 
plt.plot(w_spec, gaussian_2+continuum, color=\"#1f77b4\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, np.array(f_spec)-(gaussian_fit), 'orange', alpha=0.4, linewidth=5) \n\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n plt.title('Double Gaussian Fit') # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show()\n plt.close()\n \n # Plot residuals\n# plt.figure(figsize=(10, 1))\n# plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n# plt.ylabel(\"RMS\")\n# plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n# plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n# plt.plot(w_spec, residuals, 'k')\n# plt.minorticks_on()\n# plt.show()\n# plt.close()\n\n \n # Printing results\n if verbose :\n #print \"\\n> WARNING !!! CAREFUL WITH THE VALUES PROVIDED BELOW, THIS TASK NEEDS TO BE UPDATED!\\n\"\n print(\"\\n> Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # New 22 Jan 2019: sustract Gaussian fit\n index=0\n s_s=np.zeros_like(s)\n sustract_this = np.zeros_like(gaussian_fit)\n if sus_line1:\n sustract_this = sustract_this + gaussian_1\n if sus_line2:\n sustract_this = sustract_this + gaussian_2 \n \n \n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-sustract_this[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-sustract_this[index]\n index=index+1\n if plot_sus: \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,s, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n \n # This gaussian_flux in 3 is gaussian 1 + gaussian 2, given in 15, 16, respectively\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s, fit[3], fit[4],fit[5], gaussian_flux_1, gaussian_flux_2 ]\n return resultado \n except Exception:\n if verbose: print(\" Double Gaussian fit failed!\")\n resultado = [0, line1, 0, 0, 0, 0, 0, 0, 0, 0, 0, s, 0, 0, 0, 0, 0 ] # line was identified at lambda=line but Gaussian fit failed\n\n # NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...\n\n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n# plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n\n\n return resultado", "def line_sSFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n \n L_line = getattr(GR,'L_'+p.line+'_sun')#[0:100]\n SFR = getattr(GR,'SFR')#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[0:100]\n R_gas = getattr(GR,'R2_gas')#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[0:100]\n M_star = getattr(GR,'M_star')#[0:100]\n\n # Take only MS galaxies?\n if p.select == '_MS':\n indices = 
aux.select_salim18(GR.M_star,GR.SFR)\n L_line = L_line[indices]\n SFR = SFR[indices]\n Zsfr = Zsfr[indices]\n print('With MS selection criteria: only %i galaxies' % (len(L_line)))\n\n SFR = SFR[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n M_star = M_star[L_line > 0]\n sSFR = SFR/M_star\n L_line = L_line[L_line > 0]\n\n print('%i data points ' % (len(L_line)))\n\n labs = {'_M10':'Mach=10 power-law',\\\n '_arepoPDF_dim':'AREPO parametric PDF with extinction',\\\n '_arepoPDF':'AREPO parametric PDF'}\n lab = labs[p.table_ext]\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)\n m = ax.scatter(sSFR[np.argsort(Sigma_M_H2)],L_line[np.argsort(Sigma_M_H2)],marker='o',s=20,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=3.5,label=lab,alpha=0.6,zorder=10)\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/kpc$^2$]',size=15)\n else:\n m = ax.scatter(sSFR,L_line,marker='o',s=20,\\\n c=Zsfr,label=lab,alpha=0.6,zorder=10)\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'$\\langle Z\\rangle_{\\mathrm{SFR}}$ [Z$_{\\odot}$]',size=15)\n\n if p.add_obs:\n add_line_sSFR_obs(p.line,L_line,ax,select=p.select)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel(getlabel('sSFR'))\n ax.set_ylabel(getlabel(p.line))\n handles,labels = ax.get_legend_handles_labels()\n handles = np.flip(handles)\n labels = np.flip(labels)\n # ax.legend(handles,labels,loc='upper left',fontsize=7)\n ax.legend(handles,labels,loc='lower right',fontsize=7,frameon=True,framealpha=0.5) \n print(np.min(sSFR),np.max(sSFR))\n if not p.xlim: p.xlim = 10.**np.array([-13,-7])\n if not p.ylim: \n p.ylim = [np.median(L_line)/1e6,np.median(L_line)*1e4]\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.grid(ls='--')\n\n if p.savefig & (not p.add):\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/%s_sSFR.png' % p.line, format='png', dpi=300)", "def preparehspiceidvg(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,NFINparam, DEVTYPEparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam',NFINparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'DEVTYPEparam',DEVTYPEparam)", "def timinggrid(self):\n\n gelem = Element(\"g\") # create a group\n for i in range(int(self.cycles)):\n\n lelem = Element(\"line\")\n lelem.attrib['x1'] = str(i*self.period + self.period/2.0 + self.xzero)\n 
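            # vertical grid line for clock cycle i: centred at mid-period in x, spanning from y=0 to the bottom of the stacked signals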
lelem.attrib['y1'] = str(0);\n lelem.attrib['x2'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y2'] = str(self.signalcnt*(self.height + self.signalspacing) + self.signalspacing)\n lelem.attrib['stroke'] = \"grey\"\n lelem.attrib['stroke-width'] = \"0.5\"\n gelem.append(lelem)\n\n \n self.svgelem.append(gelem)\n self.svgelem.append(self.signalselem)", "def fit_gaussian(array):\n\n shape = array.shape\n xmean, ymean = numpy.array(shape) / 2.\n\n xx, yy = numpy.mgrid[:shape[0], :shape[1]]\n\n g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,\n x_stddev=1., y_stddev=1.)\n\n f2 = astropy.modeling.fitting.LevMarLSQFitter()\n\n gg = f2(g_init, xx, yy, array)\n\n return gg", "def gendata(params,xmin,xmax,npts=4000):\n F = lorentzian.ForwardFactory\n def gensample(F, xmin, xmax):\n from numpy import arange\n import random\n a = arange(xmin, xmax, (xmax-xmin)/200.)\n ymin = 0\n ymax = F(a).max()\n while 1:\n t1 = random.random() * (xmax-xmin) + xmin\n t2 = random.random() * (ymax-ymin) + ymin\n t3 = F(t1)\n if t2 < t3:\n return t1\n fwd = F(params)\n return array([gensample(fwd, xmin,xmax) for i in xrange(npts)])", "def synthetic_gen(self):\r\n logging.debug('generating synthetic map...')\r\n data = self.realData\r\n unit = Params.unitGrid\r\n x_min = np.floor(Params.LOW[0] / unit) * unit\r\n x_max = np.ceil(Params.HIGH[0] / unit) * unit\r\n y_min = np.floor(Params.LOW[1] / unit) * unit\r\n y_max = np.ceil(Params.HIGH[1] / unit) * unit\r\n\r\n x_CELL = int(np.rint((x_max - x_min) / unit))\r\n y_CELL = int(np.rint((y_max - y_min) / unit))\r\n\r\n self.root.n_box = np.array([[x_min, y_min], [x_max, y_max]])\r\n\r\n self.mapp = np.zeros((x_CELL, y_CELL)) - 1 # ## initialize every cell with -1\r\n for i in range(Params.NDATA): # ## populate the map\r\n point = data[:, i]\r\n cell_x = int(np.floor((point[0] - x_min) / unit))\r\n cell_y = int(np.floor((point[1] - y_min) / unit))\r\n if self.mapp[cell_x, cell_y] != -1:\r\n self.mapp[cell_x, cell_y] += 1\r\n else:\r\n self.mapp[cell_x, cell_y] = 1\r\n\r\n for i in range(x_CELL): # ## perturb the counts\r\n for j in range(y_CELL):\r\n if self.mapp[i, j] != -1:\r\n self.mapp[i, j] += np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n else:\r\n self.mapp[i, j] = np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n # if noisy count is negative, ignore the noise and generate no points\r\n if self.mapp[i, j] < 0:\r\n self.mapp[i, j] = 0", "def create_gaussian_array(self):\n\n # Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array", "def line_SFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n if p.line == 'CO(2-1)': p.select = 'Zsfr'\n\n GR = glo.global_results(sim_run=p.sim_run,nGal=p.nGal)\n \n marker = 'o'\n if p.sim_run == p.sim_runs[0]: marker = '^'\n\n L_line = getattr(GR,'L_'+p.line+'_sun')#[380:400]#[0:100]\n SFR = getattr(GR,'SFR')#[380:400]#[0:100]\n M_star = getattr(GR,'M_star')#[380:400]#[0:100]\n # G0_mw = getattr(GR,'F_FUV_mw')#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[380:400]#[0:100]\n R_gas = getattr(GR,'R2_gas')#[380:400]#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[380:400]#[0:100]\n # 
if 'CO' in p.line: p.select = 'Sigma_M_H2'\n\n # Take only MS galaxies?\n if p.MS == True:\n indices = aux.select_salim18(GR.M_star,GR.SFR)\n L_line = L_line[indices]\n SFR = SFR[indices]\n M_star = M_star[indices]\n Zsfr = Zsfr[indices]\n R_gas = R_gas[indices]\n M_H2 = M_H2[indices]\n print('With MS selection criteria: only %i galaxies' % (len(L_line)))\n\n # Just selection of galaxies\n #SFR = SFR[0:10]\n #Zsfr = Zsfr[0:10]\n #R_gas = R_gas[0:10]\n #M_H2 = M_H2[0:10]\n #L_line = L_line[0:10]\n #M_star = M_star[0:10]\n\n SFR = SFR[L_line > 0]\n M_star = M_star[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n L_line = L_line[L_line > 0]\n print('%i data points ' % (len(L_line)))\n\n lSFR = np.log10(SFR)\n lL_line = np.log10(L_line)\n\n\n # plt.plot(np.log10(M_star),np.log10(SFR),'o')\n # s = aseg\n\n labs = {'_100Mpc_M10':'Mach=10 power-law',\\\n '_100Mpc_arepoPDF_CMZ':'SIGAME v3',\\\n '_25Mpc_arepoPDF_M51':'SIGAME v3 (Simba-25)',\\\n '_100Mpc_arepoPDF_M51':'SIGAME v3 (Simba-100)'}\n lab = labs[p.sim_run+p.table_ext]\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)/1e6 # per pc^-2\n m = ax.scatter(lSFR[np.argsort(Sigma_M_H2)],lL_line[np.argsort(Sigma_M_H2)],marker=marker,s=14,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=-2.5,vmax=2.2,label=lab,alpha=0.5,zorder=10)\n p.vmin = np.log10(Sigma_M_H2.min())\n p.vmax = np.log10(Sigma_M_H2.max())\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/pc$^2$]',size=15)\n if p.select == 'M_star':\n m = ax.scatter(lSFR[np.argsort(M_star)],lL_line[np.argsort(M_star)],marker=marker,s=8,\\\n c=np.log10(M_star[np.argsort(M_star)]),vmin=-2.5,vmax=2.2,label=lab,alpha=0.5,zorder=10)\n # Just one galaxy\n # m = ax.scatter(lSFR,lL_line,marker=marker,s=15,\\\n # c=np.log10(Sigma_M_H2),vmin=-2.5,vmax=2.2,label=lab,alpha=1,zorder=10)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $M_{star}$ [M$_{\\odot}$]',size=15)\n if p.select == 'Zsfr':\n print('min and max Zsfr in sims: ',Zsfr.min(),Zsfr.max())\n p.vmin = np.log10(0.01)\n p.vmax = np.log10(3)\n m = ax.scatter(lSFR,lL_line,marker=marker,s=20,\\\n c=np.log10(Zsfr),label=lab,alpha=0.6,zorder=10,vmin=p.vmin,vmax=p.vmax)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\langle Z\\rangle_{\\mathrm{SFR}}$ [Z$_{\\odot}$]',size=15)\n if p.select == 'F_FUV_mw':\n m = ax.scatter(lSFR,lL_line,marker=marker,s=20,\\\n c=np.log10(G0_mw),label=lab,alpha=0.6,zorder=10)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label='log ' + getlabel('G0'),size=15)\n if p.select == 'f_HII':\n f_HII[f_HII == 0] = np.min(f_HII[f_HII > 0])\n m = ax.scatter(lSFR[np.argsort(f_HII)],lL_line[np.argsort(f_HII)],marker=marker,s=20,\\\n c=np.log10(f_HII[np.argsort(f_HII)]),label=lab,alpha=0.6,zorder=10)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label='log HII region fraction',size=15)\n\n\n # Label galaxies?\n # for i in range(len(SFR)):\n # if SFR[i] > 0:\n # ax.text(SFR[i],L_line[i],'G%i' % GR.gal_num[i],fontsize=7)\n\n if p.add_obs:\n if (p.select == 'Zsfr') | (p.select == 'Sigma_M_H2'): \n add_line_SFR_obs(p.line,L_line,ax,select=p.select,vmin=p.vmin,vmax=p.vmax)\n else:\n add_line_SFR_obs(p.line,L_line,ax,select=p.select)\n\n ax.set_xlabel('log ' + getlabel('SFR'))\n ax.set_ylabel('log ' + getlabel(p.line))\n handles,labels = ax.get_legend_handles_labels()\n handles = 
np.flip(handles)\n labels = np.flip(labels)\n if ('CO' in p.line) | ('[OI]' in p.line): \n ax.legend(handles,labels,loc='upper left',fontsize=7,frameon=True,framealpha=0.5)\n else:\n ax.legend(handles,labels,loc='lower right',fontsize=7,frameon=True,framealpha=0.5)\n if not p.xlim: p.xlim = np.array([-3,4])\n if not p.ylim: \n p.ylim = [np.median(lL_line) - 5,np.median(lL_line) + 3]\n if p.line == '[OI]63': p.ylim = [np.median(lL_line) - 5,np.median(lL_line) + 4]\n if 'CO' in p.line: p.ylim = [np.median(lL_line) - 4,np.median(lL_line) + 4]\n\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.grid(ls='--')\n\n if p.savefig & (not p.add):\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/%s_SFR.png' % p.line, format='png', dpi=300)", "def airglow_line_components(self, vaclines, wave_range, disp_range):\n\n AA = []\n for line in vaclines:\n AA.append(np.exp(-0.5*((wave_range-line)/disp_range)**2))\n return np.vstack(AA)", "def SC_generation(hourly_radiation, prop_observers, number_groups, weather_data, g, Sz, Az, ha, Tin_C, height,\n panel_properties, latitude):\n\n\n n0 = panel_properties['n0']\n c1 = panel_properties['c1']\n c2 = panel_properties['c2']\n mB0_r = panel_properties['mB0_r']\n mB_max_r = panel_properties['mB_max_r']\n mB_min_r = panel_properties['mB_min_r']\n C_eff = panel_properties['C_eff']\n t_max = panel_properties['t_max']\n IAM_d = panel_properties['IAM_d']\n Aratio = panel_properties['aperture_area_ratio']\n Apanel = panel_properties['module_area']\n dP1 = panel_properties['dP1']\n dP2 = panel_properties['dP2']\n dP3 = panel_properties['dP3']\n dP4 = panel_properties['dP4']\n Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK\n\n # create lists to store results\n list_results = [None] * number_groups\n list_areas_groups = [None] * number_groups\n Sum_mcp_kWperC = np.zeros(8760)\n Sum_qout_kWh = np.zeros(8760)\n Sum_Eaux_kWh = np.zeros(8760)\n Sum_qloss = np.zeros(8760)\n Sum_radiation_kWh = np.zeros(8760)\n\n Tin_array_C = np.zeros(8760) + Tin_C\n aperature_area_per_module = Aratio * Apanel\n total_area_module = prop_observers['total_area_module'].sum() # total area for panel installation\n\n # calculate equivalent length of pipes\n lv = panel_properties['module_length'] # module length\n number_modules = round(total_area_module/Apanel) # this is an estimation\n l_ext_mperm2 = (2 * lv * number_modules/ (total_area_module * Aratio)) # pipe length within the collectors\n l_int_mperm2 = 2 * height / (total_area_module * Aratio) # pipe length from building substation to roof top collectors\n Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture\n\n if panel_properties['type'] == 'ET': # for evacuated tubes\n Nseg = 100 # default number of subsdivisions for the calculation\n else:\n Nseg = 10 # default number of subsdivisions for the calculation\n\n for group in range(number_groups):\n # load panel angles from group\n teta_z = prop_observers.loc[group, 'surface_azimuth'] # azimuth of panels of group\n area_per_group = prop_observers.loc[group, 'total_area_module']\n tilt_angle_deg = prop_observers.loc[group, 'tilt'] # tilt angle of panels\n\n # create dataframe with irradiation from group\n\n radiation_Wh = pd.DataFrame({'I_sol': hourly_radiation[group]})\n radiation_Wh['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wh.I_sol # calculate diffuse radiation\n radiation_Wh['I_direct'] = radiation_Wh['I_sol'] - radiation_Wh['I_diffuse'] # calculate direct radiation\n radiation_Wh.fillna(0, 
inplace=True) # set nan to zero\n\n # calculate incidence angle modifier for beam radiation\n IAM_b = calc_IAM_beam_SC(Az, g, ha, teta_z, tilt_angle_deg, panel_properties['type'], Sz, latitude)\n\n # calculate heat production from a solar collector of each group\n list_results[group] = calc_SC_module(tilt_angle_deg, IAM_b, IAM_d, radiation_Wh.I_direct,\n radiation_Wh.I_diffuse, weather_data.drybulb_C, n0,\n c1, c2, mB0_r, mB_max_r, mB_min_r, C_eff, t_max,\n aperature_area_per_module, dP1, dP2, dP3, dP4,\n Cp_fluid_JperkgK, Tin_C, Leq_mperm2, l_ext_mperm2,\n l_int_mperm2, Nseg)\n\n\n # multiplying the results with the number of panels in each group and write to list\n number_modules_per_group = area_per_group / Apanel\n list_areas_groups[group] = area_per_group\n radiation_array = hourly_radiation[group] * list_areas_groups[group] / 1000 # kWh\n Sum_qout_kWh = Sum_qout_kWh + list_results[group][1] * number_modules_per_group\n Sum_Eaux_kWh = Sum_Eaux_kWh + list_results[group][2] * number_modules_per_group\n Sum_qloss = Sum_qloss + list_results[group][0] * number_modules_per_group\n Sum_mcp_kWperC = Sum_mcp_kWperC + list_results[group][5] * number_modules_per_group\n Sum_radiation_kWh = Sum_radiation_kWh + radiation_Wh['I_sol']*area_per_group/1000\n\n Tout_group_C = (Sum_qout_kWh / Sum_mcp_kWperC) + Tin_C # in C assuming all collectors are connected in parallel\n\n Final = pd.DataFrame(\n {'Q_SC_gen_kWh': Sum_qout_kWh, 'T_SC_sup_C': Tin_array_C, 'T_SC_re_C': Tout_group_C, 'mcp_SC_kWperC': Sum_mcp_kWperC, 'Eaux_SC_kWh': Sum_Eaux_kWh,\n 'Q_SC_l_kWh': Sum_qloss, 'Area_SC_m2': sum(list_areas_groups), 'radiation_kWh': Sum_radiation_kWh}, index=range(8760))\n\n return list_results, Final", "def gaussian_process_TIE(fs, *erps, **kwerps):\n gptie = GPTIE(fs, *erps, **kwerps)\n phase = gptie()\n return phase", "def make_gaussian_sources_image(shape, source_table, oversample=1):\n model = models.Gaussian2D(x_stddev=1, y_stddev=1)\n\n if 'x_stddev' in source_table.colnames:\n xstd = source_table['x_stddev']\n else:\n xstd = model.x_stddev.value # default\n if 'y_stddev' in source_table.colnames:\n ystd = source_table['y_stddev']\n else:\n ystd = model.y_stddev.value # default\n\n colnames = source_table.colnames\n if 'flux' in colnames and 'amplitude' not in colnames:\n source_table = source_table.copy()\n source_table['amplitude'] = (source_table['flux']\n / (2.0 * np.pi * xstd * ystd))\n\n return make_model_sources_image(shape, model, source_table,\n oversample=oversample)", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # 
after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def gen_data(self, s_gen, pg=None):\n # Generate Path\n if pg is None:\n pg = self.pg\n path = pg.gen_path()\n xr = path[0][np.newaxis, :].astype('float32')\n yr = path[1][np.newaxis, :].astype('float32')\n\n s_gen = s_gen.astype('float32')\n self.calculate_inner_products(s_gen, xr, yr)\n\n r = self.tc.spikes(s_gen, xr, yr)[0]\n if self.print_mode:\n print 'The mean firing rate is {:.2f}'.format(\n r.mean() / self.dt)\n\n if self.save_mode:\n self.build_param_and_data_dict(s_gen, xr, yr, r)\n\n return xr, yr, r", "def source_adj_gsdf(gmdata_sim,gmdata_obs,IsolationFilter,num_pts,dt): \n t = np.arange(num_pts)*dt\n ts=np.flip(-t[1:], axis=0)\n lTime = np.concatenate((ts,t), axis=0)#Lag time \n \n #convolve the waveforms for the cross- and auto-correlagrams \n cross = np.correlate(IsolationFilter,gmdata_obs,'full')\n auto = np.correlate(IsolationFilter,gmdata_sim,'full') \n \n #GSDF Parameters \n w0=2*np.pi/(lTime[-1]) \n# wN=2*np.pi/(2*dt)\n# w(:,1)=-wN:w0:wN\n wf=w0*np.linspace(-int(num_pts/2),int(num_pts/2),num_pts) \n fi = [0.05, 0.075, 0.1]\n# fi = [0.02, 0.03, 0.04, 0.05]\n# fi = [0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2]\n Tw = 2/np.mean(fi) # Effective window\n# sw = 2*np.pi*0.72/Tw; # Sigma w ~ 0.2827433388230814\n sw=0.1 \n \n# #% A local maximum will be selected closest to 0-lag\n# I_O=np.argmax(cross)\n# I_S=np.argmax(auto) \n\n I_O, peaks_O = find_peaks(np.abs(hilbert(cross))/np.max(np.abs(hilbert(cross))), height=0.25)\n I_S, peaks_S = find_peaks(np.abs(hilbert(auto))/np.max(np.abs(hilbert(auto))), height=0.25)\n\n PkO = peaks_O.get(\"peak_heights\", \"\")\n PkS = peaks_S.get(\"peak_heights\", \"\")\n\n if (I_O==[] or I_S==[]):\n I_O=np.argmax(cross)\n I_S=np.argmax(auto)\n else:\n I_O_min = np.argmin(np.multiply((1+np.abs(lTime[I_O]))**2,np.abs(1-PkO)))\n I_O = I_O[I_O_min]\n\n I_S_min = np.argmin(np.multiply((1+np.abs(lTime[I_S]))**2,np.abs(1-PkS)))\n I_S = I_S[I_S_min]\n \n ##Windowing\n win1=np.exp(-(0.5*sw**2)*(lTime-lTime[I_O])**2)\n win2=np.exp(-(0.5*sw**2)*(lTime-lTime[I_S])**2) \n \n #\n WO = np.multiply(win1,cross)\n 
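    # taper the cross- and auto-correlagrams with the Gaussian windows centred on the picked peaks before the narrow-band filtering below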
WS = np.multiply(win2,auto)\n WS = WS*np.max(WO)/np.max(WS) #Normalized window by amplitude\n #% Parameters for \"bootstraping\"\n InOR=np.argmax(WO)\n InSR=np.argmax(WS) \n \n #% Isolation filter FFT for perturbation kernel\n tff=np.conj(fftshift(fft(IsolationFilter)))*1/num_pts \n \n adj_sim_decompose = np.zeros((len(fi),num_pts))\n adj_sim_sum = np.zeros(num_pts)\n TauP_arr = np.zeros(len(fi)) \n \n ne = int(np.min([2/np.min(fi)/dt,num_pts/2])) #% Effective bandwidth for inversion\n \n for i in range(0,len(fi)): \n si = 0.1*fi[i]\n #Crosscorrelagram and Autocorrelagram filtering\n dO=computebandfftfilter_gauss(WO,dt,fi[i],si,lTime);\n dS=computebandfftfilter_gauss(WS,dt,fi[i],si,lTime); \n \n # % Check bootstraping\n InO=np.argmax(np.real(dO))\n InS=np.argmax(np.real(dS)) \n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InO=int(InO)\n if (lTime[InO] < lTime[InOR]+0.51/fi[i]) and (lTime[InO] >= lTime[InOR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InO] >= (lTime[InOR]+0.45/fi[i])):\n InO=InO-np.round(1/fi[i]/dt)\n elif (lTime[InO] < lTime[InOR]-0.45/fi[i]):\n InO=InO+np.round(1/fi[i]/dt)\n Cn = Cn+1\n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InS=int(InS) \n if (lTime[InS] < lTime[InSR]+0.51/fi[i]) and (lTime[InS] >= lTime[InSR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InS] >= (lTime[InSR]+0.45/fi[i])):\n InS=InS-np.round(1/fi[i]/dt)\n elif (lTime[InS] < lTime[InSR]-0.45/fi[i]):\n InS=InS+np.round(1/fi[i]/dt)\n Cn = Cn+1 \n\n # Five parameter Gaussian wavelet fitting \n Ao = np.max(envelope(np.real(dO))); Io = np.argmax(envelope(np.real(dO)));\n As = np.max(envelope(np.real(dS))); Is = np.argmax(envelope(np.real(dS))); \n ##Constrain the initial values \n # Parameters for curve_fit\n wi=2*np.pi*fi[i] \n \n try:\n GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]))\n GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne])) \n except:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n\n# GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]),bounds=(0,[Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]))\n# GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne]),bounds=(0,[As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]])) \n \n# % Check fitting\n if ((GaO[0]/GaS[0]) > 10**5) or np.abs(GaO[4]-GaS[4]) > lTime[-1]/2:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n \n wP=((si**2)*wf+(sw**2)*wi)/(sw**2+si**2)\n wPP=((si**2)*wf-(sw**2)*wi)/(sw**2+si**2)\n siP=((si**2)*(sw**2)/(sw**2+si**2))**0.5 \n #Estimate waveform perturbation kernel (WPK)\n IW=(siP/(sw*GaS[0]))*np.multiply(np.exp(-0.5*(wf-2*np.pi*fi[i])**2/(sw**2+si**2)),np.divide(tff,wP))+\\\n (siP/(sw*GaS[0]))*np.exp(-0.5*(wf+2*np.pi*fi[i])**2/(sw**2+si**2))*tff/wPP\n \n IW[0:int(len(IW)/2)]=0*IW[0:int(len(IW)/2)]\n \n itff = ifft(fftshift(num_pts*IW)) \n \n #Save the GSDF measurements\n TauP_arr[i] = GaO[4]-GaS[4]; #% delta_P\n \n# Jp = np.real(itff)\n# Jp = np.imag(itff)\n Jp = -np.imag(itff) \n adj_sim_decompose[i,:] = np.flip(Jp,axis=0)*TauP_arr[i] \n \n #if i>0:\n adj_sim_sum = adj_sim_sum + adj_sim_decompose[i,:] \n \n return adj_sim_sum, TauP_arr", "def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, 
use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 )\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }", "def make_4gaussians_image(noise=True):\n table = QTable()\n table['amplitude'] = [50, 70, 150, 210]\n table['x_mean'] = [160, 25, 150, 90]\n table['y_mean'] = [70, 40, 25, 60]\n table['x_stddev'] = [15.2, 5.1, 3.0, 8.1]\n table['y_stddev'] = [2.6, 2.5, 3.0, 4.7]\n table['theta'] = np.radians(np.array([145.0, 20.0, 0.0, 60.0]))\n\n shape = (100, 200)\n data = make_gaussian_sources_image(shape, table) + 5.0\n\n if noise:\n rng = np.random.RandomState(12345)\n data += rng.normal(loc=0.0, scale=5.0, size=shape)\n\n return data", "def generate_template_trophy(h1, h2, w, drawing):\n drawing.add(dxf.line((0, 0), (600,0), color=255, layer='LINES', thickness=0.00))\n drawing.add(dxf.line((600, 0), (600,450), color=255, layer='LINES', thickness=0.00))\n drawing.add(dxf.line((600,450), (0,450), color=255, layer='LINES', thickness=0.00))\n 
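    # the next line closes the 600 x 450 outer frame of the template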
drawing.add(dxf.line((0,450), (0,0), color=255, layer='LINES', thickness=0.00))\n refpoint = generate_ref_trophy(h1, h2, w)\n for i in refpoint[:4]:\n x,y = i\n draw(x,y,x+h1,y,drawing)\n draw(x,y,x,y-w,drawing)\n draw(x,y-w,x+h2,y-w,drawing)\n draw(x+h1,y,x+h2,y-w,drawing)\n for i in refpoint[4:8]:\n x,y=i\n draw(x,y,x-h1,y,drawing)\n draw(x,y,x,y+w,drawing)\n draw(x,y+w,x-h2,y+w,drawing)\n draw(x-h2,y+w,x-h1,y,drawing)\n x,y = refpoint[-2]\n draw(x,y,x,y+h1,drawing)\n draw(x,y,x+w,y,drawing)\n draw(x+w,y,x+w,y+h2,drawing)\n draw(x+w,y+h2,x,y+h1,drawing)\n x,y = refpoint[-1]\n draw(x,y,x,y-h1,drawing)\n draw(x,y,x-w,y,drawing)\n draw(x-w,y,x-w,y-h2,drawing)\n draw(x-w,y-h2,x,y-h1,drawing)", "def gaussian(\n self,\n width=None,\n mfreq=None,\n chromaticity=None,\n dtype=None,\n power=True,\n ):\n widths, dtype = self._process_args(width, mfreq, chromaticity, dtype)\n response = np.exp(-0.5 * (self.xs / np.sin(widths)) ** 2)\n if power:\n response = response ** 2\n return response.astype(dtype)", "def generate_sources(exp_time, fov, sky_center, area=40000.0, prng=None):\r\n prng = parse_prng(prng)\r\n\r\n exp_time = parse_value(exp_time, \"s\")\r\n fov = parse_value(fov, \"arcmin\")\r\n area = parse_value(area, \"cm**2\")\r\n\r\n agn_fluxes, gal_fluxes = generate_fluxes(exp_time, area, fov, prng)\r\n\r\n fluxes = np.concatenate([agn_fluxes, gal_fluxes])\r\n\r\n ind = np.concatenate([get_agn_index(np.log10(agn_fluxes)),\r\n gal_index * np.ones(gal_fluxes.size)])\r\n\r\n dec_scal = np.fabs(np.cos(sky_center[1] * np.pi / 180))\r\n ra_min = sky_center[0] - fov / (2.0 * 60.0 * dec_scal)\r\n dec_min = sky_center[1] - fov / (2.0 * 60.0)\r\n\r\n ra0 = prng.uniform(size=fluxes.size) * fov / (60.0 * dec_scal) + ra_min\r\n dec0 = prng.uniform(size=fluxes.size) * fov / 60.0 + dec_min\r\n\r\n return ra0, dec0, fluxes, ind", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n #normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def make_from_templates(templates, min_amplitude, max_amplitude,\n n_per_amplitude_step, probabilities=None,\n return_metadata=False,\n n_repeat=1):\n logger = logging.getLogger(__name__)\n\n logger.debug('templates shape: %s, min amplitude: %s, '\n 'max_amplitude: %s', templates.shape, min_amplitude,\n max_amplitude)\n\n n_templates, waveform_length, n_neighbors = templates.shape\n\n n_per_template = n_per_amplitude_step * n_repeat\n\n x = np.zeros((n_per_template * n_templates,\n waveform_length, n_neighbors))\n\n d = max_amplitude - min_amplitude\n\n amps_range = (min_amplitude + np.arange(n_per_amplitude_step)\n * d / (n_per_amplitude_step - 1))\n\n if probabilities is not None:\n amps_range = draw_with_group_probabilities(amps_range, probabilities)\n\n amps_range = amps_range[:, np.newaxis, np.newaxis]\n\n # go over every template\n for k in range(n_templates):\n\n # get current template and scale it\n current = templates[k]\n amp = np.max(np.abs(current))\n scaled = (current/amp)[np.newaxis, :, :]\n\n # create n clean spikes by scaling the template along the range\n spikes_in_range = scaled * 
amps_range\n\n # repeat n times\n spikes_in_range_repeated = np.repeat(spikes_in_range,\n repeats=n_repeat,\n axis=0)\n\n x[k * n_per_template:\n (k + 1) * n_per_template] = spikes_in_range_repeated\n\n if return_metadata:\n ids = [[k]*n_per_amplitude_step for k in range(n_templates)]\n ids = np.array([item for sublist in ids for item in sublist])\n metadata = dict(ids=ids)\n return yarr.ArrayWithMetadata(x, metadata)\n else:\n return x", "def preparehspiceidvgGEO1(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,HFINparam,TFIN_TOPparam,TFIN_BASEparam,EOTparam,NBODYparam,NFINparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'HFINparam',HFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_TOPparam', TFIN_TOPparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_BASEparam',TFIN_BASEparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'EOTparam', EOTparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam)", "def gauss_seeing(npix = None,fwhm=None,e1=None,e2=None,scale=scale):\n fwhm = fwhm/scale\n M20 = 2.*(fwhm/2.35482)**2\n row,col = np.mgrid[-npix/2:npix/2,-npix/2:npix/2]\n rowc = row.mean()\n colc = col.mean()\n Mcc = 0.5*M20*(1+e1)\n Mrc = 0.5*e2*M20\n Mrr = 0.5*M20*(1-e1)\n rho = Mrc/np.sqrt(Mcc*Mrr)\n img = np.exp(-0.5/(1-rho**2)*(row**2/Mrr + col**2/Mcc - 2*rho*row*col/np.sqrt(Mrr*Mcc)))\n res = img/img.sum()\n return res", "def build_gp(amplitude, length_scale):\n kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)\n return tfd.GaussianProcess(kernel=kernel, index_points=x, observation_noise_variance=0.0)", "def genGrid(nTot,gDict):\n \n # Generate nTot-by-8 array, and dump to disk.\n grid = np.empty([nTot,8])\n \n # Initialize Simulation ID (SID) to keep track of the number of propagations.\n SID = 1\n\n # The grid array is filled in the order: MA, AOP, RAAN, INC, ECC, SMA, MJD.\n \n # Get deltas\n for key in gDict:\n if gDict[key]['points'] > 1:\n gDict[key]['delta'] = (gDict[key]['end'] - gDict[key]['start']) / (gDict[key]['points'] - 1)\n else:\n gDict[key]['delta'] = 0.\n \n # Here's the Big Nested Loop.\n for i0 in range(0, gDict['MJD']['points']):\n MJD = gDict['MJD']['start'] + i0 * gDict['MJD']['delta']\n\n for i1 in range(0, gDict['SMA']['points']):\n SMA = gDict['SMA']['start'] + i1 * gDict['SMA']['delta']\n\n for i2 in range(0, gDict['ECC']['points']):\n ECC = gDict['ECC']['start'] + i2 * gDict['ECC']['delta']\n\n for i3 in range(0, 
gDict['INC']['points']):\n INC = gDict['INC']['start'] + i3 * gDict['INC']['delta']\n\n for i4 in range(0, gDict['RAAN']['points']):\n RAAN = gDict['RAAN']['start'] + i4 * gDict['RAAN']['delta']\n\n for i5 in range(0, gDict['AOP']['points']):\n AOP = gDict['AOP']['start'] + i5 * gDict['AOP']['delta']\n\n for i6 in range(0, gDict['MA']['points']):\n MA = gDict['MA']['start'] + i6 * gDict['MA']['delta']\n \n grid[SID - 1,:] = [SID,MJD,SMA,ECC,INC,RAAN,AOP,MA]\n SID = SID + 1\n\n return grid", "def gen_energies(n_muons):\r\n pdist, bounds = fit_energylaw()\r\n samples = monte_carlo_sample(pdist, bounds, n_muons)\r\n return samples", "def add_temp_clim_normals(gdf_of_interest,\n grid_of_minmax_temp_clim_norm_y = clim_norm_minmax_temp_y_np_unique, \n grid_of_minmax_temp_clim_norm_x = clim_norm_minmax_temp_x_np_unique, \n grid_of_mean_temp_clim_norm_y = clim_norm_mean_temp_y_np_unique, \n grid_of_mean_temp_clim_norm_x = clim_norm_mean_temp_x_np_unique): \n mean_monthly_min = []\n mean_monthly_max = []\n mean_monthly_temp = []\n for index in gdf_of_interest.index:\n # Find the closest x and y grid points for the mean min/max temperatures\n closest_y_index = find_nearest_point_1D(grid_of_minmax_temp_clim_norm_y, \n gdf_of_interest.geometry.y[index], \n print_nearest_val=False)\n closest_x_index = find_nearest_point_1D(grid_of_minmax_temp_clim_norm_x, \n gdf_of_interest.geometry.x[index], \n print_nearest_val=False)\n \n # Find the closest x and y grid points for the monthly mean temperature\n closest_y_index_mean = find_nearest_point_1D(grid_of_mean_temp_clim_norm_y, \n gdf_of_interest.geometry.y[index], \n print_nearest_val=False)\n closest_x_index_mean = find_nearest_point_1D(grid_of_mean_temp_clim_norm_x, \n gdf_of_interest.geometry.x[index], \n print_nearest_val=False)\n \n \n \n # Find the month of interest and define the correct format for the different file formats\n month_of_interest = int(gdf_of_interest[\"Month\"][0])\n min_month_of_int_format = \"Tn_m\" + str(month_of_interest)\n max_month_of_int_format = \"Tx_m\" + str(month_of_interest)\n mean_month_of_int_format = \"Tm_m\" + str(month_of_interest)\n \n # Append relevant climate normal data\n mean_monthly_min.append(gdf_clim_norm_temp_TN.loc[\n (gdf_clim_norm_temp_TN[\"east\"] == grid_of_minmax_temp_clim_norm_x[closest_x_index]) &\n (gdf_clim_norm_temp_TN[\"north\"] == grid_of_minmax_temp_clim_norm_y[closest_y_index]),\n min_month_of_int_format].values[0])\n \n mean_monthly_max.append(gdf_clim_norm_temp_TX.loc[\n (gdf_clim_norm_temp_TX[\"east\"] == grid_of_minmax_temp_clim_norm_x[closest_x_index]) &\n (gdf_clim_norm_temp_TX[\"north\"] == grid_of_minmax_temp_clim_norm_y[closest_y_index]),\n max_month_of_int_format].values[0])\n \n # NOTE: We currently do not have the NI data so assume that for NI stations\n # the mean value is in the middle of the min/max values.\n if (len(gdf_clim_norm_temp_mean.loc[\n (gdf_clim_norm_temp_mean[\"east\"] == grid_of_mean_temp_clim_norm_x[closest_x_index_mean]) &\n (gdf_clim_norm_temp_mean[\"north\"] == grid_of_mean_temp_clim_norm_y[closest_y_index_mean]),:]) == 0):\n mean_monthly_temp.append(np.mean([mean_monthly_min[-1], mean_monthly_max[-1]]))\n else:\n mean_monthly_temp.append(gdf_clim_norm_temp_mean.loc[\n (gdf_clim_norm_temp_mean[\"east\"] == grid_of_mean_temp_clim_norm_x[closest_x_index_mean]) &\n (gdf_clim_norm_temp_mean[\"north\"] == grid_of_mean_temp_clim_norm_y[closest_y_index_mean]),\n mean_month_of_int_format].values[0])\n \n \n gdf_of_interest[\"Mean Monthly Min Temp\"] = mean_monthly_min\n 
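    # attach the collected monthly climate-normal series as new columns on the GeoDataFrame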
gdf_of_interest[\"Mean Monthly Max Temp\"] = mean_monthly_max\n gdf_of_interest[\"Mean Monthly Temp\"] = mean_monthly_temp\n \n return", "def create_gar(self):\n print('Maketh the report!')\n # Date setup\n date = datetime.today().strftime('%Y-%m-%d')\n year = datetime.today().strftime('%Y')\n\n # Page setup\n geometry_options = {\"tmargin\": \"2cm\",\n \"lmargin\": \"1.8cm\",\n \"rmargin\": \"1.8cm\",\n \"headsep\": \"1cm\"}\n\n doc = pylatex.Document(page_numbers=False,\n geometry_options=geometry_options)\n\n header = self.produce_header_footer()\n\n doc.preamble.append(header)\n doc.change_document_style(\"header\")\n\n #\n # DOCUMENT BODY/CREATION\n with doc.create(pylatex.Section('GeneSippr Analysis Report', numbering=False)):\n doc.append('GeneSippr!')\n\n with doc.create(pylatex.Subsection('GeneSeekr Analysis', numbering=False)) as genesippr_section:\n with doc.create(pylatex.Tabular('|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|')) as table:\n # Header\n table.add_hline()\n table.add_row(self.genesippr_table_columns)\n for sample_name in self.samples:\n table_data = [sample_name]\n for data in self.genesippr_headers:\n try:\n print(sample_name, data, self.report_data['genesippr'][sample_name][data])\n table_data.append(self.report_data['genesippr'][sample_name][data])\n except KeyError:\n pass\n table.add_row(table_data)\n self.create_caption(genesippr_section, 'a', \"+ indicates marker presence : \"\n \"- indicates marker was not detected\")\n\n # Create the PDF\n doc.generate_pdf('{}_{}_{}'\n .format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date), clean_tex=False)\n print('{}_{}_{}'.format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date))\n # for report_name in self.report_data:\n # for sample_name in self.samples:\n # for header, value in self.report_data[report_name][sample_name].items():\n # print(report_name, sample_name, header, value)", "def generate_plot(self, xlimg = None , ylimg =None , exname = '' , prefix = True , save = True):\n print ('start with the generation of plots')\n #plot of condensation energy\n self.plotwrap(0,2, 'energy (a.u.)' , name = 'ge'+ exname, titel = 'the energy (a.u.)', xlim = xlimg , ylim = ylimg , prefix = prefix ,save = save )\n self.plotwrap(0,1, 'condensation energy (a.u.)' , name = 'ce' + exname ,titel = 'the condensation energy (a.u.)',xlim = xlimg , ylim = ylimg , prefix = prefix,save = save )", "def generate_data(params, N, rng=(-7, 7)):\n hp = np.array(params)\n print(\"parameters for data generated from gp are : {0}\".format(hp))\n print(\"using a ExpSquared kernel\")\n gp = george.GP(hp[0] * kernels.ExpSquaredKernel(hp[1]))\n t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))\n\n #y = model(params, t)\n y = gp.sample(t)\n yerr = 1.e-5 #1 + 0.1 * np.random.randn(N)\n y += yerr\n\n return t, y, yerr", "def random_ngon_linify(cymk_img, n_min=3, n_max=6, r_min = .1, r_max = 1.):\n c_lines, y_lines, m_lines, k_lines = [],[],[],[]\n for X in range(cymk_img.shape[0]):\n for Y in range(cymk_img.shape[1]):\n if (cymk_img[X,Y,0]>0.5):\n R = random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n c_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n if (cymk_img[X,Y,1]>0.5):\n R = random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n y_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n if (cymk_img[X,Y,2]>0.5):\n R = 
random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n m_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n if (cymk_img[X,Y,3]>0.5):\n R = random.random()*(r_max-r_min) + r_min\n NN = random.randint(n_min,n_max)\n phase = random.random()*2*pi\n k_lines.append(ngon(X,Y,r=R,n=NN,phase=phase))\n return c_lines, y_lines, m_lines, k_lines", "def make_gaussian_prf_sources_image(shape, source_table):\n model = IntegratedGaussianPRF(sigma=1)\n\n if 'sigma' in source_table.colnames:\n sigma = source_table['sigma']\n else:\n sigma = model.sigma.value # default\n\n colnames = source_table.colnames\n if 'flux' not in colnames and 'amplitude' in colnames:\n source_table = source_table.copy()\n source_table['flux'] = (source_table['amplitude']\n * (2.0 * np.pi * sigma * sigma))\n\n return make_model_sources_image(shape, model, source_table,\n oversample=1)", "def my_phantomgallery( phantom_type ):\n\n if phantom_type == 'ellipses' or phantom_type == 'shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]\n M = np.array([[ .69, .92, 0, 0, 0, 1.],\n [ .6624, .8740, 0, -.0184, 0, -0.8],\n [ .1100, .3100, .22, 0, -18, -.2],\n [ .1600, .4100, -.22, 0, 18, -.2],\n [ .2100, .2500, 0, .35, 0, .1],\n [ .0460, .0460, 0, .1, 0, .1],\n [ .0460, .0460, 0, -.1, 0, .1],\n [ .0460, .0230, -.08, -.605, 0, .1],\n [ .0230, .0230, 0, -.605, 0, .1],\n [ .0230, .0460, .06, -.605, 0, .1]])\n\n\n elif phantom_type == 'modified_shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]\n p1 = [.7, .8, 0, 0, 0, 1]\n p2 = [.65,.75,0,0,0,-.9]\n p3 = [.15,.2,0,.4,0,.5]\n p4 = [.25,.15,-.25,.25,135.79,.2]\n p5 = [.25,.15,.25,.25,45.26,.2]\n p6 = [.08,.25,0,-.3,28.65,.65]\n p7 = [.05,.05,.5,-.3,0,.8]\n # combine into a matrix with one ellipse in each row\n M = np.array([p1, p2, p3, p4, p5, p6, p7]);\n \n\n\n elif phantom_type == 'squares':\n # [x center, y center, edge length ,phi=angle (degrees), greyscale=attenuation]\n s1 = [0,0,1.3,0,1]\n s2 = [0,0,1.1,0,-.9]\n s3 = [.1,-.1,.5,180/6,.4]\n s4 = [-.25,.15,.25,180/4,.2]\n s5 = [-.2,.25,.3,180/3,.4]\n #combine into a matrix with one square in each row\n M = np.array([s1, s2, s3, s4, s5]);\n\n elif (phantom_type == 'rectangles'):\n # [x center, y center, dimension 1, dimension 2, phi=angle (degrees), greyscale=attenuation]\n r1 = [0,0,1.3,1.1,0,1]\n r2 = [0,0,1.2,1,0,-.9]\n r3 = [0.25,.15,.25,.6,180/6,.4]\n r4 = [-.2,.1,.25,.20,180/4,.2]\n r5 = [-.3,.2,.3,.2,180/6,.4]\n #combine into a matrix with one square in each row\n M = np.array([r1, r2, r3, r4, r5])\n else:\n print('Unknown phantom_type')\n M = None\n\n return M", "def wrt_gau_input_once(self, imol):\n prefix = self.config['job_prefix'] \n inpfile = prefix + \"x\" +str(imol) + \".gjf\"\n t = self.template\n fp = open(inpfile, \"w\")\n link0 = t['link0']\n link0['%chk'] = prefix + \"x\" + str(imol) + \".chk\\n\"\n for key in link0:\n print >>fp, \"%s=%s\" % (key, link0[key]),\n print >>fp, \"%s\" % t['route'],\n print >>fp, \"\"\n print >>fp, \"%s\" % t['title']\n print >>fp, \"\"\n\n molspec = t['molspec']\n print >>fp, \"%s\" % molspec['spin_charge']\n \n onemol = self.model['mol'][imol]\n natom = onemol['natom']\n for atom in onemol['atom']:\n line = self.__build_gau_atom(atom)\n print >>fp, \"%s\" % line\n \n print >>fp, \"\"\n print >>fp, \"%s\" % t['tail'],\n \n fp.close()\n return", "def plot_interpolated_cog(self, teff, logg, vt, ews=None):\n if not self.interpolated:\n raise 
ValueError(\"Your model hasn't been interpolated yet\")\n import matplotlib.pyplot as plt\n if not ews:\n ews = np.arange(1, 100, 0.1)\n generated_mets = [self.load_model().predict([[teff, logg, vt, ew]])[0] for ew in ews]\n fig = plt.plot(ews, generated_mets)\n plt.xlabel(\"EW(m$\\AA$)\")\n plt.ylabel(\"A(Fe)\")\n return fig", "def _from_ppc_gen(net, ppc):\n n_gen = ppc[\"gen\"].shape[0]\n\n # if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array\n if len(ppc[\"gen\"].shape) == 1:\n ppc[\"gen\"] = np.array(ppc[\"gen\"], ndmin=2)\n\n bus_pos = _get_bus_pos(ppc, ppc[\"gen\"][:, GEN_BUS])\n\n # determine which gen should considered as ext_grid, gen or sgen\n is_ext_grid, is_gen, is_sgen = _gen_to_which(ppc, bus_pos=bus_pos)\n\n # take VG of the last gen of each bus\n vg_bus_lookup = pd.DataFrame({\"vg\": ppc[\"gen\"][:, VG], \"bus\": bus_pos})\n # vg_bus_lookup = vg_bus_lookup.drop_duplicates(subset=[\"bus\"], keep=\"last\").set_index(\"bus\")[\"vg\"]\n vg_bus_lookup = vg_bus_lookup.drop_duplicates(subset=[\"bus\"]).set_index(\"bus\")[\"vg\"]\n\n # create ext_grid\n idx_eg = list()\n for i in np.arange(n_gen, dtype=int)[is_ext_grid]:\n idx_eg.append(create_ext_grid(\n net, bus=bus_pos[i], vm_pu=vg_bus_lookup.at[bus_pos[i]],\n va_degree=ppc['bus'][bus_pos[i], VA],\n in_service=(ppc['gen'][i, GEN_STATUS] > 0).astype(bool),\n max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],\n max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN]))\n\n # create gen\n idx_gen = create_gens(\n net, buses=bus_pos[is_gen], vm_pu=vg_bus_lookup.loc[bus_pos[is_gen]].values,\n p_mw=ppc['gen'][is_gen, PG], sn_mva=ppc['gen'][is_gen, MBASE],\n in_service=(ppc['gen'][is_gen, GEN_STATUS] > 0), controllable=True,\n max_p_mw=ppc['gen'][is_gen, PMAX], min_p_mw=ppc['gen'][is_gen, PMIN],\n max_q_mvar=ppc['gen'][is_gen, QMAX], min_q_mvar=ppc['gen'][is_gen, QMIN])\n\n # create sgen\n idx_sgen = create_sgens(\n net, buses=bus_pos[is_sgen], p_mw=ppc['gen'][is_sgen, PG],\n q_mvar=ppc['gen'][is_sgen, QG], sn_mva=ppc['gen'][is_sgen, MBASE], type=\"\",\n in_service=(ppc['gen'][is_sgen, GEN_STATUS] > 0),\n max_p_mw=ppc['gen'][is_sgen, PMAX], min_p_mw=ppc['gen'][is_sgen, PMIN],\n max_q_mvar=ppc['gen'][is_sgen, QMAX], min_q_mvar=ppc['gen'][is_sgen, QMIN],\n controllable=True)\n\n neg_p_gens = np.arange(n_gen, dtype=int)[(ppc['gen'][:, PG] < 0) & (is_gen | is_sgen)]\n neg_p_lim_false = np.arange(n_gen, dtype=int)[ppc['gen'][:, PMIN] > ppc['gen'][:, PMAX]]\n neg_q_lim_false = np.arange(n_gen, dtype=int)[ppc['gen'][:, QMIN] > ppc['gen'][:, QMAX]]\n if len(neg_p_gens):\n logger.info(f'These gen have PG < 0 and are not converted to ext_grid: {neg_p_gens}.')\n if len(neg_p_lim_false):\n logger.info(f'These gen have PMIN > PMAX: {neg_p_lim_false}.')\n if len(neg_q_lim_false):\n logger.info(f'These gen have QMIN > QMAX: {neg_q_lim_false}.')\n\n # unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,\n # Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf\n\n # gen_lookup\n gen_lookup = pd.DataFrame({\n 'element': np.r_[idx_eg, idx_gen, idx_sgen],\n 'element_type': [\"ext_grid\"]*sum(is_ext_grid) + [\"gen\"]*sum(is_gen) + [\"sgen\"]*sum(is_sgen)\n })\n return gen_lookup", "def makeGaussian(size, fwhm, center=None):\n\n x = sp.arange(0, size, 1, float)\n y = x[:,sp.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return sp.exp(-4*sp.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def line_Mgas(**kwargs):\n\n 
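    # scatter of line luminosity (converted to K km s^-1 pc^2) against ISM gas mass for the Simba galaxy sample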
p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n\n L_line = getattr(GR,'L_'+p.line+'_sun')\n L_line = aux.Lsun_to_K_km_s_pc2(L_line,p.line)\n M_gas = getattr(GR,'M_gas')\n\n # Plot\n fig,ax = plt.subplots(figsize=(8,6))\n ax.plot(np.log10(M_gas),np.log10(L_line),'x',label='Simba galaxies')\n ax.set_xlabel(getlabel('lM_ISM'))\n ax.set_ylabel('log(L$_{\\mathrm{%s}}$ [K km$\\,s^{-1}$ pc$^2$])' % p.line)\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/%s_Mgas.png' % p.line, format='png', dpi=300) \n\n # plt.close('all')", "def preparehspiceidvgGEO1v2(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,HFINparam,TFIN_TOPparam,TFIN_BASEparam,EOTparam,NBODYparam,NFINparam,PHIGparam,RSHSparam,RSHDparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'HFINparam',HFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_TOPparam', TFIN_TOPparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_BASEparam',TFIN_BASEparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'EOTparam', EOTparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'PHIGparam', PHIGparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHSparam', RSHSparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHDparam', RSHDparam)", "def add_elec_bunch_gaussian( sim, sig_r, sig_z, n_emit, gamma0, sig_gamma,\n Q, N, tf=0., zf=0., boost=None,\n filter_currents=True, save_beam=None ):\n # Get Gaussian particle distribution in x,y,z\n x = np.random.normal(0., sig_r, N)\n y = np.random.normal(0., sig_r, N)\n z = np.random.normal(zf, sig_z, N) # with offset in z\n # Define sigma of ux and uy based on normalized emittance\n sig_ur = (n_emit/sig_r)\n # Get Gaussian distribution of transverse normalized momenta ux, uy\n ux = np.random.normal(0., sig_ur, N)\n uy = np.random.normal(0., sig_ur, N)\n # Now we imprint an energy spread on the gammas of each particle\n if sig_gamma > 0.:\n gamma = np.random.normal(gamma0, sig_gamma, N)\n else:\n # Or set it to zero\n gamma = np.full(N, gamma0)\n if sig_gamma < 0.:\n print(\"Warning: Negative energy spread sig_gamma detected.\"\n \" sig_gamma will be set to zero. 
\\n\")\n # Finally we calculate the uz of each particle\n # from the gamma and the transverse momenta ux, uy\n uz = np.sqrt((gamma**2-1) - ux**2 - uy**2)\n # Get inverse gamma\n inv_gamma = 1./gamma\n # Get weight of each particle\n w = -1. * Q / N * np.ones_like(x)\n\n # Propagate distribution to an out-of-focus position tf.\n # (without taking space charge effects into account)\n if tf != 0.:\n x = x - ux*inv_gamma*c*tf\n y = y - uy*inv_gamma*c*tf\n z = z - uz*inv_gamma*c*tf\n\n # Save beam distribution to an .npz file\n if save_beam is not None:\n np.savez(save_beam, x=x, y=y, z=z, ux=ux, uy=uy, uz=uz,\n inv_gamma=inv_gamma, w=w)\n\n # Add the electrons to the simulation\n add_elec_bunch_from_arrays( sim, x, y, z, ux, uy, uz, w,\n boost=boost, filter_currents=filter_currents )", "def create_regular_grid(self, phosphene_resolution, size, jitter, intensity_var):\n grid = np.zeros(size)\n phosphene_spacing = np.divide(size,phosphene_resolution)\n for x in np.linspace(0,size[0],num=phosphene_resolution[0],endpoint=False)+0.5*phosphene_spacing[0] :\n for y in np.linspace(0,size[1],num=phosphene_resolution[1],endpoint=False)+0.5*phosphene_spacing[0]:\n deviation = np.multiply(jitter*(2*np.random.rand(2)-1),phosphene_spacing)\n intensity = intensity_var*(np.random.rand()-0.5)+1\n rx = np.clip(np.round(x+deviation[0]),0,size[0]-1).astype(int)\n ry = np.clip(np.round(y+deviation[1]),0,size[1]-1).astype(int)\n grid[rx,ry]= intensity\n return grid", "def _build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x", "def prepare_CvD16_for_M2L_calc(templates_lam_range, verbose=False):\n import glob\n import os\n template_glob=os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*.s100')\n\n vcj_models=sorted(glob.glob(template_glob))\n temp_lamdas, x35, x3, x23, kroupa, flat=np.genfromtxt(vcj_models[-1], unpack=True)\n\n n_ages=7\n n_zs=5\n n_imfs=5\n\n \n\n\n Zs=['m1.5', 'm1.0', 'm0.5', 'p0.0', 'p0.2']\n ages=[1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.5]\n model_imfs_order=['x35', 'x3', 'x23', 'kroupa', 'flat']\n\n t_mask = ((temp_lamdas > templates_lam_range[0]) & (temp_lamdas <templates_lam_range[1]))\n\n\n\n y=x35[t_mask]\n x=temp_lamdas[t_mask]\n\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x)\n\n templates=np.empty((len(out), n_ages, n_zs, n_imfs))\n\n\n\n for a, Z in enumerate(Zs): \n for b, age in enumerate(ages):\n model=glob.glob(os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*{}*{}.ssp.s100'.format(Z, age)))[0]\n if verbose:\n print 'Loading 
{}'.format(model)\n data=np.genfromtxt(model)\n\n for c, counter in enumerate(reversed(range(1, data.shape[-1]))):\n \n #Interpolate templates onto a uniform wavelength grid and then log-rebin\n y=data[:, counter][t_mask] \n x=temp_lamdas[t_mask]\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x) \n\n templates[:, b, a, c]=out\n\n return templates, new_x", "def smooth_template_disp(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):\n \n wobs = templ.wave*(1+z)/1.e4\n if flambda:\n fobs = templ.flux_flam(z=z)#[wclip]\n else:\n fobs = templ.flux_fnu(z=z)#[wclip]\n \n if with_igm:\n fobs *= templ.igm_absorption(z)\n \n disp_r = np.interp(wobs, disp['WAVELENGTH'], disp['R'])*scale_disp\n fwhm_um = np.sqrt((wobs/disp_r)**2 + (velocity_fwhm/3.e5*wobs)**2)\n sig_um = np.maximum(fwhm_um/2.35, 0.5*np.gradient(wobs))\n \n x = wobs_um[:,np.newaxis] - wobs[np.newaxis,:]\n gaussian_kernel = 1./np.sqrt(2*np.pi*sig_um**2)*np.exp(-x**2/2/sig_um**2)\n tsmooth = np.trapz(gaussian_kernel*fobs, x=wobs, axis=1)\n \n return tsmooth", "def make_point_sources_file(simput_prefix, phlist_prefix, exp_time, fov, \r\n sky_center, absorb_model=\"wabs\", nH=0.05, \r\n area=40000.0, prng=None, append=False, \r\n overwrite=False, input_sources=None, \r\n output_sources=None):\r\n events = make_ptsrc_background(exp_time, fov, sky_center, \r\n absorb_model=absorb_model, nH=nH, \r\n area=area, input_sources=input_sources, \r\n output_sources=output_sources, prng=prng)\r\n write_photon_list(simput_prefix, phlist_prefix, events[\"flux\"], \r\n events[\"ra\"], events[\"dec\"], events[\"energy\"], \r\n append=append, overwrite=overwrite)", "def add_derived_GEOSChem_specs2df(df):\n # Add temperature in deg C\n df['T'] = df['GMAO_TEMP'].copy()\n df['T'] = df['GMAO_TEMP'].values - 273.15\n # Inc. V nd U with same variable names as GEOS-CF\n df['V'] = df['GMAO_VWND'].copy()\n df['U'] = df['GMAO_UWND'].copy()\n # Add NOx as combined NO and NO2\n df['NOx'] = df['NO'].values + df['NO2'].values\n # Add NOy as defined in GEOS-CF\n # NOy = no_no2_hno3_hno4_hono_2xn2o5_pan_organicnitrates_aerosolnitrates\n vars2use = AC.GC_var('NOy-all')\n df['NOy'] = df['N2O5'].copy() #  2 N2O5 in NOy, so 2x via template\n for var in vars2use:\n try:\n df.loc[:, 'NOy'] = df['NOy'].values + df[var].values\n except KeyError:\n pass\n # Add a variable for gas-phase NOy (by subtracting aerosol nitrate)\n vars2use = AC.GC_var('NOy-gas')\n df['NOy-gas'] = df['N2O5'].copy() #  2 N2O5 in NOy, so 2x via template\n for var in vars2use:\n try:\n df.loc[:, 'NOy-gas'] = df['NOy-gas'].values + df[var].values\n except KeyError:\n pass\n # Include a variable of NOy where HNO3 is removed\n # NOy = no_no2_hno3_hno4_hono_2xn2o5_pan_organicnitrates_aerosolnitrates\n df['NOy-HNO3'] = df['NOy'].values - df['HNO3'].values\n # Include a variable of NOy where HNO3 is removed\n df['NOy-HNO3-PAN'] = df['NOy'].values - \\\n df['HNO3'].values - df['PAN'].values\n # gas-phase (exc. 
PAN, HNO3, HNO4, Org-NIT, N2O5)\n df['NOy-Limited'] = df['NO'].values + df['NO2'].values + \\\n df['HNO2'].values + df['NIT'].values + df['NITs'].values\n # Add an all sulfate tracer\n NewVar = 'SO4-all'\n vars2use = AC.GC_var(NewVar)\n df[NewVar] = df['NIT'].values\n for var2use in vars2use:\n try:\n df[NewVar] = df[NewVar].values + df[var2use].values\n except KeyError:\n pass\n # And a all nitrate tracer\n NewVar = 'NIT-all'\n vars2use = AC.GC_var(NewVar)\n df[NewVar] = df['NIT'].values\n for var2use in vars2use:\n try:\n df[NewVar] = df[NewVar].values + df[var2use].values\n except KeyError:\n pass\n # Uset the P-I variable as a model level variable\n df['model-lev'] = df['P-I'].copy()\n return df", "def main(ngrains=100,sigma=15.,c2a=1.6235,mu=0.,\n prc='cst',isc=False,tilt_1=0.,\n tilts_about_ax1=0.,tilts_about_ax2=0.):\n if isc:\n h = mmm()\n else:\n h=np.array([np.identity(3)])\n gr = []\n for i in range(ngrains):\n dth = random.uniform(-180., 180.)\n if prc=='cst': g = gen_gr_fiber(th=dth,sigma=sigma,mu=mu,tilt=tilt_1,iopt=0) # Basal//ND\n elif prc=='ext': g = gen_gr_fiber(th=dth,sigma=sigma,mu=mu,tilt=tilt_1,iopt=1) # Basal//ED\n else:\n raise IOError('Unexpected option')\n for j in range(len(h)):\n temp = np.dot(g,h[j].T)\n\n ## tilts_about_ax1\n if abs(tilts_about_ax1)>0:\n g_tilt = rd_rot(tilts_about_ax1)\n temp = np.dot(temp,g_tilt.T)\n ## tilts_about_ax2?\n elif abs(tilts_about_ax2)>0:\n g_tilt = td_rot(tilts_about_ax2)\n temp = np.dot(temp,g_tilt.T)\n elif abs(tilts_about_ax2)>0 and abs(tilts_about_ax2)>0:\n raise IOError('One tilt at a time is allowed.')\n\n phi1,phi,phi2 = euler(a=temp, echo=False)\n gr.append([phi1,phi,phi2,1./ngrains])\n\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,2],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n return np.array(gr)", "def map_all_sig_p(limitregion=False, region=\"allsky\"):\n \n # Get ids of all pixels that contain RHT data\n rht_cursor, tablename = get_rht_cursor(region = region)\n all_ids = get_all_rht_ids(rht_cursor, tablename)\n \n planck_tqu_db = sqlite3.connect(\"planck_TQU_gal_2048_db.sqlite\")\n planck_tqu_cursor = planck_tqu_db.cursor()\n planck_cov_db = sqlite3.connect(\"planck_cov_gal_2048_db.sqlite\")\n planck_cov_cursor = planck_cov_db.cursor()\n \n if limitregion is True:\n print(\"Loading all allsky data points that are in the SC_241 region\")\n # Get all ids that are in both allsky data and SC_241\n all_ids_SC = pickle.load(open(\"SC_241_healpix_ids.p\", \"rb\"))\n all_ids = list(set(all_ids).intersection(all_ids_SC))\n \n all_sigpGsq = np.zeros(len(all_ids))\n\n update_progress(0.0)\n for i, hp_index in enumerate(all_ids):\n #likelihood = Likelihood(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all, psi0_all, adaptivep0 = adaptivep0)\n (hp_index, T, Q, U) = planck_tqu_cursor.execute(\"SELECT * FROM Planck_Nside_2048_TQU_Galactic WHERE id = ?\", hp_index).fetchone()\n (hp_index, TT, TQ, TU, TQa, QQ, QU, TUa, QUa, UU) = planck_cov_cursor.execute(\"SELECT * FROM Planck_Nside_2048_cov_Galactic WHERE id = ?\", (hp_index,)).fetchone()\n \n # sigma_p as defined in arxiv:1407.0178v1 Eqn 3.\n sigma_p = np.zeros((2, 2), np.float_) # [sig_Q^2, sig_QU // sig_QU, UU]\n sigma_p[0, 0] = (1.0/T**2)*QQ #QQ\n sigma_p[0, 1] = (1.0/T**2)*QU #QU\n sigma_p[1, 0] = (1.0/T**2)*QU #QU\n sigma_p[1, 1] = (1.0/T**2)*UU #UU\n \n # det(sigma_p) = sigma_p,G^4\n det_sigma_p = np.linalg.det(sigma_p)\n sigpGsq = np.sqrt(det_sigma_p)\n \n all_sigpGsq[i] = sigpGsq\n \n update_progress((i+1.0)/len(all_ids), 
message='Calculating: ', final_message='Finished Calculating: ')\n \n # Place into healpix map\n hp_sigpGsq = make_hp_map(all_sigpGsq, all_ids, Nside = 2048, nest = True)\n \n out_root = \"/disks/jansky/a/users/goldston/susan/Wide_maps/\"\n if limitregion:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_SC_241.fits\", hp_sigpGsq, coord = \"G\", nest = True) \n else:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_DR2sky.fits\", hp_sigpGsq, coord = \"G\", nest = True)", "def plot_gheat_g(seed=1):\n fig, ax = plt.subplots(figsize=[2.5*plotdl.latex_width_inch, 3*plotdl.latex_height_inch])\n \n r = Factory_psi1_psiN( \"aapta_of_s_N{number_of_points[0]}.npz\", N=400)\n ckg = r.create_if_missing(dict(model_name= [\"Anderson\",], \n number_of_points=[400,], bandwidth=[1,],\n dis_param=np.linspace(0,1,100),c=[1,], k=[1.57,], seed=np.arange(1,6))) \n color_seq = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])\n for (seed,c) in zip(np.arange(1,6),color_seq):\n ck = ckg[ckg['seed']==seed]\n g, psi_1, psi_N = ck['g'], ck['psi_N'], ck['psi_1']\n\n psi_heat = 2*(abs(psi_1)**2)*(abs(psi_N)**2) / ((abs(psi_1)**2) + (abs(psi_N)**2))\n \n phs = np.nansum(psi_heat,axis=1)\n \n psi1psiN = np.nansum(abs(psi_1*psi_N), axis=1)\n #print(ckg['dis_param'], phs)\n ax.plot(ck['dis_param'], phs,'.', color=c)\n ax.plot(ck['dis_param'], abs(g),'+', color=c)\n ax.plot(ck['dis_param'], psi1psiN,'d', color=c)\n ax.set_xlabel('dis_param')\n mkdir_and_savefig(fig, 'pta_comparison_of_s_N400.png')\n plt.close(fig)\n ## use last ck\n fig1, axes1 = plt.subplots(3,2,figsize=[2*plotdl.latex_width_inch, 3*plotdl.latex_height_inch],\n sharex=True, sharey=True)\n axes1.flat[0].xaxis.set_major_locator(MaxNLocator(4))\n axes1.flat[0].yaxis.set_major_locator(MaxNLocator(4))\n for n, ax1 in zip(range(1,20,3), axes1.flat):\n ax1.plot(abs(ck['psi_1'][n]), abs(ck['psi_N'][n]), '.') \n ax1.set_title(\"W = {:0.2}\".format(ck['dis_param'][n]))\n fig1.savefig('pta_psi_1_psi_2_N400.png')\n \n ax.cla()\n ax.plot(ck['dis_param'], np.real(g), label='real')\n ax.plot(ck['dis_param'], np.imag(g), label='imag')\n ax.plot(ck['dis_param'], np.abs(g), label='abs')\n ax.legend(loc = 'upper right')\n ax.set_xlabel('dis_param')\n ax.set_ylabel('g')\n mkdir_and_savefig(fig, 'pta_real_imag_g_s_N400')", "def hemi_reg(\n input_image,\n input_image_tissue_segmentation,\n input_image_hemisphere_segmentation,\n input_template,\n input_template_hemisphere_labels,\n output_prefix,\n padding=10,\n labels_to_register = [2,3,4,5],\n is_test=False ):\n\n img = ants.rank_intensity( input_image )\n ionlycerebrum = ants.mask_image( input_image_tissue_segmentation,\n input_image_tissue_segmentation, labels_to_register, 1 )\n\n tdap = dap( input_template )\n tonlycerebrum = ants.mask_image( tdap, tdap, labels_to_register, 1 )\n template = ants.rank_intensity( input_template )\n\n regsegits=[200,200,20]\n\n # upsample the template if we are passing SR as input\n if min(ants.get_spacing(img)) < 0.8:\n regsegits=[200,200,200,20]\n template = ants.resample_image( template, (0.5,0.5,0.5), interp_type = 0 )\n\n if is_test:\n regsegits=[8,0,0]\n\n input_template_hemisphere_labels = ants.resample_image_to_target(\n input_template_hemisphere_labels,\n template,\n interp_type='genericLabel',\n )\n\n # now do a hemisphere focused registration\n synL = localsyn(\n img=img*ionlycerebrum,\n template=template*tonlycerebrum,\n hemiS=input_image_hemisphere_segmentation,\n templateHemi=input_template_hemisphere_labels,\n whichHemi=1,\n padder=padding,\n iterations=regsegits,\n 
output_prefix = output_prefix + \"left_hemi_reg\",\n )\n synR = localsyn(\n img=img*ionlycerebrum,\n template=template*tonlycerebrum,\n hemiS=input_image_hemisphere_segmentation,\n templateHemi=input_template_hemisphere_labels,\n whichHemi=2,\n padder=padding,\n iterations=regsegits,\n output_prefix = output_prefix + \"right_hemi_reg\",\n )\n\n ants.image_write(synL['warpedmovout'], output_prefix + \"left_hemi_reg.nii.gz\" )\n ants.image_write(synR['warpedmovout'], output_prefix + \"right_hemi_reg.nii.gz\" )\n\n fignameL = output_prefix + \"_left_hemi_reg.png\"\n ants.plot(synL['warpedmovout'],axis=2,ncol=8,nslices=24,filename=fignameL, black_bg=False, crop=True )\n\n fignameR = output_prefix + \"_right_hemi_reg.png\"\n ants.plot(synR['warpedmovout'],axis=2,ncol=8,nslices=24,filename=fignameR, black_bg=False, crop=True )\n\n lhjac = ants.create_jacobian_determinant_image(\n synL['warpedmovout'],\n synL['fwdtransforms'][0],\n do_log=1\n )\n ants.image_write( lhjac, output_prefix+'left_hemi_jacobian.nii.gz' )\n\n rhjac = ants.create_jacobian_determinant_image(\n synR['warpedmovout'],\n synR['fwdtransforms'][0],\n do_log=1\n )\n ants.image_write( rhjac, output_prefix+'right_hemi_jacobian.nii.gz' )\n return {\n \"synL\":synL,\n \"synLpng\":fignameL,\n \"synR\":synR,\n \"synRpng\":fignameR,\n \"lhjac\":lhjac,\n \"rhjac\":rhjac\n }", "def gxx(xp, yp, zp, prisms):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n res = 0\n for prism in prisms:\n if prism is None or 'density' not in prism.props:\n continue\n density = prism.props['density']\n res += kernelxx(xp, yp, zp, prism)*density\n res *= G * SI2EOTVOS\n return res", "def generate_G_from_H(H, variable_weight=False):\n if type(H) != list:\n return _generate_G_from_H(H, variable_weight)\n else:\n G = []\n for sub_H in H:\n G.append(generate_G_from_H(sub_H, variable_weight))\n return G", "def make_pmodel_energies():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n data = pmodel.load_data(\"test_data/protein_load/traj/traj_test.xtc\")\n heps, dheps = pmodel.get_potentials_epsilon(data)\n\n true_energies = np.loadtxt(\"test_data/protein_load/traj/energy_gaussian_test.dat\")\n\n return pmodel, data, heps, dheps, true_energies", "def plt_npr_gaussian_all(tb, npr, sigma, soil, snow, onset, figname='all_plot_test0420', size=(12, 8), xlims=[0, 365],\n shade=False, title=False, site_no='947', pp=False, subs=5, s_symbol='k.',\n day_tout=-1, end_ax1=[0, 0], end_ax2=[0, 0], end_ax3=[0, 0], tair=[], snow_plot=False):\n # ylim for each station\n site_lim = {'947': [-17, -7], '949': [-13, -7], '950': [-13, -7], '960': [-14, -8], '962': [-15, -8], '967': [-12, -8], '968': [-17, -7],\n '1089': [-13, -7], '1090': [-14, -7], '1175': [-15, -8], '1177': [-19, -10],\n '1233': [-17, -6], '2065': [-14, -8], '2081': [-15, -7], '2210': [-16, -8], '2211': [-16, -8], '2212': [-16, -8],\n '2213': [-17, -10]}\n axs = []\n fig = plt.figure(figsize=size)\n gs0 = gridspec.GridSpec(5, 1)\n gs00 = gridspec.GridSpecFromSubplotSpec(4, 1, subplot_spec=gs0[0])\n ax0 = plt.Subplot(fig, gs00[-1, :])\n # ax1, ax2, ax3, ax4 = plt.subplot(fig, gs0[1]), plt.subplot(fig, gs0[2]), \\\n # plt.subplot(fig, gs0[3]), plt.subplot(fig, gs0[4])\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, figsize=size, sharex=True) # sharex\n params = {'mathtext.default': 'regular'}\n plt.rcParams.update(params)\n\n # # add a time window bar 2018/05 updated\n 
# nr, st = 20, 9\n # sub_no = 4*nr+st\n # ax0, ax1, ax2, ax3, ax4 = plt.subplot2grid((sub_no, 1), (0, 0), rowspan=st), \\\n # plt.subplot2grid((sub_no, 1), (st, 0), rowspan=nr), \\\n # plt.subplot2grid((sub_no, 1), (st+nr, 0), rowspan=nr), \\\n # plt.subplot2grid((sub_no, 1), (st+2*nr, 0), rowspan=nr), \\\n # plt.subplot2grid((sub_no, 1), (st+3*nr, 0), rowspan=nr)\n\n # params = {'mathtext.default': 'regular'}\n # plt.rcParams.update(params)\n # row 1 tb\n # ax1 = fig.add_subplot(511) # tb\n # ax0 setting, for boundary of seasons\n # timings = [60, 150, 260, 350, 366]\n # timing_color = ['aqua', 'red', 'orange', 'blue', 'aqua']\n # timing_color_rgba = plot_funcs.check_rgba(timing_color)\n # timing_color_rgba[3] = [0., .3, 1., 1.]\n # print timing_color_rgba\n # timing_name = [\"Frozen\", \"Thawing\", \"Thawed\", \"Freezing\", \" \"]\n # fill_y1 = 1\n # ax0.plot(soil[0][1], soil[0][2]*0)\n # plot_funcs.make_ticklabels_invisible(ax0) # hide the y_tick\n # ax0.tick_params(axis='x', which='both', bottom='off', top='off')\n # ax0.tick_params(axis='y', which='both', left='off', right='off')\n # text_x0 = 0\n #\n # for i in range(0, len(timings)):\n # ax0.fill_between(np.arange(text_x0, timings[i]), fill_y1, color=timing_color_rgba[i])\n # text_x = 0.5*(timings[i]+text_x0)\n # print text_x\n # ax0.text(text_x, 0.5, timing_name[i], va=\"center\", ha=\"center\") # 1.3 up\n # text_x0 = timings[i]+1\n # if i < len(timings)-1:\n\n # # add vertical line and label\n # ax0.axvline(timings[i])\n # ax0.text(timings[i], 1.3, timings[i], va=\"center\", ha=\"center\")\n\n\n print np.nanmax(soil[0][1]), np.nanmin(soil[0][1])\n ax0.set_xlim(xlims)\n axs.append(ax1)\n # l1, = ax1.plot(tb[0][0], tb[0][1], 'bo', markersize=2)\n _, ax1_2, l1 = plot_funcs.pltyy(tb[0][0], tb[0][1], 'test_comp2', 'T$_b$ (K)',\n t2=tb[2][0], s2=tb[2][1], label_y2= '$E_{Tbv}$\\n(K/day)',\n symbol=['k.', 'g-'],\n handle=[fig, ax1], nbins2=6) # plot tbv\n l1_le = plot_funcs.plt_more(ax1, tb[1][0], tb[1][1], line_list=[l1])\n # ax1.locator_params(axis='y', nbins=4)\n # ax1_2.axhline(y=0)\n ax1.set_ylabel('T$_b$ (K)')\n # ax1.legend([l1_le[0][0], l1_le[1]], ['T$_{BV}$', 'T$_{BH}$'], loc=3, prop={'size': 6})\n if title is not False:\n plt.title(title)\n #ax4.legend([l4], [fe[0]], loc=2, prop={'size': 10})\n # ax4.text(0.85, 0.85, '(a)')\n # fig.text(0.85, 0.21, '(e)')\n # fig.text(0.85, 0.37, '(d)')\n # fig.text(0.85, 0.53, '(c)')\n # fig.text(0.85, 0.705, '(b)')\n # fig.text(0.85, 0.87, '(a)')\n #ax1.annotate(str(vline[0]), xy=(vline[0], 260))\n #ax1.annotate(str(vline[1]), xy=(vline[1], 260))\n # row2 npr\n # ax2 = fig.add_subplot(512) # npr\n axs.append(ax2)\n _, ax2_2, l2 = plot_funcs.pltyy(npr[0][0], npr[0][1], 'test_comp2', 'NPR ($10^{-2}$)',\n t2=npr[1][0], s2=npr[1][1], label_y2='$E_{NPR}$\\n($10^{-2}$/day)',\n symbol=[s_symbol, 'g-'], handle=[fig, ax2], nbins2=6)\n # ax2.locator_params(axis='y', nbins=5)\n #ax0.set_ylim([0, 0.06])\n #ax0.set_ylim([0, 0.06])\n\n # sigma\n # ax3 = fig.add_subplot(513) # sigma\n axs.append(ax3)\n _, ax3_2, l2 = plot_funcs.pltyy(sigma[0][0], sigma[0][1], 'test_comp2', '$\\sigma^0_{45} (dB)$',\n t2=sigma[1][0], s2=sigma[1][1], label_y2='$E_{\\sigma^0_{45}}$\\n(dB/day)',\n symbol=[s_symbol, 'g-'], handle=[fig, ax3], nbins2=6)\n # ax3.set_ylim(site_lim[site_no])\n # ax3.locator_params(axis='y', nbins=4)\n\n # moisture and temperature\n # ax4 = fig.add_subplot(514) # T soil and temperature\n axs.append(ax4)\n if snow_plot is False:\n _, ax4_2, l2 = plot_funcs.pltyy(soil[0][1], soil[0][2], 'test_comp2', 'VWC 
(%)',\n t2=soil[1][1], s2=soil[1][2], label_y2='T$_{soil}$ ($^\\circ$C)',\n symbol=['k-', 'b-'], handle=[fig, ax4], nbins2=6)\n for ax_2 in [ax4_2]:\n ax_2.axhline(ls='--', lw=1.5)\n else:\n ax4.plot(snow[1], snow[2], 'k', linewidth=2.0)\n ax4_2 = ax4.twinx()\n if len(tair) > 0:\n tair[1][tair[1] < -60] = np.nan\n ax4_2.plot(tair[0], tair[1], 'k:')\n ax4_2.set_ylim([-30, 30])\n ax4_2.axhline(ls='--', lw=1.5)\n ax4_2.yaxis.set_major_locator(MaxNLocator(5))\n ax4_2.set_ylabel('T$_{air}$ ($^o$C)')\n\n\n ax2s = [ax1_2, ax2_2, ax3_2, ax4_2]\n ax_ins = [ax4]\n # swe\n # ax5 = fig.add_subplot(515) # swe\n # axs.append(ax5)\n # ax_ins.append(ax5)\n # ax5.plot(snow[1], snow[2], 'k', linewidth=2.0)\n # add air temperature\n # if len(tair) > 0:\n # ax5_2 = ax5.twinx()\n # ax2s.append(ax5_2)\n # tair[1][tair[1] < -60] = np.nan\n # ax5_2.plot(tair[0], tair[1], 'k:')\n # ax5_2.set_ylim([-30, 30])\n # ax5_2.axhline(ls='--', lw=1.5)\n # ax5_2.yaxis.set_major_locator(MaxNLocator(5))\n # ax5_2.set_ylabel('T$_{air}$ ($^o$C)')\n # if not pp:\n # if site_no in ['947', '949', '950', '967', '1089']:\n # ax5.set_ylabel('SWE (mm)')\n # ax5.set_ylim([0, 200])\n # else:\n # ax5.set_ylabel('SD (cm)')\n # ax5.set_ylim([0, 100])\n # if site_no in ['950', '1089']:\n # ax5.set_ylim([0, 500])\n # else:\n # ax5.set_ylabel('precipitation (mm)')\n # ax4.set_xlabel('Day of year 2016')\n\n # add vertical line\n lz = ['--', '--', '--']\n labelz = ['$\\sigma^0$', 'TB', 'NPR']\n if onset.size> 4: # freeze and thaw\n i2 = -1\n # for ax in [ax4, ax5]:\n # # for i in [0, 1, 2]:\n # for i in [0, 1, 2]:\n # ax.axvline(x=onset[i*2], color='k', ls=lz[i], label=labelz[i])\n # ax.axvline(x=onset[i*2+1], color='k', ls=lz[i])\n for ax in [ax3, ax1, ax2]:\n # ax.axvline(x=onset[-2], color='r', ls='-', label='in situ')\n # ax.axvline(x=onset[-1], color='r', ls='-')\n i2 += 1\n ax.axvline(x=onset[i2*2], color='k', ls=lz[i2], label=labelz[i2])\n ax.axvline(x=onset[i2*2+1], color='k', ls=lz[i2])\n elif onset.size <=4:\n for ax in ax_ins:\n for i in [0]:\n ax.axvline(x=onset[i], color='k', ls=lz[i], label=labelz[i])\n ax.axvline(x=onset[i+1], color='k', ls=lz[i+1])\n ax.axvline(x=onset[i+2], color='k', ls=lz[i+2])\n\n l2d_sm = ax4.axvline(x=onset[6], color='r', ls='--')\n ax4.axvline(x=onset[7], color='r', ls='--')\n\n # special vline\n if day_tout > 0:\n l2d, = ax4.axvline(x=day_tout, color='r', ls=':')\n ax1.axvline(x=end_ax1[0], color='b', ls='--')\n ax1.axvline(x=end_ax1[1], color='b', ls='--')\n ax2.axvline(x=end_ax2[0], color='b', ls='-')\n ax2.axvline(x=end_ax2[1], color='b', ls='-')\n ax3.axvline(x=end_ax3[0], color='b', ls=':')\n ax3.axvline(x=end_ax3[1], color='b', ls=':')\n\n # plot settings\n # ticks setting\n for ax in ax_ins:\n ax.yaxis.set_major_locator(MaxNLocator(4))\n for ax in axs:\n yticks = ax.yaxis.get_major_ticks()\n yticks[0].label1.set_visible(False)\n yticks[-1].label1.set_visible(False)\n for ax in ax2s:\n yticks = ax.yaxis.get_major_ticks()\n yticks[0].label2.set_visible(False)\n yticks[-1].label2.set_visible(False)\n # label location\n text4 = ['a', 'b', 'c', 'd', 'e']\n i4 = -1\n for i, ax in enumerate(axs):\n ax.yaxis.set_major_locator(MaxNLocator(4))\n i4 += 1\n ax.get_yaxis().set_label_coords(-0.09, 0.5)\n ax.text(0.02, 0.95, text4[i], transform=ax.transAxes, va='top', fontsize=16)\n # ax.annotate(text4[i4], xy=get_axis_limits(ax), fontweight='bold')\n for ax in ax2s:\n ax.yaxis.set_major_locator(MaxNLocator(4))\n ax.get_yaxis().set_label_coords(1.10, 0.5) # position of labels\n\n # ylims\n ax3_2.set_ylim([-3, 
2])\n ax1.set_ylim([210, 280])\n if site_no == '1233':\n ax1.set_ylim([180, 275])\n ax3_2.set_ylim([-3, 3])\n # ax1_2.set_ylim([-9, 9])\n if site_no == '1177':\n st = 0\n else:\n ax2.set_ylim([0, 6])\n ax2_2.set_ylim([-2, 2])\n\n # x_label\n for i3 in range(0, 4):\n axs[i3].set_xlabel('')\n\n if shade is False:\n shade_window = 'no shade'\n else:\n for ax in axs:\n for shade0 in shade:\n ax.axvspan(shade0[0], shade0[1], color=(0.8, 0.8, 0.8), alpha=0.5, lw=0)\n if xlims:\n for ax in axs:\n ax.set_xlim(xlims)\n\n # legend setting\n leg1 = ax1.legend([l1_le[0][0], l1_le[1]], ['T$_{bv}$', 'T$_{bh}$'],\n loc=3, ncol=1, prop={'size': 12}, numpoints=1)\n # for leg in [leg1]:\n # leg.get_frame().set_linewidth(0.0)\n # layout setting\n ax4.set_xlabel('Day of year 2016')\n plt.tight_layout()\n\n # if site_no == '1233':\n # ax1.set_visible(False)\n # ax1_2.set_visible(False)\n # ax3.set_visible(False)\n # ax3_2.set_visible(False)\n\n fig.subplots_adjust(hspace=0.05)\n\n # other setting like the title\n\n\n # ax_name = ['tb', 'npr', 'sig', 'VWC', 'SWE', 'tbG', 'nprG', 'sigG', 'tsoil']\n # ax_i = 0\n # yticks = ax2.yaxis.get_major_ticks()\n # yticks[0].label1.set_visible(False)\n # yticks[-2].label1.set_visible(False)\n # for ax in [ax1, ax2, ax3, ax4, ax5]:\n # yticks = ax.yaxis.get_major_ticks()\n # yticks[0].label1.set_visible(False)\n # yticks[-1].label1.set_visible(False)\n # for ax in [ax1_2, ax2_2, ax3_2, ax4_2]:\n # yticks = ax.yaxis.get_major_ticks()\n # yticks[0].label2.set_visible(False)\n # yticks[-1].label2.set_visible(False)\n plt.rcParams.update({'font.size': 16})\n print figname\n plt.savefig(figname, dpi=300)\n plt.close()\n\n return 0\n if vline is not False:\n ax_count = 0\n for ax in axs:\n ax.set_xlim([0, 350])\n ax_count += 1\n if type(vline) is list:\n ax.axvline(x=vline[0], color='k', ls='--')\n ax.axvline(x=vline[1], color='k', ls='-.')\n continue\n ax.axvline(x=vline[0, 1], color='k', label=repr(vline[0, 1]), ls='--')\n ax.axvline(x=vline[1, 1], color='k', label=repr(vline[1, 1]), ls='-.')\n ax.axvline(x=vline[2, 1], color='k', label=repr(vline[1, 1]), ls='--')\n ax.axvline(x=vline[3, 1], color='k', label=repr(vline[1, 1]), ls='-.')\n # ax.xaxis.set_minor_locator(months)\n # ax.xaxis.set_minor_formatter(monthsFmt)\n # ax.xaxis.set_major_locator(years)\n # ax.xaxis.set_major_formatter(yearsFmt)\n # ax.locator_params(axis='x', nbins=16)\n tick_num = np.array([50, 100, 150, 200, 250, 300, 350, 365, 415, 465, 515, 565, 615, 665, 715], dtype=int)\n ax.xaxis.set_ticks(tick_num)\n labels = [item.get_text() for item in ax.get_xticklabels()]\n n = 0\n for label in labels:\n if tick_num[n] == 50:\n if ax_count == 5:\n labels[n] = \"50\\nYear'15 \"\n else:\n labels[n] = \"50\"\n elif tick_num[n] == 350:\n labels[n] = ' '\n elif tick_num[n] == 365:\n labels[n] = \"365\\n Year'16\"\n elif tick_num[n] == 415:\n labels[n] = repr(tick_num[n]-365)\n elif tick_num[n] > 415:\n labels[n] = repr(tick_num[n]-365)\n else:\n labels[n] = repr(tick_num[n])\n n += 1\n # labels[0] = 'Year\\n2015'\n # labels[1] = '100'\n # labels[2] = '150'\n # labels[3] = '200'\n # labels[4] = '250'\n # labels[5] = '300'\n # labels[6] = ''\n # labels[7] = 'Year\\n2016'\n # labels[8] = '50'\n # labels[9] = '100'\n # labels[10] = '150'\n # labels[11] = '200'\n\n ax.set_xticklabels(labels)\n plt.savefig(figname, dpi=300)\n plt.close()", "def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. 
* np.log(2) * (x-mean)**2 / fwhm**2)", "def resample_gmms(model_set):\n samples = np.zeros(iter_num)\n\n for i in range(iter_num):\n rand_num = random()\n # For each distribution in the model\n for gmm_distro in model_set:\n # If the random number is less than the distribution's weight, where the weight is the sum of all\n # distribution's weights so far\n if rand_num < gmm_distro[3]:\n # Then sample from the distribution and save it as the path cost, then skip to the next iteration\n samples[i] = gauss(gmm_distro[0], gmm_distro[1])\n break\n\n # plt.hist(samples, bins=50, density=True)\n # plt.show()\n\n return samples", "def get_template_series(self, nb_images):\n\n # Tab for the series of images\n self.template = []\n\n # Tab\n temp = []\n\n # Make current position the zero position\n self.arm.set_to_zero([0, 1, 2])\n self.microscope.set_to_zero([0, 1, 2])\n\n # Take imges only in the template zone\n template = self.template_zone()\n height, width = template.shape[:2]\n\n # Tab of weight to detect where the pipette is\n weight = []\n\n # Detecting the tip\n for i in range(3):\n for j in range(3):\n if (i != 1) & (j != 1):\n # divide template zone into 8 images\n temp = template[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n\n # Search the tip using the number of darkest pixel in the image\n bin_edge, _ = np.histogram(temp.flatten())\n weight += [bin_edge.min()]\n else:\n # image is the center of template zone, do not consider to have functional get_withdraw_sign method\n weight += [-1]\n\n # pipette is in the image with the most darkest pixels\n index = weight.index(max(weight))\n j = index % 3\n i = index // 3\n\n # Update the position of the tip in image\n self.template_loc = [temp.shape[1] * (1 - j / 2.), temp.shape[0] * (1 - i / 2.)]\n\n # Get the series of template images at different height\n for k in range(nb_images):\n self.microscope.absolute_move(k - (nb_images - 1) / 2, 2)\n self.microscope.wait_motor_stop(2)\n time.sleep(1)\n img = self.template_zone()\n height, width = img.shape[:2]\n img = img[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n self.template += [img]\n\n # reset position at the end\n self.go_to_zero()\n pass", "def mGI(t):\r\n times = np.array([0, 3, 6, 9, 12, 15, 18, 21, 24])\r\n mGIs = np.array([0.0535789, 0.277942, 0.813305, 1., 0.373043, 0.00648925, 0.00439222, 0.0122333, 0.0535789])\r\n\r\n t_ = t % 24\r\n # print(\"GI:\", np.interp(t_, times, mGIs))\r\n return np.interp(t_, times, mGIs)", "def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):\n array = np.zeros(np.shape(x))\n for k in range(ntraps):\n array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)\n return array + offset", "def get_GP_samples(self):\n\n Z = tf.zeros([0, self.max_x_len, self.input_dim])\n\n # setup tf while loop (have to use this bc loop size is variable)\n def cond(i, Z):\n return i < self.N\n\n def body(i, Z):\n Yi = tf.reshape(tf.slice(self.Y, [i, 0], [1, self.num_obs_values[i]]), [-1])\n Ti = tf.reshape(tf.slice(self.T, [i, 0], [1, self.num_obs_times[i]]), [-1])\n ind_kfi = tf.reshape(tf.slice(self.ind_kf, [i, 0], [1, self.num_obs_values[i]]), [-1])\n ind_kti = tf.reshape(tf.slice(self.ind_kt, [i, 0], [1, self.num_obs_values[i]]), [-1])\n Xi = tf.reshape(tf.slice(self.X, [i, 0], [1, self.num_rnn_grid_times[i]]), [-1])\n X_len = self.num_rnn_grid_times[i]\n #T_len = self.num_obs_times[i]\n\n GP_draws = self.draw_GP(Yi, Ti, Xi, ind_kfi, ind_kti)\n pad_len = self.max_x_len - X_len # 
pad by this much\n padded_GP_draws = tf.concat([GP_draws, tf.zeros((self.n_mc_smps, pad_len, self.num_features))], 1)\n\n if self.use_med_cov:\n medcovs = tf.slice(self.med_cov_grid, [i, 0, 0], [1, -1, -1])\n tiled_medcovs = tf.tile(medcovs, [self.n_mc_smps, 1, 1])\n padded_GP_draws = tf.concat([padded_GP_draws, tiled_medcovs], 2)\n\n Z = tf.concat([Z, padded_GP_draws], 0)\n\n return i + 1, Z\n\n i = tf.constant(0)\n i, Z = tf.while_loop(cond, body, loop_vars=[i, Z],\n shape_invariants=[i.get_shape(), tf.TensorShape([None, None, None])])\n\n Z.set_shape([None, None, self.input_dim]) # somehow lost shape info, but need this\n\n return Z", "def line_SFR_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=False,add=True,cb=False)\n\n # Only 1 galaxy\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=True,add=True,cb=False)\n\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/lines_SFR_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "def expgaussian(mu, wid, timeconstant, x): \n # Gaussian signal broadened by an exponetial signal\n g = gaussian(mu, wid, x)\n \n hly = np.round( len(g) / 2.0 )\n ey = np.r_[np.zeros(hly),g,np.zeros(hly)]\n fy = np.fft.fft(ey)\n a = np.exp(-(np.arange(len(fy))) / timeconstant )\n fa = np.fft.fft(a)\n fy1 = fy * fa\n ybz = np.real(np.fft.ifft(fy1)) / np.sum(a)\n yb = ybz[hly:len(ybz)-hly]\n \n return yb", "def gaussian(amp, fwhm, mean):\n return lambda x: amp * np.exp(-4. 
* np.log(2) * (x-mean)**2 / fwhm**2)", "def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)", "def new_bragg(cls,\n DESCRIPTOR=\"Graphite\",\n H_MILLER_INDEX=0,\n K_MILLER_INDEX=0,\n L_MILLER_INDEX=2,\n TEMPERATURE_FACTOR=1.0,\n E_MIN=5000.0,\n E_MAX=15000.0,\n E_STEP=100.0,\n SHADOW_FILE=\"bragg.dat\"):\n # retrieve physical constants needed\n codata = scipy.constants.codata.physical_constants\n codata_e2_mc2, tmp1, tmp2 = codata[\"classical electron radius\"]\n # or, hard-code them\n # In [179]: print(\"codata_e2_mc2 = %20.11e \\n\" % codata_e2_mc2 )\n # codata_e2_mc2 = 2.81794032500e-15\n\n fileout = SHADOW_FILE\n descriptor = DESCRIPTOR\n\n hh = int(H_MILLER_INDEX)\n kk = int(K_MILLER_INDEX)\n ll = int(L_MILLER_INDEX)\n\n temper = float(TEMPERATURE_FACTOR)\n\n emin = float(E_MIN)\n emax = float(E_MAX)\n estep = float(E_STEP)\n\n #\n # end input section, start calculations\n #\n\n f = open(fileout, 'wt')\n\n cryst = xraylib.Crystal_GetCrystal(descriptor)\n volume = cryst['volume']\n\n #test crystal data - not needed\n itest = 1\n if itest:\n if (cryst == None):\n sys.exit(1)\n print (\" Unit cell dimensions are %f %f %f\" % (cryst['a'],cryst['b'],cryst['c']))\n print (\" Unit cell angles are %f %f %f\" % (cryst['alpha'],cryst['beta'],cryst['gamma']))\n print (\" Unit cell volume is %f A^3\" % volume )\n print (\" Atoms at:\")\n print (\" Z fraction X Y Z\")\n for i in range(cryst['n_atom']):\n atom = cryst['atom'][i]\n print (\" %3i %f %f %f %f\" % (atom['Zatom'], atom['fraction'], atom['x'], atom['y'], atom['z']) )\n print (\" \")\n\n volume = volume*1e-8*1e-8*1e-8 # in cm^3\n #flag Graphite Struecture\n f.write( \"%i \" % 5)\n #1/V*electronRadius\n f.write( \"%e \" % ((1e0/volume)*(codata_e2_mc2*1e2)) )\n #dspacing\n dspacing = xraylib.Crystal_dSpacing(cryst, hh, kk, ll)\n f.write( \"%e \" % (dspacing*1e-8) )\n f.write( \"\\n\")\n #Z's\n atom = cryst['atom']\n f.write( \"%i \" % atom[0][\"Zatom\"] )\n f.write( \"%i \" % -1 )\n f.write( \"%e \" % temper ) # temperature parameter\n f.write( \"\\n\")\n\n ga = (1e0+0j) + cmath.exp(1j*cmath.pi*(hh+kk)) \\\n + cmath.exp(1j*cmath.pi*(hh+ll)) \\\n + cmath.exp(1j*cmath.pi*(kk+ll))\n # gb = ga * cmath.exp(1j*cmath.pi*0.5*(hh+kk+ll))\n ga_bar = ga.conjugate()\n # gb_bar = gb.conjugate()\n\n\n f.write( \"(%20.11e,%20.11e ) \\n\" % (ga.real, ga.imag) )\n f.write( \"(%20.11e,%20.11e ) \\n\" % (ga_bar.real, ga_bar.imag) )\n f.write( \"(%20.11e,%20.11e ) \\n\" % (0.0, 0.0) )\n f.write( \"(%20.11e,%20.11e ) \\n\" % (0.0, 0.0) )\n\n zetas = [atom[0][\"Zatom\"]]\n for zeta in zetas:\n xx01 = 1e0/2e0/dspacing\n xx00 = xx01-0.1\n xx02 = xx01+0.1\n yy00= xraylib.FF_Rayl(int(zeta),xx00)\n yy01= xraylib.FF_Rayl(int(zeta),xx01)\n yy02= xraylib.FF_Rayl(int(zeta),xx02)\n xx = numpy.array([xx00,xx01,xx02])\n yy = numpy.array([yy00,yy01,yy02])\n fit = numpy.polyfit(xx,yy,2)\n #print \"zeta: \",zeta\n #print \"z,xx,YY: \",zeta,xx,yy\n #print \"fit: \",fit[::-1] # reversed coeffs\n #print \"fit-tuple: \",(tuple(fit[::-1].tolist())) # reversed coeffs\n #print(\"fit-tuple: %e %e %e \\n\" % (tuple(fit[::-1].tolist())) ) # reversed coeffs\n f.write(\"%e %e %e \\n\" % (tuple(fit[::-1].tolist())) ) # reversed coeffs\n\n f.write(\"%e %e %e \\n\" % (0.0, 0.0, 0.0)) # reversed coeffs\n\n\n npoint = 
int( (emax - emin)/estep + 1 )\n f.write( (\"%i \\n\") % npoint)\n for i in range(npoint):\n energy = (emin+estep*i)\n f1a = xraylib.Fi(int(zetas[0]),energy*1e-3)\n f2a = xraylib.Fii(int(zetas[0]),energy*1e-3)\n # f1b = xraylib.Fi(int(zetas[1]),energy*1e-3)\n # f2b = xraylib.Fii(int(zetas[1]),energy*1e-3)\n out = numpy.array([energy,f1a,abs(f2a),1.0, 0.0])\n f.write( (\"%20.11e %20.11e %20.11e \\n %20.11e %20.11e \\n\") % ( tuple(out.tolist()) ) )\n\n f.close()\n print(\"File written to disk: %s\" % fileout)", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def tps_rpm_bij_normals(x_nd, y_md, n_iter = 20, reg_init = .1, reg_final = .001, rad_init = .1, rad_final = .005, rot_reg = 1e-3, normal_coef=0.0001, \n nwsize=0.04, plotting = False, plot_cb = None, pts1 = None):\n \n _,d=x_nd.shape\n regs = loglinspace(reg_init, reg_final, n_iter)\n rads = loglinspace(rad_init, rad_final, n_iter)\n\n f = ThinPlateSpline(d)\n f.trans_g = np.median(y_md,axis=0) - np.median(x_nd,axis=0)\n \n g = ThinPlateSpline(d)\n g.trans_g = -f.trans_g\n\n\n # r_N = None\n \n for i in xrange(n_iter):\n xwarped_nd = f.transform_points(x_nd)\n ywarped_md = g.transform_points(y_md)\n \n fwddist_nm = ssd.cdist(xwarped_nd, y_md,'euclidean')\n fwddist_normals_nm = calculate_normal_dist2(xwarped_nd, y_md,nwsize)\n invdist_nm = ssd.cdist(x_nd, ywarped_md,'euclidean')\n invdist_normals_nm = calculate_normal_dist2(x_nd, ywarped_md, nwsize)\n \n #import IPython\n #IPython.embed()\n \n r = rads[i]\n prob_nm = np.exp( -(fwddist_nm + invdist_nm + normal_coef*(fwddist_normals_nm + invdist_normals_nm) / (2*r)))\n corr_nm, r_N, _ = balance_matrix3(prob_nm, 10, 1e-1, 2e-1)\n corr_nm += 1e-9\n \n wt_n = corr_nm.sum(axis=1)\n wt_m = corr_nm.sum(axis=0)\n\n\n xtarg_nd = (corr_nm/wt_n[:,None]).dot(y_md)\n ytarg_md = (corr_nm/wt_m[None,:]).T.dot(x_nd)\n \n if plotting and i%plotting==0 and plot_cb is not None:\n plot_cb(x_nd, y_md, xtarg_nd, corr_nm, wt_n, f)\n \n# f = fit_ThinPlateSpline_normals(x_nd, xtarg_nd, bend_coef = regs[i], wt_n=wt_n, rot_coef = rot_reg, normal_coef=normal_coef, nwsize = nwsize)\n# g = fit_ThinPlateSpline_normals(y_md, ytarg_md, bend_coef = regs[i], wt_n=wt_m, rot_coef = rot_reg, normal_coef=normal_coef, nwsize = nwsize)\n f = fit_ThinPlateSpline(x_nd, xtarg_nd, bend_coef = regs[i], wt_n=wt_n, rot_coef = rot_reg)#, normal_coef=normal_coef, nwsize = nwsize)\n g = fit_ThinPlateSpline(y_md, ytarg_md, bend_coef = regs[i], wt_n=wt_m, rot_coef = rot_reg)#, normal_coef=normal_coef, nwsize = nwsize)\n# print (f.transform_points(pts1))\n \n\n f._cost = tps.tps_cost(f.lin_ag, f.trans_g, f.w_ng, f.x_na, xtarg_nd, regs[i], 
wt_n=wt_n)/wt_n.mean()\n g._cost = tps.tps_cost(g.lin_ag, g.trans_g, g.w_ng, g.x_na, ytarg_md, regs[i], wt_n=wt_m)/wt_m.mean()\n return f,g", "def makeGaussian(size, fwhm, sigma, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n \n #return (np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)) #approximation using exponenial functions\n return ((1/(2*np.pi*sigma**2))*np.exp(-((xx)**2 + (yy)**2)/(2*sigma**2))) # symmetric 2D Gaussian distribution", "def generate_hm(height, width, kpts, maxlenght, weight):\n\n def makeGaussian(height, width, sigma=3, center=None):\n \"\"\" Make a square gaussian kernel.\n size is the length of a side of the square\n sigma is full-width-half-maximum, which\n can be thought of as an effective radius.\n \"\"\"\n x = np.arange(0, width, 1, float)\n y = np.arange(0, height, 1, float)[:, np.newaxis]\n if center is None:\n x0 = width // 2\n y0 = height // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)\n\n num = kpts.shape[0]\n\n hm = np.zeros((height, width, num), dtype=np.float32)\n for i in range(num):\n if not (np.array_equal(kpts[i], [-1, -1])):\n s = 7 # int(np.sqrt(maxlenght) * maxlenght * 10 / 4096) + 2\n hm[:, :, i] = makeGaussian(height, width, sigma=s, center=(kpts[i, 0], kpts[i, 1]))\n else:\n hm[:, :, i] = np.zeros((height, width))\n return hm", "def show_gf(self, x):\n g = np.zeros((len(x[0]), self._num_fu), dtype=np.float64)\n for j in range(self._num_fu):\n x1 = self._gf[j*5]\n x2 = self._gf[j*5+1]\n x3 = self._gf[j*5+2]\n w = self._gf[j*5+3]\n a = self._gf[j*5+4]\n r1 = pow((x[0]-x1), 2)+pow((x[1]-x2), 2)+pow((x[2]-x3), 2)\n g[:, j] = a*np.exp(-r1/abs(w))\n\n return g", "def gaussian_template(\n wavelengths: np.ndarray,\n mean: Union[float, np.ndarray],\n std: Union[float, np.ndarray] = 30.0,\n) -> np.ndarray:\n y = norm.pdf(wavelengths, mean, std)\n return y / np.max(y, axis=-1, keepdims=True)", "def hProdPDF(mDarkPhoton, epsilon, norm, binsp, binstheta, tmin = -0.5 * math.pi, tmax = 0.5 * math.pi, suffix=\"\"):\n angles = np.linspace(tmin,tmax,binstheta).tolist()\n anglestep = 2.*(tmax - tmin)/binstheta\n momentumStep = (pMax(mDarkPhoton)-pMin(mDarkPhoton))/(binsp-1)\n momenta = np.linspace(pMin(mDarkPhoton),pMax(mDarkPhoton),binsp,endpoint=False).tolist()\n hPDF = r.TH2F(\"hPDF_eps%s_m%s\"%(epsilon,mDarkPhoton) ,\"hPDF_eps%s_m%s\"%(epsilon,mDarkPhoton),\n binsp,pMin(mDarkPhoton)-0.5*momentumStep,pMax(mDarkPhoton)-0.5*momentumStep,\n binstheta,tmin-0.5*anglestep,tmax-0.5*anglestep)\n hPDF.SetTitle(\"PDF for A' production (m_{A'}=%s GeV, #epsilon =%s)\"%(mDarkPhoton,epsilon))\n hPDF.GetXaxis().SetTitle(\"P_{A'} [GeV]\")\n hPDF.GetYaxis().SetTitle(\"#theta_{A'} [rad]\")\n hPDFtheta = r.TH1F(\"hPDFtheta_eps%s_m%s\"%(epsilon,mDarkPhoton),\n \"hPDFtheta_eps%s_m%s\"%(epsilon,mDarkPhoton),\n binstheta,tmin-0.5*anglestep,tmax-0.5*anglestep)\n hPDFp = r.TH1F(\"hPDFp_eps%s_m%s\"%(epsilon,mDarkPhoton),\n \"hPDFp_eps%s_m%s\"%(epsilon,mDarkPhoton),\n binsp,pMin(mDarkPhoton)-0.5*momentumStep,pMax(mDarkPhoton)-0.5*momentumStep)\n hPDFp.GetXaxis().SetTitle(\"P_{A'} [GeV]\")\n hPDFtheta.GetXaxis().SetTitle(\"#theta_{A'} [rad]\")\n for theta in angles:\n for p in momenta:\n w = normalisedProductionPDF(p,theta,mDarkPhoton,epsilon,norm)\n hPDF.Fill(p,theta,w)\n hPDFtheta.Fill(theta,w)\n hPDFp.Fill(p,w)\n hPdfFilename = 
sys.modules['__main__'].outputDir+\"/ParaPhoton_eps%s_m%s%s.root\"%(epsilon,mDarkPhoton,suffix)\n outfile = r.TFile(hPdfFilename,\"recreate\")\n #weight = hPDF.Integral(\"width\")\n #print \"Weight = %3.3f\"%weight\n #hPDF.Scale(1./weight)\n hPDF.Write()\n hPDFp.Write()\n hPDFtheta.Write()\n outfile.Close()\n del angles\n del momenta\n return hPDF", "def doMakeLimbTemplate2(self):\n \"\"\"\n returnList = []\n templObjNameList = []\n templHandleList = []\n \"\"\"\n log.debug(\">>> doMakeLimbTemplate\")\n assert self.cls == 'TemplateFactory.go',\"Not a TemlateFactory.go instance!\"\n\n try:#Gather limb specific data and check\n #==============\n self.curveDegree = self._mi_templateNull.curveDegree\n self.rollOverride = self._mi_templateNull.rollOverride\n\n doCurveDegree = getGoodCurveDegree(self)\n if not doCurveDegree:raise ValueError,\"Curve degree didn't query\"\n\n #>>>Scale stuff\n size = returnModuleBaseSize(self._mi_module)\n\n lastCountSizeMatch = len(self.corePosList) -1\n except Exception,error:raise Exception,\"Gather limb data | {0}\".format(error)\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Making the template objects\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[1]), progress=1) \t\t\t\t\t \n try:\n templHandleList = []\n self.ml_controlObjects = []\n self._mi_locs = []\n for i,pos in enumerate(self.corePosList):# Don't like this sizing method but it is what it is for now\n #>> Make each of our base handles\n #============================= \n if i == 0:\n sizeMultiplier = 1\n elif i == lastCountSizeMatch:\n sizeMultiplier = .8\n else:\n sizeMultiplier = .75\n\n #>>> Create and set attributes on the object\n i_obj = cgmMeta.validateObjArg( curves.createControlCurve('sphere',(size * sizeMultiplier)),'cgmObject',setClass = True )\n\n curves.setCurveColorByName(i_obj.mNode,self.moduleColors[0])\n\n i_obj.doStore('cgmName','%s.%s'%(self._mi_module.coreNames.mNode,self.d_coreNamesAttrs[i])) \n #i_obj.addAttr('cgmName',value = str(self.l_coreNames[i]), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n if self.direction != None:\n i_obj.addAttr('cgmDirection',value = self.direction,attrType = 'string',lock=True) \n i_obj.addAttr('cgmType',value = 'templateObject', attrType = 'string',lock=True) \n i_obj.doName()#Name it\n\n mc.move (pos[0], pos[1], pos[2], [i_obj.mNode], a=True)\n i_obj.parent = self._mi_templateNull\n\n #>>> Loc it and store the loc\n #i_loc = cgmMeta.cgmObject( i_obj.doLoc() )\n i_loc = i_obj.doLoc()\n i_loc.addAttr('cgmName',value = self._mi_module.getShortName(), attrType = 'string', lock=True) #Add name tag\n i_loc.addAttr('cgmType',value = 'templateCurveLoc', attrType = 'string', lock=True) #Add Type\n i_loc.v = False # Turn off visibility\n i_loc.doName()\n\n self._mi_locs.append(i_loc)\n i_obj.connectChildNode(i_loc.mNode,'curveLoc','owner')\n i_loc.parent = self._mi_templateNull#parent to the templateNull\n\n mc.pointConstraint(i_obj.mNode,i_loc.mNode,maintainOffset = False)#Point contraint loc to the object\n\n templHandleList.append (i_obj.mNode)\n self.ml_controlObjects.append(i_obj)\n except Exception,error:raise Exception,\"Template object creation | {0}\".format(error)\n\n try:#>> Make the curve\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' 
\"%(self._strShortName,self.l_strSteps[2]), progress=2) \t\t\t\t\t \n i_crv = cgmMeta.validateObjArg( mc.curve (d=doCurveDegree, p = self.corePosList , os=True),'cgmObject',setClass = True )\n\n i_crv.addAttr('cgmName',value = str(self._mi_module.getShortName()), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n if self.direction != None:\n i_crv.addAttr('cgmDirection',value = self.direction, attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n\n i_crv.addAttr('cgmType',value = 'templateCurve', attrType = 'string', lock=True)\n curves.setCurveColorByName(i_crv.mNode,self.moduleColors[0])\n i_crv.parent = self._mi_templateNull \n i_crv.doName()\n i_crv.setDrawingOverrideSettings({'overrideEnabled':1,'overrideDisplayType':2},True)\n\n for i,i_obj in enumerate(self.ml_controlObjects):#Connect each of our handles ot the cv's of the curve we just made\n mc.connectAttr ( (i_obj.curveLoc.mNode+'.translate') , ('%s%s%i%s' % (i_crv.mNode, '.controlPoints[', i, ']')), f=True )\n\n\n self.foundDirections = returnGeneralDirections(self,templHandleList)\n log.debug(\"directions: %s\"%self.foundDirections )\n except Exception,error:raise Exception,\"template curve | {0}\".format(error)\n\n try:#>> Create root control\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[3]), progress=3) \t\t\t\t\t \n\n rootSize = (distance.returnBoundingBoxSizeToAverage(templHandleList[0],True)*1.25) \n i_rootControl = cgmMeta.validateObjArg( curves.createControlCurve('cube',rootSize),'cgmObject',setClass = True )\n\n curves.setCurveColorByName(i_rootControl.mNode,self.moduleColors[0])\n i_rootControl.addAttr('cgmName',value = str(self._mi_module.getShortName()), attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug \n i_rootControl.addAttr('cgmType',value = 'templateRoot', attrType = 'string', lock=True)\n if self.direction != None:\n i_rootControl.addAttr('cgmDirection',value = self.direction, attrType = 'string', lock=True)#<<<<<<<<<<<FIX THIS str(call) when Mark fixes bug\n i_rootControl.doName()\n\n #>>> Position it\n if self._mi_module.moduleType in ['clavicle']:\n position.movePointSnap(i_rootControl.mNode,templHandleList[0])\n else:\n position.movePointSnap(i_rootControl.mNode,templHandleList[0])\n\n #See if there's a better way to do this\n log.debug(\"templHandleList: %s\"%templHandleList)\n if self._mi_module.moduleType not in ['foot']:\n if len(templHandleList)>1:\n log.debug(\"setting up constraints...\") \n constBuffer = mc.aimConstraint(templHandleList[-1],i_rootControl.mNode,maintainOffset = False, weight = 1, aimVector = [0,0,1], upVector = [0,1,0], worldUpVector = self.worldUpVector, worldUpType = 'vector' )\n mc.delete (constBuffer[0]) \n elif self._mi_module.getMessage('moduleParent'):\n #l_parentTemplateObjects = self._mi_module.moduleParent.templateNull.getMessage('controlObjects')\n helper = self._mi_module.moduleParent.templateNull.msgList_get('controlObjects',asMeta = True)[-1].helper.mNode\n if helper:\n log.info(\"helper: %s\"%helper)\n constBuffer = mc.orientConstraint( helper,i_rootControl.mNode,maintainOffset = False)\n mc.delete (constBuffer[0]) \n\n i_rootControl.parent = self._mi_templateNull\n i_rootControl.doGroup(maintain=True)\n except Exception,error:raise Exception,\"Root creation | {0}\".format(error)\n\n\n try:#>> Store objects\n #============================= \n 
self._mi_templateNull.curve = i_crv.mNode\n self._mi_templateNull.root = i_rootControl.mNode\n self._mi_templateNull.msgList_connect('controlObjects',templHandleList)\n\n self._mi_rootControl = i_rootControl#link to carry\n except Exception,error:raise Exception,\"store | {0}\".format(error)\n\n try:#>> Orientation helpers\n #============================= \n mc.progressBar(self.str_progressBar, edit=True, status = \"%s >>Template>> step:'%s' \"%(self._strShortName,self.l_strSteps[3]), progress=3) \t\t\t\t\t \n \"\"\" Make our Orientation Helpers \"\"\"\n doCreateOrientationHelpers(self)\n doParentControlObjects(self)\n\n #if self._mi_module.getMessage('moduleParent'):#If we have a moduleParent, constrain it\n #constrainToParentModule(self.m)\n\n #doOrientTemplateObjectsToMaster(self._mi_module)\n except Exception,error:raise Exception,\"Orientation helpers | {0}\".format(error)\n\n return True", "def stampaGTFEsIn(dictTranscript, dictGenes, dictInput, fileOut, geneNames):\n\n\tstringaGTF \t\t\t\t= \t\t'%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'\t\t\t\t\t# Formato della riga da stampare nel file\n\texonF\t\t\t\t\t= \t\t'exon_number \"%d\"'\t\t\t\t\t\t\t# Formato della stringa di tipo exon (True)\n\tintronF\t\t\t\t\t=\t\t'intron_number \"%d\"'\t\t\t\t\t\t# Formato della stringa di tipo intron (False)\n\t\n\t# Indici all'interno del dizionario dei transcript\n\t#\n\tidx_transcriptName = 0\n\tidx_geneID = 1\n\t\n\t# Indici all'interno del dizionari dei geni\n\t#\n\tidx_geneName = 0\n\tidx_cromosoma = 1\n\n\t# Indici all'interno del dizionario degli introni e degli esoni\n\t#\n\tidx_start = 0\n\tidx_end = 1\n\tidx_tipo = 2\t\n\n\t# Tipo di regioni\n\tesone = True\n\tintrone = False\n\n\n\t# Apertura e preparazione dei file da scrivere (un file gtf con\n\t# esoni/introni per ogni gene e uno totale con tutte le regioni per tutti\n\t# i geni passati dall'utente\n\t#\t\n\tfiles = {}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\n\tfor gene in geneNames:\t\t\t\t\t\t\t\t\t\t\t\t \n\t\tcod = geneNames[gene]\n\t\t# Avendo tanti geni, ad ogni nome di gene si associa la relativa\n\t\t# cartella del gene corrente tra quelli passati dall'utente\n\t\t#\n\t\tif not path.exists(cartella % cod):\n\t\t\tsystem('mkdir ' + cartella % cod)\n\t\tfiles[gene] = open(str(cartella % cod + fileOut), 'w')\n\t\t\n\t# File contenente le regioni esoniche/introniche di tutti i geni\n\t# passati dall'utente (serve per mappare le reads)\n\t#\n\tfileGtf = open(str(fileOut), 'w')\t\t\t\t\t\t\t \n\n\tfor transcriptID in dictInput:\n\t\tgeneID \t\t\t= dictTranscript[transcriptID][idx_geneID]\n\t\tcromosoma\t\t= dictGenes[geneID][idx_cromosoma]\n\t\tgeneName\t\t= dictGenes[geneID][idx_geneName]\n\t\ttranscriptName \t= dictTranscript[transcriptID][idx_transcriptName]\n\t\t# Inizializzazione del numero di esone/introne da stampare nel file\n\t\t#\n\t\tnrEs \t\t\t= 1\n\t\tnrIn \t\t\t= 1\n\t\t\n\t\tfor i in range(0, len(dictInput[transcriptID][idx_start])):\n\t\t\tstart\t\t= dictInput[transcriptID][idx_start][i]\n\t\t\tend\t\t\t= dictInput[transcriptID][idx_end][i]\n\t\t\ttipo\t\t= dictInput[transcriptID][idx_tipo][i]\n\n\t\t\tif tipo == esone:\n\t\t\t\tregione = exonF % (nrEs)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato exon\n\t\t\t\tnrEs += 1\n\t\t\telse:\n\t\t\t\tregione = intronF % (nrIn)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato intron\n\t\t\t\tnrIn += 1\n\t\t\t\t\n\t\t\tstrGtf = stringaGTF % (cromosoma, str(start), str(end), regione,\t\t\n\t\t\t\t\t\t\t\t geneName, transcriptName)\t\t\t\t\t# Creazione della riga del 
file\n\t\t\t\n\t\t\tif geneName in geneNames:\t\t\t\t\t\t\t\t\t\t\t# Se il gene presenta regioni introniche..\n\t\t\t\tfiles[geneName].write(strGtf)\t\t\t\t\t\t\t\t\t# ..si stampa il file gtf relativo alle proprie..\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..regioni introniche nella propria cartella\n\n\t\t\tfileGtf.write(strGtf)\n\t\t\t\t\n\tif geneNames:\n\t\tfor gene in files:\n\t\t\tfiles[gene].close()\n\n\tfileGtf.close()", "def homogenous_poisson_gen():\n pass" ]
[ "0.5699116", "0.5599356", "0.5230809", "0.5207666", "0.5170641", "0.5158478", "0.51580274", "0.5144128", "0.5076723", "0.5075624", "0.50331193", "0.50331193", "0.5024959", "0.49573076", "0.49408728", "0.49217612", "0.48975858", "0.48955354", "0.48943654", "0.48930743", "0.48799923", "0.48453504", "0.48302928", "0.48218024", "0.48129565", "0.4784888", "0.47834697", "0.4777946", "0.47671887", "0.4763021", "0.47538927", "0.4750896", "0.47455582", "0.4745403", "0.47451857", "0.47425294", "0.4740136", "0.47375268", "0.47356796", "0.47287238", "0.47263196", "0.47202823", "0.47177365", "0.4713866", "0.47123045", "0.47120583", "0.47046342", "0.4692247", "0.46860147", "0.46670273", "0.46650064", "0.4642795", "0.4638214", "0.46366447", "0.46362364", "0.46285614", "0.462557", "0.46195382", "0.4614783", "0.46073467", "0.46040168", "0.46018404", "0.45949495", "0.45943215", "0.45937574", "0.45918804", "0.45888698", "0.45858613", "0.45836267", "0.45741883", "0.45622593", "0.456205", "0.45605934", "0.45598125", "0.4557426", "0.45573148", "0.45567954", "0.455194", "0.4546861", "0.4539076", "0.45385423", "0.45376584", "0.45356897", "0.45341104", "0.4530491", "0.4528303", "0.45268965", "0.45221457", "0.45210955", "0.45180863", "0.45153648", "0.4512422", "0.45007342", "0.4496499", "0.4494651", "0.44902727", "0.4487026", "0.44796866", "0.44783685", "0.44734994" ]
0.58730155
0
Defining prior distributions for the model.
def set_priors(parnames, limits, linenames, vsyst, nssps=1):
    priors = {}
    for parname in parnames:
        name = parname.split("_")[0]
        if name in limits:  # all the CvD ssp parameters
            vmin, vmax = limits[name]
            # print(parname,vmin,vmax)
            delta = vmax - vmin
            priors[parname] = stats.uniform(loc=vmin, scale=delta)
        elif parname in vsyst:
            priors[parname] = stats.norm(loc=vsyst[parname], scale=500)
        elif parname == "eta":  # what does eta do?
            priors["eta"] = stats.uniform(loc=1., scale=19)  # uniform distribution in range [1,19]
        elif parname == "nu":  # what does nu do?
            priors["nu"] = stats.uniform(loc=2, scale=20)  # uniform distribution in range [2,20]
        elif parname == "sigma":
            priors["sigma"] = stats.uniform(loc=50, scale=300)  # obtains the uniform distribution on [loc, loc + scale], i.e. uniform in range [50,300]
        elif parname == "sigma_gas":
            priors[parname] = stats.uniform(loc=50, scale=100)  # uniform between [50,100] km/s
        elif name == "w":
            priors[parname] = stats.uniform(loc=0, scale=1)  # weights uniform between 0 and 1
        elif name in linenames:
            # priors[parname] = stats.expon(loc=0, scale=0.5)  # favors low values >~0; make even stronger by decreasing scale.
            priors[parname] = stats.expon(loc=0, scale=0.2)  # favors low values >~0; make even stronger by decreasing scale.
        elif name in ["pred", "pblue"]:
            porder = int(parname.split("_")[1])
            if porder == 0:
                mu, sd = 1 / nssps, 1
                a, b = (0 - mu) / sd, (np.infty - mu) / sd
                priors[parname] = stats.truncnorm(a, b, mu, sd)
            else:
                priors[parname] = stats.norm(0, 0.05)
        else:
            print(f"parameter without prior: {parname}")
    return priors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def prior_distribution(self):\n out = self.model.forward(self.inducing_points)\n return MultivariateNormal(out.mean, out.lazy_covariance_matrix.evaluate_kernel())", "def get_prior(self):\n assert self._prior in self._priors, 'Unsupported prior! Check the _priors attribute for a list of priors.'\n if self._prior == 'Gaussian':\n prior = 0.5 * torch.sum(self.parameters ** 2)/self.prior_var\n elif self._prior == 'Cauchy':\n dimconst = (self.parameters.shape[0] + 1)/2.\n prior = dimconst*torch.log(self.prior_var + torch.sum(self.parameters ** 2))\n elif self._prior == 'Sparse':\n n = self.dataset.shape[1]\n gauss_prior = 0.5 * torch.sum(torch.exp(self.parameters[-1] * torch.exp(self.parameters[n:2*n]) * self.parameters[:n] ** 2))\n gamma_density = torch.distributions.Gamma(1.5,0.5)\n# gamma_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n# lambda_density = torch.distributions.Gamma(1.5,0.5)\n lambda_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n prior = gauss_prior + lambda_prior\n return prior", "def add_prior(self, prior):\n if self.rate_variation:\n # Gamma prior with mean 1 over all mutation rates\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRatePrior.s:%s\" % self.name, \"name\":\"distribution\"})\n compound = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRateCompound:%s\" % self.name, \"spec\":\"beast.core.parameter.CompoundValuable\", \"name\":\"x\"})\n plate = ET.SubElement(compound, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"var\", {\n \"idref\":\"featureClockRate:%s:$(feature)\" % self.name})\n gamma = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRatePriorGamma:%s\" % self.name, \"spec\":\"beast.math.distributions.Gamma\", \"name\":\"distr\", \"alpha\":\"@featureClockRateGammaShape:%s\" % self.name, \"beta\":\"@featureClockRateGammaScale:%s\" % self.name})\n # Exponential hyperprior on scale of Gamma prior\n # Exponential prior favours small scales over large scales, i.e. 
less rate variation\n # Mean scale 0.23 chosen for general sensibility, e.g.:\n # - Prior distribution is roughly 50/50 that ratio of fastest\n # to slowest feature rate in a dataset of size 200 is below\n # or above 10.\n # - Prior probability of roughly 0.90 that this ratio is below\n # 100.\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRateGammaScalePrior.s:%s\" % self.name, \"name\":\"distribution\", \"x\":\"@featureClockRateGammaScale:%s\" % self.name})\n ET.SubElement(sub_prior, \"Exponential\", {\"id\":\"featureClockRateGammaShapePriorExponential.s:%s\" % self.name, \"mean\":\"0.23\", \"name\":\"distr\"})", "def set_prior(self,field):\n self.observation_thresholds = [i/self.observations for i in range(0,self.observations)]\n self.observation_samples = 1\n # TODO: For use after integrating image processing with MCESP for Game-Delayed Reinforcements\n # self.norm = field.max()", "def _get_model_priors(self):\n if self._alpha_model_priors:\n return self._alpha_model_priors\n # sample the variables from their corresponding distributions\n params = self._get_prior_params()\n self._alpha_model_priors = self._params2probs(params)\n return self._alpha_model_priors", "def prior(self, batch_size: int = 1) -> Distribution:\n prior_params = self.prior_params.expand(\n batch_size, *self.prior_params.shape[-1:]\n )\n mu, log_sigma = prior_params.chunk(2, dim=-1)\n\n # return the distribution `p(z)`\n return Normal(mu, log_sigma.exp())", "def setupMixedPrior(self):\n\n if self.namePrior.find('mixed') < 0:\n return\n\n # we set up the default parameters for bounded flat prior,\n # then update them with non-flat examples\n if np.size(self.hyper) < 7:\n self.setupDefaultPars()\n\n # Adjust the hyperparameters for defaults.\n self.hyper[0][2] = 0.45\n self.hyper[1][2] = 0.05\n self.hyper[0][3] = 16.3\n self.hyper[1][3] = 0.1\n\n nMeths = np.shape(self.hyper)[-1]\n self.mixedNames = ['binaryBoundedOne' for i in range(nMeths)]\n\n ### Let's try some gaussians. 
Eccentricity and period\n self.mixedNames[2] = 'gaussianOne'\n self.mixedNames[3] = 'gaussianOne'\n\n self.findMixedMethods()", "def setPrior(self,xPrior,priorWeight):\n assert self.regularizationLambda == 0\n if not isinstance(xPrior,np.ndarray):\n xPrior = np.array(xPrior)\n self.count = 1\n self.sumWeight = priorWeight\n self.scale = 1\n self.AtA = np.eye(self.n)*priorWeight\n self.AtAinv = np.eye(self.n)/priorWeight\n self.Atb = xPrior*priorWeight\n self.btb = np.dot(xPrior,xPrior)*priorWeight\n self.degenerate = False\n self.x = xPrior", "def lnprior(self):\n \n return", "def prior(kernel_size, bias_size): #removed dtype=None, unused argument\n number = kernel_size + bias_size\n prior_model = keras.Sequential(\n [\n tfp.layers.DistributionLambda(\n lambda t: tfp.distributions.MultivariateNormalDiag(\n loc=tf.zeros(number), scale_diag=tf.ones(number)\n )\n )\n ]\n )\n return prior_model", "def prior_z(self) -> distributions.Distribution:\n return distributions.Categorical(self.pi)", "def buildConditionalPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.conditional_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_qxgy.dimshuffle(0,'x',1,'x') - self.log_pxgzw), axis=3), axis=[1,2])", "def set_prior_priorunc_synthetic(self):\n\n lai_coeff_absunc = None\n statevec_absunc = None\n\n #-- \n if self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n elif self.use_generic_prior:\n self._setprior_generic_agriculture()\n statevec_absunc = self.generic_prior_unc\n else:\n #-- overall number of time-points in schedule\n npts = self.get_npts()\n\n #-- default prior file\n prior_file = os.path.join(ipt_dir_path, 'mni_stat_jules_2017.csv')\n\n #-- get signature simulator default state\n msg = \"START reading state variables from file ***{}***...\".format(prior_file)\n FileLogger.info(msg)\n state_inst = sv.get_state_csv(fname=prior_file, fmt='%Y-%m-%d %H:%M:%S' )\n msg = \"...reading DONE\"\n FileLogger.info(msg)\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n\n for i,date_utc in enumerate(self.schedule_dct['date_utc']):\n idx, timedelt = sv.find_nearest_date_idx(state_inst.date_utc, date_utc)\n # print \"MVMV::nearest={} idx={} timedelt={}\".format(\n # state_inst.date_utc[idx], idx, timedelt)\n #-- LAI\n self.prstate[0,i] = state_inst.lai[idx]\n #-- canopy-height\n self.prstate[1,i] = state_inst.can_height[idx]\n #-- SM\n self.prstate[2,i] = state_inst.soil_moisture[idx]\n\n #-- set uncertainty values\n self._set_priorunc(statevec_absunc=statevec_absunc, lai_coeff_absunc=lai_coeff_absunc)", "def set_prior_distribution(self, dist: BaseDistribution):\n # TODO(thiagovas): Raise an exception if len(dist) != self.n_rows.\n self.prior = dist\n return self", "def buildZPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.z_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_pzgxw + T.log(self.hyper['num_clust'])), axis=3), axis=[1,2])\r\n\r\n self.z_prior_modif = - T.maximum(self.hyper['treshold_z_prior'], - self.z_prior)", "def set_prior_priorunc_general(self):\n\n #-- some configurations apply absolute uncertainties\n lai_coeff_absunc = None\n statevec_absunc = None\n is_generic_prior = False\n\n #--\n if self.prior_states_file!=None:\n states_file = self.prior_states_file\n basename = os.path.basename(states_file)\n if os.path.splitext(basename)[1]=='.nc':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n 
self._setprior_jules(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n elif os.path.splitext(basename)[1]=='.csv':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n self._setprior_csv(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n else:\n msg = \"Unrecognised format of states file ***{}***. Cannot continue!\".format(\n states_file)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n return\n elif self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n else:\n self._setprior_generic_agriculture()\n is_generic_prior = True\n statevec_absunc = self.generic_prior_unc\n\n #-- set uncertainty values\n self._set_priorunc( lai_coeff_absunc=lai_coeff_absunc,\n statevec_absunc=statevec_absunc,\n is_generic_prior=is_generic_prior )", "def prior_model(self) -> Collection:\n return Collection(self.prior)", "def buildWPriorTerm(self):\r\n\r\n # self.w_prior.shape == (minibatch size,)\r\n self.w_prior = 0.5*T.sum(1 + T.log(self.qwgy_var) - self.qwgy_mu**2-self.qwgy_var, axis=1)\r\n\r\n self.w_prior_modif = - T.maximum(self.hyper['treshold_w_prior'], -self.w_prior)", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def prep(self):\n \n # create a dict with prior probabilities\n self.row_priors = [0.0]*len(self.rows)\n self.feature_priors = dict()\n \n # denominator is given by reference priors\n denominator = sum(self.column_priors)\n # null_feature_prior is used when feature is not observed at all\n # this is set up to scale with features, i.e. 
arbitrarily adding\n # child features into an ontology should not skew sums over repr.\n null_feature_prior = 1/max(denominator, float(len(self.rows)))\n \n for rowname, rowindex in self.rows.items(): \n numerator = 0\n for colname, colindex in self.columns.items(): \n colprior = self.column_priors[colindex]\n numerator += self.data[colindex][rowindex]*colprior\n if numerator == 0:\n numerator = null_feature_prior \n self.row_priors[rowindex] = float(numerator)/denominator\n self.feature_priors[rowname] = self.row_priors[rowindex]\n\n return self", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n\r\n mu = np.zeros(2)\r\n Cov = np.array([[beta, 0], [0, beta]])\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu, Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Prior Distribution of α\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.show()\r\n \r\n return", "def __init__(self, prior: float, beta: float = 0., gamma: float = 1.0):\n super(NonNegativePULoss, self).__init__()\n\n self.beta = beta\n self.prior = prior\n self.gamma = gamma\n self.loss_fn = torch.nn.CrossEntropyLoss(reduction='none')", "def test_3_prior(self):\n print(\"test 3: prior probabilities\")\n\n for i, x in enumerate(self.X):\n print(i+1, prior_probability(\n x, self.means, self.dispersions, self.cluster_probabilities\n ), sep=' : ')", "def calculate_uniform_prior(grids_dict):\n shape = list(grids_dict.values())[0].shape\n prior = np.ones(shape, dtype=\"float\")\n # The prior will be normalised later\n return prior", "def P_prior(self):\n return dot(self.U_prior, dot(diag(self.D_prior), self.U_prior.T))", "def set_prior(self,which,what):\n\n which = self.grep_param_names(which)\n\n #check tied situation\n tie_partial_matches = [tie for tie in self.tied_indices if (not set(tie).isdisjoint(set(which))) & (not set(tie)==set(which))]\n if len(tie_partial_matches):\n raise ValueError, \"cannot place prior across partial ties\"\n tie_matches = [tie for tie in self.tied_indices if set(which)==set(tie) ]\n if len(tie_matches)>1:\n raise ValueError, \"cannot place prior across multiple ties\"\n elif len(tie_matches)==1:\n which = which[:1]# just place a prior object on the first parameter\n\n\n #check constraints are okay\n if isinstance(what, (priors.gamma, priors.log_Gaussian)):\n assert not np.any(which[:,None]==self.constrained_negative_indices), \"constraint and prior incompatible\"\n assert not np.any(which[:,None]==self.constrained_bounded_indices), \"constraint and prior incompatible\"\n unconst = np.setdiff1d(which, self.constrained_positive_indices)\n if len(unconst):\n print \"Warning: constraining parameters to be positive:\"\n print '\\n'.join([n for i,n in enumerate(self._get_param_names()) if i in unconst])\n print '\\n'\n self.constrain_positive(unconst)\n elif isinstance(what,priors.Gaussian):\n assert not np.any(which[:,None]==self.all_constrained_indices()), \"constraint and prior incompatible\"\n else:\n raise ValueError, \"prior not recognised\"\n\n\n #store the prior in a local list\n for w in which:\n self.priors[w] = what", "def priorDistribution(beta):\r\n ### TODO: Write your 
code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def bias_prior(self):", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicPoissonDistribution(self.mu)\n self.lowerBound = 0.0\n self.upperBound = sys.float_info.max\n else:\n self.raiseAnError(IOError,'Truncated poisson not yet implemented')", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def prior(mu):\n p = np.ones(len(mu))/(mu.max()-mu.min())\n return p", "def prior_sample(self):\n pass", "def prior_predictive(self):\n cfg = self.config\n n = cfg['batch_size'] * cfg['q/n_samples']\n n_samples = cfg['q/n_samples']\n with util.get_or_create_scope('model', reuse=True):\n h_prior = tf.cast(self.p_h_L.sample(n), cfg['dtype'])\n h_prior = tf.reshape(\n h_prior, [cfg['q/n_samples'], cfg['batch_size'], -1])\n h = [None] * cfg['p/n_layers']\n h[cfg['p/n_layers'] - 1] = h_prior\n for n in range(cfg['p/n_layers'] - 1, 0, -1):\n p_h_n = self.build_stochastic_layer(n, h_above=h[n])\n h[n - 1] = tf.cast(p_h_n.sample(), cfg['dtype'])\n return self.likelihood(h[0])", "def log_prior(self):\n raise NotImplementedError(\"the log_prior property should \"\n \"be defined in the Estimator sub-class\")", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicBernoulliDistribution(self.p)\n else:\n self.raiseAnError(IOError,'Truncated Bernoulli not yet implemented')", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def randomize(self):\n #first take care of all parameters (from N(0,1))\n x = self._get_params_transformed()\n x = np.random.randn(x.size)\n self._set_params_transformed(x)\n #now draw from prior where possible\n x = self._get_params()\n [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]\n self._set_params(x)\n self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)", "def setModelPrior(self,A,B,C,priorWeight):\n Cpattern = self.coeffPattern[2]\n for i in range(self.m):\n ai = A[i,:].tolist()\n bi = B[i,:].tolist()\n (xuc,constant) = self._toEstimator(i,ai,bi)\n if Cpattern == None or Cpattern[i] == None:\n xuc[-1] = C[i]\n self.estimators[i].setPrior(np.array(xuc),priorWeight)\n return", "def get_initial_marking_distribution(informed_prior):\n\n support = np.arange(0, MAX_MARK_VALUE+1)\n loc = support.min()\n max = support.max()\n scale = max-loc\n\n\n if not informed_prior:\n probs = ss.uniform(loc=loc, scale=scale).pdf(support)\n else: \n probs = ss.norm.pdf(support, scale=0.7, loc=support.mean()) \n \n probs /= probs.sum()\n x1 = np.random.choice(support, size=1000, p=probs)\n x2 = np.random.choice(support, size=1000, p=probs)\n x = x1-x2\n h,_ = np.histogram(x, bins=len(compute_support()))\n h = h / x.shape[0]\n plt.clf()\n plt.hist(x, bins=len(compute_support()))\n plt.savefig(FM().results_folder/'histogram')\n return ProbabilityDistribution(compute_support(), probability=h)", "def __init__(self, space, prior=None):\n super().__init__(space=space, linear=False, grad_lipschitz=np.nan)\n\n if prior is not None and prior not in self.domain:\n raise ValueError('`prior` not in `domain`'\n ''.format(prior, self.domain))\n\n self.__prior = prior", "def __init__(self, space, prior=None):\n super().__init__(space=space, linear=False, grad_lipschitz=np.nan)\n\n if prior is not None and prior not in self.domain:\n raise ValueError('`prior` not in `domain`'\n ''.format(prior, self.domain))\n\n self.__prior = prior", "def __init__(self, space, prior=None):\n super().__init__(space=space, linear=False, grad_lipschitz=np.nan)\n\n if prior is not None and prior not in self.domain:\n raise ValueError('`prior` not in `domain`'\n ''.format(prior, self.domain))\n\n self.__prior = prior", "def __init__(self, space, prior=None):\n super().__init__(space=space, linear=False, grad_lipschitz=np.nan)\n\n if prior is not None and prior not in self.domain:\n raise ValueError('`prior` not in `domain`'\n ''.format(prior, self.domain))\n\n self.__prior = prior", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def 
prior_param(self, param_dict={}): \n self.param_obj = Params(param_dict) # parameter object \n self.param_names = param_dict.keys() \n self.n_params = len(param_dict.keys()) # number of parameters in theta ", "def process_custom_prior(prior) -> Tuple[Distribution, int, bool]:\n\n check_prior_methods(prior)\n\n check_prior_batch_behavior(prior)\n\n prior, is_prior_numpy = maybe_wrap_prior_to_pytorch(prior)\n\n parameter_dim = prior.sample().numel()\n\n return prior, parameter_dim, is_prior_numpy", "def __init__(self, bandits: List[Bandit]):\n self.bandits = bandits\n # mu_pri and var_pri are hypermaterers for prior distribution\n self.mu_pri = np.zeros(len(self.bandits)) # 5 for each\n self.var_pri = np.ones(len(self.bandits))*5\n # mu and var are hyperparameters for posterior distribution\n self.mu = self.mu_pri\n self.var = self.var_pri\n self.var0 = 1 # 1 is taken but any can be taken no prob upto a limit constant of inintial distribution\n self.logging = Logging()", "def __init__(self, prior: Prior):\n # TODO: Consider analytical solution rather than implementing optimisation\n super().__init__(prior.factor, x=prior, name=namer(self.__class__.__name__))\n self.prior = prior\n self.label = f\"PriorFactor({prior.label})\"", "def randomize(self):\r\n # first take care of all parameters (from N(0,1))\r\n x = self._get_params_transformed()\r\n x = np.random.randn(x.size)\r\n self._set_params_transformed(x)\r\n # now draw from prior where possible\r\n x = self._get_params()\r\n if self.priors is not None:\r\n [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]\r\n self._set_params(x)\r\n self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)\r", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicLogNormalDistribution(self.mean,self.sigma,self.low)\n self.lowerBound = 0.0\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicLogNormalDistribution(self.mean,self.sigma,self.lowerBound,self.upperBound, self.low)", "def prior_sample_parameter(self, parameter):\n pass", "def set_prior(self, prior, warning=True):\n repriorized = self.unset_priors()\n self._add_to_index_operations(self.priors, repriorized, prior, warning)\n\n from .domains import _REAL, _POSITIVE, _NEGATIVE\n if prior.domain is _POSITIVE:\n self.constrain_positive(warning)\n elif prior.domain is _NEGATIVE:\n self.constrain_negative(warning)\n elif prior.domain is _REAL:\n rav_i = self._raveled_index()\n assert all(all(False if c is __fixed__ else c.domain is _REAL for c in con) for con in self.constraints.properties_for(rav_i)), 'Domain of prior and constraint have to match, please unconstrain if you REALLY wish to use this prior'", "def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)", "def prior_vars(self):\n priors = []\n for i in self.active_ssms(0):\n ssm = self.ssms[i]\n prior = ssm.prior_vars()\n\n if self.ssm_starts[i] < 0:\n P = np.diag(prior)\n P2 = P.copy()\n for k in range(-self.ssm_starts[i]):\n ssm.transition_covariance(P2, k+1, P)\n ssm.transition_noise_diag(k+1, prior)\n 
np.fill_diagonal(P, np.diag(P) + prior)\n P2 = P\n\n # since the interface only supports independent\n # priors, return a diagonal approximation of the true\n # prior\n prior = np.diag(P)\n priors.append(prior)\n return np.concatenate(priors)", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def initialize_Rs_prior(self):\n return [np.transpose(np.linalg.cholesky(K))\n for K in self.Ks]", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def initializeDistribution(self):\n self.convertToDistrDict['Hermite'] = self.convertHermiteToNormal\n self.convertToQuadDict ['Hermite'] = self.convertNormalToHermite\n self.measureNormDict ['Hermite'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma)\n self.lowerBound = -sys.float_info.max\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Hermite'\n self.preferredPolynomials = 'Hermite'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma,\n a,b)", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def _get_prior_parameters_observations(self):\n self._has_been_used = True\n\n file = (\n \"prior-parameters.npy\"\n if not self._gaussian_prior\n else \"prior-parameters-gaussian.npy\"\n )\n parameters = 
np.load(\n os.path.join(utils.get_data_root(), \"lotka-volterra\", file)\n )\n\n file = (\n \"prior-observations.npy\"\n if not self._gaussian_prior\n else \"prior-observations-gaussian.npy\"\n )\n observations = np.load(\n os.path.join(utils.get_data_root(), \"lotka-volterra\", file)\n )\n\n ix = np.random.permutation(range(parameters.shape[0]))\n\n return parameters[ix], observations[ix]", "def set_prior(self, regexp, what):\r\n if self.priors is None:\r\n self.priors = [None for i in range(self._get_params().size)]\r\n\r\n which = self.grep_param_names(regexp)\r\n\r\n # check tied situation\r\n tie_partial_matches = [tie for tie in self.tied_indices if (not set(tie).isdisjoint(set(which))) & (not set(tie) == set(which))]\r\n if len(tie_partial_matches):\r\n raise ValueError, \"cannot place prior across partial ties\"\r\n tie_matches = [tie for tie in self.tied_indices if set(which) == set(tie) ]\r\n if len(tie_matches) > 1:\r\n raise ValueError, \"cannot place prior across multiple ties\"\r\n elif len(tie_matches) == 1:\r\n which = which[:1] # just place a prior object on the first parameter\r\n\r\n\r\n # check constraints are okay\r\n\r\n if what.domain is POSITIVE:\r\n constrained_positive_indices = [i for i, t in zip(self.constrained_indices, self.constraints) if t.domain is POSITIVE]\r\n if len(constrained_positive_indices):\r\n constrained_positive_indices = np.hstack(constrained_positive_indices)\r\n else:\r\n constrained_positive_indices = np.zeros(shape=(0,))\r\n bad_constraints = np.setdiff1d(self.all_constrained_indices(), constrained_positive_indices)\r\n assert not np.any(which[:, None] == bad_constraints), \"constraint and prior incompatible\"\r\n unconst = np.setdiff1d(which, constrained_positive_indices)\r\n if len(unconst):\r\n print \"Warning: constraining parameters to be positive:\"\r\n print '\\n'.join([n for i, n in enumerate(self._get_param_names()) if i in unconst])\r\n print '\\n'\r\n self.constrain_positive(unconst)\r\n elif what.domain is REAL:\r\n assert not np.any(which[:, None] == self.all_constrained_indices()), \"constraint and prior incompatible\"\r\n else:\r\n raise ValueError, \"prior not recognised\"\r\n\r\n # store the prior in a local list\r\n for w in which:\r\n self.priors[w] = what", "def process_prior(tmp, model_num):\n\n if re_prior_const.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.constant, value=float(tmp[1]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n\n elif re_prior_normal.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.normal, mean=float(tmp[1]), variance=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n\n elif re_prior_uni.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.uniform, lower_bound=float(tmp[1]), upper_bound=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n\n elif re_prior_logn.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.lognormal, mu=float(tmp[1]), sigma=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n elif re_prior_gamma.match(tmp[0]):\n try:\n prior_params = Prior(type=PriorType.gamma, shape=float(tmp[1]), scale=float(tmp[2]))\n except ValueError:\n sys.exit(\"\\nValue of the prior for 
model %s (counting from 1) has wrong format: %s\" % (model_num, tmp[1]))\n else:\n sys.exit(\"\\nSupplied parameter prior %s unsupported\" % tmp[0])\n\n return prior_params", "def __init__(self, bandits: List[Bandit], epsilon:float = None):\n self.bandits = bandits\n self.epsilon = epsilon\n # mu_pri and var_pri are hypermaterers for prior distribution\n self.mu_pri = np.zeros(len(self.bandits))\n self.var_pri = np.ones(len(self.bandits))*5\n # mu and var are hyperparameters for posterior distribution\n self.mu = self.mu_pri\n self.var = self.var_pri\n self.var0 = 1 # 1 is taken but any can be taken no prob upto a limit constant of inintial distribution\n self.logging = Logging()", "def set_uniform_probabilities(self, sentence_aligned_corpus):\n ...", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)", "def prior_of_priors(self, tt):\n for i in xrange(self.n_params): \n try: \n p_theta *= self.param_obj.prior()[i].pdf(tt[i]) \n\n except UnboundLocalError: \n p_theta = self.param_obj.prior()[i].pdf(tt[i]) \n\n return p_theta", "def from_prior(self):\n M = 1E3*rng.rand()\n M = dnest4.wrap(M, self.M_min, self.M_max)\n M = np.array([M])\n # Sampling fluids from prior (uniform distribution between 0 and 1).\n fluids = [rng.rand() for i in range(0,fluid_number)]\n\n if self.int_lim:\n int_terms = np.zeros(len(self.int_lim))\n for i in range(len(self.int_lim)):\n term = 1E3*rng.rand()\n term = dnest4.wrap(term, self.int_lim[i][0], self.int_lim[i][1])\n int_terms[i] = term\n return np.concatenate((M, fluids, int_terms))\n return np.concatenate((M, fluids))", "def generate_parameters(self):\n self.parameters = np.zeros(self.D)\n for l in range(self.D):\n if self.p_l[l] >= np.random.uniform(0,1):\n self.parameters[l] = 1", "def _construct_sample_from_prior(self):\n z_sym = T.matrix()\n x_sym = T.matrix()\n irs = self.ir_steps\n oputs = [self.obs_transform(self.s0)]\n oputs.extend([self.obs_transform(self.si[i]) for i in range(irs)])\n _, hi_zmuv = self._construct_zmuv_samples(x_sym, 1)\n sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs, \\\n givens={ self.z: z_sym, \\\n self.x_in: T.zeros_like(x_sym), \\\n self.x_out: T.zeros_like(x_sym), \\\n self.hi_zmuv: hi_zmuv }, \\\n updates=self.scan_updates)\n def prior_sampler(samp_count):\n x_samps = to_fX( np.zeros((samp_count, self.obs_dim)) )\n old_switch = self.train_switch.get_value(borrow=False)\n # set model to generation mode\n self.set_train_switch(switch_val=0.0)\n z_samps = to_fX( npr.randn(samp_count, self.z_dim) )\n model_samps = sample_func(z_samps, x_samps)\n # set model back to either training or generation mode\n self.set_train_switch(switch_val=old_switch)\n return model_samps\n return prior_sampler", "def __init__(self):\n super(PriProb, self).__init__()\n # initialize R: distribute R_TOTAL reward points in J_avi locations randomly\n # self.r preserved for debugging, no real use in the script\n self.r = np.array(ad.randint_upto_sum(R_TOTAL, J_avi)).astype(NP_DTYPE)\n\n # expand self.r from J_avi locations to J locations using is_avi\n self.r_exp = np.zeros((J), dtype=NP_DTYPE)\n self.r_exp[np.nonzero(is_avi.cpu().numpy())] = self.r\n\n 
#normalizedR = ad.normalize(self.r_exp, using_max=False)\n self.R = nn.Parameter(torch.from_numpy(self.r_exp))", "def create_normal_normal_goals():\n # Create the pattern/form of the prior normal distribution\n beta_name_lv = var('beta_name')\n beta_size_lv = var('beta_size')\n beta_rng_lv = var('beta_rng')\n a_lv = var('a')\n R_lv = var('R')\n beta_prior_mt = mt.MvNormalRV(a_lv, R_lv,\n size=beta_size_lv,\n rng=beta_rng_lv,\n name=beta_name_lv)\n # beta_type_lvars = mt_type_params(beta_prior_mt)\n\n y_name_lv = var('y_name')\n y_size_lv = var('y_size')\n y_rng_lv = var('y_rng')\n F_t_lv = var('f')\n V_lv = var('V')\n E_y_mt = mt.dot(F_t_lv, beta_prior_mt)\n Y_mt = mt.MvNormalRV(E_y_mt, V_lv,\n size=y_size_lv,\n rng=y_rng_lv,\n name=y_name_lv)\n\n Y_obs_mt = mt.observed(obs_sample_mt, Y_mt)\n\n # Create tuple-form expressions for the posterior\n e_expr = mt.sub(Y_obs_mt, mt.dot(F_t_lv, a_lv))\n F_expr = (mt.transpose, F_t_lv)\n R_F_expr = (mt.dot, R_lv, F_expr)\n Q_expr = (mt.add,\n V_lv,\n (mt.dot,\n F_t_lv,\n R_F_expr))\n A_expr = (mt.dot, R_F_expr, (mt.matrix_inverse, Q_expr))\n # m = C \\left(F V^{-1} y + R^{-1} a\\right)\n m_expr = (mt.add, a_lv, (mt.dot, A_expr, e_expr))\n # C = \\left(R^{-1} + F V^{-1} F^{\\top}\\right)^{-1}\n # TODO: We could use the naive posterior forms and apply identities, like\n # Woodbury's, in another set of \"simplification\" relations.\n # In some cases, this might make the patterns simpler and more broadly\n # applicable.\n C_expr = (mt.sub,\n R_lv,\n (mt.dot,\n (mt.dot, A_expr, Q_expr),\n (mt.transpose, A_expr)))\n\n norm_posterior_exprs = (mt.MvNormalRV,\n m_expr, C_expr,\n y_size_lv, y_rng_lv)\n\n fact(conjugate,\n # MvNormal likelihood, MvNormal prior mean\n Y_obs_mt, norm_posterior_exprs)\n\n return ((eq, prior_dist_mt, beta_prior_mt),\n # This should unify `Y_mt` and `obs_dist_mt`.\n (eq, obs_mt, Y_obs_mt))", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def sample_from_prior(self, *args, **kwargs):\n pass", "def add_prior(self, layer_number, prior_type, low_bound, hi_bound, units='mil'):\n prior = {'layer_number':layer_number, 'prior_type':prior_type, \\\n 'low_bound':low_bound, 'hi_bound':hi_bound, 'units':units}\n self.priors.append(prior)\n return", "def init_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def _prior_gaussian(self, x_start):\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)", "def prior(cube, ndim, nparams):\n # construct prior from recovery file\n counter = 0\n if params2 is None:\n return\n for key in params2.keys():\n nparams_tmp = int(params2[key]['nparams'])\n for ii in range(nparams_tmp):\n # sp = [name, prior type, x1, x2]\n sp =\\\n params2[key]['param'+str(ii+1)].split(',')\n if sp[1][0] == 'U' and sp[2][:5]=='param' and sp[3][:5]=='param':\n subtract1 = int(key[-1]) - int(sp[2][-1])\n subtract2 = int(key[-1]) - int(sp[3][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract1], cube[counter-subtract2])\n elif sp[1][0] == 'U' and sp[2][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract], float(sp[3]))\n elif sp[1][0] == 'U' and sp[3][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n float(sp[2]), cube[counter - subtract])\n else:\n cube[counter] = GeneralPrior(cube[counter], sp[1], float(sp[2]),\n float(sp[3]))\n counter += 1", "def prioritizers(self, prioritizers):\n\n self._prioritizers = prioritizers", "def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')", "def get_prior(self, x):\n\n K_xx = self.kernel.eval(x, x)\n prior_mean = self.mean.eval(x)\n return prior_mean, K_xx", "def set_prior(self, parameter, prior):\n\n if parameter in self.parameter_map:\n self._mcmc.set_prior(self.parameter_map[parameter], prior)\n else:\n raise AstropyUserWarning(\"Parmater {name} not found in parameter\"\n \"map\".format(name=parameter))", "def reset_parameters_xavier_uniform(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters_xavier_uniform(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def initializeDistribution(self):\n if self.nPoints is None:\n self.xArray = np.arange(self.lowerBound,self.upperBound+1)\n else:\n self.xArray = np.linspace(self.lowerBound,self.upperBound,self.nPoints)\n\n # Here the actual calculation of discrete distribution parameters is performed\n self.pdfArray = 1.0/self.xArray.size * np.ones(self.xArray.size)\n paramsDict={}\n paramsDict['outcome'] = 
self.xArray\n paramsDict['state'] = self.pdfArray\n\n self.categoricalDist = Categorical()\n self.categoricalDist.initializeFromDict(paramsDict)\n initialPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(initialPerm)", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale)\n else:\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale,a,b)", "def get_default_prior_params(n_allele):\n\n prior_params_dict = dict()\n prior_params_paragami = paragami.PatternDict()\n\n # DP prior parameter for the individual admixtures\n prior_params_dict['dp_prior_alpha'] = np.array([6.0])\n prior_params_paragami['dp_prior_alpha'] = \\\n paragami.NumericArrayPattern(shape=(1, ), lb = 0.0)\n\n # prior on the allele frequencies\n # beta distribution parameters\n prior_params_dict['allele_prior_lambda_vec'] = np.ones(n_allele)\n prior_params_paragami['allele_prior_lambda_vec'] = \\\n paragami.NumericArrayPattern(shape=(n_allele, ), lb = 0.0)\n\n return prior_params_dict, prior_params_paragami", "def initializeDistribution(self):\n if self.lowerBoundUsed == False:\n self.lowerBound = -sys.float_info.max\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicLaplaceDistribution(self.location,self.scale,self.lowerBound,self.upperBound)", "def Initialize(log_like, log_prior, model_func, mean, cov):\n\n curr_params = proposal_rule(cov, mean, (len(mean)-1)/2)\n print('Init params:', curr_params) \n print_params(curr_params, int((len(mean)-1)/2))\n curr_model = model_func(curr_params)\n print('Init model', curr_model)\n curr_like = log_like(curr_model)\n print('Init like:', curr_like) \n curr_prior = log_prior(curr_params)\n print('Init prior', curr_prior)\n return(curr_params, curr_model, curr_like, curr_prior)", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def log_prior_parameters(self, x):\n par_dict = self.get_par_dict(x)\n exclude_parameters = list(set(par_dict.keys())-set(self.fit_parameters)-set(self.nuisance_parameters))\n prob_dict = self.par_obj.get_logprobability_all(par_dict, exclude_parameters=exclude_parameters)\n return sum([p for obj, p in prob_dict.items()])", "def 
log_prior_parameters(self, x):\n par_dict = self.get_par_dict(x)\n exclude_parameters = list(set(par_dict.keys())-set(self.fit_parameters)-set(self.nuisance_parameters))\n prob_dict = self.par_obj.get_logprobability_all(par_dict, exclude_parameters=exclude_parameters)\n return sum([p for obj, p in prob_dict.items()])", "def nuisancePriorPosterior(nuis,name):\n n,bins,patched = plt.hist(nuis,bins=40,alpha=0.5,color='g',lw=0,\n density=True, histtype='stepfilled')\n\n gbins = np.arange(-4,4,0.1)\n gaus = norm.pdf(gbins,0.,1.)\n line = plt.plot(gbins,gaus,'r',lw=2)\n\n (mm,ss) = norm.fit(nuis)\n\n plt.xlabel(\"Nuisance Parameter \"+name)\n plt.text(0.1,0.8,'mean=%1.2f'%mm,transform=plt.gca().transAxes)\n plt.text(0.1,0.65,'RMS=%1.2f'%ss,transform=plt.gca().transAxes)\n plt.legend((line[0],patched[0]),('prior','posterior'))\n\n return mm,ss", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicBinomialDistribution(self.n,self.p)\n else:\n self.raiseAnError(IOError,'Truncated Binomial not yet implemented')" ]
[ "0.7287337", "0.7219811", "0.698138", "0.69521374", "0.6878878", "0.6738291", "0.65607095", "0.65233296", "0.6507224", "0.64965516", "0.6484119", "0.64804757", "0.64787775", "0.6447634", "0.6433855", "0.6417294", "0.6408151", "0.64017296", "0.6352515", "0.6351014", "0.63464016", "0.63280743", "0.627214", "0.62582886", "0.62061816", "0.61850643", "0.6176571", "0.61764765", "0.61717284", "0.61484337", "0.61219144", "0.611578", "0.6101655", "0.60975194", "0.60653967", "0.6064222", "0.60607064", "0.60592836", "0.60543066", "0.6025806", "0.6018228", "0.6018228", "0.6018228", "0.6018228", "0.60095775", "0.6001441", "0.59959996", "0.598069", "0.5980024", "0.5968928", "0.5950582", "0.5941201", "0.59350866", "0.59327507", "0.5929349", "0.5914918", "0.5906582", "0.590075", "0.58877933", "0.5866188", "0.5866188", "0.5862142", "0.5862142", "0.5862142", "0.5862142", "0.5861947", "0.58522826", "0.58430505", "0.5842117", "0.5832814", "0.5827179", "0.5824281", "0.5813279", "0.5812216", "0.58111405", "0.58099115", "0.58056915", "0.5802241", "0.5796451", "0.5784576", "0.57804763", "0.5769911", "0.57670313", "0.57661796", "0.57603157", "0.5758687", "0.5758097", "0.57470953", "0.57413334", "0.5738344", "0.5738344", "0.57344496", "0.57287854", "0.57253325", "0.57157034", "0.5709856", "0.5699832", "0.56982046", "0.56982046", "0.569531", "0.5689266" ]
0.0
-1
Calculates the probability of a model.
def log_probability(theta):
    global priors
    global logp
    lp = np.sum([priors[p].logpdf(x) for p, x in zip(logp.parnames, theta)])
    if not np.isfinite(lp) or np.isnan(lp):
        return -np.inf
    ll = logp(theta)
    if not np.isfinite(ll):
        return -np.inf
    return lp + ll
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_model_prob(self, per_list_logodds):\n with tf.compat.v1.name_scope(name='compute_model_prob'):\n return tf.stop_gradient(\n tf.exp(-self._alpha *\n (per_list_logodds -\n tf.reduce_min(per_list_logodds, axis=2, keepdims=True))))", "def calculate_probability(self):\n return 0", "def p(self) -> Probability:\n ...", "def probability(self, words):\n if len(words) == 0:\n return 0\n \n prob = 1\n model = self.mdl\n \n words_ngram = NGramLM(self.N, []).create_ngrams(words) # Create NGram model for words\n for ngram in words_ngram:\n # Never seen before ngram or n-1gram\n if (ngram not in list(model['ngram'])) or (ngram[:-1] not in list(model['n1gram'])):\n return 0\n if isinstance(self, NGramLM):\n prob *= model[model['ngram'] == ngram]['prob'].values[0]\n \n def recur_prob(model, w):\n prob = 1\n prev_mod = model.prev_mdl\n if isinstance(prev_mod, UnigramLM): # Unigram base case\n prob *= prev_mod.mdl[w[0]]\n else:\n words_n1gram = NGramLM(prev_mod.N, []).create_ngrams(w) # Create NGram model for words\n prob *= prev_mod.mdl[prev_mod.mdl['ngram'] == words_n1gram[0]]['prob'].values[0]\n prob *= recur_prob(prev_mod, words_n1gram[0]) # Recursive call\n return prob\n\n prob *= recur_prob(self, words_ngram[0])\n \n return prob", "def prob(self, w):\n return self.counts[w] / self.total_count", "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def probabilities(self):\n raise NotImplementedError", "def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total", "def probability_of_default(model, prediction_features):\n return model.predict_proba(prediction_features)[:, 1]", "def model_probs(self, classifier=None):\n if not classifier:\n classifier = RandomForestClassifier(n_estimators=100, n_jobs=-1)\n classifier.fit(self.X_train, self.y_train)\n predictions = classifier.predict_proba(self.X)\n return predictions", "def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # add bias variable 1\n prob = np.zeros(X.shape[0], self.num_classes)\n ### YOUR CODE HERE\n z = X.dot(self.w)\n prob = soft_reg.softmax(z)\n ### 
END CODE\n return prob", "def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # Add one for bias to the first columns\n probs = np.zeros(X.shape[0])\n ### YOUR CODE HERE\n z = X.dot(self.w)\n probs = log_reg.logistic(z)\n ### END CODE\n assert probs.shape == (X.shape[0],)\n return probs", "def doc_prob(self, doc, cat):\n features = self.get_features(doc) \n # Multiply the probabilities of all the features together\n p = Decimal(1)\n for f in features:\n p *= Decimal(str(self.weighted_prob(f, cat, self.feature_prob))) \n return p", "def probabilities_score(model_id, test_set_id, rubric_id):\n result = {'true_average_probability': 0, 'false_average_probability': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_probability(model_id, test_set_id, rubric_id)\n\n true_number = 0\n true_probability = 0\n false_number = 0\n false_probability = 0\n\n for key in rubrication_result:\n if answers[key]:\n true_number += 1\n true_probability += rubrication_result[key]\n else:\n false_number +=1\n false_probability += rubrication_result[key]\n\n if true_number:\n result['true_average_probability'] = true_probability / true_number\n\n if false_number:\n result['false_average_probability'] = false_probability / false_number\n\n return result", "def prob(x):\n\treturn 1. * bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)", "def probability(self, samples):\n pass", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def posterior(self, model, sentence, label):\r\n\r\n if model == \"Simple\":\r\n cost = sum(\r\n [\r\n (\r\n (math.log(self.emission_probability[label[i]][sentence[i]]))\r\n + (math.log(self.posterior_probability[label[i]]))\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (math.log(1 / float(10 ** 10)))\r\n + (math.log(self.posterior_probability[label[i]]))\r\n )\r\n for i in range(len(sentence))\r\n ]\r\n )\r\n return cost\r\n elif model == \"Complex\":\r\n post_array = []\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * self.initial_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10)) * self.initial_probability[label[i]]\r\n )\r\n elif i == 1:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n )\r\n else:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * 
(\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 2]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * (\r\n self.transition_probability[label[i - 2]][label[i]]\r\n * self.posterior_probability[label[i - 2]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n )\r\n post_array = [math.log(p) for p in post_array]\r\n cost = sum(post_array)\r\n return cost\r\n\r\n elif model == \"HMM\":\r\n post_array = []\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n post_array.append(\r\n (\r\n self.initial_probability[label[i]]\r\n * self.emission_probability[label[i]][sentence[i]]\r\n )\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (self.initial_probability[label[i]] * (1 / float(10 ** 8)))\r\n )\r\n else:\r\n emi = (\r\n (self.emission_probability[label[i]][sentence[i]])\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n )\r\n\r\n min_val = post_array[i - 1] * (\r\n (self.transition_probability[label[i - 1]][label[i]])\r\n )\r\n\r\n post_array.append(emi * min_val)\r\n\r\n post_array = [math.log(p) for p in post_array]\r\n\r\n cost = sum(post_array)\r\n\r\n return cost\r\n else:\r\n print(\"Unknown algorithm!\")", "def scoring_function(self, model, y_true, y_predicted_probability):", "def prob(self):\n\t\treturn self._prob", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def perplexity(filepath, model):\n log_prob, count = log_prob_of_file(filepath, model)\n perplexity = math.exp((-1.0/count) * log_prob)\n return perplexity", "def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))", "def predict_proba(self, X):\n linear_model = X.dot(self.W) + self.b\n prob = 1 / (1 + np.exp(-linear_model))\n return prob", "def sum_model_probs(model, uniq_words):\n sum_probs = 0\n for word in uniq_words:\n sum_probs += model.get_prob(word)\n sum_probs += (voc_size - len(uniq_words)) * model.get_prob_by_word_freq(0)\n return sum_probs", "def perplexity(model, data):\n probs = [model.get_prob(word) for word in data] # get word's probability\n probs_log = [\n log2(word_prob) if word_prob > 0 else log2(float_info.epsilon)\n for word_prob in probs\n ] # log the probabilities. 
using epsilon when the probability is 0\n sum_probs = reduce(lambda a, b: a + b, probs_log) # sum all\n power_val = (-1 * sum_probs) / len(probs_log) # divide by n and neg all\n return 2 ** power_val", "def compute_probabilities():\n global total_spam_words, total_ham_words\n total_words = total_spam_words+total_ham_words\n unique_words = len(all_dict)\n print(\"Training Set Description: \")\n len_ham = len(ham_file_list)\n len_spam = len(spam_file_list)\n print(\"SPAM EMAILS: \",len_spam)\n print(\"HAM EMAILS: \",len_ham)\n print(\"Total words: \",total_words)\n print(\"Training...\")\n \n spam_probability = math.log((len_spam)/(len_spam+len_ham))\n ham_probability = math.log((len_ham)/(len_spam+len_ham))\n \n \n \n output_file = open(\"nbmodel.txt\", \"w+\", encoding=\"latin-1\")\n output_file.write(\"model_params \"+str(spam_probability)+\" \"+str(ham_probability)+\"\\n\")\n \n nbmodel = {}\n nbmodel[\"model_params\"] = (spam_probability,ham_probability)\n for word in all_dict.keys():\n spam_count = 1\n if word in spam_dict:\n spam_count+= spam_dict[word]\n \n word_spam_probability = math.log(spam_count / (total_spam_words+unique_words))\n \n ham_count = 1\n if word in ham_dict:\n ham_count+= ham_dict[word]\n \n word_ham_probability = math.log(ham_count / (total_ham_words+unique_words))\n \n output_file.write(word+\" \"+str(word_spam_probability)+\" \"+str(word_ham_probability)+\"\\n\")\n nbmodel[word] = (word_spam_probability, word_ham_probability) \n \n print(\"nbmodel.txt generated successfully...\")\n print(\"SPAM Probability: \",spam_probability)\n print(\"HAM Probability: \",ham_probability)\n output_file.close()", "def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def prob_distr(self, x):\n return 1.0/x", "def perplexity(sentences: List[Tuple[List[int], List[int]]], model: Seq2SeqAttentionModel) -> float:\n LL_Total = torch.tensor(0, dtype=torch.float)\n total_words = torch.tensor(0, dtype=torch.float)\n for i, (source_sentence, target_sentence) in enumerate(sentences):\n LL_Total += log_likelihood(source_sentence, target_sentence, model)\n total_words += len(target_sentence)\n\n return torch.exp(-LL_Total / total_words)", "def _calculate_probability(self,k):\n\t\tif abs(k * self.delta_x) > (3 * np.sqrt(self.variance)):\n\t\t\treturn 0.0\n\t\tbinom_coeff = special.binom(self.n,(self.n + k)/2)\n\t\tb_value = binom_coeff * ((self.p) ** ((self.n + k)/2)) * ((1-self.p) ** ((self.n - k)/2))\n\t\treturn b_value", "def find_exploration_proba(self):\n if self._nb_models_done is not None and self._nb_models_done > 500:\n nb_models = self._nb_models_done\n # proba no longer descreases, so I don't have to know the exact number of models\n else:\n nb_models = self.get_nb_models_done()\n\n if nb_models <= self.min_nb_of_models:\n return 1\n\n elif nb_models <= 100:\n 
return 0.5\n\n elif nb_models <= 500:\n return 0.25\n\n else:\n return 0.1", "def CalculateProbabilities(self, beta_0, beta_1):\n denom = self.zero_zero + self.zero_one + self.one_zero + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero = min( max( (self.zero_zero + self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one = min( max( (self.one_zero + self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_zero + self.one_zero + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_zero = min( max( (self.zero_zero + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_zero = min( max( (self.one_zero + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_one + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_one = min( max( (self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_one = min( max( (self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def probability(self, sequence):\n return 2 ** (self.log_probability(self._transform(sequence)))", "def _model(x, p):\n y_hat = 0\n for i, pi in enumerate(reversed(p)):\n y_hat += x**i * pi\n return y_hat", "def predict_proba(self, x):\n e = self.predict_evidence(x)\n a = e + self.prior\n return a / torch.sum(a, dim=-1, keepdim=True)", "def predict_proba(self):\n if self.rank_prob is None:\n raise ValueError('No results available. Did you already call predict(...)?')\n\n return np.array([sum(map(lambda x: x[1], result)) / len(result) for result in self.rank_prob])", "def test_model(parameters):\n if parameters is None:\n return \"No Value\"\n else:\n return round(modelo.predict_proba([parameters])[0]*100, 3)", "def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p", "def predict_probability(self, x, weights):\n # Take dot product of feature_matrix and coefficients \n scores = np.dot(x, weights)\n \n # Compute P(y_i = +1 | x_i, w) using the link function\n probs = 1./(1. 
+ np.exp(-scores))\n \n # return probs predictions\n return scores, probs", "def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * self.mcv**self.EXP_MCV", "def probability(series, params):\n\n prob = 1\n\n for result in series:\n\n prob *= params[result]\n\n return prob * params[\"die\"]", "def calc_prob_local(self, *args):\n return 0", "def prob_given(self, posterior, prior):\n\t # print \"posterior, prior\", posterior, prior\n\t return self.prob(merge(prior, posterior)) / self.prob(prior) if self.prob(prior) else 0", "def get_posterior_model_probabilities(self, mode='BIC'):\n # Note: assumes uniform prior!\n bf = np.exp(self.get_log_Bayes_factor(mode))\n if np.isinf(bf):\n return {'pmc': 0.0, 'pmd': 1.0}\n else:\n pmd = bf / (1+bf)\n pmc = 1 - pmd\n return {'pmc': pmc, 'pmd': pmd}", "def _get_model_priors(self):\n if self._alpha_model_priors:\n return self._alpha_model_priors\n # sample the variables from their corresponding distributions\n params = self._get_prior_params()\n self._alpha_model_priors = self._params2probs(params)\n return self._alpha_model_priors", "def pdf(self, point: np.ndarray) -> float:\n return self._probs.dot([rv.pdf(point) for rv in self._rvs])", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def get_probability(self, sentence):\n if len(sentence) == 1:\n return Decimal(10) ** self.get_unigram_log_prob(sentence)\n elif len(sentence) == 2:\n return Decimal(10) ** self.get_bigram_log_prob(sentence)\n else:\n log_prob = Decimal(0.0)\n for w1, w2, w3 in zip(sentence, sentence[1:], sentence[2:]):\n log_prob += self.get_trigram_log_prob((w1, w2, w3))\n log_prob = Decimal(log_prob)\n return Decimal(10) ** log_prob", "def get_probability(self, combination):\n\n\t\tprob = 1\n\t\tfor i in np.arange(self.codelength):\n\t\t\tprob *= self.prior[combination[i]-1]\n\t\treturn prob", "def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)", "def lnprobability(self):\n return", "def PredictiveDist(self, label='pred'):\n # TODO: fill this in\n lam = 1\n pred = thinkbayes2.MakePoissonPmf(lam, 15)\n return pred", "def compute_prob(self, w0, w, x_dict):\n\t\tdot_prod = self.compute_w_x_dot(w0, w, x_dict)\n\t\tlog_prob = dot_prod - self.log_sum_exp(dot_prod)\n\t\treturn exp(log_prob)", "def estimate_prob(self, history, word):\n\t\t# YOUR CODE HERE\n\n\t\tif history == '':\n\t\t\t# unigram\n\t\t\tword_frequency = self.ngram_counts[tuple([word])]\n\t\t\treturn word_frequency/self.total_counts\n\n\t\telse:\n\t\t\t# bigram\n\t\t\tword_frequency = self.ngram_counts[tuple([history, word])]\n\t\t\t# history_count = sum([self.ngram_counts[key] for key in self.ngram_counts if key[0] == history])\n\t\t\t# history_count = self.history_count[history]\n\t\t\thistory_count = self.ngram_counts[tuple([history])]\n\t\t\t# print('his: {}',format(history))\n\t\t\t# print('his count {}'.format(history_count))\n\t\t\treturn word_frequency/history_count", "def score():\n # Get probability from our data\n data = flask.request.json\n x = np.matrix(data[\"example\"])\n x_add = scaler.transform(x[0, (0,4,5,6,7,8)])\n x_scaled = np.delete(x, [0,4,5,6,7,8], axis=1)\n x_scaled = np.insert(x_scaled, (0,3,3,3,3,3), x_add, axis=1)\n prob = 
model.predict_proba(x_scaled)\n # Put the results in a dict to send as json\n results = {\"prob\": prob[0,1]}\n return flask.jsonify(results)", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def get_probability(self, word: Word):\n if len(word) == 0:\n return 0.0\n\n _check_is_legal_word(word, self.alphabet_size)\n result = 1.0\n current_state = self.initial_state\n for character in word:\n if current_state is None:\n return 0.0\n\n next_state, probability = self.transition_dict.get(current_state, {}).get(\n character, (None, 0.0)\n )\n current_state = next_state\n result *= probability\n\n return 0.0 if current_state != self.final_state else result", "def pbias(self) -> float:\n return float(100.0 * sum(self.predicted - self.true) / sum(self.true))", "async def predict(property: Property):\n prediction = model.predict(property.to_df())\n price = np.exp(prediction[0]) \n return '{}$ per night is an optimal price.'.format(round(price))", "def probability(self, tokens):\n\n return 2 ** self.log_probability(tokens)", "def _get_prob(self, a, b=1.0):\n p = self.prior\n return (p * a) / ((1-p)*b + p*a)", "def calculate_probability(self, unseen_mail):\n unseen_mail = self.__handle_unseen_mail_unknown_words(unseen_mail)\n \n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1));\n sentences = unseen_mail.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split(NGramModel.END_SENTENCE_TOKEN)\n \n log_probability = 0;\n for sentence in sentences:\n if len(sentence.strip()) > 0:\n word_list = sentence.split()\n word_list.append(NGramModel.END_SENTENCE_TOKEN)\n \n for ngram in self.__generate_n_grams(word_list, self.__n):\n probability = self.__smoother.calculate_probability(self, ' '.join(ngram))\n if probability == 0:\n return 0\n log_probability += math.log10(probability)\n return log_probability", "def predict(self, X):\n pred = np.zeros(X.shape[0])\n ### YOUR CODE HERE 1-3 lines\n probabilities = np.array([model.probability(X) for model in self.models])\n pred=np.argmax(probabilities, axis=0)\n ### END CODE\n assert pred.shape == (X.shape[0],)\n return pred", "def calculate_word_probabilities(word):\n\n\tprobabilities = {\"one\":0,\"two\":0,\"three\":0,\"four\":0,\"five\":0}\n\n\tfor star in range(1,6):\n\t\tconditional = float(word[number_to_text[star]])/statements_with_star[star]\n\t\tprobabilities[number_to_text[star]]=conditional*10\n\n\tdb.words.update({\"_id\":ObjectId(word[\"_id\"])},{\"$set\":{\"conditionals\":probabilities}})\n\n\treturn 1", "def probability(p):\n return p > random.uniform(0.0, 1.0)", "def prob(seq, model):\n if seq in model:\n\n return (model[seq][0], len(seq))\n elif len(seq) == 1: #this is an OOV, it isn't in the model, and is one long\n return (model[(\"<unk>\",)][0],0) #return 0 for order if OOV\n elif seq[:len(seq)-1] in model:\n\n pr=prob(seq[1:], model)\n return (model[seq[:len(seq)-1]][1] + pr[0], pr[1])\n else:\n\n return prob(seq[1:], model)", "def get_probs(self):\n\t\tprobArray = []\n\t\tfor combination in self.codepool:\n\t\t\tif 
self.feasible(combination):\n\t\t\t\tprobArray.append(self.get_probability(combination))\n\t\t\telse:\n\t\t\t\tprobArray.append(0)\n\t\tprobArray = np.array(probArray) / np.sum(probArray)\n\t\treturn probArray", "def predict_prob(self, _input_data):\n yhat_probs = (self.merged_model).predict(_input_data, verbose=0)\n\n return yhat_probs[:, 0]", "def joint_proba(self, X):\n return self.weights * self._bernoulli(X)", "def predict_probability_model(*args):\n final_data = None\n any_null = validate_none(args)\n if any_null:\n final_data = transform_fields(args[-3:])\n final_data = list(args[0:5]) + final_data\n predicted = test_model(final_data)\n converts, styles = user_converts(predicted)\n\n return [f'{predicted} %', converts] + styles", "def prob(self, feature_index, feature_value, class_):\r\n\r\n deviation = self.conditional_prob[class_][feature_index][1]\r\n mean = self.conditional_prob[class_][feature_index][0]\r\n\r\n val1 = math.pow((feature_value - mean), 2)\r\n val1 = val1/math.pow(deviation, 2)\r\n\r\n val2 = 2*math.pi*math.pow(deviation, 2)\r\n val2 = 1/(math.sqrt(val2))\r\n\r\n probability = val2 * math.exp(-val1)\r\n\r\n return probability", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def probability_array(self):\n q = self.apply_weights()\n return np.exp(q)/(1 + np.exp(q))", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def probability(problem, train_ixs, obs_labels, selected_ixs, batch_size, **kwargs):\n points = problem['points']\n model = problem['model']\n\n test_X = points[selected_ixs]\n\n p_x = model.predict_proba(test_X)\n\n return p_x[:,1].reshape(-1)", "def probability_from_internal(internal_values, constr):\n return internal_values / internal_values.sum()", "def get_ngram_prob(self, label_seq):\n curr_ngram = self.all_grams\n for i in range(0, len(label_seq)):\n label = label_seq[i]\n if i == len(label_seq) - 1:\n denom = curr_ngram.get_count() + self.SMOOTHING_VALUE * 9\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # For smoothing, just add self.SMOOTHING_VALUE\n numer = curr_ngram.get_count() + self.SMOOTHING_VALUE\n return float(numer) / denom", "def _predict_p(self, f):\n return self.p * np.exp(self.dbeta * f)", "def calculate_probability(k: int, m: int, n: int) -> float:\n population = [\"AA\" for _ in range(k)] + [\"Aa\" for _ in range(m)] + [\"aa\" for _ in range(n)]\n pairings = it.combinations(population, 2)\n probabilities = [PROBABILITIES[pairing] for pairing in pairings]\n output = sum(probabilities) / len(probabilities)\n\n return output", "def get_probability(fields, dic):\r\n sum_ = sum(dic.values())\r\n p = 0.0\r\n for f in fields:\r\n value = dic.get(f, 0.0) + 0.0001\r\n p = p + math.log(float(value)/float(sum_))\r\n return p", "def compute_prob_mle(X: np.ndarray) -> float:\n\n Geometric._check_input_data(X=X)\n Geometric._check_support(X=X)\n\n prob = 1 / X.mean()\n return prob", "def calc_probabilities(applications):\n sum_advantage = sum(app.get_advantage() for app in applications)\n return [app.get_advantage() / sum_advantage for app in 
applications]", "def pdf(self, test_vectors: np.ndarray) -> np.ndarray:\n probability = np.zeros((len(test_vectors)), float)\n for i in range(len(self.weights_)):\n curve = multivariate_normal(mean=self.means_[i], cov=self.covariances_[i], allow_singular=True)\n probability += self.weights_[i] * curve.pdf(test_vectors)\n return probability", "def get_probability_loss(self):\n return sum(self._loss)/len(self._loss)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def log_prob(self):", "def probability(self, token: str, follower: str) -> float:\n return self._cooccurrence_matrix.distribution(token).probability(follower)", "def perplexity(self, corpus):\n sum_pro = 0.0\n total_words = 0\n for sentence in corpus:\n sen_pro = self.sentence_logprob(sentence)\n sum_pro += sen_pro\n total_words += len(sentence)\n\n \n\n l = sum_pro/total_words\n w = 0.0\n w = 2**(-l)\n\n return w", "def proportional_strategy(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n freqs = count(their_hist)\n prediction_for_them = choices(CHOICES, weights=freqs)[0]\n return CHOICES[(prediction_for_them + 1) % 3]", "def prob_given(graph, posterior, prior):\n return graph.prob(merge(prior, posterior)) / graph.prob(prior)", "def predict_proba(self, Xs):\n\n Xs = check_Xs(Xs,\n multiview=True,\n enforce_views=self.n_views)\n\n X1 = Xs[0]\n X2 = Xs[1]\n\n # predict each probability independently\n y1_proba = self.estimator1_.predict_proba(X1)\n y2_proba = self.estimator2_.predict_proba(X2)\n # return the average probability for the sample\n return (y1_proba + y2_proba) * .5", "def prob(self, cut):\n return self._root.prob(cut)", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]" ]
[ "0.7188488", "0.7145453", "0.6747787", "0.67278534", "0.65877354", "0.6577112", "0.6537247", "0.6531125", "0.6514498", "0.65023434", "0.64306843", "0.63726646", "0.63619375", "0.63538843", "0.62945044", "0.6287746", "0.62856567", "0.62772304", "0.62686586", "0.62612885", "0.62612885", "0.6248773", "0.6238829", "0.6231773", "0.6228018", "0.62136304", "0.6213568", "0.62045926", "0.6193254", "0.6181407", "0.6173534", "0.61433756", "0.61276305", "0.61271226", "0.611515", "0.6110473", "0.61076784", "0.6095584", "0.60803795", "0.6067375", "0.60414124", "0.60288095", "0.6006739", "0.59858197", "0.59805524", "0.5978054", "0.59728396", "0.59715396", "0.596024", "0.59502923", "0.59468865", "0.594125", "0.59381384", "0.5929332", "0.59280473", "0.5925896", "0.5903797", "0.5902652", "0.5884513", "0.5870309", "0.58673507", "0.58515", "0.5850338", "0.58392817", "0.5835221", "0.58265114", "0.5824942", "0.58238494", "0.5822807", "0.58187085", "0.58146644", "0.5814225", "0.581126", "0.58108896", "0.58073986", "0.5796951", "0.57932454", "0.57914954", "0.5790204", "0.5788914", "0.5784001", "0.57810473", "0.57799286", "0.57692415", "0.5768635", "0.5759454", "0.57570976", "0.57565737", "0.5751395", "0.57400423", "0.57338303", "0.57333726", "0.57325584", "0.5723702", "0.57055956", "0.57041866", "0.57026684", "0.57013655", "0.5687335", "0.56827635", "0.56813157" ]
0.0
-1
Combine SSP traces to have mass/luminosity weighted properties
def weighted_traces(parnames, trace, nssps):
    weights = np.array([trace["w_{}".format(i+1)].data for i in range(
        nssps)])
    wtrace = []
    for param in parnames:
        data = np.array([trace["{}_{}".format(param, i+1)].data for i in range(nssps)])
        t = np.average(data, weights=weights, axis=0)
        wtrace.append(Table([t], names=["{}_weighted".format(param)]))
    return hstack(wtrace)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_combined_variation(nums, SSC, band, rms):\n\n def get_spectra(nums, SSC, band, rms):\n spectrum = spectra[str(SSC['no'])][band]\n frequency = spectrum['frequency'].to(u.GHz)\n intensity = spectrum['spectrum'].to(u.K)\n # shift spectrum to rest frequency\n velshift = SSC['velshift']\n frequency = [(-vsys-velshift).to(u.GHz, equivalencies=u.doppler_optical(f)).value for f in frequency]*u.GHz\n # remove NaNs\n frequency, intensity = crossmatch(frequency.to(u.GHz).value, intensity.to(u.K).value)\n # add noise\n intensities = []\n for num in nums:\n if not num==0:\n randstate = np.random.RandomState(num)\n noise = np.random.normal(loc=0., scale=rms.to(u.K).value, size=len(frequency))\n int_noise = intensity+noise\n intensities.append(int_noise)\n else:\n intensities.append(intensity)\n # get percentiles\n d16,dmed,d84 = np.percentile(np.array(intensities), (16,50,84), axis=0)\n return frequency,d16,dmed,d84\n\n def get_models(nums, SSC, band):\n with open(escape_fname(os.path.join(XCLASSdir,'SSC_'+str(SSC['no']),'model_spectrum','run_0','combined_model.spectrum.pickle')), 'rb') as f:\n m = pickle.load(f, encoding=\"latin1\")\n frequency = (m[:,0]*u.MHz).to(u.GHz).value\n\n models = []\n for num in nums:\n with open(escape_fname(os.path.join(XCLASSdir,'SSC_'+str(SSC['no']),'model_spectrum','run_'+str(num),'combined_model.spectrum.pickle')), 'rb') as f:\n m = pickle.load(f, encoding=\"latin1\")\n model = (m[:,1]*u.K).value\n models.append(model)\n m16,mmed,m84 = np.percentile(np.array(models), (16,50,84), axis=0)\n return frequency,m16,mmed,m84\n\n def set_up_figure(SSC, band):\n fig,ax = plt.subplots(nrows=1, ncols=1, squeeze=True, sharex='col', sharey='row', figsize=(10,8))\n ax.text(0.05, 0.9, 'SSC '+str(SSC['no'])+': '+band, color='k', transform=ax.transAxes, ha='left', va='top', weight='bold', fontsize=16, bbox=props)\n return fig,ax\n\n def plot_spectra(ax, frequency, d16, dmed, d84):\n ax.plot(frequency, dmed, lw=1, ls='-', color='k', zorder=3)\n ax.fill_between(frequency, d16, d84, color='k', alpha=0.5, zorder=2)\n\n def plot_fitted_spectra(ax, frequency, m16, mmed, m84):\n ax.plot(frequency, mmed, lw=1, ls='-', color='r', zorder=5)\n ax.fill_between(frequency, m16, m84, color='r', alpha=0.5, zorder=4)\n\n def get_detected_lines(band=None):\n # get detected species\n all_species = []\n for SSC in SSCs:\n for specie in detected_species[str(SSC['no'])]:\n if not specie in all_species:\n all_species.append(specie)\n # get all lines of the detected species\n all_lines = []\n for specie in all_species:\n slines = [l for l in lines if l['XCLASS']==specie]\n for sl in slines:\n all_lines.append(sl)\n # keep only lines of given band\n if not band==None:\n bandlines = []\n for line in all_lines:\n if band=='LSB':\n if line['restfreq']<350*u.GHz:\n bandlines.append(line)\n elif band=='USB':\n if line['restfreq']>350*u.GHz:\n bandlines.append(line)\n return sorted(bandlines, key=lambda k: k['restfreq'])\n else:\n return sorted(all_lines, key=lambda k: k['restfreq'])\n\n def label_lines(ax, spectrum, band):\n detected_lines = get_detected_lines(band=band)\n for idx,line in enumerate(detected_lines):\n restfreq = line['restfreq'].to(u.GHz).value\n if (restfreq>frequency[0] and restfreq<frequency[-1]):\n if band=='LSB':\n xlim = [342.4, 346.2]\n elif band=='USB':\n xlim = [354.3, 358.1]\n xloc = xlim[0] +((idx+0.5)/len(detected_lines))*(xlim[1]-xlim[0])\n ax.axvline(x=restfreq, ymin=0, ymax=1, color='dimgrey', ls='--', lw=0.5, zorder=1)\n ax.plot([restfreq,xloc], [1.05*np.nanmax(spectrum), 
1.05*1.05*np.nanmax(spectrum)], color='dimgrey', ls='--', lw=0.5, zorder=1, clip_on=False)\n ax.text(xloc, 1.06*1.05*np.nanmax(spectrum), line_tex(line), color='dimgrey', fontsize=10, rotation=90, ha='center', va='bottom')\n\n def format_figure(ax, frequency, spectrum, band):\n if band=='LSB':\n ax.set_xlim([342.4, 346.2])\n elif band=='USB':\n ax.set_xlim([354.3, 358.1])\n ax.set_ylim(-0.05*np.nanmax(spectrum), 1.05*np.nanmax(spectrum))\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.1))\n ax.yaxis.set_major_locator(MultipleLocator(10))\n ax.yaxis.set_minor_locator(MultipleLocator(2))\n ax.tick_params(axis='both', which='major', labelsize=12)\n ax.set_axisbelow(True)\n ax.grid(axis='y', ls=':', c='grey')\n ax.set_xlabel(r'$\\nu_\\mathrm{rest}$ [GHz]', fontsize=12)\n ax.set_ylabel(r'T$_\\mathrm{b}$ [K]', fontsize=12)\n fig.set_tight_layout(True)\n\n def save_figure(fig, band):\n savepath = escape_fname(os.path.join(plotdir, '03.XCLASS_fit', 'combined_spectra', 'SSC_'+str(SSC['no'])+'.'+band+'.combined_spectra.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_inches='tight')\n\n\n frequency, d16,dmed,d84 = get_spectra(nums, SSC, band, rms)\n mfrequency, m16,mmed,m84 = get_models(nums, SSC, band)\n fig,ax = set_up_figure(SSC, band)\n plot_spectra(ax, frequency, d16,dmed,d84)\n plot_fitted_spectra(ax, mfrequency, m16,mmed,m84)\n label_lines(ax, dmed, band)\n format_figure(ax, frequency, dmed, band)\n save_figure(fig, band)", "def spectral_data(spectra):\n weights = np.concatenate([ s.ivar for s in spectra ])\n flux = np.concatenate([ s.flux for s in spectra ])\n wflux = weights * flux\n return (weights, flux, wflux)", "def spindle_attributes(self):\n try:\n self.channels\n except AttributeError:\n # create if doesn't exist\n self.channels = [x[0] for x in self.data.columns]\n\n dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute\n [setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]\n self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])\n self.spindle_events = {}\n self.spindle_rejects = {}", "def weight_mm(self,m1,m2):\n lw = 1.\n\n # particle id and isolation\n lw *= self._muIDISOWeight.value(m1.pt(),m1.eta(),'0')\n lw *= self._muIDISOWeight.value(m2.pt(),m2.eta(),'0')\n\n # Trigger\n hlt_sf_run2012_a = (self._muTRIGGERWeight_leg8_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_A.value(m2.pt(),m2.eta(),'0') +\\\n self._muTRIGGERWeight_leg17_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg8_A.value(m2.pt(),m2.eta(),'0') -\\\n self._muTRIGGERWeight_leg17_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_A.value(m2.pt(),m2.eta(),'0'))\n\n hlt_sf_run2012_b = (self._muTRIGGERWeight_leg8_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_B.value(m2.pt(),m2.eta(),'0') +\\\n self._muTRIGGERWeight_leg17_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg8_B.value(m2.pt(),m2.eta(),'0') -\\\n self._muTRIGGERWeight_leg17_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_B.value(m2.pt(),m2.eta(),'0'))\n \n lw *= (0.5*hlt_sf_run2012_a + 0.5*hlt_sf_run2012_b) ##percentage according to the lumi in which they were not prescaled (apparently same efficinecy for AB)\n #lw *= 0.966 ## temporary solution!\n\n if abs(configuration.LeptonTnPfactor)<0.01 :\n return lw\n else:\n return lw + configuration.LeptonTnPfactor*self.uncertainty_mm(m1,m2)", "def flatNoisePellicle():\n #Get data\n 
wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def __init__(self, data1, data2, tail = 'two', significant_level=0.05):\r\n Critical_05 = pd.DataFrame({'2': [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0] ,\r\n '3': [-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 9.0, 9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 17.0, 17.0, 18.0, 18.0] ,\r\n '4': [-1.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 15.0, 16.0, 17.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 31.0] ,\r\n '5': [-1.0, 0.0, 1.0, 2.0, 3.0, 5.0, 6.0, 7.0, 8.0, 9.0, 11.0, 12.0, 13.0, 14.0, 15.0, 17.0, 18.0, 19.0, 20.0, 22.0, 23.0, 24.0, 25.0, 27.0, 28.0, 29.0, 30.0, 32.0, 33.0, 34.0, 35.0, 37.0, 38.0, 39.0, 40.0, 41.0, 43.0, 44.0, 45.0] ,\r\n '6': [-1.0, 1.0, 2.0, 3.0, 5.0, 6.0, 8.0, 10.0, 11.0, 13.0, 14.0, 16.0, 17.0, 19.0, 21.0, 22.0, 24.0, 25.0, 27.0, 29.0, 30.0, 32.0, 33.0, 35.0, 37.0, 38.0, 40.0, 42.0, 43.0, 45.0, 46.0, 48.0, 50.0, 51.0, 53.0, 55.0, 56.0, 58.0, 59.0] ,\r\n '7': [-1.0, 1.0, 3.0, 5.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, 32.0, 34.0, 36.0, 38.0, 40.0, 42.0, 44.0, 46.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 62.0, 64.0, 66.0, 68.0, 70.0, 72.0, 74.0] ,\r\n '8': [0, 2, 4, 6, 7, 10, 13, 15, 17, 19, 22, 24, 26, 29, 31, 34, 36, 38, 41, 43, 45, 48, 50, 53, 55, 57, 60, 62, 65, 67, 69, 72, 74, 77, 79, 81, 84, 86, 89] ,\r\n '9': [0, 2, 4, 7, 10, 12, 15, 17, 20, 23, 26, 28, 31, 34, 37, 39, 42, 45, 48, 50, 53, 56, 59, 62, 64, 67, 70, 73, 76, 78, 81, 84, 87, 89, 92, 95, 98, 101, 103] ,\r\n '10': [0, 3, 5, 8, 11, 14, 17, 20, 23, 26, 29, 33, 36, 39, 42, 45, 48, 52, 55, 58, 61, 64, 67, 71, 74, 77, 80, 83, 87, 90, 93, 96, 99, 103, 106, 109, 112, 115, 119] ,\r\n '11': [0, 3, 6, 9, 13, 16, 19, 23, 26, 30, 33, 37, 40, 44, 47, 51, 55, 58, 62, 65, 69, 73, 76, 80, 83, 87, 90, 94, 98, 101, 105, 108, 112, 116, 119, 123, 127, 130, 134] ,\r\n '12': [1, 4, 7, 11, 14, 18, 22, 26, 29, 33, 37, 41, 
45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149] ,\r\n '13': [1, 4, 8, 12, 16, 20, 24, 28, 33, 37, 41, 45, 50, 54, 59, 63, 67, 72, 76, 80, 85, 89, 94, 98, 102, 107, 111, 116, 120, 125, 129, 133, 138, 142, 147, 151, 156, 160, 165] ,\r\n '14': [1, 5, 9, 13, 17, 22, 26, 31, 36, 40, 45, 50, 55, 59, 64, 67, 74, 78, 83, 88, 93, 98, 102, 107, 112, 117, 122, 127, 131, 136, 141, 146, 151, 156, 161, 165, 170, 175, 180] ,\r\n '15': [1, 5, 10, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59, 64, 70, 75, 80, 85, 90, 96, 101, 106, 111, 117, 122, 127, 132, 138, 143, 148, 153, 159, 164, 169, 174, 180, 185, 190, 196] ,\r\n '16': [1, 6, 11, 15, 21, 26, 31, 37, 42, 47, 53, 59, 64, 70, 75, 81, 86, 92, 98, 103, 109, 115, 120, 126, 132, 137, 143, 149, 154, 160, 166, 171, 177, 183, 188, 194, 200, 206, 211] ,\r\n '17': [2, 6, 11, 17, 22, 28, 34, 39, 45, 51, 57, 63, 67, 75, 81, 87, 93, 99, 105, 111, 117, 123, 129, 135, 141, 147, 154, 160, 166, 172, 178, 184, 190, 196, 202, 209, 215, 221, 227] ,\r\n '18': [2, 7, 12, 18, 24, 30, 36, 42, 48, 55, 61, 67, 74, 80, 86, 93, 99, 106, 112, 119, 125, 132, 138, 145, 151, 158, 164, 171, 177, 184, 190, 197, 203, 210, 216, 223, 230, 236, 243] ,\r\n '19': [2, 7, 13, 19, 25, 32, 38, 45, 52, 58, 65, 72, 78, 85, 92, 99, 106, 113, 119, 126, 133, 140, 147, 154, 161, 168, 175, 182, 189, 196, 203, 210, 217, 224, 231, 238, 245, 252, 258] ,\r\n '20': [2, 8, 14, 20, 27, 34, 41, 48, 55, 62, 69, 76, 83, 90, 98, 105, 112, 119, 127, 134, 141, 149, 156, 163, 171, 178, 186, 193, 200, 208, 215, 222, 230, 237, 245, 252, 259, 267, 274] \r\n })\r\n\r\n Critical_1 = pd.DataFrame({'2': [-1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 11.0] ,\r\n '3': [-1.0, -1.0, 0.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 5.0, 5.0, 6.0, 7.0, 7.0, 8.0, 9.0, 9.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 14.0, 15.0, 15.0, 16.0, 17.0, 17.0, 18.0, 19.0, 19.0, 20.0, 21.0, 21.0, 22.0, 23.0, 23.0, 24.0] ,\r\n '4': [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 38.0, 39.0] ,\r\n '5': [0, 1, 2, 4, 5, 6, 8, 9, 11, 12, 13, 15, 16, 18, 19, 20, 22, 23, 25, 26, 28, 29, 30, 32, 33, 35, 36, 38, 39, 40, 42, 43, 45, 46, 48, 49, 50, 52, 53] ,\r\n '6': [0, 2, 3, 5, 7, 8, 10, 12, 14, 16, 17, 19, 21, 23, 25, 26, 28, 30, 32, 34, 36, 37, 39, 41, 43, 45, 46, 48, 50, 52, 54, 56, 57, 59, 61, 63, 65, 67, 68] ,\r\n '7': [0, 2, 4, 6, 8, 11, 13, 15, 17, 19, 21, 24, 26, 28, 30, 33, 35, 37, 39, 41, 44, 46, 48, 50, 53, 55, 57, 59, 61, 64, 66, 68, 70, 73, 75, 77, 79, 82, 84] ,\r\n '8': [1, 3, 5, 8, 10, 13, 15, 18, 20, 23, 26, 28, 31, 33, 36, 39, 41, 44, 47, 49, 52, 54, 57, 60, 62, 65, 68, 70, 73, 76, 78, 81, 84, 86, 89, 91, 94, 97, 99] ,\r\n '9': [1, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, 60, 63, 66, 69, 72, 75, 78, 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115] ,\r\n '10': [1, 4, 7, 11, 14, 17, 20, 24, 27, 31, 34, 37, 41, 44, 48, 51, 55, 58, 62, 65, 68, 72, 75, 79, 82, 86, 89, 93, 96, 100, 103, 107, 110, 114, 117, 121, 124, 128, 131] ,\r\n '11': [1, 5, 8, 12, 16, 19, 23, 27, 31, 34, 38, 42, 46, 50, 54, 57, 61, 65, 69, 73, 77, 81, 85, 89, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 131, 135, 139, 143, 147] ,\r\n '12': [2, 5, 9, 13, 17, 21, 26, 30, 34, 
38, 42, 47, 51, 55, 60, 64, 68, 72, 77, 81, 85, 90, 94, 98, 103, 107, 111, 116, 120, 124, 128, 133, 137, 141, 146, 150, 154, 159, 163] ,\r\n '13': [2, 6, 10, 15, 19, 24, 28, 33, 37, 42, 47, 51, 56, 61, 65, 70, 75, 80, 84, 89, 94, 98, 103, 108, 113, 117, 122, 127, 132, 136, 141, 146, 151, 156, 160, 165, 170, 175, 179] ,\r\n '14': [2, 7, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61, 66, 71, 77, 82, 87, 92, 97, 102, 107, 113, 118, 123, 128, 133, 138, 144, 149, 154, 159, 164, 170, 175, 180, 185, 190, 196] ,\r\n '15': [3, 7, 12, 18, 23, 28, 33, 39, 44, 50, 55, 61, 66, 72, 77, 83, 88, 94, 100, 105, 111, 116, 122, 128, 133, 139, 144, 150, 156, 161, 167, 172, 178, 184, 189, 195, 201, 206, 212] ,\r\n '16': [3, 8, 14, 19, 25, 30, 36, 42, 48, 54, 60, 65, 71, 77, 83, 89, 95, 101, 107, 113, 119, 125, 131, 137, 143, 149, 156, 162, 168, 174, 180, 186, 192, 198, 204, 210, 216, 222, 228] ,\r\n '17': [3, 9, 15, 20, 26, 33, 39, 45, 51, 57, 64, 70, 77, 83, 89, 96, 102, 109, 115, 121, 128, 134, 141, 147, 154, 160, 167, 173, 180, 186, 193, 199, 206, 212, 219, 225, 232, 238, 245] ,\r\n '18': [4, 9, 16, 22, 28, 35, 41, 48, 55, 61, 68, 75, 82, 88, 95, 102, 109, 116, 123, 130, 136, 143, 150, 157, 164, 171, 178, 185, 192, 199, 206, 212, 219, 226, 233, 240, 247, 254, 261] ,\r\n '19': [4, 10, 17, 23, 30, 37, 44, 51, 58, 65, 72, 80, 87, 94, 101, 109, 116, 123, 130, 138, 145, 152, 160, 167, 174, 182, 189, 196, 204, 211, 218, 226, 233, 241, 248, 255, 263, 270, 278] ,\r\n '20': [4, 11, 18, 25, 32, 39, 47, 54, 62, 69, 77, 84, 92, 100, 107, 115, 123, 130, 138, 146, 154, 161, 169, 177, 185, 192, 200, 208, 216, 224, 231, 239, 247, 255, 263, 271, 278, 286, 294] })\r\n \r\n self.critical05 = Critical_05\r\n self.critical1 = Critical_1\r\n\r\n # Mann Whitney Test \r\n x = np.asarray(data1)\r\n y = np.asarray(data2)\r\n n1 = len(x)\r\n n2 = len(y)\r\n ranked = rankdata(np.concatenate((x, y)))\r\n rankx = ranked[0:n1] # get the x-ranks\r\n u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x\r\n u2 = n1*n2 - u1 # remainder is U for y\r\n\r\n # use the min(u1, u2) as u-stat\r\n if u1 <= u2:\r\n stat_a, larger = u1, 1\r\n else:\r\n stat_a, larger = u2, 2\r\n\r\n # compute the effect size \r\n effect = 1 - (2*stat_a)/(n1*n2) \r\n\r\n # Mann-Whitney test \r\n if min(n1, n2) < 2: # sample size too small - cannot do test\r\n return 'Sorry, sample size is too small to test significance. 
Please collect more data...'\r\n\r\n # Do test for small sample size \r\n elif 2<=min(n1, n2) <= 20 and 2 <= max(n1, n2) <= 40:\r\n if tail != 'two': # only have data for two tail testing\r\n return 'Sorry, sample size too small, only two-tailed test available...'\r\n\r\n u_05 = Critical_05[str(min(n1, n2))][max(n1, n2)-2] # u=critical at signif level .05\r\n u_1 = Critical_1[str(min(n1, n2))][max(n1, n2)-2] # u=critical at signif level .1\r\n\r\n if significant_level == 0.05 and stat_a <= u_05:\r\n self.significance = True\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_05\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n elif significant_level == 0.1 and stat_a <= u_1:\r\n self.significance = True\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_1\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n elif significant_level == 0.05:\r\n self.significance = False\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_05\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n else:\r\n self.significance = False\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_1\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n\r\n else:\r\n T = tiecorrect(ranked)\r\n sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)\r\n \r\n if T == 0:\r\n raise ValueError('All numbers are identical in mannwhitneyu')\r\n meanrank = n1*n2/2.0 + 0.5 \r\n\r\n if tail == 'two':\r\n bigu = max(u1, u2)\r\n elif tail == 'less':\r\n bigu = u1\r\n elif tail == 'more':\r\n bigu = u2\r\n z = (bigu - meanrank) / sd\r\n \r\n if tail == 'two':\r\n p = 2 * norm.sf(abs(z))\r\n else:\r\n p = norm.sf(z)\r\n if p <= significant_level:\r\n self.significance = True\r\n else:\r\n self.significance = False\r\n \r\n self.sample_size = 'Large'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.p = p\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger", "def calc_spindle_means(self):\n\n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_aggregates = {}\n datatypes = ['Raw', 'spfilt']\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles[chan]:\n spindle_aggregates[chan] = {}\n for datatype in datatypes:\n # set the base df\n agg_df = pd.DataFrame(self.spindles[chan][0][datatype])\n agg_df = agg_df.rename(columns={datatype:'spin_0'})\n rsuffix = list(range(1, len(self.spindles[chan])))\n # join on the index for each spindle\n agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')\n spindle_aggregates[chan][datatype] = agg_df\n \n print('Calculating spindle statistics...')\n # create a new multiindex dataframe for calculations\n spindle_means = {}\n calcs = ['count', 'mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n for datatype in datatypes:\n spindle_means[datatype] = pd.DataFrame(columns=columns)\n # fill the dataframe\n for chan in spindle_aggregates.keys():\n spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)\n spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)\n spindle_means[datatype][(chan, 
'std')] = spindle_aggregates[chan][datatype].std(axis=1)\n spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)\n \n self.spindle_aggregates = spindle_aggregates\n self.spindle_means = spindle_means\n print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statisics stored in obj.spindle_means dataframe.\\n')", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def coadd(self, sp, method='pixel'):\n\t\tif method == 'pixel':\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tif self.apply_sigma_mask:\n\t\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\telse:\n\t\t\t\tself.mask = []\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\telif method == 'wavelength':\n\t\t\tself_supers = copy.deepcopy(self)\n\t\t\tg = interpolate.interp1d(self.wave, self.flux)\n\t\t\tsp_supers = copy.deepcopy(sp)\n\t\t\tf = interpolate.interp1d(sp.wave, sp.flux)\n\t\t\t## 10x supersample the average difference of \n\t\t\t## the wavelength\n\t\t\t#step0 = np.mean(np.diff(self.wave))/10\n\t\t\t#self_supers.wave = np.arange(self.wave[0],\n\t\t\t#\tself.wave[-1],step0)\n\t\t\tself_supers.flux = g(self_supers.wave)\n\t\t\tself_supers.oriWave = np.arange(self.oriWave[0],\n\t\t\t\tself.oriWave[-1],(self.oriWave[-1]-self.oriWave[0])/10240)\n\t\t\tg1 = interpolate.interp1d(self.oriWave, self.oriFlux)\n\t\t\tself_supers.oriFlux = g1(self_supers.oriWave)\n\n\t\t\t#step = np.mean(np.diff(sp.wave))/10\n\t\t\t#sp_supers.wave = np.arange(sp.wave[0],sp.wave[-1],step)\n\t\t\t#sp_supers.flux = 
f(sp_supers.wave)\n\t\t\tsp_supers.oriWave = np.arange(sp.oriWave[0],\n\t\t\t\tsp.oriWave[-1],(sp.oriWave[-1]-sp.oriWave[0])/10240)\n\t\t\tf1 = interpolate.interp1d(sp.oriWave, sp.oriFlux)\n\t\t\tsp_supers.oriFlux = f1(sp_supers.oriWave)\n\n\t\t\t## calculate the max cross correlation value\n\t\t\tdef xcorr(a0,b0,shift):\n\t\t\t\t\"\"\"\n\t\t\t\tShift is the index number after supersampling \n\t\t\t\tboth of the spectra.\n\t\t\t\t\"\"\"\n\t\t\t\ta = copy.deepcopy(a0)\n\t\t\t\tb = copy.deepcopy(b0)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\tlength = b.oriFlux.shape[0]\n\t\t\t\tif shift >= 0:\n\t\t\t\t\tmask_a = np.arange(0,shift,1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(length-1,length-shift-1,-1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\telif shift < 0:\n\t\t\t\t\tmask_a = np.arange(length-1,length+shift-1,-1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(0,-shift,1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\t#b.wave += shift * step\n\t\t\t\t## discard the points where the wavelength values\n\t\t\t\t## are larger\n\t\t\t\t#condition = (a.wave > b.wave[0]) & (a.wave < b.wave[-1])\n\t\t\t\t\n\t\t\t\t#a.flux = a.flux[np.where(condition)]\n\t\t\t\t#a.wave = a.wave[np.where(condition)]\n\t\t\t\t## resampling the telluric model\n\t\t\t\t#b.flux = np.array(smart.integralResample(xh=b.wave, \n\t\t\t\t#\tyh=b.flux, xl=a.wave))\n\t\t\t\t\n\t\t\t\treturn np.inner(a.oriFlux, b.oriFlux)/\\\n\t\t\t\t(np.average(a.oriFlux)*np.average(b.oriFlux))/a.oriFlux.shape[0]\n\n\t\t\txcorr_list = []\n\t\t\t## mask the ending pixels\n\t\t\tself_supers2 = copy.deepcopy(self_supers)\n\t\t\tsp_supers2 = copy.deepcopy(sp_supers)\n\t\t\tself_supers2.wave = self_supers2.wave[1000:-1000]\n\t\t\tself_supers2.flux = self_supers2.flux[1000:-1000]\n\t\t\tsp_supers2.wave = sp_supers2.wave[1000:-1000]\n\t\t\tsp_supers2.flux = sp_supers2.flux[1000:-1000]\n\t\t\tfor shift in np.arange(-10,10,1):\n\t\t\t\txcorr_list.append(xcorr(self_supers2,sp_supers2,shift))\n\n\t\t\t## dignostic plot for cc result\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.plot(np.arange(-10,10,1),np.array(xcorr_list),'k-')\n\t\t\tplt.show()\n\t\t\tplt.close()\n\n\t\t\tstep = np.absolute(np.mean(np.diff(sp_supers.wave)))\n\t\t\tbestshift = np.arange(-10*step,10*step,step)[np.argmax(xcorr_list)]\n\t\t\tsp_supers.oriWave += bestshift\n\t\t\t## discard the points where the wavelength values\n\t\t\t## are larger\n\t\t\tcondition = (self.oriWave > sp_supers.oriWave[0])\\\n\t\t\t& (self.oriWave < sp_supers.oriWave[-1])\n\n\t\t\tself.oriFlux = self.oriFlux[np.where(condition)]\n\t\t\tself.oriWave = self.oriWave[np.where(condition)]\n\t\t\tself.oriNoise = self.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriNoise = sp_supers.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriFlux = np.array(smart.integralResample(xh=sp_supers.oriWave, \n\t\t\t\tyh=sp_supers.oriFlux, xl=self.oriWave))\n\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp_supers.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp_supers.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\tself.wave = 
np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\treturn self", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n 
axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def process_trace(n_tr, tr, sta, orig_time, cmps, cfg):\n cmp = tr.stats.channel[2:3]\n sta[cmp] = {}\n sta[cmp][\"times\"] = tr.times(reftime=orig_time)\n\n sta[cmp][\"tr_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), sta[\"lenD\"])\n )\n sta[cmp][\"f1_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), len(cfg.picking.KURT_WINS),\n sta[\"lenD\"])\n )\n sta[cmp][\"f1_mean\"] = np.zeros(sta[\"lenD\"])\n sta[cmp][\"f3_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]),\n len(cfg.picking.KURT_WINS), sta[\"lenD\"])\n )\n sta[cmp][\"f3_mean_smooth\"] = np.zeros(\n (len(cfg.picking.CF_MEAN_SMOOTH_WIND), sta[\"lenD\"])\n )\n sta[cmp][\"f4_all\"] = np.zeros((len(cfg.picking.CF_MEAN_SMOOTH_WIND),\n sta[\"lenD\"]))\n sta[cmp][\"f1_mean_smooth\"] = np.zeros(sta[\"lenD\"])\n # Get suitable filters (exclude those fully outside Nyquist freq.)\n for phase in [\"P\", \"S\"]:\n if cmp in cmps[phase]:\n sta[\"picks\"][\"poss_obs\"][phase][cmp] = {}\n sta[cmp][\"filtwins_check\"] = [\n filt_win for filt_win in cfg.picking.FILT_WINS[phase]\n if filt_win[0] < sta[\"samplerate\"] / 2\n ]\n if cfg.picking.INTEGRATE_S is True:\n tr.integrate()\n\n for n_filt, filt in enumerate(sta[cmp][\"filtwins_check\"]):\n # Ensure that filter covers sample rate / 2\n if (tr.stats.sampling_rate / 2) <= filt[0]:\n print(\"Skipping this Kurtosis run due to sample rate/2<f\")\n continue\n tr.filter(\"bandpass\", freqmin=filt[0], freqmax=filt[1])\n try:\n sta[cmp][\"tr_results\"][n_filt] = tr.data\n except ValueError: # If input array length is inconsistent\n continue\n # Loop over kurtosis windows\n for n_kurt, kurt_win_s in enumerate(cfg.picking.KURT_WINS):\n f1 = CF_kurtosis(kurt_win_s, tr)\n sta[cmp][\"f1_results\"][n_filt, n_kurt] = f1 # Needed for weights\n f2 = kurt_transform_f2(f1, kurt_win_s, tr)\n f3 = kurt_transform_f3(f2, kurt_win_s, tr)\n\n sta[cmp][\"f3_results\"][n_filt, n_kurt] = f3\n sta[cmp][\"f1_mean\"] = np.nanmean(sta[cmp][\"f1_results\"], axis=0)[0]\n sta[cmp][\"f1_mean_smooth\"] = do_smooth(\n sta[cmp][\"f1_mean\"], cfg.picking.CF_MEAN_SMOOTH_WIND[0],\n tr.stats.sampling_rate\n )\n # ^ Throws up a warning first time due to NaN slices\n # Compute mean CF and final kurtosis transform\n f3_mean = np.nanmean(sta[cmp][\"f3_results\"], axis=0)[0]\n\n for nsm, smooth_wind in enumerate(cfg.picking.CF_MEAN_SMOOTH_WIND):\n sta[cmp][\"f3_mean_smooth\"][nsm] = do_smooth(\n f3_mean, smooth_wind, tr.stats.sampling_rate\n )\n f4 = kurt_transform_f4(sta[cmp][\"f3_mean_smooth\"][nsm],\n np.max(cfg.picking.KURT_WINS), tr)\n sta[cmp][\"f4_all\"][nsm] = f4\n\n # Now pick (avoiding end and beginning of signal)\n # Pick the P-waves\n if cmp in cmps[\"P\"]:\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm] = []\n # Find points where Kurt<0 & doesn't look like S-wave\n p_cands = np.argwhere((f4 < 0.0))\n for idx in p_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(\n cfg.picking.KURT2WGHT[\"P\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx])))\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, 
idx,\n sta[cmp][\"times\"][idx][0]\n ])\n # Pick the S-waves\n if cmp in cmps[\"S\"]:\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm] = []\n\n # Find points where Kurt<0 & doesn't look like S-wave\n s_cands = np.argwhere((f4 < 0.0))\n for idx in s_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(cfg.picking.KURT2WGHT[\"S\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx]))\n )\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n return(sta)", "def process_traces(self, s, h):\n # filter data\n if PAR.FREQLO and PAR.FREQHI:\n s = sbandpass(s, h, PAR.FREQLO, PAR.FREQHI)\n\n return s", "def _summarize_trace(power_trace, ci_alpha=None):\n power_trace = np.atleast_2d(power_trace)\n pwr_mean = power_trace.mean(0)\n\n if power_trace.shape[0] == 1:\n pwr_err = np.zeros(power_trace.shape) * np.nan\n elif ci_alpha is None:\n pwr_err = power_trace.std(0)\n else:\n pwr_err = confidence_bound(power_trace, alpha=ci_alpha, axis=0)\n\n pwr_lo = pwr_mean - pwr_err\n pwr_hi = pwr_mean + pwr_err\n\n return pwr_mean, pwr_lo, pwr_hi", "def one_transition_spectrum_gauss(self,tr):\n \n \n fa = tr[\"fa\"] # Frequency axis\n HWHH = tr[\"HWHH\"] # Half width at the half hight (maximum)\n dd = tr[\"dd\"] # transition dipole strength\n rr = tr[\"rr\"] # transition dipole strength\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"]+self.rwa # frequency\n \n # LineShape = lambda p, x: (x/(p[1]*np.sqrt(2*m.pi))*np.exp(-0.5*((x-p[0])/p[1])**2))\n # broad = broad/np.sqrt(2*np.log(2))\n sigma = HWHH/numpy.sqrt(2*numpy.log(2))\n \n # x = ta.data\n \n data = (fa.data/(sigma*numpy.sqrt(2*numpy.pi))*numpy.exp(-0.5*((fa.data-om)/sigma)**2))\n data_abs = dd*data\n data_CD = rr*data\n data_LD = ld*data\n \n return data_abs,data_CD, data_LD", "def create_python_data(self) -> dict:\r\n s = self.scale\r\n minimum, maximum = self.get_min_max()\r\n diff = maximum - minimum\r\n\r\n output = {}\r\n\r\n # Create the data for the scatters\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n colormaps = self.scatters[name][\"colormap\"]\r\n cmaps = [None] * len(colormaps)\r\n\r\n for i, colormap in enumerate(colormaps):\r\n if isinstance(colormap, str):\r\n cmaps[i] = plt.cm.get_cmap(colormap)\r\n else:\r\n cmaps[i] = colormap\r\n\r\n output[name] = {}\r\n output[name][\"meta\"] = self.scatters[name]\r\n output[name][\"type\"] = \"scatter\"\r\n\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in data[mapping[\"x\"]]], dtype=np.float32\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in data[mapping[\"y\"]]], dtype=np.float32\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in data[mapping[\"z\"]]], dtype=np.float32\r\n )\r\n\r\n if mapping[\"labels\"] in data:\r\n # Make sure that the labels are always strings\r\n output[name][\"labels\"] = list(map(str, data[mapping[\"labels\"]]))\r\n\r\n if mapping[\"s\"] in data:\r\n output[name][\"s\"] = np.array(data[mapping[\"s\"]], dtype=np.float32)\r\n\r\n output[name][\"colors\"] = [{}] * len(data[mapping[\"c\"]])\r\n for s in range(len(data[mapping[\"c\"]])):\r\n if mapping[\"cs\"] in data:\r\n colors = np.array([cmaps[s](x) for x in data[mapping[\"c\"]][s]])\r\n\r\n for i, c in enumerate(colors):\r\n hsl = np.array(colour.rgb2hsl(c[:3]))\r\n hsl[1] = hsl[1] - hsl[1] * data[mapping[\"cs\"]][s][i]\r\n colors[i] = 
np.append(np.array(colour.hsl2rgb(hsl)), 1.0)\r\n\r\n colors = np.round(colors * 255.0)\r\n\r\n output[name][\"colors\"][s][\"r\"] = np.array(\r\n colors[:, 0], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"g\"] = np.array(\r\n colors[:, 1], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"b\"] = np.array(\r\n colors[:, 2], dtype=np.float32\r\n )\r\n else:\r\n colors = np.array([cmaps[s](x) for x in data[mapping[\"c\"]][s]])\r\n colors = np.round(colors * 255.0)\r\n output[name][\"colors\"][s][\"r\"] = np.array(\r\n colors[:, 0], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"g\"] = np.array(\r\n colors[:, 1], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"b\"] = np.array(\r\n colors[:, 2], dtype=np.float32\r\n )\r\n\r\n for name, data in self.trees_data.items():\r\n mapping = self.trees[name][\"mapping\"]\r\n point_helper = self.trees[name][\"point_helper\"]\r\n\r\n output[name] = {}\r\n output[name][\"meta\"] = self.trees[name]\r\n output[name][\"type\"] = \"tree\"\r\n\r\n if point_helper is not None and point_helper in self.scatters_data:\r\n scatter = self.scatters_data[point_helper]\r\n scatter_mapping = self.scatters[point_helper][\"mapping\"]\r\n\r\n x_t = []\r\n y_t = []\r\n z_t = []\r\n\r\n for i in range(len(data[mapping[\"from\"]])):\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"from\"]][i]])\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"to\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"from\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"to\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"from\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"to\"]][i]])\r\n\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in x_t], dtype=np.float32\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in y_t], dtype=np.float32\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in z_t], dtype=np.float32\r\n )\r\n else:\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in data[mapping[\"x\"]]],\r\n dtype=np.float32,\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in data[mapping[\"y\"]]],\r\n dtype=np.float32,\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in data[mapping[\"z\"]]],\r\n dtype=np.float32,\r\n )\r\n\r\n if mapping[\"c\"] in data:\r\n colormap = self.trees[name][\"colormap\"]\r\n cmap = None\r\n if isinstance(colormap, str):\r\n cmap = plt.cm.get_cmap(colormap)\r\n else:\r\n cmap = colormap\r\n\r\n colors = np.array([cmap(x) for x in data[mapping[\"c\"]]])\r\n colors = np.round(colors * 255.0)\r\n output[name][\"r\"] = np.array(colors[:, 0], dtype=np.float32)\r\n output[name][\"g\"] = np.array(colors[:, 1], dtype=np.float32)\r\n output[name][\"b\"] = np.array(colors[:, 2], dtype=np.float32)\r\n\r\n return output", "def GetUnivariateSmry(ds,quantileCuts=[0.05 , 0.1, 0.2, 0.25,0.3, 0.4, 0.5, 0.6, 0.7, 0.75,0.8, 0.9, 0.95,0.98,0.99]):\n# Quantile distn:\n d1 = ds.quantile(quantileCuts).T\n d1.reset_index(inplace=True)\n qNames = [f'Q{int(x* 100)}' for x in quantileCuts]\n newNames = ['index']\n newNames.extend(qNames)\n d1.columns = newNames \n \n# Other Basic metrics\n d2 = pd.DataFrame(ds.isna().sum(),columns = ['NullCount'])\n d2['DataType'] = d2.index.map(ds.dtypes)\n d2['BlankCount'] = d2.index.map((ds=='').sum())\n d2['NonNullCount'] = d2.index.map(ds.notna().sum())\n 
d2['FillPerc']= round(d2['NonNullCount']/ds.shape[0],2)\n d2['UniqueCount'] = d2.index.map(ds.nunique())\n d2['Min'] = ds.min(numeric_only=True)\n d2['Mean'] = ds.mean()\n d2['NonZeroMean'] = ds.replace(0, np.nan).mean()\n d2['Max'] = ds.max(numeric_only=True)\n d2['Total']= ds.sum(numeric_only=True)\n d2['std'] = ds.std()\n d2['skewness'] = ds.skew()\n d2['kurtosis'] = ds.kurtosis()\n d2.reset_index(inplace=True)\n \n# creating master summary\n d = d2.merge(d1, on='index', how='left')\n d.rename(columns={\"index\":\"ParameterName\"},inplace=True)\n \n# re-arranging columns\n first_cols = ['ParameterName','DataType']\n last_cols = [col for col in d.columns if col not in first_cols]\n d = d[first_cols+last_cols]\n \n return d", "def side_traces(x,im):\n s0 = x['side-traces'][0]\n s1 = x['side-traces'][1]\n t1 = Scatter(y=s0)\n t2 = Scatter(y=s1)\n\n #put_thing(im,x['abs-line'],(255,0,0),(0,0),3)\n\n groups = []\n diff_traces = []\n markers = []\n y3 = []\n TriangleHumps.get_dimensions(x,debug_groups=groups,debug_diffs=diff_traces,debug_markers = markers, im = im,y3=y3)\n mode = stats.mode(y3)[0][0]\n trigger = mode*2+1\n t3 = Scatter(y=y3)\n\n annotations = []\n diff_traces = [Scatter(y=v) for v in diff_traces]\n t4 = Scatter(x=markers,y=[10]*len(markers),mode = 'markers+text')\n for gru in groups:\n for hump in gru:\n annotations.append({\n 'x':hump['range'][0],\n 'y':trigger,\n 'text':'%d,%d'%(hump['area'],hump['length']),\n })\n\n name = 'mode=%d,trigger=%d,groups=%d' % (mode,trigger,len(groups))\n \n #return (t1,t2,t3,)\n #print('markers %d:' % x['id'],markers,[trigger]*len(markers))\n return [t3,t4,] + diff_traces,annotations, name", "def skystats(stamp):\n\t\n\tif isinstance(stamp, galsim.Image):\n\t\ta = stamp.array\n\t\t# Normally there should be a .transpose() here, to get the orientation right.\n\t\t# But in the present case it doesn't change anything, and we can skip it.\n\telse:\n\t\ta = stamp # Then we assume that it's simply a numpy array.\n\t\n\tedgepixels = np.concatenate([\n\t\t\ta[0,1:], # left\n\t\t\ta[-1,1:], # right\n\t\t\ta[:,0], # bottom\n\t\t\ta[1:-1,-1] # top\n\t\t\t])\n\tassert len(edgepixels) == 2*(a.shape[0]-1) + 2*(a.shape[0]-1)\n\n\t# And we convert the mad into an estimate of the Gaussian std:\n\treturn {\n\t\t\"std\":np.std(edgepixels), \"mad\": 1.4826 * mad(edgepixels),\n\t\t\"mean\":np.mean(edgepixels), \"med\":np.median(edgepixels),\n\t\t\"stampsum\":np.sum(a)\n\t\t}", "def raw_processing(self):\n well_dilution_code = {'e': 5, 'f': 6, 'g': 7, 'h': 8}\n\n for well in self.data_labels:\n x = 10 ** well_dilution_code[well[-1]]\n y = self.film_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n z = self.plank_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n\n self.film_conc.append(y)\n self.plank_conc.append(z)", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def consolidate_unitary_EPSP_traces(source_dict):\n trace_len = int((context.ISI['units'] + context.trace_baseline) / context.dt)\n target_dict = {}\n\n for syn_group in source_dict:\n if syn_group not in target_dict:\n target_dict[syn_group] = {}\n num_syn_ids = len(context.syn_id_dict[syn_group])\n for syn_condition in source_dict[syn_group]:\n if syn_condition not in target_dict[syn_group]:\n target_dict[syn_group][syn_condition] = {}\n for rec_name in context.synaptic_integration_rec_names:\n target_array = np.empty((num_syn_ids, trace_len))\n for i, syn_id in enumerate(context.syn_id_dict[syn_group]):\n target_array[i,:] = 
source_dict[syn_group][syn_condition][syn_id][rec_name]\n target_dict[syn_group][syn_condition][rec_name] = target_array\n\n return target_dict", "def emission_vs_depth(filename,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n if p.add:\n ax = plt.gca(); c = 'r'\n if not p.add:\n fig,ax = plt.subplots(figsize=(8,6)); c = 'b'\n st_cols = ['depth','[CII]158','[OI]63','CO(1-0)','CO(2-1)','CO(3-2)']\n st = pd.read_csv(p.d_cloudy.replace('ext/','') + 'NH/' + filename + '.str',sep='\\t',skiprows=1,names=st_cols)\n dx = np.append(0,np.diff(st.depth))\n pc2cm = u.parsec.to(u.cm)\n # Derive mass-luminosity ratio\n import astropy.constants as c\n M = 1e3 * c.m_p.value * st.depth.values.max() / u.M_sun.to(u.kg) \n cloudy_lin_header = ['#lineslist','C 1 609.590m','C 1 370.269m','C 2 157.636m','O 1 63.1679m','O 1 145.495m','O 3 88.3323m','N 2 205.244m','N 2 121.767m','CO 2600.05m','CO 1300.05m','CO 866.727m','CO 650.074m','CO 325.137m','H2 17.0300m','H2 12.2752m','H2 9.66228m','H2 8.02362m','H2 6.90725m','H2 6.10718m','H2 5.50996m','O 4 25.8832m','NE 2 12.8101m','NE 3 15.5509m','S 3 18.7078m','FE 2 25.9811m']\n cloudy_lin = pd.read_csv(p.d_cloudy.replace('ext/','') + 'NH/' + filename + '.lin',\\\n sep='\\t',names=cloudy_lin_header,comment='#').reset_index(drop=True)\n Cloudy_lines_dict = aux.get_Cloudy_lines_dict()\n cloudy_lin = cloudy_lin.rename(columns=Cloudy_lines_dict)\n L = cloudy_lin['CO(1-0)'][0] * u.erg.to('J') / c.L_sun.value\n print(L,M)\n ax.plot(st.depth/pc2cm,dx*st['CO(1-0)'],'-',color='m',label='CO(1-0): %.2e Lsun/Msun' % (L/M))\n L = cloudy_lin['[OI]63'][0] * u.erg.to('J') / c.L_sun.value\n print(L,M)\n ax.plot(st.depth/pc2cm,dx*st['[OI]63'],'g--',label='[OI]63: %.2e Lsun/Msun' % (L/M))\n ax.set_yscale(\"log\")\n ax.set_xscale(\"log\")\n ax.set_xlabel('Depth [pc]')\n ax.set_ylabel('Intensity [ergs/s/cm^2]')\n ax.legend()\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig('plots/look-up/emission_%s' % filename,dpi=200)", "def weighted_ps(self, mfactor=1.1):\n self.weightedpower=[]\n #ksum=np.sum(self.psdata[self.klist)\n Nk=int(len(self.klist)/mfactor)\n for i in range(self.Nsubs):\n nsum=np.sum(self.psdata[i][1][0:Nk])\n total=np.sum(np.array([self.psdata[i][1][j]*self.powerspectra[i][j] for j in range(Nk)]))\n self.weightedpower.append(total/nsum)\n\n # also find correlation\n self.corr=[]\n for i in range(self.Nsubs):\n self.corr.append(self.ds[i]*self.weightedpower[i])\n\n self.corr_mean=np.mean(self.corr)\n self.corr_sigma=np.sqrt(np.var(self.corr))", "def get_traces(self, conc_traces, spectra):\n # linear fit of the fitted_concs to the spectra CANNOT fit intercept here!\n #self.regressor.fit(conc_traces,spectral_trace)\n #fitted_spectral_traces = self.regressor.predict(conc_traces)\n fitted_spectral_traces = spectra.dot(conc_traces.T) \n return fitted_spectral_traces.T", "def log_weights_statistics(self):\n for weight_name, weight_parameter in self._weights.items():\n for statistic_function in self._statistics_functions:\n self._weights_statistics[statistic_function.__name__][\n weight_name\n ].append(float(statistic_function(weight_parameter)))", "def create_data(self) -> str:\r\n s = self.scale\r\n mini, maxi = self.get_min_max()\r\n diff = maxi - mini\r\n\r\n output = \"const data = {\\n\"\r\n\r\n # Create the data for the scatters\r\n # TODO: If it's not interactive, labels shouldn't be exported.\r\n for name, data in self.scatters_data.items():\r\n mapping = 
self.scatters[name][\"mapping\"]\r\n colormaps = self.scatters[name][\"colormap\"]\r\n cmaps = [None] * len(colormaps)\r\n\r\n for i, colormap in enumerate(colormaps):\r\n if isinstance(colormap, str):\r\n cmaps[i] = plt.cm.get_cmap(colormap)\r\n else:\r\n cmaps[i] = colormap\r\n\r\n output += name + \": {\\n\"\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"labels\"] in data:\r\n fmt_labels = [\"'{0}'\".format(s) for s in data[mapping[\"labels\"]]]\r\n output += \"labels: [\" + \",\".join(fmt_labels) + \"],\\n\"\r\n\r\n if mapping[\"s\"] in data:\r\n output += \"s: [\"\r\n\r\n for series in range(len(data[mapping[\"s\"]])):\r\n output += (\r\n \"[\"\r\n + \",\".join(map(str, np.round(data[mapping[\"s\"]][series], 3)))\r\n + \"],\\n\"\r\n )\r\n\r\n output += \"],\\n\"\r\n\r\n output += \"colors: [\\n\"\r\n for series in range(len(data[mapping[\"c\"]])):\r\n output += \"{\\n\"\r\n if mapping[\"cs\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in data[mapping[\"c\"]][series]]\r\n )\r\n\r\n for i, c in enumerate(colors):\r\n hsl = np.array(colour.rgb2hsl(c[:3]))\r\n hsl[1] = hsl[1] - hsl[1] * data[mapping[\"cs\"]][series][i]\r\n colors[i] = np.append(np.array(colour.hsl2rgb(hsl)), 1.0)\r\n\r\n colors = np.round(colors * 255.0)\r\n\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n elif mapping[\"c\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in data[mapping[\"c\"]][series]]\r\n )\r\n colors = np.round(colors * 255.0)\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n output += \"},\\n\"\r\n\r\n output += \"]\"\r\n output += \"},\\n\"\r\n\r\n for name, data in self.trees_data.items():\r\n mapping = self.trees[name][\"mapping\"]\r\n point_helper = self.trees[name][\"point_helper\"]\r\n\r\n output += name + \": {\\n\"\r\n\r\n if point_helper is not None and point_helper in self.scatters_data:\r\n scatter = self.scatters_data[point_helper]\r\n scatter_mapping = self.scatters[point_helper][\"mapping\"]\r\n\r\n x_t = []\r\n y_t = []\r\n z_t = []\r\n\r\n for i in range(len(data[mapping[\"from\"]])):\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"from\"]][i]])\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"to\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"from\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"to\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"from\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"to\"]][i]])\r\n\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in x_t]\r\n output += f\"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - 
mini) / diff, 3) for y in y_t]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in z_t]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n else:\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"c\"] in data:\r\n colormap = self.trees[name][\"colormap\"]\r\n cmap = None\r\n if isinstance(colormap, str):\r\n cmap = plt.cm.get_cmap(colormap)\r\n else:\r\n cmap = colormap\r\n\r\n colors = np.array([cmap(x) for x in data[mapping[\"c\"]]])\r\n colors = np.round(colors * 255.0)\r\n output += \"r: [\" + \",\".join(map(str, colors[:, 0])) + \"],\\n\"\r\n output += \"g: [\" + \",\".join(map(str, colors[:, 1])) + \"],\\n\"\r\n output += \"b: [\" + \",\".join(map(str, colors[:, 2])) + \"],\\n\"\r\n\r\n output += \"},\\n\"\r\n\r\n output += \"};\\n\"\r\n\r\n return output", "def add_supplementary_traces(recording, stimulus=None, derivative=None, timeconstant=2.6):\n if stimulus is None:\n stimulus = recording.stimulus\n\n pos_only = np.zeros_like(stimulus)\n neg_only = np.zeros_like(stimulus)\n pos_only[np.where(stimulus > 0)[0]] = stimulus[np.where(stimulus > 0)[0]] # positive stimulus only\n neg_only[np.where(stimulus < 0)[0]] = stimulus[np.where(stimulus < 0)[0]] # negative stimulus only\n\n if derivative is None:\n derivative = np.gradient(stimulus)\n\n supp_data = {'deriv_stim': derivative,\n 'abs_deriv_stim': np.abs(np.gradient(stimulus)),\n 'pos_only_stim': pos_only,\n 'abs_neg_only_stim': np.abs(neg_only)} # dictionary of supplementary data to add\n\n for data_name in supp_data: # put all supp. data in rec\n recording.add_supp_single_data(s_name=data_name, s_data=supp_data[data_name])\n\n pos_deriv = derivative.copy()\n pos_deriv[pos_deriv < 0] = 0\n neg_deriv = derivative.copy()\n neg_deriv[neg_deriv > 0] = 0\n recording.add_supp_single_data('pos_deriv_stim', pos_deriv) # positive derivative only\n recording.add_supp_single_data('neg_deriv_stim', neg_deriv) # postiive derivative only\n\n # Add exponentially smoothed functions. 
Especially useful for fast derivatives.\n recording.add_supp_single_data('conv_stim', exp_smooth(stimulus, time_constant=timeconstant))\n recording.add_supp_single_data('conv_deriv_stim', exp_smooth(derivative, time_constant=timeconstant))\n recording.add_supp_single_data('conv_pos_stim', exp_smooth(recording.pos_only_stim, time_constant=timeconstant))\n recording.add_supp_single_data('conv_neg_stim', exp_smooth(recording.abs_neg_only_stim, time_constant=timeconstant))\n recording.add_supp_single_data('conv_pos_deriv_stim', exp_smooth(recording.pos_deriv_stim, time_constant=timeconstant))\n recording.add_supp_single_data('conv_neg_deriv_stim', exp_smooth(recording.neg_deriv_stim, time_constant=timeconstant))\n recording.add_supp_single_data('abs_conv_neg_deriv_stim', np.abs(exp_smooth(recording.neg_deriv_stim, time_constant=timeconstant)))\n recording.add_supp_single_data('abs_conv_all_deriv_stim', np.abs(exp_smooth(derivative, time_constant=timeconstant)))", "def sharpen_bands(self):\n for label in self.labels:\n self.sharp_bands[label] = self.bands[label] - self.gauss_bands[\n label]", "def get_traces(self, traces, **kwargs):\n self.resource.clear()\n sweep = kwargs.get(\"sweep\", False)\n\n name_prefix = kwargs.get(\"name_prefix\", \"\")\n if name_prefix:\n name_prefix += \" - \"\n\n channels = OrderedDict()\n for trace in traces:\n ch = trace[\"channel\"]\n if ch not in channels.keys():\n channels[ch] = {\n \"frequency\": None,\n \"traces\": list()}\n channels[ch][\"traces\"].append(trace)\n\n if sweep is True:\n self.sweep(channels=list(channels.keys()))\n\n traces = []\n for ch, ch_data in channels.items():\n frequency = ch_data[\"frequency\"] = self.get_frequency()\n for trace in ch_data[\"traces\"]:\n self.scpi.set_selected_meas_by_number(trace[\"channel\"], trace[\"measurement number\"])\n sdata = self.scpi.query_data(trace[\"channel\"], \"SDATA\")\n s = sdata[::2] + 1j * sdata[1::2]\n ntwk = skrf.Network()\n ntwk.s = s\n ntwk.frequency = frequency\n ntwk.name = name_prefix + trace.get(\"parameter\", \"trace\")\n traces.append(ntwk)\n return traces", "def calc_source_blend_params(params,log):\n\n source = photometry_classes.Star()\n\n source.fs_g = params['f_s_g']\n source.sig_fs_g = params['sig_f_s_g']\n (source.g, source.sig_g) = flux_to_mag_pylima(source.fs_g,source.sig_fs_g)\n\n source.fs_r = params['f_s_r']\n source.sig_fs_r = params['sig_f_s_r']\n (source.r, source.sig_r) = flux_to_mag_pylima(source.fs_r,source.sig_fs_r)\n\n source.fs_i = params['f_s_i']\n source.sig_fs_i = params['sig_f_s_i']\n (source.i, source.sig_i) = flux_to_mag_pylima(source.fs_i,source.sig_fs_i)\n\n source.compute_colours(use_inst=True)\n source.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Source measured photometry:')\n log.info(source.summary(show_mags=True))\n log.info(source.summary(show_mags=False,show_colours=True))\n log.info(source.summary(show_mags=False,johnsons=True))\n\n blend = photometry_classes.Star()\n\n blend.fs_g = params['f_b_g']\n blend.sig_fs_g = params['sig_f_b_g']\n (blend.g, blend.sig_g) = flux_to_mag_pylima(blend.fs_g,blend.sig_fs_g)\n\n blend.fs_r = params['f_b_r']\n blend.sig_fs_r = params['sig_f_b_r']\n (blend.r, blend.sig_r) = flux_to_mag_pylima(blend.fs_r,blend.sig_fs_r)\n\n blend.fs_i = params['f_b_i']\n blend.sig_fs_i = params['sig_f_b_i']\n (blend.i, blend.sig_i) = flux_to_mag_pylima(blend.fs_i,blend.sig_fs_i)\n\n blend.compute_colours(use_inst=True)\n blend.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Blend measured photometry:')\n 
log.info(blend.summary(show_mags=True))\n log.info(blend.summary(show_mags=False,show_colours=True))\n log.info(blend.summary(show_mags=False,johnsons=True))\n\n return source, blend", "def calc_spindle_psd_concat(self, psd_bandwidth):\n \n print('Calculating power spectra (this may take a few minutes)...')\n self.metadata['spindle_analysis']['psd_dtype'] = 'raw_concat'\n self.metadata['spindle_analysis']['psd_method'] = 'multitaper'\n self.metadata['spindle_analysis']['psd_bandwidth'] = psd_bandwidth\n sf = self.metadata['analysis_info']['s_freq']\n \n spindle_psd = {}\n spindle_multitaper_calcs = pd.DataFrame(index=['data_len', 'N', 'W', 'NW', 'K'])\n for chan in self.spindles:\n #print(f'Calculating spectra for {chan}...')\n if len(self.spindles[chan]) > 0:\n # concatenate spindles\n spindles = [self.spindles[chan][x].Raw.values for x in self.spindles[chan]]\n data = np.concatenate(spindles)\n \n # record PS params [K = 2NW-1]\n N = len(data)/sf\n W = psd_bandwidth\n K = int((2*N*W)-1)\n spindle_multitaper_calcs[chan] = [len(data), N, W, N*W, K] \n \n # calculate power spectrum\n pwr, freqs = psd_array_multitaper(data, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25, \n normalization='full', verbose=0)\n # convert to series & add to dict\n psd = pd.Series(pwr, index=freqs)\n spindle_psd[chan] = psd\n \n self.spindle_multitaper_calcs = spindle_multitaper_calcs\n self.spindle_psd_concat = spindle_psd\n print('Done. Spectra stored in obj.spindle_psd_concat. Calculations stored in obj.spindle_multitaper_calcs.\\n')", "def combine_sd_ratios(data1: DataSeries, data2: DataSeries) -> DataSeries:\n return (data1.pow(2) + data2.pow(2)).pow(0.5)", "def combined_costs(matrix_MSLL_IO):\r\n return", "def getMeasures():", "def WLS(store):\n calcweighted(store)\n store['regsampler'].update_yvec(store['yvectil'])\n store['regsampler'].update_xmat(store['xmattil'])\n return store['regsampler'].sample()", "def modify_weights(msname, ionfactor, dryrun=False, ntot=None, trim_start=True):\n t = pt.table(msname, readonly=False, ack=False)\n freq_tab = pt.table(msname + '/SPECTRAL_WINDOW', ack=False)\n freq = freq_tab.getcol('REF_FREQUENCY')\n wav = 3e8 / freq\n anttab = pt.table(msname + '/ANTENNA', ack=False)\n antlist = anttab.getcol('NAME')\n fwhm_list = []\n\n for t2 in t.iter([\"ANTENNA1\",\"ANTENNA2\"]):\n if (t2.getcell('ANTENNA1',0)) < (t2.getcell('ANTENNA2',0)):\n weightscol = t2.getcol('WEIGHT_SPECTRUM')\n uvw = t2.getcol('UVW')\n uvw_dist = np.sqrt(uvw[:,0]**2 + uvw[:,1]**2 + uvw[:,2]**2)\n weightscol_modified = np.copy(weightscol)\n timepersample = t2[1]['TIME'] - t2[0]['TIME']\n dist = np.mean(uvw_dist) / 1e3\n stddev = ionfactor * np.sqrt((25e3 / dist)) * (freq / 60e6) # in sec\n fwhm = 2.3548 * stddev\n fwhm_list.append(fwhm[0])\n\n if not dryrun:\n for pol in range(0,len(weightscol[0,0,:])):\n for chan in range(0,len(weightscol[0,:,0])):\n weights = weightscol[:,chan,pol]\n\n if ntot is None:\n ntot = len(weights)\n gauss = scipy.signal.gaussian(ntot, stddev/timepersample)\n if trim_start:\n weightscol_modified[:,chan,pol] = weights * gauss[ntot-len(weights):]\n else:\n weightscol_modified[:,chan,pol] = weights * gauss[:len(weights)]\n t2.putcol('WEIGHT_SPECTRUM', weightscol_modified)\n t.close()\n freq_tab.close()\n return (min(fwhm_list), max(fwhm_list))", "def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = 
resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)", "def calc_sources_power(sources, medium):\r\n I_0 = 1.0 / (2.0 * medium.density * medium.speed_of_sound)\r\n S = numpy.sum(sources['Ss'])\r\n W = I_0 * S\r\n return W", "def normalize(traces):\n\n start = time()\n\n avg_intens = np.mean(traces, axis=1)\n n_traces = traces.shape[0]\n n_frames = traces.shape[1]\n\n # zero-center the traces\n centered_cy5 = np.zeros((n_traces, n_frames))\n for i in range(n_traces):\n centered_cy5[i, :] = traces[i, :] - avg_intens[i]\n\n scaled_data_cy5 = np.zeros((n_traces, n_frames))\n\n for i in range(n_traces):\n cy5_trc = centered_cy5[i, :]\n cy5_min = cy5_trc.min()\n cy5_max = cy5_trc.max()\n if cy5_min == cy5_max:\n scaled_data_cy5[i] = np.ones(cy5_trc.shape) \n else:\n scaled_data_cy5[i] = (cy5_trc - cy5_min) / (cy5_max - cy5_min)\n\n print(\"Time passed: \" + str(time() - start))\n\n return scaled_data_cy5", "def plot_intensity_prop(stack, wavelengths_arr, colors_arr):\n\n # for the electric fields profile\n for i, wl in enumerate(wavelengths_arr):\n electric_tot_te, electric_tot_tm, reflectivity_te, reflectivity_tm, transmission_te, transmission_tm, index_tot, L_tot, theta_tot = transfer_matrix_method(\n stack, 1, 0, wl, 0)\n intensity = np.abs(electric_tot_te[::-1]) ** 2\n plt.plot(L_tot * 1e6, intensity / max(intensity) * 2, color=colors_arr[i])\n # for the indexes profile\n ax.plot(L_tot * 1e6, index_tot[::-1], color='black')\n ax.fill_between(L_tot * 1e6, index_tot[::-1], color='azure')", "def create_scatterplots(PrimaryParticleName, LongVectorSignals, LongVectorSignalsCher,\n\tShortVectorSignals, ShortVectorSignalsCher, ScinTreshold,\n\tCherTreshold):\n\n\t#Set ROOT plots\n\tTH2LongScinSignals = TH2F(\"LongScatterplotSignals\", PrimaryParticleName, 46, 0., 93., 93, 0., 93.)\n\tTH2ShortScinSignals = TH2F(\"ShortScatterplotSignals\", PrimaryParticleName, 46, 0., 93., 93, 0., 93.)\n\tTH2LongCherSignals = 
TH2F(\"LongCherScatterplotSignals\", PrimaryParticleName, 46, 0., 93., 92, 0., 93.)\n\tTH2ShortCherSignals = TH2F(\"ShortCherScatterplotSignals\", PrimaryParticleName, 46, 0., 93., 92, 0., 93.)\n\tTH2Uniformity = TH2F(\"Uniformity\", \"Uniformity\", 46, 0., 93., 92, 0., 93.)\n\n\t#Fill plots in for loop\n\tfor index in range(len(LongVectorSignals)):\n\t\tif LongVectorSignals[index] > ScinTreshold:\n\t\t\tX,Y = Staggeredmap.mapXYLongScin(index)\n\t\t\tTH2LongScinSignals.Fill(X,Y,LongVectorSignals[index])\n\t\tif LongVectorSignalsCher[index] > CherTreshold:\n\t\t\tX,Y = Staggeredmap.mapXYLongCherenkov(index)\n\t\t\tTH2LongCherSignals.Fill(X,Y,LongVectorSignalsCher[index])\n\t\tif ShortVectorSignals[index] > ScinTreshold:\n\t\t\tX,Y = Staggeredmap.mapXYShortScin(index)\n\t\t\tTH2ShortScinSignals.Fill(X,Y, ShortVectorSignals[index])\n\t\tif ShortVectorSignalsCher[index] > CherTreshold:\n\t\t\tX,Y = Staggeredmap.mapXYShortCherenkov(index)\n\t\t\tTH2ShortCherSignals.Fill(X,Y, ShortVectorSignalsCher[index])\n\n\t#Fill uniformity plot\n\tfor index in range(len(LongVectorSignals)):\n\t\tX,Y = Staggeredmap.mapXYLongScin(index)\n\t\tTH2Uniformity.Fill(X,Y,1.)\t\n\t\tX,Y = Staggeredmap.mapXYShortScin(index)\n\t\tTH2Uniformity.Fill(X,Y,1.)\n\t\tX,Y = Staggeredmap.mapXYLongCherenkov(index)\n\t\tTH2Uniformity.Fill(X,Y,1.)\n\t\tX,Y = Staggeredmap.mapXYShortCherenkov(index)\n\t\tTH2Uniformity.Fill(X,Y,1.)\n\n\t#Draw + DrawOptions plots\n\tStyle = gStyle\n\tStyle.SetPalette(1) #Root palette style\n\tStyle.SetOptStat(0) #Do not show statistics\n\tTH2LongScinSignals.SetLineWidth(0) #TH2LongScinSignals #No line width\n\tTH2LongScinSignals.SetLineColor(2)\n\t#TH2Signals.SetFillColorAlpha(2, 0.)\n\tXAxis = TH2LongScinSignals.GetXaxis()\n\tXAxis.SetTitle(\"x (mm)\")\n\tXAxis.CenterTitle()\n\tXAxis.SetTitleOffset(1.8)\n\tYAxis = TH2LongScinSignals.GetYaxis()\n\tYAxis.SetTitle(\"y (mm)\")\n\tYAxis.CenterTitle()\n\tYAxis.SetTitleOffset(1.8)\n\tZAxis = TH2LongScinSignals.GetZaxis()\n\tZAxis.SetTitle(\"Energy (MeV)\")\n\tZAxis.SetTitleOffset(1.4)\n\tTH2LongScinSignals.Draw(\"LEGO2Z 0 FB\")\n\tgPad.SaveAs(\"ImageLongScintillation.eps\")\n\tTH2ShortScinSignals.SetLineWidth(0) #TH2ShortScinSignals #No line width\n\tTH2ShortScinSignals.SetLineColor(2)\n\t#TH2Signals.SetFillColorAlpha(2, 0.)\n\tXAxis = TH2ShortScinSignals.GetXaxis()\n\tXAxis.SetTitle(\"x (mm)\")\n\tXAxis.CenterTitle()\n\tXAxis.SetTitleOffset(1.8)\n\tYAxis = TH2ShortScinSignals.GetYaxis()\n\tYAxis.SetTitle(\"y (mm)\")\n\tYAxis.CenterTitle()\n\tYAxis.SetTitleOffset(1.8)\n\tZAxis = TH2ShortScinSignals.GetZaxis()\n\tZAxis.SetTitle(\"Energy (MeV)\")\n\tZAxis.SetTitleOffset(1.4)\n\tTH2ShortScinSignals.Draw(\"LEGO2Z 0 FB\")\n\tgPad.SaveAs(\"ImageShortScintillation.eps\")\n\tTH2ShortCherSignals.SetLineWidth(0) #TH2ShortCherSignals #No line width\n\tTH2ShortCherSignals.SetLineColor(4)\n\t#TH2Signals.SetFillColorAlpha(2, 0.)\n\tXAxis = TH2ShortCherSignals.GetXaxis()\n\tXAxis.SetTitle(\"x (mm)\")\n\tXAxis.CenterTitle()\n\tXAxis.SetTitleOffset(1.8)\n\tYAxis = TH2ShortCherSignals.GetYaxis()\n\tYAxis.SetTitle(\"y (mm)\")\n\tYAxis.CenterTitle()\n\tYAxis.SetTitleOffset(1.8)\n\tZAxis = TH2ShortCherSignals.GetZaxis()\n\tZAxis.SetTitle(\"Cher p.e.\")\n\tZAxis.SetTitleOffset(1.4)\n\tTH2ShortCherSignals.Draw(\"LEGO2Z 0 FB\")\n\tgPad.SaveAs(\"ImageShortCherenkov.eps\")\n\tTH2LongCherSignals.SetLineWidth(0) #TH2LongCherSignals #No line width\n\tTH2LongCherSignals.SetLineColor(4)\n\tXAxis = TH2LongCherSignals.GetXaxis()\n\tXAxis.SetTitle(\"x 
(mm)\")\n\tXAxis.CenterTitle()\n\tXAxis.SetTitleOffset(1.8)\n\tYAxis = TH2LongCherSignals.GetYaxis()\n\tYAxis.SetTitle(\"y (mm)\")\n\tYAxis.CenterTitle()\n\tYAxis.SetTitleOffset(1.8)\n\tZAxis = TH2LongCherSignals.GetZaxis()\n\tZAxis.SetTitle(\"Cher p.e.\")\n\tZAxis.SetTitleOffset(1.4)\n\tTH2LongCherSignals.Draw(\"LEGO2Z FB 0\")\n\tgPad.SaveAs(\"ImageLongCherenkov.eps\")\n\tTH2Uniformity.SetLineWidth(0) #TH2Uniformity #No line width\n\tTH2Uniformity.SetLineColor(2)\n\t#TH2Signals.SetFillColorAlpha(2, 0.)\n\tXAxis = TH2Uniformity.GetXaxis()\n\tXAxis.SetTitle(\"x (mm)\")\n\tXAxis.CenterTitle()\n\tXAxis.SetTitleOffset(1.8)\n\tYAxis = TH2Uniformity.GetYaxis()\n\tYAxis.SetTitle(\"y (mm)\")\n\tYAxis.CenterTitle()\n\tYAxis.SetTitleOffset(1.8)\n\tZAxis = TH2Uniformity.GetZaxis()\n\tZAxis.SetTitle(\"Set to 1\")\n\tZAxis.SetTitleOffset(1.4)\n\tTH2Uniformity.Draw(\"LEGO2Z 0 FB\")\n\t#gPad.SaveAs(\"ImageUniformity.eps\")", "def _beamstability_data(self):\n traces = unpack(self._bo + 'i', self.fh.read(4))[0]\n x = []\n data = []\n maxpoints = 0\n for _ in range(traces):\n points = unpack(self._bo + 'i', self.fh.read(4))[0]\n d = np.fromfile(self.fh, dtype=self._bo+'f8', count=2*points).reshape(2, points)\n data.append(d[1])\n if points > maxpoints:\n x = d[0]\n maxpoints = points\n\n for d in range(len(data)):\n pad_width = maxpoints - data[d].shape[0]\n data[d] = np.pad(data[d], (0, pad_width), 'constant')\n\n if self.header['file type'] == 31:\n if self.header['analysis type'].endswith('trolley step scan'):\n xprop = 'radius'\n xunit = 'mm'\n else:\n xprop = 'deflection'\n xunit = 'V'\n elif self.header['file type'] == 35:\n xprop = 'time'\n xunit = 's'\n\n self.data = xarray.DataArray(data, dims=('species', xprop),\n coords={\n 'species': ('species', list(self.header['label list'])),\n xprop: (xprop, x, {'unit': xunit})\n },\n attrs={\n 'unit': 'counts/s'\n })", "def _extract_imp_data(self) -> np.ndarray:\n \n mats = Material.objects.all()\n \n mat_arrays = []\n for mat in mats: # django queryset -> python list\n mat_features = []\n \n # Add data\n # Some data are missing here.\n #TODO: Delete those if sentences after cleaning the data.\n mat_features.append(mat.pvt_b5 if mat.pvt_b5!=None else 0)\n mat_features.append(mat.pvt_b6 if mat.pvt_b6!=None else 0)\n mat_features.append(mat.pvt_b1m if mat.pvt_b1m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b4m if mat.pvt_b4m!=None else 0)\n mat_features.append(mat.pvt_b1s if mat.pvt_b1s!=None else 0)\n mat_features.append(mat.pvt_b2s if mat.pvt_b2s!=None else 0)\n mat_features.append(mat.pvt_b3s if mat.pvt_b3s!=None else 0)\n mat_features.append(mat.pvt_b4s if mat.pvt_b4s!=None else 0)\n mat_features.append(mat.pvt_b7 if mat.pvt_b7!=None else 0)\n mat_features.append(mat.pvt_b8 if mat.pvt_b8!=None else 0)\n mat_features.append(mat.pvt_b9 if mat.pvt_b9!=None else 0)\n mat_features.append(mat.seven_params_n if mat.seven_params_n!=None else 0.)\n mat_features.append(mat.seven_params_Tau if mat.seven_params_Tau!=None else 0.)\n mat_features.append(mat.seven_params_D1 if mat.seven_params_D1!=None else 0.)\n mat_features.append(mat.seven_params_D2 if mat.seven_params_D2!=None else 0.)\n mat_features.append(mat.seven_params_D3 if mat.seven_params_D3!=None else 0.)\n mat_features.append(mat.seven_params_A1 if mat.seven_params_A1!=None else 0.)\n mat_features.append(mat.seven_params_A2 if mat.seven_params_A2!=None else 0.)\n \n 
mat_arrays.append(mat_features)\n \n # Get numpy arrays.\n mat_arrays = np.array(mat_arrays, dtype=np.float64)\n \n return mat_arrays", "def powder_XRD(crystal,wavelength, get_mults=False):\n \n # The wavenumber of the input wavelength\n nu = 2*n.pi/wavelength\n\n # Make a list of the accessible rlvs\n rlvs = find_accessible_rlvs(crystal,wavelength)\n \n # Now we calculate the scattering intensity from each rlv\n intensities = {\n tuple(rlv): n.abs(crystal.structure_factor(rlv))**2\n for rlv in rlvs}\n \n # Now sum up all rlvs with the same magnitude. We also\n # get rid of all the scattering vectors with 0 intensity\n magnitudes = {}\n multiplicities = {}\n for rlv, intensity in intensities.items():\n repeat = False\n mag = n.linalg.norm(rlv)\n for oldmag in magnitudes:\n if n.isclose(mag,oldmag):\n magnitudes[oldmag] += intensity\n multiplicities[oldmag] += 1\n repeat = True\n break\n if not repeat and not n.isclose(mag,0):\n multiplicities[mag] = 1\n magnitudes[mag] = intensity\n \n # Now we reformat the multiplicity data in a nice way\n multiplicities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n multiplicity\n for mag, multiplicity in multiplicities.items()\n if not n.allclose(magnitudes[mag],0)}\n\n # And now we calculate the scattering intensities\n # (a.u. per steradian) as a function of scattering angle\n intensities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n intensity * \n # This factor corrects for the fact that the same total\n # power in the debye scherrer rings is more\n # concentrated when 2\\theta is near 0 or 2pi\n 1 / n.sin(2*n.arcsin(mag/(2*nu))) *\n # This factor corrects for the probability that any\n # given crystal domain will scatter into the rlv\n 1 / mag *\n # This factor corrects for polarization effects,\n # Assuming an unpolarized input beam and no polarization\n # analysis\n (1 + n.cos(2*n.arcsin(mag/(2*nu)))**2)/2\n for mag, intensity in magnitudes.items()\n if not n.allclose(intensity,0)}\n if get_mults:\n return intensities, multiplicities\n else:\n return intensities", "def combine_data(self):\n\t\tself.Full_E = None\n\t\tself.Imaginary_Spectrum = None\n\t\tif self.raw_file is not None:\n\t\t\tlogger.info(\"Convert to scattering factors\")\n\t\t\tself.NearEdgeData = data.convert_data(self.raw_file,self.DataTypeCombo.GetValue(),'ASF')\n#\t\t\tif self.InvertDataCheckBox.GetValue():\n#\t\t\t\tself.NearEdgeData[:,1] = numpy.abs(self.NearEdgeData[:,1] - 2*numpy.mean(self.NearEdgeData[:,1]))\n\t\tlogger.info(\"Combine Data\")\n\t\t# Get splice points\n\t\tsplice_eV = numpy.array([10.0, 30000.0]) # Henke limits\n\t\tif self.SpliceText1.GetValue() == \"Start\":\n\t\t\tif self.raw_file is not None:\n\t\t\t\tsplice_eV[0] = self.NearEdgeData[0, 0]\n\t\telse:\n\t\t\tsplice_eV[0] = float(self.SpliceText1.GetValue())\n\t\tif self.SpliceText2.GetValue() == \"End\":\n\t\t\tif self.raw_file is not None:\n\t\t\t\tsplice_eV[1] = self.NearEdgeData[-1, 0]\n\t\telse:\n\t\t\tsplice_eV[1] = float(self.SpliceText2.GetValue())\n\t\tif self.raw_file is not None and self.ASF_Data is None:\n\t\t\tself.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), plotting_extras=True)\n\n\t\telif self.raw_file is None and self.ASF_Data is not None:\n\t\t\tself.Full_E = self.ASF_E\n\t\t\tself.Imaginary_Spectrum = self.ASF_Data\n\n\t\telif self.raw_file is not None and self.ASF_Data is not None:\n\t\t\t\n\t\t\tself.Full_E, 
self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), fix_distortions=self.FixDistortionsCheckBox.GetValue(), plotting_extras=True)\n\n\t\t\t### get start and end Y values from nexafs and asf data\n\t\t\t##splice_nexafs_Im = numpy.interp(splice_eV, raw_Im[:, 0], raw_Im[:, 1])\n\t\t\t###splice_asf_Im = numpy.interp(splice_eV, self.total_asf[:, 0], self.total_asf[:, 2])\n\t\t\t##splice_asf_Im = (data.coeffs_to_ASF(splice_eV[0],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[0])[0][-1]]),data.coeffs_to_ASF(splice_eV[1],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[1])[0][-1]]))\n\t\t\t##cut_boolean = (splice_eV[0]<raw_Im[:, 0]) == (raw_Im[:, 0]<splice_eV[1])\n\t\t\t### Merge Y values\n\t\t\t##if not self.AddBackgroundCheckBox.GetValue():\n\t\t\t\t##logger.info(\"Merge data sets\")\n\t\t\t\t##scale = (splice_asf_Im[1]-splice_asf_Im[0])/(splice_nexafs_Im[1]-splice_nexafs_Im[0])\n\t\t\t\t##scaled_nexafs_Im = ((raw_Im[:, 1]-splice_nexafs_Im[0])*scale)+splice_asf_Im[0]\n\t\t\t\t##self.asf_bg = None # We won't be using this variable this time\n\t\t\t##else:\n\t\t\t\t##logger.info(\"Add data sets (this will currently only work at energies below 30 keV)\")\n\t\t\t\t### Set up background function\n\t\t\t\t### We trust this point to be just before the absorption edge\n\t\t\t\t##trusted_ind = max(0, numpy.where(self.total_asf[:, 0]>splice_eV[0])[0][0]-1)\n\t\t\t\t##Log_total_asf = numpy.log(self.total_asf[:, 2])\n\t\t\t\t### Lets trust the 5 points before our trusted point and make an initial guess at the background function\n\t\t\t\t##p = numpy.polyfit(self.total_asf[(trusted_ind-5):trusted_ind, 0], Log_total_asf[(trusted_ind-5):trusted_ind], 1)\n\t\t\t\t### Now lets look for the points up util the absorption edge\n\t\t\t\t##p_vals = numpy.exp(numpy.polyval(p, self.total_asf[(trusted_ind-5):-1, 0]))\n\t\t\t\t##p_err = max(p_vals[0:5]-self.total_asf[(trusted_ind-5):trusted_ind, 2])\n\t\t\t\t##edge_ind = numpy.where(self.total_asf[trusted_ind:-1, 2]-p_vals[4:-1]>p_err*10)\n\t\t\t\t##if len(edge_ind[0])!=0:\n\t\t\t\t\t##edge_ind = edge_ind[0][0]\n\t\t\t\t##else:\n\t\t\t\t\t##edge_ind = trusted_ind\n\t\t\t\t### Redo background using the 5 points before the background point\n\t\t\t\t##p = numpy.polyfit(self.total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind, 0], Log_total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind], 1)\n\t\t\t\t##asf_bg = numpy.exp(numpy.polyval(p, raw_Im[:, 0]))\n\t\t\t\t##logger.info(\"Background defined as: y=exp(%(p1)ex %(p0)+e)\" % {\"p1\":p[1], \"p0\":p[0]})\n\t\t\t\t### Apply background function\n\t\t\t\t##scale = (splice_asf_Im[1]-numpy.exp(numpy.polyval(p, splice_eV[1])))/splice_nexafs_Im[1]\n\t\t\t\t##scaled_nexafs_Im = raw_Im[:, 1]*scale+asf_bg\n\t\t\t\t### store background data for plotting\n\t\t\t\t##cut_boolean_wide = numpy.roll(cut_boolean, -1) + numpy.roll(cut_boolean, 1)\n\t\t\t\t##self.asf_bg = [[trusted_ind+edge_ind-5, trusted_ind+edge_ind], numpy.vstack((raw_Im[cut_boolean_wide, 0], asf_bg[cut_boolean_wide])).T]\n\t\t\t\n\t\t\t##nexafs_cut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T\n\t\t\t####Merge point-wise data sets together\n\t\t\t##asf_cut_high = self.total_asf[self.total_asf[:, 0]>splice_eV[1], :]\n\t\t\t##asf_cut_low = self.total_asf[self.total_asf[:, 0]<splice_eV[0], :]\n\t\t\t##self.merged_Im = numpy.vstack((asf_cut_low[:, [0, 2]], (splice_eV[0], 
splice_asf_Im[0]), nexafs_cut, (splice_eV[1], splice_asf_Im[1]), asf_cut_high[:, [0, 2]]))\n\t\t\t\n\t\t\t####Merge coeff data together\n\t\t\t##coeffs_cut_high = self.total_Im_coeffs[self.total_E[:-1]>splice_eV[1],:]\n\t\t\t##coeffs_cut_low = self.total_Im_coeffs[self.total_E[:-1]<splice_eV[0],:]\n\t\t\t###convert points to coeffs\n\t\t\t##nexafs_coeffs_cut = numpy.zeros((len(nexafs_cut)+1,5))\n\t\t\t##Y = numpy.append(numpy.insert(nexafs_cut[:,1],0,splice_asf_Im[0]),splice_asf_Im[1])\n\t\t\t##nexafs_E = numpy.append(numpy.insert(nexafs_cut[:,0],0,splice_eV[0]),splice_eV[1])\n\t\t\t##M = (Y[1:]-Y[:-1])/(nexafs_E[1:]-nexafs_E[:-1])\n\t\t\t##nexafs_coeffs_cut[:,0] = M\n\t\t\t##nexafs_coeffs_cut[:,1] = Y[:-1]-M*nexafs_E[:-1]\n\t\t\t###assemble merged coeffs and energy values\n\t\t\t##self.merged_Im_coeffs = numpy.vstack((coeffs_cut_low, nexafs_coeffs_cut, self.total_Im_coeffs[-coeffs_cut_high.shape[0]-2,:], coeffs_cut_high))\n\t\t\t##self.merged_E = numpy.concatenate((self.total_E[self.total_E<splice_eV[0]], nexafs_E, self.total_E[self.total_E>splice_eV[1]]))\n\t\t\t### Extras for plotting\n\t\t\t##self.splice_ind = (len(asf_cut_low[:, 0]), -len(asf_cut_high[:, 0]))\n\t\t\t##cut_boolean = (splice_eV[0]<=raw_Im[:, 0]) != (raw_Im[:, 0]<=splice_eV[1])\n\t\t\t##self.nexafs_CutOut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T\n\t\t### Previous calculation of f_1 is no longer matching displayed f_2 data\n\t\t##self.KK_Real_Spectrum = None", "def reduce_data(self, ctx):\n self.baselines_type = ctx.get(\"baselines_type\")\n visibilities = ctx.get(\"visibilities\")\n p_signal = self.compute_power(visibilities)\n\n # Remember that the results of \"simulate\" can be used in two places: (i) the computeLikelihood method, and (ii)\n # as data saved to file. 
In case of the latter, it is useful to save extra variables to the dictionary to be\n # looked at for diagnosis, even though they are not required in computeLikelihood().\n return [dict(p_signal=p_signal, baselines=self.baselines, frequencies=self.frequencies,\n u=self.u, eta=self.eta)]\n #, nbl_uv=self.nbl_uv, nbl_uvnu=self.nbl_uvnu, nbl_u=self.nbl_u, grid_weights=self.grid_weights)]", "def make_flats(side='blue',overwrite=False):\r\n\r\n iraf.unlearn('flatcombine')\r\n iraf.flatcombine.ccdtype = \"\"\r\n iraf.flatcombine.process = \"no\"\r\n iraf.flatcombine.subsets = \"no\"\r\n iraf.flatcombine.rdnoise = \"RON\"\r\n iraf.flatcombine.gain = \"GAIN\"\r\n for aperture in ['0.5', '1.0', '1.5', '2.0']:\r\n flats = find_flats(aperture, side=side)\r\n if len(flats) > 0:\r\n if overwrite:\r\n iraf.delete('flat_%s_%s.fits' % (side, aperture), verify='no')\r\n iraf.delete('temp.fits' , verify='no')\r\n iraf.delete('tempsmooth.fits', verify='no')\r\n iraf.delete('norm_temp.fits', verify='no')\r\n # normalize the flat\r\n if side == 'blue': \r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 2\r\n # iraf.unlearn('response')\r\n # iraf.response.function = 'legendre'\r\n iraf.response.order = 100\r\n # iraf.response.high_rej = 5\r\n # iraf.response.low_rej = 2\r\n # iraf.response.niterate = 10\r\n # iraf.response('temp[0]', 'temp[0]',\r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n iraf.imfilter.boxcar('temp', 'tempsmooth', xwindow='1', ywindow='500')\r\n iraf.imarith('temp', '/', 'tempsmooth', 'norm_temp.fits')\r\n iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('norm_temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n else:\r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 1\r\n iraf.unlearn('response')\r\n iraf.response.function = \"spline3\" \r\n iraf.response.order = 100\r\n iraf.response.high_rej = 3\r\n iraf.response.low_rej = 3\r\n iraf.response.niterate = 3\r\n iraf.response('temp[0]', 'temp[0]',\r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n # iraf.unlearn('response')\r\n # iraf.response.function = \"spline3\"\r\n # iraf.response.order = 100\r\n # iraf.response.niterate = 3\r\n # iraf.response.low_rej = 3\r\n # iraf.response.high_rej = 3\r\n # if side == 'blue':\r\n # iraf.twodspec.longslit.dispaxis = 2\r\n # else:\r\n # iraf.twodspec.longslit.dispaxis = 1\r\n \r\n\r\n # measure flat-field error from sigma images\r\n iraf.unlearn('imcombine')\r\n iraf.imcombine.reject = 'avsigclip'\r\n iraf.imcombine(','.join(flats), output='flat', sigma='sigma', scale='mode')\r\n iraf.imarith('sigma', '/', 'flat', 'frac')\r\n s = iraf.imstat('frac.fits', fields=\"mean\", nclip=20, Stdout=1, format=\"no\")\r\n print 'Flat field error: ', np.float(s[0])\r\n 
iraf.delete('flat.fits', verify=\"no\")\r\n iraf.delete('sigma.fits', verify=\"no\")\r\n iraf.delete('frac.fits', verify=\"no\")\r\n else:\r\n print \"No dome or internal flats for the %s arcsec slit.\" % aperture", "def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):\n assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \\\n 'All sets of hms must be tagged with downscaling and upscaling flags'\n\n # Classify objects into small+medium and large based on their box areas\n areas = box_utils.boxes_area(boxes)\n sm_objs = areas < cfg.TEST.KPS_AUG.AREA_TH\n l_objs = areas >= cfg.TEST.KPS_AUG.AREA_TH\n\n # Combine heatmaps computed under different transformations for each object\n hms_c = np.zeros_like(hms_ts[0])\n\n for i in range(hms_c.shape[0]):\n hms_to_combine = []\n for hms_t, ds_t, us_t in zip(hms_ts, ds_ts, us_ts):\n # Discard downscaling predictions for small and medium objects\n if sm_objs[i] and ds_t:\n continue\n # Discard upscaling predictions for large objects\n if l_objs[i] and us_t:\n continue\n hms_to_combine.append(hms_t[i])\n hms_c[i] = heur_f(hms_to_combine)\n\n return hms_c", "def spMultiIndex(self):\n # reset column levels\n self.spfiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])\n self.spRMS.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMS'), len(self.channels))],names=['Channel','datatype'])\n self.spRMSmavg.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMSmavg'), len(self.channels))],names=['Channel','datatype'])\n\n # list df vars for index specs\n dfs =[self.spfiltEEG, self.spRMS, self.spRMSmavg] # for > speed, don't store spinfilt_RMS as an attribute\n calcs = ['Filtered', 'RMS', 'RMSmavg']\n lvl0 = np.repeat(self.channels, len(calcs))\n lvl1 = calcs*len(self.channels) \n \n # combine & custom sort\n self.spindle_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])", "def calc_spindle_buffer_means(self):\n \n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_buffer_aggregates = {}\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles_wbuffer[chan]:\n # set the base df\n agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])\n rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))\n # join on the index for each spindle\n for x in range(1, len(self.spindles_wbuffer[chan])):\n mean_df = agg_df.join(self.spindles_wbuffer[chan][x]['Raw'], how='outer', rsuffix=rsuffix[x-1])\n spindle_buffer_aggregates[chan] = mean_df\n \n print('Calculating statistics...')\n # create a new multiindex dataframe for calculations\n calcs = ['mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n spindle_buffer_means = pd.DataFrame(columns=columns)\n \n # fill the dataframe\n for chan in spindle_buffer_aggregates.keys():\n spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)\n spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)\n spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)\n \n self.spindle_buffer_aggregates = spindle_buffer_aggregates\n self.spindle_buffer_means = spindle_buffer_means\n print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. 
Spindle statisics stored in obj.spindle_buffer_means dataframe.')", "def WeightFromPro(infile='PROCAR', lsorbit=False):\n\n assert os.path.isfile(infile), '%s cannot be found!' % infile\n FileContents = [line for line in open(infile) if line.strip()]\n\n # when the band number is too large, there will be no space between \";\" and\n # the actual band number. A bug found by Homlee Guo.\n # Here, #kpts, #bands and #ions are all integers\n nkpts, nbands, nions = [int(xx) for xx in re.sub(\n '[^0-9]', ' ', FileContents[1]).split()]\n\n # Weights = np.asarray([line.split()[-1] for line in FileContents\n # if not re.search('[a-zA-Z]', line)], dtype=float)\n Weights = np.asarray([line.split()[1:-1] for line in FileContents\n if not re.search('[a-zA-Z]', line)], dtype=float)\n\n kpt_weight = np.asarray(\n [line.split()[-1] for line in FileContents if 'weight' in line], dtype=float)\n\n energies = np.asarray([line.split()[-4] for line in FileContents\n if 'occ.' in line], dtype=float)\n\n nlmax = Weights.shape[-1]\n nspin = Weights.shape[0] // (nkpts * nbands * nions)\n nspin //= 4 if lsorbit else 1\n\n if lsorbit:\n Weights.resize(nspin, nkpts, nbands, 4, nions, nlmax)\n Weights = Weights[:, :, :, 0, :, :]\n else:\n Weights.resize(nspin, nkpts, nbands, nions, nlmax)\n\n kpt_weight.resize(nspin, nkpts)\n energies.resize(nspin, nkpts, nbands)\n\n return energies, kpt_weight, Weights", "def make_steer_frs(dims, numlevels, numorientations, bandwidth):\n \n result = []\n bands=[]\n p = numorientations-1\n const = math.sqrt(float(math.pow(2,(2*p))*math.pow(math.factorial(p),2)) / float(math.factorial(2*p)*(p+1)))\n f1 = freqspace(dims[0])\n f2 = freqspace(dims[1])\n wx, wy = np.meshgrid(f1, f2)\n size = wx.shape\n r = np.sqrt(wx**2 + wy**2)\n theta = np.arctan2(wy, wx) \n \n bands = np.full((numlevels, numorientations, dims[0], dims[1]), const*1j)\n for level in range(numlevels):\n for orientation in range(numorientations):\n theta_offset = orientation * np.pi / numorientations\n ctrfreq = pi / math.pow(2, (level+1)*bandwidth)\n band = np.cos(theta - theta_offset)**p * log_raised_cos(r, ctrfreq, bandwidth)\n bands[level,orientation,:,:] *= band\n \n hi = log_raised_coshi(r, pi / math.pow(2, bandwidth), bandwidth)\n\n lo = log_raised_coslo(r, pi / math.pow(2, bandwidth * numlevels), bandwidth)\n \n result.append(hi)\n result.append(bands)\n result.append(lo)\n return result", "def apply_transmission(self, slamb, sflux):\n mean, samples = self._get_mean_and_samples_attribute('apply_transmission')\n mean_val = mean(slamb, sflux)\n samp_val = [sk(slamb, sflux) for sk in samples]\n return mean_val, samp_val", "def weightedAverage(requestContext, seriesListAvg, seriesListWeight, node):\n\n sortedSeries={}\n\n for seriesAvg, seriesWeight in izip(seriesListAvg , seriesListWeight):\n key = seriesAvg.name.split(\".\")[node]\n if key not in sortedSeries:\n sortedSeries[key]={}\n\n sortedSeries[key]['avg']=seriesAvg\n key = seriesWeight.name.split(\".\")[node]\n if key not in sortedSeries:\n sortedSeries[key]={}\n sortedSeries[key]['weight']=seriesWeight\n\n productList = []\n\n for key in sortedSeries.keys():\n if 'weight' not in sortedSeries[key]:\n continue\n if 'avg' not in sortedSeries[key]:\n continue\n\n seriesWeight = sortedSeries[key]['weight']\n seriesAvg = sortedSeries[key]['avg']\n\n productValues = [ safeMul(val1, val2) for val1,val2 in izip(seriesAvg,seriesWeight) ]\n name='product(%s,%s)' % (seriesWeight.name, seriesAvg.name)\n productSeries = 
TimeSeries(name,seriesAvg.start,seriesAvg.end,seriesAvg.step,productValues)\n productSeries.pathExpression=name\n productList.append(productSeries)\n\n sumProducts=sumSeries(requestContext, productList)[0]\n sumWeights=sumSeries(requestContext, seriesListWeight)[0]\n\n resultValues = [ safeDiv(val1, val2) for val1,val2 in izip(sumProducts,sumWeights) ]\n name = \"weightedAverage(%s, %s)\" % (','.join(set(s.pathExpression for s in seriesListAvg)) ,','.join(set(s.pathExpression for s in seriesListWeight)))\n resultSeries = TimeSeries(name,sumProducts.start,sumProducts.end,sumProducts.step,resultValues)\n resultSeries.pathExpression = name\n return resultSeries", "def normaliseTracesMagnitude(traces, unfolded_level): \n return traces / np.abs(unfolded_level)", "def get_weights_from_log(log, plot = False):\n with open(log, 'r') as f:\n log_file = f.readlines()\n\n i = 0\n time = []\n weights_info = []\n weights_0 = []\n while i < len(log_file):\n if 'init-lambda-weights[' in log_file[i]:\n weights_0.append(float(log_file[i].split('=')[-1]))\n\n if 'MC-lambda information' in log_file[i]:\n # Finding the time\n for j in range(i,0,-1):\n if log_file[j].startswith(' Step Time'):\n j += 1\n time.append(float(log_file[j].split()[-1]))\n break\n # Finding the weight\n weights_info_tmp = []\n i += 3\n while log_file[i] != '\\n':\n split = log_file[i].split()\n count = int(split[2])\n weight = float(split[3])\n weights_info_tmp.append((count, weight))\n i += 1\n weights_info.append(weights_info_tmp)\n i += 1\n # Add weights at t = 0, because the counts are all 0 and I delate the entrances with total count 0 in next lines,\n # What i could do is put 1 in the initial temperature\n time.insert(0,0)\n weights_info.insert(0,list(zip([1] + (len(weights_0) - 1)*[0], weights_0)))\n\n #Converting to array\n time = np.array(time)\n weights_info = np.array(weights_info)\n # Some times (I don't know why) GROMACS reset all the weights and all the counts are 0. 
We need to eliminate those points\n sum_of_weights = weights_info[:,:,0].sum(axis = 1)\n time = time[sum_of_weights != 0]\n weights_info = weights_info[sum_of_weights != 0]\n sum_of_weights = sum_of_weights[sum_of_weights != 0]\n\n\n if plot:\n dir = os.path.dirname(log)\n fig, axes = plt.subplots(2, figsize = (16,9), sharex=True)\n NUM_COLORS = weights_info.shape[1]\n cm = plt.get_cmap('viridis')#gist_rainbow viridis\n for axe in axes:\n axe.set_prop_cycle('color', [cm(1.*j/NUM_COLORS) for j in range(NUM_COLORS)])\n\n probability = weights_info[:,:,0] / sum_of_weights[:,np.newaxis]\n for j in range(weights_info.shape[1]):\n #axes[0].plot(time, weights_info[:,j,0], label = str(j))\n axes[0].plot(time, probability[:,j], label = str(j))\n axes[1].plot(time, weights_info[:,j,1])\n\n fig.legend(loc = 'lower center', ncol = int(weights_info.shape[1] / 2))\n axes[0].set(\n xlim = (time.min(), time.max()),\n ylim = (0,1),\n ylabel = 'Probability',\n )\n axes[1].set(\n xlabel = 'Time [ps]',\n ylabel = 'Weight values'\n )\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_progression.svg'), bbox_inches=\"tight\")\n\n # Plotting the violin plot of the weights\n df = pd.DataFrame()\n for j in range(weights_info.shape[1]):\n #df[temperatures[j]] = weights_info[:,j,1]\n df[j] = weights_info[:,j,1]\n # Set up the matplotlib figure\n sns.set_theme(style=\"whitegrid\")\n fig, ax = plt.subplots(figsize=(25, 25))\n\n # Draw a violinplot with a narrower bandwidth than the default\n sns.violinplot(data=df, palette=\"Set3\", bw=.2, cut=1, linewidth=1)\n # The plot is not over the actual temperatures, the temperatures ara only labels\n ax.plot(range(len(weights_info[0,:,1])), weights_info[0,:,1], '-o', label = 'Initial weights')\n ax.set(\n title = 'Weights per state over the entire simulation',\n xlabel = 'Sate',\n ylabel = 'Weight',\n )\n plt.legend()\n sns.despine(left=True, bottom=True)\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_per_state.svg'), bbox_inches=\"tight\")\n sns.reset_defaults()\n\n return time, weights_info", "def updateweigths():\n for i_lay in range(1, len(layers)):\n layers[i_lay][\"weigths\"] += layers[i_lay][\"Delta_w\"]", "def transfrom_stats(self, s_sample, d_sample, x_sample, wav_len):\n\t\ts_STMS_sample = []\n\t\td_STMS_sample = []\n\t\tx_STMS_sample = []\n\t\tfor i in tqdm(range(s_sample.shape[0])):\n\t\t\ts_STMS, _ = self.polar_analysis(s_sample[i,0:wav_len[i]])\n\t\t\td_STMS, _ = self.polar_analysis(d_sample[i,0:wav_len[i]])\n\t\t\tx_STMS, _ = self.polar_analysis(x_sample[i,0:wav_len[i]])\n\t\t\ts_STMS_sample.append(np.squeeze(s_STMS.numpy()))\n\t\t\td_STMS_sample.append(np.squeeze(d_STMS.numpy()))\n\t\t\tx_STMS_sample.append(np.squeeze(x_STMS.numpy()))\n\t\ts_STMS_sample = np.vstack(s_STMS_sample)\n\t\td_STMS_sample = np.vstack(d_STMS_sample)\n\t\tx_STMS_sample = np.vstack(x_STMS_sample)\n\t\treturn s_STMS_sample, d_STMS_sample, x_STMS_sample", "def add_weights(projections, settings):\n uu, vv = np.meshgrid(settings.detector_us, settings.detector_vs)\n\n weights = settings.source_to_detector_dist / np.sqrt(\n settings.source_to_detector_dist ** 2. + uu ** 2. 
+ vv ** 2.)\n\n return projections * weights[:, :, np.newaxis]", "def analyze_data(self, bandwith=1):\n data = {\"Topple Count\": self.topple_count, \"Fallen mass\": self.mass_fallen_count}\n print(data)\n\n self.plot()\n self.plot(type='mass')\n self.plot(type='topple')\n self.plot(type='histogram',bandwith=bandwith)\n self.plot(type='pdf',bandwith=bandwith)", "def __add__(self, other):\n output = Spectrum(self.wavelengths, self.intensities)\n for wavelength, intensity in other:\n if output[wavelength]:\n output[wavelength] += intensity\n else:\n output[wavelength] = intensity\n return output", "def add_nfb_export_data(self, signal: dict):\n signal[\"fSmoothingFactor\"] = self.smoothingFactor()\n signal[\"method\"] = self.method()\n signal[\"sTemporalSmootherType\"] = self.smootherType()", "def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :\n if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))\n else : fig,ax=fig\n tbins=[3000,3500,4000,4500,5500,8000,30000] \n hbins=[8,11,12,13,15]\n try: snr = a['SNREV']\n except: snr=a['SNR']\n j=np.where(snr > 300) [0]\n snr[j] = 300\n for i in range(len(tbins)-1) :\n ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)\n for j in range(len(hbins)-1) :\n ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))\n gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &\n (a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &\n (a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]\n print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))\n try :\n #plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')\n ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)\n ax[i,j].set_xlabel('VSCATTER (km/s)')\n ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())\n #ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])\n #ax[i,1].set_xlabel('VSCATTER')\n except : pass\n\n if out is not None : \n fig.savefig(out+'.png')\n plt.close()\n\n fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))\n return fig,ax", "def spectrum_processing(s):\n s = default_filters(s)\n s = add_precursor_mz(s)\n s = normalize_intensities(s)\n s = reduce_to_number_of_peaks(s, n_required=5, ratio_desired=0.5, n_max=500)\n s = select_by_mz(s, mz_from=0, mz_to=1000)\n s = add_losses(s, loss_mz_from=10.0, loss_mz_to=200.0)\n s = require_minimum_number_of_peaks(s, n_required=5)\n return s", "def plot_combined_spectrum(SSC, band):\n\n def get_spectrum(SSC, band):\n spectrum = spectra[str(SSC['no'])][band]\n frequency = spectrum['frequency'].to(u.GHz)\n intensity = spectrum['spectrum'].to(u.K)\n # shift spectrum to rest frequency\n velshift = SSC['velshift']\n frequency = [(-vsys-velshift).to(u.GHz, equivalencies=u.doppler_optical(f)).value for f in frequency]*u.GHz\n # remove NaNs\n frequency, intensity = crossmatch(frequency.to(u.GHz).value, intensity.to(u.K).value)\n return frequency, intensity\n\n def get_model(SSC, band):\n with open(escape_fname(os.path.join(XCLASSdir,'SSC_'+str(SSC['no']),'combined_model.spectrum.pickle')), 'rb') as f:\n m = pickle.load(f, encoding=\"latin1\")\n frequency = (m[:,0]*u.MHz).to(u.GHz)\n model = m[:,1]*u.K\n return frequency.value,model.value\n\n def set_up_figure(SSC, band):\n fig,ax = plt.subplots(nrows=1, ncols=1, 
squeeze=True, sharex='col', sharey='row', figsize=(10,8))\n ax.text(0.05, 0.9, 'SSC '+str(SSC['no'])+': '+band, color='k', transform=ax.transAxes, ha='left', va='top', weight='bold', fontsize=16, bbox=props)\n return fig,ax\n\n def plot_spectrum(ax, frequency, spectrum):\n ax.plot(frequency, spectrum, lw=1, ls='-', color='k', zorder=3)\n ax.fill_between(frequency, spectrum, [0. for f in frequency], color='grey', alpha=0.5, zorder=2)\n\n def plot_fitted_spectrum(ax, frequency, model):\n ax.plot(frequency, model, lw=1, ls='-', color='r', zorder=5)\n # ax.fill_between(frequency, model, [0. for f in frequency], color='r', alpha=0.5, zorder=4)\n\n def get_detected_lines(band=None):\n # get detected species\n all_species = []\n for SSC in SSCs:\n for specie in detected_species[str(SSC['no'])]:\n if not specie in all_species:\n all_species.append(specie)\n # get all lines of the detected species\n all_lines = []\n for specie in all_species:\n slines = [l for l in lines if l['XCLASS']==specie]\n for sl in slines:\n all_lines.append(sl)\n # keep only lines of given band\n if not band==None:\n bandlines = []\n for line in all_lines:\n if band=='LSB':\n if line['restfreq']<350*u.GHz:\n bandlines.append(line)\n elif band=='USB':\n if line['restfreq']>350*u.GHz:\n bandlines.append(line)\n return sorted(bandlines, key=lambda k: k['restfreq'])\n else:\n return sorted(all_lines, key=lambda k: k['restfreq'])\n\n def label_lines(ax, spectrum, band):\n detected_lines = get_detected_lines(band=band)\n for idx,line in enumerate(detected_lines):\n restfreq = line['restfreq'].to(u.GHz).value\n if (restfreq>frequency[0] and restfreq<frequency[-1]):\n if band=='LSB':\n xlim = [342.4, 346.2]\n elif band=='USB':\n xlim = [354.3, 358.1]\n xloc = xlim[0] +((idx+0.5)/len(detected_lines))*(xlim[1]-xlim[0])\n ax.axvline(x=restfreq, ymin=0, ymax=1, color='dimgrey', ls='--', lw=0.5, zorder=1)\n ax.plot([restfreq,xloc], [1.05*np.nanmax(spectrum), 1.05*1.05*np.nanmax(spectrum)], color='dimgrey', ls='--', lw=0.5, zorder=1, clip_on=False)\n ax.text(xloc, 1.06*1.05*np.nanmax(spectrum), line_tex(line), color='dimgrey', fontsize=10, rotation=90, ha='center', va='bottom')\n\n def format_figure(ax, frequency, spectrum, band):\n if band=='LSB':\n ax.set_xlim([342.4, 346.2])\n elif band=='USB':\n ax.set_xlim([354.3, 358.1])\n ax.set_ylim(-0.05*np.nanmax(spectrum), 1.05*np.nanmax(spectrum))\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.1))\n ax.yaxis.set_major_locator(MultipleLocator(10))\n ax.yaxis.set_minor_locator(MultipleLocator(2))\n ax.tick_params(axis='both', which='major', labelsize=12)\n ax.set_axisbelow(True)\n ax.grid(axis='y', ls=':', c='grey')\n ax.set_xlabel(r'$\\nu_\\mathrm{rest}$ [GHz]', fontsize=12)\n ax.set_ylabel(r'T$_\\mathrm{b}$ [K]', fontsize=12)\n fig.set_tight_layout(True)\n\n def save_figure(fig, band):\n savepath = escape_fname(os.path.join(plotdir, '03.XCLASS_fit', 'combined_spectra', 'SSC_'+str(SSC['no'])+'.'+band+'.combined_spectrum.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_inches='tight')\n\n\n frequency, spectrum = get_spectrum(SSC, band)\n mfrequency, model = get_model(SSC, band)\n fig,ax = set_up_figure(SSC, band)\n plot_spectrum(ax, frequency, spectrum)\n plot_fitted_spectrum(ax, mfrequency, model)\n label_lines(ax, spectrum, band)\n format_figure(ax, frequency, spectrum, band)\n save_figure(fig, band)", "def process_pssm_data(self):\n\n self.pssm_data = 
self._mask_pssm(self.pssm_data,nmask=self.nmask)\n self.pssm_data = self._filter_pssm(self.pssm_data)\n self.pssm_data = self._smooth_pssm(self.pssm_data,msmooth=self.nsmooth)\n self.pssm_data = np.mean(self.pssm_data,1)", "def GetParameters_and_Weight_of_CalSensor(ind, similar_sensors): \n v, a, h = similar_sensors.loc[ind]['Vert_Shift'], similar_sensors.loc[ind]['Amplitude'], similar_sensors.loc[ind]['Horiz_Shift']\n por, res, drain = similar_sensors.loc[ind]['Porosity'], similar_sensors.loc[ind]['Res_SM'], similar_sensors.loc[ind]['Drainage']\n n, w = similar_sensors.loc[ind]['n'], similar_sensors.loc[ind]['Weight']\n return v,a,h,por,res,drain,n,w", "def weightThick(var,lats,types):\n \n if types == 'lens':\n sityr = np.empty((var.shape[0],var.shape[1],var.shape[2]))\n for ens in xrange(var.shape[0]):\n for i in xrange(var.shape[1]):\n for j in xrange(var.shape[2]):\n varq = var[ens,i,j,:,:]\n mask = np.isfinite(varq) & np.isfinite(lats)\n varmask = varq[mask]\n areamask = np.cos(np.deg2rad(lats[mask]))\n sityr[ens,i,j] = np.nansum(varmask*areamask)/np.sum(areamask)\n \n print 'Completed: Weighting per ensemble #%s!' % ense[ens]\n \n elif types == 'piomas':\n sityr = np.empty((var.shape[0],var.shape[1]))\n for i in xrange(var.shape[0]):\n for j in xrange(var.shape[1]):\n varq = var[i,j,:,:]\n mask = np.isfinite(varq) & np.isfinite(lats)\n varmask = varq[mask]\n areamask = np.cos(np.deg2rad(lats[mask]))\n sityr[i,j] = np.nansum(varmask*areamask)/np.sum(areamask)\n \n print '\\nCompleted: Yearly weighted SIT average!' \n return sityr", "def readSrc_bySens(self):\n dctn = self.srcData\n dctn['header'] = []\n # dctn['header'] = ['%% This dictionary created by alog_manip.alogrd_dict']\n for msg in self.srcFile: # broken by lines, are now strings\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n dctn['header'].append(msg) # assume all comments occur at beginning of file\n else:\n msg = msg.split()\n if msg[2] not in dctn: # none from this gSource yet\n dctn[msg[2]] = {}\n if msg[1] not in dctn[msg[2]]: # none in this gSource from this zMeas yet\n dctn[msg[2]][msg[1]] = {}\n try:\n dctn[msg[2]][msg[1]][float(msg[0])] = float(msg[3]) # double\n except ValueError: # it's a string\n # dimc = msg[3].split(']')[0].split('x')[1] # cols\n # dimr = msg[3].split(']')[0].split('x')[0][1:] # rows\n value_s = msg[3].split(']')[1][1:-1].split(',')\n dctn[msg[2]][msg[1]][float(msg[0])] = [float(i) for i in value_s]\n except IndexError: # it's blank\n dctn[msg[2]][msg[1]][float(msg[0])] = None # nan better?", "def __add__(self, spectrum):\n new_wave = np.unique(np.append(self.wave, spectrum.wave))\n new_wave.sort()\n \n new_flux = np.interp(new_wave, self.wave, self.flux)\n new_flux += np.interp(new_wave, spectrum.wave, spectrum.flux)\n out = SpectrumTemplate(wave=new_wave, flux=new_flux)\n out.fwhm = spectrum.fwhm\n return out", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, 
np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) 
)\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def addMetrics_nlcd(feats, feats_id, raster, fld_prefix=\"imp\", mask=None):\n arcpy.env.cellSize = raster\n arcpy.env.snapRaster = raster\n\n exstFld = [a.name for a in arcpy.ListFields(feats)]\n print('Calculating zonal statistics...')\n if mask:\n envmask = arcpy.env.mask\n arcpy.env.mask = mask\n print(\"Using mask `\" + mask + \"`...\")\n arcpy.sa.ZonalStatisticsAsTable(feats, feats_id, raster, 'tmp_zs', \"DATA\", \"MEAN\")\n print('Joining and calculating raster area/percentages...')\n calcFld('tmp_zs', fld_prefix + '_perc', \"!MEAN!\", field_type=\"FLOAT\")\n if fld_prefix + '_perc' in exstFld:\n arcpy.DeleteField_management(feats, fld_prefix + '_perc')\n arcpy.JoinField_management(feats, feats_id, 'tmp_zs', feats_id, fld_prefix + '_perc')\n calc = '((!' + fld_prefix + '_perc! / 100) * !Shape_Area!) / 4046.856'\n calcFld(feats, fld_prefix + '_acres', calc, field_type=\"FLOAT\")\n calc = '((1- (!' + fld_prefix + '_perc! / 100)) * !Shape_Area!) / 4046.856'\n calcFld(feats, 'not' + fld_prefix + '_acres', calc, field_type=\"FLOAT\")\n # add percentage fields\n calc = '100 - !' + fld_prefix + '_perc!'\n calcFld(feats, 'not' + fld_prefix + '_perc', calc, field_type=\"FLOAT\")\n # set mask back to original\n if mask:\n arcpy.env.mask = envmask\n\n return feats", "def fwhmwhisker_multiext(filename,sigma,band,zenith):\n hdu=pf.open(filename)\n e1=[]\n e2=[]\n fwhmw=[]\n whiskerw=[]\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n for i in range(Nobj):\n print i\n img = hdui.data[i][4:].reshape(160,160)\n imgrbin = rebin(img,(40,40))\n res=wfwhm(imgrbin,sigma)\n e1.append(res[0])\n e2.append(res[1])\n whiskerw.append(res[2]*0.27)\n fwhmw.append(res[3]*0.27)\n e1 = np.array(e1)\n e2 = np.array(e2)\n fwhmw = np.array(fwhmw)\n whiskerw = np.array(whiskerw)\n e1mean = e1.mean()\n e1std = e1.std()\n e2mean = e2.mean()\n e2std = e2.std()\n whiskerwmean = whiskerw.mean()\n whiskerwstd = whiskerw.std()\n fwhmwmean = fwhmw.mean()\n fwhmwstd = fwhmw.std()\n r50mean = np.mean(fwhmw/2.)\n r50std = np.std(fwhmw/2.)\n pl.figure(figsize=(15,10))\n pl.subplot(2,3,1)\n pl.hist(e1,bins=20,normed=True)\n pl.xlabel('e1')\n pl.title('mean: '+str(round(e1mean,6))+' std: '+str(round(e1std,5)))\n pl.subplot(2,3,2)\n pl.hist(e2,bins=20,normed=True)\n pl.xlabel('e2')\n pl.title('mean: '+str(round(e2mean,6))+' std: '+str(round(e2std,5)))\n pl.subplot(2,3,3)\n pl.hist(whiskerw,bins=20,normed=True)\n pl.xlabel('whisker')\n pl.title('mean: '+str(round(whiskerwmean,5))+' std: '+str(round(whiskerwstd,5)))\n pl.subplot(2,3,4)\n pl.hist(fwhmw,bins=20,normed=True)\n pl.xlabel('fwhm')\n pl.title('mean: '+str(round(fwhmwmean,5))+' std: '+str(round(fwhmwstd,5)))\n pl.subplot(2,3,5)\n pl.hist(fwhmw/2.,bins=20,normed=True)\n pl.xlabel('r50')\n pl.title('mean: '+str(round(r50mean,5))+' std: '+str(round(r50std,5)))\n pl.figtext(0.7,0.4,'band: '+band)\n pl.figtext(0.7,0.37,'zenith angle: '+zenith +' deg')\n pl.figtext(0.3,0.95,'Perfect focus/alignment, 0.7 arcsec fwhm circular seeing',fontsize=18,color='red')\n pl.savefig(filename[0:-6]+'png')\n 
np.savetxt(filename[0:-6]+'txt',[e1mean,e1std,e2mean,e2std,whiskerwmean,whiskerwstd,fwhmwmean,fwhmwstd,r50mean,r50std],fmt='%10.5f')\n pl.close()\n return '---done !-----'", "def make_figure(self, traces):\n pass", "def whiskerStat_multiext(filename,sigma,noise=False,mag=None,exptime=None):\n hdu=pf.open(filename)\n data = []\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n Mcc=np.zeros(Nobj)\n Mrr = np.zeros(Nobj)\n Mrc = np.zeros(Nobj)\n r50 = np.zeros(Nobj)\n for i in range(Nobj):\n print i\n imgo = hdui.data[i][4:].reshape(160,160)\n psf = rebin(imgo,(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n Mcc[i],Mrr[i],Mrc[i]=complex2ndMoments(img,sigma)\n r50[i] = mfwhm(img)[5]\n data.append([np.mean(Mcc),np.mean(Mrr),np.mean(Mrc),np.mean(r50)])\n data = np.array(data)\n datamean =np.array([robust_mean(data[:,0]),robust_mean(data[:,1]),robust_mean(data[:,2]),robust_mean(data[:,3])])\n #r50 = 0.5*2.35482*np.sqrt((datamean[0]+datamean[1])/2.)*0.27\n r50moffat = datamean[3]*0.27\n whk = ((datamean[0]-datamean[1])**2 + (2.*datamean[2])**2)**(0.25)*0.27\n phi = np.rad2deg(0.5*np.arctan2(2.*datamean[2],(datamean[0]-datamean[1])))\n datasubmean = data - datamean\n whkrms = (robust_mean((datasubmean[:,0] - datasubmean[:,1])**2 + 4.*datasubmean[:,2]**2))**(0.25)*0.27\n np.savetxt(filename[0:-6]+'txt',[r50moffat,whk,phi,whkrms,datamean[0],datamean[1],datamean[2]],fmt='%10.5f')\n return '---done !-----'", "def summarize_sample_props(psd_list, sample_list):\n prop_list = [psd.sample_props for psd in psd_list]\n cols = ['amplicon median', 'mean size', 'lower size', 'upper size']\n\n return pd.DataFrame(prop_list, columns=cols, index=sample_list)", "def copyWeights(self, shp, stray = 0, colour = (240,100,100)):\n self.weights = []\n self.bias = []\n if(stray == 0): # straight copy\n for i, wt in enumerate(shp.weights):\n self.weights.append(wt.copy())\n for i,bs in enumerate(shp.bias):\n self.bias.append(bs.copy())\n else: # Copy with some random added in\n for i, wt in enumerate(shp.weights):\n self.weights.append(np.add(wt.copy(), np.random.normal(0,stray,(shp.dimensions[i],shp.dimensions[i+1]))))\n for i,bs in enumerate(shp.bias):\n self.bias.append(np.add(bs.copy(), np.random.normal(0,stray,shp.dimensions[i+1])))\n self.normalizeWeights()\n self.colour = colour\n self.parentname = shp.name\n self.parentcolour = shp.colour\n self.setDimension(shp.inputdistance,shp.inputangle,shp.intermediates,shp.orders)", "def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif 
self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict", "def consolidate_compound_EPSP_traces(source_dict):\n trace_len = int((context.sim_duration['clustered'] - context.equilibrate + context.trace_baseline) / context.dt)\n target_dict = {}\n\n for syn_group in source_dict:\n if syn_group not in target_dict:\n target_dict[syn_group] = {}\n num_syn_ids = len(context.syn_id_dict[syn_group])\n for syn_condition in source_dict[syn_group]:\n if syn_condition not in target_dict[syn_group]:\n target_dict[syn_group][syn_condition] = {}\n for rec_name in context.synaptic_integration_rec_names:\n target_array = np.empty((num_syn_ids, trace_len))\n for i in range(num_syn_ids):\n num_syns = i + 1\n target_array[i,:] = source_dict[syn_group][syn_condition][num_syns][rec_name]\n target_dict[syn_group][syn_condition][rec_name] = target_array\n\n return target_dict", "def A_weight(signal, fs):\n\n b, a = A_weighting(fs)\n return lfilter(b, a, signal)", "def preprocessing(self):\n # Standardizing series names\n self.raw.columns = ['stress', 'strain', 'e']\n # Removing percentage format to strain values\n if self.strainPercent:\n self.raw['strain'] = self.raw['strain'].divide(100)\n # On-table (initial) void ratio\n self.e_0 = self.raw['e'].iloc[0]\n return", "def sld(self, probe):\n # Convert fractions into an array, with the final fraction\n fraction = np.array([0.]+[f.value for f in self.fraction])\n fraction[0] = 100 - sum(fraction)\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n if (fraction < 0).any():\n return NaN, NaN\n\n # Lookup SLD\n slds = [c.sld(probe) for c in [self.base] + self.material]\n rho, irho = 
[np.asarray(v) for v in zip(*slds)]\n\n # Use calculator to convert individual SLDs to overall SLD\n volume_fraction = self._volume(fraction)\n rho = np.sum(rho*extend(volume_fraction, rho))\n\n irho = np.sum(irho*extend(volume_fraction, irho))\n if self.use_incoherent:\n raise NotImplementedError(\"incoherent scattering not supported\")\n #print \"Mixture\", self.name, coh, absorp\n\n return rho, irho", "def denoise(self):\n\n #make sure the data has a len dividible by 2^2\n self.len_swt = self.len\n while not (self.len_swt/4).is_integer():\n self.len_swt -= 1\n\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp,self.wave,level=2)\n\n print(\" \\t Denoise STW coefficients \\t %1.2f %1.2f\" %(self.TK,self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n\n # rolling kurtosis\n k2 = self._rolling_kts(cD2,self.nwin)\n k1 = self._rolling_kts(cD1,self.nwin)\n\n # thresholding\n cD2[k2<self.TK] = 0\n cD1[k1<self.TK] = 0\n\n cA2[k2<self.TK] = 0\n cA1[k1<self.TK] = 0\n\n # universal threshold\n sigma_roll_1 = mad(cD1[cD1!=0])*np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2*np.log(self.len_swt))\n cD1[abs(cD1)<uthresh_roll_1] = 0\n\n # universal threshold\n sigma_roll_2 = mad(cD2[cD2!=0])*np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2*np.log(self.len_swt))\n cD2[abs(cD2)<uthresh_roll_2] = 0\n\n # final threshold\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1,cD1),(cA2,cD2)]\n\n # denoise the data\n #self.input_denoised = self._iswt(self.denoised_coeffs,self.wave)\n self.input_denoised = pywt.iswt(self.denoised_coeffs,self.wave)", "def addSTDdevIndices(img):\n\t\t\timg = img.addBands(img.normalizedDifference(['green','swir1']).rename(['ND_green_swir1'])); # NDSI, MNDWI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','red']).rename(['ND_nir_red'])); # NDVI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','swir2']).rename(['ND_nir_swir2'])); # NBR, MNDVI\n\t\t\t\n\t\t\treturn img;", "def getSpectralEnergy(datatype, traceList, outfile, channelStart, channelEnd):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n #for decimated data,sampleRate should be reflected\r\n #set wlen to 0.25 sec, high pass is 250\r\n wlen = 0.5*sampleRate\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = len(traceList)\r\n Emat = None\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n \r\n t_ = (traceList[0].stats.endtime-traceList[0].stats.starttime)\r\n dx_ = traceList[1].stats.distance - traceList[0].stats.distance\r\n extent = [0,len(traceList)*dx_/1e3,0,t_/100.0]\r\n\r\n for itr in range(0,nTraces):\r\n #F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n # window='hann', nfft=nfft, mode='magnitude')\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n #energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n energy = np.sum(SXX[1:,:],axis=0)\r\n #energy = np.log10(np.abs(energy/np.max(energy)))*10.0\r\n energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, 
len(T)))\r\n Emat[itr,:]=energy\r\n if DEBUG:\r\n plt.figure()\r\n im = plt.imshow(Emat,extent=extent)\r\n plt.colorbar(im)\r\n plt.savefig('spectralenergy{0}_ch{1}_{2}.png'.format(outfile,channelStart,channelEnd))\r\n plt.close()", "def get_mlt_phys(sed_name):\n\n new_name = sed_name.replace('+','-').replace('a','-').split('-')\n\n logg_sgn_dex = len(new_name[0])\n\n if sed_name[logg_sgn_dex] == '-':\n logg_sgn = 1.0\n elif sed_name[logg_sgn_dex] == '+':\n logg_sgn = -1.0\n else:\n raise RuntimeError('Cannot get logg_sgn for %s' % sed_name)\n\n metallicity_sgn_dex = len(new_name[0]) + len(new_name[1]) + 1\n\n if sed_name[metallicity_sgn_dex] == '-':\n metallicity_sgn = -1.0\n elif sed_name[metallicity_sgn_dex] == '+':\n metallicity_sgn = 1.0\n else:\n raise RuntimeError('Cannot get metallicity_sgn for %s' % sed_name)\n\n teff = 100.0*float(new_name[0][3:])\n metallicity = metallicity_sgn*float(new_name[2])\n logg = logg_sgn*float(new_name[1])\n\n return teff, metallicity, logg", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def ProduceSMFromSimilarCalSensors(similar_sensors, depth, site_data):\n sm_estimates, weights = [],[] \n for ind in similar_sensors.index:\n v,a,h,por,res,drain,n,w = GetParameters_and_Weight_of_CalSensor(ind, similar_sensors)\n print(\"Estimate from parameters of:\", similar_sensors.loc[ind]['Network'], similar_sensors.loc[ind]['Site_Code'], similar_sensors.loc[ind]['Site_Info'])\n recent_site_data = site_data[(n*-1 - 1):] #just enough to generate a SM estimate\n recent_site_data = CalculateSMEstimates(recent_site_data, [v,a,h,por,res,drain,n], depth, 'SM', 'P', 'DOY')\n sm_estimates.append(list(recent_site_data['SMest_' + str(depth)])[-1]); weights.append(w)\n return sm_estimates, weights", "def spectrumify(scattering_data, instr_broadening=0.1):\n graph_angles = n.linspace(0,180,10000)\n graph_intensities = n.zeros(graph_angles.shape)\n \n for angle, intensity in sorted(scattering_data.items()):\n graph_intensities += intensity * \\\n n.exp(-(graph_angles - angle)**2 / \\\n (2*(instr_broadening)**2))\n \n return graph_angles, graph_intensities", "def main():\n# pixel_to_wavelen_dir = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\\n# Spectral_Band_pass\\Pixel_to_wavelen_map'\n\n file_path = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM_only_Gaussian'\n radiance_file = read_radiance_data()\n file_path_2 = r'C:\\Users\\nmishra\\Workspace\\TEMPO_Spectrometer\\Spectral_Band_pass\\\n All_FWHM\\spectral_bandpass_1400'\n\n #start with Gaussian Bandpass\n# data_names = [each for each in os.listdir(file_path)\n# if each.startswith(\"Params_Gauss\")]\n#\n#\n# sample_data = []\n# for data_files in data_names[9:]:\n# #print(data_files)\n#\n# wavelen_suffix = data_files.split('_')[-1]\n#\n# pixel_to_wvl_map_data = sorted([each for each in os.listdir(pixel_to_wavelen_dir)\n# if each.endswith(wavelen_suffix)])\n#\n# gaussian_files = os.path.join(file_path, data_files)\n#\n# dframe = pd.read_csv(gaussian_files)\n# #dframe = dframe[['A1', 'A2', 'Sigma1', 'Sigma2']]\n# dframe = dframe[['A1', 'Sigma1']] # for Gaussian only\n# pixel_to_wav_map = os.path.join(pixel_to_wavelen_dir, pixel_to_wvl_map_data[0])\n# dframe1 = pd.read_csv(pixel_to_wav_map)\n# dframe['CW'] = 
dframe1['CW']\n# dframe = dframe.iloc[1400]\n# sample_data.append(dframe.values)\n # for flat top Gaussian\n# #gaussian_values = perform_spectral_interpolation(np.array(sample_data))\n\n# gaussian_values = perform_spectral_interpolation_only_gaussian(np.array(sample_data))\n#\n##\n## # Let us now create a spectral bandpass\n# #create_spectral_bandpass(gaussian_values, radiance_file, file_path) # flat top Gaussian\n# create_spectral_bandpass_only_gaussian(gaussian_values, radiance_file, file_path)\n#\n#\n## #Make sure that the center wavelength of Gaussians are the same\n## sample_val = []\n## data_names_interpol = sorted([each for each in os.listdir(file_path_2)\n## if each.endswith('csv')])\n## interpol_wavelen = []\n## interpol_rad = [ ]\n##\n## for i in range(0, 64):\n## sub_sample_wvl = []\n## sub_sample_rad = []\n##\n## for files in data_names_interpol[9:]:\n##\n## interpol_rsr = os.path.join(file_path_2, files)\n## dframe = pd.read_csv(interpol_rsr, usecols=[\"wavelength\", \"rad\"])\n##\n## wavelength = dframe['wavelength'][i]\n## rad = dframe['rad'][i]\n## sub_sample_wvl.append(wavelength)\n## sub_sample_rad.append(rad)\n## dframe = perform_point_interpolation(sub_sample_wvl, sub_sample_rad,\n #np.array(sample_data)[:,-1])\n## interpol_rad.append(dframe['rad'].values)\n## interpol_wavelen.append(dframe['wavelength'].values)\n## create_spectral_bandpass_interpol(np.array(interpol_wavelen),\n #np.array(interpol_rad),\n #np.array(sample_data)[:,-1], file_path_2)\n# cc\n##\n#\n##\n###\n## # let us now perfrom spectral convolution with high res. radiance data\n calculate_in_band_irradiance(file_path, file_path_2, radiance_file)", "def _plot_psth_flat(self, sigma=5, figsize = (15, 8)):\n\t\n\t\tgaus_filt = sp.ndimage.gaussian_filter1d\n\t\tall_resp = gaus_filt(self.conditions_hist_mean.flatten(), sigma)\n\t\t\n\t\tfig = plt.figure(figsize=figsize)\n\t\tax = fig.add_subplot(1, 1, 1)\n\t\t\n\t\tax.plot(all_resp, linestyle='-', color='0.28')\n\t\t\n\t\tn_con = self.parameters['conditions']\n\t\tcon_mark = np.arange(0, (self.bins.size -1) * n_con, self.bins.size -1)\n\t\t\t\t\n\t\tax.xaxis.set_ticks(con_mark)\n\n\t\ttry:\n\t\t\tax.xaxis.set_ticklabels(self.cond_label)\n\t\texcept:\n\t\t\tax.xaxis.set_ticklabels(np.unique(self.marker_codes))\n\t\t\n\t\tfreq_label = np.round(ax.get_yticks() * (1/self.bin_width),\n\t\t\t\t\t\t\t decimals = 1)\n\t\tax.set_yticklabels(freq_label)\n\t\tax.set_ylabel('Frequency')\n\t\t\n\t\tfor label in ax.xaxis.get_majorticklabels():\n\t\t\tlabel.set_horizontalalignment('left')\n\t\t\t\n\t\tax.set_xlim(0, (self.bins.size -1) * n_con)\n\t\t\n\t\t# bug with macosx backend\n# plt.tight_layout()\n\t\tplt.subplots_adjust(hspace=0.45)", "def _plot_update(self):\n omit_log = ['sens_log']\n for log_group, log_arrays in self.qbpm.log_names.items():\n for log_array in log_arrays:\n if log_array not in omit_log:\n self.curves[log_array].setData(self.qbpm.log_time, self.qbpm.log_arrays[log_array],clear=True)\n # self.fill.setCurves(self.curves['posz_sens_low_log'], self.curves['posz_sens_high_log'])", "def soMultiIndex(self):\n # reset column levels\n self.sofiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])\n self.spsofiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])\n\n # list df vars for index specs\n # dfs =[self.sofiltEEG] # for > speed, don't store spinfilt_RMS as an attribute\n # calcs = ['Filtered']\n 
# lvl0 = np.repeat(self.channels, len(calcs))\n # lvl1 = calcs*len(self.channels) \n \n # # combine & custom sort --> what was this for??\n # self.so_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])", "def export_unitary_EPSP_traces():\n start_time = time.time()\n description = 'unitary_EPSP_traces'\n trace_len = int((context.ISI['units'] + context.trace_baseline) / context.dt)\n context.temp_model_data_legend = dict()\n\n model_keys = list(context.temp_model_data.keys())\n model_keys = context.interface.global_comm.gather(model_keys, root=0)\n if context.interface.global_comm.rank == 0:\n model_keys = list(set([key for key_list in model_keys for key in key_list]))\n else:\n model_keys = None\n model_keys = context.interface.global_comm.bcast(model_keys, root=0)\n\n if context.temp_model_data_file_path is None:\n if context.interface.global_comm.rank == 0:\n context.temp_model_data_file_path = '%s/%s_uuid%i_%s_temp_model_data.hdf5' % \\\n (context.output_dir,\n datetime.datetime.today().strftime('%Y%m%d_%H%M'),\n uuid.uuid1(),\n context.optimization_title)\n context.temp_model_data_file_path = \\\n context.interface.global_comm.bcast(context.temp_model_data_file_path, root=0)\n context.temp_model_data_file = h5py.File(context.temp_model_data_file_path, 'a', driver='mpio',\n comm=context.interface.global_comm)\n\n for i, model_key in enumerate(model_keys):\n group_key = str(i)\n context.temp_model_data_legend[model_key] = group_key\n if group_key not in context.temp_model_data_file:\n context.temp_model_data_file.create_group(group_key)\n if description not in context.temp_model_data_file[group_key]:\n context.temp_model_data_file[group_key].create_group(description)\n for syn_group in context.syn_id_dict:\n context.temp_model_data_file[group_key][description].create_group(syn_group)\n num_syn_ids = len(context.syn_id_dict[syn_group])\n for syn_condition in context.syn_conditions:\n context.temp_model_data_file[group_key][description][syn_group].create_group(syn_condition)\n for rec_name in context.synaptic_integration_rec_names:\n context.temp_model_data_file[group_key][description][syn_group][\n syn_condition].create_dataset(rec_name, (num_syn_ids, trace_len), dtype='f8')\n\n target_rank = i % context.interface.global_comm.size\n if model_key in context.temp_model_data:\n this_temp_model_data = context.temp_model_data.pop(model_key)\n else:\n this_temp_model_data = {}\n this_temp_model_data = context.interface.global_comm.gather(this_temp_model_data, root=target_rank)\n if context.interface.global_comm.rank == target_rank:\n context.temp_model_data[model_key] = {description: {}}\n for element in this_temp_model_data:\n if element:\n dict_merge(context.temp_model_data[model_key], element)\n context.interface.global_comm.barrier()\n\n for model_key in context.temp_model_data:\n context.temp_model_data[model_key][description] = \\\n consolidate_unitary_EPSP_traces(context.temp_model_data[model_key][description])\n group_key = context.temp_model_data_legend[model_key]\n for syn_group in context.temp_model_data[model_key][description]:\n for syn_condition in context.temp_model_data[model_key][description][syn_group]:\n for rec_name in context.temp_model_data[model_key][description][syn_group][syn_condition]:\n context.temp_model_data_file[group_key][description][syn_group][syn_condition][\n rec_name][:,:] = \\\n context.temp_model_data[model_key][description][syn_group][syn_condition][rec_name]\n\n context.interface.global_comm.barrier()\n context.temp_model_data_file.flush()\n\n 
del context.temp_model_data\n context.temp_model_data = dict()\n\n sys.stdout.flush()\n time.sleep(1.)\n\n if context.interface.global_comm.rank == 0 and context.disp:\n print('optimize_DG_GC_synaptic_integration: export_unitary_EPSP_traces took %.2f s' %\n (time.time() - start_time))\n sys.stdout.flush()\n time.sleep(1.)", "def update_full_traces(self, nStimulus, epoch, diverge):\n self.full_traces['Harmony_trace'][nStimulus,\n epoch, :] = self.vars['Harmony_trace']\n #self.full_traces['speed_trace'][nStimulus, epoch, : ] = self.vars['speed_trace']\n #self.full_traces['ema_trace'][nStimulus, epoch, : ] = self.vars['ema_trace']\n\n self.full_traces['lambda_trace'][nStimulus,\n epoch, :] = self.vars['lambda_trace']\n self.full_traces['time_trace'][nStimulus,\n epoch, :] = self.vars['temp_trace']\n self.full_traces['TP_h_trace'][nStimulus,\n epoch, :] = self.vars['TP_h_trace']\n self.full_traces['TP_dist_trace'][nStimulus,\n epoch, :] = self.vars['TP_dist_trace']\n self.full_traces['S_trace'][nStimulus,\n epoch, :, :, :] = self.vars['S_trace']\n self.full_traces['reaction_times'][nStimulus,\n epoch] = self.vars['step']\n self.full_traces['divergence'][nStimulus, epoch] = diverge\n self.full_traces['winners_idx'][nStimulus,\n epoch, :, :] = self.vars['winners']\n\n key = self.inputNames[nStimulus] + \"/rep_\" + str(epoch)\n self.full_traces['TP_trace'][key] = self.vars['TP_trace']\n # TODO: change this!\n self.full_traces['TPnum_trace'][nStimulus,\n epoch, :] = self.vars['TPnum_trace']", "def dotheglm(sensitivities, eventdir):\n sensitivities_stacked = mv.vstack(sensitivities)\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.bilat_ROIs)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.all_ROIs)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())\n\n # average onsets into one event file\n events = get_group_events(eventdir)\n # save the event_file\n fmt = \"%10.3f\\t%10.3f\\t%16s\\t%60s\"\n np.savetxt(results_dir + 'group_events.tsv', events, delimiter='\\t', comments='',\n header='onset\\tduration\\ttrial_type\\tstim_file', fmt=fmt)\n # get events into dictionary\n events_dicts = []\n for i in range(0, len(events)):\n dic = {\n 'onset': events[i][0],\n 'duration': events[i][1],\n 'condition': events[i][2]\n }\n events_dicts.append(dic)\n\n hrf_estimates = mv.fit_event_hrf_model(mean_sens_transposed,\n events_dicts,\n time_attr='time_coords',\n condition_attr='condition',\n design_kwargs=dict(drift_model='blank'),\n glmfit_kwargs=dict(model='ols'),\n return_model=True)\n mv.h5save(results_dir + 'sens_glm_objectcategories_results.hdf5', hrf_estimates)\n print('calculated glm, saving results.')\n return hrf_estimates", "def _trace_production(self, production, p, span, width):\n ...", "def normalizeWeights(self, filter):\n\n (wavelengths, weights) = filter\n weights = np.array(weights, dtype=np.float64)\n sum = weights.sum()\n weights /= sum\n\n return (wavelengths, weights)", "def scaleLandsat(self,img):\n\t\tthermal = img.select(ee.List(['thermal'])).multiply(0.1)\n\t\tscaled = ee.Image(img).select(self.env.divideBands).multiply(ee.Number(0.0001))\n\t\t\n\t\treturn img.select([]).addBands(scaled).addBands(thermal)" ]
[ "0.53677773", "0.52855563", "0.52082103", "0.5192049", "0.50655925", "0.50220954", "0.5019231", "0.4980716", "0.496982", "0.49639016", "0.496379", "0.49591964", "0.49528778", "0.49409777", "0.4926693", "0.49150053", "0.48961598", "0.4841809", "0.48396593", "0.48142034", "0.48041737", "0.4799263", "0.47968122", "0.4772713", "0.4767107", "0.47551715", "0.47549874", "0.4741252", "0.47255725", "0.47172722", "0.47167602", "0.47130874", "0.47007224", "0.46955723", "0.4678708", "0.46780452", "0.46759778", "0.4671364", "0.46687865", "0.46592057", "0.4655478", "0.4652814", "0.46520323", "0.46404922", "0.46403602", "0.4637952", "0.46325296", "0.46226516", "0.46223143", "0.46208334", "0.4619907", "0.4617902", "0.4614845", "0.4611181", "0.46104932", "0.4602927", "0.4599217", "0.45944554", "0.45916486", "0.45910332", "0.45905018", "0.45856118", "0.45820823", "0.45741093", "0.4571888", "0.45709315", "0.45688385", "0.45673308", "0.45670876", "0.45568", "0.4555245", "0.45537177", "0.454976", "0.45441422", "0.4539132", "0.45370328", "0.4536123", "0.45348608", "0.45325017", "0.4527757", "0.45233196", "0.45220932", "0.45144835", "0.45140898", "0.45094377", "0.45021954", "0.45012188", "0.44994494", "0.44935212", "0.4491853", "0.4491076", "0.44890803", "0.44884193", "0.44878936", "0.448493", "0.44790912", "0.44786942", "0.44755915", "0.44747147" ]
0.5468182
1
Call function to transform and format common fields in results.
def __call__(self, results):
        if 'img' in results:
            img = results['img']
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        if 'gt_semantic_seg' in results:
            # convert to long
            results['gt_semantic_seg'] = DC(to_tensor(
                results['gt_semantic_seg'][None, ...].astype(np.int64)),
                stack=True)
        if 'gt_masks' in results:
            results['gt_masks'] = DC(to_tensor(results['gt_masks']))
        if 'gt_labels' in results:
            results['gt_labels'] = DC(to_tensor(results['gt_labels']))
        return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform():", "def transform():\n pass", "def transform(self):", "def _apply_transform(self):\n pass", "def _output_format(cls, func, override=None):\n @wraps(func)\n def _format_wrapper(self, *args, **kwargs):\n json_response, data_key, meta_data_key = func(self, *args, **kwargs)\n data = json_response[data_key]\n if meta_data_key is not None:\n meta_data = json_response[meta_data_key]\n else:\n meta_data = None\n # Allow to override the output parameter in the call\n if override is None:\n output_format = self.output_format.lower()\n elif 'json' or 'pandas' in override.lower():\n output_format = override.lower()\n # Choose output format\n if output_format == 'json':\n return data, meta_data\n elif output_format == 'pandas':\n data_pandas = pandas.DataFrame.from_dict(data,\n orient='index', dtype=float)\n # Rename columns to have a nicer name\n col_names = [re.sub(r'\\d+.', '', name).strip(' ')\n for name in list(data_pandas)]\n data_pandas.columns = col_names\n return data_pandas, meta_data\n else:\n raise ValueError('Format: {} is not supported'.format(\n self.output_format))\n return _format_wrapper", "def transform(self, data):", "def get_transform_fn():", "def Transform(self, record):\n pass", "def _process(self):\n # choose the correct transform model before processing TI data\n self._select_transform()\n\n # process type first, fail early\n self._process_type()\n\n # process type specific data\n if isinstance(self.transform, GroupTransformModel):\n self._process_group()\n elif isinstance(self.transform, IndicatorTransformModel):\n self._process_indicator()\n\n # self.process_associations(self.transform.associations)\n self._process_associated_group(self.transform.associated_groups)\n self._process_attributes(self.transform.attributes or [])\n self._process_security_labels(self.transform.security_labels or [])\n self._process_tags(self.transform.tags or [])\n\n # date added\n self._process_metadata_datetime('dateAdded', self.transform.date_added)\n\n # last modified\n self._process_metadata_datetime('lastModified', self.transform.last_modified)\n\n # xid\n self._process_metadata('xid', self.transform.xid)", "def transform(self, record):\n if not record:\n return None\n\n if not type(record) is DASRecord:\n logging.warning('Improper format record: %s',record)\n return None\n\n update = False\n for field_name in record.fields:\n if field_name in self.course_fields:\n self.course_val = record.fields[field_name]\n elif field_name in self.speed_fields:\n self.speed_val = record.fields[field_name] * self.convert_speed_factor\n elif field_name in self.heading_fields:\n self.heading_val = record.fields[field_name]\n elif field_name in self.wind_dir_fields:\n self.wind_dir_val = record.fields[field_name]\n elif field_name in self.wind_speed_fields:\n self.wind_speed_val = record.fields[field_name] * self.convert_wind_factor\n \n if field_name in self.update_on_fields:\n update = True\n\n # If we've not seen anything that updates fields that would\n # trigger a new true winds value, return None.\n if not update:\n return None\n\n if self.course_val is None:\n logging.info('Still missing course_val')\n return None\n if self.speed_val is None:\n logging.info('Still missing speed_val')\n return None\n if self.heading_val is None:\n logging.info('Still missing heading_val')\n return None\n if self.wind_dir_val is None:\n logging.info('Still missing wind_dir_val')\n return None\n if self.wind_speed_val is None:\n logging.info('Still missing wind_speed_val')\n return None\n\n 
logging.info('Computing new true winds')\n (true_dir, true_speed, app_dir) = truew(crse=self.course_val,\n cspd=self.speed_val,\n hd=self.heading_val,\n wdir=self.wind_dir_val,\n zlr=self.zero_line_reference,\n wspd=self.wind_speed_val)\n\n logging.info('Got true winds: dir: %s, speed: %s, app_dir: %s',\n true_dir, true_speed, app_dir)\n if true_dir is None or true_speed is None or app_dir is None:\n logging.info('Got invalid true winds')\n return None\n\n # If here, we've got a valid new true wind result\n if self.output_nmea:\n new_record = '%s %s %g,%g,%g' % (self.data_id,\n time_str(record.timestamp),\n true_dir, true_speed, app_dir)\n else:\n new_record = DASRecord(data_id=self.data_id,\n timestamp=record.timestamp,\n fields={'TrueWindDir': true_dir,\n 'TrueWindSpeed': true_speed,\n 'ApparentWindDir': app_dir})\n return new_record", "def format(self,result,filter='id',field=None):\n #print('field in format is',field,filter,file=sys.stderr)\n if not field: field=self.ID\n #print('field in format is',field,filter,file=sys.stderr)\n\n #check that result is a request object or a dictionary\n if not isinstance(result,requests.models.Response):\n if not isinstance(result,dict):\n return str(result)\n else:\n rr=result\n text=str(result)\n else:\n text=result.text\n rr=result.json()\n\n if self.debug:\n print(\"MPO_ARG DEBUG format:\",field,result,str(type(result)),text,file=sys.stderr)\n\n\n if filter=='id':\n if self.RESULT in rr: #then format is to have record list in result field\n rr=rr[self.RESULT]\n output=[]\n if isinstance(rr,list):\n if self.debug:\n print(\"Caution, response format of 'id' used when result is a list.\",file=sys.stderr)\n print(\"Returning list of ID's\",file=sys.stderr)\n\n for r in rr:\n output.append(str(r[field]))\n\n if len(output)==1: #if it is a one element list, just return contents.\n output=output[0]\n elif field in rr:\n output=rr[field]\n\n elif filter=='json' or filter=='dict':\n output=rr\n elif filter=='pretty':\n output=json.dumps(rr,separators=(',', ':'),indent=4)\n elif filter=='raw':\n output=result\n print('raw header',output.headers,file=sys.stderr)\n print('raw type',str(type(output)),file=sys.stderr)\n print('raw content',output.text,file=sys.stderr)\n output=str(result)\n elif filter=='text':\n output=str(text)\n else:\n output=str(result)\n\n return output", "def transform(self, *args, **kwargs):\n raise NotImplementedError", "def _transform(self, document):\n pass", "def process_field_mapping(self, analysis, observable: Observable, result, result_field, result_time=None) -> None:\n pass", "def _transform_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def _set_transforms(self, transform: Dict[str, Union[Field, Dict]]) -> None:\n columns: Union[int, List[int]]\n\n for k, t in enumerate(transform.items()):\n name, value = t\n if isinstance(value, Field):\n field = value\n columns = k\n else:\n try:\n field, tmp_cols = value['field'], value.get('columns', k)\n\n # Process as list to avoid repeating code\n if not isinstance(tmp_cols, List):\n tmp_cols = [tmp_cols]\n\n for i, c in enumerate(tmp_cols[:]):\n if isinstance(c, str):\n if not self.named_columns:\n raise ValueError(\n \"Columns parameter is required for str-based indexing\"\n )\n try:\n tmp_cols[i] = self.named_columns.index(c)\n except ValueError:\n raise ValueError(\n f\"Dataset has no column name {c}. 
\" +\n f\"Available columns: {self.named_columns}\"\n )\n\n columns = tmp_cols\n\n # If it was a value originally then process\n # it as a single value\n if len(tmp_cols) == 1:\n columns = tmp_cols[0]\n\n except KeyError:\n raise ValueError(\n f\"If a dict is provided in 'transform', then it must have the 'field' key.\"\n f\" transform item = {k, t}\"\n )\n\n setattr(self, name, field)\n args = [self._train[:, columns]]\n if self._val is not None:\n args.append(self._val[:, columns])\n if self._test is not None:\n args.append(self._test[:, columns])\n field.setup(*args)\n self.transform_hooks.append((field, columns))", "def transform(self, func):\n # TODO: codes here are similar with GroupBy.apply. Needs to deduplicate.\n if not isinstance(func, Callable):\n raise TypeError(\"%s object is not callable\" % type(func))\n\n assert callable(func), \"the first argument should be a callable function.\"\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n if return_sig is None:\n raise ValueError(\"Given function must have return type hint; however, not found.\")\n\n return_type = _infer_return_type(func).tpe\n input_groupnames = [s.name for s in self._groupkeys]\n data_columns = self._kdf._internal.data_columns\n return_schema = StructType([\n StructField(c, return_type) for c in data_columns if c not in input_groupnames])\n\n index_columns = self._kdf._internal.index_columns\n index_names = self._kdf._internal.index_names\n data_columns = self._kdf._internal.data_columns\n\n def rename_output(pdf):\n # TODO: This logic below was borrowed from `DataFrame.pandas_df` to set the index\n # within each pdf properly. we might have to deduplicate it.\n import pandas as pd\n\n if len(index_columns) > 0:\n append = False\n for index_field in index_columns:\n drop = index_field not in data_columns\n pdf = pdf.set_index(index_field, drop=drop, append=append)\n append = True\n pdf = pdf[data_columns]\n\n if len(index_names) > 0:\n if isinstance(pdf.index, pd.MultiIndex):\n pdf.index.names = index_names\n else:\n pdf.index.name = index_names[0]\n\n # pandas GroupBy.transform drops grouping columns.\n pdf = pdf.drop(columns=input_groupnames)\n pdf = pdf.transform(func)\n # Remaps to the original name, positionally.\n pdf = pdf.rename(columns=dict(zip(pdf.columns, return_schema.fieldNames())))\n return pdf\n\n grouped_map_func = pandas_udf(return_schema, PandasUDFType.GROUPED_MAP)(rename_output)\n\n sdf = self._kdf._sdf\n input_groupkeys = [s._scol for s in self._groupkeys]\n sdf = sdf.groupby(*input_groupkeys).apply(grouped_map_func)\n internal = _InternalFrame(\n sdf=sdf, data_columns=return_schema.fieldNames(), index_map=[]) # index is lost.\n return DataFrame(internal)", "def apply(input, output, fields, delimiter, encoding, verbose, format_in, zipfile, script, filter):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['fields'] = fields\n options['output'] = output\n options['encoding'] = encoding\n options['format_in'] = format_in\n options['zipfile'] = zipfile\n options['filter'] = filter\n options['script'] = script\n acmd = Transformer()\n acmd.script(input, options)\n pass", "def content_for_fields(klass, result, fields, highlight_fields):\n return tuple(result[field] for field in highlight_fields)", "def run(self, data, config=None, pipeline=None):\n value = self.value\n if self.reformat:\n value = value.format(**data)\n data[self.field] = value\n return data", "def reformat(ctx):\n pass", "def apply(self, 
transform_func):\n #input_shapes = transform_func.input_shapes\n #input_types = transform_func.input_types\n #data_shapes = transform_func.data_shapes\n #data_types = transform_func.data_types\n #assert input_shapes == self._data_shapes\n #assert input_types = self._data_types\n ret_gen = transform_func(self.generator)\n ret = type(self).from_generator_func(ret_gen)\n if self.name is not None:\n ret.name = self.name\n #ret.data_shapes = data_shapes\n #ret.data_types = data_types\n return ret", "def valueformat(value, format_list):\n\n # print(\"\\n\", format_list, value)\n concat_key = format_list.split('.')\n # Pass in either the key of the field\n # or pass in resource.key to enable a resource lookup.\n key = \"\"\n resource = \"\"\n member_id = \"\"\n key_sequence = [key, resource, member_id]\n count = 0\n for r in reversed(concat_key):\n key_sequence[count] = r\n count += 1\n\n # print(\"Concat_key:\", concat_key)\n key = key_sequence[0]\n resource = key_sequence[1]\n member_id = key_sequence[2]\n\n # print(\"Key:\", key)\n\n if key:\n if key.lower() == \"address\":\n return dt_address(value)\n\n elif key.lower() == \"telecom\":\n return dt_telecom(value)\n\n elif key.lower() == \"name\":\n return dt_name(value)\n elif key.lower() == 'dosage':\n return dt_dosage(value)\n elif key.lower() == 'medicationreference':\n # print(\"Working on\", key, \": \", value)\n # f_value = value\n # lookup field_formats\n # concat_key should have a resource name\n # print(\"\\n\\nRESOURCE:\", resource)\n # print(\"calling dt_medicationreference with Resource:\", resource, \", value:\", value)\n return dt_medicationreference(value, member_id, resource)\n elif key.lower() == 'dataabsentreason':\n if isinstance(value, dict):\n return value['coding'][0]['display']\n else:\n return value\n elif key.lower() == 'valuequantity':\n # return str(value['value']) + \" \" + value['unit']\n return dt_valuequantity(value)\n elif key.lower() == 'valuestring':\n return value\n elif key.lower() == 'interpretation':\n return value['coding'][0]['display']\n elif key.lower() == 'referencerange':\n return dt_referencerange(value)\n elif key.lower() == 'requester':\n if 'display' in value['agent']:\n return dt_reference(value['agent'], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n # elif key.lower() == \"result\":\n # return dt_reference(value[0], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'participant':\n if 'display' in value[0]['individual']:\n return dt_reference(value[0]['individual'], member_id)\n elif key.lower() == 'location':\n if 'display' in value[0]['location']:\n return dt_reference(value[0]['location'], member_id)\n elif key.lower() == 'communication':\n return dt_communication(value)\n else:\n # print(\"value:\", value, \" type:\", type(value), \" for: \", key)\n return value", "def transform(self, results: Dict) -> Optional[Dict]:\n for t in self.transforms:\n results = t(results) # type: ignore\n if results is None:\n return None\n return results", "def __call__(self, results):\n\n results = self._mixup_transform(results)\n return results", "def data_transform(self, df) :\n\n 
#-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def __call__(self, results):\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = general_ocr.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results", "def transform(self):\n with open(self.csv_path, \"r\") as f:\n csv_entries = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]\n\n nested_fields = get_nested_fieldnames(csv_entries[0])\n # values of these fields should be transformed to a list\n # list_fields = set()\n # for entry in csv_entries:\n # for k, v in entry.items():\n # if '||' in v:\n # list_fields.add(k)\n list_fields = {\n \"BITSTREAM Download URL\",\n \"BITSTREAM License\",\n \"BITSTREAM Webshop URL\",\n \"dc.contributor\",\n \"dc.contributor.author\",\n \"dc.contributor.editor\",\n \"dc.date.available\",\n \"dc.date.accessioned\",\n \"dc.date.issued\",\n \"dc.date.submitted\",\n \"dc.dateSubmitted\",\n \"dc.description.abstract\",\n \"dc.description.provenance\",\n \"dc.grantproject\",\n \"dc.identifier\",\n \"dc.identifier.pr\",\n \"dc.language\",\n \"dc.notes\",\n \"dc.number\",\n \"dc.redirect\",\n \"dc.relation.ispartofseries\",\n \"dc.relationisFundedBy\",\n \"dc.subject\",\n \"dc.subject.classification\",\n \"dc.subject.other\",\n \"dc.title\",\n \"dc.title.alternative\",\n \"dc.type\",\n \"oapen.collection\",\n \"oapen.grant.number\",\n \"oapen.grant.program\",\n \"oapen.imprint\",\n \"oapen.relation.hasChapter\",\n \"oapen.relation.hasChapter_dc.title\",\n \"oapen.relation.isFundedBy\",\n \"oapen.relation.isFundedBy_grantor.name\",\n \"oapen.relation.isPartOfBook\",\n \"oapen.relation.isPartOfBook_dc.title\",\n \"oapen.relation.isPublishedBy_publisher.name\",\n \"oapen.relation.isPublisherOf\",\n \"oapen.relation.isbn\",\n \"oapen.remark.public\",\n \"peerreview.anonymity\",\n \"peerreview.id\",\n \"peerreview.open.review\",\n \"peerreview.publish.responsibility\",\n \"peerreview.review.decision\",\n \"peerreview.review.stage\",\n \"peerreview.review.type\",\n \"peerreview.reviewer.type\",\n }\n # add custom 'dc.subject.classification_code'\n list_fields.add(\"dc.subject.classification_code\")\n entries = transform_dict(csv_entries, convert, nested_fields, list_fields)\n\n # Transform release into JSON Lines format saving 
in memory buffer\n # Save in memory buffer to gzipped file\n list_to_jsonl_gz(self.transform_path, entries)", "def _template_formatting(field, inputs, inputs_dict_st):\n from .specs import MultiInputObj, MultiOutputFile\n\n # if a template is a function it has to be run first with the inputs as the only arg\n template = field.metadata[\"output_file_template\"]\n if callable(template):\n template = template(inputs)\n\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(r\"{\\w+}\", template)\n inp_fields_fl = re.findall(r\"{\\w+:[0-9.]+f}\", template)\n inp_fields += [re.sub(\":[0-9.]+f\", \"\", el) for el in inp_fields_fl]\n if len(inp_fields) == 0:\n return template\n\n val_dict = {}\n file_template = None\n\n for fld in inp_fields:\n fld_name = fld[1:-1] # extracting the name form {field_name}\n if fld_name not in inputs_dict_st:\n raise AttributeError(f\"{fld_name} is not provided in the input\")\n fld_value = inputs_dict_st[fld_name]\n if fld_value is attr.NOTHING:\n # if value is NOTHING, nothing should be added to the command\n return attr.NOTHING\n else:\n # checking for fields that can be treated as a file:\n # have type File, or value that is path like (including str with extensions)\n if isinstance(fld_value, os.PathLike) or (\n isinstance(fld_value, str) and \".\" in fld_value\n ):\n if file_template:\n raise Exception(\n f\"can't have multiple paths in {field.name} template,\"\n f\" but {template} provided\"\n )\n else:\n file_template = (fld_name, fld_value)\n else:\n val_dict[fld_name] = fld_value\n\n # if field is MultiOutputFile and some elements from val_dict are lists,\n # each element of the list should be used separately in the template\n # and return a list with formatted values\n if field.type is MultiOutputFile and any(\n [isinstance(el, (list, MultiInputObj)) for el in val_dict.values()]\n ):\n # all fields that are lists\n keys_list = [\n k for k, el in val_dict.items() if isinstance(el, (list, MultiInputObj))\n ]\n if any(\n [len(val_dict[key]) != len(val_dict[keys_list[0]]) for key in keys_list[1:]]\n ):\n raise Exception(\n f\"all fields used in {field.name} template have to have the same length\"\n f\" or be a single value\"\n )\n formatted_value = []\n for ii in range(len(val_dict[keys_list[0]])):\n val_dict_el = copy(val_dict)\n # updating values to a single element from the list\n for key in keys_list:\n val_dict_el[key] = val_dict[key][ii]\n\n formatted_value.append(\n _element_formatting(\n template, val_dict_el, file_template, keep_extension=keep_extension\n )\n )\n else:\n formatted_value = _element_formatting(\n template, val_dict, file_template, keep_extension=keep_extension\n )\n return formatted_value", "def _process_group(self):\n if not isinstance(self.transform, GroupTransformModel):\n return\n\n self._process_name()\n\n if self.transformed_item['type'] == 'Campaign':\n self._process_metadata_datetime('firstSeen', self.transform.first_seen)\n\n if self.transformed_item['type'] == 'Document':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('malware', self.transform.malware)\n self._process_metadata('password', self.transform.password)\n\n if self.transformed_item['type'] == 'Email':\n self._process_metadata('body', self.transform.body)\n self._process_metadata('from', self.transform.from_addr)\n self._process_metadata('header', self.transform.header)\n self._process_metadata('subject', self.transform.subject)\n 
self._process_metadata('to', self.transform.to_addr)\n\n if self.transformed_item['type'] in ('Event', 'Incident'):\n self._process_metadata_datetime('eventDate', self.transform.event_date)\n self._process_metadata('status', self.transform.status)\n\n if self.transformed_item['type'] == 'Report':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata_datetime('publishDate', self.transform.publish_date)\n\n # Handle sig specific fields here\n if self.transformed_item['type'] == 'Signature':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('fileType', self.transform.file_type)\n self._process_metadata('fileText', self.transform.file_text)", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def _read_group_format_result_custom(self, data, annotated_groupbys, groupby, domain):\n\n sections = []\n for gb in annotated_groupbys:\n ftype = gb['type']\n value = data[gb['groupby']]\n\n # full domain for this groupby spec\n d = None\n if value:\n if ftype == 'many2one':\n value = value[0]\n elif ftype in ('date', 'datetime'):\n locale = self._context.get('lang') or 'en_US'\n if locale == \"ar_SY\":\n locale = \"ar\"\n fmt = DEFAULT_SERVER_DATETIME_FORMAT if ftype == 'datetime' else DEFAULT_SERVER_DATE_FORMAT\n tzinfo = None\n range_start = value\n range_end = value + gb['interval']\n # value from postgres is in local tz (so range is\n # considered in local tz e.g. \"day\" is [00:00, 00:00[\n # local rather than UTC which could be [11:00, 11:00]\n # local) but domain and raw value should be in UTC\n if gb['tz_convert']:\n tzinfo = range_start.tzinfo\n range_start = range_start.astimezone(pytz.utc)\n range_end = range_end.astimezone(pytz.utc)\n\n range_start = range_start.strftime(fmt)\n range_end = range_end.strftime(fmt)\n if ftype == 'datetime':\n label = babel.dates.format_datetime(\n value, format=gb['display_format'],\n tzinfo=tzinfo, locale=locale\n )\n else:\n label = babel.dates.format_date(\n value, format=gb['display_format'],\n locale=locale\n )\n data[gb['groupby']] = ('%s/%s' % (range_start, range_end), label)\n d = [\n '&',\n (gb['field'], '>=', range_start),\n (gb['field'], '<', range_end),\n ]\n\n if d is None:\n d = [(gb['field'], '=', value)]\n sections.append(d)\n sections.append(domain)\n\n data['__domain'] = expression.AND(sections)\n if len(groupby) - len(annotated_groupbys) >= 1:\n data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}\n del data['id']\n return data", "def build_format(i, ex, args, meta_args):\n formatter = string.Formatter()\n format_string = meta_args.format_string\n fields = list(formatter.parse(format_string))\n\n kwarg_fields = []\n indexed_fields = []\n\n i.result = hive.variable('str')\n i.result_out = hive.pull_out(i.result)\n\n for index, field in enumerate(fields):\n literal_text = field[1]\n\n if literal_text is None:\n continue\n\n if not literal_text.isidentifier():\n field_name = \"field_{}\".format(index)\n indexed_fields.append(field_name)\n\n else:\n field_name = literal_text\n kwarg_fields.append(field_name)\n\n # Create IO\n attr = hive.variable()\n setattr(i, field_name, attr)\n\n in_attr = hive.pull_in(attr)\n setattr(i, \"{}_in\".format(field_name), in_attr)\n\n setattr(ex, field_name, hive.antenna(in_attr))\n hive.trigger(i.result_out, in_attr, pretrigger=True)\n\n ex.result = hive.output(i.result_out)\n\n def do_format(self):\n args = [getattr(self, \"_{}\".format(attr_name)) for attr_name in indexed_fields]\n kwargs = {attr_name: 
getattr(self, \"_{}\".format(attr_name)) for attr_name in kwarg_fields}\n self._result = formatter.format(format_string, *args, **kwargs)\n\n i.func = hive.modifier(do_format)\n hive.trigger(i.result_out, i.func, pretrigger=True)", "def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict", "def stringify_results(self, results, format_fn=str):\n processed = []\n\n for row in results:\n processed.append([format_fn(v) for v in row])\n\n return processed", "def __call__(self, results):\n for key in results.get('img_fields', ['img']):\n results[key] = general_ocr.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results", "def transform(self, data, input_content_type, output_content_type):\n return self.transform_fn(data, input_content_type, output_content_type)", "def _apply_to_texts(text_func, meta_dict, args, kwargs):\n if isinstance(meta_dict, 
dict):\n for key in list(meta_dict.keys()):\n if key in ['sets', 'ddf']:\n pass\n elif key == 'text' and isinstance(meta_dict[key], dict):\n text_func(meta_dict[key], *args, **kwargs)\n else:\n DataSet._apply_to_texts(text_func, meta_dict[key], args, kwargs)\n\n elif isinstance(meta_dict, list):\n for item in meta_dict:\n DataSet._apply_to_texts(text_func, item, args, kwargs)", "def transform(self, *fs):\n return transform(self, *fs)", "def transform(self, results: Dict) -> Dict:\n\n # Apply mapping\n inputs = self._map_input(results, self.mapping)\n # Apply wrapped transforms\n outputs = self._apply_transforms(inputs)\n # Apply remapping\n outputs = self._map_output(outputs, self.remapping)\n\n results.update(outputs) # type: ignore\n return results", "def records_to_human_readable_output(fields: str, table_name: str, results: list) -> str:\n filtered_results: list = []\n\n if fields == '*':\n for result in results:\n filtered_result = {\n 'Source Address': result.get('source_ip', {}).get('value'),\n 'Destination Address': result.get('dest_ip', {}).get('value'),\n 'Application': result.get('app'),\n 'Action': result.get('action', {}).get('value'),\n 'RuleMatched': result.get('rule_matched'),\n 'TimeGenerated': human_readable_time_from_epoch_time(result.get('time_generated')),\n 'FileID': result.get('file_id'),\n 'FileName': result.get('file_name'),\n 'FileType': result.get('file_type')\n }\n filtered_results.append(filtered_result)\n else:\n for result in results:\n filtered_result = {}\n for root in result.keys():\n parsed_tree: dict = parse_tree_by_root_to_leaf_paths(root, result[root])\n filtered_result.update(parsed_tree)\n filtered_results.append(filtered_result)\n\n return tableToMarkdown(f'Logs {table_name} table', filtered_results, removeNull=True)", "def transform_thread(self):\n self.transform_result = Transformer.transform_all(self.extract_list, SelectableLabel.selected_hotel, self.hotel_address)", "def __call__(data, source_format, target_format):", "def output_fields(request, project_id):\n project = Project.objects.get(id=project_id)\n dal = dal_mongo.DALMongo(project_id)\n ret = {}\n if project.segmentation_skipped:\n ret['col_or_outputfield'] = \"column\"\n ret['values'] = dal.get_matched_cols()\n else:\n ret['col_or_outputfield'] = \"output field\"\n ret['values'] = dal.get_output_fields_matched_cols()\n\n return JsonResponse(ret, safe=False)", "def transform(self, node):\n # type: (nodes.Node) -> None\n typemap = self.typemap\n\n entries = []\n groupindices = {} # type: Dict[unicode, int]\n types = {} # type: Dict[unicode, Dict]\n\n # step 1: traverse all fields and collect field types and content\n for field in node:\n fieldname, fieldbody = field\n try:\n # split into field type and argument\n fieldtype, fieldarg = fieldname.astext().split(None, 1)\n except ValueError:\n # maybe an argument-less field type?\n fieldtype, fieldarg = fieldname.astext(), ''\n typedesc, is_typefield = typemap.get(fieldtype, (None, None))\n\n # collect the content, trying not to keep unnecessary paragraphs\n if docfields._is_single_paragraph(fieldbody):\n content = fieldbody.children[0].children\n else:\n content = fieldbody.children\n\n # sort out unknown fields\n if typedesc is None or typedesc.has_arg != bool(fieldarg):\n continue\n\n typename = typedesc.name\n\n # if the field specifies a type, put it in the types collection\n if is_typefield:\n # filter out only inline nodes; others will result in invalid\n # markup being written out\n content = [n for n in content if isinstance(n, 
nodes.Inline) or\n isinstance(n, nodes.Text)]\n if content:\n types.setdefault(typename, {})[fieldarg] = content\n continue\n\n # also support syntax like ``:param type name:``\n if typedesc.is_typed:\n try:\n argtype, argname = fieldarg.split(None, 1)\n except ValueError:\n pass\n else:\n xrefs = typedesc.make_xrefs(\n nodes.Text(argtype),\n self.directive.domain,\n nodes.Text(argtype),\n )\n xrefs[0].attributes['json:name'] = strip_json_array(nodes.Text(argtype))\n types.setdefault(typename, {})[argname] = xrefs\n fieldarg = argname\n\n translatable_content = nodes.inline(fieldbody.rawsource,\n translatable=True)\n translatable_content.document = fieldbody.parent.document\n translatable_content.source = fieldbody.parent.source\n translatable_content.line = fieldbody.parent.line\n translatable_content += content\n\n # grouped entries need to be collected in one entry, while others\n # get one entry per field\n if typedesc.is_grouped:\n if typename in groupindices:\n group = entries[groupindices[typename]]\n else:\n groupindices[typename] = len(entries)\n group = [typedesc, []]\n entries.append(group)\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n group[1].append(entry)\n else:\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n entries.append([typedesc, entry])\n\n # step 2: all entries are collected, construct the new field list\n new_list = nodes.field_list()\n for entry in entries:\n if isinstance(entry, nodes.field):\n # pass-through old field\n new_list += entry\n else:\n fieldtype, content = entry\n fieldtypes = types.get(fieldtype.name, {})\n env = self.directive.state.document.settings.env\n new_list += fieldtype.make_field(fieldtypes, self.directive.domain,\n content, env=env)\n\n node.replace_self(new_list)", "def do_transform(self):\r\n if not self.transform:\r\n return\r\n try:\r\n self.latest_value = utils.Transform(\r\n expr=self.transform, value=self.latest_value,\r\n timedelta=self.time_between_updates().total_seconds()).result()\r\n except (TypeError, ValueError):\r\n logger.warn(\"Invalid transformation '%s' for metric %s\",\r\n self.transfrom, self.pk)\r\n self.transform = ''", "def result_field_map():\n return {\n \"[run number]\": \"run_number\",\n \"map-file\": \"map_file\",\n \"People\": \"people\",\n \"person_path_weight\": \"person_path_weight\",\n \"Slow\": \"slow\",\n \"Medium\": \"medium\",\n \"Fast\": \"fast\",\n \"display-path-cost?\": \"display_path_cost_p\",\n \"add-person-spacing?\": \"add_person_spacing_p\",\n \"people-wait?\": \"people_wait_p\",\n \"equal-diagonal-weight?\": \"equal_diagonal_weight_p\",\n \"Slow-Speed\": \"slow_speed\",\n \"Medium-Speed\": \"medium_speed\",\n \"Fast-Speed\": \"fast_speed\",\n \"set-fire?\": \"set_fire_p\",\n \"Fire_Speed\": \"fire_speed\" ,\n \"mean-escape-time\": \"mean_escape_time\",\n }", "def _transform(self, dataset):\n raise NotImplementedError()", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and 
add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def _output_format_sector(func, override=None):\n @wraps(func)\n def _format_wrapper(self, *args, **kwargs):\n json_response, data_key, meta_data_key = func(self, *args, **kwargs)\n if isinstance(data_key, list):\n # Replace the strings into percentage\n data = {key: {k:self.percentage_to_float(v)\n for k,v in json_response[key].items()} for key in data_key}\n else:\n data = json_response[data_key]\n #TODO: Fix orientation in a better way\n meta_data = json_response[meta_data_key]\n # Allow to override the output parameter in the call\n if override is None:\n output_format = self.output_format.lower()\n elif 'json' or 'pandas' in override.lower():\n output_format = override.lower()\n # Choose output format\n if output_format == 'json':\n return data, meta_data\n elif output_format == 'pandas':\n data_pandas = pandas.DataFrame.from_dict(data,\n orient='columns')\n # Rename columns to have a nicer name\n col_names = [re.sub(r'\\d+.', '', name).strip(' ')\n for name in list(data_pandas)]\n data_pandas.columns = col_names\n return data_pandas, meta_data\n else:\n raise ValueError('Format: {} is not supported'.format(\n self.output_format))\n return _format_wrapper", "def transform(self, f):\n\n return f(self)", "def transform(self, df):\n _df = df.copy()\n _df['Age'] = _df['YrSold'] - _df['YearBuilt']\n _df['AgeRemod'] = _df['YrSold'] - _df['YearRemodAdd']\n _df['Baths'] = _df['FullBath'] + _df['HalfBath']\n _df['BsmtBaths'] = _df['BsmtFullBath'] + _df['BsmtHalfBath']\n _df['OverallQual_Square'] = _df['OverallQual'] * _df['OverallQual']\n _df['OverallQual_3'] = _df['OverallQual'] * _df['OverallQual'] * _df['OverallQual']\n _df['OverallQual_exp'] = np.exp(_df['OverallQual'])\n _df['GrLivArea_Square'] = _df['GrLivArea'] * _df['GrLivArea']\n _df['GrLivArea_3'] = _df['GrLivArea'] * _df['GrLivArea'] * _df['GrLivArea']\n _df['GrLivArea_exp'] = np.exp(_df['GrLivArea'])\n _df['GrLivArea_log'] = np.log(_df['GrLivArea'])\n _df['TotalBsmtSF_/GrLivArea'] = _df['TotalBsmtSF'] / _df['GrLivArea']\n _df['OverallCond_sqrt'] = np.sqrt(_df['OverallCond'])\n 
_df['OverallCond_square'] = _df['OverallCond'] * _df['OverallCond']\n _df['LotArea_sqrt'] = np.sqrt(_df['LotArea'])\n _df['1stFlrSF_log'] = np.log1p(_df['1stFlrSF'])\n _df['1stFlrSF'] = np.sqrt(_df['1stFlrSF'])\n _df['TotRmsAbvGrd_sqrt'] = np.sqrt(_df['TotRmsAbvGrd'])\n categorical = self.find_categorical(df)\n numerical = self.find_numerical(df)\n\n for cat in categorical:\n _df[cat] = pd.Categorical(_df[cat], categories=_test_categories[cat])\n\n for key in numerical:\n _df[key].fillna(_df[key].median(), inplace=True)\n\n # #if numerical feature are skewed apply log\n\n skewed_feats = _df[numerical].apply(lambda x: scipy.stats.skew(x.dropna())) # compute skewness\n skewed_feats = skewed_feats[skewed_feats > 0.75]\n skewed_feats = skewed_feats.index\n _df[skewed_feats] = np.log1p(_df[skewed_feats])\n cat_matrix = pd.get_dummies(_df[categorical]).as_matrix()\n num_matrix = StandardScaler().fit_transform(np.asarray(_df[numerical]))\n\n return np.hstack((num_matrix, cat_matrix))", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, file_manager: FileManager, real: xr.DataArray = None,\n **kwargs: xr.DataArray) -> SummaryObjectTable:\n\n cols = []\n for metric_name in self.metrics:\n cols.extend([f\"{metric_name}_Model\", f\"{metric_name}_Sum\", f\"{metric_name}_Min\",\n f\"{metric_name}_Max\", f\"{metric_name}_Mean\"])\n df_result = pd.DataFrame(columns=cols)\n\n if self.task == TSTRTask.Regression:\n for name, data in kwargs.items():\n X, y, X_test, y_test = self._get_regression_data(real.values, data.values)\n res = self._evaluate(X, y, X_test, y_test, self.n_targets, name)\n df_result = df_result.append(res, ignore_index=True)\n X, y, X_test, y_test = self._get_regression_data(real.values, real.values)\n res = self._evaluate(X, y, X_test, y_test, self.n_targets, \"real_data\")\n df_result = df_result.append(res, ignore_index=True)\n\n else:\n for name, data in kwargs.items():\n if name.endswith(\"_target\"):\n continue\n X, y, X_test, y_test = self._get_classification_data(real.values, kwargs[name].values,\n kwargs[\"real_target\"].values,\n kwargs[name + \"_target\"].values)\n res = self._evaluate(X, y, X_test, y_test, self.n_targets, name)\n df_result = df_result.append(res, ignore_index=True)\n X, y, X_test, y_test = self._get_classification_data(real.values, real.values, kwargs[\"real_target\"].values,\n kwargs[\"real_target\"].values, )\n res = self._evaluate(X, y, X_test, y_test, self.n_targets, \"real_data\")\n df_result = df_result.append(res, ignore_index=True)\n\n resulting_summary = SummaryObjectTable(self.name, additional_information=str(df_result.columns))\n resulting_summary.set_kv(self.name, df_result.values)\n\n return resulting_summary", "def test_transform_record(self):\n response = {\"frequency\": 0.009112876, \"info\": {\"accessType\": \"PUBLIC\"},\n \"referenceBases\": \"CT\", \"alternateBases\": \"AT\",\n \"start\": 10, \"end\": 12,\n \"variantCount\": 3, \"variantType\": \"MNP\"}\n record = Record(\"PUBLIC\", 0.009112875989879, referenceBases=\"CT\", alternateBases=\"AT\", start=10, end=12, variantCount=3, variantType=\"MNP\")\n result = transform_record(record)\n self.assertEqual(result, response)", "def __transform(self):\n try:\n self.tokenized_document, self.stack = None, []\n\n InlineProcessor.initialize()\n LinkHelper.initialize()\n\n 
POGGER.debug(\"\\n\\n>>>>>>>parse_blocks_pass>>>>>>\")\n first_pass_results = self.__parse_blocks_pass()\n\n POGGER.debug(\"\\n\\n>>>>>>>coalesce_text_blocks>>>>>>\")\n coalesced_results = CoalesceProcessor.coalesce_text_blocks(\n first_pass_results\n )\n\n POGGER.debug(\"\\n\\n>>>>>>>parse_inline>>>>>>\")\n final_pass_results = InlineProcessor.parse_inline(coalesced_results)\n\n POGGER.debug(\"\\n\\n>>>>>>>final_pass_results>>>>>>\")\n return final_pass_results\n except Exception as this_exception:\n raise BadTokenizationError(\n \"An unhandled error occurred processing the document.\"\n ) from this_exception", "def _get_line_vals(self, record, line_field, fields):\n line_field, max_row = get_line_max(line_field)\n lines = record[line_field]\n if max_row > 0 and len(lines) > 3800 :#and len(lines) > max_row:\n raise Exception(\n _('Records in %s exceed max record allowed!') % line_field)\n vals = dict([(field, []) for field in fields])\n # Get field condition & aggre function\n field_cond_dict = {}\n aggre_func_dict = {}\n field_format_dict = {}\n pair_fields = [] # I.e., ('debit${value and . or .}@{sum}', 'debit')\n for field in fields:\n temp_field, eval_cond = get_field_condition(field)\n temp_field, field_format = get_field_format(temp_field)\n raw_field, aggre_func = get_field_aggregation(temp_field)\n # Dict of all special conditions\n field_cond_dict.update({field: eval_cond})\n aggre_func_dict.update({field: aggre_func})\n field_format_dict.update({field: field_format})\n # --\n pair_fields.append((field, raw_field))\n # --\n for line in lines:\n for field in pair_fields: # (field, raw_field)\n value = self._get_field_data(field[1], line)\n if type(value) == type(''):\n value = re.sub(r\"[\u001e\u0006]\", '', value)\n # Case Eval\n eval_cond = field_cond_dict[field[0]]\n if eval_cond: # Get eval_cond of a raw field\n eval_context = {'float_compare': float_compare,\n 'time': time,\n 'datetime': dt,\n 'date': date,\n 'value': value,\n 'object': line,\n 'model': self.env[record._name],\n 'env': self.env,\n 'context': self._context,\n }\n # value = str(eval(eval_cond, eval_context))\n # Test removing str(), coz some case, need resulting number\n value = eval(eval_cond, eval_context)\n # --\n vals[field[0]].append(value)\n return (vals, aggre_func_dict, field_format_dict)", "def customizations(record):\n\n record = type(record)\n # record = author(record)\n record = convert_to_unicode(record)\n # record = editor(record)\n # record = journal(record)\n # record = keyword(record)\n # record = link(record)\n # record = page_double_hyphen(record)\n # record = doi(record)\n return record", "def fetch_output_row(self, common_fields_to_print, fields_to_print, filter_code):\r\n \r\n old_passed_cutoff = False\r\n new_passed_cutoff = False\r\n\r\n self.check_call_similarity()\r\n self.check_allele_freq_diff()\r\n \r\n if self.old:\r\n common_fields = self._fetch_fields(self.old, common_fields_to_print)\r\n if self.old.passes_cutoff(filter_code):\r\n old_passed_cutoff = True\r\n \r\n if self.new:\r\n common_fields = self._fetch_fields(self.new, common_fields_to_print)\r\n if self.new.passes_cutoff(filter_code):\r\n new_passed_cutoff = True \r\n \r\n old_fields = self._fetch_fields(self.old, fields_to_print)\r\n new_fields = self._fetch_fields(self.new, fields_to_print)\r\n \r\n outfields = \"\\t\".join( common_fields + [self.similarity, str(self.freq_diff)] + old_fields + new_fields )\r\n \r\n if not old_passed_cutoff and not new_passed_cutoff:\r\n outfields = None\r\n \r\n return outfields", "def 
transform(self, results: Dict) -> Optional[Dict]:\n idx = self.random_pipeline_index()\n return self.transforms[idx](results)", "def _wr_3fmt_goeaobj(goea_results, goeaobj, wr_params, log):\n # List of all fields, printable or not, available from GOEnrichmentRecord\n log.write(\"\\nGOEnrichmentRecord FIELDS: {F}\\n\".format(F=\" \".join(goea_results[0].get_prtflds_all())))\n # Use the subset of namedtuple fields_names that are listed in the format string:\n # Same format: print to screen and print to file:\n goeaobj.prt_txt(log, goea_results, **wr_params) # Print to screen\n goeaobj.wr_txt(\"nbt3102_subset_obj.txt\", goea_results, **wr_params)\n # Print to Excel Spreadsheet\n title=\"Print subset of fields from GOEnrichmentRecord\"\n goeaobj.wr_xlsx(\"nbt3102_subset_obj.xlsx\", goea_results, title=title, **wr_params)\n # Print to tab-separated file\n goeaobj.wr_tsv(\"nbt3102_subset_obj.tsv\", goea_results, **wr_params)", "def EvaluateFields(self, *float, **kwargs):\n ...", "def test_formatResult(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x.formatResult(3), '3')", "def conform_output_data(rowdict,fields_to_show=''):\n rowdict['TimeStamp'] = str(rowdict['TimeStamp'])\n if fields_to_show:\n rowdict= removed_fields(fields_to_show, rowdict)\n return rowdict", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n numeric = HEADER.as_feature_indices(\n [\"review_count\", \"lat\", \"lng\", \"lat2\", \"lng2\"]\n )\n\n # These features contain a relatively small number of unique items.\n categorical = HEADER.as_feature_indices(\n [\"distance\", \"price_level\", \"review_count\", \"Sp1\", \"type\"]\n )\n\n # These features can be parsed as natural language.\n text = HEADER.as_feature_indices(\n [\n \"slug\", \"menu\", \"slug.1\", \"categories\", \"name\", \"url\", \"homeurl\",\n \"resource_id1\", \"resource_id2\"\n ]\n )\n\n numeric_processors = Pipeline(steps=[(\"robustimputer\", RobustImputer())])\n\n categorical_processors = Pipeline(\n steps=[\n (\"thresholdonehotencoder\", ThresholdOneHotEncoder(threshold=162))\n ]\n )\n\n text_processors = Pipeline(\n steps=[\n (\n \"multicolumntfidfvectorizer\",\n MultiColumnTfidfVectorizer(\n max_df=0.9977,\n min_df=0.0003137465824032988,\n analyzer=\"word\",\n max_features=10000\n )\n )\n ]\n )\n\n column_transformer = ColumnTransformer(\n transformers=[\n (\"numeric_processing\", numeric_processors, numeric\n ), (\"categorical_processing\", categorical_processors,\n categorical), (\"text_processing\", text_processors, text)\n ]\n )\n\n return Pipeline(\n steps=[\n (\"column_transformer\",\n column_transformer), (\"robustpca\", RobustPCA(n_components=88)),\n (\"robuststandardscaler\", RobustStandardScaler())\n ]\n )", "def _convert_other(self, column, field, recformat):\n if isinstance(recformat, _FormatX):\n # special handling for the X format\n return self._convert_x(field, recformat)\n\n scale_factors = self._get_scale_factors(column)\n _str, _bool, _number, _scale, _zero, bscale, bzero, dim = scale_factors\n\n indx = self.names.index(column.name)\n\n # ASCII table, convert strings to numbers\n # TODO:\n # For now, check that these are ASCII columns by checking the coldefs\n # type; in the future all columns (for binary tables, ASCII tables, or\n # otherwise) should \"know\" what type they are already and how to handle\n # converting their data from FITS format to native format and vice\n # versa...\n if not _str and isinstance(self._coldefs, _AsciiColDefs):\n field = self._convert_ascii(column, field)\n\n # Test 
that the dimensions given in dim are sensible; otherwise\n # display a warning and ignore them\n if dim:\n # See if the dimensions already match, if not, make sure the\n # number items will fit in the specified dimensions\n if field.ndim > 1:\n actual_shape = field.shape[1:]\n if _str:\n actual_shape = actual_shape + (field.itemsize,)\n else:\n actual_shape = field.shape[0]\n\n if dim == actual_shape:\n # The array already has the correct dimensions, so we\n # ignore dim and don't convert\n dim = None\n else:\n nitems = reduce(operator.mul, dim)\n if _str:\n actual_nitems = field.itemsize\n elif len(field.shape) == 1:\n # No repeat count in TFORMn, equivalent to 1\n actual_nitems = 1\n else:\n actual_nitems = field.shape[1]\n if nitems > actual_nitems and not isinstance(recformat, _FormatP):\n warnings.warn(\n \"TDIM{} value {:d} does not fit with the size of \"\n \"the array items ({:d}). TDIM{:d} will be ignored.\".format(\n indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1\n )\n )\n dim = None\n\n # further conversion for both ASCII and binary tables\n # For now we've made columns responsible for *knowing* whether their\n # data has been scaled, but we make the FITS_rec class responsible for\n # actually doing the scaling\n # TODO: This also needs to be fixed in the effort to make Columns\n # responsible for scaling their arrays to/from FITS native values\n if not column.ascii and column.format.p_format:\n format_code = column.format.p_format\n else:\n # TODO: Rather than having this if/else it might be nice if the\n # ColumnFormat class had an attribute guaranteed to give the format\n # of actual values in a column regardless of whether the true\n # format is something like P or Q\n format_code = column.format.format\n\n if _number and (_scale or _zero) and not column._physical_values:\n # This is to handle pseudo unsigned ints in table columns\n # TODO: For now this only really works correctly for binary tables\n # Should it work for ASCII tables as well?\n if self._uint:\n if bzero == 2**15 and format_code == \"I\":\n field = np.array(field, dtype=np.uint16)\n elif bzero == 2**31 and format_code == \"J\":\n field = np.array(field, dtype=np.uint32)\n elif bzero == 2**63 and format_code == \"K\":\n field = np.array(field, dtype=np.uint64)\n bzero64 = np.uint64(2**63)\n else:\n field = np.array(field, dtype=np.float64)\n else:\n field = np.array(field, dtype=np.float64)\n\n if _scale:\n np.multiply(field, bscale, field)\n if _zero:\n if self._uint and format_code == \"K\":\n # There is a chance of overflow, so be careful\n test_overflow = field.copy()\n try:\n test_overflow += bzero64\n except OverflowError:\n warnings.warn(\n \"Overflow detected while applying TZERO{:d}. 
\"\n \"Returning unscaled data.\".format(indx + 1)\n )\n else:\n field = test_overflow\n else:\n field += bzero\n\n # mark the column as scaled\n column._physical_values = True\n\n elif _bool and field.dtype != bool:\n field = np.equal(field, ord(\"T\"))\n elif _str:\n if not self._character_as_bytes:\n with suppress(UnicodeDecodeError):\n field = decode_ascii(field)\n\n if dim and not isinstance(recformat, _FormatP):\n # Apply the new field item dimensions\n nitems = reduce(operator.mul, dim)\n if field.ndim > 1:\n field = field[:, :nitems]\n if _str:\n fmt = field.dtype.char\n dtype = (f\"|{fmt}{dim[-1]}\", dim[:-1])\n field.dtype = dtype\n else:\n field.shape = (field.shape[0],) + dim\n\n return field", "def _hacked_transform(typemap, node):\n entries = []\n groupindices = {}\n types = {}\n\n # step 1: traverse all fields and collect field types and content\n for field in node:\n fieldname, fieldbody = field\n try:\n # split into field type and argument\n fieldtype, fieldarg = fieldname.astext().split(None, 1)\n except ValueError:\n # maybe an argument-less field type?\n fieldtype, fieldarg = fieldname.astext(), ''\n typedesc, is_typefield = typemap.get(fieldtype, (None, None))\n\n # sort out unknown fields\n if typedesc is None or typedesc.has_arg != bool(fieldarg):\n # either the field name is unknown, or the argument doesn't\n # match the spec; capitalize field name and be done with it\n new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]\n if fieldarg:\n new_fieldname += ' ' + fieldarg\n fieldname[0] = nodes.Text(new_fieldname)\n entries.append(field)\n continue\n\n typename = typedesc.name\n\n # collect the content, trying not to keep unnecessary paragraphs\n if _is_single_paragraph(fieldbody):\n content = fieldbody.children[0].children\n else:\n content = fieldbody.children\n\n # if the field specifies a type, put it in the types collection\n if is_typefield:\n # filter out only inline nodes; others will result in invalid\n # markup being written out\n content = [n for n in content if isinstance(n, nodes.Inline) or\n isinstance(n, nodes.Text)]\n if content:\n types.setdefault(typename, {})[fieldarg] = content\n continue\n\n # also support syntax like ``:param type name:``\n if typedesc.is_typed:\n try:\n argtype, argname = fieldarg.split(None, 1)\n except ValueError:\n pass\n else:\n types.setdefault(typename, {})[argname] = \\\n [nodes.Text(argtype)]\n fieldarg = argname\n\n translatable_content = nodes.inline(fieldbody.rawsource,\n translatable=True)\n translatable_content.source = fieldbody.parent.source\n translatable_content.line = fieldbody.parent.line\n translatable_content += content\n\n # grouped entries need to be collected in one entry, while others\n # get one entry per field\n if typedesc.is_grouped:\n if typename in groupindices:\n group = entries[groupindices[typename]]\n else:\n groupindices[typename] = len(entries)\n group = [typedesc, []]\n entries.append(group)\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n group[1].append(entry)\n else:\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n entries.append([typedesc, entry])\n\n return (entries, types)", "def __call__(self, results):\n\n results = self._mosaic_transform(results)\n return results", "def _TransformInputs(self, _):\n raise NotImplementedError()", "def transform(self, results: Dict) -> Optional[Dict]:\n if self.random_apply():\n return self.transforms(results) # type: ignore\n else:\n return results", "def _summarize(obj, fields):\n for name in fields:\n attr = 
getattr(obj, name)\n if attr is None:\n continue\n elif isinstance(attr, datetime):\n attr = attr.isoformat()\n yield f\"{name}: {repr(attr)}\"", "def _summarize(obj, fields):\n for name in fields:\n attr = getattr(obj, name)\n if attr is None:\n continue\n elif isinstance(attr, datetime):\n attr = attr.isoformat()\n yield f\"{name}: {repr(attr)}\"", "def mapper(k, v):\n instance = json.loads(v)\n field_values = []\n for f in output_fields:\n field = getFromDict(instance, f.split('.'))\n if isinstance(field, dict):\n field_values.append(unicode(json.dumps(field), \"utf-8\"))\n elif isinstance(field, basestring):\n field_values.append(getFromDict(instance, f.split('.')))\n else:#list\n field_values.append(','.join(field))\n\n line = output_separator.join(v for v in field_values)\n yield line, ''", "def formatResult(self, result):\r\n data, sample_names, calc_names = result\r\n res = format_matrix(data, sample_names, calc_names)\r\n return res", "def format_data(self, data):", "def normalize(results):\n postal_code = None\n date = None\n\n for result in \\\n [r for r in results if r.type in ['Date', 'Postal Code']]:\n\n if result.type == 'Date':\n date = result\n elif result.type == 'Postal Code':\n postal_code = result\n\n assert isinstance(date, ParseResult)\n assert isinstance(postal_code, ParseResult)\n\n date.confidence = min(70, postal_code.confidence+4)\n\n return results", "def transform(filtered_list):\n out_put = {}\n out_list = []\n # loop to get the required columns, random ordered\n for item in filtered_list:\n for val in item._fields:\n if val in type_dict:\n out_put[val] = type_dict.get(val)(getattr(item, val))\n out_list.append(out_put)\n out_put = {}\n\n # loop to the ordered columns data as per output\n all_rows = []\n for item in out_list:\n tmp_row = []\n for key in type_dict.keys():\n out_put[key] = item[key]\n tmp_row.append(item[key])\n all_rows.append(tmp_row)\n\n col_row = [col.replace('_', '-') for col in type_dict.keys()]\n all_rows.insert(0, col_row)\n return all_rows", "def transform(self, x):", "def render(self):\n self.read_csv()\n opt = self.get_parser()\n convert = self.get_convert()\n return opt, convert", "def compute(\n self, data: Union[gpd.GeoDataFrame, pd.DataFrame]\n ) -> pd.Series:\n\n log.debug(f\"Computing transform {self.name}\")\n # Match for input_type\n if self.input_type == \"s\":\n return self.func(s=data[self.cols_input[0]], **self.kwargs)\n elif self.input_type == \"df\":\n return self.func(df=data[self.cols_input], **self.kwargs)\n elif self.input_type == \"gdf\":\n return self.func(gdf=data, col=self.cols_input[0], **self.kwargs)\n else:\n raise RuntimeError(\n f\"Something broke with Transform, \"\n f\"couldn't match self.input_type {self.input_type}\"\n )", "def _transform(obj):\n\n if isinstance(obj, date) or isinstance(obj, time) or isinstance(obj, datetime):\n return str(obj)\n if isinstance(obj, decimal):\n return str(float(obj))\n if obj == None: \n return 'null'\n return str(obj)", "def _format_map_output(\n result_format: dict,\n success: bool,\n element_count: Optional[int] = None,\n nonnull_count: Optional[int] = None,\n unexpected_count: Optional[int] = None,\n unexpected_list: Optional[List[Any]] = None,\n unexpected_index_list: Optional[List[int]] = None,\n unexpected_rows=None,\n) -> Dict:\n if element_count is None:\n element_count = 0\n\n # NB: unexpected_count parameter is explicit some implementing classes may limit the length of unexpected_list\n # Incrementally add to result and return when all values for the specified 
level are present\n return_obj: Dict[str, Any] = {\"success\": success}\n\n if result_format[\"result_format\"] == \"BOOLEAN_ONLY\":\n return return_obj\n\n skip_missing = False\n missing_count: Optional[int] = None\n if nonnull_count is None:\n skip_missing = True\n else:\n missing_count = element_count - nonnull_count\n\n missing_percent: Optional[float] = None\n unexpected_percent_total: Optional[float] = None\n unexpected_percent_nonmissing: Optional[float] = None\n if unexpected_count is not None and element_count > 0:\n unexpected_percent_total = unexpected_count / element_count * 100\n\n if not skip_missing and missing_count is not None:\n missing_percent = missing_count / element_count * 100\n if nonnull_count is not None and nonnull_count > 0:\n unexpected_percent_nonmissing = unexpected_count / nonnull_count * 100\n else:\n unexpected_percent_nonmissing = None\n else:\n unexpected_percent_nonmissing = unexpected_percent_total\n\n return_obj[\"result\"] = {\n \"element_count\": element_count,\n \"unexpected_count\": unexpected_count,\n \"unexpected_percent\": unexpected_percent_nonmissing,\n }\n\n if unexpected_list is not None:\n return_obj[\"result\"][\"partial_unexpected_list\"] = unexpected_list[\n : result_format[\"partial_unexpected_count\"]\n ]\n\n if not skip_missing:\n return_obj[\"result\"][\"missing_count\"] = missing_count\n return_obj[\"result\"][\"missing_percent\"] = missing_percent\n return_obj[\"result\"][\"unexpected_percent_total\"] = unexpected_percent_total\n return_obj[\"result\"][\n \"unexpected_percent_nonmissing\"\n ] = unexpected_percent_nonmissing\n\n if result_format[\"include_unexpected_rows\"]:\n return_obj[\"result\"].update(\n {\n \"unexpected_rows\": unexpected_rows,\n }\n )\n\n if result_format[\"result_format\"] == \"BASIC\":\n return return_obj\n\n if unexpected_list is not None:\n if len(unexpected_list) and isinstance(unexpected_list[0], dict):\n # in the case of multicolumn map expectations `unexpected_list` contains dicts,\n # which will throw an exception when we hash it to count unique members.\n # As a workaround, we flatten the values out to tuples.\n immutable_unexpected_list = [\n tuple([val for val in item.values()]) for item in unexpected_list\n ]\n else:\n immutable_unexpected_list = unexpected_list\n\n # Try to return the most common values, if possible.\n partial_unexpected_count: Optional[int] = result_format.get(\n \"partial_unexpected_count\"\n )\n partial_unexpected_counts: Optional[List[Dict[str, Any]]] = None\n if partial_unexpected_count is not None and 0 < partial_unexpected_count:\n try:\n partial_unexpected_counts = [\n {\"value\": key, \"count\": value}\n for key, value in sorted(\n Counter(immutable_unexpected_list).most_common(\n result_format[\"partial_unexpected_count\"]\n ),\n key=lambda x: (-x[1], x[0]),\n )\n ]\n except TypeError:\n partial_unexpected_counts = [\n {\"error\": \"partial_exception_counts requires a hashable type\"}\n ]\n finally:\n return_obj[\"result\"].update(\n {\n \"partial_unexpected_index_list\": unexpected_index_list[\n : result_format[\"partial_unexpected_count\"]\n ]\n if unexpected_index_list is not None\n else None,\n \"partial_unexpected_counts\": partial_unexpected_counts,\n }\n )\n\n if result_format[\"result_format\"] == \"SUMMARY\":\n return return_obj\n\n return_obj[\"result\"].update(\n {\n \"unexpected_list\": unexpected_list,\n \"unexpected_index_list\": unexpected_index_list,\n }\n )\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n return return_obj\n\n raise 
ValueError(f\"Unknown result_format {result_format['result_format']}.\")", "def do_manipulations(self, *args, **kwargs):\n pass", "def transform(stuff, pipelines=DEFAULT_PIPELINE_NAMES):\n global _pipelines\n for name in pipelines:\n p = load_pipeline(name)\n stuff = p.transform(stuff)\n return stuff", "def test_all():\n\n def annotate(data, response):\n return {\"Text\": data}\n\n assert hug.transform.all(str, annotate)(1, response=\"hi\") == {\"Text\": \"1\"}", "def extract(self, optimized_str):\n\n logger.debug('START optimized_str ========================')\n logger.debug(optimized_str)\n logger.debug('END optimized_str ==========================')\n logger.debug(\n 'Date parsing: languages=%s date_formats=%s',\n self.options['languages'], self.options['date_formats'])\n logger.debug('Float parsing: decimal separator=%s', self.options['decimal_separator'])\n logger.debug(\"keywords=%s\", self['keywords'])\n logger.debug(self.options)\n\n # Try to find data for each field.\n output = {}\n for k, v in self['fields'].items():\n if k.startswith('static_'):\n logger.debug(\"field=%s | static value=%s\", k, v)\n output[k.replace('static_', '')] = v\n else:\n logger.debug(\"field=%s | regexp=%s\", k, v)\n\n # Fields can have multiple expressions\n if type(v) is list:\n for v_option in v:\n res_find = re.findall(v_option, optimized_str)\n if res_find:\n break\n else:\n res_find = re.findall(v, optimized_str)\n if res_find:\n logger.debug(\"res_find=%s\", res_find)\n if k.startswith('date'):\n raw_date = res_find[0]\n output[k] = dateparser.parse(\n raw_date, date_formats=self.options['date_formats'],\n languages=self.options['languages'])\n logger.debug(\"result of date parsing=%s\", output[k])\n if not output[k]:\n logger.error(\n \"Date parsing failed on date '%s'\", raw_date)\n return None\n elif k.startswith('amount'):\n assert res_find[0].count(self.options['decimal_separator']) < 2,\\\n 'Decimal separator cannot be present several times'\n # replace decimal separator by a |\n amount_pipe = res_find[0].replace(self.options['decimal_separator'], '|')\n # remove all possible thousands separators\n amount_pipe_no_thousand_sep = re.sub(\n '[.,\\s]', '', amount_pipe)\n # put dot as decimal sep\n amount_regular = amount_pipe_no_thousand_sep.replace('|', '.')\n # it is now safe to convert to float\n output[k] = float(amount_regular)\n else:\n output[k] = res_find[0]\n else:\n logger.warning(\"regexp for field %s didn't match\", k)\n\n output['currency'] = self.options['currency']\n\n if len(output.keys()) >= 4:\n output['desc'] = 'Invoice %s from %s' % (\n output['invoice_number'], self['issuer'])\n logger.debug(output)\n return output\n else:\n logger.error(output)\n return None", "def _enrich_results(self, record, query):\n record['metadata.query_name'] = query['name']\n record['metadata.query_id'] = '{}_{}'.format(\n query['name'], self.run_tag)\n record['metadata.query_description'] = query['description']\n record['metadata.query_headers'] = query['headers']\n record['@timestamp'] = int(round(time.time() * 1000))\n return record", "def _fields(self, doclet):\n FIELD_TYPES = OrderedDict([('params', _params_formatter),\n ('properties', _params_formatter),\n ('exceptions', _exceptions_formatter),\n ('returns', _returns_formatter)])\n for field_name, callback in iteritems(FIELD_TYPES):\n for field in doclet.get(field_name, []):\n description = field.get('description', '')\n unwrapped = sub(r'[ \\t]*[\\r\\n]+[ \\t]*', ' ', description)\n yield callback(field, unwrapped)", "def numerical_raw(self, 
dataframe, combination_dict1, combination_dict2):\n grouped = pd.Series(dataframe[1].values, index=dataframe[0].values)\n grouped = pd.Series.sort_values(grouped)\n transform_scenario = {}\n transform_scenario[\"transform\"] = \"Numerical_raw_Column %s Column %s\" %(combination_dict1, combination_dict2)\n transform_scenario[\"X\"] = combination_dict1\n transform_scenario[\"Y\"] = combination_dict2\n transform_scenario[\"Agg_func_X\"] = \"RAW\"\n transform_scenario[\"Agg_func_Y\"] = \"RAW\"\n transform_scenario[\"transform_score\"] = 0\n transform_scenario[\"scenario_num\"] = self.scenario_num\n transform_scenario[\"3column\"] = False\n\n self.scenario_dict[\"%d\" %self.scenario_num] = transform_scenario\n # print (self.scenario_dict[\"%d\" %self.scenario_num][\"transform\"])\n # print(\"Transformation score : %.4f\" % self.scenario_dict[\"%d\" % self.scenario_num][\"transform_score\"])\n self.scenario_num += 1\n return grouped", "def _format_getters(self, format_get_info=None, format_get_k_info=None):\n ## Get info setting\n if format_get_k_info is None:\n self.get_k = self._general_get_k\n elif format_get_k_info == \"default\":\n self.get_k = self._default_get_k\n elif format_get_k_info == \"general\":\n self.get_k = self._general_get_k\n elif format_get_k_info == \"list\":\n self.get_k = self._list_get_k\n elif format_get_k_info == \"integer\":\n self.get_k = self._integer_get_k\n ## Get information setting\n if format_get_info is None:\n self.get_information = self._general_get_information\n elif format_get_info == \"default\":\n self.get_information = self._default_get_information\n elif format_get_info == \"general\":\n self.get_information = self._general_get_information\n ## Other getters\n if self.staticneighs:\n self.get_copy_iss = self._staticneighs_get_copy_iss\n self.get_copy_iss_by_ind = self._staticneighs_get_copy_iss_by_ind\n else:\n self.get_copy_iss = self._notstaticneighs_get_copy_iss\n self.get_copy_iss_by_ind =\\\n self._notstaticneighs_get_copy_iss_by_ind", "def apply_transform(key, data, transform_list):\n for transform in transform_list:\n method_name = transform[MethodKeys.METHOD]\n method_params = transform[MethodKeys.PARAMETERS]\n\n if method_name == 'compute_and_apply_vocabulary':\n method_params.update({'vocab_filename': key})\n\n data = TransformMethods.get_method(method_name)(data,\n **method_params)\n return data", "def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r", "def __format_management(self, index, matched):\n\n for _, case_data in self.format_cases.items():\n if int(index) in case_data[0]:\n # The regex number is into the currently read case data.\n\n # We return a list with the formatted elements.\n # 1. We convert the day to 2 digits.\n # 2. We convert the month to the unified format.\n # 3. 
We return the year.\n return [\n converter.Digit2Digits(matched[case_data[1][0]]).get_converted(),\n converter.Month(matched[case_data[1][1]]).get_converted(),\n str(matched[case_data[1][2]]),\n ]\n\n return matched # pragma: no cover", "def processItem(self):\r\n self.extract()\r\n self.mergeLanguageClaims()\r\n self.validateClaims()\r\n self.mergeWithWikidata()\r\n self.writeToWikidata()\r\n self.log()", "def transform(self):\n\n if not hasattr(self, \"_transform\"):\n self._transform = Doc.load_single_filter(self.session, self.FILTER)\n return self._transform" ]
[ "0.6625306", "0.6302095", "0.6020546", "0.59190804", "0.5870707", "0.5793297", "0.573849", "0.57165796", "0.56867325", "0.56854814", "0.56783605", "0.5602666", "0.55980164", "0.5573735", "0.55417204", "0.55297184", "0.5522978", "0.5522028", "0.54894423", "0.5454132", "0.54165745", "0.5407115", "0.53942066", "0.5389558", "0.5385535", "0.53641236", "0.5340221", "0.5288681", "0.5286452", "0.5275365", "0.52639985", "0.52502507", "0.524334", "0.52427477", "0.5237935", "0.522147", "0.5179896", "0.5177975", "0.51692206", "0.5165094", "0.5153743", "0.5150673", "0.51345885", "0.5134244", "0.51247054", "0.5108408", "0.5103149", "0.50950825", "0.5094777", "0.50916505", "0.50876355", "0.5087168", "0.5083695", "0.5083695", "0.5083695", "0.5083695", "0.5083695", "0.5083695", "0.5083695", "0.508262", "0.5074876", "0.5057091", "0.50515187", "0.503203", "0.50218534", "0.5008415", "0.49990878", "0.49945325", "0.49927992", "0.49848187", "0.4984739", "0.49778235", "0.49660498", "0.4963818", "0.4959076", "0.4958632", "0.49584955", "0.49584955", "0.49563062", "0.49409539", "0.49235138", "0.49158546", "0.49141473", "0.49108452", "0.49106565", "0.49089998", "0.49075085", "0.49066147", "0.4905993", "0.49022585", "0.48950273", "0.48806843", "0.487961", "0.4879072", "0.4878954", "0.48773", "0.48715612", "0.48678255", "0.48661026", "0.48583812", "0.48567274" ]
0.0
-1
update learning rate of optimizers
def updatelearningrate(self, epoch):
    self.lr = getlearningrate(epoch=epoch, opt=self.opt)
    # update learning rate of model optimizer
    if isinstance(self.model, list):
        count = 0
        for param_group in self.optimzer.param_groups:
            # if type(model) is <list> then update modules with different learning rate
            param_group['lr'] = self.lr
            count += 1
            # print ">>> count is:", count-1
    else:
        for param_group in self.optimzer.param_groups:
            param_group['lr'] = self.lr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr", "def update_learning_rate(self):\n self.scheduler.step()\n lr = self.optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return", "def update_learning_rate(self):\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def update_learning_rate(self, it):\n self.scheduler.step()\n for param_group in self.optimizer.param_groups:\n v = param_group['lr']\n self.tb_logger.add_scalar('train/lr', v, it)", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def adjust_learning_rate(optimizer):\n for group in optimizer.param_groups:\n if 'step' not in group:\n group['step'] = 0\n group['step'] += 1\n\n group['lr'] = args.lr / (1 + group['step'] * args.lr_decay)", "def update_learning_rate(self):\r\n self.scheduler.step(self.clock.epoch)", "def adjust_learning_rate(args, optimizer, epoch):\n if (epoch*3==args.epochs) or (epoch*3==2*args.epochs):\n lr = args.lr * (0.1 ** (epoch*3//args.epochs))\n print(\"Changing Learning Rate to {}\".format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer):\n for group in optimizer.param_groups:\n if 'step' not in group:\n group['step'] = 0.\n else:\n group['step'] += 1.\n group['lr'] = args.lr * (\n 1.0 - float(group['step']) * float(args.batch_size) / (args.n_triplets * float(args.epochs)))\n return", "def adjust_learning_rate(self, optimizer, epoch, args):\n lr = args.learning_rate * (0.1 ** (epoch // 30))\n # print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(cfg, optimizer):\n for idx, group in enumerate(optimizer.param_groups):\n init_lr = cfg.TRAINING.LR\n if 'step' not in group:\n group['step'] = 0.\n else:\n group['step'] += 1.\n\n group['lr'] = init_lr * (\n 1.0 - float(group['step']) * float(cfg.TRAINING.BATCH_SIZE) /\n (cfg.TRAINING.N_TRIPLETS * float(cfg.TRAINING.EPOCHS)))\n return", "def adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Comes from pytorch demo\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n 
param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args, step):\n lr = args.lr * (0.1 ** (epoch // step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(self, optimizer, epoch, initial_lr, writer=None):\n lr = initial_lr * (0.98 ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n if writer:\n writer.add_scalar(\"lr_G\", lr, epoch + 1)", "def adjust_learning_rate(optimizer, lr, step, args):\n # decay = 0.1**(sum(epoch >= np.array(lr_steps)))\n lr = lr * (0.95**(step//args.lr_decay_every))\n print(\"current learning rate: {:.6f}\".format(lr))\n param_group = optimizer.param_groups\n for i in range(len(param_group)):\n param_group[i]['lr'] = lr\n\n return optimizer", "def adjust_learning_rate(optimizer, epochs, base_lr):\r\n lr = base_lr * (0.01 ** (epochs//5))\r\n print('Learning Rate decreased to {}'.format(lr))\r\n for param_group in optimizer.state_dict()['param_groups']:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, batch):\n lr = learning_rate\n for i in range(len(steps)):\n scale = scales[i] if i < len(scales) else 1\n if batch >= steps[i]:\n lr = lr * scale\n if batch == steps[i]:\n break\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr / batch_size\n return lr", "def adjust_learning_rate(optimizer, epoch, lr):\n lr = lr * ((1 - 0.015) ** epoch)\n print('learning rate : {}'.format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(opt, optimizer, epoch):\r\n lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, learning_rate):\n\n if epoch >= 60 and epoch < 75:\n lr = learning_rate / 2\n elif epoch >= 75:\n lr = learning_rate / 4\n else:\n lr = learning_rate\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n if 20 < epoch <= 30:\n lr = 0.0001\n elif 30 < epoch :\n lr = 0.00001\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"learning rate -> {}\\n\".format(lr))", "def adjust_learning_rate(self, opt, epoch):\n lr = opt.learning_rate * 0.1 ** (epoch // opt.lr_update)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, step):\n lr = args.lr * (0.8 ** step)\n print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(learning_rate,optimizer, epoch):\n lr = learning_rate * (0.1 ** (epoch // 25))\n print(str(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.5 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 + args.lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 
+ args.lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 + args.lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, iteration_count):\n lr = args.lr / (1.0 + args.lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update_lr(optimizer, lr, epoch, max_epochs, exponent=0.9):\n optimizer.param_groups[0]['lr'] = lr * (1 - epoch / max_epochs)**exponent", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, step):\n global lr\n lr = lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update_lr(epoch, optimizer, args):\n gamma = 0\n for step in args.step:\n if epoch + 1.0 > int(step):\n gamma += 1\n lr = args.lr * math.pow(0.1, gamma)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(jnet, optimizer, epoch):\n lr = args.lr * (0.5 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(self, optimizer, epoch):\n lr = self.lr\n if epoch >= 80:\n lr = self.lr * (0.1 ** ((epoch-80) // 40))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update_learning_rate(self, validation_loss=None):\n if validation_loss is None:\n for scheduler in self.schedulers:\n scheduler.step()\n else:\n for scheduler in self.schedulers:\n scheduler.step(validation_loss)\n self.lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = {0:.7f}'.format(self.lr))", "def adjust_learning_rate(optimizer, epoch):\n lr = ln * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"learning rate\",lr)", "def adjust_learning_rate(optimizer, args, epoch):\n\tlr = args.learning_rate * (0.1 ** (epoch // args.lr_decay_step))\n\tfor param_group in optimizer.param_groups:\n\t\tparam_group['lr'] = lr", "def adjust_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, base_lr, gamma, step):\n lr = base_lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(opts, optimizer, epoch):\n lr = opts.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate_warmup(optimizer, epoch, args):\n lr = args.lr * (epoch + 1) / args.warmup_epoch\n global current_lr\n current_lr = lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return current_lr", "def adjust_learning_rate(optimizer, epoch):\n lr = hyper.lr * (0.5 ** (epoch 
// 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def adjust_learning_rate(optimizer: torch.optim.SGD, epoch: int, args: Namespace):\n lr = args.lr * (0.1 ** (epoch // 90)) * (0.1 ** (epoch // 180)) * (0.1 ** (epoch // 270))\n # log to TensorBoard\n if args.tensorboard:\n log_value('learning_rate', lr, epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(opt, optimizer, epoch, F_txt):\n\tif opt.classifier_model == 'Baseline':\n\t\tlr = opt.lr * (0.5 ** (epoch // 30))\n\telse:\n\t\tlr = opt.lr * (0.1 ** (epoch // 10))\n\tprint('Learning rate: %f' %lr)\n\tprint('Learning rate: %f' %lr, file=F_txt)\n\tfor param_group in optimizer.param_groups:\n\t\tparam_group['lr'] = lr", "def adjust_learning_rate(args,optimizer, epoch):\n \n args.epochs\n\n lr = args.lr * (\n (0.2 ** int(epoch >= args.epochs - 140))\n * (0.2 ** int(epoch >= args.epochs - 80))\n * (0.2 ** int(epoch >= args.epochs - 40))\n )\n\n ##lr = args.lr ##DELETE ME!\n\n if args.tensorboard:\n log_value(\"learning_rate\", lr, epoch)\n\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr", "def adjust_learning_rate(optimizer, epoch, power):\n lr = args.lr * (0.1 ** (power*(epoch // 30)))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = 0.5 * (0.1 ** (epoch // 100))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def _update_initial_learning_rate(configs, learning_rate):\n\n optimizer_type = get_optimizer_type(configs[\"train_config\"])\n if optimizer_type == \"rms_prop_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.rms_prop_optimizer\n elif optimizer_type == \"momentum_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.momentum_optimizer\n elif optimizer_type == \"adam_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.adam_optimizer\n else:\n raise TypeError(\"Optimizer %s is not supported.\" % optimizer_type)\n\n learning_rate_type = get_learning_rate_type(optimizer_config)\n if learning_rate_type == \"constant_learning_rate\":\n constant_lr = optimizer_config.learning_rate.constant_learning_rate\n constant_lr.learning_rate = learning_rate\n elif learning_rate_type == \"exponential_decay_learning_rate\":\n exponential_lr = (\n optimizer_config.learning_rate.exponential_decay_learning_rate)\n exponential_lr.initial_learning_rate = learning_rate\n elif learning_rate_type == \"manual_step_learning_rate\":\n manual_lr = optimizer_config.learning_rate.manual_step_learning_rate\n original_learning_rate = manual_lr.initial_learning_rate\n learning_rate_scaling = float(learning_rate) / original_learning_rate\n manual_lr.initial_learning_rate = learning_rate\n for schedule in manual_lr.schedule:\n schedule.learning_rate *= learning_rate_scaling\n elif learning_rate_type == \"cosine_decay_learning_rate\":\n cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate\n learning_rate_base = cosine_lr.learning_rate_base\n warmup_learning_rate = cosine_lr.warmup_learning_rate\n warmup_scale_factor = warmup_learning_rate / learning_rate_base\n cosine_lr.learning_rate_base = learning_rate\n cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate\n else:\n raise TypeError(\"Learning rate %s is not supported.\" % learning_rate_type)", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr * (0.3 ** (epoch // args.lr_decay))\n for param_group in 
optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(init_lr, optimizer, epoch, n=100):\n init_lr = init_lr * (0.1 ** (epoch // n))\n print('learning rate : ', init_lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = init_lr", "def adjust_learning_rate(start_lr, optimizer, epoch, total_epoch_num):\n #lr = start_lr * (0.1 ** (epoch // 30))\n lr = start_lr * (0.3 ** (epoch // 5))\n if epoch==total_epoch_num:\n lr = lr * 0.3\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (args.expo ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_lr(self):\n learning_rate = self.params.base_lr * (1 - float(self.epoch) / self.params.num_epoch) ** self.params.power\n for param_group in self.opt.param_groups:\n param_group['lr'] = learning_rate\n print('Change learning rate into %f' % (learning_rate))\n self.summary_writer.add_scalar('learning_rate', learning_rate, self.epoch)", "def adjust_learning_rate(optimizer, epoch):\n initial_lr = args.lr\n if epoch <= 150:\n lr = initial_lr\n elif epoch <=225:\n lr = initial_lr/10\n else:\n lr = initial_lr/100\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"=\"*100)\n print('At epoch:',epoch,\" lr is:\",lr)", "def adjust_learning_rate(optimizer, gamma, step):\r\n lr = args.lr * (gamma ** (step))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(initial_lr, optimizer, epoch, every_epoch):\n lr = initial_lr * (0.1 ** (epoch // every_epoch))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = learning_rate * (0.88 ** (epoch))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(lr, optimizer, epoch):\n lr = lr * (0.1 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(self, epoch):\n lr = self.lr * (0.5 ** (epoch // 2))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.4 ** (epoch // 4))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(epoch, learn_rate, decay_step, decay_rate, optimizer):\n steps = np.sum(epoch > np.asarray(decay_step))\n if steps > 0:\n new_lr = learn_rate * (decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 100))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, frame_idx):\r\n lrr = maxlr * (0.1 ** (frame_idx//replay_size))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lrr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * ((1 - 0.015) ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1**(epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\r\n lr = args.lr\r\n if epoch >= 0.5 * args.epoch:\r\n lr /= 10\r\n if epoch >= 0.75 * args.epoch:\r\n lr /= 10\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def assign_learning_rate(session, lr_update, lr_placeholder, 
new_lr):\n session.run(lr_update, feed_dict={lr_placeholder: new_lr})", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // args.lr_drop))\n print('lr= '+str(lr), flush=True)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(learning_rate, optimizer, epoch):\n lr = learning_rate * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\r\n lr = 0.001 * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate_D(start_lr, optimizer, epoch):\n #lr = start_lr * (0.1 ** (epoch // 30))\n lr = start_lr * (0.3 ** (epoch // 5))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate_and_learning_taks(optimizer, epoch, args):\n if epoch >= args.step2: \n lr = args.lr * 0.01\n elif epoch >= args.step1:\n lr = args.lr * 0.1\n else:\n lr = args.lr\n \n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # Return training classes\n return range(len(args.dataset))", "def adjust_learning_rate(optimizer, epoch):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr\n if epoch >= 75:\n lr = args.lr * 0.1\n if epoch >= 90:\n lr = args.lr * 0.01\n if epoch >= 100:\n lr = args.lr * 0.001\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.95 ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args_lr, epoch_adjust):\n lr = args_lr * (0.1 ** (epoch // epoch_adjust))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr\n if epoch >= 60:\n lr = args.lr * 0.1\n if epoch >= 90:\n lr = args.lr * 0.01\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch):\n lr = args.lr * (0.1 ** (epoch // 15))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, lr_factor, epoch):\n #lr = args.lr * (0.1 ** (epoch // 30))\n print('the learning rate is set to {0:.5f}'.format(lr_factor[epoch]*args.lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_factor[epoch]*args.lr", "def adjust_learning_rate2(opt, optimizer, epoch, F_txt):\n\tsteps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))\n\tif steps > 0:\n\t\tnew_lr = opt.lr * (opt.lr_decay_rate ** steps)\n\t\tprint('Learning rate: %f' %new_lr)\n\t\tprint('Learning rate: %f' %new_lr, file=F_txt)\n\t\tfor param_group in optimizer.param_groups:\n\t\t\tparam_group['lr'] = new_lr", "def adjust_learning_rate_adam(optimizer, epoch):\n \n boundary = [args.epochs//5*4]\n lr = args.lr * 0.2 ** int(bisect.bisect_left(boundary, epoch))\n print('Learning 
rate: %f'%lr)\n #print(epoch, lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n \n return lr", "def adjust_learning_rate(optimizer, decay=0.1):\n for param_group in optimizer.param_groups:\n param_group['lr'] = decay * param_group['lr']", "def adjust_learning_rate(optimizer, epoch):\n lr = opt.lr * (0.5 ** (epoch // opt.step))\n return lr", "def set_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def set_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr" ]
[ "0.82430375", "0.8091104", "0.8081948", "0.8057003", "0.7882125", "0.7729992", "0.7728464", "0.7728464", "0.77180463", "0.7680515", "0.7672647", "0.7670474", "0.76550764", "0.76442766", "0.762199", "0.76109266", "0.7595715", "0.758477", "0.7532394", "0.7522077", "0.75184435", "0.75164783", "0.7510736", "0.75061715", "0.75036716", "0.7503188", "0.7501985", "0.7486009", "0.74847925", "0.7483046", "0.7466416", "0.7453013", "0.74525166", "0.74525166", "0.74525166", "0.74525166", "0.745227", "0.7449238", "0.7449238", "0.7449238", "0.7444405", "0.74337715", "0.7386639", "0.73852015", "0.73797816", "0.73722535", "0.7371448", "0.7370674", "0.7370674", "0.7370674", "0.73666644", "0.73665017", "0.7358202", "0.73541516", "0.7349919", "0.7348124", "0.73480767", "0.7343184", "0.7339578", "0.73375005", "0.73370606", "0.7330537", "0.73293024", "0.7328796", "0.73269045", "0.73256457", "0.7323285", "0.7319275", "0.731393", "0.73118967", "0.7296129", "0.72921395", "0.72880685", "0.7284732", "0.7280056", "0.7274082", "0.7272155", "0.7268889", "0.7264714", "0.7264185", "0.7261742", "0.726159", "0.7260042", "0.7256089", "0.725024", "0.72468495", "0.72468495", "0.72468495", "0.7245352", "0.72452456", "0.7243234", "0.7241914", "0.72390044", "0.7235371", "0.72308016", "0.7225626", "0.72228616", "0.7213339", "0.7212687", "0.7212687" ]
0.81921303
1
construct any person or destroyable object
def __init__( self, id: int, coordinates: Tuple[int, int], name: str, health: 'Health', ai: 'Ai', parts: Dict[str, 'Part'], actions: Set['Action'], skills: Set['Skill'] = None, effects: Effects = None, team: 'Team' = None, ) -> None: self.id = id self.position = coordinates self.name = name self.health = health self.ai = ai self.parts = parts self.actions = actions self.skills = skills or set() self.effects = effects or Effects() self.team = team
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_persona(x):\n return Persona(x)", "def make_object():\n return object()", "def create_person(person: Person = Body(...)):\n return person", "def new_object(self):\r\n\t\tpass", "def new(self, obj):\n pass", "def instantiate(obj):\n return obj() if isinstance(obj, type) else obj", "def create_object(object_name):\n if object_name == 'deathstar':\n return Deathstar()\n elif object_name == 'mercury':\n return Mercury()\n elif object_name == 'venus':\n return Venus()\n elif object_name == 'mars':\n return Mars()\n elif object_name == 'earth':\n return Earth()\n elif object_name == 'moon':\n return Moon()\n elif object_name == 'tatooine':\n return Tatooine()\n elif object_name == 'mordor':\n return Mordor()\n elif object_name == 'xwing':\n return Xwing()", "def init_obj(obj_name):\n ret = type(obj_name, (object,), {})\n return ret", "def person_object_factory():\n person = {\n 'lastName': rl_fake().last_name(),\n 'gender': random.choice(('M', 'F'))\n }\n\n # Make the person's name match their gender.\n person['firstName'] = rl_fake().first_name_male() if person['gender'] == 'M' else rl_fake().first_name_female()\n\n # These are all optional in the DB. Over time, we'll try all possibilities.\n if flip():\n person['birthday'] = rl_fake().date_of_birth(minimum_age=18).strftime('%Y-%m-%d')\n if flip():\n person['phone'] = rl_fake().phone_number()\n if flip():\n person['email'] = rl_fake().email()\n return person", "def make_object(obj, kwargs):\n return obj(**kwargs)", "def create():", "def create():", "def init_test():\n # What we want:\n # Want to create an object with initial state regardless of constructor\n # args.\n\n class Person(object):\n \n age = 10\n\n def __new__(cls, *args, **kargs) :\n # It seems to me the args are passed only to allow customisation based\n # on them, since they are then passed to __init__ following this call in\n # typical creation.\n \n # Create the instance, also passing args - since may also be used for\n # customisation.\n self = super(Person, cls).__new__(cls, *args, **kargs)\n # Initialise some variables.\n self.name = None\n self.surname = None\n self.age = 3\n\n # Return the instance.\n return self\n \n def __init__(self, name, surname):\n d(\"Constructor called\")\n self.name, self.surname = name, surname\n \n def __str__(self) :\n return \"[%s, %s]\" % (self.name, self.surname)\n\n\n person = Person(\"john\", \"smith\")\n assert(person.name == \"john\" and person.surname == \"smith\")\n person = Person.__new__(Person)\n assert(person.name == None and person.surname == None)\n\n # So it seems python falls back on class var if obj var of same name not found.\n d(person.__class__.__dict__)\n d(person.age)\n d(person.__class__.age)", "def create_person(self):", "def create_individual(self):\n pass", "def __init__(self, name):\n # Person class has an attribute called name\n self.name = name", "def new(name=None):", "def __init__(self, **params):\n self.__object = object_param(**params)", "def make_objects(self):\n pass", "def test_constructors(self, name, obj):\n assert getattr(forge, name) == obj", "def test_constructors(self, name, obj):\n assert getattr(forge, name) == obj", "def create_instance(c_instance):\n return MonoPedal(c_instance)", "def instantiate(name, *args, **kwargs):\n ...", "def create_instance(self,name):\n print \"INFO : new %s\" % name\n return self.get_class(name)()", "def obj_factory (d):\n try:\n kind = d['kind']\n o = self.vtable.get (kind, Obj) (d)\n except KeyError:\n o = obj (d)\n return o", "def __newobj__(cls, 
*args):\n return cls.__new__(cls, *args)", "def creator():\n return SeamlessFkIk()", "def default_factory(*args, **kwargs):\n obj = RandomGameEntity()\n obj.build(*args, **kwargs)\n return obj", "def create(cls, _):\n return cls", "def __init__(self, RoomName = \"living\"):\n self.room_name = RoomName\n self.objects = Objects()\n self.character = Player()", "def rpConstruct(cls):\r\n return cls(None, None, None)", "def __init__(self,typing,reflection,year):\n self.name = str(self)\n self.typing = typing\n self.reflection = reflection\n self.year = year", "def createNewEmptyObject(objName=\"new Empty Object\"):\n # create a new object, which mesh will be replaced by the new BMesh\n mesh = bpy.data.meshes.new(objName)\n obj = bpy.data.objects.new(objName, mesh)\n obj.location = bpy.context.scene.cursor.location\n bpy.context.collection.objects.link(obj)\n\n # set object as active\n bpy.context.view_layer.objects.active = obj\n\n return obj, mesh", "def create(*args):", "def create_ion_object(self, object_params):\n new_obj = IonObject(object_params[\"type_\"])\n\n # Iterate over the parameters to add to object; have to do this instead\n # of passing a dict to get around restrictions in object creation on setting _id, _rev params\n for param in object_params:\n self.set_object_field(new_obj, param, object_params.get(param))\n\n new_obj._validate() # verify that all of the object fields were set with proper types\n return new_obj", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self, *args):\n this = _libsbml.new_Species(*args)\n try: self.this.append(this)\n except: self.this = this", "def create(cls, **dictionary):\n new_inst = cls.__new__(cls)\n if cls.__name__ == \"Rectangle\":\n new_inst.__init__(42, 98)\n elif cls.__name__ == \"Square\":\n new_inst.__init__(42)\n new_inst.update(**dictionary)\n return new_inst", "def make(self, **kwargs):\n raise NotImplementedError", "def __init__(self, str=None, type=None, dna=None, r=None, b=None, g=None):\n # have they passed in a stringified DNA object?\n if (str != None):\n self.makeFromNetString(str)\n # have they specified what type of DNA?\n elif (type != None):\n if (type == 's'): # Suit\n self.newSuit()\n else:\n # Invalid type\n assert 0\n else:\n # mark DNA as undefined\n self.type = 'u'", "def make_actor(self, obs=None, reuse=False, scope=\"pi\"):\n raise NotImplementedError", "def __init__(self, obj):\n if type(obj) is Monomial:\n Polynomial.__init__(self, obj)\n else:\n Polynomial.__init__(self, *obj.monomials)", "def __init__(self):\n IContainsAnimals.__init__(self, 15)\n IContainsPlants.__init__(self, 3)\n Identifiable.__init__(self)\n Biome.__init__(self, \"Coastline\")", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n object = cls(1, 1)\n object.update(**dictionary)\n return object\n\n if cls.__name__ == \"Square\":\n object = cls(1)\n object.update(**dictionary)\n return object", "def createBasicObject(self):\n\n\t\treturn self._createBasicObjFunct(self)", "def __init__(self, author, title, text=\"\", haiku=\"False\"):\n self.author = author\n self.title = title\n self.text = text\n self.haiku = haiku\n self.name = self\n Poem.obj_created += 1", "def construct(obj):\n if isinstance(obj, OrderedDict):\n new_obj = OrderedDict()\n for key, value in obj.items():\n new_obj[key] = construct(value)\n elif not isinstance(obj, OrderedDict) and isinstance(obj, dict):\n new_obj = dict()\n for key, value in obj.items():\n new_obj[key] = construct(value)\n elif isinstance(obj, list):\n new_obj = 
list()\n for value in obj:\n new_obj.append(construct(value))\n elif isinstance(obj, tuple):\n base = list()\n for value in obj:\n base.append(construct(value))\n new_obj = tuple(base)\n elif isinstance(obj, str):\n new_obj = str(obj)\n elif isinstance(obj, (int, float, complex, type(None))) or inspect.isclass(obj):\n new_obj = obj\n else:\n raise TypeError(\"Object of unsupported type was passed to construct function: %s\" % type(obj))\n return new_obj", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def _instantiate(decoder, model=None, dataset=None):\n if decoder is None: return U.Identity()\n\n if isinstance(decoder, str): name = decoder\n else: name = decoder['_name_']\n\n # Extract arguments from attribute names\n dataset_args = utils.config.extract_attrs_from_obj(dataset, *dataset_attrs.get(name, []))\n model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))\n # Instantiate decoder\n obj = utils.instantiate(registry, decoder, *model_args, *dataset_args)\n return obj", "def __init__(self, *args):\n this = _libsbml.new_CompartmentType(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, name, age):\r\n self.name = name\r\n self.age = age", "def create():\n pass", "def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory", "def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory", "def __constructor__(self):\n return type(self)", "def test_method_new_with_one_param(self):\n\n regex = \"object has no attribute 'id'\"\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new(None)\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new([])\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new([1, 2, 3])\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new({})\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new({1, 2, 3})\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new(True)\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new(False)\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new(dict())\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new({'id': 123})\n with self.assertRaisesRegex(AttributeError, regex):\n storage.new('Ranmod value')", "def create(cls, **dictionary):\n if cls.__name__ == 'Square':\n object = cls(1)\n object.update(**dictionary)\n return object\n\n if cls.__name__ == 'Rectangle':\n object = cls(1, 2)\n object.update(**dictionary)\n return object", "def __init__(self,partnership_type,person1,person2,s):\n \n self.type = partnership_type\n self.persons = [person1,person2]\n \n person1.add_partner(person2,s)\n person2.add_partner(person1,s)", "def account_object_factory(person_id):\n fake = Faker() # Use a generic one; others may not have all methods.\n account = {\n 'username': username_factory(),\n 'password': fake.password(),\n 'personId': person_id\n }\n return account", "def __init__(self, name, objects, description, exits={}):\n self.name = name\n self.objects = objects\n self.description = description\n self.exits = exits", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, 
name, age):\n self.name = name\n self.age = age", "def test_base_object_create(self):\n obj1 = Base()\n test1 = str(obj1)\n self.assertTrue(test1[:50], '<models.base.Base object at ')\n self.assertTrue(obj1.id, 1)\n\n obj2 = Base(420)\n test2 = str(obj2)\n self.assertTrue(test2[:50], '<models.base.Base object at ')\n self.assertTrue(obj2.id, 420)\n\n obj3 = Base()\n test3 = str(obj3)\n self.assertTrue(test3[:50], '<models.base.Base object at ')\n self.assertTrue(obj3.id, 2)", "def new_object(cls):\n return cls.for_value([])", "def create_factory(cls, *args):\n raise NotImplementedError", "def create_instance(c_instance):\n\treturn 0", "def _from_other(cls, obj):", "def _instantiate(clz, **data):\n\n new_obj = clz()\n setattr(new_obj, \"data\", data)\n for key, val in deepcopy(data).items():\n setattr(new_obj, key, val)\n return new_obj", "def __init__(self, name, surname, phone_number, creation_date):\n self.name = name\n self.surname = surname\n self.phone_number = phone_number\n self.creation_date = creation_date", "def createNew(cls, x0, y0, z0, a1, b1, c1, a2, b2, c2):\n p0 = Point(x0, y0, z0)\n d1 = Vector(a1, b1, c1)\n d2 = Vector(a2, b2, c2)\n return cls(p0, d1, d2)", "def build_person(first_name, last_name, age=''):\n person = {'first': first_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def build_person(first_name, last_name, age=''):\n person = {'first': first_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def make(self):\n pass", "def _create(self, **attributes: Dict[str, object]) -> str:\n pass", "def build_person(first_name, last_name, age=''):\r\n person = {'first':first_name, 'last':last_name}\r\n if age:\r\n person['age'] = age\r\n return person", "def __init__(self, name, color, age):\n self.name = name\n self.color = color\n self.age = age\n self.breed = \"something\"", "def test_create(self):\n cat = self.animal_factory.create(\"cat\")\n dog = self.animal_factory.create(\"dog\")\n\n self.assertEquals(self.cat_class, cat.__class__)\n self.assertEquals(self.dog_class, dog.__class__)\n\n self.assertEquals(\"Meow\", cat.speak())\n self.assertEquals(\"Woof\", dog.speak())", "def __init__(self, *args):\n this = _libsbml.new_SpeciesType(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, nom, prenom):\r\n self.nom = nom\r\n self.prenom = prenom\r\n self.age = 33", "def __init__(self, name=None, phone=None, country=None, fax=None, email=None, address=None, description=None):\n \n \n\n self._name = None\n self._phone = None\n self._country = None\n self._fax = None\n self._email = None\n self._address = None\n self._description = None\n self.discriminator = None\n\n if name is not None:\n self.name = name\n if phone is not None:\n self.phone = phone\n if country is not None:\n self.country = country\n if fax is not None:\n self.fax = fax\n if email is not None:\n self.email = email\n if address is not None:\n self.address = address\n if description is not None:\n self.description = description", "def __init__(self, *args):\n this = _libsbml.new_ModelCreator(*args)\n try: self.this.append(this)\n except: self.this = this", "def build_person(first_name, last_name, middle_name='', age=None): \n person = {'first': first_name, 'middle': middle_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def Create(self):\n raise NotImplementedError()", "def create(cls, **dictionary):\n dummy_obj = cls(1, 1)\n dummy_obj.update(**dictionary)\n return dummy_obj", "def 
__init__(self, id=None, name=None, created=None, creator=None):\n\n self._id = None\n self._name = None\n self._created = None\n self._creator = None\n\n self.id = id\n self.name = name\n self.created = created\n self.creator = creator", "def make_bb_object(name, data):\n global BLACKBOARD, TRACE_LEVEL\n bb_obj = BB_object(name, data)\n if TRACE_LEVEL > 2:\n print \"\\tCreating {0} object: {1}\".format( type(data), bb_obj )\n BLACKBOARD[name] = bb_obj\n signal_creation_event(bb_obj)\n return bb_obj", "def __init__(self, name, age, gender):\n\n self._name = name\n self._age = age\n self._gender = gender\n self._friend = None", "def create(cls, data=None):\n # allow create() calls with no input\n if not data:\n data = {}\n\n return cls(**data)", "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self):\n PrimaryObject.__init__(self)\n NoteBase.__init__(self)\n AddressBase.__init__(self)\n UrlBase.__init__(self)\n self.type = RepositoryType()\n self.name = \"\"", "def __init__(self, name, race, sex, age):\n self.Race = race\n self.Sex = sex\n self.Age = age\n self.Name = name", "def __init__(self, name, created_by):\n self.name = name\n self.created_by = created_by", "def create_novel(self, title, author, isbn):\n # new_fiction = Fiction(title, author, isbn)\n # return new_fiction\n return Fiction(title, author, isbn)", "def _finishConstruction(self, obj):\n return obj", "def _finishConstruction(self, obj):\n return obj", "def naked(**kwds):\n # get the constructor\n from .Naked import Naked\n\n # build one and return it\n return Naked(**kwds)", "def factory(self):\n raise NotImplementedError()" ]
[ "0.726856", "0.7046766", "0.6489531", "0.6340825", "0.6317375", "0.62946725", "0.6185293", "0.61081696", "0.6088009", "0.6050287", "0.59465456", "0.59465456", "0.59365165", "0.5905628", "0.5864767", "0.58611476", "0.585532", "0.5852625", "0.58511806", "0.58508277", "0.58508277", "0.5822777", "0.5805516", "0.5805085", "0.57722646", "0.5770833", "0.57506347", "0.57264376", "0.5718462", "0.5698952", "0.56941134", "0.568297", "0.5680539", "0.5669099", "0.5665449", "0.5659543", "0.5636501", "0.5625614", "0.5617737", "0.56154877", "0.5586899", "0.5585832", "0.55798054", "0.5571039", "0.5570286", "0.556881", "0.55529034", "0.55506206", "0.55506206", "0.55506206", "0.55392283", "0.5526039", "0.55218923", "0.5521318", "0.55140775", "0.55140775", "0.5503623", "0.5500371", "0.549981", "0.5492331", "0.5491675", "0.54863924", "0.54847026", "0.54847026", "0.54847026", "0.54847026", "0.5481512", "0.54715353", "0.5467533", "0.54650086", "0.54648995", "0.54646033", "0.5463082", "0.5461729", "0.545964", "0.545964", "0.5456578", "0.54434866", "0.54298764", "0.54267", "0.54182273", "0.541298", "0.54058063", "0.5396311", "0.53926235", "0.53883874", "0.5381071", "0.53804046", "0.536869", "0.5366147", "0.53646505", "0.53627133", "0.5361407", "0.5358966", "0.5355957", "0.5355265", "0.5351996", "0.5342197", "0.5342197", "0.53388536", "0.53375465" ]
0.0
-1
choose targets and actions
def do_action(self, ally: Set['Entity'], enemy: Set['Entity']) -> None: self.effects.update() available_actions, available_ally_targets, available_enemy_targets = self.get_actions(ally, enemy) action, target = self.ai.choose_action(self, available_actions, available_ally_targets, available_enemy_targets) if not action: action = pass_action target = self action.do(self, target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self, targets):", "def choose_action(self):\r\n pass", "def Action(self):\n for target in self._targets:\n target.Action()", "def choose_action(self, board, possible_actions):\r\n pass", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions_next = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in\n zip(self.maddpg_agent, obs_all_agents)]\n return target_actions_next", "def select_action(self):\n pass", "def choose_action(self, obs, **kwargs):\n pass", "def onActionChosen(self, agent, action):\n\n pass", "def select_action(self, state):", "def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]", "def state_choose_do(cfg, app, win, events):", "def actions():\n pass", "def choose_action(self, state, task=0):\n pass", "def targets_placeholder(self):", "def chooseAction(self):\n print \"nothing\"\n pass", "def pickUpActionAny(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colour = kwargs[\"fname\"]\n self.locator.recognise_grid()\n red = self.locator.detect_colour(0, 'red')\n rospy.loginfo(\"permutation(): looking for red object: %s\" % str(red))\n blue = self.locator.detect_colour(0, 'blue')\n rospy.loginfo(\"permutation(): looking for blue object: %s\" % str(blue))\n if red[0] < blue[0]:\n colour = 'blue'\n else:\n colour = 'red'\n\n self.locator.update_pose() #get current pose of arm\n\n success = self.locator.locate(colour, pose_offset, 1)\n self.mm.loadMenu(\"actionMenu\")", "def _run_actions(self):\n\n if \"install-bento\" in self.actions:\n self._do_action_bento_setup()\n\n if \"create-tables\" in self.actions:\n self._do_action_tables_create()\n\n if \"import-ratings\" in self.actions:\n self._do_action_import_ratings()\n\n if \"import-user-info\" in self.actions:\n self._do_action_import_user_info()\n\n if \"import-movie-info\" in self.actions:\n self._do_action_import_movie_info()\n\n if \"train-item-item-cf\" in self.actions:\n self._do_action_train()\n\n if \"register-freshener\" in self.actions:\n self._do_action_register_freshener()", "def target_act(self, obs, noise=0.0):\n #return target_actions\n target_actions = torch.zeros(obs.shape[:2] + (self.action_size,), dtype=torch.float, device=device)\n for i in range(self.num_agents):\n target_actions[:, i, :] = self.maddpg_agent[i].target_act(obs[:, i])\n \n return target_actions", "def actions() -> None:\n pass", "def cursorless_multiple_target_command(\n action: str,\n targets: list[dict],\n arg1: any = NotSet,\n arg2: any = NotSet,\n arg3: any = NotSet,\n ):\n args = list(filter(lambda x: x is not NotSet, [arg1, arg2, arg3]))\n actions.user.vscode_with_plugin_and_wait(\n \"cursorless.command\",\n get_spoken_form(),\n action,\n targets,\n *args,\n )", "async def _targets_heist(self, ctx):\r\n guild = ctx.guild\r\n theme = await self.thief.get_guild_theme(guild)\r\n targets = await self.thief.get_guild_targets(guild)\r\n t_vault = theme[\"Vault\"]\r\n\r\n if len(targets.keys()) < 0:\r\n msg = (\"There aren't any targets! 
To create a target use {}heist \"\r\n \"createtarget .\".format(ctx.prefix))\r\n else:\r\n target_names = [x for x in targets]\r\n crews = [int(subdict[\"Crew\"]) for subdict in targets.values()]\r\n success = [str(subdict[\"Success\"]) + \"%\" for subdict in targets.values()]\r\n vaults = [subdict[\"Vault\"] for subdict in targets.values()]\r\n data = list(zip(target_names, crews, vaults, success))\r\n table_data = sorted(data, key=itemgetter(1), reverse=True)\r\n table = tabulate(table_data, headers=[\"Target\", \"Max Crew\", t_vault, \"Success Rate\"])\r\n msg = \"```C\\n{}```\".format(table)\r\n\r\n await ctx.send(msg)", "def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action", "def actionSelector(self): \n if self.Temp!=0:\n if len(self.lessons) > 60 and self.var_T: \n # if the agent haven't already gotten food since a certain time \n # we increase the temperature by 0.001 \n if self.count_without_food>12:\n self.Temp += 0.01 \n if self.Temp>=(self.var_T[0]): \n self.Temp = self.var_T[0] \n # otherwise we decrease the temperatur by 0.001 \n else: \n self.Temp -= 0.001\n if self.Temp <= (self.var_T[-1]):\n self.Temp = self.var_T[-1]\n \n s = np.sum([np.exp(float(k)/self.Temp) for k in self.U_list])\n\n self.action_proba =[np.exp(float(m)/self.Temp)/s for m in self.U_list]\n action = np.random.choice(np.arange(4),p=self.action_proba) # choice a random choice relating to the probability distribution given by the softmax algorith \n else:\n action = np.argmax(self.U_list)\n return action", "def cursorless_single_target_command(\n action: str,\n target: dict,\n arg1: Any = NotSet,\n arg2: Any = NotSet,\n arg3: Any = NotSet,\n ):\n actions.user.cursorless_multiple_target_command(\n action, [target], arg1, arg2, arg3\n )", "def select_actions(self, inputs, avail_actions, tformat, info, hidden_states=None, test_mode=False, **kwargs):\n noise_params = kwargs.get(\"noise_params\", None)\n\n T_env = info[\"T_env\"]\n test_suffix = \"\" if not test_mode else \"_test\"\n\n if self.args.agent_level1_share_params:\n\n # --------------------- LEVEL 1\n\n if self.is_obs_noise(test_mode):\n inputs_level1, inputs_level1_tformat = _build_model_inputs(self.input_columns_level1_noisy,\n inputs,\n to_variable=True,\n inputs_tformat=tformat)\n inputs_level1_tformat = \"a*bs*t*v\"\n else:\n inputs_level1, inputs_level1_tformat = _build_model_inputs(self.input_columns_level1,\n inputs,\n to_variable=True,\n inputs_tformat=tformat)\n if self.args.debug_mode:\n _check_nan(inputs_level1)\n\n out_level1, hidden_states_level1, losses_level1, tformat_level1 = self.model.model_level1(inputs_level1[\"agent_input_level1\"],\n hidden_states=hidden_states[\"level1\"],\n loss_fn=None,\n tformat=inputs_level1_tformat,\n n_agents=self.n_agents,\n test_mode=test_mode,\n **kwargs)\n\n\n if self.args.debug_mode:\n _check_nan(inputs_level1)\n\n if self.is_obs_noise(test_mode):\n # have to do correlated sampling of what pair id everyone agrees on\n bs = out_level1.shape[_bsdim(inputs_level1_tformat)]\n ftype = th.FloatTensor if not out_level1.is_cuda else th.cuda.FloatTensor\n sampled_pair_ids = ftype(*out_level1.shape[:-1], 1)\n for _b in range(bs):\n ps = out_level1[:, _b]\n rn = np.random.random()\n for _a in range(ps.shape[0]):\n act = 0\n s = ps[_a, 0, act]\n while s <= rn:\n act += 1\n s += 
ps[_a, 0, act]\n sampled_pair_ids[_a, _b, 0, :] = act\n\n modified_inputs_level1 = inputs_level1\n selected_actions_format_level1 = \"a*bs*t*v\"\n else:\n # TODO: This is the pair-product encoded ID of both selected pairs.\n sampled_pair_ids, modified_inputs_level1, selected_actions_format_level1 = self.action_selector.select_action({\"policies\":out_level1},\n avail_actions=None,\n tformat=tformat_level1,\n test_mode=test_mode)\n _check_nan(sampled_pair_ids)\n\n if self.args.debug_mode in [\"level2_actions_fixed_pair\"]:\n \"\"\"\n DEBUG MODE: LEVEL2 ACTIONS FIXED PAIR\n Here we pick level2 actions from a fixed agent pair (0,1) and the third action from IQL\n \"\"\"\n assert self.n_agents == 3, \"only makes sense in n_agents=3 scenario\"\n sampled_pair_ids.fill_(0.0)\n\n # sample which pairs should be selected\n # TODO: HAVE TO ADAPT THIS FOR NOISY OBS!\n if self.is_obs_noise(test_mode):\n self.selected_actions_format = selected_actions_format_level1\n else:\n self.actions_level1 = sampled_pair_ids.clone()\n self.selected_actions_format = selected_actions_format_level1\n self.policies_level1 = modified_inputs_level1.squeeze(0).clone()\n\n if self.is_obs_noise(test_mode):\n inputs_level2, inputs_level2_tformat = _build_model_inputs(self.input_columns_level2_noisy,\n inputs,\n to_variable=True,\n inputs_tformat=tformat,\n )\n else:\n inputs_level2, inputs_level2_tformat = _build_model_inputs(self.input_columns_level2,\n inputs,\n to_variable=True,\n inputs_tformat=tformat,\n )\n\n assert self.args.agent_level2_share_params, \"not implemented!\"\n\n\n if \"avail_actions_pair\" in inputs_level2[\"agent_input_level2\"]:\n pairwise_avail_actions = inputs_level2[\"agent_input_level2\"][\"avail_actions_pair\"]\n else:\n assert False, \"NOT SUPPORTED CURRENTLY.\"\n avail_actions1, params_aa1, tformat_aa1 = _to_batch(inputs_level2[\"agent_input_level2\"][\"avail_actions_id1\"], inputs_level2_tformat)\n avail_actions2, params_aa2, _ = _to_batch(inputs_level2[\"agent_input_level2\"][\"avail_actions_id2\"], inputs_level2_tformat)\n pairwise_avail_actions = th.bmm(avail_actions1.unsqueeze(2), avail_actions2.unsqueeze(1))\n pairwise_avail_actions = _from_batch(pairwise_avail_actions, params_aa2, tformat_aa1)\n\n ttype = th.cuda.FloatTensor if pairwise_avail_actions.is_cuda else th.FloatTensor\n delegation_avails = Variable(ttype(pairwise_avail_actions.shape[0],\n pairwise_avail_actions.shape[1],\n pairwise_avail_actions.shape[2], 1).fill_(1.0), requires_grad=False)\n pairwise_avail_actions = th.cat([delegation_avails, pairwise_avail_actions], dim=_vdim(tformat))\n\n\n out_level2, hidden_states_level2, losses_level2, tformat_level2 \\\n = self.model.models[\"level2_{}\".format(0)](inputs_level2[\"agent_input_level2\"],\n hidden_states=hidden_states[\"level2\"],\n loss_fn=None,\n tformat=inputs_level2_tformat,\n # sampled_pair_ids=sampled_pair_ids, # UNUSED?\n pairwise_avail_actions=pairwise_avail_actions,\n test_mode=test_mode,\n seq_lens=inputs[\"agent_input_level2__agent0\"].seq_lens,\n **kwargs)\n\n if self.is_obs_noise(test_mode):\n\n # have to do correlated sampling of what pair id everyone agrees on\n bs = out_level2.shape[_bsdim(inputs_level2_tformat)]\n ftype = th.FloatTensor if not out_level2.is_cuda else th.cuda.FloatTensor\n pair_sampled_actions = ftype(*out_level2.shape[:-1], 1).view(int(out_level2.shape[0]/2),\n 2,\n *out_level2.shape[1:-1],\n 1)\n for _b in range(bs):\n ps = out_level2.view(int(out_level2.shape[0]/2),\n 2,\n *out_level2.shape[1:])[:, :, _b]\n avail_actions = 
pairwise_avail_actions.view(int(out_level2.shape[0]/2),\n 2,\n *out_level2.shape[1:])[:, :, _b]\n\n _sum0 = th.sum(ps[:, 0] * avail_actions[:, 0], dim=-1, keepdim=True)\n _sum0_mask = (_sum0 == 0.0)\n _sum0.masked_fill_(_sum0_mask, 1.0)\n ps[:, 0] = ps[:, 0] * avail_actions[:, 0] / _sum0\n\n _sum1 = th.sum(ps[:, 1] * avail_actions[:, 1], dim=-1, keepdim=True)\n _sum1_mask = (_sum1 == 0.0)\n _sum1.masked_fill_(_sum1_mask, 1.0)\n ps[:, 1] = ps[:, 1] * avail_actions[:, 1] / _sum1\n\n rns = np.random.random(ps.shape[0]) #one seed for each pair / batch\n for _a in range(ps.shape[0]):\n for _j in range(2):\n act = 0\n s = ps[_a, _j, 0, act]\n while s <= rns[_a]:\n act += 1\n s += ps[_a, _j, 0, act]\n if act == 122: # DEBUG\n a = 5\n pass\n pair_sampled_actions[_a, _j, _b, 0, :] = act\n\n # TODO: Fix the return values so I can debug in episode buffer!!!\n modified_inputs_level2 = inputs_level2\n selected_actions_format_level2 = \"a*bs*t*v\"\n else:\n\n # TODO: Implement for noisy obs!! # Need again correlated sampling\n pair_sampled_actions, \\\n modified_inputs_level2, \\\n selected_actions_format_level2 = self.action_selector.select_action({\"policies\":out_level2},\n avail_actions=pairwise_avail_actions.data,\n tformat=tformat_level2,\n test_mode=test_mode)\n # if th.sum(pair_sampled_actions == 26.0) > 0.0:\n # a = 5\n\n if sampled_pair_ids.shape[_tdim(tformat_level1)] > 1: # only used for mackrl sampling\n sampled_pairs = th.cat([ self.magic_map[sampled_pair_ids[:,:,_t:_t+1,:].long()].squeeze(2) for _t in range(sampled_pair_ids.shape[_tdim(tformat_level1)]) ],\n dim=_tdim(tformat_level1))\n else:\n sampled_pairs = self.magic_map[sampled_pair_ids.long()].squeeze(2)\n\n self.actions_level2 = pair_sampled_actions.clone()\n\n if self.is_obs_noise(test_mode):\n self.actions_level2_sampled = []\n for _aid in range(self.n_agents):\n self.actions_level2_sampled.append([])\n for i in range(sampled_pairs.shape[-1]):\n self.actions_level2_sampled[_aid].append(\n pair_sampled_actions[:, i].gather(0, sampled_pairs[_aid:_aid+1, :, :, i:i + 1].long()))\n self.actions_level2_sampled[_aid] = th.cat(self.actions_level2_sampled[_aid], 0)\n else:\n # ToDO: Gather across all selected pairs!!\n self.actions_level2_sampled = []\n for i in range(sampled_pairs.shape[-1]):\n self.actions_level2_sampled.append(pair_sampled_actions.gather(0, sampled_pairs[:,:,:,i:i+1].long()))\n\n self.actions_level2_sampled = th.cat(self.actions_level2_sampled, 0)\n self.selected_actions_format_level2 = selected_actions_format_level2\n self.policies_level2 = modified_inputs_level2.clone()\n\n\n inputs_level3, inputs_level3_tformat = _build_model_inputs(self.input_columns_level3,\n inputs,\n to_variable=True,\n inputs_tformat=tformat,\n )\n\n action_tensor = None\n if self.is_obs_noise(test_mode):\n action_tensor = ttype(self.n_agents,\n sampled_pairs.shape[_bsdim(tformat)],\n sampled_pairs.shape[_tdim(tformat)],\n 1).fill_(float(\"nan\"))\n for _bid in range(sampled_pairs.shape[_bsdim(tformat)]):\n # each agent has it's own assumptions about what pair-wise actions were sampled!\n for _aid in range(self.n_agents):\n # work out which pair id agent _aid is in (if any) and whether at first or second position\n partid = None\n posid = None\n #for _partid, _part in enumerate(_ordered_2_agent_pairings(self.n_agents)):\n combid = int(sampled_pair_ids[_aid, _bid, 0, 0].item())\n part = list(_ordered_2_agent_pairings(self.n_agents))[combid]\n for pid, p in enumerate(part):\n agentids = _pairing_id_2_agent_ids(p, self.n_agents)\n if agentids[0] 
== _aid:\n partid = pid\n posid = 0\n break\n if agentids[1] == _aid:\n partid = pid\n posid = 1\n break\n pass\n if partid is not None:\n # ok so what actions did agent _aid finally select?\n joint_act = self.actions_level2_sampled[_aid][partid,_bid,0,0].item()\n joint_act_dec = _joint_actions_2_action_pair(int(joint_act), self.n_actions)\n if joint_act_dec == 11: # DEBUG\n a = 5\n if joint_act_dec != 0: # else delegate\n action_tensor[_aid,_bid,0,:] = joint_act_dec[posid]\n else:\n # decentralized anyway!\n pass\n else:\n action_tensor = ttype(self.n_agents,\n pair_sampled_actions.shape[_bsdim(tformat)],\n pair_sampled_actions.shape[_tdim(tformat)],\n 1).fill_(float(\"nan\"))\n for i in range(sampled_pairs.shape[-1]):\n sampled_pair = sampled_pairs[:,:,:,i:i+1]\n pair_id1, pair_id2 = _pairing_id_2_agent_ids__tensor(sampled_pair, self.n_agents,\n \"a*bs*t*v\") # sampled_pair_ids.squeeze(0).squeeze(2).view(-1), self.n_agents)\n\n avail_actions1 = inputs_level3[\"agent_input_level3\"][\"avail_actions\"].gather(\n _adim(inputs_level3_tformat), Variable(pair_id1.repeat(1, 1, 1, inputs_level3[\"agent_input_level3\"][\n \"avail_actions\"].shape[_vdim(inputs_level3_tformat)])))\n avail_actions2 = inputs_level3[\"agent_input_level3\"][\"avail_actions\"].gather(\n _adim(inputs_level3_tformat), Variable(pair_id2.repeat(1, 1, 1, inputs_level3[\"agent_input_level3\"][\n \"avail_actions\"].shape[_vdim(inputs_level3_tformat)])))\n\n # selected_level_2_actions = pair_sampled_actions.gather(0, sampled_pair_ids.long())\n this_pair_sampled_actions = pair_sampled_actions.gather(0, sampled_pair.long())\n\n actions1, actions2 = _joint_actions_2_action_pair_aa(this_pair_sampled_actions.clone(),\n self.n_actions,\n avail_actions1,\n avail_actions2)\n # count how often level2 actions are un-available at level 3\n # TODO: Verify that 'this_pair_sampled_actions != 0' is the right thing to do!!\n pair_action_unavail_rate = (th.mean(((actions1 != actions1) & (this_pair_sampled_actions != 0)).float()).item() +\n th.mean(((actions2 != actions2) & (this_pair_sampled_actions != 0)).float()).item()) / 2.0\n if pair_action_unavail_rate != 0.0 and hasattr(self.args, \"mackrl_delegate_if_zero_ck\") and self.args.mackrl_delegate_if_zero_ck:\n #assert False, \"pair action unavail HAS to be zero in mackrl_delegate_if_zero_ck setting!\"\n self.logging_struct.py_logger.warning(\"ERROR: pair action unavail HAS to be zero in mackrl_delegate_if_zero_ck setting!\")\n\n self._add_stat(\"pair_action_unavail_rate__runner\",\n pair_action_unavail_rate,\n T_env=T_env,\n suffix=test_suffix,\n to_sacred=False)\n\n # Now check whether any of the pair_sampled_actions violate individual agent constraints on avail_actions\n ttype = th.cuda.FloatTensor if self.args.use_cuda else th.FloatTensor\n\n\n action_tensor.scatter_(0, pair_id1, actions1)\n action_tensor.scatter_(0, pair_id2, actions2)\n\n avail_actions_level3 = inputs_level3[\"agent_input_level3\"][\"avail_actions\"].clone().data\n self.avail_actions = avail_actions_level3.clone()\n\n inputs_level3[\"agent_input_level3\"][\"avail_actions\"] = Variable(avail_actions_level3,\n requires_grad=False)\n\n out_level3, hidden_states_level3, losses_level3, tformat_level3 = self.model.models[\"level3_{}\".format(0)](inputs_level3[\"agent_input_level3\"],\n hidden_states=hidden_states[\"level3\"],\n loss_fn=None,\n tformat=inputs_level3_tformat,\n test_mode=test_mode,\n seq_lens=inputs[\"agent_input_level3__agent0\"].seq_lens,\n **kwargs)\n # extract available actions\n avail_actions_level3 = 
inputs_level3[\"agent_input_level3\"][\"avail_actions\"]\n\n individual_actions, \\\n modified_inputs_level3, \\\n selected_actions_format_level3 = self.action_selector.select_action({\"policies\":out_level3},\n avail_actions=avail_actions_level3.data,\n tformat=tformat_level3,\n test_mode=test_mode)\n\n self.actions_level3 = individual_actions\n action_tensor[action_tensor != action_tensor] = individual_actions[action_tensor != action_tensor]\n\n # set states beyond episode termination to NaN\n if self.is_obs_noise(test_mode):\n action_tensor = _pad_nan(action_tensor, tformat=tformat_level3,\n seq_lens=inputs[\"agent_input_level1__agent0\"].seq_lens) # DEBUG\n else:\n action_tensor = _pad_nan(action_tensor, tformat=tformat_level3, seq_lens=inputs[\"agent_input_level1\"].seq_lens) # DEBUG\n # l2 = action_tensor.squeeze() # DEBUG\n if self.args.debug_mode in [\"level3_actions_only\"]:\n \"\"\"\n DEBUG MODE: LEVEL3 ACTIONS ONLY\n Here we just pick actions from level3 - should therefore just correspond to vanilla COMA!\n \"\"\"\n action_tensor = individual_actions\n\n self.final_actions = action_tensor.clone()\n if th.sum(self.final_actions == 11).item() > 0: # DEBUG\n a = 5\n pass\n\n if self.is_obs_noise(test_mode):\n selected_actions_list = []\n selected_actions_list += [dict(name=\"actions\",\n select_agent_ids=list(range(self.n_agents)),\n data=self.final_actions)]\n modified_inputs_list = []\n else:\n #self.actions_level3 = individual_actions.clone()\n self.selected_actions_format_level3 = selected_actions_format_level3\n self.policies_level3 = modified_inputs_level3.clone()\n self.avail_actions_active = avail_actions_level3.data\n\n selected_actions_list = []\n for _i in range(_n_agent_pair_samples(self.n_agents) if self.args.n_pair_samples is None else self.args.n_pair_samples): #_n_agent_pair_samples(self.n_agents)):\n selected_actions_list += [dict(name=\"actions_level1__sample{}\".format(_i),\n data=self.actions_level1[_i])]\n for _i in range(_n_agent_pair_samples(self.n_agents)):\n selected_actions_list += [dict(name=\"actions_level2__sample{}\".format(_i),\n data=self.actions_level2_sampled[_i])] # TODO: BUG!?\n selected_actions_list += [dict(name=\"actions_level2\",\n select_agent_ids=list(range(_n_agent_pairings(self.n_agents))),\n data=self.actions_level2)]\n selected_actions_list += [dict(name=\"actions_level3\",\n select_agent_ids=list(range(self.n_agents)),\n data=self.actions_level3)]\n selected_actions_list += [dict(name=\"actions\",\n select_agent_ids=list(range(self.n_agents)),\n data=self.final_actions)]\n\n modified_inputs_list = []\n modified_inputs_list += [dict(name=\"policies_level1\",\n data=self.policies_level1)]\n for _i in range(_n_agent_pair_samples(self.n_agents)):\n modified_inputs_list += [dict(name=\"policies_level2__sample{}\".format(_i),\n data=self.policies_level2[_i])]\n modified_inputs_list += [dict(name=\"policies_level3\",\n select_agent_ids=list(range(self.n_agents)),\n data=self.policies_level3)]\n modified_inputs_list += [dict(name=\"avail_actions_active\",\n select_agent_ids=list(range(self.n_agents)),\n data=self.avail_actions_active)]\n modified_inputs_list += [dict(name=\"avail_actions\",\n select_agent_ids=list(range(self.n_agents)),\n data=self.avail_actions)]\n\n #modified_inputs_list += [dict(name=\"avail_actions\",\n # select_agent_ids=list(range(self.n_agents)),\n # data=self.avail_actions)]\n\n selected_actions_list += [dict(name=\"actions_onehot\",\n select_agent_ids=list(range(self.n_agents)),\n data=_onehot(self.final_actions, 
rng=(0, self.n_actions)))]\n\n hidden_states = dict(level1=hidden_states_level1,\n level2=hidden_states_level2,\n level3=hidden_states_level3)\n\n return hidden_states, selected_actions_list, modified_inputs_list, self.selected_actions_format\n\n pass\n\n else:\n assert False, \"Not implemented\"", "def select_action(self, state):\n pass", "def ChooseAction(self):\n self.lastAction = None\n self.lastState = None\n if(self.attention is None or self.attention == \"\"): return\n # find best action for the currently attended node\n actions = list(self.vi.Q[self.states.index(self.attention)])\n actionIndex = actions.index(max(actions))\n actionName = self.actions[actionIndex]\n # execute the best action for the currently attended node\n self.nodes[actionName].Activate()\n self.lastAction = actionName\n self.lastState = self.attention", "def execute_actions(self, actions):\n execute_actions(self.board, self.agent_locs, actions)", "def select(self, target):", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n ''' \n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def demonstrate(self,**kwargs):\n\n members = self.bl.getAllSavedActions()\n entries={}\n\n for param in members:\n entries[str(param)] = self.executeAction # save param names in entries\n\n# entries['search colour for position'] = self.search_menu\n entries['move block to position'] = self.move_block_menu\n entries['move arm to position'] = self.move_menu\n self.mm.addGenericMenu(\"actionMenu\",self.mm.cur_page,\"Select the action to demonstrate\", entries)\n self.mm.loadMenu(\"actionMenu\")", "def target_factory(targets, user_args):\n finished = []\n if user_args.config_file is not None or user_args.cli_apikeys is not None:\n api_keys = get_config_from_file(user_args)\n else:\n api_keys = None\n init_targets_len = len(targets)\n\n for counter, t in enumerate(targets):\n c.info_news(\"Target factory started for {target}\".format(target=t))\n time.sleep(1 ) #tototo\n current_target = target(t)\n if not user_args.skip_defaults:\n current_target.get_hibp()\n current_target.get_hunterio_public()\n if api_keys is not None:\n c.info_news(\"Factory is calling API keys\")\n if \"hunterio\" in api_keys:\n current_target.get_hunterio_private(api_keys[\"hunterio\"])\n # If chase option. 
Check we're not chasing added target\n if user_args.chase_limit and counter < init_targets_len:\n chase_limiter = 1\n for i in range(len(current_target.data)):\n if (\n len(current_target.data[i]) >= 2 # Has header & data\n and \"HUNTER_RELATED\" in current_target.data[i][0]\n and chase_limiter <= user_args.chase_limit\n ):\n c.good_news(\n \"Adding {new_target} using HunterIO chase\".format(\n new_target=current_target.data[i][1]\n )\n )\n targets.append(current_target.data[i][1])\n chase_limiter += 1\n\n if \"snusbase_token\" in api_keys:\n current_target.get_snusbase(\n api_keys[\"snusbase_url\"], api_keys[\"snusbase_token\"]\n )\n if \"leak-lookup_priv\" in api_keys:\n current_target.get_leaklookup_priv(api_keys[\"leak-lookup_priv\"])\n if \"leak-lookup_pub\" in api_keys:\n print(\"tototo\")\n current_target.get_leaklookup_pub(api_keys[\"leak-lookup_pub\"])\n if \"weleakinfo_endpoint\" in api_keys and \"weleakinfo_key\" in api_keys:\n from .helpers import weleakinfo_get_auth_token\n\n token = weleakinfo_get_auth_token(\n api_keys[\"weleakinfo_endpoint\"], api_keys[\"weleakinfo_key\"]\n )\n current_target.get_weleakinfo(token)\n\n finished.append(current_target)\n return finished", "def actions(self, state):\n\t\traise NotImplementedError", "def choose_action(self, board):\n raise NotImplementedError", "def test_actions(self, actions):\n try:\n for action in actions:\n self.get_action(action['type'])(**action)\n except Exception as e:\n print('Exception: {}'.format(str(e)))", "def select_action(self, **kwargs):\n raise NotImplementedError('This method should be overriden.')", "def state_chosen_do(cfg, app, win, events):", "def actions(self):\n raise NotImplementedError", "def execute(targets, lines):", "def actions(self, states, agent_indices):\n return NotImplementedError()", "def _init_targets(self):\n for ga_main, ga_targ in zip(self.ga.variables, self.ga_.variables):\n ga_targ.assign(ga_main)\n if self.use_lyapunov:\n for lc_main, lc_targ in zip(self.lc.variables, self.lc_.variables):\n lc_targ.assign(lc_main)\n else:\n for q_1_main, q_1_targ in zip(self.q_1.variables, self.q_1_.variables):\n q_1_targ.assign(q_1_main)\n for q_2_main, q_2_targ in zip(self.q_2.variables, self.q_2_.variables):\n q_2_targ.assign(q_2_main)", "def choose_action(self, state, network, features=(), noise=True):\r\n state = np.concatenate(state)\r\n state = np.reshape(state, (-1, self.feature_number))\r\n if network == 'main':\r\n action = self.actor.predict(state)\r\n elif network == 'target':\r\n action = self.target_actor.predict(state)\r\n if noise:\r\n \"\"\"Ornstein-Uhlenbeck Process\"\"\"\r\n noisy_action = self.add_noise(action, features)\r\n if noisy_action > np.amax(self.action_range):\r\n noisy_action = np.amax(self.action_range)\r\n elif noisy_action < np.amin(self.action_range):\r\n noisy_action = np.amin(self.action_range)\r\n return noisy_action, action\r\n else:\r\n return action", "def chooseAction(self, gameState):\n myState = gameState.getAgentState(self.index)\n myPos = myState.getPosition()\n ghosts = self.getGhosts(gameState)\n\n if self.lastState:\n myLastState = self.lastState.getAgentState(self.index)\n \n ghosts = self.getGhosts(gameState)\n if len(ghosts) > 0:\n minDistToGhost = min([self.getMazeDistance(myPos, a.getPosition()) for a in ghosts])\n if not myState.isPacman and myLastState.isPacman and minDistToGhost <= 3 and self.specificPath == []:\n path, target = self.getAlternativePath(gameState, minPathLength=5)\n self.specificPath = path\n self.targetPos = target\n \n if 
len(self.specificPath) > 0:\n return self.specificPath.pop(0)\n elif self.isStucking(gameState):\n actions, target = self.getAlternativePath(gameState, minPathLength=5)\n if len(actions) > 0:\n self.specificPath = actions\n self.targetPos = target\n return self.specificPath.pop(0)\n else:\n actions = gameState.getLegalActions(self.index)\n return random.choice(actions)\n\n actions = gameState.getLegalActions(self.index)\n actions.remove('Stop')\n\n if len(ghosts) > 0:\n distToGhost = min([self.getMazeDistance(myPos, a.getPosition()) for a in ghosts])\n if not self.isOppoentsScared(gameState) and myState.isPacman and distToGhost <= 6:\n safeActions = self.getSafeActions(gameState)\n if len(safeActions) > 0:\n actions = safeActions\n\n values = [self.getQValue(gameState, a) for a in actions]\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n bestAction = random.choice(bestActions)\n \n self.doAction(gameState, bestAction)\n\n return bestAction", "def choose_action(self, *args, **kwargs):\n return NotImplementedError", "def targets(obj, reftype):", "def step(self, action):\n # Call the webdriver and perform the action\n if type(action) is str:\n cmd = action\n else:\n action = int(action)\n cmd = self.action_number_to_cmd[action]\n #print(\"action type:\", type(action))\n #raise Exception(\"Wrong the action type\")\n #sys.exit()\n\n print(\"cmd:\", cmd)\n #cmd = self.action_number_to_cmd[action]\n reward = 0\n discovered_elements = self.driver.get_discovered_elements()\n current_element = None\n\n if cmd == \"WAIT\":\n pass\n\n elif cmd in {\"CHOOSE_FIRST_CLICK\", \"CHOOSE_FIRST_SELECT\", \"CHOOSE_FIRST_ENTER\"}:\n cmd_to_chosen_type = {\n \"CHOOSE_FIRST_CLICK\": \"clickables\",\n \"CHOOSE_FIRST_SELECT\": \"selectables\",\n \"CHOOSE_FIRST_ENTER\": \"enterables\"\n }\n self.chosen_type = cmd_to_chosen_type[cmd]\n if len(discovered_elements[self.chosen_type]) > 0:\n #current_element = discovered_elements[self.chosen_type][0]\n self.chosen_number = 0\n else:\n reward = wrong_movement()\n\n elif cmd == \"NEXT\":\n if self.chosen_type:\n if len(discovered_elements[self.chosen_type]) > self.chosen_number + 1:\n self.chosen_number += 1\n #current_element = discovered_elements[self.chosen_type][self.chosen_number]\n else:\n reward = wrong_movement()\n else:\n reward = wrong_movement()\n\n elif cmd in {\"CLICK\", \"ENTER\", \"SELECT\"}:\n\n if not (self.chosen_type and self.chosen_number < len(discovered_elements[self.chosen_type])):\n reward = wrong_movement()\n else:\n current_element = discovered_elements[self.chosen_type][self.chosen_number]\n if cmd == \"CLICK\":\n self.driver.click(current_element)\n elif cmd == \"ENTER\":\n self.driver.enter(current_element, data=\"Hello world\")\n elif cmd == \"SELECT\":\n pass\n\n done = self.have_winner() or len(self.legal_actions()) == 0\n\n #reward = 1 if self.have_winner() else 0\n if self.have_winner():\n reward = 5\n\n return self.get_observation(), reward, done, {}", "def mouse_click(self, tv, event, alltargets=False):\n\t\t\n\t\tif event.button == 3:\n\n\t\t\t# create the menu and submenu objects\n\t\t\trightclickmenu = Gtk.Menu()\n\t\t\t\n\t\t\ttargets = []\n\t\t\tgeneric = []\n\n\t\t\t# check\n\t\t\tif self.on_services_view:\n\t\t\t\tif alltargets:\n\t\t\t\t\t(model, pathlist) = self.services_list.servicestree.get_selection().get_selected_rows()\n\t\t\t\telse:\n\t\t\t\t\t(model, pathlist) = self.services_view.treeview.get_selection().get_selected_rows()\n\t\t\telse:\n\t\t\t\t(model, pathlist) = 
self.work.treeview.get_selection().get_selected_rows()\n\n\t\t\tif len(pathlist) < 1:\n\t\t\t\t# right click on nothing\n\t\t\t\treturn False \n\n\t\t\t# get selected port\n\t\t\ttry:\n\t\t\t\tfor path in pathlist :\n\t\t\t\t\ttree_iter = model.get_iter(path)\n\n\t\t\t\t\tif self.on_services_view:\n\t\t\t\t\t\tif alltargets:\n\t\t\t\t\t\t\tservice = self._filter_service(model.get_value(tree_iter,0)) # selected service\n\t\t\t\t\t\t\t# set shell conf section from user selection\n\t\t\t\t\t\t\tself._selected_opt[\"service\"] = service\n\n\t\t\t\t\t\t\tfor port in self.engine.database.get_ports_by_service(service):\n\t\t\t\t\t\t\t\ttargets.append(port)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# set selected port\n\t\t\t\t\t\t\tselected_port = model.get_value(tree_iter,1) \n\t\t\t\t\t\t\tself._selected_opt[\"port\"] = selected_port \n\n\t\t\t\t\t\t\t# set selected host if on service view\n\t\t\t\t\t\t\tself._selected_opt[\"host\"] = model.get_value(tree_iter,4) \n\t\t\t\t\t\t\ttargets.append(self.engine.database.get_port(model.get_value(tree_iter,7) ))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\t# set selected port\n\t\t\t\t\t\tselected_port = model.get_value(tree_iter,1) \n\t\t\t\t\t\tself._selected_opt[\"port\"] = selected_port \n\n\t\t\t\t\t\t# set selected service if not on service view\n\t\t\t\t\t\tselected_service = model.get_value(tree_iter,4) # selected service\n\t\t\t\t\t\ttargets.append(self.engine.database.get_port(model.get_value(tree_iter,7)))\n\t\t\t\t\t\tself._selected_opt[\"service\"] = selected_service \n\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tpass\n\t\t\t\n\t\t\t#print('si')\n\t\t\t# fix some multiple names\n\t\t\tself._selected_opt[\"service\"] = self._filter_service(self._selected_opt[\"service\"])\n\n\t\t\t# get extra extensions\n\t\t\textra = self.engine.get_menu(self._selected_opt[\"service\"])\n\n\t\t\tfor extension in extra:\n\t\t\t\tif extension == \"shell\":\n\t\t\t\t\t# little trick for shell ext\n\t\t\t\t\tiE = Gtk.MenuItem(self._selected_opt[\"service\"])\n\t\t\t\telse:\n\t\t\t\t\tiE = Gtk.MenuItem(extension)\n\n\t\t\t\tiE.show()\n\t\t\t\trightclickmenu.append(iE)\n\n\t\t\t\t# check if there is a submenu for the current extension\n\t\t\t\ttry:\n\t\t\t\t\ttabs = {}\n\t\t\t\t\textension_ext_menu = Gtk.Menu()\n\t\t\t\t\tsubmenu = extra[extension].submenu(self._selected_opt[\"service\"])\n\n\t\t\t\t\tfor sub_item in submenu:\n\t\t\t\t\t\t#print(sub_item)\n\t\t\t\t\t\tif len(sub_item.split(\"/\")) > 1:\n\t\t\t\t\t\t\tprev = \"\"\n\t\t\t\t\t\t\tprevst = \"\"\n\n\t\t\t\t\t\t\tfor sub in sub_item.split(\"/\"):\n\t\t\t\t\t\t\t\tif sub != sub_item.split(\"/\")[-1]:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# new category\n\t\t\t\t\t\t\t\t\tt_menu = Gtk.Menu()\n\t\t\t\t\t\t\t\t\tt = Gtk.MenuItem(sub)\n\t\t\t\t\t\t\t\t\tt.show()\n\t\t\t\t\t\t\t\t\tt.set_submenu(t_menu)\n\n\t\t\t\t\t\t\t\t\tif not sub in tabs:\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\ttabs[sub] = t_menu\n\n\t\t\t\t\t\t\t\t\t\tif prevst != \"\":\n\t\t\t\t\t\t\t\t\t\t\tprev.append(t)\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\textension_ext_menu.append(t)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tprev = tabs[sub]\n\t\t\t\t\t\t\t\t\tprevst = sub\n\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t#print(sub)\n\t\t\t\t\t\t\t\t\titem = Gtk.MenuItem( sub ) \n\t\t\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], sub_item)\n\n\t\t\t\t\t\t\t\t\tprev.append(item)\n\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# extension in any 
sub-categories\n\t\t\t\t\t\t\titem = Gtk.MenuItem(sub_item)\n\t\t\t\t\t\t\textension_ext_menu.append(item)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# show and connect the extension\n\t\t\t\t\t\t\titem.show()\n\t\t\t\t\t\t\titem.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], sub_item)\n\n\t\t\t\t\tif len(tabs) == 0:\n\t\t\t\t\t\tnot_found = Gtk.MenuItem(\"nothing\")\n\t\t\t\t\t\tnot_found.show()\n\t\t\t\t\t\textension_ext_menu.append(not_found)\n\t\t\t\t\t\n\t\t\t\t\tiE.set_submenu(extension_ext_menu)\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\t#print(e)\n\t\t\t\t\tiE.connect('activate', self.run_multi_extra, targets, extra[extension], self._selected_opt[\"service\"], extra[extension].menu[\"label\"]) #.menu[\"label\"])\n\n\t\t\t\ttry:\n\t\t\t\t\t# try if there is generic for the current extension\n\t\t\t\t\tsubmenu = extra[extension].submenu(\"generic\")\n\n\t\t\t\t\tfor sub_item in submenu:\n\t\t\t\t\t\t# remove _ and show spaces\n\t\t\t\t\t\tgeneric.append(sub_item.replace(\"_\",\" \"))\n\t\t\t\texcept: pass\n\n\t\t\tseparator = Gtk.SeparatorMenuItem()\n\t\t\tseparator.show()\n\t\t\trightclickmenu.append(separator)\n\n\t\t\tgen_x = self.engine.get_menu(\"generic\")\n\n\t\t\tfor gen in generic:\n\n\t\t\t\ti2 = Gtk.MenuItem(gen)\n\t\t\t\ti2.show()\n\t\t\t\trightclickmenu.append(i2)\n\n\t\t\t\ti2.connect(\"activate\", self.run_multi_extra, targets, extra[\"shell\"], \"generic\", gen)\n\n\t\t\trightclickmenu.popup(None, None, None, None, 0, Gtk.get_current_event_time())\n\n\t\t\treturn True", "def actionBuild():\n\n #Init builder logger\n Builder.init()\n\n for target in Settings.targets:\n targetsToBuild, combineLibs, copyToOutput = Builder.getTargetGnPath(target)\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n if System.checkIfCPUIsSupportedForPlatform(cpu,platform):\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_PREPARE, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n result = Builder.run(target, targetsToBuild, platform, cpu, configuration, combineLibs, copyToOutput)\n Summary.addSummary(ACTION_BUILD, target, platform, cpu, configuration, result, Builder.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed building ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.RED)\n #Terminate script execution if stopExecutionOnError is set to True in userdef\n shouldEndOnError(result)\n else:\n Logger.printEndActionMessage('Build ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration)\n else:\n Logger.printColorMessage('Build cannot run because preparation has failed for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n Logger.printEndActionMessage('Build not run for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)", "def action(self, target, text):\n raise NotImplementedError", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def executeAction(self,**kwargs):\n try:\n action = kwargs[\"fname\"]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n entries = {}\n pose_offset = 'empty'\n if action in self.bl.getAllSavedActions():\n pose_offset = self.bl.baxter_actions[str(action)]['joint_position']\n entries['Show action only'] = 
[self.moveBy, pose_offset]\n entries['Show pick up action'] = [self.pickUpActionColour, pose_offset]\n# entries['Add condition'] = self.addEmptyCondition\n# entries['Rename '+str(action)] = [self.renameAction, action]\n entries['Learn '+str(action)] = getattr(self.bl, 'demoAction')\n\n self.mm.addGenericMenu(\"learnMenu\", self.mm.cur_page,\"Action saved as: %s\" % (str(pose_offset)),entries)\n self.mm.loadMenu(\"learnMenu\")", "def step(self, action, update=True):\n\n if self.centralized_planning:\n agent_states = [human.get_full_state() for human in self.humans]\n if self.robot.visible:\n agent_states.append(self.robot.get_full_state())\n human_actions = self.centralized_planner.predict(\n agent_states, self.group_membership, self.obstacles\n )[:-1]\n else:\n human_actions = self.centralized_planner.predict(\n agent_states, self.group_membership, self.obstacles\n )\n else:\n human_actions = []\n for human in self.humans:\n # Choose new target if human has reached goal and in perpetual mode:\n if human.reached_destination() and self.perpetual:\n if self.train_val_sim == \"square_crossing\":\n gx = (\n np.random.random() * self.square_width * 0.5 * np.random.choice([-1, 1])\n )\n gy = (np.random.random() - 0.5) * self.square_width\n human.set(human.px, human.py, gx, gy, 0, 0, 0)\n elif self.train_val_sim == \"circle_crossing\":\n human.set(human.px, human.py, -human.px, -human.py, 0, 0, 0)\n else:\n if np.random.rand(1) > 0.5:\n gx = (\n np.random.random()\n * self.square_width\n * 0.5\n * np.random.choice([-1, 1])\n )\n gy = (np.random.random() - 0.5) * self.square_width\n human.set(human.px, human.py, gx, gy, 0, 0, 0)\n else:\n human.set(human.px, human.py, -human.px, -human.py, 0, 0, 0)\n # observation for humans is always coordinates\n human_ob = [\n other_human.get_observable_state()\n for other_human in self.humans\n if other_human != human\n ]\n if self.robot.visible:\n human_ob += [self.robot.get_observable_state()]\n human_actions.append(human.act(human_ob, self.group_membership))\n # collision detection\n dmin = float(\"inf\")\n collisions = 0\n human_distances = list()\n for i, human in enumerate(self.humans):\n px = human.px - self.robot.px\n py = human.py - self.robot.py\n if self.robot.kinematics == \"holonomic\":\n vx = human.vx - action.vx\n vy = human.vy - action.vy\n else:\n vx = human.vx - action.v * np.cos(action.r + self.robot.theta)\n vy = human.vy - action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n # closest distance between boundaries of two agents\n human_dist = (\n point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius\n )\n if human_dist < 0:\n collisions += 1\n self.episode_info[\"collisions\"] -= self.collision_penalty\n # logging.debug(\"Collision: distance between robot and p{} is {:.2E}\".format(i, human_dist))\n break\n elif human_dist < dmin:\n dmin = human_dist\n human_distances.append(human_dist)\n\n # collision detection between robot and static obstacle\n static_obstacle_dmin = float(\"inf\")\n static_obstacle_collision = 0\n obstacle_distances = list()\n min_dist = self.robot.radius\n px = self.robot.px\n py = self.robot.py\n\n if self.robot.kinematics == \"holonomic\":\n vx = action.vx\n vy = action.vy\n else:\n vx = action.v * np.cos(action.r + self.robot.theta)\n vy = action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n for i, obstacle in enumerate(self.obstacles):\n robot_position = ex, ey\n obst_dist = 
line_distance(obstacle, robot_position)\n if obst_dist < min_dist:\n static_obstacle_collision += 1\n self.episode_info[\n \"static_obstacle_collisions\"\n ] -= self.static_obstacle_collision_penalty\n break\n\n # collision detection between humans\n human_num = len(self.humans)\n for i in range(human_num):\n for j in range(i + 1, human_num):\n dx = self.humans[i].px - self.humans[j].px\n dy = self.humans[i].py - self.humans[j].py\n dist = (\n (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius\n )\n if dist < 0:\n # detect collision but don't take humans' collision into account\n logging.debug(\"Collision happens between humans in step()\")\n # check if reaching the goal\n end_position = np.array(self.robot.compute_position(action, self.time_step, self.closed))\n reaching_goal = (\n norm(end_position - np.array(self.robot.get_goal_position()))\n < self.robot.radius + self.goal_radius\n )\n done = False\n info = Nothing()\n reward = -self.time_penalty\n goal_distance = np.linalg.norm(\n [\n (end_position[0] - self.robot.get_goal_position()[0]),\n (end_position[1] - self.robot.get_goal_position()[1]),\n ]\n )\n progress = self.previous_distance - goal_distance\n self.previous_distance = goal_distance\n reward += self.progress_reward * progress\n self.episode_info[\"progress\"] += self.progress_reward * progress\n if self.global_time >= self.time_limit:\n done = True\n info = Timeout()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 1.0\n if collisions > 0:\n reward -= self.collision_penalty * collisions\n if self.end_on_collision:\n done = True\n info = Collision()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 1.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 0.0\n\n if static_obstacle_collision > 0:\n reward -= self.static_obstacle_collision_penalty * static_obstacle_collision\n if self.end_on_collision:\n done = True\n info = Collision()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 1.0\n self.episode_info[\"did_timeout\"] = 0.0\n if reaching_goal:\n reward += self.success_reward\n done = True\n info = ReachGoal()\n self.episode_info[\"goal\"] = self.success_reward\n self.episode_info[\"did_succeed\"] = 1.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 0.0\n for human_dist in human_distances:\n if 0 <= human_dist < self.discomfort_dist * self.discomfort_scale:\n discomfort = (\n (human_dist - self.discomfort_dist * self.discomfort_scale)\n * self.discomfort_penalty_factor\n * self.time_step\n )\n reward += discomfort\n self.episode_info[\"discomfort\"] += discomfort\n\n forces = self.centralized_planner.get_force_vectors(coeff=[1] * 6)\n\n if forces is not None:\n # separate human and robot forces\n robot_forces = forces[-1]\n human_forces = forces[:-1]\n # calculate average of human forces and append them to the log\n for i, force in enumerate(self.force_list):\n self.episode_info.get(\"avg_\" + force).append(\n np.average(np.hypot(*human_forces[:, i, :].transpose()))\n )\n # add robot social force\n self.episode_info.get(\"robot_social_force\").append(np.hypot(*robot_forces[1]))\n\n human_num = len(self.humans)\n for i in range(human_num):\n px = 
self.humans[i].px\n py = self.humans[i].py\n gx = self.humans[i].gx\n gy = self.humans[i].gy\n\n self.episode_info[\"pedestrian_distance_traversed\"][i].append([px,py])\n self.episode_info[\"pedestrian_goal\"][i].append([gx,gy])\n\n self.episode_info[\"pedestrian_velocity\"][i].append([vx,vy])\n\n\n\n # penalize group intersection\n robot_pos = [self.robot.px, self.robot.py]\n robot_vel = [self.robot.vx, self.robot.vy]\n\n self.episode_info[\"robot_distance_traversed\"].append(robot_pos)\n self.episode_info[\"robot_velocity\"].append(robot_vel)\n\n\n\n convex = 1\n\n for idx, group in enumerate(self.group_membership):\n # get the members of the group\n points = []\n for human_id in group:\n ind_points = [\n point_along_circle(\n self.humans[human_id].px,\n self.humans[human_id].py,\n self.humans[human_id].radius,\n )\n for _ in range(10)\n ]\n points.extend(ind_points)\n\n if convex == 1:\n\n # compute the convex hull\n hull = ConvexHull(points)\n\n group_col = point_in_hull(robot_pos, hull)\n\n # min spanning circle\n else:\n circle_def = minimum_enclosing_circle(points)\n\n group_col = is_collision_with_circle(\n circle_def[0][0], circle_def[0][1], circle_def[1], robot_pos[0], robot_pos[1]\n )\n\n if group_col:\n group_discomfort = -self.group_discomfort_penalty\n reward += group_discomfort\n self.episode_info[\"group_discomfort\"] += group_discomfort\n\n # we only want to track number of violations once per group per episode\n self.episode_info[\"group_intersection_violations\"][idx] = 1.0\n\n if (\n len(human_distances) > 0\n and 0 <= min(human_distances) < self.discomfort_dist * self.discomfort_scale\n ):\n info = Danger(min(human_distances))\n if update:\n # update all agents\n self.robot.step(action, self.closed)\n for i, human_action in enumerate(human_actions):\n self.humans[i].step(human_action, self.closed)\n self.global_time += self.time_step\n for i, human in enumerate(self.humans):\n # only record the first time the human reaches the goal\n if self.human_times[i] == 0 and human.reached_destination():\n self.human_times[i] = self.global_time\n # compute the observation\n if self.robot.sensor == \"coordinates\":\n ob = [human.get_observable_state() for human in self.humans]\n\n if self.enable_intent:\n if self.intent_type == \"individual\":\n target_maps = np.array([human.get_target_map() for human in self.humans])\n elif self.intent_type == \"group\":\n target_maps = np.array([human.get_target_map() for human in self.humans])\n\n # average intent map across group members\n for group in self.group_membership:\n # get the members of the group\n avg = np.average([target_maps[human_id] for human_id in group], axis=0)\n for human_id in group:\n target_maps[human_id] = avg\n\n # add target_map to observation\n for i in range(len(ob)):\n ob[i].update_target_map(target_maps[i])\n else:\n print(\n \"unrecognized intent type, only valid options are individual or group, received: \",\n self.intent_type,\n )\n\n elif self.robot.sensor.lower() == \"rgb\" or self.robot.sensor.lower() == \"gray\":\n snapshot = self.get_pixel_obs()\n prior_planes = snapshot.shape[1] * (self.num_frames - 1)\n self.obs_history = np.concatenate(\n (self.obs_history[:, -prior_planes:, :, :], snapshot), axis=1\n )\n ob = self.obs_history\n else:\n raise ValueError(\"Unknown robot sensor type\")\n # store state, action value and attention weights\n self.states.append(\n [\n self.robot.get_full_state(),\n [human.get_full_state() for human in self.humans],\n self.centralized_planner.get_force_vectors(),\n ]\n )\n if 
hasattr(self.robot.policy, \"action_values\"):\n self.action_values.append(self.robot.policy.action_values)\n if hasattr(self.robot.policy, \"get_attention_weights\"):\n self.attention_weights.append(self.robot.policy.get_attention_weights())\n else:\n if self.robot.sensor == \"coordinates\":\n ob = [\n human.get_next_observable_state(action, self.closed)\n for human, action in zip(self.humans, human_actions)\n ]\n elif self.robot.sensor.lower() == \"rgb\" or self.robot.sensor.lower() == \"gray\":\n snapshot = self.get_pixel_obs()\n prior_planes = snapshot.shape[1] * (self.num_frames - 1)\n self.obs_history = np.concatenate(\n (self.obs_history[:, -prior_planes:, :, :], snapshot), axis=1\n )\n ob = self.obs_history\n else:\n raise ValueError(\"Unknown robot sensor type\")\n if done:\n self.episode_info[\"time\"] = -self.global_time * self.time_penalty / self.time_step\n self.episode_info[\"global_time\"] = self.global_time\n info = self.episode_info # Return full episode information at the end\n return ob, reward, done, info", "def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)", "def actionPrepare():\n \n #Do preparation that is common for all platforms. Pass true if ortc is one of targets\n result = Preparation.setUp('ortc' in Settings.targets)\n if result != NO_ERROR:\n #Terminate execution, because prepration common for all targets and platforms has failed.\n System.stopExecution(result)\n\n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n if System.checkIfCPUIsSupportedForPlatform(cpu,platform):\n for configuration in Settings.targetConfigurations:\n Logger.printStartActionMessage('Prepare ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.YELLOW)\n result = Preparation.run(target, platform, cpu, configuration)\n Summary.addSummary(ACTION_PREPARE, target, platform, cpu, configuration, result, Preparation.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed preparing ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration,ColoredFormatter.RED)\n #Terminate script execution if stopExecutionOnError is set to True in userdef\n shouldEndOnError(result)\n else:\n Logger.printEndActionMessage('Prepare ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration)", "def projectMenuActions( self, action ):\n\tif ( action.text() == 'Create Project' ): \n\t self.CreateProjectWidget()", "def actionBackup():\n Backup.init()\n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n if System.checkIfCPUIsSupportedForPlatform(cpu,platform):\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_BUILD, target, platform, cpu, configuration):\n Backup.run(target, platform, cpu, configuration)", "def decide(self, state: OthelloState, actions: list):\n # -------- TASK 2 ------------------------------------------------------\n # Your task is to implement an algorithm to choose an action form the\n # given `actions` list. You can implement any algorithm you want.\n # However, you should keep in mind that the execution time of this\n # function is limited. So, instead of choosing just one action, you can\n # generate a sequence of increasing good action.\n # This function is a generator. So, you should use `yield` statement\n # rather than `return` statement. 
To find more information about\n # generator functions, you can take a look at:\n # https://www.geeksforgeeks.org/generators-in-python/\n #\n # If you generate multiple actions, the last action will be used in the\n # game.\n #\n # Tips\n # ====\n # 1. During development of your algorithm, you may want to find the next\n # state after applying an action to the current state; in this case,\n # you can use the following patterns:\n # `next_state = current_state.successor(action)`\n #\n # 2. If you need to simulate a game from a specific state to find the\n # the winner, you can use the following pattern:\n # ```\n # simulator = Game(FirstAgent(), SecondAgent())\n # winner = simulator.play(starting_state=specified_state)\n # ```\n # The `MarkovAgent` has illustrated a concrete example of this\n # pattern.\n #\n # 3. You are free to choose what kind of game-playing agent you\n # implement. Some of the obvious approaches are the following:\n # 3.1 Implement alpha-beta (and investigate its potential for searching deeper\n # than what is possible with Minimax). Also, the order in which the actions\n # are tried in a given node impacts the effectiveness of alpha-beta: you could\n # investigate different ways of ordering the actions/successor states.\n # 3.2 Try out better heuristics, e.g. ones that take into account the higher\n # importance of edge and corner cells. Find material on this in the Internet.\n # 3.3 You could try out more advanced Monte Carlo search methods (however, we do\n # not know whether MCTS is competitive because of the high cost of the full\n # gameplays.)\n # 3.4 You could of course try something completely different if you are willing to\n # invest more time.\n #\n # GL HF :)\n # ----------------------------------------------------------------------\n\n # Replace the following lines with your algorithm\n best_action = actions[0]\n yield best_action", "def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()", "def action(self, gstate, actions=None):\n raise NotImplementedError", "def select_action(images, n_actions, device, eps_threshold=-1):\n actions = []\n\n for i in images:\n if eps_threshold == -1:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n else:\n sample = random.random()\n if sample > eps_threshold:\n with torch.no_grad():\n # t.min(1) will return smallest column value of each row.\n # second column on min result is index of where min element was\n # found, so we pick action with the lower expected reward.\n actions.append(policy_net(i.unsqueeze(0)).min(1)[1].view(1, 1))\n else:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n\n return torch.tensor(actions, device=device)", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n '''\n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n '''\n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def get_targets(self):\n\t\n\t\tself.target = []\n\t\ttarget_ins = self.settings['target']\n\t\tfor key in target_ins.keys():\n\t\t\tif key == 'raw':\n\t\t\t\tself.target.append(target_ins[key])\n\t\t\telif key == 'textfile':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif 
re.match('^[a-z,A-Z,_].+\\s*:\\s*[A-Z].+$',t):\n\t\t\t\t\t\tself.target.append(tuple([i.strip() for i in t.split(':')]))\n\t\t\telif key == 'textfile_rna':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,0-9,_].+\\s*:\\s*[A-Z,a-z].+$',t):\n\t\t\t\t\t\tself.target.append(list([i.strip() for i in t.split(':')]))\n\t\t\t\t\t\trnaseq = self.target[-1][1]\n\t\t\t\t\t\t#---extra substitutions for later\n\t\t\t\t\t\tif 'regex_subs' in self.settings.keys():\n\t\t\t\t\t\t\tfor regex in self.settings['regex_subs']:\n\t\t\t\t\t\t\t\trnaseq = re.sub(regex[0],regex[1],rnaseq)\n\t\t\t\t\t\trnaseq = rnaseq.upper()\n\t\t\t\t\t\trnaseq = re.sub('T','U',rnaseq)\n\t\t\t\t\t\taminoseq = ''.join([dna_mapping[i] for i in [rnaseq[i:i+3] \n\t\t\t\t\t\t\tfor i in range(0,len(rnaseq),3)]])\n\t\t\t\t\t\tself.target[-1][1] = re.sub('T','U',aminoseq)\n\t\t\t\t\t\tself.target[-1] = tuple(self.target[-1])\n\t\t\telse: raise Exception('except: unclear target type')", "def autofixTargets(self, local_ctx):\n pass", "def choose_action(self, features_all_arms) -> Tuple[torch.Tensor, torch.Tensor]:\n actor_output = self.policy.act(obs=features_all_arms)\n chosen_action = torch.argmax(actor_output.action, dim=1)\n log_prob = actor_output.log_prob\n return torch.unsqueeze(chosen_action, 1), log_prob", "def target(self):", "def _pick(self):\n self._source = self._chooser(self.targets, self.cat, self._kwargs,\n self._cat_kwargs)\n if self.input_container != \"other\":\n assert self._source.container == self.input_container\n\n self.metadata['target'] = self._source.metadata\n if self.container is None:\n self.container = self._source.container", "def post_process(self, relevant_targets):\r\n pass", "def main():\n parser = argparse.ArgumentParser(conflict_handler='resolve')\n parser.the_error = parser.error\n parser.error = lambda m: None\n\n resources = {\n 'app': AppCommands,\n 'assembly': AssemblyCommands,\n 'languagepack': LanguagePackCommands\n }\n\n choices = resources.keys()\n\n parser.add_argument('resource', choices=choices,\n help=\"Target noun to act upon\")\n\n resource = None\n try:\n parsed, _ = parser.parse_known_args()\n resource = parsed.resource\n except Exception:\n print(\"Invalid target specified to act upon.\\n\")\n parser.print_help()\n sys.exit(1)\n\n if resource in resources:\n try:\n resources[resource](parser)\n except Exception as e:\n print(strutils.safe_encode(six.text_type(e)), file=sys.stderr)\n sys.exit(1)\n\n else:\n cli_utils.show_help(resources)\n print(\"\\n\")\n parser.print_help()", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def _generate_actions(self) -> list:\n pass", "def t_action(option,opt_str,value,parser):\n\n local_target_descriptions = {\"unit\":\"Quick unit tests using nosetests and doctest.\",\n \"all\":\"All correctness tests (i.e. 
all tests but speed, coverage).\",\n \"coverage\":\"Same as unit but measuring test coverage.\",\n \"exhaustive\":\"Slow system tests.\",\n \"speed\":\"Test for changes in execution speed.\",\n \"quick\":\"All tests whose runtimes are in seconds.\",\n \"flakes\":\"Run pyflakes static code checker.\"}\n\n local_targets = []\n\n env = os.environ.copy()\n pypath = env.get('PYTHONPATH','')\n env['PYTHONPATH'] = pypath + ':'.join(p for p in sys_paths())\n\n # Targets handled in this file\n if value in [\"list\",\"unit\",\"flakes\",\"coverage\"]:\n local_targets += [value]\n value = None\n\n # Other targets require runtests.py\n if value == \"quick\":\n local_targets += [\"unit\",\"flakes\"]\n\n if value == \"all\":\n local_targets += [\"unit\",\"flakes\"]\n value = \"exhaustive\"\n\n import subprocess\n global return_code\n\n # JABALERT: Unlike the tests in runtests.py, will not use xvfb-run\n # to hide GUI windows being tested. Once runtests.py is made into\n # a module, the code it contains for conditionally using xvfb-run\n # can be applied here as well.\n if \"flakes\" in local_targets:\n targets = [\"topo\",\n \"external/param\",\n \"external/paramtk\",\n \"external/holoviews\",\n \"external/imagen\",\n \"external/lancet\"]\n ret = subprocess.call([\"python\",\"topo/tests/buildbot/pyflakes-ignore.py\",\"--ignore\", \"topo/tests\",\"--total\"] + targets)\n return_code += 0 # abs(ret) # CEBALERT: ignore all of pyflakes.\n\n if \"unit\" in local_targets:\n proc = subprocess.Popen([\"nosetests\", \"-v\", \"--with-doctest\",\n \"--doctest-extension=txt\"], env=env)\n proc.wait()\n return_code += abs(proc.returncode)\n\n if \"coverage\" in local_targets:\n proc = subprocess.Popen([\"nosetests\", \"-v\", \"--with-doctest\",\n \"--doctest-extension=txt\",\n \"--with-cov\", \"--cov-report\", \"html\"], env=env)\n proc.wait()\n return_code += abs(proc.returncode)\n\n from topo.tests.runtests import target_description\n\n if value is not None:\n if value not in [\"quick\",\"exhaustive\"] and value not in target_description:\n print \"\\nCould not find test target %r.\\n\" % value\n local_targets =['list']\n else:\n global_params.exec_in_context(\"targets=['%s']\" % value)\n # Call runtests.run_tests() as if it were a proper module\n ns={}\n execfile('./topo/tests/runtests.py',ns,ns)\n return_code += len(ns[\"run_tests\"]())\n\n\n\n if \"list\" in local_targets:\n available_items = sorted((target_description.items() + local_target_descriptions.items()))\n max_len = max(len(k) for k,_ in available_items)\n print (\"---------------\\nAvailable tests\\n---------------\\n%s\"\n % \"\\n\".join('%s%s : %s'% (k,' '*(max_len-len(k)),v)\n for k,v in available_items))\n\n global something_executed\n something_executed=True", "def actions(self, state):\n raise NotImplementedError # Override this!", "def choose_action(cll,\n all_actions: int,\n epsilon: float,\n biased_exploration_prob: float) -> int:\n if np.random.uniform() < epsilon:\n logger.debug(\"\\t\\tExploration path\")\n return explore(cll, all_actions, biased_exploration_prob)\n\n logger.debug(\"\\t\\tExploitation path\")\n return exploit(cll, all_actions)", "def act(self):\n self.features = self.next_features\n self.choose_random = np.random.choice(2,p=(1-self.epsilon,self.epsilon)) # Chooses whether to explore or exploit with probability 1-self.epsilon\n # Selects the best action index in current state\n if self.choose_random:\n self.chosenA = np.random.choice(4)\n else:\n self.chosenA = self.argmaxQsa(self.features)\n # Records reward for printing 
and performs action\n self.action = self.idx2act[self.chosenA]\n # Execute the action and get the received reward signal\n self.reward = self.move(self.action)\n self.total_reward += self.reward\n # IMPORTANT NOTE:\n # 'action' must be one of the values in the actions set,\n # i.e. Action.LEFT, Action.RIGHT, Action.ACCELERATE or Action.BRAKE\n # Do not use plain integers between 0 - 3 as it will not work", "def choose_action(self, board, possible_actions):\r\n return np.random.choice(possible_actions)", "def chooseAction(self, gameState):\n\n '''\n You should change this in your own agent.\n '''\n problem = foodsearchproblem(gameState,self)\n return self.astarsearch(problem,gameState,self.foodhuristic)[0]", "def tests_ti_file_get_actions(self):\n file = cast(File, self.ti_helper.create_indicator())\n action_targets = []\n ips = [self.ti_helper.rand_ip(), self.ti_helper.rand_ip()]\n indicator_data = {\n 'confidence': randint(0, 100),\n 'ip': ips[0],\n 'owner': self.owner,\n 'rating': randint(0, 5),\n }\n action_targets.append(self.ti.address(**indicator_data))\n indicator_data = {\n 'confidence': randint(0, 100),\n 'ip': ips[1],\n 'owner': self.owner,\n 'rating': randint(0, 5),\n }\n action_targets.append(self.ti.address(**indicator_data))\n for target in action_targets:\n target.create()\n action = 'traffic'\n file.add_action(action, action_targets[0])\n file.add_action(action, action_targets[1])\n length = 0\n for action in file.actions(action, self.ti.indicator()):\n length += 1\n assert action.get('summary') in ips\n ips.remove(action.get('summary'))\n assert length == 2\n for target in action_targets:\n target.delete()", "def send_actions(self, actions):\n pass", "def choose_action(self, game_state):\n util.raise_not_defined()", "def InitActionCheck(initActionList, init):\n for actions in initActionList:\n action_class = getNameFromIRI(actions.is_a[0].iri)\n # if the action is a SpeedAction class\n if action_class == \"SpeedAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n target_speed = actions.has_target_speed[0]\n ontology_transition_dynamics = actions.has_transition_dynamics[0]\n xosc_transition_dynamics = checkTransitionDynamics(ontology_transition_dynamics)\n init.add_init_action(action_entity_ref, xosc.AbsoluteSpeedAction(target_speed, xosc_transition_dynamics))\n continue\n #if the action is TeleportAction\n if action_class == \"TeleportAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n # if the action has position as parameter set\n s: int = 0\n offset = 0\n lane_id = 0\n road_id = 0\n if len(actions.has_position) != 0:\n position = actions.has_position[0]\n if len(position.has_s) != 0:\n s = position.has_s[0]\n\n if len(position.has_offset) != 0:\n offset = position.has_offset[0]\n\n if len(position.has_lane_id) != 0:\n lane_id = position.has_lane_id[0]\n\n if len(position.has_road_id) != 0:\n road_id = position.has_road_id[0]\n\n init.add_init_action(action_entity_ref, xosc.TeleportAction(xosc.LanePosition(s, offset, lane_id, road_id)))\n continue\n if action_class == \"EnvironmentAction\": # if the action is an EnvironmentAction\n xosc_environment_action = checkEnvironmentAction(actions)\n init.add_global_action(xosc_environment_action)\n return init", "def _select_target(self, action, msg):\n if action == 'reply':\n return msg.message_id\n elif action == 'transitiveReply':\n if msg.reply_to_message and msg.reply_to_message.message_id:\n return msg.reply_to_message.message_id\n elif action == 'send':\n return None\n 
elif action == 'none':\n raise IndexError('Just skip this one')\n else:\n raise Exception('Unknown action: %s' % action)\n return None", "def pickUpActionColour(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n\n try:\n action = kwargs[\"fname\"]\n except:\n rospy.logwarn(\"Could not get the current action selection\")\n\n# position = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colours = self.locator.tetris_blocks.keys() \n entries = {}\n\n for block in colours:\n entries[str(block)] = [self.pickUpAction, pose_offset]\n entries['any'] = [self.pickUpActionAny, pose_offset]\n self.mm.addGenericMenu(\"colourMenu\",self.mm.cur_page,\"Select the block colour for %s\" %action, entries)\n self.mm.loadMenu(\"colourMenu\")", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n obs = gameState.getAgentDistances()\n for o in self.opponents:\n self.observe(o, obs[o], gameState)\n self.displayDistributionsOverPositions(self.distributions)\n\n # You can profile your evaluation time by uncommenting these lines\n start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n #self.elapseTime(gameState)\n\n return random.choice(bestActions)", "def select_action(self, state, explore=False):\n if (np.random.rand() < self.EPSILON) and explore:\n actionIdx = np.random.randint(0, self.numJoinAction)\n actionIdxTuple = actionIndexInt2Tuple(actionIdx, self.numActionList)\n else:\n self.Q_network.eval()\n state = torch.from_numpy(state).float().to(self.device)\n state_action_values = self.Q_network(state)\n Q_mtx = (\n state_action_values.detach().cpu().reshape(\n self.numActionList[0], self.numActionList[1]\n )\n )\n pursuerValues, colIndices = Q_mtx.max(dim=1)\n _, rowIdx = pursuerValues.min(dim=0)\n colIdx = colIndices[rowIdx]\n actionIdxTuple = (np.array(rowIdx), np.array(colIdx))\n actionIdx = actionIndexTuple2Int(actionIdxTuple, self.numActionList)\n return actionIdx, actionIdxTuple", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def create_target(self):\n\n # I used a random number variable 
(rand_target) in order to randomize the target created each time this function\n # is called.\n stand = StandardTarget()\n strong = StrongTarget()\n safe = SafeTarget()\n bird = Bird()\n\n rand_target = random.randint(1, 4)\n if rand_target == 1:\n self.targets.append(stand)\n elif rand_target == 2:\n self.targets.append(strong)\n elif rand_target == 3:\n self.targets.append(safe)\n elif rand_target == 4:\n self.targets.append(bird)", "def create_target(self):\r\n if random.randint(1, 4) == 1:\r\n target = StandardTarget()\r\n self.targets.append(target)\r\n \r\n elif random.randint(1, 4) == 2:\r\n target = StrongTarget()\r\n self.targets.append(target)\r\n \r\n elif random.randint(1, 4) == 3:\r\n target = SafeTarget()\r\n self.targets.append(target)\r\n \r\n elif random.randint(1, 4) == 4:\r\n target = BonusTarget()\r\n self.targets.append(target)\r\n # TODO: Decide what type of target to create and append it to the list\r", "def user_action():\n\t### This is the function that takes and executes the users choices\n\twhile battle_on:\n\t\tchoosing = True\n\t\twhile choosing:\n\t\t\tmenu(\"general\")\n\t\t\tanswer()\n\t\t\tif ans == \"attack\":\n\t\t\t\tattack(my_pokemon, enemy)\n\t\t\t\tcalc_hp(enemy, \"attack\")\n\t\t\t\tshow_hp(enemy)\n\t\t\t\tprint \" \"\n\t\t\t\treturn\n\t\t\telif ans == \"flee\":\n\t\t\t\tchance = uniform(0, 100)\n\t\t\t\tif chance > 90:\n\t\t\t\t\twin(\"flee\")\n\t\t\t\telse:\n\t\t\t\t\tprint \"You failed to escape!\"\n\t\t\t\t\treturn\n\t\t\telif ans == \"potion\":\n\t\t\t\tuse_potion(my_pokemon)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tprint \"i dont know what you mean :)\"\n\t\t\t\tprint \"lets try again!\"\n\t\t\t\tchoosing = True", "def run(self,**kwargs):\n members = getmembers(self.bs)\n targets = \"scenario\"\n scenarios=[m[0] for m in members if m[0].startswith(targets)]\n entries={}\n for scenario in scenarios:\n entries[scenario[len(targets):]] = getattr(self.bs,scenario) # save scenario names in entries\n self.mm.addGenericMenu(targets,self.mm.cur_page,\"Select your desired scenario\", entries)\n self.mm.loadMenu(targets)", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def __call__(self, images, targets):\n return images, targets", "def actions(self, state, player):\r\n raise NotImplementedError", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def select_action(self, state, **kwargs):\n if kwargs['stage'] == 'collecting':\n return self._uniformRandomPolicy.select_action();\n elif kwargs['stage'] == 'training':\n q_values = self.calc_q_values(state);\n return self._linearDecayGreedyEpsilonPolicy.select_action(q_values\n , True);\n elif kwargs['stage'] == 'testing':\n q_values = self.calc_q_values(state);\n return self._linearDecayGreedyEpsilonPolicy.select_action(q_values\n , False);\n elif kwargs['stage'] == 'greedy':\n q_values = self.calc_q_values(state);\n return self._greedyPolicy.select_action(q_values);" ]
[ "0.6905978", "0.66720754", "0.64456934", "0.64400035", "0.6392093", "0.6392093", "0.63541424", "0.63423127", "0.6292723", "0.6272857", "0.6237775", "0.6205516", "0.61756647", "0.6061701", "0.6053934", "0.6031384", "0.6015014", "0.59976435", "0.59927475", "0.596754", "0.5917681", "0.5909248", "0.5907432", "0.59041417", "0.588691", "0.5883191", "0.5871173", "0.58570975", "0.5856509", "0.58473074", "0.58407676", "0.5822818", "0.58035856", "0.5797496", "0.57940376", "0.5793965", "0.5782512", "0.5773696", "0.5745838", "0.5744906", "0.5739353", "0.5713619", "0.571188", "0.5711149", "0.57109624", "0.5667287", "0.5663428", "0.5662897", "0.5631167", "0.5625666", "0.56159526", "0.5612004", "0.5591906", "0.5580923", "0.5570713", "0.55385834", "0.55318284", "0.5530839", "0.5529834", "0.5524757", "0.55246973", "0.55176973", "0.5507699", "0.5507699", "0.5491475", "0.5486639", "0.54839545", "0.5482967", "0.5478786", "0.5477442", "0.54701054", "0.5457725", "0.5457725", "0.5439051", "0.5438947", "0.5430314", "0.54147315", "0.5412846", "0.54070044", "0.5404694", "0.5397327", "0.53893584", "0.5389195", "0.53790355", "0.53765917", "0.5369787", "0.5368023", "0.5361649", "0.5358864", "0.5347939", "0.53467506", "0.53360426", "0.5331391", "0.5327588", "0.5327588", "0.5327588", "0.5327588", "0.5326276", "0.5321717", "0.53159565", "0.53150547" ]
0.0
-1
get list of available actions
def get_actions(self, ally: Set['Entity'], enemy: Set['Entity']) -> Tuple[ Set['Action'], Set['Entity'], Set['Entity']]: available_actions = set() for action in self.actions: if action.cool_down.name not in [effect.name for effect in self.effects.effects]: available_actions.add(action) # print(f'{self.name_color} has {[action.name for action in available_actions]}') return available_actions, ally, enemy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_actions(self):\n return self.actions", "def get_actions(self):\n return []", "def get_list_of_actions(self):\n return self.actions", "def actions(self):\n return self._action_list", "def get_available_actions(self, state):\n pass", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def get_actions(self):\n return self.agent.get_actions()", "def get_actions(self):\n\n if self.description == exceptions.NotAvailableError:\n raise exceptions.NotAvailableError('Can\\'t get actions because a description for this service is'\n ' not available.')\n return list(self.actions.values())", "def actions(self):\n return self._actions", "def actions(self):\n\n return self._actions.getSlice(0)", "def actions(self):\n\n return self._actions.getSlice(0)", "def getActions(self, state): \n util.raiseNotDefined()", "def actions(self) -> list:\n if self.debug: print(f\"AState.actions()\")\n if not self._examined:\n if self.debug: print(f\"\\tExamining...\")\n self._actions = self._generate_actions()\n self._examined = True\n return self._actions", "def actions(self):\n from moztrap.view.lists.actions import actions\n return actions", "def getActions(self):\n actions = self.actions[:]\n return actions", "def getActions():\n return getPlugins(IRenamingAction, plugins)", "def actions(self, request, action_list, group):\n return action_list", "def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())", "def _generate_actions(self) -> list:\n pass", "def available_action(self):\n return range(self.actions)", "def get_actions(self, state: TState = None) -> Sequence[TAction]:\n pass", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def list(self):\n\n return list(\n filter(\n lambda x: x.get('type') != 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )", "def get_plugin_actions(self):\n return []", "def get_plugin_actions(self):\n return []", "def actions(self):\r\n return self.puzzle.actions", "def actions(self):\n r = self.session.query(models.Action).all()\n return [x.type_name for x in r]", "def actions(self) -> List[str]:\n return list(self.__endpoints.keys())", "def actions(self) -> Sequence[_A_out]:\n return self._actions", "def actions(self):\n raise NotImplementedError", "def actions(self, state):\n myActionList= (1,2);\n return myActionList", "def GetCustomActions(debug, verbose, explicit_configurations):\r\n\r\n return []", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def get_available_actions() -> tuple:\n return tuple(method for method in dir(cli_commands) if callable(getattr(cli_commands, method)))", "def get_legal_actions(self):\n pass", "def actions(self):\n return {0, 1, 2, 3, 4, 5, 11, 12}", "def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")", "def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")", "def list_actions() -> None:\n colorama_init()\n max_action_name_len = max(len(name) for name in 
KNOWN_ACTIONS.keys())\n wrapper = textwrap.TextWrapper(\n width=80 - max_action_name_len - 3,\n subsequent_indent=' ' * (max_action_name_len + 3),\n )\n print(\n '{bright}{name:<{max_action_name_len}} -{normal} {doc}'.format(\n bright=Style.BRIGHT,\n name='name',\n max_action_name_len=max_action_name_len,\n normal=Style.NORMAL,\n doc='description [(argument: type, ...)]',\n )\n )\n print('-' * 80)\n for name, action in KNOWN_ACTIONS.items():\n wrapped_doc = wrapper.fill(' '.join(str(action.__doc__).split()))\n print(\n '{bright}{name:<{max_action_name_len}} -{normal} {doc}'.format(\n bright=Style.BRIGHT,\n name=name,\n max_action_name_len=max_action_name_len,\n normal=Style.NORMAL,\n doc=wrapped_doc,\n )\n )\n return None", "def actions(self):\n actions = []\n\n for name, item in self._definition.get('actions', {}).items():\n name = self._get_name('action', name)\n actions.append(Action(name, item, self._resource_defs))\n\n return actions", "def _getAvailableActions(self, board):\r\n myPits = board.mySide(self.id)\r\n return [i for i in myPits if i > 0]", "def _getAvailableActions(self, board):\r\n myPits = board.mySide(self.id)\r\n return [i for i in myPits if i > 0]", "def actions():\n pass", "def actions(self):\r\n return actions.Actions(self)", "def _get_legal_actions(self):\n return self.game.get_legal_actions()", "def getAllHumanActions(self):\n return self.human_policy.actions", "def actions(self):\n self._actions = {}\n self._actions['getItems'] = ('FileCrawler', None)\n #self._actions['getContents'] = ('ParseContents', ('path'))\n return self._actions", "def get_actions(self):\n actions = []\n for section in self._sections:\n for (sec, action) in self._actions:\n if sec == section:\n actions.append(action)\n\n actions.append(MENU_SEPARATOR)\n return actions", "def _get_legal_actions(self):\n raise NotImplementedError", "def actions(cls):\n return [m for m in cls.__dict__ if not \"__\" in m]", "def all_action_names() -> List[str]:\n\n return list(map(lambda c: c.name, LoggingActions))", "def get_available_actions(self):\n actions = [self.ACTIONS_INDEXES['IDLE']]\n\n # Shall we also restrict LEFT & RIGHT actions ?\n\n if self.spacecraft.velocity_index < self.spacecraft.SPEED_COUNT - 1:\n actions.append(self.ACTIONS_INDEXES['FASTER'])\n if self.spacecraft.velocity_index > 0:\n actions.append(self.ACTIONS_INDEXES['SLOWER'])\n return actions", "def getAllRobotActions(self):\n return self.robot.actions", "def legal_actions(self):\n return self.env.legal_actions()", "def list(self):\n return list(\n filter(\n lambda x: x.get('type') == 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )", "def actions() -> None:\n pass", "def get_actions(self):\r\n return -4,4", "def all_actions(self):\n actions = self.actions.stream[:]\n for eq in self.equipment:\n actions.extend(eq.actions)\n return actions", "def legal_actions(self):\n raise NotImplementedError", "def actions(self) -> List['outputs.PreventionJobTriggerInspectJobAction']:\n return pulumi.get(self, \"actions\")", "def get_all_action_types() -> List[str]:\n\n actions: List[str] = []\n for name, val in actions_module_dict.items(): # iterate through every module's attributes\n if inspect.isclass(val) and issubclass(val, Action) and val != Action:\n actions.append(name)\n\n return actions", "def get_action_meanings(self) -> list[str]:\n keys = ale_py.Action.__members__.values()\n values = ale_py.Action.__members__.keys()\n mapping = dict(zip(keys, values))\n 
return [mapping[action] for action in self._action_set]", "def getAllActions(self):\n decision_rules = self.getAllDecisionRules()\n return list(itertools.product(decision_rules, self.getAllRobotActions()))", "def getActions(self, actionSetType):\n try:\n return self.__actions[actionSetType][:]\n except KeyError:\n return []", "def getLegalActions(self, state):\n return self.actionFn(state)", "def getLegalActions(self,state):\n return self.actionFn(state)", "def get_available_actions(self, user=None, action_name=None):\n return sorted(set([transition.action_name for transition in\n self.get_available_transitions(user, action_name)]))", "def available_actions(speaker, action, args, soco_function, use_local_speaker_list):\n print(\"Currently available playback actions: {}\".format(speaker.available_actions))\n return True", "def get_possible_actions(self, state):\n return [LEFT, DOWN, RIGHT, UP]", "def actions(self):\r\n return Actions(self)", "def get_actions(self, request):\n return super(OrganizationAdmin, self).get_actions(request)", "def actions(self, phrase: str) -> list:\n return self._current_scope.get(phrase, [])", "def get_actions(self, request):\n actions = super(RateLimitedIPAdmin, self).get_actions(request)\n del actions['delete_selected']\n return actions", "def get_action_list(program_name: str) -> str:\n short_name = int(program_name[8:])\n es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n res = es.search(\n index='actions-index', \n params= {'size': 1}, \n body={\"query\": {\"match\": {'name' : short_name}}})\n for hit in res['hits']['hits']:\n return hit['_source']['actions']\n return \"\"", "def getStateActionFeatures(self,state,action):\n return [state, self.actions[action]]", "def _get_of_actions(of_flow_stats):\n # Add list of high-level actions\n # Filter action instructions\n apply_actions = InstructionType.OFPIT_APPLY_ACTIONS\n of_instructions = (ins for ins in of_flow_stats.instructions\n if ins.instruction_type == apply_actions)\n # Get actions from a list of actions\n return chain.from_iterable(ins.actions for ins in of_instructions)", "def get_types_of_actions(self):\n if(self._types_of_actions == None):\n self._logger.write(\"Error! types_of_action contains no value\")\n elif(len(self._types_of_actions) == 0):\n self._logger.write(\"Error! types_of_actions list is empty\")\n else:\n try:\n return self._types_of_actions\n except Exception as e:\n self._logger.write(\"Error! 
Could not fetch the list of types_of_actions: \\n %s\" % e)", "def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]", "def get_available_keys(self) -> List[Tuple[str, ActionType]]:\n available_keys = []\n if self.key_to_action != {}:\n for key, idx in sorted(self.key_to_action.items(), key=operator.itemgetter(1)):\n if key != ():\n key_names = [self.renderer.get_key_names([k])[0] for k in key]\n available_keys.append((self.action_space.descriptions[idx], ' + '.join(key_names)))\n elif type(self.action_space) == DiscreteActionSpace:\n for action in range(self.action_space.shape):\n available_keys.append((\"Action {}\".format(action + 1), action + 1))\n return available_keys", "def list_commands(self, ctx):\n return self.daemon.list_actions()", "def get_actions(self, request):\n actions = super().get_actions(request)\n actions.pop('delete_selected', None)\n\n return actions", "def get_all_actions(self, custom_move_ordering = False) :\n raise NotImplementedError", "def get_actions(\n self, observations: Observations, action_space: gym.Space\n ) -> Actions:\n return super().get_actions(observations, action_space)", "def actions(self):\n isinst = isinstance\n return [c.widget for c in self.children() if isinst(c, QtAction)]", "def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list", "def actions(self, state):\n\t\traise NotImplementedError", "def required_actions(self, name: str):\n assert name in self._required_processes\n\n return self._required_actions.get(name, list())", "def get_possible_actions(self) -> [Action]:\r\n if self.fields[self.agent_x][self.agent_y] == Field.EMPTY or self.terminated:\r\n return [Action.NORTH, Action.EAST, Action.SOUTH, Action.WEST]\r\n else: # must be terminal\r\n return [Action.TERMINAL]", "def get_open_chipsactions(self):\n return self.__open_chipsactions[:]", "def actions(self):\n return self._separated_constructs(RuleAction)", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def get_legal_actions(self, index):\n actions = []\n agent = self.agent_states[index]\n for action in ACTIONS:\n pos = agent.pos[0] + action[0], agent.pos[1] + action[1]\n if MAP[pos[0]][pos[1]] not in WALL:\n actions.append(action)\n return actions", "def _help_actions(self):\n actions_str = \"\"\n for (key, value) in self.actions_help.items():\n actions_str += \"command: %s\\n%s\\n\\n\" % (key, value)\n print(actions_str)\n sys.exit(0)", "def get_actions(g: Game):\n\n act_actions = []\n act = [Income, ForeignAid, Coup, Tax, Assassinate, Exchange, Steal]\n\n opponents = g.get_opponents()\n\n if 
g.players[g.action_player].coins >= 10:\n act = [Coup]\n\n for x in act:\n if g.players[g.action_player].coins >= x.cost:\n if x.attack_action:\n for p in range(len(opponents)):\n if g.players[opponents[p]].in_game:\n act_actions.append((x, p))\n else:\n act_actions.append((x, None))\n\n return act_actions", "def list_actions(self, query_params: Dict[str, object] = None) -> List[Action]:\n if query_params is None:\n query_params = {}\n\n path_params = {\n }\n\n path = Template(\"/action/v1beta2/actions\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, Action)", "def get_action_results(self,last_states,actions,time_i):\n return [],actions", "def getLegalActions(self):\n return ['BOT', 'SLD']", "def action_templates(self) -> List[ActionTemplate]:\n return self._action_templates", "def getLegalActions(self):\n return ['forward', 'left', 'right', None]" ]
[ "0.88604504", "0.8727885", "0.8649391", "0.8512479", "0.8356988", "0.8249469", "0.8249469", "0.8249469", "0.8200832", "0.80950415", "0.80412275", "0.7978333", "0.7978333", "0.7912648", "0.7903887", "0.7893374", "0.78658706", "0.77716225", "0.77087396", "0.7706551", "0.7704463", "0.76978004", "0.76270884", "0.75879306", "0.7546991", "0.75365204", "0.75365204", "0.7517188", "0.7467641", "0.74639446", "0.74580425", "0.74510664", "0.7441834", "0.74368125", "0.74155754", "0.7384114", "0.7373427", "0.73644584", "0.7352711", "0.7352711", "0.7340481", "0.73308754", "0.73170424", "0.73170424", "0.7305351", "0.730451", "0.7303598", "0.7302056", "0.7292767", "0.72870594", "0.72646964", "0.7260048", "0.7240125", "0.722794", "0.72242486", "0.72217196", "0.71925265", "0.71442825", "0.7141142", "0.71391195", "0.7102593", "0.70840335", "0.7058087", "0.70372045", "0.7030784", "0.70181173", "0.7018059", "0.7017328", "0.69991755", "0.69982696", "0.6982029", "0.6950827", "0.69439846", "0.6942935", "0.69079274", "0.6904978", "0.6898326", "0.68734217", "0.68720984", "0.68252814", "0.68180937", "0.681523", "0.6812502", "0.68028593", "0.6802256", "0.6780517", "0.6749769", "0.67291826", "0.6728424", "0.6721858", "0.6719398", "0.67092216", "0.6703153", "0.6703117", "0.6673733", "0.6660776", "0.6653748", "0.66461444", "0.66338205", "0.6611998", "0.6579456" ]
0.0
-1
Measure distance between two entities
def measure_distance(actor: Entity, target: Entity) -> int:
    dst = int(actor is not target)
    return dst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(self, other):\n ...", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def _object_distance(self, object1, object2):\n return np.linalg.norm(np.array(object1) - np.array(object2))", "def __distance_to(self, other: Any) -> float:\n return np.linalg.norm(self.pos - other.pos)", "def distance(self, a, b):\n raise NotImplementedError()", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))", "def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)", "def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)", "def distanceTo(self,other):\n if not isinstance(other,Point):\n return \n return math.sqrt((self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2)", "def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def dist(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return length(sub(first,other))", "def get_distance(self, resp1, resp2):\n feed_dict = {self.anchor: resp1}\n embed1 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n feed_dict = {self.anchor: resp2}\n embed2 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n return np.sqrt(np.sum((embed1-embed2)**2, 1))", "def distance(self, other):\n # distance = math.sqrt((self.position.x - other.position.x) ** 2 +\n # (self.position.y - other.position.y) ** 2)\n distance = math.sqrt(sum((self.position - other.position) ** 2))\n return distance", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def distance(self, other):\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)", "def distance(self, other_pt, is_lla=True):\n return 0.0", "def query_distance(self, instance1=(), instance2=()):\n distance = sum([pow((a - b), 2) for a, b in zip(instance1, instance2)])\n return distance", "def obj_distance(obj1, obj2):\n distance = aversine(obj1.details['lon'], obj1.details['lat'], obj2.details['lon'], obj2.details['lat'])\n return distance", "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def distanceTo(self, other):\n dist = haversine(\n self.longitude, self.latitude, other.longitude, other.latitude)\n # Convert km to feet\n return kilometersToFeet(dist)", "def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def 
distance(self, other):\n\n return hypot(self.x - other.x, self.y - other.y)", "def distance(self, other):\n x_diff_sq = (self.x-other.x)**2\n y_diff_sq = (self.y-other.y)**2\n return (x_diff_sq + y_diff_sq)**0.5", "def dist(self, node_0, node_1):\n coord_0, coord_1 = self.coords[node_0], self.coords[node_1]\n return math.sqrt((coord_0[0] - coord_1[0]) ** 2 + (coord_0[1] - coord_1[1]) ** 2)", "def distance_to(self, other):\n x0,y0 = self.x, self.y\n x1,y1 = other.x, other.y\n dist = math.sqrt((x1-x0) ** 2 + (y1-y0) ** 2)\n return int(dist)", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)", "def measure_distance():\n # Get the active object\n obj = bpy.context.active_object\n \n # Switch in object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Get the two selected vertices\n twoVerts = [None, None]\n index = 0\n for vertex in obj.data.vertices:\n if vertex.select:\n twoVerts[index] = (obj.matrix_world @ vertex.co)\n index = index + 1\n if index == 2:\n break \n \n print(twoVerts)\n \n # Calculate the distance between the two points\n if twoVerts[0] != None and twoVerts[1] != None:\n bpy.context.scene.distance[0] = abs(twoVerts[0].x - twoVerts[1].x)\n bpy.context.scene.distance[1] = abs(twoVerts[0].y - twoVerts[1].y)\n bpy.context.scene.distance[2] = abs(twoVerts[0].z - twoVerts[1].z)\n bpy.context.scene.distance[3] = sqrt(bpy.context.scene.distance[0]**2 + bpy.context.scene.distance[1]**2 + bpy.context.scene.distance[2]**2)\n else:\n bpy.context.scene.distance[0] = 0\n bpy.context.scene.distance[1] = 0\n bpy.context.scene.distance[2] = 0\n bpy.context.scene.distance[3] = 0 \n \n # Switch in edit mode\n bpy.ops.object.mode_set(mode='EDIT')", "def DistanceDimension(\n self,\n entity1: ConstrainedSketchVertex,\n entity2: ConstrainedSketchVertex,\n textPoint: tuple[float],\n value: float = None,\n reference: Boolean = OFF,\n ):\n pass", "def distance(self, other):\n # only used in triangle.__str__\n return hypot(self.x - other.x, self.y - other.y)", "def dist(self, other):\n return math.sqrt((self.x - other.x)**2 +\n (self.y - other.y)**2 +\n (self.z - other.z)**2)", "def distance_to(self, other):\n return abs(self.x-other.x) + abs(self.y-other.y) + abs(self.z-other.z)", "def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2", "def distance(self,coord_1, coord_2):\n return np.sqrt(np.sum((np.array(coord_1)-np.array(coord_2))**2))", "def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km", "def distance(self, source, target):\r\n raise NotImplementedError('Distance calculation not implemented yet')", "def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not %s and %s.\" % (a, b)\n )\n return abs(a - b)", "def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not %s and %s.\" % (a, b)\n )\n return abs(a - b)", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5", "def distanceTwoPoints(self,A,B):\n #productive\n # used by addNeedleToScene\n profprint()\n length = ( (A[0]-B[0])**2 + (A[1]-B[1])**2 + (A[2]-B[2])**2 ) ** 0.5\n return length", "def distance_between(self, 
first_node_object, second_node_object):\n\n (first_column, first_row) = first_node_object\n (second_column, second_row) = second_node_object\n\n return numpy.sqrt((first_row - second_row) ** 2 +\n (first_column - second_column) ** 2)", "def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not {} and {}.\".format(a, b)\n )\n return 1 if a != b else 0", "def distance(self, a, b):\n \n # -----------------------------\n # Your code\n '''R = 3963 # radius of Earth (miles)\n lat1, lon1 = math.radians(a[0]), math.radians(a[1])\n lat2, lon2 = math.radians(b[0]), math.radians(b[1])\n \n return math.acos(math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R*0.000621371'''\n return abs(a[0] - b[0]) + abs(a[1] - b[1])\n \n \n # -----------------------------", "def distance_to(self, other):\n p_self, p_other = self.closest_points(other)\n return np.linalg.norm(p_self - p_other)", "def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def distance(self, coord1, coord2):\n\n delta_x = self.delta_long_miles(coord1.lat, coord1.delta_long(coord2))\n delta_y = self.delta_lat_miles(coord1.delta_lat(coord2))\n\n return self.cartesian_dist(delta_x, delta_y)", "def distance(self, coord1, coord2):\n return (abs(coord1.x - coord2.x) + abs(coord1.y - coord2.y) + abs(coord1.z - coord2.z))//2", "def distanceTo(self, other):\n return self.position.distanceTo(other.position)", "def distance(self, other):\n return _binary_op(arctern.ST_Distance, self, other)", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def distancia(self, other):\n return ((self.x-other.x)**2 + (self.y-other.y)**2 + (self.z-other.z)**2) ** (1 / 2)", "def distances(self):", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d", "def distance(self, other: \"Location\") -> float:\n return haversine(self.latitude, self.longitude, other.latitude, other.longitude)", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def distance(v1, v2):\r\n return magnitude(*subtract(v2, v1))", "def dist(v1, v2):\n return ( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )**0.5", "def __distance(start_x, start_y, end_x, end_y):\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance", "def dist(a, b):\n return np.sum((a-b)**2.0)**.5", "def distance_to(self, other):\n ox, oy = other\n return math.hypot(self[0] - ox, self[1] - oy)", "def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "def hausdorff_distance(self, other):\n ...", "def distance(coords1, coords2):\n dx = coords1.x - coords2.x\n dy = coords1.y - coords2.y\n return math.sqrt(dx * dx + dy * dy)", "def distance_between(self, n1, n2):\n if self.distance_method == 'direct':\n n1_relevants = 0\n 
n2_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], n1.anchor):\n n1_relevants += 1\n if is_relevant(self.sample.iloc[i], n2.anchor):\n n2_relevants += 1\n return (n1_relevants - n2_relevants)/len(self.sample)\n else:\n return 0.5", "def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))", "def _node_distance(self, first, second):\r\n\r\n name_1 = first.name.split(' ')[0]\r\n name_2 = second.name.split(' ')[0]\r\n\r\n seq1 = self.msa_by_name[name_1]\r\n seq2 = self.msa_by_name[name_2]\r\n\r\n distance = self._seq_distance(seq1, seq2)\r\n\r\n return distance", "def distance(self, p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. + (pos1[1] - pos2[1])**2.)", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def distance(self,pt1,pt2):\n #productive #frequent\n if frequent: profprint()\n d = ( ( float(pt1[0]) - float(pt2[0]) )**2 + ( float(pt1[1]) - float(pt2[1]) )**2 + ( float(pt1[2]) - float(pt2[2]) )**2 )**0.5\n return d", "def distanceTo(self, other):\n result = (other._x - self._x) * (other._x - self._x) \\\n + (other._y - self._y) * (other._y - self._y)\n return result ** 0.5", "def distanceTwoPoints(self, A, B):\r\n # productive\r\n # used by addNeedleToScene\r\n if frequent: profprint()\r\n length = ((A[0] - B[0]) ** 2 + (A[1] - B[1]) ** 2 + (A[2] - B[2]) ** 2) ** 0.5\r\n return length", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d", "def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def dist(v1: vect2d, v2: vect2d) -> float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d", "def test_distance(self):\n for emb_vals, point, dist_gt in self.DISTANCE_EXAMPLES:\n print(emb_vals, point, dist_gt)\n emb = to_emb(emb_vals)\n dist = emb.distance(point)\n assert np.allclose(dist, dist_gt), \\\n (\"Wrong distance for point {}: expected {} but was {};\"\n \"\\nembedding:\\n{}\").format(point, dist_gt, dist, str(emb))", "def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def _distance_between(self, n1, n2):\n return cartesian_distance(n1[0], n1[1], n2[0], n2[1])", "def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def CalculateDistance(q1, q2):\r\n return np.sqrt((q1[0] - q2[0])**2 + (q1[1] - q2[1])**2)", "def distance(self,pose1, pose2):\n return math.sqrt((pose1[0] - pose2[0]) ** 2 + (pose1[1] - pose2[1]) ** 2) + 0.001", "def distance(A, B):\n return abs(A - B)", "def distance(A, B):\n return abs(A - B)", "def calculate_distance(a,b,data):\r\n d=0 #distance\r\n for i in range(data.numAttributes):\r\n if a[i]!=data.labelMissingData and 
b[i]!=data.labelMissingData: \r\n if not data.attributeInfo[i][0]: #Discrete Attribute\r\n if a[i] != b[i]:\r\n d+=1\r\n else: #Continuous Attribute\r\n min_bound=float(data.attributeInfo[i][1][0])\r\n max_bound=float(data.attributeInfo[i][1][1])\r\n d+=abs(float(a[i])-float(b[i]))/float(max_bound-min_bound) #Kira & Rendell, 1992 -handling continiuous attributes\r\n return d" ]
[ "0.7556397", "0.74695444", "0.74531186", "0.7383299", "0.7376668", "0.7279911", "0.72782665", "0.72525454", "0.72392297", "0.72290546", "0.72261333", "0.7214202", "0.7161101", "0.7153692", "0.7150545", "0.7123836", "0.7117742", "0.71136373", "0.71060133", "0.7101069", "0.7084995", "0.7071285", "0.70631576", "0.70610803", "0.7044516", "0.70323706", "0.7023455", "0.7014034", "0.7011583", "0.7010262", "0.7005272", "0.70040697", "0.6974711", "0.6968224", "0.69562995", "0.6948686", "0.6926449", "0.69121873", "0.69110656", "0.6910559", "0.6910428", "0.6908969", "0.69015276", "0.68934286", "0.68903005", "0.68903005", "0.68885994", "0.6869951", "0.6867753", "0.6864423", "0.6862893", "0.68595463", "0.6856798", "0.68556976", "0.6854613", "0.68469024", "0.68458664", "0.68392944", "0.68390167", "0.68376356", "0.6837163", "0.68360305", "0.68335235", "0.68312645", "0.6821156", "0.6817219", "0.68167645", "0.6814624", "0.68143463", "0.6811767", "0.6794225", "0.67853713", "0.6781543", "0.67736983", "0.6761943", "0.6758333", "0.6749979", "0.6747167", "0.6741189", "0.67411876", "0.6733119", "0.6729597", "0.67260367", "0.67188764", "0.6718067", "0.67145145", "0.67074966", "0.6704079", "0.670281", "0.6701671", "0.6701132", "0.67003745", "0.66973895", "0.66963816", "0.6692085", "0.6691339", "0.6687314", "0.66815424", "0.66747665", "0.66747665", "0.6673627" ]
0.0
-1
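For illustration only, a minimal usage sketch of the measure_distance document above. The Entity class here is a hypothetical stand-in, since the record does not define it; the expected results follow directly from int(actor is not target), which yields 1 for distinct objects and 0 for the same object.

class Entity:
    """Hypothetical stand-in for the Entity type referenced by the record."""
    pass

a = Entity()
b = Entity()

# measure_distance is the document function from the record above.
assert measure_distance(a, a) == 0  # same object -> distance 0
assert measure_distance(a, b) == 1  # distinct objects -> distance 1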
Return apitools message object for given message name.
def GetApiMessage(message_name):
  messages = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION)
  return getattr(messages, message_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msg(name):\n msg = Message.ByKeys(name)\n if msg is not None:\n txt = msg.message_ml\n if msg_is_ignored(name):\n txt = IGNORE_PREFIX + txt\n else:\n misc.cdblogv(misc.kLogErr, 0,\n \"bomcreator: could not find message '%s'\" % name)\n txt = name\n return txt", "def GetAioMessageStruct(message_type_name):\n try:\n if message_type_name == 'kMessageTypeControlTelemetry':\n return getattr(pack_control_telemetry, 'ControlTelemetry')\n elif message_type_name == 'kMessageTypeControlSlowTelemetry':\n return getattr(pack_control_telemetry, 'ControlSlowTelemetry')\n elif message_type_name == 'kMessageTypeControlDebug':\n return getattr(pack_control_telemetry, 'ControlDebugMessage')\n elif message_type_name == 'kMessageTypeSimTelemetry':\n return getattr(pack_sim_telemetry, 'SimTelemetry')\n elif message_type_name == 'kMessageTypeGroundTelemetry':\n return getattr(pack_ground_telemetry, 'GroundTelemetry')\n elif message_type_name in ('kMessageTypeDynamicsReplay',\n 'kMessageTypeEstimatorReplay',\n 'kMessageTypeSimCommand',\n 'kMessageTypeSimSensor',\n 'kMessageTypeSimTetherDown'):\n return getattr(pack_sim_messages,\n message_type_name[len('kMessageType'):] + 'Message')\n else:\n return getattr(pack_avionics_messages,\n message_type_name[len('kMessageType'):] + 'Message')\n except AttributeError:\n raise AioClientException(\n 'No struct for AIO message type: ' + message_type_name)", "def get_message(self, _id):\n return Message.deserialize(self._get_single('messages', {'id': _id}))", "def get_obj(self, name):\r\n val = self.get(name)\r\n if not val:\r\n return None\r\n if name.find('queue') >= 0:\r\n obj = boto.lookup('sqs', val)\r\n if obj:\r\n obj.set_message_class(ServiceMessage)\r\n elif name.find('bucket') >= 0:\r\n obj = boto.lookup('s3', val)\r\n elif name.find('domain') >= 0:\r\n obj = boto.lookup('sdb', val)\r\n else:\r\n obj = None\r\n return obj", "def makeMessage( name, *structure ):\n return X12Message( name, *structure )", "def get_obj(self, name):\n val = self.get(name)\n if not val:\n return None\n if name.find('queue') >= 0:\n obj = boto.lookup('sqs', val)\n if obj:\n obj.set_message_class(ServiceMessage)\n elif name.find('bucket') >= 0:\n obj = boto.lookup('s3', val)\n elif name.find('domain') >= 0:\n obj = boto.lookup('sdb', val)\n else:\n obj = None\n return obj", "def _ns_message(self, queue, message_id):\n return self._ns(queue, \"messages\", message_id)", "def message(self, message_id):\r\n return Message(self, message_id)", "def get_message_by_id(message_id):\n return Message.query.get(message_id)", "def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg", "def UnpackMessage(swig_obj_pointer, msg_name):\n\n ptr = int(swig_obj_pointer)\n c_array = ctypes.c_char * aio.GetPackMessageSize(msg_name)\n received = c_array.from_address(ptr)\n\n msg_type = MESSAGE_TYPE_HELPER.Value(msg_name)\n return c_helpers.Unpack(received[:], MESSAGE_STRUCTS[msg_type])", "def _get_message(self, sender_message):\n # type: (str) -> Message or None\n st_re = self.SENDER_TEXT.search(sender_message)\n if st_re is None:\n return None\n else:\n return Message(speaker=st_re.group(1), text=st_re.group(2).strip())", "def message(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'message')\r\n return http.Request('GET', url), parsers.parse_json", "def getMessage():\n return message", "def get_message(self, 
message_id):\n r = requests.get('https://outlook.office.com/api/v2.0/me/messages/' + message_id, headers=self._headers)\n check_response(r)\n return Message._json_to_message(self, r.json())", "def get_message(self, message_id: int) -> discord.Message:\n return self._connection._get_message(message_id)", "def GetMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n return message\n except errors.HttpError:\n print('An error occurred: ')", "def _get_message(self):\n return self.__message", "def load_message(message_id):\n pathname = \"messages/{}.json\".format(message_id)\n return _load_message(pathname)", "def from_string(name: str) -> MessageOperation:\n if name == \"cipher\":\n return MessageOperation.cipher\n elif name == \"decipher\":\n return MessageOperation.decipher\n elif name == \"attack\":\n return MessageOperation.attack", "def read_message(m_bytes, proto_version):\n\n # This is the sub-module for the specified proto version.\n try:\n proto_module = PROTOCOL_VERSION_MAP[proto_version]\n except KeyError:\n # TODO: Depending on the backwards-compatibility policy with gotalk,\n # we might be able to fall back to the latest known version and\n # potentially limp along. Too early to know.\n raise InvalidProtocolVersionError(\"Invalid gotalk protocol version.\")\n\n type_id = m_bytes[0]\n try:\n msg_class_name = MESSAGE_TYPE_TO_CLASS_MAP[type_id]\n except KeyError:\n raise InvalidMessageTypeIDError()\n msg_class = getattr(proto_module, msg_class_name)\n return msg_class.from_bytes(m_bytes)", "def get_message(self, bulk_id):\n res = self.client.get(\"/v1/messages/\" + str(bulk_id))\n\n try:\n return Message(res.data[\"message\"])\n except:\n raise ValueError(\"returned response not valid\")", "def message(self, *args, **kwargs) -> Message:\n return Message(self.handle, *args, **kwargs)", "def GetMessage(service, user_id, msg_id):\n try:\n #take out format='raw' if don't want base64\n message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()\n\n print('Message snippet: %s' % message['snippet'])\n\n return message\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def GetMessage(service, user_id, msg_id, snippetMessage=True):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n #print('Message snippet: %s' % message['snippet'])\n #print('Message snippet: %s' % message['payload']['headers'])\n #print(unicode('Message snippet: %s' % message['snippet'],'utf-8'))\n\n if snippetMessage:\n return message['snippet']\n else:\n return message\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def get_message(message_id, service):\n message = service.users().messages().get(userId='me', id=message_id).execute()\n return message", "def get_message(self):\n return self.msg", "def get_message(self):\n return self.message", "def get_message(self):\n return self.message", "def get_message(self) -> Union[\"Message\", None]:\n raw_data = (\n self.raw_data.get(\"message\") or\n self.raw_data.get(\"edited_message\")\n )\n\n if raw_data:\n return Message(raw_data)\n\n return None", "def get_message(request):\r\n message_key = request.GET.get('message', None)\r\n message = None\r\n message_type = None\r\n\r\n if ((not message_key or message_key == 'upload_success') and\r\n QIFParser.get_status() == 'in_progress'):\r\n message_key = 'in_progress_quicken_file'\r\n\r\n try:\r\n message = Message.MESSAGES[message_key]\r\n 
message_type = message['type']\r\n message = message['message']\r\n except KeyError:\r\n pass\r\n\r\n return {'message': message,\r\n 'message_key': message_key,\r\n 'message_type': message_type}", "def from_handover_message(cls, msg):\n return cls.from_items(msg.to_match().items())", "def get_message (self) :\n return self._message", "def get_message(self, resp):\n return resp['message']", "def getMessage(self, queue_name, max=None, project_id=None):\n if project_id is None:\n project_id = self.project_id\n n = \"\"\n if max is not None:\n n = \"&n=%s\" % max\n url = \"%sprojects/%s/queues/%s/messages?oauth=%s%s\" % (self.url,\n project_id, queue_name, self.token, n)\n body = self.__get(url)\n\n return json.loads(body)", "def fetch_message(conn, msg_uid ):\n # TODO: Could we fetch just the envelope of the response to save bandwidth?\n rv, data = conn.uid('fetch', msg_uid, \"(RFC822)\")\n if rv != 'OK':\n print (\"ERROR fetching message #\", msg_uid)\n return {}\n\n return email.message_from_bytes(data[0][1]) # dict-like object", "def get_message(self, id):\n url = \"https://api.imgur.com/3/message/{0}\".format(id)\n resp = self._send_request(url)\n return Message(resp, self)", "def GetOrCreatePredefinedMessage(message_type, lab_name, content):\n exisiting_predefined_message_entity = GetPredefinedMessage(\n message_type=message_type, lab_name=lab_name, content=content)\n if exisiting_predefined_message_entity:\n return exisiting_predefined_message_entity\n else:\n return datastore_entities.PredefinedMessage(\n type=message_type,\n content=content,\n lab_name=lab_name,\n create_timestamp=datetime.datetime.utcnow())", "def get_message(self):\n return self.__mess", "def get_json_message(message_key):\n file_path = (os.getcwd() + '/ufo/static/locales/' +\n flask.session['language_prefix'] + '/messages.json')\n try:\n with open(file_path) as json_file:\n messages = json.load(json_file)\n return messages[message_key]\n except:\n return message_key", "def get_message(message_id): # noqa: E501\n rtxFeedback = RTXFeedback()\n return rtxFeedback.getMessage(message_id)", "def get_message(message_id): # noqa: E501\n rtxFeedback = RTXFeedback()\n return rtxFeedback.getMessage(message_id)", "def gen_message(msg: Message) -> str:\n msg_dict = msg._asdict()\n msg_dict.update({MSG_TYPE_NAME: type(msg).__name__})\n\n return json.dumps(msg_dict)", "def pop_message(self):\n try:\n result = self.messages.get()\n except Queue.Empty:\n return None\n else:\n return Message(body=result.getBody(), subject=result.getBody(), sender=result.getFrom())", "def message(self, sid):\r\n return Message(self, sid)", "def message(self, msg):\n if (AZMessage.is_agilezen_xmpp_message(msg)):\n try:\n az_message = AZMessage(msg)\n except (MessageCreationException, api.APIException) as ex:\n print ex\n return None\n for handler in self.handlers:\n handler.handle(az_message)", "def GetMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,format='raw').execute()\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = email.message_from_string(msg_str)\n data = {}\n data['to'] = mime_msg['To']\n data['from'] = mime_msg['From']\n data['date'] = mime_msg['Date']\n data['subject'] = mime_msg['Subject']\n data['message'] = \"\"\n return data\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def get_own_message_by_id(id):\n msg = g.db.query(Message).filter(Message.id == id).first()\n if msg is None:\n abort(404, \"Message 
doesn't exist.\")\n if msg.user_id != g.user.id:\n abort(403, \"What do you think you're doing?\")\n\n return msg", "def getMessage(self):\n m = self.messages\n l = len(m)\n if (l == 0):\n return \"\"\n elif (l == 1):\n return self.acronym + \" | \" + m[0]\n else:\n msg = self.acronym + \" | \"\n for i in range(0,l):\n msg += m[i]\n if (i < l-1):\n msg += \" | \" # error message separator\n return msg", "def status_message(message):\n return StatusMessage(message)", "def get_message(self, message_id):\n req_data = [ str(message_id) ]\n return self.request(\"find:Message.stats, Message.content\", req_data)", "def message():\n # Retrieve JSON parameters data.\n data = request.get_json() or {}\n data.update(dict(request.values))\n msg = data.get(\"msg\")\n if not msg:\n raise abort(400, \"missing 'msg' data\")\n\n # Deffer the message as a task.\n result = tasks.process_message.delay(msg, delta=10)\n task_id = result.task_id\n if not task_id or result.failed():\n raise abort(400, \"task failed\")\n # Then check and return ID.\n return {\n \"task_id\": result.id\n }", "def get_message_info(self, msgid=None):\n raise NotImplementedError('This method is not supported '\n 'with v2 messaging')\n if msgid:\n return self.sms_client.get_message(msgid)", "def get_message_class_by_type(msgtype):\n\n try:\n module = importlib.import_module('platypush.message.' + msgtype)\n except ImportError as e:\n logging.warning('Unsupported message type {}'.format(msgtype))\n raise RuntimeError(e)\n\n cls_name = msgtype[0].upper() + msgtype[1:]\n\n try:\n msgclass = getattr(module, cls_name)\n except AttributeError as e:\n logging.warning('No such class in {}: {}'.format(\n module.__name__, cls_name))\n raise RuntimeError(e)\n\n return msgclass", "def message(self) -> Optional[str]:\n return pulumi.get(self, \"message\")", "def Message(self, *args, **kwargs):\n return Message(self, *args, **kwargs)", "def get_message(cls):\n rp = cls.get()\n try:\n message = rp.queue_send.get_nowait()\n except Exception:\n return None\n\n return message", "def get_message(self):\n data = self.socket.recv(1024)\n if not data:\n logging.error('Failed to read data from socket')\n return\n\n return self.decode_message(data)", "def get(self, message_id: int, lang: str = None) -> Message:\n if lang is None:\n lang = SettingsDAO().get_value('language', str)\n\n data = dict(self.database.select(self.DATABASE_TABLE, {'ID': message_id})[0])\n\n curDate = datetime.fromordinal(data.get('date')) if data.get('date') else None\n\n message = Message(message_id, data['text'], curDate, bool(data['isMine']), data.get('partyCharacterId'),\n data.get('characterId'))\n\n return message", "def from_msg(cls, msg: Msg) -> Message:\n tokens = msg.reply.split(\".\")\n if len(tokens) != 9 or tokens[0] != \"$JS\" or tokens[1] != \"ACK\":\n raise ValueError(\n \"Failed to parse message. 
Message is not a valid JetStream message\"\n )\n message = Message(\n subject=msg.subject,\n seq=tokens[6],\n data=msg.data,\n time=datetime.fromtimestamp(\n int(tokens[7]) / 1_000_000_000.0, tz=timezone.utc\n ),\n hdrs=msg.headers,\n )\n message._msg = msg\n return message", "def get_object(self):\n # read the URL data values into variables\n astronaut_pk = self.kwargs['astronaut_pk']\n message_pk = self.kwargs['message_pk']\n\n # find the SendMessage object, and return it\n st_cfh = SendMessage.objects.get(pk=message_pk)\n return st_cfh", "def message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"message\")", "def get_localized_message(message, user_locale):\r\n if isinstance(message, Message):\r\n if user_locale:\r\n message.locale = user_locale\r\n return unicode(message)\r\n else:\r\n return message", "def getMessage(self, msg_id: str) -> str:\n message = self.service.users().messages().get(userId='me', id=msg_id, format='raw').execute()\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = email.message_from_bytes(msg_str)\n message_main_type = mime_msg.get_content_maintype()\n \n if message_main_type == 'multipart':\n for part in mime_msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif message_main_type == 'text':\n return mime_msg.get_payload()", "def get_message():\n\tincoming_message = conn.recv(1024)\n\tincoming_message = incoming_message.decode()\n\treturn incoming_message", "def get_app_message(self):\n return self.messages[\"app\"].get()", "def get_message_details(self, message_id):\n\n for message in self.message_list:\n if message['id'] == message_id:\n return message\n \n raise Exception('No message with this message id')", "def _load_message(self,\n message_pb: descriptor_pb2.DescriptorProto,\n address: metadata.Address,\n path: Tuple[int],\n resources: Mapping[str, wrappers.MessageType],\n ) -> wrappers.MessageType:\n address = address.child(message_pb.name, path)\n\n # Load all nested items.\n #\n # Note: This occurs before piecing together this message's fields\n # because if nested types are present, they are generally the\n # type of one of this message's fields, and they need to be in\n # the registry for the field's message or enum attributes to be\n # set correctly.\n nested_enums = self._load_children(\n message_pb.enum_type,\n address=address,\n loader=self._load_enum,\n path=path + (4,),\n resources=resources,\n )\n nested_messages = self._load_children(\n message_pb.nested_type,\n address=address,\n loader=self._load_message,\n path=path + (3,),\n resources=resources,\n )\n\n oneofs = self._get_oneofs(\n message_pb.oneof_decl,\n address=address,\n path=path + (7,),\n )\n\n # Create a dictionary of all the fields for this message.\n fields = self._get_fields(\n message_pb.field,\n address=address,\n path=path + (2,),\n oneofs=oneofs,\n )\n fields.update(self._get_fields(\n message_pb.extension,\n address=address,\n path=path + (6,),\n oneofs=oneofs,\n ))\n\n # Create a message correspoding to this descriptor.\n self.proto_messages[address.proto] = wrappers.MessageType(\n fields=fields,\n message_pb=message_pb,\n nested_enums=nested_enums,\n nested_messages=nested_messages,\n meta=metadata.Metadata(\n address=address,\n documentation=self.docs.get(path, self.EMPTY),\n ),\n oneofs=oneofs,\n )\n return self.proto_messages[address.proto]", "def contact_get_message_string(user_name, contact_name, contact_email,\n contact_message):\n if user_name:\n message = (\n \"Message from 
FreeFrom \\nName: \" +\n contact_name +\n \"\\nUser Name: \" + user_name +\n \"\\nEmail Address: \" + contact_email +\n \"\\nMessage: \" + contact_message)\n else:\n message = (\n \"Message from FreeFrom \\nName: \" +\n contact_name +\n \"\\nEmail Address: \" + contact_email +\n \"\\nMessage: \" + contact_message)\n return message", "def GetPredefinedMessage(message_type, lab_name, content):\n predefined_message_entities = (\n datastore_entities.PredefinedMessage.query()\n .filter(datastore_entities.PredefinedMessage.type == message_type).filter(\n datastore_entities.PredefinedMessage.lab_name == lab_name).filter(\n datastore_entities.PredefinedMessage.content == content).fetch(1))\n if predefined_message_entities:\n return predefined_message_entities[0]\n else:\n return None", "def message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message\")", "def message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message\")", "def message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message\")", "def message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message\")", "def message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message\")", "def message(self) -> \"str\":\n return self._attrs.get(\"message\")", "def message(self) -> \"str\":\n return self._attrs.get(\"message\")", "async def fetch_message(ctx: commands.Context, message_id: int) -> discord.Message:\r\n try:\r\n msg = await ctx.fetch_message(message_id)\r\n except discord.NotFound:\r\n raise CommandError(f\"Cannot find message with ID `{message_id}`!\")\r\n except discord.Forbidden:\r\n raise CommandError(\"Lacking permissions to fetch message!\")\r\n except discord.HTTPException:\r\n raise CommandError(\"Failed to retrieve message. Try again later!\")\r\n else:\r\n return msg", "def get_specific_message(message_id):\n specific_messsage = [\n message for message in user_messages\n if message[\"message_id\"] == message_id\n ]\n return specific_messsage", "def createMessage( self, *args, **kw ):\n return MailMessage( *args, **kw )", "def getmessage(self, uid):\n data = self._fetch_from_imap(str(uid), self.retrycount)\n\n # data looks now e.g.\n #[('320 (X-GM-LABELS (...) UID 17061 BODY[] {2565}','msgbody....')]\n # we only asked for one message, and that msg is in data[0].\n # msbody is in [0][1].\n body = data[0][1].replace(\"\\r\\n\", \"\\n\")\n\n # Embed the labels into the message headers\n if self.synclabels:\n m = re.search('X-GM-LABELS\\s*\\(([^\\)]*)\\)', data[0][0])\n if m:\n labels = set([imaputil.dequote(lb) for lb in imaputil.imapsplit(m.group(1))])\n else:\n labels = set()\n labels = labels - self.ignorelabels\n labels_str = imaputil.format_labels_string(self.labelsheader, sorted(labels))\n\n # First remove old label headers that may be in the message content retrieved\n # from gmail Then add a labels header with current gmail labels.\n body = self.deletemessageheaders(body, self.labelsheader)\n body = self.addmessageheader(body, '\\n', self.labelsheader, labels_str)\n\n if len(body)>200:\n dbg_output = \"%s...%s\"% (str(body)[:150], str(body)[-50:])\n else:\n dbg_output = body\n\n self.ui.debug('imap', \"Returned object from fetching %d: '%s'\"%\n (uid, dbg_output))\n return body", "def Message(title, msg):\r\n return _hiew.HiewGate_Message(title, msg)", "def serialize(self, name, *args, **kwargs): \n if '.' 
in name:\n unspec = self._unspecify_name(name)\n if not unspec or not (repr(unspec) in self.messages):\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n name = unspec\n elif name in self.message_rhashes:\n name = self.message_rhashes[name]\n else:\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n struct = self.messages[repr(name)]()\n index = 0\n for field in struct.DESCRIPTOR.fields:\n # Loop through the fields in order of definition\n # If we can't, the fields have to be initialized by the\n # keyword arguments\n value = args[index] if index < len(args) else kwargs.get(field.name)\n # dict.get() returns None if the entry was not found\n if value == None:\n # If a field is optional, it can be skipped\n if field.label == field.LABEL_OPTIONAL:\n continue\n raise FieldNotDefinedException(\"The field '\" + field.name +\n \"' was not defined when serializing a '\" +\n self.message_hashes[repr(name)] + \"'\")\n try:\n r = self._map_onto(getattr(struct, field.name), value, self._get_options(struct, field.name))\n if r:\n self._checked_set(struct, field.name, r[0])\n except TypeError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' to \" + str(e).replace('has type', 'which has the type'))\n except ValueError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' but \" + str(e))\n index += 1\n return pack(str(self.header_size) + 's', name) + struct.SerializePartialToString()", "def get_message(self, id, format='minimal'):\n try:\n return self.service.users().messages().get(userId='me',\n id=id,\n format=format).\\\n execute()\n\n except googleapiclient.errors.HttpError as ex:\n if ex.resp.status == 403 or ex.resp.status == 500:\n return self.get_message(id, format)\n else:\n raise ex", "def _get_message_from_proto(self, message) -> dict: \n result = { 'transcript' : '' , 'confidence' : 0.0 }\n try: \n result = MessageToDict(message._pb)['results'][0]['alternatives'][0]\n except:\n result['transcript'] = ''\n result['confidence'] = 0.0\n\n return result", "def getMessage() -> str:\n pass", "def get_msg_cls(self, known_msgs=None):\n known_msgs = known_msgs or self.known_msgs\n return known_msgs.get(self.MsgType)", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def get_method(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.method\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n return message[3]\r\n return None", "def request_message(self) -> Optional[str]:\n return pulumi.get(self, \"request_message\")", "def parse(cls, message):\r\n if isinstance(message, PlatformMessage):\r\n inst = PlatformMessage.parse(message.serialize())\r\n return inst\r\n inst = PlatformMessage()\r\n if message is not None:\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, 
\"Message's signature is incorrect\"\r\n inst.sender = message[1]\r\n inst.interface = message[2]\r\n inst.method = message[3]\r\n if len(message) > 4:\r\n assert isinstance(message[4], (list, tuple)), \"Message's args expected to be list or tuple\"\r\n inst.args = copy.deepcopy(message[4])\r\n if len(message) > 5:\r\n assert isinstance(message[5], dict), \"Message's kwargs expected to be a dict\"\r\n inst.kwargs = copy.deepcopy(message[5])\r\n return inst", "def message(self):\n return self._message", "async def get_msg(self):\n try:\n # 2^8 bytes at a time. I just like it, no special reason\n data = await self.reader.read(256)\n msg = data.decode()\n addr = writer.get_extra_info(\"peername\")\n logging.info(\"Received %s from %s\", (msg, addr))\n\n except Exception as e:\n logging.error(\"Command could not be decoded; %s\", e)\n\n return msg", "def message(self) -> str:\n return self.fields.get('msg', self.raw_string)", "def message_id(self) -> str:\n return self[\"Sns\"][\"MessageId\"]", "def getMessage(self):\n return self.message", "def getMessage(self):\n return self.message", "def GetMessageWithId(service, user_id, msg_id, format):\r\n try:\r\n message = service.users().messages().get(userId=user_id,\r\n id=msg_id,\r\n format=format).execute()\r\n msg_str = str(base64.urlsafe_b64decode(message[\"raw\"].encode(\"utf-8\")))\r\n return msg_str\r\n except errors.HttpError as error:\r\n print(\"An error occurred: %s\" % error)", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def message_thread(self):\r\n return resource.MessageThread(self)" ]
[ "0.69583714", "0.65979904", "0.64236474", "0.62720525", "0.62713873", "0.625511", "0.6236496", "0.6140541", "0.61393994", "0.6108671", "0.6034471", "0.6006937", "0.5988172", "0.5965715", "0.5953879", "0.5941471", "0.5921525", "0.5919041", "0.5918195", "0.590729", "0.5900649", "0.589385", "0.5874447", "0.58742064", "0.5866837", "0.58479506", "0.58422524", "0.5823816", "0.5823816", "0.57933754", "0.5778257", "0.57697463", "0.5738902", "0.57251227", "0.57235813", "0.56977475", "0.56943685", "0.5652601", "0.56518966", "0.5641412", "0.56116366", "0.56116366", "0.55952054", "0.5590308", "0.5589006", "0.55843896", "0.55371714", "0.5511487", "0.5506033", "0.55057186", "0.55019695", "0.54982954", "0.549064", "0.54807687", "0.54764247", "0.54683876", "0.5467523", "0.54672545", "0.54625684", "0.54515886", "0.54514897", "0.5447369", "0.5445202", "0.54452", "0.54418033", "0.5441459", "0.5425287", "0.5417419", "0.5407876", "0.54004604", "0.5393693", "0.5393693", "0.5393693", "0.5393693", "0.5393693", "0.53931254", "0.53931254", "0.53909904", "0.5377141", "0.5377036", "0.5376441", "0.53703314", "0.536755", "0.53643894", "0.53564864", "0.5353465", "0.53482294", "0.53352815", "0.53288037", "0.5324641", "0.5315429", "0.5308712", "0.5305386", "0.5303326", "0.5303311", "0.52953976", "0.52953976", "0.52922875", "0.5279692", "0.5278494" ]
0.80781776
0
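For context, a short sketch of how the GetApiMessage helper above is typically called. The 'Dataset' message name is taken from the PermissionsFileProcessor record that follows; the sketch assumes apis, _BQ_API and _BQ_API_VERSION are set up exactly as the function itself expects.

# Look up the generated Dataset message class for the configured BigQuery API
# version, then pull the nested AccessValueListEntry type off of it.
dataset_msg = GetApiMessage('Dataset')
access_value_msg = dataset_msg.AccessValueListEntry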
Builds a BigQuery AccessValueListEntry array from an input file. Expects a YAML or JSON formatted file.
def PermissionsFileProcessor(input_file):
  access_value_msg = GetApiMessage('Dataset').AccessValueListEntry
  try:
    permissions_array = []
    permissions_from_file = yaml.load(input_file[0])
    permissions_from_file = permissions_from_file.get('access', None)
    if not permissions_from_file or not isinstance(permissions_from_file, list):
      raise PermissionsFileError(
          'Error parsing permissions file: no access list defined in file')
    for access_yaml in permissions_from_file:
      permission = encoding.PyValueToMessage(access_value_msg, access_yaml)
      if _ValidatePermission(permission):
        permissions_array.append(permission)
      else:
        raise PermissionsFileError(('Error parsing permissions file:'
                                    ' invalid permission definition'
                                    ' [{}]'.format(permission)))
    return sorted(permissions_array, key=lambda x: x.role)
  except yaml.YAMLParseError as ype:
    raise PermissionsFileError('Error parsing permissions file [{}]'.format(
        ype))
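A hypothetical example of the permissions file this function expects, inferred from its parsing logic: a top-level 'access' key holding a list of entries, each carrying at least a role (the result is sorted by x.role). Field names other than 'access' and 'role' are assumptions for illustration, not taken from the record.

# Sketch of a permissions file accepted by PermissionsFileProcessor (YAML form),
# kept here as a Python string for illustration only.
SAMPLE_PERMISSIONS_YAML = """\
access:
  - role: OWNER
    userByEmail: [email protected]   # field name assumed, not from the record
  - role: READER
    specialGroup: projectReaders       # field name assumed, not from the record
"""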
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_from_file(self, file):\n self.value = []\n with open(file, \"r\") as f:\n fl = f.readlines()\n\n for l in fl:\n self.value.append([int(x) for x in l.split()])", "def load_vals(txtfile):\n import ast\n \n data = []\n li = load_help(txtfile)\n \n for i in xrange(len(li)):\n if li[i] == 'Value' and i < len(li)-1:\n dic = ast.literal_eval(li[i+1])\n data.append(dic)\n return data", "def _read_file(self, input_file):\n with io.open(input_file, \"r\", encoding=\"UTF-8\") as file:\n examples = []\n for line in file:\n data = line.strip().split(\"_!_\")\n example = InputExample(\n guid=data[0], label=data[1], text_a=data[3])\n examples.append(example)\n\n return examples", "def build_accession_parser(rules_file):\n\n rules_data = json.load(rules_file)\n rules_by_prefix_len = {}\n for prefix_list, database, molecule_type, type_description in rules_data:\n for prefix in prefix_list:\n prefix_length = len(prefix)\n if REFSEQ_PREFIX_RE.match(prefix) is not None:\n # RefSeq whose accessions start with XX_ has its own rules\n if 'RefSeq' not in rules_by_prefix_len:\n rules_by_prefix_len['RefSeq'] = []\n rules_by_prefix_len['RefSeq'].append((prefix, database, molecule_type, type_description))\n elif '-' in prefix or '_' in prefix:\n (prefix_length, matcher) = make_range_matcher(prefix)\n if prefix_length not in rules_by_prefix_len:\n rules_by_prefix_len[prefix_length] = []\n rules_by_prefix_len[prefix_length].append((matcher, database, molecule_type, type_description))\n else:\n if prefix_length not in rules_by_prefix_len:\n rules_by_prefix_len[prefix_length] = []\n rules_by_prefix_len[prefix_length].append((prefix, database, molecule_type, type_description))\n return rules_by_prefix_len", "def _read_data(cls, input_file): # 这里是对文件的处理\r\n with open(input_file, encoding='utf-8') as f:\r\n lines = []\r\n\r\n for line in f:\r\n line = json.loads(line)\r\n words = ' '.join(list(line['natural']))\r\n labels = ' '.join(line['tag_seq'])\r\n poss = line['pos_seq']\r\n dps = line['dp_seq']\r\n head = line['head_seq']\r\n lines.append([labels, words, poss, dps, head])\r\n\r\n return lines", "def parse_namelist(file, flat=False, silence_cast_errors=False):\n\n data = {}\n current_namelist = \"\"\n raw_lines = []\n with open(file) as f:\n for line in f:\n # Remove comments\n line = line.split(\"#\")[0].strip()\n if \"=\" in line or \"&\" in line:\n raw_lines.append(line)\n elif line:\n raw_lines[-1] += line\n\n for line in raw_lines:\n if line.startswith(\"&\"):\n current_namelist = line.split(\"&\")[1]\n if current_namelist: # else : it's the end of a namelist.\n data[current_namelist] = {}\n else:\n field, value = map(str.strip, line[:-1].split(\"=\"))\n try:\n value = _parse_namelist_val(value)\n except ValueError as err:\n if silence_cast_errors:\n warn(\n \"Unable to cast value {} at line {}\".format(\n value, raw_lines.index(line)\n )\n )\n else:\n raise err\n\n if \"(\" in field: # Field is an array\n field, idxs = field[:-1].split(\"(\")\n field = field.casefold()\n if field not in data[current_namelist]:\n data[current_namelist][field] = []\n # For generality, we will assign a slice, so we cast in list\n value = value if isinstance(value, list) else [value]\n idxs = [\n slice(int(idx.split(\":\")[0]) - 1, int(idx.split(\":\")[1]))\n if \":\" in idx\n else slice(int(idx) - 1, int(idx))\n for idx in idxs.split(\",\")\n ]\n\n datafield = data[current_namelist][field]\n # Array are 1D or 2D, if 2D we extend it to the good shape,\n # filling it with [] and pass the appropriate sublist.\n # Only 
works with slice assign (a:b) in first position.\n missing_spots = idxs[-1].stop - len(datafield)\n if missing_spots > 0:\n datafield.extend([] for i in range(missing_spots))\n if len(idxs) == 2:\n datafield = datafield[idxs[1].start]\n datafield[idxs[0]] = value\n else:\n data[current_namelist][field.casefold()] = value\n\n if flat:\n for namelist in list(data.keys()):\n data.update(data.pop(namelist))\n\n return data", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def build_data_from_file(cls,file_path,number_elems=None):\n raise NotImplementedError('Abstract method has not been implemented')", "def read_cfg(file):\n result = []\n if isfile(file):\n with open(file) as f:\n cfg = json.load(f)\n for entry in cfg:\n if \"start\" in entry:\n filter = (entry[\"start\"], entry.get(\"end\", None))\n result.append(filter)\n return result", "def read_file_into_list(source_file):\n\twith open(source_file, 'r') as source:\n\t\tdata = base64.b64encode(source.read())\n\t\treturn [data[i:i+SPLIT_LENGTH] for i in range(0, len(data), SPLIT_LENGTH)]", "def load_from_file(cls):\n\n try:\n list_of_ins = []\n with open(cls.__name__ + '.json') as my_file:\n dicts = Base.from_json_string(my_file.read())\n for key in dicts:\n list_of_ins += [cls.create(**key)]\n return (list_of_ins)\n except:\n return ([])", "def get_ade20_vqa_data(file_name=\"ade20k_vqa.jsonl\"):\n conf = get_config()\n vqa_file = conf[\"ade20k_vqa_dir\"]\n file = os.path.join(vqa_file, file_name)\n print(f\"Reading {file}\")\n with jsonlines.open(file) as reader:\n data = [i for i in iter(reader)]\n return data", "def read_file_into_list(source_file):\n with open(source_file, 'r') as source:\n data = base64.b64encode(source.read())\n return [data[i:i+SPLIT_LENGTH] for i in range(0, len(data), SPLIT_LENGTH)]", "def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]", "def parse_datafile(file):\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip(\"\\n\")\n\n # Turn [] strings into {} to be treated properly as JSON hashes\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n\n if line.startswith(\"{\"):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data", "def parse_datafile(file):\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip(\"\\n\")\n\n # Turn [] strings into {} to be treated properly as JSON hashes\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n\n if line.startswith(\"{\"):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data", "def _read_input(self, in_file):\n result = {}\n with open(in_file, \"r\") as f:\n reader = csv.DictReader(f, delimiter=str(\"\\t\"))\n for row in reader:\n result[row[\"accession\"]] = {\n \"transcript_sequence\": row[\"transcript_sequence\"],\n \"cds_start_i\": int(row[\"cds_start_i\"]),\n \"cds_end_i\": int(row[\"cds_end_i\"]),\n }\n\n return result", "def read_csvfile(inputfn):\n with open(inputfn, 'rU') as fd:\n datastruct = gen_csv_data(fd, returntype='list') # Make sure to store as list before closing file.\n return datastruct", "def load_builtin_data(name):\n\t\n\tpath = 
Path(resource_filename('pyospray', f'data/{name}.txt'))\n\tret = {}\n\tvalues = None\n\twith path.open('r') as f:\n\t\tlines = (line.rstrip('\\n') for line in f)\n\t\tfor token, content in tokenize(lines):\n\t\t\tif token == 'key':\n\t\t\t\tvalues = []\n\t\t\t\tret[content] = values\n\t\t\t\n\t\t\telif token == 'values':\n\t\t\t\tvalues.extend(content)\n\t\t\t\n\t\t\telse:\n\t\t\t\traise NotImplementedError\n\t\n\treturn ret", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines", "def load_data_from_file(file_name):\n file_path = os.getcwd() + '/data/' + file_name\n\n constructors = []\n if os.path.exists(file_path):\n with open(file_path, 'r') as csvfile:\n file_rows = csvfile.readlines()\n\n constructors = [line.replace('\\n', '').split('|') for line in file_rows]\n\n return constructors", "def readData(file):\n \n inputValues=list()\n outputValue=list()\n totalData=list()\n \n with open(file) as fp :\n for line in fp:\n if line.strip( ) == '':\n continue\n attributeValue = line.strip().split(\",\")\n inputValue1 = float(attributeValue[0])\n inputValue2 = float(attributeValue[1])\n \n inputValues+=[[inputValue1]+[inputValue2]]\n outputValue+=[int(attributeValue[2])]\n totalData+=[[inputValue1]+[inputValue2]+[int(attributeValue[2])]]\n \n \n return inputValues,outputValue,totalData", "def load_reference_from_stream(self, f):\n qids_to_relevant_docids = {}\n for l in f:\n vals = l.strip().split('\\t')\n if len(vals) != 4:\n vals = l.strip().split(' ')\n if len(vals) != 4:\n pdb.set_trace()\n raise IOError('\\\"%s\\\" is not valid format' % l)\n\n qid = vals[0]\n if qid in qids_to_relevant_docids:\n pass\n else:\n qids_to_relevant_docids[qid] = []\n _rel = int(vals[3])\n if _rel > 0:\n qids_to_relevant_docids[qid].append(vals[2])\n\n return qids_to_relevant_docids", "def read_data(self, filepath, is_build_vocab=False):\r\n\r\n with open(\"general_list.pkl\", \"rb\") as file:\r\n self.general_list = pl.load(file)\r\n self.vocab.token2idx = {\"<pad>\": 0, \"<unk>\": 1}\r\n print(len(self.general_list))\r\n ll = 2\r\n for token in self.general_list:\r\n self.vocab.token2idx[token] = ll\r\n ll+=1\r\n\r\n print(\"max id\", max(list(self.vocab.token2idx.values())), len(self.vocab.token2idx))\r\n self.vocab.idx2token = {idx: token for token, idx in self.vocab.token2idx.items()}\r\n #print(\"max_len\", self.vocab.token2idx)\r\n datas = []\r\n\r\n with open(filepath, \"r\", encoding=\"utf-8\") as reader:\r\n for line in reader:\r\n line = line.strip()\r\n if not line:\r\n continue\r\n obj = json.loads(line)\r\n datas.append(obj)\r\n\r\n return datas", "def from_file_to_list(input_file):\n\tfile = open(input_file)\n\n\tdict_values = [\"\" for k in range(8)]\n\n\tfor line in file:\n\t\ts = line.split(\" \")\n\t\ts.pop(0) # first column only indicate line's number\n\t\ts.remove('\\n')\n\t\tfor idx, a in enumerate(s):\n\t\t\tdict_values[idx] += a\n\n\n\tfile.close\n\n\treturn dict_values", "def test_reading_nested_user_map_definition_from_file():\n with open(\"definitions/Person.buf\") as f:\n Person = Map.from_open_file(f)\n\n expected = Map(\n MapEntrySpec(1, \"name\", String),\n MapEntrySpec(2, \"members\", List(Person))\n )\n\n with open(\"definitions/Club.buf\") as f:\n assert expected == 
Map.from_open_file(f, \"definitions\")\n assert expected == Map.from_file(\"definitions/Club.buf\")\n assert expected == Map.from_file(\"./definitions/Club.buf\")", "def read_reference(conformation_fname):\r\n\r\n #Create empty set\r\n reference_atoms = []\r\n\r\n #Try/catch if file cannot be found. Open file in read mode\r\n #For eveyr line in the text file, strip all white spaces from front and back\r\n #If not empty line, split line on commas and put integers in set. These correspond to atom numbers of the key atoms\r\n #Return this list\r\n\r\n try:\r\n with open(conformation_fname, \"r\") as fin :\r\n num = 1\r\n for line in fin:\r\n if num < 10:\r\n num = num + 1\r\n continue\r\n content = line.strip()\r\n if content == '':\r\n continue\r\n else:\r\n reference_atoms.append(content.split())\r\n #reference_atom_num.update([int(i) for i in content.split(',')])\r\n return reference_atoms\r\n #Catch OS error\r\n except OSError:\r\n print('OS error')\r\n sys.exit()\r\n #Catch value error (not appropriate values to be converted to int)\r\n except ValueError:\r\n print('Could not convert data to integer')\r\n sys.exit()", "def _read_jsonl(cls, input_file):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n lines = []\n for line in f:\n lines.append(json.loads(line))\n return lines", "def test_reading_user_map_definition_with_list():\n assert Map(\n MapEntrySpec(1, \"name\", String),\n MapEntrySpec(2, \"phones\", List(String))\n ) == Map.from_lines([\n \"1. name: string\",\n \"2. phones: list(string)\"\n ])", "def parse_csv_input(input_file): # {{{\n parsed_infile = []\n try:\n with open(input_file) as infile:\n for line in csv.reader(infile):\n parsed_infile.append(line)\n\n temp_object_storage = []\n\n for line_index, line in enumerate(parsed_infile[1:]):\n temp_object_storage.append({})\n for category_index, category in enumerate(parsed_infile[0]):\n if category_index == 0:\n category = category[3:]\n temp_object_storage[line_index][category] = line[category_index]\n\n return temp_object_storage\n except FileNotFoundError as excep:\n LOGGER.info(\"error parsing csv file: %s\", excep) # }}}", "def load_from_file(cls):\n filename = cls.__name__ + \".json\"\n listOfInst = []\n try:\n with open(filename, \"r\") as f:\n listOfInst = cls.from_json_string(f.read())\n for num, val in enumerate(listOfInst):\n listOfInst[num] = cls.create(**listOfInst[num])\n except:\n pass\n return listOfInst", "def init_data(partitions_file):\n mapping = []\n\n drive_size = None\n for line in partitions_file:\n if drive_size is None:\n drive_size = parse_drive_size(line.rstrip())\n else:\n partitions_list = parse_partitions(line.rstrip())\n mapping.append((drive_size, partitions_list))\n drive_size = None\n\n return mapping", "def read_csv(file, column_number):\n l = []\n\n # Read the file content\n with open(file, \"r\", encoding='utf-8', errors='ignore') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count != 0:\n l.append(row[column_number])\n line_count += 1\n\n line_count = 1\n length = len(l)\n\n # Writes the accessions in the required format\n accessions = ('[')\n for element in l:\n accessions += '[\"' + str(element) + '\"]'\n if line_count < length:\n accessions += ', '\n line_count += 1\n accessions += (']')\n\n return accessions", "def create_table(file_to_use):\n lines = []\n for line in file_to_use:\n lines.append(line.split(\",\"))\n lines[-1][-1] = lines[-1][-1][:-1]\n return lines", "def readKeyValueFile(kv_file):\n 
with open(kv_file, 'r') as fp:\n lines = fp.read().splitlines()\n kv = []\n for line in lines:\n k, v = line.split('\\t')\n kv.append([k, float(v)])\n return kv", "def parse(self, fn, board):\n with open(fn) as f:\n return [(board.get_region(i['name']), i['base']) for i in json.loads(f.read())]", "def parse_data_file(self, file_name: str) -> List[Tuple[str, int]]:\n with open(file_name, \"r\") as f:\n data_list = []\n for line in f.readlines():\n path, target = line.split()\n if not os.path.isabs(path):\n path = os.path.join(self.root, path)\n target = int(target)\n data_list.append((path, target))\n return data_list", "def load_tokens_from_file(filename: str) -> List[Page]:\n\n with open(filename, \"r\") as fp:\n source_data = json.load(fp)\n\n return [\n Page(\n page=PageInfo(**page_data[\"page\"]),\n tokens=[Token(**token) for token in page_data[\"tokens\"]],\n )\n for page_data in source_data\n ]", "def BqTableDataFileProcessor(file_arg):\n data_insert_request_type = GetApiMessage('TableDataInsertAllRequest')\n insert_row_type = data_insert_request_type.RowsValueListEntry\n data_row_type = GetApiMessage('JsonObject')\n\n try:\n data_json = yaml.load(file_arg)\n\n if not data_json or not isinstance(data_json, list):\n raise TableDataFileError(\n 'Error parsing data file: no data records defined in file')\n\n rows = []\n for row in data_json:\n rows.append(insert_row_type(json=encoding.DictToMessage(\n row, data_row_type)))\n\n return rows\n except yaml.YAMLParseError as ype:\n raise TableDataFileError('Error parsing data file [{}]'.format(ype))", "def load_from_file(cls):\n new_list = []\n try:\n with open(\"%s.json\" % cls.__name__, mode='r') as f:\n file = cls.from_json_string(f.read())\n for i in file:\n new_list.append(cls.create(**i))\n except Exception:\n pass\n return new_list", "def importMods():\n try:\n with open(\"mods.txt\", \"r\") as fp:\n ModsList = []\n for line in fp:\n #expload string on comma\n lineArray = line.split(\",\")\n #Split each var up\n name = lineArray[0].split(\":\")[1]\n email = lineArray[1].split(\":\")[1]\n nickname = lineArray[2].split(\":\")[1]\n passwd = lineArray[3].split(\":\")[1]\n # clean up end of nickname string\n passwd = passwd.strip(\" }\\n\")\n ModsList.append(Mods(name, email, nickname, passwd))\n except IOError:\n ModsList = None\n return ModsList", "def load_input(input_name):\n with open(input_name) as input_file:\n input_list = list(map(int,input_file.readline().split(\",\")))\n return input_list", "def load_info_from_file(filepath):\n with open(filepath, 'r') as f:\n tokens = json.load(f)\n\n return tokens", "def readLinkoJson(file):\n with open(file, 'r') as jsonFile:\n preLinko = json.load(jsonFile)\n\n linko = Linkograph([], preLinko[0])\n\n for entry in preLinko[1:]:\n linko.append((set(entry[0]), set(entry[1]), set(entry[2])))\n linko.uuids.append(entry[3])\n\n return linko", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def parseInputFileList (self) :\n filelist = []\n try :\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"#\")[0]).strip()\n if 
line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: cfg file \" , self.cfgName , \" not found\"\n return\n\n #return filelist", "def initializeValueDict(values, fileList, rootDir, fileExt):\r\n if fileList is not None:\r\n with open(fileList) as pFile_data:\r\n data = json.load(pFile_data)\r\n files = data['relative']\r\n\r\n # absolute path from the start python file, e.g. runAllTests.py\r\n absolute_start_path = os.path.abspath(os.path.dirname(fileList))\r\n abs_path_to_files = absolute_start_path + \"/../\"\r\n\r\n for i in range(0, len(files)):\r\n # create a absolute path from the absolute start path and the relative path\r\n files[i] = os.path.join(abs_path_to_files, files[i])\r\n # read all available status/command/parameter\r\n for file in files:\r\n logging.info(\"loading {} file: {}\".format(fileExt, file))\r\n with open(file) as json_data:\r\n d = json.load(json_data)\r\n values[d['name']] = d\r\n\r\n if rootDir is not None:\r\n if os.name == 'nt':\r\n path_sep_ext = '\\\\**\\\\*.{}'.format(fileExt)\r\n # check if path is already absolute\r\n if not os.path.isabs(rootDir):\r\n rootDir = os.getcwd() + \"\\\\\" + rootDir\r\n else:\r\n path_sep_ext = '/**/*.{}'.format(fileExt)\r\n fileList = glob.iglob(rootDir + path_sep_ext, recursive=True)\r\n # read all available status/command/parameter\r\n for file in fileList:\r\n logging.info(\"loading {} file: {}\".format(fileExt, file))\r\n with open(file) as json_data:\r\n d = json.load(json_data)\r\n values[d['name']] = d", "def from_json_file(filename, check_format=True):\n filename = os.path.abspath(filename)\n directory = os.path.dirname(filename)\n with open(filename, \"r\") as infile:\n return ExperimentListFactory.from_json(\n infile.read(), check_format=check_format, directory=directory\n )", "def main():\n lines_list = []\n with open(bookmark_file, 'r') as f:\n lines_list = f.readlines()\n entries_list = []\n for idx, line in enumerate(lines_list):\n entry = {}\n if re.match(r'^<DT>', line):\n entry['url'] = re.match(r'^.*HREF=\\\"([^\\\"]+)\\\"', line).group(1)\n entry['add_date'] = re.match(r'^.*ADD_DATE=\\\"([^\\\"]+)\\\"', line).group(1)\n entry['private'] = re.match(r'^.*PRIVATE=\\\"([^\\\"]*)\\\"', line).group(1)\n entry['tags'] = re.match(r'^.*TAGS=\\\"([^\\\"]*)\\\"', line).group(1).split(',')\n entry['title'] = re.match(r'^.*<A [^>]+>(.*)</A>', line).group(1)\n if re.match(r'^<DD>', lines_list[idx + 1]):\n dd_tmp = []\n increment = 1\n try:\n while True:\n if re.match(r'^<DT>', lines_list[idx + increment]):\n break\n dd_tmp.append(re.match(r'^(<DD>)?(.*)$', lines_list[idx + increment]).group(2))\n increment += 1\n except:\n pass\n entry['description'] = '\\n'.join(dd_tmp)\n entries_list.append(entry)\n return entries_list", "def read (self, path):\n\n\t\tself.data = []\n\t\t# print \"*** path: %s***\" % path\n\t\tdir, filename = os.path.split (path)\n\t\troot, ext = os.path.splitext (filename)\n\t\t# encoding = 'ISO-8859-1' # utf-8\n\t\ts = codecs.open(path,'r', self.encoding).read()\n\t\t## s = unicode(f.read(),'utf-8')\n\t\ts = self.preprocess (s)\n\t\tlines = split (s, self.linesep)\n\t\tschema = self.splitline(lines[0])\n\n\t\t## print \"** %s **\" % os.path.splitext(filename)[0]\n\t\tif self.verbose:\n\t\t\tprint \"read %d lines from %s\" % (len(lines), path)\n\n\t\tfor i in range(1,len(lines)):\n\t\t\tif not lines[i].strip(): \n\t\t\t\t# print 'skipping line (%d)' % i\n\t\t\t\tcontinue\n\t\t\tfields = self.splitline(lines[i])\n\t\t\titem = self.entry_class (fields, schema)\n\t\t\tif self.accept 
(item):\n\t\t\t\tself.add (item)\n\n\t\tself.schema = schema\n\t\t# self.data.sort (lastNameCmp)", "def _experimentlist_from_file(filename, directory=None):\n filename = resolve_path(filename, directory=directory)\n try:\n with open(filename, \"r\") as infile:\n return json.load(infile, object_hook=_decode_dict)\n except IOError:\n raise IOError(\"unable to read file, %s\" % filename)", "def from_file(cls, world, data_file):\n date_ranges = []\n data = {}\n with open(data_file, 'rb') as picklefile:\n data_dict = pickle.load(picklefile)\n for era, value in data_dict.items():\n daterange = DateRange.from_string(era)\n date_ranges.append(daterange)\n data[daterange] = value\n\n accumulator = cls(world, date_ranges)\n accumulator.data = data\n return accumulator", "def parse_file_into_db(self, filename: str, limit: Optional[int] = None):\n return parse_file(filename, self._session, self._engine, self._raw_ftrace_entry_filter, limit)", "def _load_semantic_cuts_from_file(self, file):\n semantic_cuts = []\n with open(file, 'r') as fd:\n semantic_cuts = [{\n \"head_uids\" : [],\n \"tail_uids\" : [],\n }]\n for line in fd:\n if \"#\" in line:\n semantic_cuts.append({\n \"head_uids\" : [],\n \"tail_uids\" : [],\n })\n else:\n for offending_symbol in \"% \\n\": # ignore pychecker: iteration over string is intended\n line = line.replace(offending_symbol, \"\")\n head_label, tail_label = line.split(',')\n head_uid = self._label2uid[head_label]\n tail_uid = self._label2uid[tail_label]\n semantic_cuts[-1][\"head_uids\"].append(head_uid)\n semantic_cuts[-1][\"tail_uids\"].append(tail_uid)\n return semantic_cuts", "def import_parameters(self, file_name):\n parameters = []\n\n with open(file_name) as in_file:\n parameters = json.load(in_file)\n\n if parameters:\n self.put_parameters(parameters)", "def read_run_info_from_file(file):\n\n with open(file) as f:\n raw = f.read()\n\n lines = raw.split('\\n')\n runs = []\n for line in lines:\n if line == '':\n continue\n comma_splits = line.split(',')\n train_acc = float(comma_splits[0].strip())\n test_acc = float(comma_splits[1].strip())\n feature_set = set(w for w in comma_splits[2].split() if not w == '')\n runs.append(\n {'fts': feature_set, 'train_acc': train_acc, 'test_acc': test_acc}\n )\n return runs", "def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r", "def lens_from_fai(fname):\n with open(fname) as f:\n l = [int(i.split(\"\\t\")[1]) for i in f.read().rstrip().split(\"\\n\")]\n return l", "def test_parse_from_file_list(ending, content, mock_os_environ, tmpdir):\n climate = core.Climate()\n filepath = tmpdir.join(\"testvarfile\" + ending)\n filename = str(filepath)\n with open(filename, \"w\") as f:\n f.write(content)\n update_dict = {\"this_var_from_file\": filename}\n climate.update(update_dict)\n assert isinstance(climate.settings, Mapping)\n actual = dict(climate.settings)\n expected = {\"this_var\": [\"this\", \"that\"]}\n assert actual == expected", "def load_file_lines(option_value):\n if not hasattr(option_value, 'read'):\n raise IncompetentDeveloperError(\"Input type must be a file object.\")\n \n return [line.strip() for line in 
option_value]", "def create_mappings_from_file(filename, organization, user, import_file_id=None):\n\n mappings = []\n if os.path.isfile(filename):\n with open(filename, 'rU') as csvfile:\n for row in csv.reader(csvfile):\n data = {\n \"from_field\": row[0],\n \"to_table_name\": row[1],\n \"to_field\": row[2],\n # \"to_display_name\": row[3],\n # \"to_data_type\": row[4],\n # \"to_unit_type\": row[5],\n }\n mappings.append(data)\n else:\n raise Exception(\"Mapping file does not exist: {}\".format(filename))\n\n if len(mappings) == 0:\n raise Exception(\"No mappings in file: {}\".format(filename))\n else:\n return Column.create_mappings(mappings, organization, user, import_file_id)", "def creat_posting_list_obj_list(query, filename=POSTING_LIST_PATH):\n posting_list_obj_list = []\n word_list = query.split()\n file_reader = open(\"../data/\" + filename, \"r\")\n line = file_reader.readline()\n posting_list_line = line.split()\n \n while posting_list_line != []:\n if posting_list_line[0] in word_list:\n posting_list_obj = creat_posting_list_obj(posting_list_line=posting_list_line)\n posting_list_obj_list.append(posting_list_obj)\n \n # read a new line. \n line = file_reader.readline()\n posting_list_line = line.split()\n file_reader.close()\n return posting_list_obj_list", "def parseInputFileList (self):\n filelist = []\n try:\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"@@@\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: label cfg file \" , self.cfgName , \" not found\"\n return", "def __init__(self, file_name: Optional[str] = None):\n self.entries = OrderedDict() # Dict[str, PathElement]\n self.file_name = file_name # input file for logging\n self.jsonf = None # json image of input file\n self.namespaces = Namespaces('http://hl7.org/fhir/StructureDefinition/')\n self.path_map = {} # map from path to name (Dict[str, str])", "def get_map(self, filename):\n map_file = open(filename)\n map_array = []\n for line in map_file:\n line = line.strip()\n map_row = line.split(\",\")\n for index, item in enumerate(map_row):\n map_row[index] = int(item)\n map_array.append(map_row)\n return map_array", "def _read_jsonl(self, input_file, quotechar=None):\n with open(input_file, 'r', encoding='utf-8') as json_file:\n #line = list(json_file)[2]\n lines = []\n for line in json_file:\n json_text = json.loads(line)\n #reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines.append(json_text)\n return lines", "def deserialize_from_file(self):\n\n # We cannot read from a file unless the user provides it in the\n # constructor.\n if not self._input_file:\n raise Exception(\"No input file provided to deserialize from.\")\n\n # Build a record list from the file contents.\n records = []\n for record in self.deserialize_next_from_file():\n records.append(record)\n\n self.close_file_deserializer()\n\n return records", "def from_file(cls, filename):\n scans = list()\n scan_strings = cls.file_to_strings(filename)\n for scan_string in scan_strings:\n try:\n scans.append(cls.from_string(scan_string))\n except Exception:\n pass\n\n return scans", "def load_url_list(url_list_file):\n url_list = []\n with open(url_list_file, 'r') as f:\n for eachline in f:\n eachline = eachline.rstrip('\\n')\n parts = eachline.split('\\t')\n domain, script_url = parts\n url_list.append((domain, script_url))\n\n return url_list", "def load_from_file(cls):\n \"\"\"1.- Create the file name, 2.- if path file no exits return []\n 3.- open the file name, 4.- 
loop through the file\n 5.- return list of ints\"\"\"\n filename = \"{}.json\".format(cls.__name__)\n if not os.path.exists(filename):\n return []\n list_int = []\n with open(filename, \"r\") as f:\n dicts = cls.from_json_string(f.readline())\n for i in dicts:\n list_int.append(cls.create(**i))\n return list_int", "def load_from_file(cls):\n lists = []\n filename = cls.__name__ + \".json\"\n try:\n with open(filename, \"r\") as f:\n instances = cls.from_json_string(f.read())\n for k, v in enumerate(instances):\n lists.append(cls.create(**instances[k]))\n\n except:\n pass\n return lists", "def parse_csv2list_upload(file_name):\n with open(file_name) as f:\n records = csv.reader(f)\n csv_list = [[j.strip() for j in record] for record in records]\n return csv_list", "def read_data(tokenized_dialog_path, buckets, max_size=None, reversed=False):\n data_set = [[] for _ in buckets]\n\n with gfile.GFile(tokenized_dialog_path, mode=\"r\") as fh:\n utterences = fh.readline().split('\\t')\n source = utterences[0] if len(utterences) >= 2 else None\n target = utterences[1] if len(utterences) >= 2 else None\n\n if reversed:\n source, target = target, source # reverse Q-A pair, for bi-direction model\n counter = 0\n while source and target and (not max_size or counter < max_size):\n counter += 1\n if counter % 100000 == 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n\n source_ids = [int(x) for x in source.split()]\n target_ids = [int(x) for x in target.split()]\n target_ids.append(data_utils.EOS_ID)\n\n for bucket_id, (source_size, target_size) in enumerate(buckets):\n if len(source_ids) < source_size and len(target_ids) < target_size:\n data_set[bucket_id].append([source_ids, target_ids])\n break\n\n utterences = fh.readline().split('\\t')\n source = utterences[0] if len(utterences) >= 2 else None\n target = utterences[1] if len(utterences) >= 2 else None\n return data_set", "def read_input_files(input_file: str) -> list[Food]:\n with open(input_file) as input_fobj:\n foods = [Food.from_raw(line.strip()) for line in input_fobj]\n return foods", "def load_params_from_file(self, input_file):\n\n ### FILL IN ###", "def load_from_file(cls):\n\n l = []\n if o.exists(cls.__name__ + \".json\"):\n with open(cls.__name__ + \".json\") as f:\n for line in f:\n s = cls.from_json_string(line)\n for d in s:\n l.append(cls.create(**d))\n\n return l", "def _read_jsonl(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8\") as jsonl_file:\n lines = []\n for line in jsonl_file:\n lines.append(json.loads(line))\n return lines", "def parse_entry(lines):\n entry = {}\n for line in lines:\n line = line.replace('\\n', '').replace('\\r', '')\n if ':: ' in line:\n (key, value) = line.split(':: ')\n value = base64.b64decode(value).decode('utf-8')\n elif ': ' in line:\n (key, value) = line.split(': ')\n else:\n continue\n if key not in entry:\n entry[key] = []\n entry[key].append(value)\n return entry", "def loadFromFile(self, filename):\n\t\treturn []", "def get_field_list(filename):\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n field_mapping = {}\n for row in reader:\n field_mapping[row[0]] = row[1]\n return field_mapping", "def load_queries(self, file):\n queries = []\n with open(file, 'r') as f:\n for line in f:\n reg_match = re.match(r'^(\\d+).(.*)', line)\n tokens = self.es_helper.get_tokens(reg_match.group(2).strip())\n queries.append(Query(reg_match.group(1).strip(), self.es_helper, tokens))\n self.queries = queries", 
"def json_file_to_list(filename, config=None):\n with open(filename) as fp:\n return json.load(fp)", "def read_reference_list(next_word, file_word):\n\n result = []\n\n if next_word == 'cdots':\n return result\n\n while next_word[-1] == ',':\n result.extend([int(r) for r in next_word[:-1].split(',')])\n next_word = next(file_word)\n result.extend([int(r) for r in next_word[:-1].split(',')])\n return result", "def load_from_file(cls):\n try:\n with open(cls.__name__ + '.json', 'r') as f:\n jstr = f.read()\n list_d = Base.from_json_string(jstr)\n list_o = []\n for item in list_d:\n list_o.append(cls.create(**item))\n return list_o\n except FileNotFoundError:\n return []", "def read_input(input_file):\n \n logging.info(\"reading file {0}...this may take a while\".format(input_file))\n max=20000\n with json_lines.open(input_file, 'rb') as f:\n for item in f:\n if max>0:\n #max=max-1\n yield item['s']", "def ProcessEntryFile(fd):\n\tglobal reference\n\n\tname = ''\n\tfilename = ''\n\tdd = {}\n\teof = False\n\twhile not eof:\n\t\tline = fd.readline()\n\t\tif len(line) == 0:\n\t\t\teof = True\n\t\t\tif name in reference.keys():\n\t\t\t\treference[name] = dd\n\t\t\telif name != '':\n\t\t\t\treference[name] = dd\n\t\t\t#if verbose: print reference\n\t\telse:\n\t\t\tline = line.strip()\n\t\t\tif line.startswith('name'):\n\t\t\t\tif name in reference.keys() or name != '':\n\t\t\t\t\treference[name] = dd\n\t\t\t\ttokens = line.split()\n\t\t\t\tnn = tokens[0].split('=')\n\t\t\t\tname = nn[1]\n\t\t\t\tdd = {}\n\t\t\telif line.startswith('file'):\n\t\t\t\tfilename = line[len('file='):]\n\t\t\t\tif name in reference.keys():\n\t\t\t\t\tdd \t= reference[name]\n\t\t\t\t\tif dd.has_key(filename):\n\t\t\t\t\t\tfilename = ''\n\t\t\telse:\n\t\t\t\tif filename != '':\n\t\t\t\t\ttokens = line.split()\n\t\t\t\t\tlength = len(tokens)\n\t\t\t\t\t#print tokens\n\t\t\t\t\tfirst = True\n\t\t\t\t\tfor t in range(0,length,2):\n\t\t\t\t\t\tpos = tokens[t].find('=')\n\t\t\t\t\t\tcountline = int(tokens[t][pos+1:])\n\t\t\t\t\t\tpos = tokens[t+1].find('=')\n\t\t\t\t\t\tref = tokens[t+1][pos+1:]\n\t\t\t\t\t\ttline = (countline,ref)\n\t\t\t\t\t\tif first:\n\t\t\t\t\t\t\tdd[filename] = [tline]\n\t\t\t\t\t\t\tfirst = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tff = dd[filename] #list of tuples (line,ref)\t\t\t\t\n\t\t\t\t\t\t\tff.append(tline)\n\t\t\t\t\t\t\tdd[filename] = ff", "def _decode_list(fp):\n tag_id = _decode_byte(fp)\n size = _decode_int(fp)\n return [_MAP[tag_id](fp) for _ in range(size)]", "def candidates_import_from_sample_file():\n # Load saved json from local file\n logger.info(\"Loading CandidateCampaigns from local file\")\n\n with open(\"candidate/import_data/candidates_sample.json\") as json_data:\n structured_json = json.load(json_data)\n\n return candidates_import_from_structured_json(structured_json)", "def read_users(users_fp):\n users = []\n with open(users_fp, 'r') as fp:\n fields = fp.readline().rstrip().split(\",\")\n for line in fp:\n user = dict(zip(fields, line.rstrip().split(\",\")))\n users.append(user)\n return users", "def parse_file(file_path):\n with open(file_path) as f:\n return XmlPropertyListParser().parse(f)", "def data_parser(filepath):\n tmp = open(filepath).read().split('\\n')\n return [int(x) for x in tmp]", "def __csv_schema_generator(file):\n try:\n # Parses the first line of the file to get all the headers.\n metadata = str(file.readline().decode('utf-8')).strip().split(',')\n # Will be further implemented in phase 3.\n return SchemaGenerator.__build_schema(metadata)\n except Exception 
as e:\n logging.error('Failed to parse csv file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from csv file.\")", "def file_parser(filename):\n LOG_FORMAT = \"%a %l %u %t \\\"%r\\\" %>s %b %D\"\n\n line_parser = apache_log_parser.make_parser(LOG_FORMAT)\n\n parsed_entries = []\n\n with open(filename) as f:\n for line in f:\n parsed_entries.append(line_parser(line))\n\n # Sort the parsed log entries by timestamp. Some of the log entries in the\n # provided example take a long time to process so they are not in order,\n # this messes up splitting the entries into minute chunks for processing.\n parsed_entries.sort(key=lambda x: x.get('time_received_utc_datetimeobj'))\n\n return parsed_entries", "def _import(format, input, config):\n if input:\n with open(input, 'rb') as f:\n data = f.read()\n else:\n data = sys.stdin.read()\n\n dataset = tablib.Dataset()\n setattr(dataset, format, data)\n\n _add_changelogs(config, dataset.dict)", "def _read_one_file(file_name, label_list):\n lines = tf.io.gfile.GFile(file_name, \"r\").readlines()\n examples = []\n label_id_map = {label: i for i, label in enumerate(label_list)}\n sentence_id = 0\n example = InputExample(sentence_id=0)\n for line in lines:\n line = line.strip(\"\\n\")\n if line:\n # The format is: <token>\\t<label> for train/dev set and <token> for test.\n items = line.split(\"\\t\")\n assert len(items) == 2 or len(items) == 1\n token = items[0].strip()\n\n # Assign a dummy label_id for test set\n label_id = label_id_map[items[1].strip()] if len(items) == 2 else 0\n example.add_word_and_label_id(token, label_id)\n else:\n # Empty line indicates a new sentence.\n if example.words:\n examples.append(example)\n sentence_id += 1\n example = InputExample(sentence_id=sentence_id)\n\n if example.words:\n examples.append(example)\n return examples", "def load_employees(file_path):\n\temployees = []\n\tfor line in open(file_path):\n\t\temployee = Employee.employee_from_insert_stmnt(line)\n\t\tif employee:\n\t\t\temployees.append(employee)\n\treturn employees", "def parse_mapping_file(mapping_file, default, check_file):\n mapping_dict = {\n \"to_add\": {},\n \"to_delete\": {},\n \"to_swap\": {}\n }\n with open(mapping_file) as file:\n for line in file:\n if line.startswith(\"#\"): # ignore comments\n continue\n if line.startswith(\"====\"): # ignore headers\n continue\n field_name = line.split('-->')[1].split(\":\")[0] # get field name\n\n if check_file: # if checking mapping file\n mode = line.split(':')[1].strip(\" \").strip(\"\\n\") # get value from file\n if mode == \"\": # skip blank values\n continue\n if mode == \"DELETE\": # field value to delete = field name\n field_value = field_name\n else:\n field_value = mode.split(\"-\")[1]\n mode = mode.split(\"-\")[0]\n\n if default: # read value from default mapping value\n try:\n mode = default_mapping[field_name][\"mode\"]\n field_value = default_mapping[field_name][\"value\"]\n except KeyError:\n continue\n\n key_list = []\n keys = line.split(\"-->\")[0]\n key_reg = r\"\\[(.*?)\\]\"\n keys = re.findall(key_reg, keys) # get each key in keylist\n for key in keys:\n key_list.append(key.strip(\"[\").strip(\"]\").strip(\"'\")) # get string from key\n if mode.lower() == \"delete\":\n add_or_update_list_HELPER(mapping_dict[\"to_delete\"],\n field_name,\n {\"index\": key_list, \"value\": field_value}\n )\n if mode.lower() == \"swap\": # swap value with field name\n add_or_update_list_HELPER(mapping_dict[\"to_swap\"],\n field_name,\n {\"index\": key_list, \"value\": 
field_value}\n )\n if mode.lower() == \"add\":\n add_or_update_list_HELPER(mapping_dict[\"to_add\"],\n field_name,\n {\"index\": key_list, \"value\": field_value},\n )\n if mode.lower() == \"rename\": # swap field name with value\n add_or_update_list_HELPER(mapping_dict[\"to_swap\"],\n field_value,\n {\"index\": key_list, \"value\": field_name}\n )\n\n return mapping_dict", "def load(self, filename):\n\n file = open(filename, \"r\")\n text = file.read()\n file.close()\n text = text.replace(']', '],').replace('],]', ']]').replace(']],', ']]')\n text = text.replace('.', ',').replace(',]', ']')\n aList = eval(text)\n return aList", "def read_file(self):\n\n\t\twith open(self.filename, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif len(line)>1:\n\t\t\t\t\tlenght_value,array_values = line.split(';')\n\t\t\t\t\tlist_values = [int(x) for x in array_values.split(',')]\n\t\t\t\t\tprint self.get_arraysurdit(list_values)" ]
[ "0.56744987", "0.5283599", "0.5243404", "0.5234778", "0.5156196", "0.51066506", "0.5005265", "0.5005265", "0.49895862", "0.4981226", "0.49759138", "0.49112916", "0.49017558", "0.48792005", "0.48355103", "0.48244855", "0.48244855", "0.48212677", "0.4802652", "0.47956467", "0.47937998", "0.47522512", "0.47453332", "0.47386205", "0.47376984", "0.4733793", "0.47195417", "0.47126207", "0.47107288", "0.47105974", "0.46972096", "0.46970254", "0.46963403", "0.4695972", "0.46915537", "0.46877274", "0.46810275", "0.46759123", "0.4657233", "0.46530297", "0.46424147", "0.46249387", "0.46169037", "0.4614975", "0.4611402", "0.460718", "0.4605367", "0.46050563", "0.45946947", "0.4590021", "0.45867503", "0.4580449", "0.45797268", "0.45795608", "0.4573974", "0.457211", "0.45671806", "0.45646232", "0.4561215", "0.4556217", "0.45532697", "0.45490977", "0.4548338", "0.45402664", "0.45388138", "0.45345336", "0.4529447", "0.45259026", "0.45151684", "0.451294", "0.45068616", "0.45059657", "0.45009837", "0.44989237", "0.44971085", "0.44964424", "0.44905633", "0.44901624", "0.448219", "0.44819856", "0.4478186", "0.4477506", "0.44769827", "0.44732267", "0.44717163", "0.44710413", "0.44707692", "0.4469032", "0.4468291", "0.4466525", "0.44627464", "0.44619197", "0.44571096", "0.44566795", "0.44475982", "0.4446368", "0.4445965", "0.4443245", "0.44396016", "0.44351858" ]
0.5508567
1
Convert commandline duration into epoch timeoffset (in ms).
def ProcessTableExpiration(expire_duration):
  t = times.GetDateTimePlusDuration(datetime.datetime.now(), expire_duration)
  return int(time.mktime(t.timetuple())) * 1000
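A minimal standalone sketch of the same conversion, for reference, assuming the commandline duration has already been parsed into a datetime.timedelta (the gcloud-specific times.GetDateTimePlusDuration helper is replaced by plain standard-library arithmetic; the function name below is illustrative and not part of the dataset row):

import datetime
import time

def expiration_ms_from_duration(duration: datetime.timedelta) -> int:
    # Add the duration to "now", then convert to whole seconds since the epoch
    # and scale to milliseconds, mirroring int(time.mktime(...)) * 1000 above.
    expiry = datetime.datetime.now() + duration
    return int(time.mktime(expiry.timetuple())) * 1000

# Example: an expiration timestamp seven days from now, in milliseconds.
print(expiration_ms_from_duration(datetime.timedelta(days=7)))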
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_epoch_duration(self):\n\n now = time.time()\n epoch_duration = datetime.datetime.fromtimestamp(now - self.prev_time).strftime(\"%M:%S.%f\")[:-4]\n self.prev_time = now\n return epoch_duration", "def ms2pts(ms, dt):\n return int(ms/dt)", "def np_dt_epoch_msec(value):\n return value.astype(long) / 1000", "def epoch():\n\treturn time.time()", "def epoch():\n return datetime2epoch(datetime.now())", "def dt_epoch_msecs(value):\n return long(calendar.timegm(value.timetuple())) * 1000", "def epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs", "def epoch_time(when):\n if not when: return 0\n epoch = datetime.utcfromtimestamp(0)\n delta = when - epoch\n return int(delta.total_seconds())", "def epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n\n return elapsed_mins, elapsed_secs", "def epoch(value):\n if isinstance(value, datetime.datetime):\n return int(calendar.timegm(value.timetuple())*1000)\n return '' #fails silently for non-datetime objects", "def _convert_time(self, duration):\n in_sec = int(int(duration) / 1000)\n in_time = int(in_sec / 60) + (0.01 * (in_sec % 60))\n return in_time", "def pts2ms(pts, dt):\n return pts*dt", "def epoch_time(self, start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs", "def milliseconds_offset(cls, timestamp, now=None):\n if isinstance(timestamp, (int, float)):\n base = timestamp\n else:\n base = cls.to_unix(timestamp)\n if now is None:\n now = time.time()\n return (now - base) * 1000", "def datetime2epoch(dt):\n return int(mktime(dt.timetuple())*1000)", "def __get_duration_from_line(self, line):\n # TODO: catch exceptions\n duration_str = line.split('=')[1]\n return int(duration_str)", "def event_time_1970(event_time):\n return event_time + start_time", "def add_milliseconds_to_epoch_time(epoch_time):\n epoch_time = int(epoch_time / 1000 + 1) / 1000\n return epoch_time", "def _TIME2STEPS(time):\n return int(time*1000)", "def parse_time(s):\n\n dt = dateutil.parser.parse(s)\n# epoch_time = int((dt - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())\n epoch_time = int(dt.replace(tzinfo=timezone.utc).timestamp())\n\n return epoch_time", "def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)", "def epoch_time(start_time: float, end_time: float) -> Tuple[int, int]:\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs", "def run_duration(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"run_duration\")", "def sbetime2unixtime(value):\n if not isinstance(value, int):\n raise InstrumentParameterException(\"value not a int\")\n\n return SBE_EPOCH + value", "def start_delta_string(self):\r\n delta = int(self.start_time) - int(self.root().start_time)\r\n return '%02d:%02d' % (delta / 60, delta % 60)", "def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)", "def to_unix_milli(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_unix_milli = str(int((dt_obj - self.epoch_1970).total_seconds()*1000))\n except Exception as e:\n if not 
args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_unix_milli = False\n return self.out_unix_milli", "def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)", "def convert_to_epoch(event_time_date) -> int:\n pattern = '%Y-%m-%d %H:%M'\n return int(time.mktime(time.strptime(event_time_date, pattern)))#to epoch value", "def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)", "def epoch_time_now():\n return int(time.time())", "def as_millis(self):\n return int(ntplib.ntp_to_system_time(self.start) * 1000), int(ntplib.ntp_to_system_time(self.stop) * 1000)", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal", "def _duration_to_secs(duration):\n secs = int(duration[:-1])\n if duration[-1] == 's':\n pass\n elif duration[-1] == 'm':\n secs *= 60\n elif duration[-1] == 'h':\n secs *= 60 * 60\n elif duration[-1] == 'd':\n secs *= 60 * 60 * 24\n else:\n raise ValueError('Invalid duration: %r' % duration)\n\n return secs", "def epochnow():\n return time.time()", "def epoch(self):\n return _coordsys.coordsys_epoch(self)", "def get_unixtime(self):\n if not self.Complete():\n raise DateTimeError(\"get_unixtime requires complete timepoint\")\n zoffset = self.time.GetZoneOffset()\n if zoffset is None:\n raise DateTimeError(\"get_unixtime requires timezone\")\n elif zoffset == 0:\n zt = self\n else:\n zt = self.ShiftZone(zDirection=0)\n days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()\n seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()\n return 86400 * days + seconds", "def get_track_length(duration):\n try:\n length = time.strptime(duration, '%M:%S')\n except ValueError:\n return None\n return length.tm_min * 60 + length.tm_sec", "def convert_epoch(aepoch):\n\tprint \"time given: \" + aepoch\n\tepoch = time.strftime(\"%a %d %b %Y %H:%M:%S +0000\", time.gmtime(float(aepoch)))\n\tprint \"converted time: \" + epoch", "def get_annotation_duration(annotation):\n return ANNOTATION_INITIAL_PAUSE_TIME + len(annotation[\"text\"]) * 0.2", "def pcr_delta_time_ms(pcr_t1, pcr_t2, offset = 0):\n return float(pcr_t2-pcr_t1)/90000.0 + offset", "def _current_epoch_secs():\n now = datetime.datetime.utcnow()\n epoch = datetime.datetime(1970, 1, 1)\n return (now - epoch).total_seconds()", "def to_deltatime(self,\n epoch: str | tuple | list | np.ndarray,\n scale: float = 1.0\n ):\n # convert epochs to numpy datetime variables\n epoch1 = np.datetime64(datetime.datetime(*_mjd_epoch))\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # calculate the difference in epochs in days\n delta_time_epochs = (epoch - epoch1) / np.timedelta64(1, 'D')\n # return the date in time (default days) since epoch\n return scale*np.array(self.MJD - delta_time_epochs, dtype=np.float64)", "def time_to_point(distance):\n if distance <= (125 / 9) ** 2:\n return distance ** .5\n return distance * 9 / 250 + 125 / 18", "def millis() -> int:", "def epoch(self):\n return self._epoch", "def datetime_to_epoch(datetime_obj):\n return int(datetime_obj.strftime(\"%s\")) * 1000", "def elapsed_millis(start: int, /) -> int:", "def elapsed_micros(start: int, /) -> int:", "def chrome_timestamp_to_epoch(chrome_timestamp):\n return (chrome_timestamp / 
1000000) - 11644473600", "def extract_from_date(epochseconds, offset=0):\n return time.gmtime(epochseconds+offset*3600)", "def _parse_duration(path):\n tag = \"[FlowShaper] Application complete after \" # xxx ms\n found = None\n with (path / \"stdout.txt\").open(mode=\"r\") as stdout:\n found = [line for line in stdout if line.startswith(tag)][-1]\n assert found, f\"Run never completed! {path}\"\n\n # Parse the next word as an integer\n return int(found[len(tag):].split()[0])", "def to_prtime(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_prtime = str(int((dt_obj - self.epoch_1970).total_seconds() * 1000000))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_prtime = False\n return self.out_prtime", "def epoch_M(self):\n return self._epoch_M", "def get_running_time(self):\n with open(self.running_time_file, 'r') as file:\n return int(file.read().strip())", "def parse_duration_str(self, duration):\n try:\n dl = duration.split(\":\")\n except Exception:\n return None\n if len(dl) > 4:\n return None\n while len(dl) < 4:\n dl.insert(0, 0)\n\n ret = int(dl[0]) * 60 * 60 * 24 + int(dl[1]) * \\\n 60 * 60 + int(dl[2]) * 60 + int(dl[3])\n return ret * 1000", "def epoch2UTCstr(timestamp=time(), fmat=\"%Y-%m-%d %H:%M:%S\"):\n return strftime(fmat, gmtime(timestamp))", "def timestamp_to_unix(timestamp):\n return timestamp / 1e6", "def get_epoch(timestamp):\n timestamp_dt = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n epoch = calendar.timegm(timestamp_dt.timetuple())\n return epoch", "def get_utc_offset(timestamp: str):\n offset_direction = timestamp[-5] # A plus or minus sign\n offset_hours = int(timestamp[-4:-2])\n offset_minutes = int(timestamp[-2:])\n offset_seconds = offset_hours * 3600 + offset_minutes * 60\n return datetime.timezone(datetime.timedelta(seconds=int(offset_direction + str(offset_seconds))))", "def getEpochTime(self, time_str=None):\n self.pushMode(CLI_MODES.shell)\n if time_str is None:\n system_time = self.sendCmd(r\"date +%s\")\n else:\n system_time = self.sendCmd(\"date -d '%s' \" % time_str + r\"+%s\")\n\n try:\n system_time = float(system_time)\n logger.info(\"Time %f\" % system_time)\n except:\n logger.error(\"Invalid system time '%s' on node '%s'\" % (system_time, self))\n raise ValueError(\"Invalid system time '%s' on node '%s'\" % (system_time, self))\n self.popMode()\n return system_time", "def get_annotations_added_duration(annotations):\n if (len(annotations)) == 0:\n return ANNOTATION_INITIAL_PAUSE_TIME\n\n duration = 0\n for annotation in annotations:\n duration += get_annotation_duration(annotation)\n return duration", "def set_epoch(self, epoch):\r\n pass", "def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()", "def _STEPS2TIME(step):\n return step/1000.", "def get_duration(f):\n return 0", "def datetime_to_utc(timestamp):\n\n epoch = datetime.utcfromtimestamp(0)\n delta = timestamp-epoch\n\n return long(delta.total_seconds() * 1000)", "async def toEpochTime(self, ctx, *, timeStr:str):\n\t\t_, time = (search_dates(\n\t\t\ttimeStr.upper(), settings={'RETURN_AS_TIMEZONE_AWARE': True})[0])\n\t\tawait ctx.send(f\"`{int(time.timestamp())}` is the timestamp for `{time.strftime('%c in timezone %Z')}`\\nThe basic timestamp would look like this: <t:{int(time.timestamp())}:F>\")", "def checktime(dt=None):\n if dt is None:\n epoch_time = str(int(time()))\n else:\n 
epoch_time = str(int(mktime(dt.timetuple())))\n return epoch_time + \"000\"", "def _hx_time_to_epoch(self, timestr: str) -> int: # pragma: no cover\n\n time_obj = datetime.datetime.strptime(timestr, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n return int(time_obj.strftime(\"%s\"))", "def stringTimeToUnix_NEW(st):\n y, m, d, h, n, s, ms = stringTimeToTuple(st)\n epochSecs = mktime(map(int ,(y, m, d, h, n, s, 0, 0, 0)))-timezone\n #print \"seconds is %f\\n\" % (epochSecs + int(ms)/1000.0)\n return epochSecs + int(ms)/1000.0", "def training_time_left(current_epoch, total_epochs, epoch_time):\n epochs_rem = total_epochs - current_epoch - 1\n time_rem = epochs_rem * epoch_time\n return str(datetime.timedelta(seconds=time_rem))", "def datetime_to_epoch(datetime):\n return datetime.astype('int64') // 1e9", "def get_epoch_time(utc_datetime=None):\n if not utc_datetime:\n utc_datetime = datetime.datetime.utcnow()\n return math.ceil((utc_datetime - EPOCH_START).total_seconds())", "def get_duration(file):\n cmd = 'ffprobe -i \"{}\" -show_entries format=duration -v quiet -of csv=\"p=0\"'.format(file)\n try:\n output = subprocess.check_output(\n cmd,\n shell=True, # Let this run in the shell\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print(e.output)\n output = 0\n # return round(float(output)) # ugly, but rounds your seconds up or down\n return float(output)", "def duration(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"duration\")", "def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)", "def epochCalc(timestamps):\n\tsplitTimes = unixTimeConv(timestamps)\n\tepochTimes = []\n\thour=int(splitTimes[3])\n\n\tif (hour >0 and hour <=9) or hour>=23:\n\t\tepoch='night'\n\telse:\n\t\tepoch='not_night'\n\tepochTimes.append((epoch,splitTimes[6]))\n\treturn epochTimes", "def run_duration(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"run_duration\")", "def EpochNano():\n return int(time.time() * 1000000000)", "def conv_time(l, h):\n\t# Function modified from post on ActiveState by John Nielsen\n\n\t#converts 64-bit integer specifying the number of 100-nanosecond\n\t#intervals which have passed since January 1, 1601.\n\t#This 64-bit value is split into the\n\t#two 32 bits stored in the structure.\n\td = 116444736000000000L \n\n\t# Some LNK files do not have time field populated \n\tif l + h != 0:\n\t\tnewTime = (((long(h) << 32) + long(l)) - d)/10000000 \n\telse:\n\t\tnewTime = 0\n\n\treturn time.strftime(\"%Y/%m/%d %H:%M:%S %a\", time.localtime(newTime))", "def _arrival_time(self):\n \n return self.mkt_time + timedelta(0, 0, self.latency)", "def duration():\r\n elapsed_time, duration = video_time()\r\n return duration", "def calc_time(self, distance):\r\n if distance < 400:\r\n return 2*math.sqrt(distance / 1406.25)\r\n else:\r\n distance -= 400\r\n return distance / 750 + 16 / 15", "def track_length_string(length):\n return str(timedelta(microseconds=length))", "def get_duration(distance, power):\n assert power == 1\n if distance <= 16.8:\n t = distance_polynomial(distance) * 1000.0\n else:\n # s = (t - 300) * 0.063 + 18.9\n t = (distance - 16.8) / 0.063 + 300\n return int(t)", "def get_duration(self):\n try:\n if self.is_skipped:\n return \"00:00\"\n assert self.start_time\n assert self.stop_time\n if self.stop_time < self.start_time:\n return \"XX:XX\"\n return(\n f\"{str(int(self.stop_time - self.start_time) // 60).zfill(2)}:\"\n f\"{str(int(self.stop_time - self.start_time) % 
60).zfill(2)}\")\n\n except Exception: # pylint: disable=broad-except\n self.__logger.error(\"Please run test before getting the duration\")\n return \"XX:XX\"", "def get_game_length(self, duration):\n minutes = int(duration / 60)\n seconds = int(duration % 60)\n return \"{0}:{1}\".format(minutes, str(seconds).zfill(2))", "def duration(self):\n return self.end_abs - self.start", "def _get_tell_time(elem):\n return int(elem[3])", "def set_epoch(self, epoch=None):\n params = {'epoch': epoch if epoch is not None else int(time.time())}\n return self._jadeRpc('set_epoch', params)", "def datetime_utc_epoch_start() -> datetime:\n return timestamp_to_datetime(0)", "def timestamp(self, offset=0):\n return int(self._base_ts + offset) * 1000", "def _from_microseconds(value):\n return _UTC_EPOCH + datetime.timedelta(microseconds=value)", "def get_subtitle_offset(annotation, seen_annotations, clip):\n if not annotation[\"time\"] in seen_annotations:\n return clip.h - SUBTILE_OFFSET\n else:\n return clip.h - SUBTILE_OFFSET * (seen_annotations[annotation[\"time\"]] + 1)", "def _get_timestamp(self, timestamp):\n return int(timestamp * 1e6)", "def convert_delta_time(\n delta_time: np.ndarray,\n epoch1: str | tuple | list | np.datetime64 | None = None,\n epoch2: str | tuple | list | np.datetime64 | None = None,\n scale: float = 1.0\n ):\n # convert epochs to datetime variables\n if isinstance(epoch1, (tuple, list)):\n epoch1 = np.datetime64(datetime.datetime(*epoch1))\n elif isinstance(epoch1, str):\n epoch1 = np.datetime64(parse(epoch1))\n if isinstance(epoch2, (tuple, list)):\n epoch2 = np.datetime64(datetime.datetime(*epoch2))\n elif isinstance(epoch2, str):\n epoch2 = np.datetime64(parse(epoch2))\n # calculate the total difference in time in seconds\n delta_time_epochs = (epoch2 - epoch1) / np.timedelta64(1, 's')\n # subtract difference in time and rescale to output units\n return scale*(delta_time - delta_time_epochs)", "def convert_delta_time(\n delta_time: np.ndarray,\n epoch1: str | tuple | list | np.datetime64 | None = None,\n epoch2: str | tuple | list | np.datetime64 | None = None,\n scale: float = 1.0\n ):\n # convert epochs to datetime variables\n if isinstance(epoch1, (tuple, list)):\n epoch1 = np.datetime64(datetime.datetime(*epoch1))\n elif isinstance(epoch1, str):\n epoch1 = np.datetime64(parse(epoch1))\n if isinstance(epoch2, (tuple, list)):\n epoch2 = np.datetime64(datetime.datetime(*epoch2))\n elif isinstance(epoch2, str):\n epoch2 = np.datetime64(parse(epoch2))\n # calculate the total difference in time in seconds\n delta_time_epochs = (epoch2 - epoch1) / np.timedelta64(1, 's')\n # subtract difference in time and rescale to output units\n return scale*(delta_time - delta_time_epochs)", "def startup_time_delta(self):\n return int((time.time() - self.startup_timestamp) * 1000.0)", "def duration(self):\n return self._end - self._begin" ]
[ "0.5740035", "0.56960464", "0.55968964", "0.5498718", "0.54860854", "0.5484328", "0.5429148", "0.54070693", "0.53953534", "0.53865397", "0.53487533", "0.5276387", "0.52748066", "0.52547956", "0.52436954", "0.52381575", "0.5197884", "0.5156624", "0.51419455", "0.5118423", "0.5093782", "0.5069282", "0.5063411", "0.5059325", "0.5058929", "0.5048091", "0.50435907", "0.50280434", "0.50262785", "0.50115514", "0.5010826", "0.5007418", "0.5006992", "0.4980454", "0.497187", "0.49706444", "0.49691653", "0.49669665", "0.4961616", "0.4936073", "0.49223015", "0.49190468", "0.49143624", "0.49131238", "0.49023747", "0.4901729", "0.48892078", "0.48775604", "0.48763993", "0.48587692", "0.4847619", "0.48448938", "0.48413882", "0.48381653", "0.4823335", "0.48084617", "0.48028553", "0.4796588", "0.47929507", "0.4780423", "0.476925", "0.47661272", "0.47599286", "0.47573486", "0.475518", "0.4747802", "0.4745858", "0.4729913", "0.47226653", "0.4721929", "0.47146732", "0.471362", "0.47133383", "0.4705931", "0.47033358", "0.47001725", "0.46994978", "0.46962976", "0.46841857", "0.46841788", "0.46794772", "0.46740955", "0.46730113", "0.46646568", "0.46639442", "0.4662792", "0.46459842", "0.4643234", "0.46385413", "0.46335572", "0.46307093", "0.46298242", "0.4628809", "0.46273786", "0.46242326", "0.46224645", "0.4619042", "0.46156088", "0.46156088", "0.46083117", "0.46066943" ]
0.0
-1
Convert Input JSON file into TableSchema message.
def BqTableSchemaFileProcessor(file_arg):
  table_schema_type = GetApiMessage('TableSchema')
  schema_field_type = GetApiMessage('TableFieldSchema')

  try:
    schema_json = yaml.load(file_arg)
    schema_json = schema_json.get('schema', None)

    if not schema_json or not isinstance(schema_json, list):
      raise SchemaFileError(
          'Error parsing schema file: no schema field list defined in file')

    all_fields = []
    for field in schema_json:
      new_field = schema_field_type(name=field['name'],
                                    type=field['type'],
                                    mode=field.get('mode', 'NULLABLE'))
      all_fields.append(new_field)

    return table_schema_type(fields=sorted(all_fields, key=lambda x: x.name))
  except yaml.YAMLParseError as ype:
    raise SchemaFileError('Error parsing schema file [{}]'.format(ype))
  except (AttributeError, KeyError) as e:
    raise SchemaFileError(
        'Error parsing schema file, invalid field definition [{}]'.format(e))
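For reference, a minimal sketch of the same parse-and-normalize step in plain Python, assuming the schema file has already been loaded into schema_json as a list of field entries carrying name, type, and an optional mode that defaults to NULLABLE (the gcloud message types returned by GetApiMessage are replaced by plain dicts here, and the field values are illustrative only):

schema_json = [
    {'name': 'name', 'type': 'STRING'},
    {'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
]

# Fill in the default mode and sort the fields by name, as the processor above does.
fields = [
    {'name': f['name'], 'type': f['type'], 'mode': f.get('mode', 'NULLABLE')}
    for f in schema_json
]
table_schema = {'fields': sorted(fields, key=lambda x: x['name'])}
print(table_schema)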
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReadSchemaFile(schema_file, bigquery_messages):\n\n if os.path.exists(schema_file):\n with open(schema_file, mode='r') as f:\n try:\n def UpperOrNone(string):\n return string and string.upper()\n field_schemas = [\n bigquery_messages.TableFieldSchema(\n name=json_object.get('name'),\n type=json_object.get('type').upper(),\n mode=UpperOrNone(json_object.get('mode')))\n for json_object in json.load(f)]\n return bigquery_messages.TableSchema(fields=field_schemas)\n except ValueError as e:\n raise bigquery.SchemaError(\n 'Error decoding JSON schema from file {0}: {1}.'.format(\n schema_file, e))\n else:\n raise bigquery.SchemaError(\n 'Error reading schema: File \"{0}\" was not found.'.format(schema_file))", "def _create_input_data(self):\n SCHEMA = parse_table_schema_from_json(\n '{\"fields\": [{\"name\": \"data\", \"type\": \"BYTES\"}]}')\n\n def format_record(record):\n # Since Synthetic Source returns data as a dictionary, we should skip one\n # of the part\n import base64\n return {'data': base64.b64encode(record[1])}\n\n with TestPipeline() as p:\n ( # pylint: disable=expression-not-assigned\n p\n | 'Produce rows' >> Read(\n SyntheticSource(self.parse_synthetic_source_options()))\n | 'Format' >> Map(format_record)\n | 'Write to BigQuery' >> WriteToBigQuery(\n dataset=self.input_dataset,\n table=self.input_table,\n schema=SCHEMA,\n create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=BigQueryDisposition.WRITE_EMPTY))", "def CreateTableFromFile(self, table_name, schema_path):\n try:\n schema_file = open(schema_path)\n schema_json = schema_file.read()\n schema_file.close()\n except IOError, e:\n raise SchemaError('Could not read file (%s):\\n%s' %\n (schema_path, str(e)))\n return self.CreateTableFromJson(table_name, schema_json)", "def read_json(file) -> Table:\n try:\n with Path(file).open(mode=\"r\", encoding=\"utf-8\") as __f:\n res = load(__f)\n print(str(res)[:100])\n return res\n except JSONDecodeError as err:\n if err.msg != \"Extra data\":\n raise\n # Extra data, so try load line by line\n res = []\n for line in Path(file).read_text(encoding=\"utf-8\").splitlines():\n try:\n if line.strip() == \"\":\n continue\n res.append(loads(line))\n except Exception:\n ic(line)\n ic(\"exc2\")\n raise\n return res", "def __json_schema_generator(file):\n try:\n data = json.load(file)\n metadata_set = set()\n try:\n for datum in data['meta']['view']['columns']:\n metadata_set.add(datum['name'])\n except Exception as e:\n metadata_set.clear()\n for datum in data:\n if isinstance(datum, str):\n metadata_set.add(datum)\n else:\n for datum_property in datum:\n metadata_set.add(str(datum_property))\n\n metadata_list = list(metadata_set)\n # assumes list of objects with sparsse data\n # OR\n # for data_property in data[0]:\n # metadata_list.append(data_property)\n # assumes list of objects and that first entry has full list of properties\n\n return SchemaGenerator.__build_schema(metadata_list)\n except Exception as e:\n logging.error('Failed to parse json file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from json file.\")", "def CreateTableFromJson(self, table_name, schema_json):\n try:\n schema = json.JSONDecoder().decode(schema_json)\n except ValueError, e:\n raise SchemaError('Could not parse fields:\\n%s\\n%s' %\n (schema_json, str(e)))\n\n conn = self._Connect()\n result = conn.Call(\n dict(method='bigquery.tables.insert',\n collection='tables',\n operation=bq.REST.INSERT,\n params=dict(name=table_name, fields=schema)))\n 
return result", "def ReadSchema(schema, bigquery_messages):\n\n return bigquery_messages.TableSchema(\n fields=[\n _TableFieldSchemaForEntry(entry, bigquery_messages)\n for entry in schema.split(',')])", "def insert_data(table, jsonfile):\n with open(jsonfile) as infile:\n data = json.load(infile)\n table_models_map[table]['insert'](data)", "def generate_wc_schema():\n json_str = json.dumps({'fields': [\n {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def main(input_json, output_markdown):\n parser = Parser()\n output_md = parser.parse_schema(json.load(input_json))\n output_markdown.writelines(output_md)\n click.secho(\"✔ Successfully parsed schema!\", bold=True, fg=\"green\")", "def _convert(self, fn, suffix='json', path='jsonschema', name=None,\n root_class_name=None, data_files=[], target_class=None):\n ie = JsonSchemaImportEngine()\n d = os.path.join(INPUT_DIR, path)\n schema = ie.load(os.path.join(d, f'{fn}.{suffix}'), name=name, format=suffix, root_class_name=root_class_name)\n model_path = os.path.join(OUTPUT_DIR, f'{fn}.yaml')\n write_schema(schema, model_path)\n roundtrip_path = os.path.join(OUTPUT_DIR, f'{fn}.roundtrip.json')\n with open(roundtrip_path, 'w') as stream:\n stream.write(JsonSchemaGenerator(model_path).serialize())\n python_path = os.path.join(OUTPUT_DIR, f'{fn}.py')\n with open(python_path, 'w') as stream:\n stream.write(PythonGenerator(model_path).serialize())\n compile_python(python_path)\n # TODO: test data_files\n return schema", "def load_schema(filename):\n with open(filename) as f:\n schema = json.load(f)\n\n return schema", "def test_convert_json():\n schema = pa.schema([\n pa.field(\"foo\", pa.int32()),\n pa.field(\"bar\", pa.int64())\n ])\n\n input_path = \"{}/tests/fixtures/simple_json.txt\".format(os.getcwd())\n expected_file = \"{}/tests/fixtures/simple.parquet\".format(os.getcwd())\n with tempfile.NamedTemporaryFile() as f:\n output_file = f.name\n client.convert_json(input_path, output_file, schema)\n output = pq.ParquetFile(output_file)\n expected = pq.ParquetFile(expected_file)\n assert output.metadata.num_columns == expected.metadata.num_columns\n assert output.metadata.num_rows == expected.metadata.num_rows\n assert output.schema.equals(expected.schema)\n assert output.read_row_group(0).to_pydict() == expected.read_row_group(0).to_pydict()", "def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance", "def process_data(data):\n # Table Name from Json file\n table_name = data['table_name']\n\n # No of Column\n column_count = data['column_count']\n\n # No of Row\n row_count = data['row_count']\n\n # Table columns schema from Json file\n column_properties = data['column_properties']\n\n # Get the row row_properties\n row_properties = data['row_properties']\n return table_name, column_count, row_count, column_properties, row_properties", "def deduce_schema(self, input_data, *, schema_map=None):\n\n if self.input_format == 'csv':\n if self.csv_dialect:\n reader = csv.DictReader(input_data, dialect=self.csv_dialect)\n else:\n reader = csv.DictReader(input_data)\n elif self.input_format == 'json' or self.input_format is None:\n reader = json_reader(input_data)\n elif self.input_format == 'dict':\n reader = input_data\n else:\n raise Exception(f\"Unknown 
input_format '{self.input_format}'\")\n\n if schema_map is None:\n schema_map = OrderedDict()\n\n try:\n for json_object in reader:\n\n # Print a progress message periodically.\n self.line_number += 1\n if self.line_number % self.debugging_interval == 0:\n logging.info(f'Processing line {self.line_number}')\n\n # Deduce the schema from this given data record.\n if isinstance(json_object, dict):\n self.deduce_schema_for_record(\n json_object=json_object,\n schema_map=schema_map,\n )\n elif isinstance(json_object, Exception):\n self.log_error(\n f'Record could not be parsed: Exception: {json_object}'\n )\n if not self.ignore_invalid_lines:\n raise json_object\n else:\n self.log_error(\n 'Record should be a JSON Object '\n f'but was a {type(json_object)}'\n )\n if not self.ignore_invalid_lines:\n raise Exception(f'Record must be a JSON Object '\n f'but was a {type(json_object)}')\n finally:\n logging.info(f'Processed {self.line_number} lines')\n\n return schema_map, self.error_logs", "def _create_schema(self, cypher_file):\n if len(self.graph.nodes) > 0:\n msg = \"Cypher file specified but the graph is not empty. Aborting.\"\n raise ValueError(msg)\n cyp = open(cypher_file, 'r').read()\n self.graph.run(cyp)", "def BqTableDataFileProcessor(file_arg):\n data_insert_request_type = GetApiMessage('TableDataInsertAllRequest')\n insert_row_type = data_insert_request_type.RowsValueListEntry\n data_row_type = GetApiMessage('JsonObject')\n\n try:\n data_json = yaml.load(file_arg)\n\n if not data_json or not isinstance(data_json, list):\n raise TableDataFileError(\n 'Error parsing data file: no data records defined in file')\n\n rows = []\n for row in data_json:\n rows.append(insert_row_type(json=encoding.DictToMessage(\n row, data_row_type)))\n\n return rows\n except yaml.YAMLParseError as ype:\n raise TableDataFileError('Error parsing data file [{}]'.format(ype))", "def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())", "def _load_json_schema(filename: str):\n relative_path = path.join('schemas', filename)\n absolute_path = path.join(path.dirname(__file__), relative_path)\n\n with open(absolute_path, 'r', encoding='utf-8') as schema_file:\n schema = json.loads(schema_file.read())\n\n return schema", "def create_json_table(cur, country_json, body_json):\n print(\"Creating table from JSON file\")\n cur.execute('CREATE TABLE IF NOT EXISTS state_json(id INTEGER PRIMARY KEY, state TEXT, album TEXT)')\n for idx, album in enumerate(country_json[body_json['state']]):\n cur.execute('INSERT INTO state_json VALUES (\"%s\", \"%s\", \"%s\")' % (idx, body_json['state'], album))", "def run(\n self,\n input_file=sys.stdin,\n output_file=sys.stdout,\n schema_map=None,\n ):\n schema_map, error_logs = self.deduce_schema(\n input_file, schema_map=schema_map\n )\n\n for error in error_logs:\n logging.info(\n f\"Problem on line {error['line_number']}: {error['msg']}\"\n )\n\n if self.debugging_map:\n json.dump(schema_map, output_file, indent=2)\n print(file=output_file)\n else:\n schema = self.flatten_schema(schema_map)\n json.dump(schema, output_file, indent=2)\n print(file=output_file)", "def create_db(db, schema_json):\n with open(schema_json) as of:\n schema = json.load(of, object_pairs_hook=OrderedDict)\n # OrderedDict so that tables are created in the order specified,\n # allowing foreign keys to reference previously defined tables\n\n for table_name, 
columns in schema.items():\n col_types = columns.items() # dict -> tuple\n make_table(db, table_name, col_types)", "def main(event, context):\n # pylint: enable=unused-argument\n data = decode(event[\"data\"])\n to_table([data], PROJECT, DATASET, TABLE)", "def parse_raw(sqlContext, input, user):\n df = sqlContext.read.json(input + \"/\" + user + \"/\" + user + \".json\", multiLine=True)\n return df", "def json_to_thrift(json_str, root_thrift_class):\r\n return json.loads(json_str, cls=ThriftJSONDecoder, root_thrift_class=root_thrift_class)", "def extract_data(filename: str, schema_filename: str) -> DataFrame:\n data = []\n try:\n with open(schema_filename) as f:\n schema = json.load(f)\n with open(filename) as f:\n for line in f:\n json_doc = json.loads(line)\n if is_valid_data(json_doc, schema):\n data.append(json_doc)\n except ValueError as e:\n log.error(f\"Error parsing json: {e}\")\n except FileNotFoundError as e:\n log.error(f\"File not found error: {e}\")\n raise e\n except Exception as e:\n log.error(e)\n raise e\n return DataFrame(data)", "def _schema_write(self, table: TableSchema) -> None:\n with open(self.schemas / (table['name'] + '.json'), 'w') as f:\n json.dump(table, f, indent=True)", "def read_json(self, inputfile):\n transtransfile = json.load(inputfile)\n self.language = transfile['lang']\n self.translations = transfile['strings']", "def generate_cooccur_schema():\n json_str = json.dumps({'fields': [\n {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def generate_url_schema():\n json_str = json.dumps({'fields': [\n {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def generate_bq_schema(self, file_name, schema_file_name=None):\n if not schema_file_name:\n schema_file_name = f'{self.directory}/schema_temp.json'\n os.system(f\"generate-schema --keep_nulls < {file_name} > {schema_file_name}\")\n\n schema = open(schema_file_name, 'r').read()\n\n os.remove(schema_file_name)\n\n return json.loads(schema)", "def generate_data(self):\n print(\"generate_data - init\")\n with open(self.input_file, \"r\") as f:\n\n # read JSON data from input file\n data = json.loads(f.read())\n\n for idx, row in enumerate(data): \n # serialize Python dict to string\n msg = self.serialize_json(row)\n #print(f\"Linha: {row}\")\n self.send(self.topic, msg)\n self.flush()\n #print(\"Sleeping\")\n time.sleep(1)", "def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)", "def test_test_body_with_file_schema(self):\n pass", "def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = item['pk']\n\n # Add a new row.\n 
session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()", "def load_json_schema(filename):\n relative_path = join('../schema', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(\n schema_file.read(), base_uri=base_uri, jsonschema=True)", "def convert_json_to_flatbuffer_binary(json, schema, out_dir):\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)", "def _load_data_from_file(self, input_file_path):\n with FileOrBufferHandler(input_file_path, 'r', \n encoding=self.file_encoding) as input_file:\n try:\n data = json.load(input_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n input_file.seek(0)\n data = data_utils.read_json(\n data_generator=input_file,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n return data", "def load_schema(self, schema_file):\n with open(schema_file) as fp:\n for line in io.lines_in(fp):\n parts = line.strip().split('\\t')\n if len(parts) != 3:\n raise ValueError('invalid type declaration %r' % line.strip())\n self.declare_relation(parts[0], parts[1], parts[2])", "def transform_file(input_file_path: str, output_file_path: str):\n\n # Open json\n with open(input_file_path, mode=\"r\") as in_file:\n input_data = json.load(in_file)\n\n # Transform and write\n with jsonlines.open(output_file_path, mode=\"w\", compact=True) as out_file:\n for item in input_data[\"items\"]:\n out_file.write(transform_item(item))", "def parse(json_string):\n try:\n json_data = json.loads(json_string)\n except Exception as exn:\n raise SchemaParseException(\n 'Error parsing schema from JSON: %r. 
'\n 'Error message: %r.'\n % (json_string, exn))\n\n # Initialize the names object\n names = Names()\n\n # construct the Avro Schema object\n return schema_from_json_data(json_data, names)", "def get_inputs_from_file(filename=\"\"):\n import json\n\n with open(filename) as input_text:\n json_obj = json.load(input_text)\n return json_obj", "def json_input():\r\n with open (FILE_INPUT,'rb') as file:\r\n data = json.load(file)\r\n return data['input_data'][0]", "def intermediary_to_schema(tables, relationships, output):\n dot_file = _intermediary_to_dot(tables, relationships)\n #graph = AGraph()\n #graph = graph.from_string(dot_file)\n extension = output.split('.')[-1]\n #graph.draw(path=output, prog='dot', format=extension)\n #Source.from_file(filename, engine='dot', format=extension)\n return Source(dot_file, engine='dot', format=extension)", "def populate_from_json(db: Session, model: DatabaseModel, json_file: str):\n data_exists = db.query(model).first()\n if data_exists:\n return\n with open(json_file, \"r\") as file:\n data = json.loads(file.read())\n for obj in data:\n db.add(model(**obj))\n db.commit()", "def parse_and_format(file_in_path: str, file_out_path: str) -> None:\n\n with open(file_in_path) as input:\n payload = json.load(input)\n\n records = [\"# Data is in the format:\",\n \"# kem_id,kdf_id,aead_id,info,skRm,skEm,pkRm,pkEm,exporter_context,L,exported_value\"]\n\n for key in payload:\n # Skip these to test only capabilities exposed by BoringSSL\n if (key[\"mode\"] != MODE_BASE or\n key[\"kem_id\"] != KEM_DHKEM_X25519_SHA256 or\n key[\"kdf_id\"] != KDF_HKDF_SHA256 or\n key[\"aead_id\"] == AEAD_EXPORT_ONLY):\n continue\n\n for exportKey in key[\"exports\"]:\n records.append(\"{},{},{},{},{},{},{},{},{},{},{}\"\n .format(str(key[\"kem_id\"]),\n str(key[\"kdf_id\"]),\n str(key[\"aead_id\"]),\n str(key[\"info\"]),\n str(key[\"skRm\"]),\n str(key[\"skEm\"]),\n str(key[\"pkRm\"]),\n str(key[\"pkEm\"]),\n str(exportKey[\"exporter_context\"]),\n str(exportKey[\"L\"]),\n str(exportKey[\"exported_value\"])))\n\n\n with open(file_out_path, \"w\") as output:\n output.write(\"\\n\".join(records))", "def jsons_to_table(dir_jsons, dir_out, name, format='html'):\n # sanity of paths\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix='.{}'.format(format))\n # reading JSON files\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n # DataFrame conversion\n df = pd.DataFrame.from_dict(table)\n # writing HTML table\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)", "def decide_schema(self, json_data):\n pass", "def decide_schema(self, json_data):\n pass", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, 
input_file):\n with open(input_file) as f:\n return json.load(f)", "def all_to_intermediary(filename_or_input, schema=None):\n # Try to convert from the name of the class\n input_class_name = filename_or_input.__class__.__name__\n try:\n this_to_intermediary = switch_input_class_to_method[input_class_name]\n tables, relationships = this_to_intermediary(filename_or_input)\n return tables, relationships\n except KeyError:\n pass\n\n # try to read markdown file.\n if isinstance(filename_or_input, basestring):\n if filename_or_input.split('.')[-1] == 'er':\n return markdown_file_to_intermediary(filename_or_input)\n\n # try to read a markdown in a string\n if not isinstance(filename_or_input, basestring):\n if all(isinstance(e, basestring) for e in filename_or_input):\n return line_iterator_to_intermediary(filename_or_input)\n\n # try to read DB URI.\n try:\n make_url(filename_or_input)\n return database_to_intermediary(filename_or_input, schema=schema)\n except ArgumentError:\n pass\n\n msg = 'Cannot process filename_or_input {}'.format(input_class_name)\n raise ValueError(msg)", "def __csv_schema_generator(file):\n try:\n # Parses the first line of the file to get all the headers.\n metadata = str(file.readline().decode('utf-8')).strip().split(',')\n # Will be further implemented in phase 3.\n return SchemaGenerator.__build_schema(metadata)\n except Exception as e:\n logging.error('Failed to parse csv file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from csv file.\")", "def schema_load(filename):\n print(uc.schema_load(filename))", "def read_json_schema(schema_file_path):\n with open(schema_file_path) as f:\n schema = json.load(f)\n return schema", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_data(cls, input_file): # 这里是对文件的处理\r\n with open(input_file, encoding='utf-8') as f:\r\n lines = []\r\n\r\n for line in f:\r\n line = json.loads(line)\r\n words = ' '.join(list(line['natural']))\r\n labels = ' '.join(line['tag_seq'])\r\n poss = line['pos_seq']\r\n dps = line['dp_seq']\r\n head = line['head_seq']\r\n lines.append([labels, words, poss, dps, head])\r\n\r\n return lines", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def load_from_json(self, json_fp: str):\n # TODO:\n pass", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))", "def from_json(self, file_path):\n with open(file_path) as file:\n jsonstr = file.read()\n handler_dict = json.loads(jsonstr)\n self.from_dict(handler_dict)", "def import_schemas_from_file():\n with open('./tblSchemas') as schemas_file:\n schemas = {}\n for line in schemas_file:\n line = line.split()\n if len(line) == 0: continue\n if line[0] == 'tblname':\n tbl_name = line[1]\n schemas[tbl_name] = []\n else:\n schemas[tbl_name].append(line)\n return schemas", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def convert_schema(bco, filename, mapping_dict):\n for item in mapping_dict['to_swap']:\n value_list = mapping_dict['to_swap'][item]\n for i in range(0, len(value_list)):\n rename_dict_key_HELPER(bco, value_list[i]['index'],\n value_list[i]['value']) # Change key 
name\n\n for item in mapping_dict['to_delete']:\n value_list = mapping_dict['to_delete'][item]\n for i in range(0, len(value_list)):\n delete_key_HELPER(bco, value_list[i]['index'], value_list[i]['value']) # delete key\n\n for item in mapping_dict['to_add']:\n value_list = mapping_dict['to_add'][item]\n for i in range(0, len(value_list)):\n set_key_in_dict_HELPER(bco, value_list[i]['index'], value_list[i]['value']) # add key\n\n new_bco = bco\n try:\n new_bco['provenance_domain'][\n 'modified'] = datetime.now().isoformat() # change date to current\n\n temp_bco = dict(new_bco)\n del temp_bco['object_id'], temp_bco['etag'], temp_bco['spec_version']\n\n new_bco['spec_version'] = \"https://w3id.org/ieee/ieee-2791-schema/2791object.json\"\n new_bco[\"etag\"] = sha256(json.dumps(temp_bco).encode('utf-8')).hexdigest()\n except KeyError: # Vital field was missing, will be caught by final error checker\n pass\n file = open(filename, \"w\")\n json.dump(new_bco, file, indent=4)", "def test_json():\n schemas = {\n 'schema-languages': 'bible/languages.json',\n 'schema-book-metadata': 'bible/book-metadata.json',\n 'schema-bible': 'bible/bible-*.json'\n }\n for schema_name, data_path_glob in schemas.items():\n schema_path = 'schemas/{}.json'.format(schema_name)\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n data_paths = glob.iglob(data_path_glob)\n for data_path in data_paths:\n with open(data_path) as data_file:\n data = json.load(data_file)\n yield jsonschema.validate, data, schema", "def _load_schema(self, json_schema):\n # use jsonrefs to resolve all $refs in json\n data = jsonref.loads(json.dumps(json_schema))\n return self.__initialise_template(data)", "def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)", "def get_schema(filename: str) -> dict:\n return _load_json_schema(filename)", "def _import(format, input, config):\n if input:\n with open(input, 'rb') as f:\n data = f.read()\n else:\n data = sys.stdin.read()\n\n dataset = tablib.Dataset()\n setattr(dataset, format, data)\n\n _add_changelogs(config, dataset.dict)", "def read_json(self, filename, multi_line=False, schema=None):\n self.logger.info(\"# Reading a JSON file \" + filename)\n sql_context = SQLContext(self.spark)\n df = None\n if schema is None and multi_line == False:\n df = sql_context.read.json(filename)\n elif multi_line:\n df = sql_context.read.json(filename, multiLine=multi_line)\n elif schema is not None:\n df = sql_context.read.json(filename, schema=schema)\n return df", "def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path {relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)", "def readjamschema(schema):\n raise NotImplementedError(msg)", "def load(file_name):\n with open(file_name, newline='') as f:\n reader = csv.reader(f)\n data = list(reader)\n\n schema = [x.strip() for x in data[0]]\n table = [[int(el) for el in row] for row in data[1:]]\n\n return schema, table", "def get_schema(path):\n with open(path, 'r') as f:\n return json.load(f)", "def 
test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def create_table_from_file():\n\n full_path = os.getcwd()\n file_name = full_path + \"/inventory/inventory.csv\"\n\n if os.path.exists(file_name):\n table = data_manager.get_table_from_file(file_name)\n\n else:\n ui.print_error_message(\"There is no file to read!\")\n table = []\n\n return table", "def tsv_to_json(tsv_file, json_file):\n import csv\n import json\n\n try:\n with open(tsv_file, 'r') as tsvFile:\n file_reader = csv.DictReader(tsvFile, dialect='excel-tab')\n row_list = list(file_reader)\n with open(json_file, 'w+') as jsonFile:\n jsonFile.write(json.dumps(row_list, indent=4))\n return 1\n except (ValueError, FileNotFoundError):\n return 0", "async def _process_create_data(self, data: dict) -> dict:\n return self.SCHEMA(data)", "def read_data(self):\n if self._file.is_file():\n try:\n self._data = read_json_file(self._file)\n except (OSError, json.JSONDecodeError):\n _LOGGER.warning(\"Can't read %s\", self._file)\n self._data = {}\n\n # Validate\n try:\n self._data = self._schema(self._data)\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't parse %s: %s\",\n self._file, humanize_error(self._data, ex))\n\n # Reset data to default\n _LOGGER.warning(\"Reset %s to default\", self._file)\n self._data = self._schema({})", "def _mashup_json_to_table(json_obj, col_config=None):\n\n dataTable = Table(masked=True)\n absCorr = None\n\n if not all(x in json_obj.keys() for x in ['fields', 'data']):\n raise KeyError(\"Missing required key(s) 'data' and/or 'fields.'\")\n\n for col, atype in [(x['name'], x['type']) for x in json_obj['fields']]:\n\n # Removing \"_selected_\" column\n if col == \"_selected_\":\n continue\n\n # reading the colum config if given\n ignoreValue = None\n if col_config:\n colProps = col_config.get(col, {})\n ignoreValue = colProps.get(\"ignoreValue\", None)\n\n # making type adjustments\n if atype == \"string\":\n atype = \"str\"\n ignoreValue = \"\" if (ignoreValue is None) else ignoreValue\n if atype == \"boolean\":\n atype = \"bool\"\n if atype == \"int\": # int arrays do not admit Non/nan vals\n atype = np.int64\n ignoreValue = -999 if (ignoreValue is None) else ignoreValue\n if atype == \"date\":\n atype = \"str\"\n ignoreValue = \"\" if (ignoreValue is None) else ignoreValue\n\n # Make the column list (don't assign final type yet or there will be errors)\n colData = np.array([x.get(col, ignoreValue) for x in json_obj['data']], dtype=object)\n if ignoreValue is not None:\n colData[np.where(np.equal(colData, None))] = ignoreValue\n\n # no consistant way to make the mask because np.equal fails on ''\n # and array == value fails with None\n if atype == 'str':\n colMask = (colData == ignoreValue)\n else:\n colMask = np.equal(colData, ignoreValue)\n\n # add the column\n dataTable.add_column(MaskedColumn(colData.astype(atype), name=col, mask=colMask))\n\n return dataTable", "def load_dict_to_delta_table(spark, s3_data_bucket, table_schema, table_name, data, overwrite=False):\n table_to_col_names_dict = {}\n table_to_col_names_dict[\"transaction_fabs\"] = TRANSACTION_FABS_COLUMNS\n table_to_col_names_dict[\"transaction_fpds\"] = TRANSACTION_FPDS_COLUMNS\n 
table_to_col_names_dict[\"transaction_normalized\"] = list(TRANSACTION_NORMALIZED_COLUMNS)\n table_to_col_names_dict[\"awards\"] = list(AWARDS_COLUMNS)\n table_to_col_names_dict[\"financial_accounts_by_awards\"] = list(FINANCIAL_ACCOUNTS_BY_AWARDS_COLUMNS)\n\n table_to_col_info_dict = {}\n for tbl_name, col_info in zip(\n (\"transaction_fabs\", \"transaction_fpds\"), (TRANSACTION_FABS_COLUMN_INFO, TRANSACTION_FPDS_COLUMN_INFO)\n ):\n table_to_col_info_dict[tbl_name] = {}\n for col in col_info:\n table_to_col_info_dict[tbl_name][col.dest_name] = col\n\n # Make sure the table has been created first\n call_command(\n \"create_delta_table\",\n \"--destination-table\",\n table_name,\n \"--alt-db\",\n table_schema,\n \"--spark-s3-bucket\",\n s3_data_bucket,\n )\n\n if data:\n insert_sql = f\"INSERT {'OVERWRITE' if overwrite else 'INTO'} {table_schema}.{table_name} VALUES\\n\"\n row_strs = []\n for row in data:\n value_strs = []\n for col_name in table_to_col_names_dict[table_name]:\n value = row.get(col_name)\n if isinstance(value, (str, bytes)):\n # Quote strings for insertion into DB\n value_strs.append(f\"'{value}'\")\n elif isinstance(value, (date, datetime)):\n # Convert to string and quote\n value_strs.append(f\"\"\"'{value.isoformat()}'\"\"\")\n elif isinstance(value, bool):\n value_strs.append(str(value).upper())\n elif isinstance(value, (Sequence, Set)):\n # Assume \"sequences\" must be \"sequences\" of strings, so quote each item in the \"sequence\"\n value = [f\"'{item}'\" for item in value]\n value_strs.append(f\"ARRAY({', '.join(value)})\")\n elif value is None:\n col_info = table_to_col_info_dict.get(table_name)\n if (\n col_info\n and col_info[col_name].delta_type.upper() == \"BOOLEAN\"\n and not col_info[col_name].handling == \"leave_null\"\n ):\n # Convert None/NULL to false for boolean columns unless specified to leave the null\n value_strs.append(\"FALSE\")\n else:\n value_strs.append(\"NULL\")\n else:\n value_strs.append(str(value))\n\n row_strs.append(f\" ({', '.join(value_strs)})\")\n\n sql = \"\".join([insert_sql, \",\\n\".join(row_strs), \";\"])\n spark.sql(sql)", "def convert_json_1(json):\n\n # TODO Add batch details to json format\n # TODO Get default direct entry batch details if not provided\n\n LOGGER.debug('convert json message:%s', json)\n direct_entry = {\n 'record_type': '1',\n 'reel_seq_num': '01',\n 'name_fin_inst': 'SUN',\n 'user_name': 'hello',\n 'user_num': '123456',\n 'file_desc': 'payroll',\n 'date_for_process': datetime.strptime(json['post_date'], '%Y-%m-%d').strftime('%d%m%y'),\n 'bsb_number': json['to_routing'],\n 'account_number': json['to_account'],\n 'indicator': ' ',\n 'tran_code': '13' if json['tran_type'] == 'db' else '53',\n 'amount': '{amount:010}'.format(amount=json['amount']), # $2.00\n 'account_title': json['to_name'],\n 'lodgement_ref': json['to_description'],\n 'trace_bsb_number': json['from_routing'],\n 'trace_account_number': json['from_account'],\n 'name_of_remitter': json['from_name'],\n 'withholding_tax_amount': '00000000',\n }\n\n return direct_entry", "def create_datastructure_from_yaml_file(self):\n ## loading the YAML file\n try:\n with open(self.source) as f:\n hbaseSchemaDic = yaml.load(f) \n except:\n msg = \"Error: the HBase substructure could not be created. File %s could not be loaded. 
Please check the syntax of the '.yml' file.\" % self.source \n raise createDataStructureException(msg)\n status = \"failed\" \n\n try:\n c = Connection(host = self.host, port = int(self.port))\n tbls = c.tables()\n tbls = [str(t) for t in tbls]\n ## check that none of the tables already exists \n for t in hbaseSchemaDic.keys():\n if t in tbls:\n msg = \"Error: the table %s already exists. If you use starbase in python you can drop the table by using \\n>>> from starbase import Connection\\n>>> c = Connection()\\n>>> t = c.table(%s)\\n>>> t.drop()\" % (t,t) \n print(msg)\n status = \"failed\"\n raise createDataStructureException(msg)\n\n ## if none of the table(s) do(es) not exist, let's create them(it) \n for t in hbaseSchemaDic.keys():\n columnFamilies = hbaseSchemaDic[t]['columnFamilies'].keys()\n tC = c.table(t)\n tC.create(*columnFamilies)\n status = \"succeeded\"\n \n except:\n msg = \"Error: the HBase substructure could not be created. Please check your connection parameters or the syntax in your '.yml' file.\"\n raise createDataStructureException(msg)\n status = \"failed\"\n\n return(status)", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def read_json_breakdown(cls, fname):\n if not os.path.exists(fname):\n raise RuntimeError\n\n with open(fname, 'r') as data_file:\n return cls.fixup_from_json(data_file.read())", "def _meta_json_to_database(self):\n\n sqlalchemy_metadata = MetaData() # this is unrelated to our meta.json\n meta_table = Table('meta', sqlalchemy_metadata,\n Column('meta', String))\n\n sqlalchemy_metadata.create_all(self.engine)\n json_string = json.dumps(self.meta)\n ins = meta_table.insert().values(meta=json_string)\n conn = self.engine.connect()\n conn.execute(\"DELETE FROM meta;\")\n conn.execute(ins)", "def parse_migration_tables(self, tabels_schema: MigrationTablesSchema):\n try:\n self.source_table = tabels_schema.migrationTable.SourceTable.dict()\n self.destination_table = tabels_schema.migrationTable.DestinationTable.dict()\n self.columns = tabels_schema.migrationTable.MigrationColumns\n except Exception as err:\n logger.error(\"parse_migration_tables [error] -> %s\" % err)", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def create_deft_table_json_mappings():\n mappings = list()\n mappings.append(JsonColumnMapping(columnName=\"rownumber\", jsonPath=\"$.rownumber\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"rowguid\", jsonPath=\"$.rowguid\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdouble\", jsonPath=\"$.xdouble\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xfloat\", jsonPath=\"$.xfloat\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xbool\", jsonPath=\"$.xbool\", 
cslDataType=\"bool\"))\n mappings.append(JsonColumnMapping(columnName=\"xint16\", jsonPath=\"$.xint16\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint32\", jsonPath=\"$.xint32\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint64\", jsonPath=\"$.xint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint8\", jsonPath=\"$.xuint8\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint16\", jsonPath=\"$.xuint16\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint32\", jsonPath=\"$.xuint32\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint64\", jsonPath=\"$.xuint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xdate\", jsonPath=\"$.xdate\", cslDataType=\"datetime\"))\n mappings.append(JsonColumnMapping(columnName=\"xsmalltext\", jsonPath=\"$.xsmalltext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtext\", jsonPath=\"$.xtext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xnumberAsText\", jsonPath=\"$.xnumberAsText\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtime\", jsonPath=\"$.xtime\", cslDataType=\"timespan\"))\n mappings.append(\n JsonColumnMapping(columnName=\"xtextWithNulls\", jsonPath=\"$.xtextWithNulls\", cslDataType=\"string\")\n )\n mappings.append(\n JsonColumnMapping(columnName=\"xdynamicWithNulls\", jsonPath=\"$.xdynamicWithNulls\", cslDataType=\"dynamic\")\n )\n return mappings", "def create_schema(overwrite=False):\n if (not overwrite) and os.path.isfile(_schema_file):\n raise RuntimeError(\"Schema file already exists.\")\n schema = {\n 'title': 'obj',\n 'description': 'A mapping container for Obj 3D data.',\n 'type': 'object',\n 'required': ['vertices', 'faces'],\n 'definitions': {\n 'vertex': {\n 'description': 'Map describing a single vertex.',\n 'type': 'object', 'required': ['x', 'y', 'z'],\n 'additionalProperties': False,\n 'properties': {'x': {'type': _coord_type},\n 'y': {'type': _coord_type},\n 'z': {'type': _coord_type},\n 'red': {'type': _color_type},\n 'blue': {'type': _color_type},\n 'green': {'type': _color_type},\n 'w': {'type': _coord_type, 'default': 1.0}}},\n 'param': {\n 'description': 'Map describing a single parameter space point.',\n 'type': 'object', 'required': ['u', 'v'],\n 'additionalProperties': False,\n 'properties': {'u': {'type': _coord_type},\n 'v': {'type': _coord_type},\n 'w': {'type': _coord_type, 'default': 1.0}}},\n 'normal': {\n 'description': 'Map describing a single normal.',\n 'type': 'object', 'required': ['i', 'j', 'k'],\n 'additionalProperties': False,\n 'properties': {'i': {'type': _coord_type},\n 'j': {'type': _coord_type},\n 'k': {'type': _coord_type}}},\n 'texcoord': {\n 'description': 'Map describing a single texture vertex.',\n 'type': 'object', 'required': ['u'],\n 'additionalProperties': False,\n 'properties': {'u': {'type': _coord_type},\n 'v': {'type': _coord_type, 'default': 0.0},\n 'w': {'type': _coord_type, 'default': 0.0}}},\n 'point': {\n 'description': 'Array of vertex indices describing a set of points.',\n 'type': 'array', 'minItems': 1,\n 'items': {'type': _index_type}},\n 'line': {\n 'description': ('Array of vertex indices and texture indices '\n + 'describing a line.'),\n 'type': 'array', 'minItems': 2,\n 'items': {'type': 'object', 'required': ['vertex_index'],\n 'additionalProperties': False,\n 'properties':\n {'vertex_index': 
{'type': _index_type},\n 'texcoord_index': {'type': _index_type}}}},\n 'face': {\n 'description': ('Array of vertex, texture, and normal indices '\n + 'describing a face.'),\n 'type': 'array', 'minItems': 3,\n 'items': {'type': 'object', 'required': ['vertex_index'],\n 'additionalProperties': False,\n 'properties':\n {'vertex_index': {'type': _index_type},\n 'texcoord_index': {'type': _index_type},\n 'normal_index': {'type': _index_type}}}},\n 'curve': {\n 'description': 'Properties of describing a curve.',\n 'type': 'object', 'required': ['starting_param', 'ending_param',\n 'vertex_indices'],\n 'additionalProperties': False,\n 'properties': {\n 'starting_param': {'type': _coord_type},\n 'ending_param': {'type': _coord_type},\n 'vertex_indices': {\n 'type': 'array', 'minItems': 2,\n 'items': {'type': _index_type}}}},\n 'curve2D': {\n 'description': ('Array of parameter indices describine a 2D curve on '\n + 'a surface.'),\n 'type': 'array', 'minItems': 2,\n 'items': {'type': _index_type}},\n 'surface': {\n 'description': 'Properties describing a surface.',\n 'type': 'object', 'required': ['starting_param_u', 'ending_param_u',\n 'starting_param_v', 'ending_param_v',\n 'vertex_indices'],\n 'additionalProperties': False,\n 'properties': {\n 'starting_param_u': {'type': _coord_type},\n 'ending_param_u': {'type': _coord_type},\n 'starting_param_v': {'type': _coord_type},\n 'ending_param_v': {'type': _coord_type},\n 'vertex_indices': {\n 'type': 'array', 'minItems': 2,\n 'items': {'type': 'object', 'required': ['vertex_index'],\n 'additionalProperties': False,\n 'properties': {\n 'vertex_index': {'type': _index_type},\n 'texcoord_index': {'type': _index_type},\n 'normal_index': {'type': _index_type}}}}}}},\n 'properties': {\n 'material': {\n 'description': 'Name of the material to use.',\n 'type': ['unicode', 'string']},\n 'vertices': {\n 'description': 'Array of vertices.',\n 'type': 'array', 'items': {'$ref': '#/definitions/vertex'}},\n 'params': {\n 'description': 'Array of parameter coordinates.',\n 'type': 'array', 'items': {'$ref': '#/definitions/param'}},\n 'normals': {\n 'description': 'Array of normals.',\n 'type': 'array', 'items': {'$ref': '#/definitions/normal'}},\n 'texcoords': {\n 'description': 'Array of texture vertices.',\n 'type': 'array', 'items': {'$ref': '#/definitions/texcoord'}},\n 'points': {\n 'description': 'Array of points.',\n 'type': 'array', 'items': {'$ref': '#/definitions/point'}},\n 'lines': {\n 'description': 'Array of lines.',\n 'type': 'array', 'items': {'$ref': '#/definitions/line'}},\n 'faces': {\n 'description': 'Array of faces.',\n 'type': 'array', 'items': {'$ref': '#/definitions/face'}},\n 'curves': {\n 'description': 'Array of curves.',\n 'type': 'array', 'items': {'$ref': '#/definitions/curve'}},\n 'curve2Ds': {\n 'description': 'Array of curve2Ds.',\n 'type': 'array', 'items': {'$ref': '#/definitions/curve2D'}},\n 'surfaces': {\n 'description': 'Array of surfaces.',\n 'type': 'array', 'items': {'$ref': '#/definitions/surface'}}},\n 'dependencies': {\n 'lines': ['vertices'],\n 'faces': ['vertices'],\n 'curves': ['vertices'],\n 'curve2Ds': ['params'],\n 'surfaces': ['vertices']}}\n with open(_schema_file, 'w') as fd:\n encode_json(schema, fd, indent='\\t')", "def generate_package_from_report_filepath(input_path, options = None):\n try:\n vt_file = open(input_path, 'r')\n vt_dict = json.load(vt_file)\n except:\n print('\\nError: Error in parsing input file. 
Please check to ensure that it is valid JSON.')\n return\n \n return vtpack.vt_report_to_maec_package(vt_dict, options)", "def import_into_db(json_file):\n db = get_db()\n\n with current_app.open_resource(json_file) as f:\n reports = json.load(f)\n\n for element in reports[\"elements\"]:\n entry = (\n element[\"id\"],\n element[\"source\"],\n element[\"sourceIdentityId\"],\n element[\"state\"],\n element[\"created\"],\n element[\"reference\"][\"referenceId\"],\n element[\"reference\"][\"referenceType\"],\n element[\"payload\"][\"reportType\"],\n element[\"payload\"][\"reportId\"],\n element[\"payload\"][\"referenceResourceId\"],\n element[\"payload\"][\"referenceResourceType\"],\n element[\"payload\"][\"message\"],\n )\n db.execute(INSERT_INTO_REPORTS, entry)\n\n db.commit()", "def json_schema(schema_file=None, output=\"-\"):\n schemas = read_yaml(schema_file)\n dump_yaml(output, JsonSchemaConverterFromAccessSchema.convert_schemas(schemas))", "def from_json_file(cls, json_file):\n with open(json_file, 'r', encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))" ]
[ "0.6506698", "0.6255999", "0.6189637", "0.59361964", "0.5880678", "0.5861754", "0.5762274", "0.56616706", "0.56331164", "0.5612857", "0.55776805", "0.5533528", "0.55065984", "0.55045223", "0.548444", "0.547434", "0.54314774", "0.53971493", "0.5346988", "0.5327368", "0.5307453", "0.5267999", "0.52455944", "0.52326703", "0.52325064", "0.5219136", "0.5207536", "0.5187294", "0.5183731", "0.51769954", "0.51745486", "0.51707107", "0.5170645", "0.5158985", "0.51522464", "0.51227444", "0.51213485", "0.5113382", "0.5110196", "0.5099857", "0.5093744", "0.5089529", "0.5088614", "0.508273", "0.5077681", "0.5069668", "0.5065103", "0.5055063", "0.50539035", "0.50539035", "0.50469744", "0.50469744", "0.50306594", "0.50274265", "0.502586", "0.5005657", "0.5004016", "0.5004016", "0.4997759", "0.4997609", "0.4986268", "0.498196", "0.49683973", "0.49655133", "0.496366", "0.49594378", "0.49467462", "0.49407235", "0.4928758", "0.492373", "0.4923461", "0.49174178", "0.49124745", "0.48915982", "0.48891523", "0.48878503", "0.48864958", "0.48769042", "0.48686188", "0.48567456", "0.48541322", "0.48476815", "0.4841311", "0.48368114", "0.48329535", "0.48315528", "0.48306122", "0.48293504", "0.48196423", "0.4817549", "0.4817549", "0.4817549", "0.48168716", "0.48168716", "0.48162666", "0.4815631", "0.481176", "0.4809378", "0.48069832", "0.4803606" ]
0.60093594
3
Convert Input JSON file into TableSchema message.
def BqTableDataFileProcessor(file_arg): data_insert_request_type = GetApiMessage('TableDataInsertAllRequest') insert_row_type = data_insert_request_type.RowsValueListEntry data_row_type = GetApiMessage('JsonObject') try: data_json = yaml.load(file_arg) if not data_json or not isinstance(data_json, list): raise TableDataFileError( 'Error parsing data file: no data records defined in file') rows = [] for row in data_json: rows.append(insert_row_type(json=encoding.DictToMessage( row, data_row_type))) return rows except yaml.YAMLParseError as ype: raise TableDataFileError('Error parsing data file [{}]'.format(ype))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReadSchemaFile(schema_file, bigquery_messages):\n\n if os.path.exists(schema_file):\n with open(schema_file, mode='r') as f:\n try:\n def UpperOrNone(string):\n return string and string.upper()\n field_schemas = [\n bigquery_messages.TableFieldSchema(\n name=json_object.get('name'),\n type=json_object.get('type').upper(),\n mode=UpperOrNone(json_object.get('mode')))\n for json_object in json.load(f)]\n return bigquery_messages.TableSchema(fields=field_schemas)\n except ValueError as e:\n raise bigquery.SchemaError(\n 'Error decoding JSON schema from file {0}: {1}.'.format(\n schema_file, e))\n else:\n raise bigquery.SchemaError(\n 'Error reading schema: File \"{0}\" was not found.'.format(schema_file))", "def _create_input_data(self):\n SCHEMA = parse_table_schema_from_json(\n '{\"fields\": [{\"name\": \"data\", \"type\": \"BYTES\"}]}')\n\n def format_record(record):\n # Since Synthetic Source returns data as a dictionary, we should skip one\n # of the part\n import base64\n return {'data': base64.b64encode(record[1])}\n\n with TestPipeline() as p:\n ( # pylint: disable=expression-not-assigned\n p\n | 'Produce rows' >> Read(\n SyntheticSource(self.parse_synthetic_source_options()))\n | 'Format' >> Map(format_record)\n | 'Write to BigQuery' >> WriteToBigQuery(\n dataset=self.input_dataset,\n table=self.input_table,\n schema=SCHEMA,\n create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=BigQueryDisposition.WRITE_EMPTY))", "def CreateTableFromFile(self, table_name, schema_path):\n try:\n schema_file = open(schema_path)\n schema_json = schema_file.read()\n schema_file.close()\n except IOError, e:\n raise SchemaError('Could not read file (%s):\\n%s' %\n (schema_path, str(e)))\n return self.CreateTableFromJson(table_name, schema_json)", "def BqTableSchemaFileProcessor(file_arg):\n table_schema_type = GetApiMessage('TableSchema')\n schema_field_type = GetApiMessage('TableFieldSchema')\n\n try:\n schema_json = yaml.load(file_arg)\n schema_json = schema_json.get('schema', None)\n\n if not schema_json or not isinstance(schema_json, list):\n raise SchemaFileError(\n 'Error parsing schema file: no schema field list defined in file')\n\n all_fields = []\n for field in schema_json:\n new_field = schema_field_type(name=field['name'],\n type=field['type'],\n mode=field.get('mode', 'NULLABLE'))\n all_fields.append(new_field)\n\n return table_schema_type(fields=sorted(all_fields, key=lambda x: x.name))\n except yaml.YAMLParseError as ype:\n raise SchemaFileError('Error parsing schema file [{}]'.format(ype))\n except (AttributeError, KeyError) as e:\n raise SchemaFileError(\n 'Error parsing schema file, invalid field definition [{}]'.format(e))", "def read_json(file) -> Table:\n try:\n with Path(file).open(mode=\"r\", encoding=\"utf-8\") as __f:\n res = load(__f)\n print(str(res)[:100])\n return res\n except JSONDecodeError as err:\n if err.msg != \"Extra data\":\n raise\n # Extra data, so try load line by line\n res = []\n for line in Path(file).read_text(encoding=\"utf-8\").splitlines():\n try:\n if line.strip() == \"\":\n continue\n res.append(loads(line))\n except Exception:\n ic(line)\n ic(\"exc2\")\n raise\n return res", "def __json_schema_generator(file):\n try:\n data = json.load(file)\n metadata_set = set()\n try:\n for datum in data['meta']['view']['columns']:\n metadata_set.add(datum['name'])\n except Exception as e:\n metadata_set.clear()\n for datum in data:\n if isinstance(datum, str):\n metadata_set.add(datum)\n else:\n for datum_property in datum:\n 
metadata_set.add(str(datum_property))\n\n metadata_list = list(metadata_set)\n # assumes list of objects with sparsse data\n # OR\n # for data_property in data[0]:\n # metadata_list.append(data_property)\n # assumes list of objects and that first entry has full list of properties\n\n return SchemaGenerator.__build_schema(metadata_list)\n except Exception as e:\n logging.error('Failed to parse json file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from json file.\")", "def CreateTableFromJson(self, table_name, schema_json):\n try:\n schema = json.JSONDecoder().decode(schema_json)\n except ValueError, e:\n raise SchemaError('Could not parse fields:\\n%s\\n%s' %\n (schema_json, str(e)))\n\n conn = self._Connect()\n result = conn.Call(\n dict(method='bigquery.tables.insert',\n collection='tables',\n operation=bq.REST.INSERT,\n params=dict(name=table_name, fields=schema)))\n return result", "def ReadSchema(schema, bigquery_messages):\n\n return bigquery_messages.TableSchema(\n fields=[\n _TableFieldSchemaForEntry(entry, bigquery_messages)\n for entry in schema.split(',')])", "def insert_data(table, jsonfile):\n with open(jsonfile) as infile:\n data = json.load(infile)\n table_models_map[table]['insert'](data)", "def generate_wc_schema():\n json_str = json.dumps({'fields': [\n {'name': 'word', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'percent', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def main(input_json, output_markdown):\n parser = Parser()\n output_md = parser.parse_schema(json.load(input_json))\n output_markdown.writelines(output_md)\n click.secho(\"✔ Successfully parsed schema!\", bold=True, fg=\"green\")", "def _convert(self, fn, suffix='json', path='jsonschema', name=None,\n root_class_name=None, data_files=[], target_class=None):\n ie = JsonSchemaImportEngine()\n d = os.path.join(INPUT_DIR, path)\n schema = ie.load(os.path.join(d, f'{fn}.{suffix}'), name=name, format=suffix, root_class_name=root_class_name)\n model_path = os.path.join(OUTPUT_DIR, f'{fn}.yaml')\n write_schema(schema, model_path)\n roundtrip_path = os.path.join(OUTPUT_DIR, f'{fn}.roundtrip.json')\n with open(roundtrip_path, 'w') as stream:\n stream.write(JsonSchemaGenerator(model_path).serialize())\n python_path = os.path.join(OUTPUT_DIR, f'{fn}.py')\n with open(python_path, 'w') as stream:\n stream.write(PythonGenerator(model_path).serialize())\n compile_python(python_path)\n # TODO: test data_files\n return schema", "def load_schema(filename):\n with open(filename) as f:\n schema = json.load(f)\n\n return schema", "def test_convert_json():\n schema = pa.schema([\n pa.field(\"foo\", pa.int32()),\n pa.field(\"bar\", pa.int64())\n ])\n\n input_path = \"{}/tests/fixtures/simple_json.txt\".format(os.getcwd())\n expected_file = \"{}/tests/fixtures/simple.parquet\".format(os.getcwd())\n with tempfile.NamedTemporaryFile() as f:\n output_file = f.name\n client.convert_json(input_path, output_file, schema)\n output = pq.ParquetFile(output_file)\n expected = pq.ParquetFile(expected_file)\n assert output.metadata.num_columns == expected.metadata.num_columns\n assert output.metadata.num_rows == expected.metadata.num_rows\n assert output.schema.equals(expected.schema)\n assert output.read_row_group(0).to_pydict() == expected.read_row_group(0).to_pydict()", "def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, 
schema)\n instance = cls.from_dict(data)\n return instance", "def process_data(data):\n # Table Name from Json file\n table_name = data['table_name']\n\n # No of Column\n column_count = data['column_count']\n\n # No of Row\n row_count = data['row_count']\n\n # Table columns schema from Json file\n column_properties = data['column_properties']\n\n # Get the row row_properties\n row_properties = data['row_properties']\n return table_name, column_count, row_count, column_properties, row_properties", "def deduce_schema(self, input_data, *, schema_map=None):\n\n if self.input_format == 'csv':\n if self.csv_dialect:\n reader = csv.DictReader(input_data, dialect=self.csv_dialect)\n else:\n reader = csv.DictReader(input_data)\n elif self.input_format == 'json' or self.input_format is None:\n reader = json_reader(input_data)\n elif self.input_format == 'dict':\n reader = input_data\n else:\n raise Exception(f\"Unknown input_format '{self.input_format}'\")\n\n if schema_map is None:\n schema_map = OrderedDict()\n\n try:\n for json_object in reader:\n\n # Print a progress message periodically.\n self.line_number += 1\n if self.line_number % self.debugging_interval == 0:\n logging.info(f'Processing line {self.line_number}')\n\n # Deduce the schema from this given data record.\n if isinstance(json_object, dict):\n self.deduce_schema_for_record(\n json_object=json_object,\n schema_map=schema_map,\n )\n elif isinstance(json_object, Exception):\n self.log_error(\n f'Record could not be parsed: Exception: {json_object}'\n )\n if not self.ignore_invalid_lines:\n raise json_object\n else:\n self.log_error(\n 'Record should be a JSON Object '\n f'but was a {type(json_object)}'\n )\n if not self.ignore_invalid_lines:\n raise Exception(f'Record must be a JSON Object '\n f'but was a {type(json_object)}')\n finally:\n logging.info(f'Processed {self.line_number} lines')\n\n return schema_map, self.error_logs", "def _create_schema(self, cypher_file):\n if len(self.graph.nodes) > 0:\n msg = \"Cypher file specified but the graph is not empty. 
Aborting.\"\n raise ValueError(msg)\n cyp = open(cypher_file, 'r').read()\n self.graph.run(cyp)", "def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())", "def _load_json_schema(filename: str):\n relative_path = path.join('schemas', filename)\n absolute_path = path.join(path.dirname(__file__), relative_path)\n\n with open(absolute_path, 'r', encoding='utf-8') as schema_file:\n schema = json.loads(schema_file.read())\n\n return schema", "def create_json_table(cur, country_json, body_json):\n print(\"Creating table from JSON file\")\n cur.execute('CREATE TABLE IF NOT EXISTS state_json(id INTEGER PRIMARY KEY, state TEXT, album TEXT)')\n for idx, album in enumerate(country_json[body_json['state']]):\n cur.execute('INSERT INTO state_json VALUES (\"%s\", \"%s\", \"%s\")' % (idx, body_json['state'], album))", "def run(\n self,\n input_file=sys.stdin,\n output_file=sys.stdout,\n schema_map=None,\n ):\n schema_map, error_logs = self.deduce_schema(\n input_file, schema_map=schema_map\n )\n\n for error in error_logs:\n logging.info(\n f\"Problem on line {error['line_number']}: {error['msg']}\"\n )\n\n if self.debugging_map:\n json.dump(schema_map, output_file, indent=2)\n print(file=output_file)\n else:\n schema = self.flatten_schema(schema_map)\n json.dump(schema, output_file, indent=2)\n print(file=output_file)", "def create_db(db, schema_json):\n with open(schema_json) as of:\n schema = json.load(of, object_pairs_hook=OrderedDict)\n # OrderedDict so that tables are created in the order specified,\n # allowing foreign keys to reference previously defined tables\n\n for table_name, columns in schema.items():\n col_types = columns.items() # dict -> tuple\n make_table(db, table_name, col_types)", "def main(event, context):\n # pylint: enable=unused-argument\n data = decode(event[\"data\"])\n to_table([data], PROJECT, DATASET, TABLE)", "def parse_raw(sqlContext, input, user):\n df = sqlContext.read.json(input + \"/\" + user + \"/\" + user + \".json\", multiLine=True)\n return df", "def json_to_thrift(json_str, root_thrift_class):\r\n return json.loads(json_str, cls=ThriftJSONDecoder, root_thrift_class=root_thrift_class)", "def extract_data(filename: str, schema_filename: str) -> DataFrame:\n data = []\n try:\n with open(schema_filename) as f:\n schema = json.load(f)\n with open(filename) as f:\n for line in f:\n json_doc = json.loads(line)\n if is_valid_data(json_doc, schema):\n data.append(json_doc)\n except ValueError as e:\n log.error(f\"Error parsing json: {e}\")\n except FileNotFoundError as e:\n log.error(f\"File not found error: {e}\")\n raise e\n except Exception as e:\n log.error(e)\n raise e\n return DataFrame(data)", "def _schema_write(self, table: TableSchema) -> None:\n with open(self.schemas / (table['name'] + '.json'), 'w') as f:\n json.dump(table, f, indent=True)", "def read_json(self, inputfile):\n transtransfile = json.load(inputfile)\n self.language = transfile['lang']\n self.translations = transfile['strings']", "def generate_cooccur_schema():\n json_str = json.dumps({'fields': [\n {'name': 'w1', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'w2', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'log_weight', 'type': 'FLOAT', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", 
"def generate_url_schema():\n json_str = json.dumps({'fields': [\n {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def generate_bq_schema(self, file_name, schema_file_name=None):\n if not schema_file_name:\n schema_file_name = f'{self.directory}/schema_temp.json'\n os.system(f\"generate-schema --keep_nulls < {file_name} > {schema_file_name}\")\n\n schema = open(schema_file_name, 'r').read()\n\n os.remove(schema_file_name)\n\n return json.loads(schema)", "def generate_data(self):\n print(\"generate_data - init\")\n with open(self.input_file, \"r\") as f:\n\n # read JSON data from input file\n data = json.loads(f.read())\n\n for idx, row in enumerate(data): \n # serialize Python dict to string\n msg = self.serialize_json(row)\n #print(f\"Linha: {row}\")\n self.send(self.topic, msg)\n self.flush()\n #print(\"Sleeping\")\n time.sleep(1)", "def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)", "def test_test_body_with_file_schema(self):\n pass", "def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = item['pk']\n\n # Add a new row.\n session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()", "def load_json_schema(filename):\n relative_path = join('../schema', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(\n schema_file.read(), base_uri=base_uri, jsonschema=True)", "def convert_json_to_flatbuffer_binary(json, schema, out_dir):\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)", "def _load_data_from_file(self, input_file_path):\n with FileOrBufferHandler(input_file_path, 'r', \n encoding=self.file_encoding) as input_file:\n try:\n data = json.load(input_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n input_file.seek(0)\n data = data_utils.read_json(\n data_generator=input_file,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n return data", "def load_schema(self, schema_file):\n with open(schema_file) as fp:\n for line in io.lines_in(fp):\n parts = line.strip().split('\\t')\n if len(parts) != 3:\n raise ValueError('invalid type declaration %r' % line.strip())\n self.declare_relation(parts[0], parts[1], parts[2])", "def transform_file(input_file_path: str, output_file_path: str):\n\n # Open json\n with open(input_file_path, mode=\"r\") as in_file:\n input_data = json.load(in_file)\n\n # Transform and write\n with jsonlines.open(output_file_path, mode=\"w\", compact=True) as out_file:\n for item in input_data[\"items\"]:\n out_file.write(transform_item(item))", "def parse(json_string):\n try:\n json_data = json.loads(json_string)\n except Exception as exn:\n raise SchemaParseException(\n 'Error parsing schema from JSON: %r. 
'\n 'Error message: %r.'\n % (json_string, exn))\n\n # Initialize the names object\n names = Names()\n\n # construct the Avro Schema object\n return schema_from_json_data(json_data, names)", "def get_inputs_from_file(filename=\"\"):\n import json\n\n with open(filename) as input_text:\n json_obj = json.load(input_text)\n return json_obj", "def json_input():\r\n with open (FILE_INPUT,'rb') as file:\r\n data = json.load(file)\r\n return data['input_data'][0]", "def intermediary_to_schema(tables, relationships, output):\n dot_file = _intermediary_to_dot(tables, relationships)\n #graph = AGraph()\n #graph = graph.from_string(dot_file)\n extension = output.split('.')[-1]\n #graph.draw(path=output, prog='dot', format=extension)\n #Source.from_file(filename, engine='dot', format=extension)\n return Source(dot_file, engine='dot', format=extension)", "def populate_from_json(db: Session, model: DatabaseModel, json_file: str):\n data_exists = db.query(model).first()\n if data_exists:\n return\n with open(json_file, \"r\") as file:\n data = json.loads(file.read())\n for obj in data:\n db.add(model(**obj))\n db.commit()", "def parse_and_format(file_in_path: str, file_out_path: str) -> None:\n\n with open(file_in_path) as input:\n payload = json.load(input)\n\n records = [\"# Data is in the format:\",\n \"# kem_id,kdf_id,aead_id,info,skRm,skEm,pkRm,pkEm,exporter_context,L,exported_value\"]\n\n for key in payload:\n # Skip these to test only capabilities exposed by BoringSSL\n if (key[\"mode\"] != MODE_BASE or\n key[\"kem_id\"] != KEM_DHKEM_X25519_SHA256 or\n key[\"kdf_id\"] != KDF_HKDF_SHA256 or\n key[\"aead_id\"] == AEAD_EXPORT_ONLY):\n continue\n\n for exportKey in key[\"exports\"]:\n records.append(\"{},{},{},{},{},{},{},{},{},{},{}\"\n .format(str(key[\"kem_id\"]),\n str(key[\"kdf_id\"]),\n str(key[\"aead_id\"]),\n str(key[\"info\"]),\n str(key[\"skRm\"]),\n str(key[\"skEm\"]),\n str(key[\"pkRm\"]),\n str(key[\"pkEm\"]),\n str(exportKey[\"exporter_context\"]),\n str(exportKey[\"L\"]),\n str(exportKey[\"exported_value\"])))\n\n\n with open(file_out_path, \"w\") as output:\n output.write(\"\\n\".join(records))", "def jsons_to_table(dir_jsons, dir_out, name, format='html'):\n # sanity of paths\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix='.{}'.format(format))\n # reading JSON files\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n # DataFrame conversion\n df = pd.DataFrame.from_dict(table)\n # writing HTML table\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)", "def decide_schema(self, json_data):\n pass", "def decide_schema(self, json_data):\n pass", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, 
input_file):\n with open(input_file) as f:\n return json.load(f)", "def all_to_intermediary(filename_or_input, schema=None):\n # Try to convert from the name of the class\n input_class_name = filename_or_input.__class__.__name__\n try:\n this_to_intermediary = switch_input_class_to_method[input_class_name]\n tables, relationships = this_to_intermediary(filename_or_input)\n return tables, relationships\n except KeyError:\n pass\n\n # try to read markdown file.\n if isinstance(filename_or_input, basestring):\n if filename_or_input.split('.')[-1] == 'er':\n return markdown_file_to_intermediary(filename_or_input)\n\n # try to read a markdown in a string\n if not isinstance(filename_or_input, basestring):\n if all(isinstance(e, basestring) for e in filename_or_input):\n return line_iterator_to_intermediary(filename_or_input)\n\n # try to read DB URI.\n try:\n make_url(filename_or_input)\n return database_to_intermediary(filename_or_input, schema=schema)\n except ArgumentError:\n pass\n\n msg = 'Cannot process filename_or_input {}'.format(input_class_name)\n raise ValueError(msg)", "def __csv_schema_generator(file):\n try:\n # Parses the first line of the file to get all the headers.\n metadata = str(file.readline().decode('utf-8')).strip().split(',')\n # Will be further implemented in phase 3.\n return SchemaGenerator.__build_schema(metadata)\n except Exception as e:\n logging.error('Failed to parse csv file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from csv file.\")", "def schema_load(filename):\n print(uc.schema_load(filename))", "def read_json_schema(schema_file_path):\n with open(schema_file_path) as f:\n schema = json.load(f)\n return schema", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_data(cls, input_file): # 这里是对文件的处理\r\n with open(input_file, encoding='utf-8') as f:\r\n lines = []\r\n\r\n for line in f:\r\n line = json.loads(line)\r\n words = ' '.join(list(line['natural']))\r\n labels = ' '.join(line['tag_seq'])\r\n poss = line['pos_seq']\r\n dps = line['dp_seq']\r\n head = line['head_seq']\r\n lines.append([labels, words, poss, dps, head])\r\n\r\n return lines", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def load_from_json(self, json_fp: str):\n # TODO:\n pass", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))", "def from_json(self, file_path):\n with open(file_path) as file:\n jsonstr = file.read()\n handler_dict = json.loads(jsonstr)\n self.from_dict(handler_dict)", "def import_schemas_from_file():\n with open('./tblSchemas') as schemas_file:\n schemas = {}\n for line in schemas_file:\n line = line.split()\n if len(line) == 0: continue\n if line[0] == 'tblname':\n tbl_name = line[1]\n schemas[tbl_name] = []\n else:\n schemas[tbl_name].append(line)\n return schemas", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def convert_schema(bco, filename, mapping_dict):\n for item in mapping_dict['to_swap']:\n value_list = mapping_dict['to_swap'][item]\n for i in range(0, len(value_list)):\n rename_dict_key_HELPER(bco, value_list[i]['index'],\n value_list[i]['value']) # Change key 
name\n\n for item in mapping_dict['to_delete']:\n value_list = mapping_dict['to_delete'][item]\n for i in range(0, len(value_list)):\n delete_key_HELPER(bco, value_list[i]['index'], value_list[i]['value']) # delete key\n\n for item in mapping_dict['to_add']:\n value_list = mapping_dict['to_add'][item]\n for i in range(0, len(value_list)):\n set_key_in_dict_HELPER(bco, value_list[i]['index'], value_list[i]['value']) # add key\n\n new_bco = bco\n try:\n new_bco['provenance_domain'][\n 'modified'] = datetime.now().isoformat() # change date to current\n\n temp_bco = dict(new_bco)\n del temp_bco['object_id'], temp_bco['etag'], temp_bco['spec_version']\n\n new_bco['spec_version'] = \"https://w3id.org/ieee/ieee-2791-schema/2791object.json\"\n new_bco[\"etag\"] = sha256(json.dumps(temp_bco).encode('utf-8')).hexdigest()\n except KeyError: # Vital field was missing, will be caught by final error checker\n pass\n file = open(filename, \"w\")\n json.dump(new_bco, file, indent=4)", "def test_json():\n schemas = {\n 'schema-languages': 'bible/languages.json',\n 'schema-book-metadata': 'bible/book-metadata.json',\n 'schema-bible': 'bible/bible-*.json'\n }\n for schema_name, data_path_glob in schemas.items():\n schema_path = 'schemas/{}.json'.format(schema_name)\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n data_paths = glob.iglob(data_path_glob)\n for data_path in data_paths:\n with open(data_path) as data_file:\n data = json.load(data_file)\n yield jsonschema.validate, data, schema", "def _load_schema(self, json_schema):\n # use jsonrefs to resolve all $refs in json\n data = jsonref.loads(json.dumps(json_schema))\n return self.__initialise_template(data)", "def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)", "def get_schema(filename: str) -> dict:\n return _load_json_schema(filename)", "def _import(format, input, config):\n if input:\n with open(input, 'rb') as f:\n data = f.read()\n else:\n data = sys.stdin.read()\n\n dataset = tablib.Dataset()\n setattr(dataset, format, data)\n\n _add_changelogs(config, dataset.dict)", "def read_json(self, filename, multi_line=False, schema=None):\n self.logger.info(\"# Reading a JSON file \" + filename)\n sql_context = SQLContext(self.spark)\n df = None\n if schema is None and multi_line == False:\n df = sql_context.read.json(filename)\n elif multi_line:\n df = sql_context.read.json(filename, multiLine=multi_line)\n elif schema is not None:\n df = sql_context.read.json(filename, schema=schema)\n return df", "def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path {relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)", "def readjamschema(schema):\n raise NotImplementedError(msg)", "def load(file_name):\n with open(file_name, newline='') as f:\n reader = csv.reader(f)\n data = list(reader)\n\n schema = [x.strip() for x in data[0]]\n table = [[int(el) for el in row] for row in data[1:]]\n\n return schema, table", "def get_schema(path):\n with open(path, 'r') as f:\n return json.load(f)", "def 
test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def create_table_from_file():\n\n full_path = os.getcwd()\n file_name = full_path + \"/inventory/inventory.csv\"\n\n if os.path.exists(file_name):\n table = data_manager.get_table_from_file(file_name)\n\n else:\n ui.print_error_message(\"There is no file to read!\")\n table = []\n\n return table", "def tsv_to_json(tsv_file, json_file):\n import csv\n import json\n\n try:\n with open(tsv_file, 'r') as tsvFile:\n file_reader = csv.DictReader(tsvFile, dialect='excel-tab')\n row_list = list(file_reader)\n with open(json_file, 'w+') as jsonFile:\n jsonFile.write(json.dumps(row_list, indent=4))\n return 1\n except (ValueError, FileNotFoundError):\n return 0", "async def _process_create_data(self, data: dict) -> dict:\n return self.SCHEMA(data)", "def read_data(self):\n if self._file.is_file():\n try:\n self._data = read_json_file(self._file)\n except (OSError, json.JSONDecodeError):\n _LOGGER.warning(\"Can't read %s\", self._file)\n self._data = {}\n\n # Validate\n try:\n self._data = self._schema(self._data)\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't parse %s: %s\",\n self._file, humanize_error(self._data, ex))\n\n # Reset data to default\n _LOGGER.warning(\"Reset %s to default\", self._file)\n self._data = self._schema({})", "def _mashup_json_to_table(json_obj, col_config=None):\n\n dataTable = Table(masked=True)\n absCorr = None\n\n if not all(x in json_obj.keys() for x in ['fields', 'data']):\n raise KeyError(\"Missing required key(s) 'data' and/or 'fields.'\")\n\n for col, atype in [(x['name'], x['type']) for x in json_obj['fields']]:\n\n # Removing \"_selected_\" column\n if col == \"_selected_\":\n continue\n\n # reading the colum config if given\n ignoreValue = None\n if col_config:\n colProps = col_config.get(col, {})\n ignoreValue = colProps.get(\"ignoreValue\", None)\n\n # making type adjustments\n if atype == \"string\":\n atype = \"str\"\n ignoreValue = \"\" if (ignoreValue is None) else ignoreValue\n if atype == \"boolean\":\n atype = \"bool\"\n if atype == \"int\": # int arrays do not admit Non/nan vals\n atype = np.int64\n ignoreValue = -999 if (ignoreValue is None) else ignoreValue\n if atype == \"date\":\n atype = \"str\"\n ignoreValue = \"\" if (ignoreValue is None) else ignoreValue\n\n # Make the column list (don't assign final type yet or there will be errors)\n colData = np.array([x.get(col, ignoreValue) for x in json_obj['data']], dtype=object)\n if ignoreValue is not None:\n colData[np.where(np.equal(colData, None))] = ignoreValue\n\n # no consistant way to make the mask because np.equal fails on ''\n # and array == value fails with None\n if atype == 'str':\n colMask = (colData == ignoreValue)\n else:\n colMask = np.equal(colData, ignoreValue)\n\n # add the column\n dataTable.add_column(MaskedColumn(colData.astype(atype), name=col, mask=colMask))\n\n return dataTable", "def load_dict_to_delta_table(spark, s3_data_bucket, table_schema, table_name, data, overwrite=False):\n table_to_col_names_dict = {}\n table_to_col_names_dict[\"transaction_fabs\"] = TRANSACTION_FABS_COLUMNS\n table_to_col_names_dict[\"transaction_fpds\"] = TRANSACTION_FPDS_COLUMNS\n 
table_to_col_names_dict[\"transaction_normalized\"] = list(TRANSACTION_NORMALIZED_COLUMNS)\n table_to_col_names_dict[\"awards\"] = list(AWARDS_COLUMNS)\n table_to_col_names_dict[\"financial_accounts_by_awards\"] = list(FINANCIAL_ACCOUNTS_BY_AWARDS_COLUMNS)\n\n table_to_col_info_dict = {}\n for tbl_name, col_info in zip(\n (\"transaction_fabs\", \"transaction_fpds\"), (TRANSACTION_FABS_COLUMN_INFO, TRANSACTION_FPDS_COLUMN_INFO)\n ):\n table_to_col_info_dict[tbl_name] = {}\n for col in col_info:\n table_to_col_info_dict[tbl_name][col.dest_name] = col\n\n # Make sure the table has been created first\n call_command(\n \"create_delta_table\",\n \"--destination-table\",\n table_name,\n \"--alt-db\",\n table_schema,\n \"--spark-s3-bucket\",\n s3_data_bucket,\n )\n\n if data:\n insert_sql = f\"INSERT {'OVERWRITE' if overwrite else 'INTO'} {table_schema}.{table_name} VALUES\\n\"\n row_strs = []\n for row in data:\n value_strs = []\n for col_name in table_to_col_names_dict[table_name]:\n value = row.get(col_name)\n if isinstance(value, (str, bytes)):\n # Quote strings for insertion into DB\n value_strs.append(f\"'{value}'\")\n elif isinstance(value, (date, datetime)):\n # Convert to string and quote\n value_strs.append(f\"\"\"'{value.isoformat()}'\"\"\")\n elif isinstance(value, bool):\n value_strs.append(str(value).upper())\n elif isinstance(value, (Sequence, Set)):\n # Assume \"sequences\" must be \"sequences\" of strings, so quote each item in the \"sequence\"\n value = [f\"'{item}'\" for item in value]\n value_strs.append(f\"ARRAY({', '.join(value)})\")\n elif value is None:\n col_info = table_to_col_info_dict.get(table_name)\n if (\n col_info\n and col_info[col_name].delta_type.upper() == \"BOOLEAN\"\n and not col_info[col_name].handling == \"leave_null\"\n ):\n # Convert None/NULL to false for boolean columns unless specified to leave the null\n value_strs.append(\"FALSE\")\n else:\n value_strs.append(\"NULL\")\n else:\n value_strs.append(str(value))\n\n row_strs.append(f\" ({', '.join(value_strs)})\")\n\n sql = \"\".join([insert_sql, \",\\n\".join(row_strs), \";\"])\n spark.sql(sql)", "def convert_json_1(json):\n\n # TODO Add batch details to json format\n # TODO Get default direct entry batch details if not provided\n\n LOGGER.debug('convert json message:%s', json)\n direct_entry = {\n 'record_type': '1',\n 'reel_seq_num': '01',\n 'name_fin_inst': 'SUN',\n 'user_name': 'hello',\n 'user_num': '123456',\n 'file_desc': 'payroll',\n 'date_for_process': datetime.strptime(json['post_date'], '%Y-%m-%d').strftime('%d%m%y'),\n 'bsb_number': json['to_routing'],\n 'account_number': json['to_account'],\n 'indicator': ' ',\n 'tran_code': '13' if json['tran_type'] == 'db' else '53',\n 'amount': '{amount:010}'.format(amount=json['amount']), # $2.00\n 'account_title': json['to_name'],\n 'lodgement_ref': json['to_description'],\n 'trace_bsb_number': json['from_routing'],\n 'trace_account_number': json['from_account'],\n 'name_of_remitter': json['from_name'],\n 'withholding_tax_amount': '00000000',\n }\n\n return direct_entry", "def create_datastructure_from_yaml_file(self):\n ## loading the YAML file\n try:\n with open(self.source) as f:\n hbaseSchemaDic = yaml.load(f) \n except:\n msg = \"Error: the HBase substructure could not be created. File %s could not be loaded. 
Please check the syntax of the '.yml' file.\" % self.source \n raise createDataStructureException(msg)\n status = \"failed\" \n\n try:\n c = Connection(host = self.host, port = int(self.port))\n tbls = c.tables()\n tbls = [str(t) for t in tbls]\n ## check that none of the tables already exists \n for t in hbaseSchemaDic.keys():\n if t in tbls:\n msg = \"Error: the table %s already exists. If you use starbase in python you can drop the table by using \\n>>> from starbase import Connection\\n>>> c = Connection()\\n>>> t = c.table(%s)\\n>>> t.drop()\" % (t,t) \n print(msg)\n status = \"failed\"\n raise createDataStructureException(msg)\n\n ## if none of the table(s) do(es) not exist, let's create them(it) \n for t in hbaseSchemaDic.keys():\n columnFamilies = hbaseSchemaDic[t]['columnFamilies'].keys()\n tC = c.table(t)\n tC.create(*columnFamilies)\n status = \"succeeded\"\n \n except:\n msg = \"Error: the HBase substructure could not be created. Please check your connection parameters or the syntax in your '.yml' file.\"\n raise createDataStructureException(msg)\n status = \"failed\"\n\n return(status)", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def read_json_breakdown(cls, fname):\n if not os.path.exists(fname):\n raise RuntimeError\n\n with open(fname, 'r') as data_file:\n return cls.fixup_from_json(data_file.read())", "def _meta_json_to_database(self):\n\n sqlalchemy_metadata = MetaData() # this is unrelated to our meta.json\n meta_table = Table('meta', sqlalchemy_metadata,\n Column('meta', String))\n\n sqlalchemy_metadata.create_all(self.engine)\n json_string = json.dumps(self.meta)\n ins = meta_table.insert().values(meta=json_string)\n conn = self.engine.connect()\n conn.execute(\"DELETE FROM meta;\")\n conn.execute(ins)", "def parse_migration_tables(self, tabels_schema: MigrationTablesSchema):\n try:\n self.source_table = tabels_schema.migrationTable.SourceTable.dict()\n self.destination_table = tabels_schema.migrationTable.DestinationTable.dict()\n self.columns = tabels_schema.migrationTable.MigrationColumns\n except Exception as err:\n logger.error(\"parse_migration_tables [error] -> %s\" % err)", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_json_file(cls, json_file):\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def create_deft_table_json_mappings():\n mappings = list()\n mappings.append(JsonColumnMapping(columnName=\"rownumber\", jsonPath=\"$.rownumber\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"rowguid\", jsonPath=\"$.rowguid\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdouble\", jsonPath=\"$.xdouble\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xfloat\", jsonPath=\"$.xfloat\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xbool\", jsonPath=\"$.xbool\", 
cslDataType=\"bool\"))\n mappings.append(JsonColumnMapping(columnName=\"xint16\", jsonPath=\"$.xint16\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint32\", jsonPath=\"$.xint32\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint64\", jsonPath=\"$.xint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint8\", jsonPath=\"$.xuint8\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint16\", jsonPath=\"$.xuint16\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint32\", jsonPath=\"$.xuint32\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint64\", jsonPath=\"$.xuint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xdate\", jsonPath=\"$.xdate\", cslDataType=\"datetime\"))\n mappings.append(JsonColumnMapping(columnName=\"xsmalltext\", jsonPath=\"$.xsmalltext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtext\", jsonPath=\"$.xtext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xnumberAsText\", jsonPath=\"$.xnumberAsText\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtime\", jsonPath=\"$.xtime\", cslDataType=\"timespan\"))\n mappings.append(\n JsonColumnMapping(columnName=\"xtextWithNulls\", jsonPath=\"$.xtextWithNulls\", cslDataType=\"string\")\n )\n mappings.append(\n JsonColumnMapping(columnName=\"xdynamicWithNulls\", jsonPath=\"$.xdynamicWithNulls\", cslDataType=\"dynamic\")\n )\n return mappings", "def create_schema(overwrite=False):\n if (not overwrite) and os.path.isfile(_schema_file):\n raise RuntimeError(\"Schema file already exists.\")\n schema = {\n 'title': 'obj',\n 'description': 'A mapping container for Obj 3D data.',\n 'type': 'object',\n 'required': ['vertices', 'faces'],\n 'definitions': {\n 'vertex': {\n 'description': 'Map describing a single vertex.',\n 'type': 'object', 'required': ['x', 'y', 'z'],\n 'additionalProperties': False,\n 'properties': {'x': {'type': _coord_type},\n 'y': {'type': _coord_type},\n 'z': {'type': _coord_type},\n 'red': {'type': _color_type},\n 'blue': {'type': _color_type},\n 'green': {'type': _color_type},\n 'w': {'type': _coord_type, 'default': 1.0}}},\n 'param': {\n 'description': 'Map describing a single parameter space point.',\n 'type': 'object', 'required': ['u', 'v'],\n 'additionalProperties': False,\n 'properties': {'u': {'type': _coord_type},\n 'v': {'type': _coord_type},\n 'w': {'type': _coord_type, 'default': 1.0}}},\n 'normal': {\n 'description': 'Map describing a single normal.',\n 'type': 'object', 'required': ['i', 'j', 'k'],\n 'additionalProperties': False,\n 'properties': {'i': {'type': _coord_type},\n 'j': {'type': _coord_type},\n 'k': {'type': _coord_type}}},\n 'texcoord': {\n 'description': 'Map describing a single texture vertex.',\n 'type': 'object', 'required': ['u'],\n 'additionalProperties': False,\n 'properties': {'u': {'type': _coord_type},\n 'v': {'type': _coord_type, 'default': 0.0},\n 'w': {'type': _coord_type, 'default': 0.0}}},\n 'point': {\n 'description': 'Array of vertex indices describing a set of points.',\n 'type': 'array', 'minItems': 1,\n 'items': {'type': _index_type}},\n 'line': {\n 'description': ('Array of vertex indices and texture indices '\n + 'describing a line.'),\n 'type': 'array', 'minItems': 2,\n 'items': {'type': 'object', 'required': ['vertex_index'],\n 'additionalProperties': False,\n 'properties':\n {'vertex_index': 
{'type': _index_type},\n 'texcoord_index': {'type': _index_type}}}},\n 'face': {\n 'description': ('Array of vertex, texture, and normal indices '\n + 'describing a face.'),\n 'type': 'array', 'minItems': 3,\n 'items': {'type': 'object', 'required': ['vertex_index'],\n 'additionalProperties': False,\n 'properties':\n {'vertex_index': {'type': _index_type},\n 'texcoord_index': {'type': _index_type},\n 'normal_index': {'type': _index_type}}}},\n 'curve': {\n 'description': 'Properties of describing a curve.',\n 'type': 'object', 'required': ['starting_param', 'ending_param',\n 'vertex_indices'],\n 'additionalProperties': False,\n 'properties': {\n 'starting_param': {'type': _coord_type},\n 'ending_param': {'type': _coord_type},\n 'vertex_indices': {\n 'type': 'array', 'minItems': 2,\n 'items': {'type': _index_type}}}},\n 'curve2D': {\n 'description': ('Array of parameter indices describine a 2D curve on '\n + 'a surface.'),\n 'type': 'array', 'minItems': 2,\n 'items': {'type': _index_type}},\n 'surface': {\n 'description': 'Properties describing a surface.',\n 'type': 'object', 'required': ['starting_param_u', 'ending_param_u',\n 'starting_param_v', 'ending_param_v',\n 'vertex_indices'],\n 'additionalProperties': False,\n 'properties': {\n 'starting_param_u': {'type': _coord_type},\n 'ending_param_u': {'type': _coord_type},\n 'starting_param_v': {'type': _coord_type},\n 'ending_param_v': {'type': _coord_type},\n 'vertex_indices': {\n 'type': 'array', 'minItems': 2,\n 'items': {'type': 'object', 'required': ['vertex_index'],\n 'additionalProperties': False,\n 'properties': {\n 'vertex_index': {'type': _index_type},\n 'texcoord_index': {'type': _index_type},\n 'normal_index': {'type': _index_type}}}}}}},\n 'properties': {\n 'material': {\n 'description': 'Name of the material to use.',\n 'type': ['unicode', 'string']},\n 'vertices': {\n 'description': 'Array of vertices.',\n 'type': 'array', 'items': {'$ref': '#/definitions/vertex'}},\n 'params': {\n 'description': 'Array of parameter coordinates.',\n 'type': 'array', 'items': {'$ref': '#/definitions/param'}},\n 'normals': {\n 'description': 'Array of normals.',\n 'type': 'array', 'items': {'$ref': '#/definitions/normal'}},\n 'texcoords': {\n 'description': 'Array of texture vertices.',\n 'type': 'array', 'items': {'$ref': '#/definitions/texcoord'}},\n 'points': {\n 'description': 'Array of points.',\n 'type': 'array', 'items': {'$ref': '#/definitions/point'}},\n 'lines': {\n 'description': 'Array of lines.',\n 'type': 'array', 'items': {'$ref': '#/definitions/line'}},\n 'faces': {\n 'description': 'Array of faces.',\n 'type': 'array', 'items': {'$ref': '#/definitions/face'}},\n 'curves': {\n 'description': 'Array of curves.',\n 'type': 'array', 'items': {'$ref': '#/definitions/curve'}},\n 'curve2Ds': {\n 'description': 'Array of curve2Ds.',\n 'type': 'array', 'items': {'$ref': '#/definitions/curve2D'}},\n 'surfaces': {\n 'description': 'Array of surfaces.',\n 'type': 'array', 'items': {'$ref': '#/definitions/surface'}}},\n 'dependencies': {\n 'lines': ['vertices'],\n 'faces': ['vertices'],\n 'curves': ['vertices'],\n 'curve2Ds': ['params'],\n 'surfaces': ['vertices']}}\n with open(_schema_file, 'w') as fd:\n encode_json(schema, fd, indent='\\t')", "def generate_package_from_report_filepath(input_path, options = None):\n try:\n vt_file = open(input_path, 'r')\n vt_dict = json.load(vt_file)\n except:\n print('\\nError: Error in parsing input file. 
Please check to ensure that it is valid JSON.')\n return\n \n return vtpack.vt_report_to_maec_package(vt_dict, options)", "def import_into_db(json_file):\n db = get_db()\n\n with current_app.open_resource(json_file) as f:\n reports = json.load(f)\n\n for element in reports[\"elements\"]:\n entry = (\n element[\"id\"],\n element[\"source\"],\n element[\"sourceIdentityId\"],\n element[\"state\"],\n element[\"created\"],\n element[\"reference\"][\"referenceId\"],\n element[\"reference\"][\"referenceType\"],\n element[\"payload\"][\"reportType\"],\n element[\"payload\"][\"reportId\"],\n element[\"payload\"][\"referenceResourceId\"],\n element[\"payload\"][\"referenceResourceType\"],\n element[\"payload\"][\"message\"],\n )\n db.execute(INSERT_INTO_REPORTS, entry)\n\n db.commit()", "def json_schema(schema_file=None, output=\"-\"):\n schemas = read_yaml(schema_file)\n dump_yaml(output, JsonSchemaConverterFromAccessSchema.convert_schemas(schemas))", "def from_json_file(cls, json_file):\n with open(json_file, 'r', encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))" ]
[ "0.6506698", "0.6255999", "0.6189637", "0.60093594", "0.59361964", "0.5880678", "0.5861754", "0.5762274", "0.56616706", "0.56331164", "0.5612857", "0.55776805", "0.5533528", "0.55065984", "0.55045223", "0.548444", "0.547434", "0.54314774", "0.5346988", "0.5327368", "0.5307453", "0.5267999", "0.52455944", "0.52326703", "0.52325064", "0.5219136", "0.5207536", "0.5187294", "0.5183731", "0.51769954", "0.51745486", "0.51707107", "0.5170645", "0.5158985", "0.51522464", "0.51227444", "0.51213485", "0.5113382", "0.5110196", "0.5099857", "0.5093744", "0.5089529", "0.5088614", "0.508273", "0.5077681", "0.5069668", "0.5065103", "0.5055063", "0.50539035", "0.50539035", "0.50469744", "0.50469744", "0.50306594", "0.50274265", "0.502586", "0.5005657", "0.5004016", "0.5004016", "0.4997759", "0.4997609", "0.4986268", "0.498196", "0.49683973", "0.49655133", "0.496366", "0.49594378", "0.49467462", "0.49407235", "0.4928758", "0.492373", "0.4923461", "0.49174178", "0.49124745", "0.48915982", "0.48891523", "0.48878503", "0.48864958", "0.48769042", "0.48686188", "0.48567456", "0.48541322", "0.48476815", "0.4841311", "0.48368114", "0.48329535", "0.48315528", "0.48306122", "0.48293504", "0.48196423", "0.4817549", "0.4817549", "0.4817549", "0.48168716", "0.48168716", "0.48162666", "0.4815631", "0.481176", "0.4809378", "0.48069832", "0.4803606" ]
0.53971493
18
Set projectId value for a BigQueryXXXRequests.
def SetProjectId(ref, args, request):
  del ref
  project = args.project or properties.VALUES.core.project.Get(required=True)
  project_ref = resources.REGISTRY.Parse(project, collection='bigquery.projects')
  request.projectId = project_ref.Name()
  return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_project_quotas(self, project_id, request_model, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.client.post(\n 'project-quotas/' + project_id,\n request_model=request_model,\n response_model_type=quota_models.ProjectQuotaModel,\n extra_headers=extra_headers,\n use_auth=use_auth, user_name=user_name)\n return resp", "def set_project(project_id):\n return fluent.set_project(project_id)", "def set_project_id(self, project_id):\n self._project_id = project_id", "def project_id(self, project_id):\n\n self._project_id = project_id", "def project_id(self, project_id):\n\n self._project_id = project_id", "def project(self, value):\n\n if self._project != value:\n self._project = value\n self._update_page()", "def scope_project(self, project_key):\n self.raw['scope'] = 'PROJECT'\n self.raw['projectKey'] = project_key\n return self", "def set_or_create_project(conn: BlitzGateway, project: Union[str, int],\n across_groups: Optional[bool] = True) -> int:\n if isinstance(project, str):\n project_id = post_project(conn, project)\n print(f'Created new Project:{project_id}')\n elif (isinstance(project, int)):\n project_id = project\n else:\n raise TypeError(\"'project' must be str or int\")\n return project_id", "def change_project(self, project, project_format='id'):\n name = 'tenant' if self.api_version == 2 else 'project'\n self.creds['%s_%s' % (name, project_format)] = project\n opposite_format = 'name' if project_format == 'id' else 'id'\n del self.creds['%s_%s' % (name, opposite_format)]", "def updateProject(self, projectId,payload):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.put(uri,payload)\n return response", "def project_name(self, project_name):\n\n self._project_name = project_name", "def project(self, project):\n self._project = project", "def project(self, project):\n self._project = project", "def set_keystone_v3_project(self, **kwargs):\n LOG_OBJ.debug(\"Creating the project.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects/\" + \\\n str(kwargs['project_id'])\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _project_info = {\"project\": {}}\n for argument in [\"name\", \"description\", \"domain_id\",\n \"enabled\", \"disabled\"]:\n try:\n _project_info['project'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_project_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the project\")\n print (\"No response from Server while set the project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Set project Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" Set project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def update_project(self, project_id, project):\n\n with self._transaction.cursor() as cur:\n # ensure this project exists\n cur.execute(\n \"SELECT project_id \"\n \"FROM barcodes.project \"\n \"WHERE project_id=%s;\",\n (project_id,))\n\n row = cur.fetchone()\n if row is None:\n raise NotFound(\"No project with ID %s\" % project_id)\n\n query = f\"\"\"\n UPDATE barcodes.project\n SET {p.DB_PROJ_NAME_KEY}=%s,\n {p.SUBPROJECT_NAME_KEY}=%s,\n {p.ALIAS_KEY}=%s,\n {p.IS_MICROSETTA_KEY}=%s,\n {p.SPONSOR_KEY}=%s,\n {p.COORDINATION_KEY}=%s,\n {p.CONTACT_NAME_KEY}=%s,\n 
{p.ADDTL_CONTACT_NAME_KEY}=%s,\n {p.CONTACT_EMAIL_KEY}=%s,\n {p.DEADLINES_KEY}=%s,\n {p.NUM_SUBJECTS_KEY}=%s,\n {p.NUM_TIMEPOINTS_KEY}=%s,\n {p.START_DATE_KEY}=%s,\n {p.BANK_SAMPLES_KEY}=%s,\n {p.PLATING_START_DATE_KEY}=%s,\n {p.DISPOSITION_COMMENTS_KEY}=%s,\n {p.COLLECTION_KEY}=%s,\n {p.IS_FECAL_KEY}=%s,\n {p.IS_SALIVA_KEY}=%s,\n {p.IS_SKIN_KEY}=%s,\n {p.IS_BLOOD_KEY}=%s,\n {p.IS_OTHER_KEY}=%s,\n {p.DO_16S_KEY}=%s,\n {p.DO_SHALLOW_SHOTGUN_KEY}=%s,\n {p.DO_SHOTGUN_KEY}=%s,\n {p.DO_RT_QPCR_KEY}=%s,\n {p.DO_SEROLOGY_KEY}=%s,\n {p.DO_METATRANSCRIPTOMICS_KEY}=%s,\n {p.DO_MASS_SPEC_KEY}=%s,\n {p.MASS_SPEC_COMMENTS_KEY}=%s,\n {p.MASS_SPEC_CONTACT_NAME_KEY}=%s,\n {p.MASS_SPEC_CONTACT_EMAIL_KEY}=%s,\n {p.DO_OTHER_KEY}=%s,\n {p.BRANDING_ASSOC_INSTRUCTIONS_KEY}=%s,\n {p.BRANDING_STATUS_KEY}=%s\n WHERE project_id=%s;\"\"\"\n\n cur.execute(query,\n (\n project.project_name,\n project.subproject_name,\n project.alias,\n project.is_microsetta,\n project.sponsor,\n project.coordination,\n project.contact_name,\n project.additional_contact_name,\n project.contact_email,\n project.deadlines,\n project.num_subjects,\n project.num_timepoints,\n project.start_date,\n project.bank_samples,\n project.plating_start_date,\n project.disposition_comments,\n project.collection,\n project.is_fecal,\n project.is_saliva,\n project.is_skin,\n project.is_blood,\n project.is_other,\n project.do_16s,\n project.do_shallow_shotgun,\n project.do_shotgun,\n project.do_rt_qpcr,\n project.do_serology,\n project.do_metatranscriptomics,\n project.do_mass_spec,\n project.mass_spec_comments,\n project.mass_spec_contact_name,\n project.mass_spec_contact_email,\n project.do_other,\n project.branding_associated_instructions,\n project.branding_status,\n project_id\n ))\n return cur.rowcount == 1", "def projects(self, projects):\n if (self.local_vars_configuration.client_side_validation and\n projects is not None and not isinstance(projects, int)):\n raise ValueError(\"Parameter `projects` must be an integer\") # noqa: E501\n\n self._projects = projects", "def set_project(\n name\n):\n if not is_alive():\n err_msg = \"Cannot connect to getML engine. 
Make sure the engine is running and you are logged in.\"\n raise ConnectionRefusedError(err_msg)\n\n cmd = dict()\n cmd[\"type_\"] = \"set_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)", "def _project(request, key):\n context = request.context\n if not context.project_id:\n raise exceptions.QuotaMissingTenant()\n return {key: {key + '_id': context.project_id}}", "def list_namespaced_project_request(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_project_request\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/projectrequests'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update_project(arn=None, name=None, defaultJobTimeoutMinutes=None):\n pass", "def projects(self, projects):\n\n self._projects = projects", "def project(self, project):\n\n self._project = project", "def project(self, project):\n\n self._project = project", "def project(self, project):\n\n self._project = project", "def project(self, project):\n\n self._project = project", "def create_namespaced_project_request(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_project_request\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_project_request`\")\n\n resource_path = '/oapi/v1/projectrequests'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = 
params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ProjectRequest',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setRequestId(self, reqid) :\n self.request_id = reqid", "def set_project(self, version):\n raise NotImplementedError(\"set_project is not implemented\")", "def test_projects_id_put(self):\n project = Project()\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def num_projects(self, num_projects):\n\n self._num_projects = num_projects", "def project_task_id(self, project_task_id):\n\n self._project_task_id = project_task_id", "def viewProject(self, projectId=None,size=None):\n\n uri = \"/v1/projects/\"\n if projectId:\n uri = uri + str(projectId)\n if size==0:\n uri =uri + \"?size=0\"\n response = self.client.get(uri)\n return response", "def set_network_quotas(self, name_or_id, **kwargs):\n\n proj = self.get_project(name_or_id)\n if not proj:\n raise exc.OpenStackCloudException(\"project does not exist\")\n\n self.network.update_quota(proj.id, **kwargs)", "def project_id(self, project_id):\n if project_id is None:\n raise ValueError(\"Invalid value for `project_id`, must not be `None`\") # noqa: E501\n\n self._project_id = project_id", "def testSetupProjectNoChange(self, mock_setproj, mock_setid,\n mock_chkproj, mock_check_billing):\n # Test project didn't change, and no need to setup client id/secret\n mock_chkproj.return_value = False\n self.gcp_env_runner.client_id = \"test_client_id\"\n self.gcp_env_runner._SetupProject(self.gcloud_runner)\n self.assertEqual(mock_setproj.call_count, 0)\n self.assertEqual(mock_setid.call_count, 0)\n mock_check_billing.assert_called_once()\n # Test project didn't change, but client_id is empty\n self.gcp_env_runner.client_id = \"\"\n self.gcp_env_runner._SetupProject(self.gcloud_runner)\n self.assertEqual(mock_setproj.call_count, 0)\n mock_setid.assert_called_once()\n self.assertEqual(mock_check_billing.call_count, 2)", "def _change_project(self):\n project_key = utils.prompt_string(\n 'You are currently managing Google Cloud Project {!r}.\\n'\n 'This project is currently saved as {!r}.\\n'\n 'All of the currently configured projects include: {}.\\n'\n 'Which project would you like to switch to?'.format(\n self._config.project, self._config.key,\n ', '.join(common.get_available_configs(self._config.path))))\n return _Manager.new(\n self._config.path, self._prefer_gcs, project_key=project_key,\n version=self._version)", "def testSetupProjectChanged(self, mock_setproj, mock_setid,\n mock_chkproj, mock_check_billing):\n mock_chkproj.return_value = True\n mock_setproj.return_value = True\n self.gcp_env_runner._SetupProject(self.gcloud_runner)\n mock_setproj.assert_called_once()\n mock_setid.assert_called_once()\n mock_check_billing.assert_called_once()", "def 
test_set_project_limits(self):\n pass", "def set_project_values(project, data):\n project.hashtag = data['hashtag']\n if 'name' in data and len(data['name']) > 0:\n project.name = data['name']\n else:\n project.name = project.hashtag.replace('-', ' ')\n if 'summary' in data and len(data['summary']) > 0:\n project.summary = data['summary']\n has_longtext = 'longtext' in data and len(data['longtext']) > 0\n if has_longtext:\n project.longtext = data['longtext']\n if 'autotext_url' in data and data['autotext_url'].startswith('http'):\n project.autotext_url = data['autotext_url']\n if not project.source_url or project.source_url == '':\n project.source_url = data['autotext_url']\n # MAX progress\n if 'levelup' in data and 0 < project.progress + data['levelup'] * 10 < 50:\n project.progress = project.progress + data['levelup'] * 10\n # return jsonify(data=data)\n if project.autotext_url is not None and not has_longtext:\n # Now try to autosync\n project = AddProjectData(project)\n return project", "def main() -> None:\n parser: argparse.ArgumentParser = argparse.ArgumentParser(\n description=\"Set default expiration for BigQuery datasets and optionally tables within a specified Google Cloud Project.\"\n )\n parser.add_argument(\"project_id\", help=\"Google Cloud Project ID\")\n parser.add_argument(\"dataset_name\", help=\"BigQuery Dataset Name\")\n parser.add_argument(\"-d\", \"--days\", type=int, required=True, help=\"Number of days for expiration\")\n\n # Create a mutually exclusive group for --all-tables and --table\n table_group = parser.add_mutually_exclusive_group()\n table_group.add_argument(\"-a\", \"--all-tables\", action=\"store_true\", help=\"Set expiration for all tables\")\n table_group.add_argument(\"-t\", \"--table\", type=str, help=\"Regex pattern for tables to set expiration\")\n\n parser.add_argument(\"-s\", \"--skip-tables\", type=str, help=\"Regex pattern to skip tables that should not be affected\")\n parser.add_argument(\"-n\", \"--dry-run\", action=\"store_true\", help=\"Dry run, show changes without applying them\")\n args: argparse.Namespace = parser.parse_args()\n\n try:\n client: bigquery.Client = bigquery.Client(project=args.project_id)\n dataset_id = f\"{args.project_id}.{args.dataset_name}\"\n dataset_obj: bigquery.Dataset = client.get_dataset(dataset_id) # Fetch dataset directly\n\n # Only change the dataset's default expiration if neither --all-tables nor --table is provided\n if not (args.all_tables or args.table):\n set_expiration(client, dataset_obj, args.days, args.dry_run)\n\n if args.all_tables or args.table:\n handle_tables(client, dataset_obj, args.days, args.table, args.skip_tables, args.dry_run)\n\n except KeyboardInterrupt:\n print(\"\\nOperation canceled by user. 
Exiting...\")\n sys.exit(0)\n except NotFound as e:\n print(f\"Error: {str(e)}\")\n sys.exit(1)", "def set_tenant_info(self, project_name, token_domain,\n token_project, project_id=None):\n old_project_info = (self.project_info[\"project_name\"],\n self.project_info[\"token_domain\"],\n self.project_info[\"token_project\"],\n self.project_info['project_id'])\n # Set the new project info.\n self.project_info['project_name'] = project_name\n self.project_info[\"token_domain\"] = token_domain\n self.project_info[\"token_project\"] = token_project\n if not project_id:\n if config.keystone_api_version == \"v3\":\n project_id = self.get_keystone_v3_project_id(project_name)\n else:\n project_id = self.get_tenant_id(project_name)\n self.project_info[\"project_id\"] = project_id\n\n LOG_OBJ.debug(\"Successfully set the project info for project: %s\" %\n project_name)\n return old_project_info", "def request_id(self, request_id):\n\n self._request_id = request_id", "def request_id(self, request_id):\n\n self._request_id = request_id", "def request_id(self, request_id):\n\n self._request_id = request_id", "def set_project(self, project: Optional[Project] = None, force: bool = False, get_only: bool = False, **kwargs):\n if self.project is not None and not force:\n info('Project is already set, to override use force=True')\n return self.project\n\n if project is None:\n project = Project(**kwargs)\n\n assert project.name is not None, 'Project name cannot be none'\n\n # does the project exist ?\n self.project = self.protocol.get_project(project)\n\n if self.project is not None:\n return self.project\n\n if get_only:\n raise RuntimeError(f'Project (name: {project.name}) was not found!')\n\n self.project = self.protocol.new_project(project)\n\n debug(f'set project to (project: {self.project.name})')\n return self.project", "def test_projects_put(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def replace_namespaced_project(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_project`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_project`\")\n\n resource_path = '/oapi/v1/projects/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header 
`Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Project',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_project_quotas(self, project_id, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.client.get(\n 'project-quotas/' + project_id,\n response_model_type=quota_models.ProjectQuotaModel,\n extra_headers=extra_headers,\n use_auth=use_auth, user_name=user_name)\n return resp", "def __init__(__self__, *,\n project: Optional[pulumi.Input[str]] = None):\n if project is not None:\n pulumi.set(__self__, \"project\", project)", "def project_id(self) -> int:\n return pulumi.get(self, \"project_id\")", "def get_project_id(self, session, **kwargs):\n return None", "def _get_project_id():\n\n extras = BaseHook.get_connection('google_cloud_default').extra_dejson\n key = 'extra__google_cloud_platform__project'\n if key in extras:\n project_id = extras[key]\n else:\n raise ('Must configure project_id in google_cloud_default '\n 'connection from Airflow Console')\n return project_id", "def ModifyProxiesProject(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyProxiesProject\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyProxiesProjectResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def set_project_id(self, old_project=None, new_project=None):\n # cancel if one is no project object\n old_is_project = type(old_project) is Project\n new_is_project = type(new_project) is Project\n\n if not old_is_project or not new_is_project:\n return False\n\n # check id\n id_exists = new_project.project_id() in [\n p.project_id() for p in self.project_list\n ]\n id_is_own = old_project.project_id() == new_project.project_id()\n title_is_empty = new_project.title == ''\n\n # cancel with true, if there's no need to change the title + client_id\n if id_is_own:\n return True\n\n # cancel\n # the id already exists\n # or the new title is empty\n if id_exists or title_is_empty:\n return False\n\n # rename the file\n renamed = self.rename_project_file(\n old_project=old_project,\n new_project=new_project\n )\n\n if renamed is False:\n return False\n\n # get its index\n index = self.get_project_index(old_project)\n\n # change the title and client of the original project to the new title\n self.project_list[index].client_id = new_project.client_id\n self.project_list[index].title = new_project.title\n\n # get new project and save it\n self.save_project_to_file(project=self.project_list[index])\n\n return True", "def test_projects_patch(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='PATCH',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def patch(self, project_id):\n authenticated_user_id = token_auth.current_user()\n if not ProjectAdminService.is_user_action_permitted_on_project(\n authenticated_user_id, project_id\n ):\n return {\n \"Error\": \"User is 
not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n try:\n project_dto = ProjectDTO(request.get_json())\n project_dto.project_id = project_id\n project_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"Error validating request: {str(e)}\")\n return {\"Error\": \"Unable to update project\", \"SubCode\": \"InvalidData\"}, 400\n\n try:\n ProjectAdminService.update_project(project_dto, authenticated_user_id)\n return {\"Status\": \"Updated\"}, 200\n except InvalidGeoJson as e:\n return {\"Invalid GeoJson\": str(e)}, 400\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def project_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_id\")", "def set_review_request_field(self, review_request, field, value):\r\n rid = review_request['id']\r\n\r\n self.debug('Attempting to set field \"%s\" to \"%s\" for review request \"%s\"' %\r\n (field, value, rid))\r\n\r\n self.api_call('api/review-requests/%s/draft/set/' % rid,\r\n {field: value})", "def edit_project_vm_size(\n project_name: str,\n config_client: ConfigAzureClient = Depends(get_config_azure_client),\n vm_size: VMSizes | Literal[\"\"] = Body(embed=True),\n):\n config_client.set_project_vm_size(project_name, vm_size or None)", "def test_projects_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_project_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project',\n json=expected_response,\n status=200\n )\n resp = requests.get(f'{os.environ[\"AIVEN_API_URL\"]}/v1/project')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == f'{os.environ[\"AIVEN_API_URL\"]}/v1/project'\n assert \"MY-PROJECT-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def project_id() -> str:\n project_id = os.environ[\"GOOGLE_CLOUD_PROJECT\"]\n\n if not project_id:\n raise MissingProjectIdError(\n \"Set the environment variable \"\n + \"GCLOUD_PROJECT to your Google Cloud Project Id.\"\n )\n return project_id", "def rename_project(request):\n data = json.loads(request.body.decode('utf-8'))\n try:\n proj = models.Project.objects.get(pk=data['projid'])\n except models.Project.DoesNotExist:\n return JsonResponse({'error': f'Project with that ID does not exist in DB'}, status=404)\n # check if new project not already exist, and user have permission for all dsets\n proj_exist = models.Project.objects.filter(name=data['newname'])\n if proj_exist.count():\n if proj_exist.get().id == proj.id:\n return JsonResponse({'error': f'Cannot change name to existing name for project {proj.name}'}, status=403)\n else:\n return JsonResponse({'error': f'There is already a project by that name {data[\"newname\"]}'}, status=403)\n if is_invalid_proj_exp_runnames(data['newname']):\n return JsonResponse({'error': f'Project name cannot contain characters except 
{settings.ALLOWED_PROJEXPRUN_CHARS}'}, status=403)\n dsets = models.Dataset.objects.filter(runname__experiment__project=proj)\n if not all(check_ownership(request.user, ds) for ds in dsets):\n return JsonResponse({'error': f'You do not have the rights to change all datasets in this project'}, status=403)\n # queue jobs to rename project, update project name after that since it is needed in job for path\n create_job('rename_top_lvl_projectdir', newname=data['newname'], proj_id=data['projid'])\n proj.name = data['newname']\n proj.save()\n return JsonResponse({})", "def subproject(self, subproject):\n\n self.logger.debug(\"In 'subproject' setter.\")\n\n self._subproject = subproject", "def get_project_ids(self, node=None, name=None):\n project_ids = []\n queries = []\n # Return all project_ids in the data commons if no node is provided or if node is program but no name provided\n if name == None and ((node == None) or (node == \"program\")):\n print(\"Getting all project_ids you have access to in the data commons.\")\n if node == \"program\":\n print(\n \"Specify a list of program names (name = ['myprogram1','myprogram2']) to get only project_ids in particular programs.\"\n )\n queries.append(\"\"\"{project (first:0){project_id}}\"\"\")\n elif name != None and node == \"program\":\n if isinstance(name, list):\n print(\n \"Getting all project_ids in the programs '\" + \",\".join(name) + \"'\"\n )\n for program_name in name:\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (program_name)\n )\n elif isinstance(name, str):\n print(\"Getting all project_ids in the program '\" + name + \"'\")\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (name)\n )\n elif isinstance(node, str) and isinstance(name, str):\n print(\n \"Getting all project_ids for projects with a path to record '\"\n + name\n + \"' in node '\"\n + node\n + \"'\"\n )\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"%s\",submitter_id:\"%s\"}){project_id}}\"\"\"\n % (node, name)\n )\n elif isinstance(node, str) and name == None:\n print(\n \"Getting all project_ids for projects with at least one record in the node '\"\n + node\n + \"'\"\n )\n query = \"\"\"{node (first:0,of_type:\"%s\"){project_id}}\"\"\" % (node)\n df = pd.json_normalize(self.sub.query(query)[\"data\"][\"node\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n if len(queries) > 0:\n for query in queries:\n res = self.sub.query(query)\n df = pd.json_normalize(res[\"data\"][\"project\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n my_ids = sorted(project_ids, key=str.lower)\n print(my_ids)\n return my_ids", "def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']", "def project_id(self) -> Optional[str]:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> str:\n return pulumi.get(self, \"project_id\")", "def set_projects(self, name_short, name, disc_path):\n if name 
not in conf.projects:\n pass # TODO add the project in the conf\n else:\n return \"Project already exist\"\n return self.datas.create_path(disc_path)", "def increment_request_limit(self, _request, client_id, client_email):\n # Avoid value explosision and protect PII info\n if not framework_helpers.IsServiceAccount(client_email):\n client_email = '[email protected]'\n self.api_requests.increment_by(\n 1, {'client_id': client_id, 'client_email': client_email})", "def test_set_project(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )\n\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 1)\n obj = AppSetting.objects.get(name=setting_name, project=self.project)\n self.assertEqual(obj.get_value(), 'value')\n tl_event = ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n self.assertIsNotNone(tl_event)\n self.assertEqual(tl_event.classified, True)\n self.assertEqual(tl_event.extra_data, {'value': 'value'})", "def test_projects_id_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def put(self):\n # fetch parameter\n get_parser = reqparse.RequestParser(bundle_errors=True)\n get_parser.add_argument(\"user_id\", required=True, help=\"User ID required to acccess the user's favourite projects\")\n get_parser.add_argument(\"project_id\", required=True, help=\"Project ID required to remove a project\")\n args = get_parser.parse_args(strict=True)\n\n # get user_id and project_id\n user_id = args[\"user_id\"]\n project_id = args[\"project_id\"]\n\n # convert parameter ids into objectids\n try:\n user_id = ObjectId(user_id)\n project_id = ObjectId(project_id)\n except:\n return {\"message\": \"invalid user id or project id\"}, 400\n\n # add project to the user's favourites \n if ('user_id' or 'project_id') not in args.keys():\n return {\"message\": \"both user and project id are required\"}, 400\n else:\n # check if user is valid\n user = self.users.find_one({\"_id\": user_id})\n project = self.projects.find_one({\"_id\": project_id})\n if user is None:\n return {\"message\": \"user not found\"}, 404\n elif project is None:\n return {\"message\": \"project not found\"}, 404\n else:\n # remove project from the user's favourites\n user_favourites = self.favourites.find_one({\"user_id\": user_id})\n if user_favourites is None:\n return {\"message\": \"user does not have any favourite projects\"}, 400\n else:\n new_favourite_list = user_favourites[\"favourite_projects\"]\n\n # try to remove the project if it is in the favourites\n try:\n new_favourite_list.remove(project)\n except:\n return {\"message\": \"the project is not in the favourites list\"}, 400\n\n if new_favourite_list is None:\n new_favourite_list = []\n\n updated_list = {\"favourite_projects\": new_favourite_list}\n self.favourites.update({\"user_id\": user_id}, {\"$set\": updated_list}, upsert=False)\n \n return 
{\"status\": \"project has been removed from favourites successfully\"}, 200", "def set_or_create_dataset(conn: BlitzGateway, project_id: Union[int, None],\n dataset: Union[str, int],\n across_groups: Optional[bool] = True\n ) -> Union[int, None]:\n if isinstance(dataset, str):\n if project_id:\n dataset_id = post_dataset(conn, dataset, project_id=project_id)\n else:\n dataset_id = post_dataset(conn, dataset)\n print(f'Created new Dataset:{dataset_id}')\n elif (isinstance(dataset, int)):\n dataset_id = dataset\n else:\n raise TypeError(\"'dataset' must be str or int\")\n return dataset_id", "def assign_project_to_client(self, project=None, client_id=None):\n is_project = type(project) is Project\n one_not_set = project is None or client_id is None\n client_exists = client_id in [c.client_id for c in self.client_list]\n client_is_self = client_id == project.client_id\n\n # cancel, if either:\n # argument is not a project\n # one argument not set\n # client_id already exists\n # client already assigned\n if not is_project or one_not_set or not client_exists or client_is_self:\n return False\n\n # assign the project to the new client\n self.delete_project_file(project=project)\n project.client_id = client_id\n self.save_project_to_file(project=project)\n return True", "def _get_project_id(self, request):\n project_id = request.environ[\"masakari.context\"].project_id\n if project_id in request.url:\n return project_id\n return ''", "def read_project(response):\n STORED_ID['project_id'] = response.json()[\"id\"]", "def patch_namespaced_project(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_project`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_project`\")\n\n resource_path = '/oapi/v1/projects/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Project',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_project(self, project):\n 
project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError", "def test_create_project_request(self):\n pass", "def update(self, request, pk=None):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project.objects.get(pk=pk)\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n #project.projectNote = Note.objects.get(pk=request.data['projectNote'])\n\n project.lotId = lot\n project.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def project(self):\n return self._properties.get(\"jobReference\", {}).get(\"projectId\")", "def test_list_project_request(self):\n pass", "def increment(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"\"\"INSERT OR REPLACE \n INTO projects (project, alias, called) \n VALUES \n (?,\n (SELECT alias FROM projects WHERE project=?),\n (SELECT called FROM projects WHERE project=?) + 1\n )\"\"\", (project, project, project,))", "def setJobId(self, jobid):\n self._ShREEKConfig.setJobId(jobid)", "def test_set_invalid_scope_project(self):\n setting_name = 'project_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def __init__(self, project_id, dataset_id, sandbox_dataset_id):\n desc = 'Sandbox and update invalid zip codes found in the observation table.'\n super().__init__(issue_numbers=['DC1633'],\n description=desc,\n affected_datasets=[cdr_consts.RDR],\n affected_tables=[OBSERVATION],\n project_id=project_id,\n dataset_id=dataset_id,\n sandbox_dataset_id=sandbox_dataset_id)", "def select_approved_projects(self):\r\n print \"Selecting approved projects... 
\"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")", "def create_project(project_id: str,\n admin_token: str,\n project_admin: str,\n csp: str='AWS',\n budget: float=10) -> dict:\n\n if budget == 10:\n print(\"Set the budget of Project {%s} to default value: $%.2f\" %(project_id, budget))\n print(\"Update the budget using: set_budget(amount)\")\n \n print('Cloud service provider: ', csp)\n\n if csp not in ['AWS', 'AZURE', 'GCP', 'PRIVATE']:\n print('CSP not supported: Current options: AWS, AZURE, GCP, PRIVATE')\n return\n\n resp = RestRequest(CredentialApi.create_project) \\\n .with_data({'project_id': project_id,\n 'budget': budget,\n 'admin_token': admin_token,\n 'csp': csp,\n \"project_admin_name\": project_admin}) \\\n .run()\n \n # cloud service provider is private cluster, create bucket accordingly\n if csp == 'PRIVATE':\n print(resp)\n bucket_name = resp['bucket_name']\n private_cluster = get_private_cluster()\n private_cluster.create_buckets(bucket_name)\n \n return resp", "def prompt(self, console: io.IO, step: str,\n args: Dict[str, Any]) -> Dict[str, Any]:\n prompter = GoogleNewProjectId()\n\n if args.get('use_existing_project', False):\n prompter = GoogleExistingProjectId(self.project_client,\n self.active_account)\n\n return prompter.prompt(console, step, args)", "def add_project(self, proj, i):\r\n self.__projects[i] = proj", "def RunWithArgs(self, resource):\n client = GetClientFromFlags()\n global_params = GetGlobalParamsFromFlags()\n request = messages.IamProjectsServiceAccountsSetIamPolicyRequest(\n resource=resource.decode('utf8'),\n )\n if FLAGS['setIamPolicyRequest'].present:\n request.setIamPolicyRequest = apitools_base.JsonToMessage(messages.SetIamPolicyRequest, FLAGS.setIamPolicyRequest)\n result = client.projects_serviceAccounts.SetIamPolicy(\n request, global_params=global_params)\n print apitools_base_cli.FormatOutput(result)", "def set_missing_id(self, data, **kwargs):\n if not data.get(\"project_id\"):\n data[\"project_id\"] = lambda: uuid.uuid4().hex\n\n return data", "def __init__(self, project_id, namespace=None):\n self._client = datastore.Client(project=project_id, namespace=namespace)" ]
[ "0.61926836", "0.6037495", "0.5940458", "0.58782727", "0.58782727", "0.5392154", "0.53831047", "0.5383003", "0.5351515", "0.5217758", "0.52084017", "0.51797605", "0.51797605", "0.51708114", "0.5099718", "0.5095747", "0.5076841", "0.50675255", "0.50642264", "0.5054526", "0.5043689", "0.5032281", "0.5032281", "0.5032281", "0.5032281", "0.50313395", "0.5015842", "0.50030303", "0.4980436", "0.4980237", "0.49698266", "0.48411617", "0.4812488", "0.4810658", "0.48073867", "0.4799563", "0.4795049", "0.4755343", "0.474582", "0.47347268", "0.4683711", "0.46784484", "0.46784484", "0.46784484", "0.465331", "0.46349317", "0.45823047", "0.45763767", "0.45722067", "0.4570607", "0.4568762", "0.4554865", "0.4549979", "0.45420465", "0.45371655", "0.4524065", "0.4508199", "0.4508199", "0.4508199", "0.4508199", "0.4508199", "0.45002657", "0.44787267", "0.44635788", "0.44400188", "0.44385755", "0.4427575", "0.44120008", "0.4407186", "0.44056413", "0.44025064", "0.44025064", "0.44025064", "0.44025064", "0.4394484", "0.4392009", "0.43757388", "0.4367188", "0.4354402", "0.43508613", "0.43481016", "0.43447968", "0.4344407", "0.43404222", "0.43345177", "0.43327397", "0.43312165", "0.43269286", "0.43181726", "0.43152732", "0.43124634", "0.43044785", "0.4302804", "0.43001324", "0.4299092", "0.4287585", "0.42819652", "0.42764556", "0.42677814", "0.42617783" ]
0.7350548
0
Ensure that view parameters are set properly tables create request.
def SetViewParameters(ref, args, request): del ref # unused if not args.view: request.table.view = None return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create(self, tables, views, schema_name, config):\n if not isinstance(tables, dict):\n return False # Raise Exception That Tables Are In A Wrong Format???!!!\n success = True\n if schema_name is not None:\n self._create_schema(schema_name)\n for table_name_instance in tables.items():\n if self._create_table(table_name_instance[1]) is False:\n success = False\n break\n if isinstance(views, dict):\n for view_name_instance in views.items():\n if self._create_view(view_name_instance[1], schema_name, config) is False:\n success = False\n break\n return success", "def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)", "def createViews(views):\n ...", "def create_view(self, repo, view, sql):\n return self.user_con.create_view(\n repo=repo, view=view, sql=sql)", "def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return", "def create_table(self):\n pass", "def pre_route_table_create(self, resource_dict):\n pass", "def create_all_views():\n cursor.execute(articleList)\n cursor.execute(goodViews)\n cursor.execute(authorsTitles)\n cursor.execute(titleViews)\n cursor.execute(dailyTotalView)\n cursor.execute(dailyErrorView)", "def post_route_table_create(self, resource_dict):\n pass", "def create_table_request_info(self):\n table_query = f\"\"\"\n Create Table If Not Exists Request_Info(\n {self.__fields[0]} INT AUTO_INCREMENT PRIMARY KEY,\n {self.__fields[1]} TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n {self.__fields[2]} CHAR(30),\n {self.__fields[3]} CHAR(30),\n {self.__fields[4]} CHAR(30) NULL,\n {self.__fields[5]} DATE,\n {self.__fields[6]} CHAR(15),\n {self.__fields[7]} CHAR(30),\n {self.__fields[8]} CHAR(30),\n {self.__fields[9]} CHAR(30),\n {self.__fields[10]} INT(32),\n {self.__fields[11]} CHAR(30),\n {self.__fields[12]} INT(32),\n {self.__fields[13]} VARCHAR(30))\n \"\"\"\n self.execute(table_query)", "def test_create_view_returns_empty(dummy_request):\n from learning_journal.views.default import new_entry\n assert new_entry(dummy_request) == {}", "def _create_view(self, view, schema=None, config=None):\n viewname, vschema = view[\"__tablename__\"].split(' ')[0], view[\"__schema__\"].split(' ')[0]\n try:\n dve = SQL('NULL from {}.{}').format(Identifier(vschema),\n Identifier(viewname))\n veq = self.__session.query(self._sql_to_string(dve)).limit(1)\n self.__session.execute(veq)\n self._commit()\n except ProgrammingError:\n self._rollback()\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('* FROM information_schema.routines')\n count = count.filter(like).count()\n if int(count) == 0:\n self._create_extension(config)\n self.exschema = 'public'\n else:\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('routine_schema FROM'\n ' information_schema.routines')\n count = count.filter(like).limit(1)\n count = self.__session.execute(count).fetchone()[0]\n self._commit()\n self.exschema = count\n like = text(\"SELECT has_schema_privilege(:exschema, 'USAGE')\")\n 
like = self.__session.execute(like,\n {\"exschema\": self.exschema}).fetchone()[0]\n self._commit()\n if not like:\n self._grant_access(config)\n viewst, raw = self._sql_to_string(view[\"__statement__\"]), '{}.crosstab'\n defsch = self._sql_to_string(SQL(raw).format(Identifier(schema)))\n exsch = SQL(raw).format(Identifier(self.exschema))\n self.__session.execute(viewst.replace(defsch, self._sql_to_string(exsch)))\n self._commit()\n except Exception:\n self._rollback()\n self._reset_session()\n raise", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def creates_view(self):\n return self.statements[0].creates_view()", "def test_create_view_returns_empty_dict_on_get(dummy_request):\n from learning_journal.views.default import create_view\n result = create_view(dummy_request)\n assert result == {}", "def prepare(self, request):\n pass", "def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")", "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def pre_interface_route_table_create(self, resource_dict):\n pass", "def post(self):\n args = parser.parse_args()\n table = TableDetails(args.get('table_size'))\n db.session.add(table)\n db.session.commit()\n return table, 201", "def create_tables( self ) :\n return self._create_tables", "def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')", "def test_create_table_successfully (self):\n\n new_table = self.wrapper.create_table(self.table, [self.bob, self.jane])\n self.assertIsNone(new_table)", "def create_table(self, param, timeout):\n _abstract()", "def create_table(self, param, timeout):\n _abstract()", "def view(name, selectable, *, clear: bool = False):\n log.debug('view(%r, clear=%r)', name, clear)\n\n if clear:\n DDL[name] = None, None\n return None\n\n DDL[name] = (CreateView(name, selectable),\n DropView(name))\n\n return make_table(selectable, name=name)", "def putTestData(self):\n # print 'Not Yet implement / sample DB table create'\n tkMessageBox.showinfo(\"Message\", \"Sample DB Table Create\")", "def prePresent(self, request):", "def beforeCreate(self):", "def create(self):", 
"def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])", "def columns_setup(self):\n self.required = None\n self.addition = None\n self.deletion = None\n self.retention = None\n self.rename = None", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def create_all_tables(self):\n pass", "def __init__(self, tables, views, config, schema=None, connection_retries=2):\n # pylint: disable=too-many-arguments\n self.__no_of_retries = connection_retries\n self._set_database_engine(config)\n self._set_session()\n self.exschema = schema\n\n if not self._is_session_valid():\n self._reset_session()\n\n if not self._create(tables, views, schema, config):\n raise DatabaseError.TableCreationError(\"Table creation failed. Check logs!\")", "def _init_check_database(self):\n # FIXME add additional checks, for example that columns in BY,\n # ACROSS, ON are not the same ? (see task structure notes)\n # also that location columns are not used\n if self.verbose:\n print('checking input database {}'.format(self.database))\n\n # check that required columns are present\n cols = set(self.db.columns)\n message = (\n ' argument is invalid, check that all the provided attributes '\n 'are defined in the database {}'.format(self.database))\n # the argument of issuperset needs to be a list ...\n assert cols.issuperset(self.on), 'ON' + message\n assert cols.issuperset(self.across), 'ACROSS' + message\n assert cols.issuperset(self.by), 'BY' + message\n\n for col in cols:\n assert '_' not in col, \\\n col + ': you cannot use underscore in column names'\n assert '#' not in col, \\\n col + ': you cannot use \\'#\\' in column names'\n\n if self.verbose:\n print(\"input database verified\")", "def __init__(self, view_name, cursor=None, schema=None):\n self.name = view_name\n self.type = 'view' # Saves using type() or isinstance\n self.columns = {}\n self.sql = ''\n self.triggers = {}\n if schema:\n self.schema = schema\n else:\n schema = None\n if cursor:\n self._get_view(cursor)", "def test_view_set_construction(empty_model):\n viewset = ViewSet(model=empty_model)\n assert viewset.model is empty_model\n assert count(viewset.dynamic_views) == 0", "def __init__( viewname, view ):", "def do_createuser(self, *args):\n self.connection_obj.initialize_table()\n print(\"UserTable Created Successful\")", "def create_tables():\r\n db = connect_database()\r\n table_wait = \"waiting\"\r\n table_helped = \"helped\"\r\n table_help = \"help\"\r\n param_name = ['cus_num', 'name', 'username', 'ru_id', 'os_platform', 'description']\r\n param_type1 = ['INTEGER PRIMARY KEY AUTOINCREMENT', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n param_type2 = ['INTEGER PRIMARY KEY', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n with db:\r\n create_table(db, table_wait, param_name, param_type1)\r\n create_table(db, table_helped, param_name, param_type2)\r\n create_table(db, table_help, param_name, param_type2)\r\n db.close()", "def __init__(self, request, **kwargs):\n super(PSIHDReport, self).__init__(request, **kwargs)\n calculate_fn = lambda key, _: key[len(self.place_types) + 1]\n self.columns['demo_type'] = Column(\"Worker Type\", calculate_fn=calculate_fn)\n self.columns['demo_type'].view = FunctionView(calculate_fn=calculate_fn)\n self.function_views['demo_type'] = self.columns['demo_type'].view", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n 
uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom 
GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS 
{}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = 
{'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "def migrate(cls)->None:\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS rsvps (\n creatd_date varchar,\n meetup integer,\n user_id integer,\n response varchar,\n PRIMARY KEY(meetup,user_id)\n )\"\"\")\n database.connection.commit()", "def check_and_create_table(self) -> None:\n table_ids = [t.table_id for t in self.instance.list_tables()]\n\n if not self.table_id in table_ids:\n self.table.create()\n f = self.table.column_family(self.family_id)\n f.create()\n\n f_inc = self.table.column_family(self.incrementer_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_inc.create()\n\n f_log = self.table.column_family(self.log_family_id)\n f_log.create()\n\n f_ce = self.table.column_family(self.cross_edge_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_ce.create()\n\n print(\"Table created\")", "def setup_view(view, request=None, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def create_db(self):", "def yield_table(\n self, table_name_and_type: Tuple[str, str]\n ) -> Iterable[Optional[CreateTableRequest]]:\n table_name, table_type = table_name_and_type\n schema_name = self.context.database_schema.name.__root__\n db_name = self.context.database.name.__root__\n try:\n\n columns, table_constraints = self.get_columns_and_constraints(\n schema_name=schema_name,\n table_name=table_name,\n db_name=db_name,\n inspector=self.inspector,\n )\n\n view_definition = self.get_view_definition(\n table_type=table_type,\n table_name=table_name,\n schema_name=schema_name,\n 
inspector=self.inspector,\n )\n\n table_request = CreateTableRequest(\n name=table_name,\n tableType=table_type,\n description=self.get_table_description(\n schema_name=schema_name,\n table_name=table_name,\n inspector=self.inspector,\n ),\n columns=columns,\n viewDefinition=view_definition,\n tableConstraints=table_constraints if table_constraints else None,\n databaseSchema=EntityReference(\n id=self.context.database_schema.id,\n type=\"databaseSchema\",\n ),\n tags=self.get_tag_labels(\n table_name=table_name\n ), # Pick tags from context info, if any\n )\n is_partitioned, partition_details = self.get_table_partition_details(\n table_name=table_name, schema_name=schema_name, inspector=self.inspector\n )\n if is_partitioned:\n table_request.tableType = TableType.Partitioned.value\n table_request.tablePartition = partition_details\n\n if table_type == TableType.View or view_definition:\n table_view = TableView.parse_obj(\n {\n \"table_name\": table_name,\n \"schema_name\": schema_name,\n \"db_name\": db_name,\n \"view_definition\": view_definition,\n }\n )\n self.context.table_views.append(table_view)\n\n yield table_request\n self.register_record(table_request=table_request)\n\n except Exception as exc:\n logger.debug(traceback.format_exc())\n logger.warning(f\"Unexpected exception to yield table [{table_name}]: {exc}\")\n self.status.failures.append(f\"{self.config.serviceName}.{table_name}\")", "def test_create_tables_cmd_success(self):\n self.runner.invoke(cli,\n args=['create-tables'],\n env={'OS_ELASTICSEARCH_ADDRESS': LOCAL_ELASTICSEARCH})\n engine = config.get_engine()\n inspector = Inspector.from_engine(engine)\n self.assertTrue('models' not in inspector.get_table_names())", "def create_view(self, start: int = 0, stop: int = 0):\n stmt = f\"\"\"create or replace view {self._view_name} as {self.qry}\"\"\"\n if start != 0 or stop != 0:\n sql = stmt + f\" limit {stop} offset {start}\"\n else:\n sql = stmt\n self.execquery(sql)", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def create_view(self, view_name='', description='', fields=None, order=None, filters=''):\n res, _ = self.clients.resource_registry.find_resources(name=view_name, id_only=True)\n if len(res) > 0:\n raise BadRequest('The view resource with name: %s, already exists.' 
% view_name)\n\n #======================\n # Arg Validations\n #======================\n validate_is_instance(fields,list, 'Specified fields must be a list.')\n validate_true(len(fields)>0, 'Specfied fields must be a list.')\n if order is not None:\n validate_is_instance(order,list, 'Specified order must be a list of fields')\n for field in order:\n if not field in fields:\n raise BadRequest('The specified ordering field was not part of the search fields.')\n\n fields = set(fields) # Convert fields to a set for aggregation across the catalogs\n #======================================================================================================\n # Priorty Queue Index Matching\n #======================================================================================================\n\n pq = [] # Priority queue for matching\n catalog_id = None\n catalogs, _ = self.clients.resource_registry.find_resources(restype=RT.Catalog, id_only=False)\n for catalog in catalogs:\n if set(catalog.catalog_fields).issubset(fields):\n index_num = len(self.clients.catalog_management.list_indexes(catalog._id))\n heapq.heappush(pq, (index_num,catalog))\n if pq:\n weight, catalog = heapq.heappop(pq)\n if weight < self.heuristic_cutoff:\n catalog_id = catalog._id\n\n \n if catalog_id is None:\n catalog_id = self.clients.catalog_management.create_catalog('%s_catalog'% view_name, keywords=list(fields))\n\n view_res = View(name=view_name, description=description)\n view_res.order = order\n view_res.filters = filters\n view_id, _ = self.clients.resource_registry.create(view_res)\n self.clients.resource_registry.create_association(subject=view_id, predicate=PRED.hasCatalog,object=catalog_id)\n return view_id", "def _model_table(self, create_table_query, insert_query, parameters):\n try:\n self.session.execute(create_table_query)\n self._insert_data(insert_query, parameters)\n except Exception as e:\n print('Error on creating table for query. 
' + str(e))", "def setUp(self):\n self.theView = View()", "def __create_presentations_table(self, schema=PRESENTATIONS_SCHEMA_310):\r\n log.info(\"table created\")\r\n QtSql.QSqlQuery(schema)", "def test_create(self):\n pass", "def column_create(request):\n try:\n dataset = DatasetSchema.objects.get(\n slug=request.matchdict['slug']\n ) \n except DatasetSchema.DoesNotExist:\n return {\n 'success': False, \n 'message': 'No dataset named: %s' % \n (request.matchdict['slug'])\n }\n # make sure required params are here\n required_params_list = ['name', 'data_type']\n for param in required_params_list:\n if not request.POST.get(param):\n return {\n 'success': False, \n 'message': 'Param: %s missing from request' % (param),\n }\n\n name = request.POST['name']\n data_type = request.POST['data_type']\n\n # make sure datatype is acceptable\n if data_type not in VALID_DATA_TYPES:\n return {\n 'success': False,\n 'message': 'Data Type: %s not a valid data type' % (data_type),\n }\n\n # start building new field\n new_field = Field(\n name = name,\n data_type = data_type,\n created_by_user_id = request.user.id,\n created_datetime = datetime.now(),\n )\n\n # if type is datetime make sure that a format is along with it\n\n if request.POST.get('data_type') == 'datetime':\n if not request.POST.get('datetime_format'):\n return {\n 'success': False,\n 'message': 'Missing a datetime format',\n }\n else:\n # add it\n new_field.datetime_format = request.POST['datetime_format']\n\n # save the new field\n dataset.fields.append(new_field)\n dataset.save()\n return HTTPMovedPermanently(location='/dataset/get/{}'.format(dataset.slug))", "def create(self):\n ...", "def test_create_view_updates_db_on_post(db_session, dummy_request):\n from learning_journal.views.default import create_view\n dummy_request.method = \"POST\"\n dummy_request.POST[\"title\"] = \"Some Title.\"\n dummy_request.POST[\"body\"] = \"Some Body.\"\n with pytest.raises(Exception):\n create_view(dummy_request)\n\n query = db_session.query(Entries).all()\n assert query[0].title == \"Some Title.\"\n assert query[0].body == \"Some Body.\"", "def add_view(self, schema, create=True):\n if not constants.NAME_RX.match(schema[\"name\"]):\n raise ValueError(\"invalid view name\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"tables\"]):\n raise ValueError(\"name is already in use for a table\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"views\"]):\n raise ValueError(\"name is already in use for a view\")\n if create:\n sql = 'CREATE VIEW \"%s\" AS %s' % (\n schema[\"name\"],\n dbshare.query.get_sql_statement(schema[\"query\"]),\n )\n self.dbcnx.execute(sql)\n cursor = self.dbcnx.cursor()\n try:\n sql = 'PRAGMA table_info(\"%s\")' % schema[\"name\"]\n cursor.execute(sql)\n except sqlite3.Error: # Invalid view\n sql = 'DROP VIEW \"%s\"' % schema[\"name\"]\n cursor.execute(sql)\n raise ValueError(\"invalid view; maybe non-existent column?\")\n # Source names considering quotes and disregarding AS part, if any.\n schema[\"sources\"] = dbshare.query.get_from_sources(schema[\"query\"][\"from\"])\n schema[\"columns\"] = [{\"name\": row[1], \"type\": row[2]} for row in cursor]\n sql = \"INSERT INTO %s (name, schema) VALUES (?,?)\" % constants.VIEWS\n with self.dbcnx:\n self.dbcnx.execute(sql, (schema[\"name\"], json.dumps(schema)))\n self.db[\"views\"][schema[\"name\"]] = schema", "def post_interface_route_table_create(self, resource_dict):\n pass", "def test_create(self):\n cursor = connection.cursor()\n # It needs to take at least 2 args\n 
self.assertRaises(TypeError, db.create_table)\n self.assertRaises(TypeError, db.create_table, \"test1\")\n # Empty tables (i.e. no columns) are not fine, so make at least 1\n db.create_table(\"test1\", [('email_confirmed', models.BooleanField(default=False))])\n db.start_transaction()\n # And should exist\n cursor.execute(\"SELECT * FROM test1\")\n # Make sure we can't do the same query on an empty table\n try:\n cursor.execute(\"SELECT * FROM nottheretest1\")\n self.fail(\"Non-existent table could be selected!\")\n except:\n pass\n # Clear the dirty transaction\n db.rollback_transaction()\n db.start_transaction()\n # Remove the table\n db.delete_table(\"test1\")\n # Make sure it went\n try:\n cursor.execute(\"SELECT * FROM test1\")\n self.fail(\"Just-deleted table could be selected!\")\n except:\n pass\n # Clear the dirty transaction\n db.rollback_transaction()\n db.start_transaction()\n # Try deleting a nonexistent one\n try:\n db.delete_table(\"nottheretest1\")\n self.fail(\"Non-existent table could be deleted!\")\n except:\n pass\n db.rollback_transaction()", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def _check_params(self):\n pass", "def init(self, cr):\n\t\ttools.drop_view_if_exists(cr, 'purchase_order_line_summary')\n\n\t cr.execute(\"\"\" CREATE VIEW purchase_order_line_summary AS (\n\t SELECT max(id) as id,order_id,product_id,name,product_uom,sum(product_qty) as product_qty, \n\t\t\tsum(price_subtotal) as price_subtotal,avg(discount) as discount \n\t\t\tfrom purchase_order_line\n\t\t\tgroup by order_id,product_id,name,product_uom)\n\t\t\t\"\"\")", "def setup_method(self):\n MANAGER._tables = {}\n MANAGER._views = {}", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def crearTabla(self):\n mensaje = self.base.createTable()\n showinfo('Resultado', mensaje)", "def create_table_execute(self):\n self.execute(query=self.default_template.format(self.table_name), data=None)", "def __init__(self, params=None):\n\n rights = access.Checker(params)\n rights['unspecified'] = ['deny']\n rights['edit'] = ['deny']\n rights['show'] = [('checkIsMyEntity', [notification_logic, 'scope_path'])]\n rights['delete'] = [('checkIsMyEntity', [notification_logic, 'scope_path'])]\n rights['list'] = ['checkIsUser']\n # create is developer only for the time being to test functionality\n rights['create'] = ['checkIsDeveloper']\n\n new_params = {}\n new_params['logic'] = notification_logic\n new_params['rights'] = rights\n\n new_params['name'] = \"Notification\"\n\n new_params['no_create_with_key_fields'] = True\n new_params['create_form'] = CreateForm\n\n new_params['edit_redirect'] 
= '/%(url_name)s/list'\n\n params = dicts.merge(params, new_params)\n\n super(View, self).__init__(params=params)", "def test_db_table_creation_check(self):\n mock_cursor = Mock()\n mock_cursor.configure_mock(**{\"cursor.return_value.fetchone.return_value\": (\"vnf_table_2\")})\n status = misshtbtd.db_table_creation_check(mock_cursor, \"vnf_table_2\")\n self.assertEqual(status, True)", "def create_tables (self):\n cursor = self.cur()\n cursor.execute('DROP TABLE IF EXISTS person')\n cursor.execute('DROP TABLE IF EXISTS room')\n cursor.execute('DROP TABLE IF EXISTS allocation')\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS person (person_id INTEGER PRIMARY KEY, name TEXT, role TEXT)WITHOUT ROWID')\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS room (name TEXT , no_of_members INTEGER, room_type TEXT)')\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS allocation (room_name TEXT , person_id INTEGER)')", "def setUp(self):\n resume.objects.create(\n first_name='Nicholas',\n last_name='Bielinski',\n )\n experience.objects.create(\n title='Helpdesk Technician',\n location='L3 Technologies',\n start_date='6/26/2017',\n end_date='present',\n description='blah blah blah'\n )\n education.objects.create(\n institution_name='UNH Manchester',\n location='Manchester',\n degree='Bachelor',\n major='CIS',\n gpa = '3.5'\n )", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def prepare(self):", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n setattr(request, \"session\", \"session\")\n messages = FallbackStorage(request)\n setattr(request, \"_messages\", messages)\n return view", "def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()", "def initialize(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def addViewToDb(self,name):\n\t\tsql = \"INSERT INTO hudson_views(viewname) VALUES (%s)\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql,[name])", "def on_pushButton_view_clicked(self):\n content = unicode(self.comboBox.currentText())\n if content == \"职称表\":\n data = self.sql_client.get_zc_info()\n self.fill_tableview(data)\n elif content == \"文化表\":\n data = self.sql_client.get_wh_info()\n self.fill_tableview(data)\n elif content == \"部门表\":\n data = self.sql_client.get_bm_info()\n self.fill_tableview(data)", "def users_create():", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def createview_bad_request():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n 
# counting only status 404\n c.execute(\"create view bad_request as select cast(time as date), \"\n \"count(*) as num \"\n \"from log \"\n \"where status = '404 NOT FOUND' \"\n \"group by cast(time as date) \"\n \"order by cast(time as date)\")\n db.commit()\n db.close()", "def prepareController(self):\n pass", "def set_up_tables():\n table_users = \"\"\"\n CREATE TABLE IF NOT EXISTS users (\n id SERIAL PRIMARY KEY,\n username VARCHAR (24) NOT NULL UNIQUE,\n firstname VARCHAR (24) NOT NULL,\n lastname VARCHAR (24) NOT NULL,\n othername VARCHAR (24),\n phone VARCHAR (24) NOT NULL,\n email VARCHAR (30) NOT NULL UNIQUE,\n password VARCHAR (128) NOT NULL,\n passportUrl VARCHAR (200),\n isPolitician BOOLEAN,\n isAdmin BOOLEAN\n )\"\"\"\n\n parties_table = \"\"\" \n CREATE TABLE IF NOT EXISTS parties (\n id SERIAL PRIMARY KEY,\n name VARCHAR (35) NOT NULL UNIQUE,\n hqAddress VARCHAR (30),\n logoUrl VARCHAR\n )\"\"\"\n\n offices_table = \"\"\"\n CREATE TABLE IF NOT EXISTS offices (\n id SERIAL PRIMARY KEY,\n name VARCHAR (35) NOT NULL UNIQUE,\n type VARCHAR (35)\n )\"\"\"\n\n canditates_table = \"\"\"\n CREATE TABLE IF NOT EXISTS candidates (\n id SERIAL,\n candidate INTEGER,\n office INTEGER,\n PRIMARY KEY (office, candidate),\n FOREIGN KEY (candidate) REFERENCES users(id) ON DELETE CASCADE,\n FOREIGN KEY (office) REFERENCES offices(id) ON DELETE CASCADE\n )\"\"\"\n\n voters_table = \"\"\"\n CREATE TABLE IF NOT EXISTS votes (\n id SERIAL,\n office INTEGER,\n candidate INTEGER,\n voter INTEGER,\n PRIMARY KEY (office, voter),\n FOREIGN KEY (office) REFERENCES offices(id) ON DELETE CASCADE,\n FOREIGN KEY (candidate) REFERENCES users(id) ON DELETE CASCADE,\n FOREIGN KEY (voter) REFERENCES users(id) ON DELETE CASCADE\n )\"\"\"\n\n return [table_users, parties_table,\n offices_table, canditates_table, voters_table]", "def __init__(self, *args, **kwargs):\n self.organisation = kwargs.pop('organisation', None)\n self.cobrand = kwargs.pop('cobrand', None)\n super(ReviewTable, self).__init__(*args, **kwargs)", "def create(self):\n\n pass", "def create_stock_view_list_table(self):\n create_stock_view_list_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS \"{0}\".\"{1}\" (\n stock_view_name text NOT NULL PRIMARY KEY,\n stock_view_relation text NOT NULL\n );\n \"\"\".format(Schemas.SCHEMA_META, Tables.TABLE_TICK_STOCK_VIEW_LIST)\n\n conn = self.db_engine.connect()\n try:\n conn.execute(create_stock_view_list_sql)\n except:\n self._logger.log_error(traceback.format_exc())\n return Error.ERROR_DB_EXECUTION_FAILED\n finally:\n conn.close()", "def insert_ticket(request):\n required_keys = {'event_id', 'section', 'rownum', 'seat', 'price', 'seller_id', 'status'}\n param_keys = set(request.POST.keys())\n print(f\"param keys - {param_keys}\")\n #if len(param_keys) >= len(required_keys):\n # return \"Invalid Request\"\n\n try:\n print(param_keys)\n new_ticket = models.tickets()\n for param in param_keys:\n if param == 'status':\n setattr(new_ticket,param, bool(request.POST[param]))\n else:\n setattr(new_ticket,param, request.POST[param])\n print(param)\n print(new_ticket)\n request.dbsession.add(new_ticket) \n request.dbsession.flush()\n except DBAPIError:\n return \"Invalid Request\"\n return {'status':'success'}", "def test_table_definition(self):\n create_table(LowercaseKeyModel)\n create_table(CapitalizedKeyModel)\n\n delete_table(LowercaseKeyModel)\n delete_table(CapitalizedKeyModel)", "def __init__(self, table_view):\n QToolBar.__init__(self)\n self.table_view = table_view\n self.buildToolbarWidgets()", "def 
_check_required_fields(self):\n assert self.title\n assert self.format", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def initView(self):\n return {}", "def generate_psql_views(self, schema, schema_name_v1, schema_name_v2, psql_views_path):\n psql_views = open(psql_views_path, 'w')\n psql_views.write(\"SET client_min_messages TO ERROR;\\n\")\n psql_views.write(\"DROP SCHEMA IF EXISTS %s CASCADE;\\n\\n\" % schema_name_v1)\n psql_views.write(\"CREATE SCHEMA IF NOT EXISTS %s;\\n\\n\" % schema_name_v1)\n\n for table_name_v1, table_attr in schema['tables'].iteritems():\n table_name_v2 = table_attr['name']\n columns_pri, columns_ref, columns, columns_ignore = \\\n PsqlParser._get_categorized_columns(table_attr['columns'])\n\n columns = merge_dicts(columns_pri, columns_ref, columns)\n\n columns_v2 = [ '\"'+col_attr['name']+'\"' for col_name_v1, col_attr in columns.iteritems() ]\n columns_v2 += [ 'NULL' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n columns_v1 = [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns.iteritems()]\n columns_v1 += [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n view_sql = ('CREATE VIEW %s (%s) AS \\n SELECT %s FROM %s WITH CASCADED CHECK OPTION;\\n\\n' % (\n \"%s.%s\" % (schema_name_v1, table_name_v1),\n ', '.join(columns_v1),\n ', '.join(columns_v2),\n \"%s.%s\" % (schema_name_v2, table_name_v2)\n ))\n\n psql_views.write(view_sql + \"\\n\")\n psql_views.close()", "def create():", "def create():", "def create(self, request, *args, **kwargs):\n return super(UserViewSet, self).create(request, *args, **kwargs)" ]
[ "0.63214684", "0.6122003", "0.60363513", "0.59719974", "0.583535", "0.5826202", "0.5768594", "0.57384115", "0.55392927", "0.55050695", "0.54572743", "0.54387593", "0.5411729", "0.53755325", "0.53628856", "0.5360331", "0.5358349", "0.53566074", "0.53360575", "0.5325151", "0.5300957", "0.5295821", "0.52832156", "0.5261851", "0.5261851", "0.5245268", "0.5236487", "0.5225806", "0.5225423", "0.5215412", "0.5215321", "0.52086604", "0.5208202", "0.5180909", "0.51773095", "0.5176883", "0.5173101", "0.5169828", "0.51629364", "0.5160951", "0.51532537", "0.5152801", "0.51452625", "0.5145128", "0.51326716", "0.5123922", "0.5113264", "0.5110827", "0.5107148", "0.5094529", "0.5086187", "0.50802666", "0.50687313", "0.5061227", "0.5057832", "0.5050648", "0.5043055", "0.5039795", "0.50182647", "0.50099736", "0.5005557", "0.49996373", "0.4996286", "0.4991714", "0.49902308", "0.49871486", "0.4984676", "0.49800733", "0.4975458", "0.4970336", "0.49694395", "0.49692625", "0.49670044", "0.49665195", "0.49665195", "0.49665195", "0.4957309", "0.49526906", "0.4947733", "0.49427167", "0.49412203", "0.49392113", "0.49391347", "0.4930978", "0.49244666", "0.49228418", "0.49218807", "0.49068606", "0.49061152", "0.49052164", "0.490006", "0.4898409", "0.48974073", "0.48969144", "0.48958778", "0.4894352", "0.48926705", "0.4891448", "0.4891448", "0.4888753" ]
0.65944326
0
Process the ifexists flag on datasets create.
def ProcessDatasetOverwrite(ref, args, request): del ref dataset_id = request.dataset.datasetReference.datasetId project_id = request.projectId if args.overwrite: if _DatasetExists(dataset_id, project_id): _TryDeleteDataset(dataset_id, project_id) return request
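Aside: the record above shows an overwrite hook that conditionally deletes an existing dataset before a create request is passed through. The sketch below is a minimal, self-contained illustration of that pattern only; the argparse namespace and the stubbed helper functions are assumptions standing in for the real BigQuery client calls, which are not part of this record.

from types import SimpleNamespace

_EXISTING = {("my-project", "my_dataset")}  # pretend server-side state


def _dataset_exists(dataset_id, project_id):
    # Stand-in for the real existence check against the BigQuery API.
    return (project_id, dataset_id) in _EXISTING


def _try_delete_dataset(dataset_id, project_id):
    # Stand-in for the real delete call; silently tolerates "not found".
    _EXISTING.discard((project_id, dataset_id))


def process_dataset_overwrite(args, request):
    """Delete the target dataset first when --overwrite is set, then pass the request through."""
    if args.overwrite and _dataset_exists(request.dataset_id, request.project_id):
        _try_delete_dataset(request.dataset_id, request.project_id)
    return request  # the request object itself is returned unchanged


if __name__ == "__main__":
    args = SimpleNamespace(overwrite=True)
    request = SimpleNamespace(dataset_id="my_dataset", project_id="my-project")
    process_dataset_overwrite(args, request)
    print(_dataset_exists("my_dataset", "my-project"))  # False: cleared before re-create

The design point the hook relies on is that it mutates external state (the delete) but leaves the request untouched, so it composes with other request-processing hooks.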
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isExist(data):\n return True/False", "def is_version_data_existed(self):\n # if exists, skip\n # return \n\n return True", "def exist(self):", "def run(**kwargs):\n del kwargs # Unused args\n if os.path.exists(DATASET_PATH):\n LOGGER.info('... Dataset already exists. Skipping.')\n else:\n build()", "def check_dataset_exists(dataset):\n result = subprocess.call(['das_client.py', '--query', 'dataset dataset=%s' % dataset])\n return result == 0", "def test_dataset_exists(client, to_delete):\n DATASET_ID = \"get_table_dataset_{}\".format(_millis())\n dataset_ref = client.dataset(DATASET_ID)\n dataset = bigquery.Dataset(dataset_ref)\n dataset = client.create_dataset(dataset)\n to_delete.append(dataset)\n\n assert dataset_exists(client, dataset_ref)\n assert not dataset_exists(client, client.dataset(\"dataset doesnot exist\"))", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def object_exists(self, fname):\n return False", "def is_dataset_created(path, suffix=\"\"):\n dataset_id = None\n try:\n with open(\"%s%sdataset%s\" % (path, os.sep, suffix)) as dataset_file:\n dataset_id = dataset_file.readline().strip()\n try:\n dataset_id = bigml.api.get_dataset_id(dataset_id)\n return True, dataset_id\n except ValueError:\n return False, None\n except IOError:\n return False, None", "def _does_not_exist_or_forced(self) -> bool:\n if os.path.exists(self.extracted_path) and self.force:\n logger.debug(f\"'-f/--force' flag set, deleting directory: '{self.extracted_path}'\")\n shutil.rmtree(self.extracted_path)\n logger.debug(f\"Deletion successful.\")\n elif os.path.exists(self.extracted_path) and not self.force:\n logger.warning(f\"{self.dataset_name} already exists at the destination directory '{self.extracted_path}'\")\n logger.warning(f\"If you wish to re-download the dataset, try 'sla-cli download -f/--force <DATASET>'\")\n logger.warning(f\"Skipping...\")\n return False\n\n return True", "def add(self, data, check_exists=True): # pragma: no cover\n raise NotImplementedError", "def object_exists(self, fname):\n return True", "def exists(self, datadir):\n return False", "def dataset_exists(es_url, id, es_index=\"grq\"):\n\n total, id = check_dataset(es_url, id, es_index)\n if total > 0:\n return True\n return False", "def is_file_exists(self):\n pass", "def _handle_exists_collection(name: str, exists: Optional[str], db: Database) -> None:\n\n if exists == \"fail\":\n if db[name].count() > 0:\n raise ValueError(f\"Collection '{name}' already exists.\")\n return\n\n if exists == \"replace\":\n if db[name].count() > 0:\n db[name].drop()\n return\n\n if exists == \"append\":\n return\n\n raise ValueError(f\"'{exists}' is not valid for if_exists\")", "def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()", "def test_exists_false(self):\n self.assertFalse(PrepSample.exists('Not_a_Sample', self.prep_template))", "def exists(self):\n return True", "def exists(self):\n return True", "def _auto_create(self):\n status = [\n os.path.exists(self.vertices_path),\n os.path.exists(self.edges_path),\n ]\n\n if not all(status):\n self._create_vertex_skel(self.path)\n self._create_edge_skel(self.path)", "def datafileexist(filename):\n filePath = os.path.join(pathtofolder(), \"datas\", filename)\n fileFormat = '.csv'\n return os.path.exists(f'{filePath+fileFormat}')", "def file_exist() -> bool:\n pass", "def _CheckFileExistsWithData(self, logs, graph):\n self.assertTrue(graph in logs, 'File %s was not output.' 
% graph)\n self.assertTrue(logs[graph], 'File %s did not contain data.' % graph)", "def exists(self, path):", "def test_exists_true(self):\n self.assertTrue(SampleTemplate.exists(self.test_study.id))", "def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def test_exists_true(self):\n self.assertTrue(PrepSample.exists(self.sample_id, self.prep_template))", "def test_ensure_exists(self):\n # with data\n client = kazoo.client.KazooClient()\n zkutils.ensure_exists(client, '/foo/bar', data='foo')\n kazoo.client.KazooClient.create.assert_called_with(\n '/foo/bar', b'foo', acl=mock.ANY, makepath=True,\n sequence=False)\n\n # non-data\n zkutils.ensure_exists(client, '/foo/bar')\n kazoo.client.KazooClient.create.assert_called_with(\n '/foo/bar', b'', acl=mock.ANY, makepath=True,\n sequence=False)", "def test_exists_false(self):\n self.assertFalse(PrepTemplate.exists(2))", "def test_ensure_exists_existing(self):\n def raise_exists(*args_unused, **kwargs_unused):\n \"\"\"zk.create side effect, raising appropriate exception.\"\"\"\n raise kazoo.client.NodeExistsError()\n\n client = kazoo.client.KazooClient()\n kazoo.client.KazooClient.create.side_effect = raise_exists\n zkutils.ensure_exists(client, '/foo/bar')\n kazoo.client.KazooClient.set_acls.assert_called_with('/foo/bar',\n mock.ANY)\n\n # ensure with data\n zkutils.ensure_exists(client, '/foo/bar', data='foo')\n kazoo.client.KazooClient.set.assert_called_with('/foo/bar', b'foo')\n kazoo.client.KazooClient.set_acls.assert_called_with('/foo/bar',\n mock.ANY)", "async def create_checkpoint_if_not_exists_async(self, partition_id):", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def ensure_data_folder_existence() -> None:\n folder_name = params.DATA_FOLDER_NAME\n if not folder_name in os.listdir('.'):\n os.mkdir(folder_name)", "def exists_dataset(self, dataset):\n assert dataset, \"Must input a valid dataset name.\"\n return any(self.get_by_dataset(dataset))", "def test_exists_true(self):\n self.assertTrue(Sample.exists(self.sample_id, self.sample_template))", "def exists():\n check50.include(\"data\")\n check50.exists(\"adventure.py\")\n check50.exists(\"room.py\")", "def create_dataset_folder_structure():\n\n path = Path(f'{DATASETS}/{FEATURES_DATASET}')\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n\n try:\n for path in new_sensor_paths:\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. 
Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n else:\n print(\"\\nPath already exists!\")\n except:\n return False\n else:\n return True", "def check_for_new_data(self):\n return", "def test_exists_false(self):\n self.assertFalse(Sample.exists('Not_a_Sample', self.sample_template))", "def check_file_exist(self):\n return False", "def exists(self):\n f = os.path.join(pth, '..', 'static/data', self.filename)\n return os.path.isfile(f)", "def checkfolderdata(folder = 'datas'):\n if datafolderexist(folder):\n return True\n else:\n createdatafolder(folder)\n checkfolderdata(folder)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.split_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.split_dir))", "def are_datasets_created(path, number_of_datasets, suffix='parts'):\n dataset_ids = []\n try:\n with open(\"%s%sdataset_%s\" % (path, os.sep, suffix)) as datasets_file:\n for line in datasets_file:\n dataset = line.strip()\n try:\n dataset_id = bigml.api.get_dataset_id(dataset)\n dataset_ids.append(dataset_id)\n except ValueError:\n return False, dataset_ids\n if len(dataset_ids) == number_of_datasets:\n return True, dataset_ids\n else:\n return False, dataset_ids\n except IOError:\n return False, dataset_ids", "def check_and_create_sandbox_dataset(project_id, dataset_id):\n sandbox_dataset = get_sandbox_dataset_id(dataset_id)\n dataset_objs = list_datasets(project_id)\n datasets = [d.dataset_id for d in dataset_objs]\n\n if sandbox_dataset not in datasets:\n create_sandbox_dataset(project_id, dataset_id)\n return sandbox_dataset", "def file_exists(msl_data_path, filename):\n return os.path.isfile(msl_data_path + filename)", "def entry_exists(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False", "def document_exists(self, docid):\n raise NotImplementedError", "def test_exist(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n MetadataTemplate.exists(self.study)", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def is_existing(self):\n return self.backend.is_existing", "def CheckForExistence(requested_data, available_data):\n if requested_data is not None:\n return 1 # The requested data exists.\n elif available_data:\n return -1 # The requested data does not exist.\n else:\n return 0 # No data exists at all.", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))", "def test_create_already_exists():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n model_file = os.path.join(misc_folder, \"model.tar\")\n create.main(\"mlp\", \"10:12:8\", model_file)\n # TODO: Check if error was logged", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not 
osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))", "def dataset_exists(dataset_reference, client):\n from google.cloud.exceptions import NotFound\n\n try:\n client.get_dataset(dataset_reference)\n return True\n except NotFound:\n return False", "def create_file(file_path: str, override_flag: bool) -> bool:\n return (not os.path.exists(file_path)) or override_flag", "async def create_checkpoint_store_if_not_exists_async(self):", "def _item_exists(self, location):\n \"Does nothing\"", "def _check_file_exists_helper(self, report_path, filename):\n\n if not check_data_exists(report_path, [filename]):\n raise AssertionError(\n \"{} does not exist in location {}\".format(\n filename, report_path\n )\n )", "def before_dataobj_create(self, dataobj):", "def _check_before_run(self):\r\n if not os.path.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def check_database_exists(dbname):\n if not os.path.isfile(dbname):\n create_ivr_database(dbname)", "def process(self):\n try:\n # self.alter_columns()\n self.collect_drop_fk()\n self.update_table()\n self.create_tables()\n self.db_operations.create_fk_constraint(self.fk_constraints, self.contraints_columns)\n return True\n except Exception as err:\n logger.error(\"create_tables [error] -> %s\" % err)", "def test_filter_exists(self):\n datasets = [{\"exists\": True, \"name\": \"DATASET1\"}, {\"exists\": False, \"name\": \"DATASET2\"}]\n hits = filter_exists(\"HIT\", datasets)\n misses = filter_exists(\"MISS\", datasets)\n all = filter_exists(\"ALL\", datasets)\n nothing = filter_exists(\"NONE\", datasets)\n self.assertEqual(hits, [{\"exists\": True, \"name\": \"DATASET1\"}])\n self.assertEqual(misses, [{\"exists\": False, \"name\": \"DATASET2\"}])\n self.assertEqual(all, datasets)\n self.assertEqual(nothing, [])", "def check_if_entry_exists(title: str) -> bool:\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n try:\n c.execute(\n \"\"\"select * from entries where title = ?\"\"\",\n (title,)\n )\n records = c.fetchall()\n return len(records) > 0\n except sqlite3.OperationalError as e:\n print(f'Exception {e} caught. Recreating database.')\n c.execute('drop table if exists entries')\n conn.commit()\n conn.close()\n create()\n return False", "def ResourceExists(self, name):\n pass", "def test_data_source_soaps_id_exists_get(self):\n pass", "def dir_exists(dirname):\n global datapath\n current_dir = './' + dirname + '/'\n parent_dir = '../' + dirname + '/'\n grandparent_dir = '../../' + dirname + '/'\n if os.path.isdir(current_dir):\n datapath = current_dir\n elif os.path.isdir(parent_dir):\n datapath = parent_dir\n elif os.path.isdir(grandparent_dir):\n datapath = grandparent_dir\n else:\n response = raw_input(\"'{}' directory does not exist. \"\n \"Create it [Y/n]? \".format(dirname))\n if not response or response[0].lower() == 'y':\n directory = os.getcwd()\n if os.path.basename(directory) == 'caribou-data-collection':\n # E.g. 
/caribou-data-collection/scraper.py\n datapath = current_dir\n os.makedirs(datapath)\n print 'Created', datapath\n elif os.path.basename(directory) in COUNTRIES:\n # E.g. /caribou-data-collection/country/scraper.py\n datapath = parent_dir\n os.makedirs(datapath)\n print 'Created', datapath\n elif os.path.basename(os.path.dirname(directory)) in COUNTRIES:\n # E.g. /caribou-data-collection/country/region/scraper.py\n datapath = grandparent_dir\n os.makedirs(datapath)\n print 'Created', datapath\n else:\n raise RuntimeError('Directory not created. '\n 'Please switch to the caribou-data-collection'\n ' directory and try again.')\n else:\n return False\n return True", "def fileexists(cpath, create=True):\n dpath = os.path.dirname(cpath)\n if not os.path.isfile(cpath):\n if direxists(dpath) and create:\n try:\n pathlib.Path(cpath).touch()\n except os.error:\n logging.error(\"Can't create file %s!\", cpath)\n return False\n return True", "def _ensure_exists(name, path=None):\n if not exists(name, path=path):\n raise CommandExecutionError(f\"Container '{name}' does not exist\")", "def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))", "def exists(anyid=None, **multi_match): # TODO multimatch\n if not any((anyid, multi_match)):\n raise RegistryError(\"specify at least one condition.\")\n\n if anyid:\n return ESDataset.exists(_id=anyid) or ESDataset.exists(identifier=anyid)\n\n return ESDataset.exists(**multi_match)", "def _index_file_exists(idx_fn):\n if os.path.exists(idx_fn + \".npy\") and os.path.exists(idx_fn + \".info\"):\n return True\n else:\n return False", "def datafolderexist(name):\n folderpath = os.path.join(pathtofolder(), name)\n return os.path.exists(folderpath)", "def perform_existence_check(set_name_to_cache: bool):\n Thread(target=_perform_existence_check, args=(set_name_to_cache,)).start()", "def exists(self, arg):\n raise NotImplementedError", "def checkExist(self,fname,status):\n\n if (self.status == \"r\"):\n # Checks to see if it exists for reading\n # Which means it must be present\n\n if (not (os.path.exists(self.fname))):\n print(f\"Couldn't open input file: {self.fname}\")\n return False\n else:\n # Check to see if exists for reading\n # (i.e. must not exist)\n if (os.path.exists(self.fname)):\n print(f\"File {self.fname} already exists.\")\n return False\n\n return True", "def checkExist(self,fname,status):\n\n if (self.status == \"r\"):\n # Checks to see if it exists for reading\n # Which means it must be present\n\n if (not (os.path.exists(self.fname))):\n print(f\"Couldn't open input file: {self.fname}\")\n return False\n else:\n # Check to see if exists for reading\n # (i.e. 
must not exist)\n if (os.path.exists(self.fname)):\n print(f\"File {self.fname} already exists.\")\n return False\n\n return True", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.probe_gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.probe_gallery_dir))", "def Exists(self, path: str) -> bool:\n ...", "def _create_file_if_needed(self):\n if not os.path.exists(self._file.filename()):\n old_umask = os.umask(0o177)\n try:\n open(self._file.filename(), 'a+b').close()\n finally:\n os.umask(old_umask)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not 
available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def exists(_env):\n return True", "def exists(_env):\n return True", "def images_exist(self):\n pass", "def test_exists(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n os.environ[self.test_name] = \"This is a test.\"\n\n expected = True\n actual = self.helper.exists()\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]", "def check_exist(filename, status):\n\n if (status == \"r\"):\n # check to see if it exists for reading\n # (i.e. must be present)\n if (not (os.path.exists(filename))):\n print(f\"Couldn't open input file: {filename}.\")\n return False\n else:\n # check to see if it exists for writing\n # (i.e. must not exist or clobber=yes)\n if (os.path.exists(filename)):\n if (status == \"w\"):\n return True\n else:\n return False\n\n return True", "def check_file_creation(today_filename):\r\n\r\n if os.path.exists(today_filename):\r\n print(\"Today's file {} has been created\".format(today_filename))\r\n else:\r\n print(\"Today's file was not created\")", "def object_exists(self, fname):\n return self.object_exists", "def add_existing_key_fail(self, data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)", "def exists(self, obj):\n return False" ]
[ "0.61832976", "0.5865549", "0.58427125", "0.5761497", "0.57018334", "0.5658679", "0.55542874", "0.5486935", "0.5465586", "0.54652494", "0.54532725", "0.5452605", "0.5446348", "0.5409404", "0.5387412", "0.5352073", "0.53434783", "0.5338109", "0.53380615", "0.53380615", "0.5296173", "0.5286036", "0.5285628", "0.5283233", "0.5259973", "0.5251596", "0.52317667", "0.5229633", "0.5228364", "0.5209805", "0.5208793", "0.52048177", "0.51878524", "0.51805085", "0.5177314", "0.51598644", "0.5157069", "0.51526797", "0.514977", "0.51259226", "0.51156", "0.51151705", "0.5113204", "0.5093946", "0.5090204", "0.5088175", "0.5074226", "0.5074087", "0.506958", "0.50680894", "0.5068073", "0.5049205", "0.50322497", "0.50210726", "0.5018592", "0.50182825", "0.50100917", "0.5006929", "0.5002313", "0.49974167", "0.49970585", "0.49929935", "0.4985612", "0.49854606", "0.4980093", "0.49792212", "0.49719304", "0.4971483", "0.49620792", "0.49550563", "0.49473494", "0.49467596", "0.49306765", "0.49229294", "0.49214855", "0.49177673", "0.49115822", "0.49114227", "0.4910846", "0.49080873", "0.49080873", "0.49078935", "0.49045894", "0.4900532", "0.4899734", "0.4899734", "0.4899734", "0.4899734", "0.4899734", "0.4899734", "0.4899734", "0.4898616", "0.4898616", "0.48984605", "0.48972872", "0.4891578", "0.4891394", "0.48863268", "0.48840052", "0.4881104" ]
0.5203899
32
Process the overwrite flag on tables create.
def ProcessTableOverwrite(ref, args, request): dataset_id = ref.datasetId table_id = ref.Name() project_id = ref.projectId if args.overwrite: if _TableExists(dataset_id, table_id, project_id): _TryDeleteTable(dataset_id, table_id, project_id) return request
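Aside: this record applies the same overwrite pattern at table level, keyed off a resource reference. The sketch below is an illustrative variant only, with the existence/delete helpers injected as callables so the conditional behaviour can be exercised in isolation; every name in it is an assumption, not the real gcloud API.

from types import SimpleNamespace


def process_table_overwrite(ref, args, request, table_exists, try_delete_table):
    """If --overwrite is set and the table exists, drop it before the create request runs."""
    if args.overwrite and table_exists(ref.datasetId, ref.Name(), ref.projectId):
        try_delete_table(ref.datasetId, ref.Name(), ref.projectId)
    return request


if __name__ == "__main__":
    deleted = []
    ref = SimpleNamespace(datasetId="ds", projectId="proj", Name=lambda: "tbl")
    args = SimpleNamespace(overwrite=True)
    process_table_overwrite(
        ref, args, request={},
        table_exists=lambda d, t, p: True,
        try_delete_table=lambda d, t, p: deleted.append((d, t, p)),
    )
    print(deleted)  # [('ds', 'tbl', 'proj')] -- the stale table is dropped before the create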
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request", "def process(self):\n try:\n # self.alter_columns()\n self.collect_drop_fk()\n self.update_table()\n self.create_tables()\n self.db_operations.create_fk_constraint(self.fk_constraints, self.contraints_columns)\n return True\n except Exception as err:\n logger.error(\"create_tables [error] -> %s\" % err)", "def process_overrides(self, db, dest, kvargs, lines):\n logging.info(\"process_overrides db:{} dest:{} kvargs:{} \".format(db.name,dest,kvargs))\n keyword = kvargs['keyword']\n db.create_overrides(keyword)\n return True", "def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))", "def __new_tables_statement(self):\n new_tables = self.__new_tables()\n for table in new_tables:\n with open('./update/create_tables.sql', 'a') as f:\n create_statement = self.source.query_create_table_statement(table.name)\n f.write(create_statement)\n f.write('\\n')", "def _do_action_tables_create(self):\n\n schema_shell = os.path.join(self.bento_home, \"schema-shell\", \"bin\", \"kiji-schema-shell\")\n assert os.path.isfile(schema_shell), schema_shell\n\n # Delete the table first!\n cmd = (\n \"kiji delete --target={kiji_uri} --interactive=false; \" +\n \"kiji install --kiji={kiji_uri}\" ).format(kiji_uri=self.kiji_uri)\n self._run_kiji_job(cmd)\n\n for ddl in self.ddls:\n ddl_full_path = os.path.join(self.movie_advisor_home, ddl)\n assert os.path.isfile(ddl_full_path)\n cmd = \"{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}\".format(\n schema_shell=schema_shell,\n kiji_uri=self.kiji_uri,\n ddl_full_path=ddl_full_path)\n self._run_kiji_job(cmd)", "def test_database_object_overwrite_parameter_is_set(self):\n database = generate_database_object(overwrite=True)\n\n self.assertEqual(\n True,\n database.overwrite == True,\n \"Database object did not have an overwrite flag, despite being created with one.\"\n )", "def touch(self):\n if self.marker_table_bound is None:\n self.create_marker_table()\n\n table = self.marker_table_bound\n id_exists = self.exists()\n with self.engine.begin() as conn:\n if not id_exists:\n ins = table.insert().values(\n ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n else:\n ins = table.update().where(sqlalchemy.and_(\n table.c.ParquetSource == self.parquet_source,\n table.c.Environment == self.environment,\n table.c.TargetTable == self.target_table)).\\\n values(ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n conn.execute(ins)\n assert self.exists()", "def check_and_create_table(self) -> None:\n table_ids = [t.table_id for t in self.instance.list_tables()]\n\n if not self.table_id in table_ids:\n self.table.create()\n f = self.table.column_family(self.family_id)\n f.create()\n\n 
f_inc = self.table.column_family(self.incrementer_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_inc.create()\n\n f_log = self.table.column_family(self.log_family_id)\n f_log.create()\n\n f_ce = self.table.column_family(self.cross_edge_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_ce.create()\n\n print(\"Table created\")", "def create_all_tables(self):\n pass", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def on_doctype_update():\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_defkey_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_defkey_index(parent, defkey)\"\"\")\n\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_parenttype_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_parenttype_index(parent, parenttype)\"\"\")", "def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" 
type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, \n\t childId %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user 
VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query", "def _create_tables_classic(self, engine, metadata):\n if engine and metadata:\n with (yield from engine) as conn:\n for x in self._models.values():\n try:\n yield from conn.execute(CreateTable(x))\n except ProgrammingError as error:\n if hasattr(self.app, 'log') and self.app.log:\n if self.app.debug:\n self.app.log.info(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n else:\n if self.app.debug:\n print(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n return", "def create_table(self):\n pass", "def touch(self, connection=None):\n self.create_marker_table()\n\n if connection is None:\n connection = self.connect()\n connection.autocommit = True # if connection created here, we commit it here\n\n connection.cursor().execute(\n \"\"\"INSERT INTO {marker_table} (update_id, target_table)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n update_id = VALUES(update_id)\n \"\"\".format(marker_table=self.marker_table),\n (self.update_id, self.table)\n )\n # make sure update is properly marked\n assert self.exists(connection)", "def create_tables(self):\n for name, attribute in self.__dict__.items():\n if hasattr(attribute, 'create_table_in_sqlite_db'):\n attribute.create_table_in_sqlite_db()", "def _create(self, tables, views, schema_name, config):\n if not isinstance(tables, dict):\n return False # Raise Exception That Tables Are In A Wrong Format???!!!\n success = True\n if schema_name is not None:\n self._create_schema(schema_name)\n for table_name_instance in tables.items():\n if self._create_table(table_name_instance[1]) is False:\n success = False\n break\n if isinstance(views, dict):\n for view_name_instance in views.items():\n if self._create_view(view_name_instance[1], schema_name, config) is False:\n success = False\n break\n return success", "def create(self, overwrite_existing=False):\r\n\r\n #### Begin functionality here\r\n if debug: eprint(\"INFO: Creating database \" + self.filename)\r\n if os.path.exists(self.filename):\r\n os.remove(self.filename)\r\n engine = create_engine(\"sqlite:///\"+self.filename)\r\n Base.metadata.create_all(engine)\r\n self.connect()\r\n return()", "def is_overwrite_all(self):\n return self._tag == 'overwrite_all'", "def create_databases(self, overwrite = False):\r\n self.validate_config()\r\n self.template_runner.create_databases(overwrite)", "def create_tables( self ) :\n return self._create_tables", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "async def _create_tables_declarative(self, base, engine):\n if hasattr(base, 'metadata'):\n base.metadata.create_all(bind=engine, checkfirst=True)\n return", "def post_migrations(self):", "def create_table_execute(self):\n self.execute(query=self.default_template.format(self.table_name), data=None)", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n 
c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def create_staging_schema(cursor,table_schema):\n create_schema = \"CREATE SCHEMA IF NOT EXISTS \" + table_schema + \";\"\n cursor.execute(create_schema)", "def apply_patch():\n assert BaseDatabaseSchemaEditor is not None\n\n def _create_unique_sql(self, *args, **kwargs):\n from django.db.backends.ddl_references import IndexName\n\n statement = orig_create_unique_sql(self, *args, **kwargs)\n\n if statement is not None:\n index_name = statement.parts['name']\n\n if (isinstance(index_name, IndexName) and\n index_name.create_index_name == self._create_index_name):\n # The result will be unquoted. Let's quote it.\n index_name.create_index_name = lambda *args, **kwargs: \\\n self.quote_name(self._create_index_name(*args, **kwargs))\n\n return statement\n\n orig_create_unique_sql = BaseDatabaseSchemaEditor._create_unique_sql\n BaseDatabaseSchemaEditor._create_unique_sql = _create_unique_sql", "def __createFields(self):\n fields = self.updateFields\n for field in fields:\n self.__createField(field)", "def create_triggers(self):\n triggers = self.get_source_triggers()\n if not triggers:\n self.create_insert_trigger()\n self.create_update_trigger()\n self.create_delete_trigger()\n self.commit()", "def PreCreate(self, pre):\r\n pass", "def PreCreate(self, pre):\r\n pass", "def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)", "def begin_create(self):\n self.mdb.get().begin_create()", "def create_db_execute(self):\n self.execute(query=self.db_create_schema.format(self.db_name))", "def create_tables(c, drop_all = False):\n\tprint \"create_tables [status]: create tables triggered.\"\n\tautoincrement = \" autoincrement\"\n\tglobal Options, Database\n\tif drop_all:\n\t\tprint \"create_tables [status]: dropping tables initiated.\"\n\t\tif Database.type == DatabaseTypes.SQLITE:\n\t\t\tdrop_sql = open(mk_drop(\"sqlite\")).read()\n\t\telif Database.type == DatabaseTypes.MYSQL:\n\t\t\tdrop_sql = open(mk_drop(\"mysql\")).read()\n\t\telif Database.type == DatabaseTypes.POSTGRES:\n\t\t\tdrop_sql = open(mk_drop(\"postgres\")).read()\n\t\tdrop_list = drop_sql.split(\";\")\n\t\tfor line in drop_list:\n\t\t\tquery = line.strip()\n\t\t\tif query:\n\t\t\t\tc.execute(line)\n\t\tprint \"create_tables [status]: dropping tables complete.\"\n\tif Database.type == DatabaseTypes.SQLITE:\n\t\tdbf = open(mk_schema(\"sqlite\", Options.use_dict))\n\t\tquery_list = dbf.read()\n\t\tc.executescript(query_list)\n\telif Database.type == DatabaseTypes.MYSQL:\n\t\tdbf = open(mk_schema(\"mysql\", Options.use_dict))\n\t\texecutescript(c, dbf)\n\telif Database.type == DatabaseTypes.POSTGRES:\n\t\tdbf = open(mk_schema(\"postgres\", Options.use_dict))\n\t\texecutescript(c, dbf)", "def __createTableIfNotExists(self):\n query = \"SELECT name FROM sqlite_master WHERE type='table'\"\n results = self._cursor.execute(query)\n name_list = [name for r in results for name in r]\n\n if self.table_name not in name_list:\n # 
Timestamp is for the time from pythons datetime.now() func\n table = \"\"\"CREATE TABLE {}\n (media TEXT PRIMARY KEY,\n thumbnail TEXT, regular TEXT, large TEXT,\n [timestamp] timestamp)\n \"\"\".format(self.table_name)\n self._cursor.execute(table)", "def create_delta_table(self):\n self.execute_sql(\n sql.create_delta_table(\n self.delta_table_name,\n self.IDCOLNAME,\n self.DMLCOLNAME,\n self._old_table.engine,\n self.old_column_list,\n self._old_table.name,\n )\n )\n self.add_drop_table_entry(self.delta_table_name)\n # We will break table into chunks when calculate checksums using\n # old primary key. We need this index to skip verify the same row\n # for multiple time if it has been changed a lot\n if self._pk_for_filter_def and not self.is_full_table_dump:\n self.execute_sql(\n sql.create_idx_on_delta_table(\n self.delta_table_name,\n [col.name for col in self._pk_for_filter_def],\n )\n )", "def fix_incremental(meta, bind):\n meta.create_all(bind=bind, checkfirst=True)\n ref = inspect(bind)\n for table in meta.sorted_tables:\n orm_cols = set(col.name for col in table.c)\n ref_cols = set(col['name'] for col in ref.get_columns(table.name))\n col_to_create = orm_cols - ref_cols\n col_to_delete = ref_cols - orm_cols\n if col_to_create:\n print table.name, 'has diff to create', col_to_create\n with bind.begin() as conn:\n for col_name in col_to_create:\n col = table.c.get(col_name)\n column_sql = CreateColumn(col).compile(bind).string\n sql = 'ALTER TABLE {} ADD COLUMN {}'.format(table.name, column_sql)\n if col.default:\n sql += ' DEFAULT {!r}'.format(col.default.arg) # can break when a pickle type has callable default.\n if not col.nullable:\n sql += ' NOT NULL'\n print 'executing sql: ' + sql\n conn.execute(sql)\n\n # Workaround to ensure updated DBs start with \"False\" in ignore column\n if list(col_to_create)[0] == 'ignore':\n sessionmaker = get_sessionmaker(bind.url.database)\n session = sessionmaker()\n query_object = {'dttrialdff0s': DTTrialDff0, 'trials': Trial}[table.name]\n items = session.query(query_object).all()\n for item in items:\n item.ignore = False\n session.flush()\n\n if col_to_delete:\n print table.name, 'has diff to delete', col_to_delete, 'maybe later version.'\n \"\"\"\n BEGIN TRANSACTION;\n CREATE TEMPORARY TABLE t1_backup(a,b);\n INSERT INTO t1_backup SELECT a,b FROM t1;\n DROP TABLE t1;\n CREATE TABLE t1(a,b);\n INSERT INTO t1 SELECT a,b FROM t1_backup;\n DROP TABLE t1_backup;\n COMMIT;\n \"\"\"", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def pre_route_table_create(self, resource_dict):\n pass", "def before_insert(mapper, conn, target):\n if target.sequence_id is None:\n sql = text(\n '''SELECT max(t_sequence_id)+1 FROM tables WHERE t_d_id = :did''')\n\n max_id, = conn.execute(sql, did=target.d_id).fetchone()\n\n if not max_id:\n max_id = 1\n\n target.sequence_id = max_id\n\n Table.before_update(mapper, conn, target)", "def create_tables(self, tables=None):\n LOG.debug(f\"Creating table subset {tables}\")\n Base.metadata.create_all(self.engine, tables, checkfirst=False)", "def create(self):\n c = self.cursor()\n byte_schema = pkgutil.get_data(__package__, 'schema.sql')\n c.executescript(byte_schema.decode('utf-8'))\n self.commit()", "def ProcessDatasetOverwrite(ref, args, request):\n del ref\n dataset_id = request.dataset.datasetReference.datasetId\n project_id = request.projectId\n\n if args.overwrite:\n if _DatasetExists(dataset_id, project_id):\n _TryDeleteDataset(dataset_id, project_id)\n\n return request", "def 
create_tables(self):\n if self.mock:\n mock_dynamodb2(self._create_tables())\n else:\n self._create_tables()", "def _create_db(self, overwrite=False):\n current = list(self._cur.execute(\"select * from sqlite_master where type='table' and name='metrics'\"))\n if overwrite and len(current) >= 1:\n self._cur.execute('''DROP TABLE IF EXISTS metrics''')\n self._conn.commit()\n elif len(current) >= 1:\n self._fields = [x[1] for x in sorted(self._cur.execute('''PRAGMA table_info(metrics)'''))]\n return None\n self._cur.execute('''CREATE TABLE metrics (model_name text, operation_name text, metric_name text, metric_type text, metric_value real)''')\n self._fields = [\"model_name\", \"operation_name\", \"metric_name\", \"metric_type\", \"metric_value\"]\n self._conn.commit()", "def _postprocess_arena(self):\n # Create tables\n for i, (offset, rot, half_size, friction, legs) in enumerate(\n zip(self.table_offsets, self.table_rots, self.table_half_sizes, self.table_frictions, self.has_legs)\n ):\n self._add_table(\n name=f\"table{i}\",\n offset=offset,\n rot=rot,\n half_size=half_size,\n friction=friction,\n has_legs=legs,\n )", "def make_new_tbl(self):\n debug = False\n default_dd = getdata.get_default_db_dets()\n con, cur = default_dd.con, default_dd.cur\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n tblname = self.tblname_lst[0]\n if debug: print(f'DBE in make_new_tbl is: {default_dd.dbe}')\n getdata.make_sofa_tbl(\n con, cur, tblname, oth_name_types, headless=False)\n wx.MessageBox(\n _('Your new table has been added to the default SOFA database'))", "def pre_init_hook(cr):\n try:\n cr.execute(\n \"\"\"\n ALTER TABLE product_product\n ADD pricelist_id INTEGER REFERENCES product_pricelist(id),\n ADD currency_id INTEGER REFERENCES res_currency(id);\n ALTER TABLE product_template\n ADD currency_id INTEGER REFERENCES res_currency(id);\n \"\"\"\n )\n except psycopg2.ProgrammingError:\n cr.rollback()\n\n try:\n cr.execute(\n \"\"\"\n UPDATE product_product\n SET pricelist_id = 1,\n currency_id = 20\n \"\"\"\n )\n except psycopg2.ProgrammingError:\n cr.rollback()\n\n return True", "def use_table(self):\n connection = self._get_connection()\n cursor = connection.cursor()\n cursor.execute(\n 'select exists(select * from information_schema.tables where table_name=%s)',\n (self.table,),\n )\n if cursor.fetchone()[0]:\n self.logger.info('Using existing table')\n else:\n try:\n cursor.execute(\n f'CREATE TABLE {self.table} ( \\\n ID VARCHAR PRIMARY KEY, \\\n DOC BYTEA);'\n )\n self.logger.info('Successfully created table')\n except (Exception, psycopg2.Error) as error:\n self.logger.error('Error while creating table!')\n connection.commit()\n self._close_connection(connection)", "def recreate_non_unique_indexes(self):\n # Skip replaying changes for now, if don't have to recreate index\n if not self.droppable_indexes:\n return\n\n self.set_innodb_tmpdir(self.outfile_dir)\n # Execute alter table only if we have index to create\n if self.droppable_indexes:\n self.ddl_guard()\n log.info(\n \"Recreating indexes: {}\".format(\n \", \".join(col.name for col in self.droppable_indexes)\n )\n )\n self.execute_sql(sql.add_index(self.new_table_name, self.droppable_indexes))", "def creates(f):\n f.creates = True\n return f", "def no_overwrite_example():", "def recreate():\n drop()\n create()", "def recreate_db(self, run=False):\n if run:\n db_schema = open(self.db_schema_file).read().splitlines()\n for s in db_schema:\n t = s.strip()\n if len(t):\n self.cur.execute(t)", "def 
create_all_schemas_and_tables(self):\n for schema, tables in self.schemas.items():\n self.create_schema(schema)\n for table in tables.keys():\n self.create_table(schema, table)", "def create_all_tables():\n\tcommon_db.create_all_tables()", "def markPartitionCreated(partitionTableName):\n global partitionCreationHistory\n partitionCreationHistory.add(partitionTableName)", "def _auto_create(self):\n status = [\n os.path.exists(self.vertices_path),\n os.path.exists(self.edges_path),\n ]\n\n if not all(status):\n self._create_vertex_skel(self.path)\n self._create_edge_skel(self.path)", "def create_gt_database_template_old(self):\n pass\n with self.connection as cursor:\n fn = os.path.join(os.path.dirname(__file__), 'gtlogold.sql')\n self.cursor.execute(open(fn, \"r\").read())", "def on_ok(self, _event):\n dd = mg.DATADETS_OBJ\n if self.read_only:\n self.exiting = True\n self.Destroy()\n else:\n ## NB any changes defined in recode are already done\n new_tbl, tblname_changed, data_changed = self.get_change_status()\n if new_tbl or tblname_changed or data_changed:\n try:\n if not new_tbl:\n orig_tblname = self.tblname_lst[0]\n dd.set_tbl(tbl=orig_tblname)\n else:\n dd.set_tbl(tbl=None)\n self.make_changes()\n self.exiting = True\n self.Destroy()\n self.SetReturnCode(mg.RET_CHANGED_DESIGN)\n except FldMismatchException:\n wx.MessageBox(\n _('Unable to modify table. Some data does not match the'\n ' column type. Please edit and try again.'))\n return\n except Exception as e:\n wx.MessageBox(\n _(\"Unable to modify table.\\nCaused by error: %s\")\n % b.ue(e))\n return\n elif self.changes_made: ## not in tableconf. Must've been in recoding\n self.exiting = True\n self.Destroy()\n self.SetReturnCode(mg.RET_CHANGED_DESIGN)\n return\n else:\n wx.MessageBox(_('No changes to update.'))\n return", "def create_marker_table(self):\n connection = self.connect(autocommit=True)\n cursor = connection.cursor()\n try:\n cursor.execute(\n \"\"\" CREATE TABLE {marker_table} (\n id BIGINT(20) NOT NULL AUTO_INCREMENT,\n update_id VARCHAR(128) NOT NULL,\n target_table VARCHAR(128),\n inserted TIMESTAMP DEFAULT NOW(),\n PRIMARY KEY (update_id),\n KEY id (id)\n )\n \"\"\"\n .format(marker_table=self.marker_table)\n )\n except mysql.connector.Error as e:\n if e.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n pass\n else:\n raise\n connection.close()", "def _apply(self):\n s = [(iptables_save, iptables_restore, self.ipv4)]\n if self.use_ipv6:\n s += [(ip6tables_save, ip6tables_restore, self.ipv6)]\n\n for save, restore, tables in s:\n all_tables, _err = save()\n all_lines = all_tables.split('\\n')\n for table_name, table in six.iteritems(tables):\n start, end = self._find_table(all_lines, table_name)\n all_lines[start:end] = self._modify_rules(\n all_lines[start:end], table, table_name)\n table.dirty = False\n restore('\\n'.join(all_lines))", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise 
Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def save_alter_sql(self, table_name):\n\t\talter_sql = ''\n\t\tscripted_alters = []\n\t\tcolNames,sql = self._generate_sql_parts(table_name)\n\t\tfor elem in self.joins:\n\t\t\tcolName = '{}.{}'.format(elem.Parent,elem.Column)\n\t\t\tprint(colName)\n\t\t\tif colName in colNames and colName not in scripted_alters:\n\t\t\t\tif colName == 'CommunityMart.dbo.PersonFact.SourceSystemClientID' and 'CommunityMart.dbo.PersonFact.SourceSystemClientID' not in scripted_alters:\n\t\t\t\t\tscripted_alters.append(colName)\n\t\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF NOT EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ADD {} {} NULL;\\n\\tPRINT '{}';\\nEND\\n\".format('PatientID',table_name.split('.')[2],table_name,'PatientID','int','PatientID')\n\t\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ALTER COLUMN {} {} NULL;\\n\\tPRINT '{}';\\nEND\\n\".format('PatientID',table_name.split('.')[2],table_name,'PatientID','int','PatientID')\n\t\t\t\t\talter_sql += alter\n\t\t\t\tscripted_alters.append(colName)\n\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF NOT EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ADD {} {} NULL;\\n\\tPRINT '{}';\\nEND\\n\".format(elem.Column,table_name.split('.')[2],table_name,elem.Column,elem.Datatype,colName)\n\t\t\t\talter = \"\\nUSE CommunityMart\\nGO\\n\\nIF EXISTS(SELECT * FROM sys.columns AS col WHERE col.name = '{}' AND OBJECT_NAME(col.object_id) = '{}') \\nBEGIN\\n\\tALTER TABLE {} ALTER COLUMN {} {} NULL;\\n\\tPRINT '{}';\\nEND\\n\".format(elem.Column,table_name.split('.')[2],table_name,elem.Column,elem.Datatype,colName)\n\n\t\t\t\talter_sql += alter\n\t\t\t\tprint(alter)\n\t\twith open('Table/ALTER-{}.sql'.format(table_name),'w') as fout:\n\t\t\tself.altered_tables.append(table_name)\n\t\t\tfout.write(alter_sql)\n\t\treturn alter_sql", "async def exec_post_create(self) -> int:\n stmt_count = 0\n for table in self._new_table_queue:\n sql = get_post_create(table)\n for stmt in sql:\n await self.conn.execute(stmt)\n stmt_count += len(sql)\n return stmt_count", "def create_tables():\n db.create_all()", "def create_table_statements() -> [str]:\n pass", "def has_desired_schema(self):\n if self._new_table == self._old_table:\n if not self.rebuild:\n log.info(\"Table already has the desired schema. \")\n return True\n else:\n log.info(\n \"Table already has the desired schema. 
However \"\n \"--rebuild is specified, doing a rebuild instead\"\n )\n return False\n return False", "def _remake_table(self, table_name, renames={}, deleted=[], altered={}):\r\n # Dry runs get skipped completely\r\n if self.dry_run:\r\n return\r\n # Temporary table's name\r\n temp_name = \"_south_new_\" + table_name\r\n # Work out the (possibly new) definitions of each column\r\n definitions = {}\r\n cursor = self._get_connection().cursor()\r\n for column_info in self._get_connection().introspection.get_table_description(cursor, table_name):\r\n name = column_info[0]\r\n type = column_info[1]\r\n # Deal with an alter (these happen before renames)\r\n if name in altered:\r\n type = altered[name]\r\n # Deal with a rename\r\n if name in renames:\r\n name = renames[name]\r\n # Add to the defs\r\n definitions[name] = type\r\n # Alright, Make the table\r\n self.execute(\"CREATE TABLE %s (%s)\" % (\r\n self.quote_name(temp_name),\r\n \", \".join([\"%s %s\" % (self.quote_name(cname), ctype) for cname, ctype in definitions.items()]),\r\n ))\r\n # Copy over the data\r\n self._copy_data(table_name, temp_name, renames)\r\n # Delete the old table, move our new one over it\r\n self.delete_table(table_name)\r\n self.rename_table(temp_name, table_name)", "def abstract_write(self, *params): \n section = self.table_section_from_parameter(*params)\n row_args = self.table_row_args_from_parameter(*params)\n new_row = self.table_row_class(*row_args)\n # IMPORTANTE! ASUMO QUE UN BLOQUE INVALIDO ES IGUAL A UNO VACIO!\n # TO DO: PREGUNTAR EN ISSUE\n found_index = False\n for index, table_row in enumerate(section.copy()): \n if table_row is None or not table_row.valid:\n found_index = True\n overwrite_index = index\n break\n\n if not found_index:\n overwrite_index = self.next_replace(section)\n\n replace_index = self.index_mapping_from_parameter(overwrite_index, *params)\n\n\n old_line = self.table[replace_index]\n #print(f\"{self.__class__.__name__} Replace -> Index: {replace_index}\")\n\n # Perfom the actual write\n self.table[replace_index] = new_row", "def setupAfterCreate( self, p, create_userfolder ):\n self.setupStorage( p, create_userfolder )", "def create_new_index(self, dict_pg_info):\n # ! Setting if fun can use default setting\n ruler = Rules()\n str_conn = ruler.pg_info_rules(dict_pg_info)\n conn = psycopg2.connect(str_conn)\n\n with conn:\n with conn.cursor() as cur:\n str_create_table = \"CREATE TABLE \" + dict_pg_info['table'] + \" (path varchar PRIMARY KEY);\"\n # ! Check if table already exit\n cur.execute(str_create_table)\n cur.close()\n\n conn.close()", "def _create_table_if_not_exists(self) -> None:\n COLUMN_DEFINITIONS = 'definitions'\n COLUMN_TYPE = 'type'\n\n KEY_REF = '$ref'\n\n TYPE_LOOKUP = {\n 'string': 'VARCHAR(255)',\n 'integer': 'INTEGER',\n 'boolean': 'BOOLEAN',\n 'number': 'INTEGER',\n }\n\n def ref_lookup(\n property: Dict[str, Any], fields: Dict[str, Any]\n ) -> Dict[str, Any]:\n ref = property[KEY_REF]\n property_lookup_name = ref[ref.rfind('/') + 1 :]\n return fields[COLUMN_DEFINITIONS][property_lookup_name]\n\n field_queries = []\n fields = json.loads(self.schema.schema_json())\n\n del fields[Keywords.Properties.value][\n Keywords.ID.value\n ] # Remove primary key field. 
It is handled with auto increment below.\n\n for property_name, property in fields[Keywords.Properties.value].items():\n if KEY_REF in property:\n property = ref_lookup(property, fields)\n field_queries.append(\n f'{property_name} {TYPE_LOOKUP[property[COLUMN_TYPE]]}'\n )\n table_columns = ', '.join(field_queries)\n\n with connect(**BaseModel.db_settings) as connection:\n cursor = connection.cursor()\n cursor.execute(\n f'CREATE TABLE IF NOT EXISTS {self.table_name} (ID INTEGER PRIMARY KEY AUTO_INCREMENT, {table_columns})'\n )\n self._table_created[self.table_name] = True", "def create_tables(args):\n\n from bob.db.base.utils import create_engine_try_nolock\n\n engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose >= 2));\n Client.metadata.create_all(engine)\n File.metadata.create_all(engine) \n Annotation.metadata.create_all(engine)\n #Protocol_File_Association.metadata.create_all(engine)", "def create(self, table, columns, types, primary_key_index=[], is_ifnotexists=True):\n\n self.lock.acquire()\n try:\n dblist = self.client.get_list_database()\n for dbdict in dblist:\n if self.dbname in dbdict[\"name\"]:\n self.lock.release()\n return True\n\n self.client.create_database(self.dbname)\n except Exception as e:\n raise Exception(\"Error in create statement; InfluxDb, DB=%s\\n\" % self.dbname)\n\n self.lock.release()\n\n return True", "def fill_target_table(new_data, curs, conn, overwrite=False):\n for i in new_data:\n connect_database.add_target_to_database(list(i), curs, conn, overwrite_exsiting = overwrite)\n conn.commit()", "def do_createuser(self, *args):\n self.connection_obj.initialize_table()\n print(\"UserTable Created Successful\")", "def PreCreate(self, pre):\r\n \r\n pass", "def process_IN_CREATE(self, event):", "def createTables(self):\n metadata = Base.metadata\n metadata.create_all(self._engine)\n return", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def _create_pre_commit(destination, template, context):\n # Is there already a hook?\n if isfile(destination) and not _pre_commit_has_hallmark(destination):\n raise PreCommitExists('{0} already exists'.format(destination))\n\n with open(destination, 'w') as fh:\n fh.write(template.format(**context))\n\n sinfo = stat(destination)\n mode = sinfo.st_mode | S_IXUSR | S_IXGRP | S_IXOTH\n\n # Make sure it's executable\n chmod(destination, mode)\n\n return destination", "def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)", "def post_route_table_create(self, resource_dict):\n pass", "def create_table(self, table_name: str) -> None:\n self._table_name = table_name\n meta_info = self._trip.get_meta_informations()\n\n # Check if event table already exists\n for event in meta_info.get_events_list():\n if event.get_name() == table_name:\n self._meta_event = event\n log.debug(f\"{table_name} table already exists 
in trip\")\n break\n else:\n log.debug(f\"Creating meta event {table_name}\")\n self._meta_event = MetaEvent()\n self._meta_event.set_name(table_name)\n # TODO Can't set isBase here because we're adding variables just below\n # meta_event.set_is_base(True)\n self._meta_event.set_comments(\"Imported from pynd's rec2trip\")\n # TODO How to set other metaevent values? (e.g. comment, etc.)\n self._trip.add_event(self._meta_event)", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT 
CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n 
;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 
'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "def check_db_schema(self):\n if not self.db.get_tables():\n self.create_db_schema()", "def test_overwrites(self):\n\n extra_con = set([Constraint('fake', ['OVERWRITE'])])\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/%fake%/%file%/%pattern%.txt',\n 'echo', extra_constraints=extra_con)\n\n ds_result = the_process_unit.execute(simulate=True)\n\n expected_in_cons = set([Constraint('fake', ['fake_1']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n expected_out_cons = set([Constraint('fake', ['OVERWRITE']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n\n self.assertEqual(expected_in_cons, self.a_pattern_ds.constraints)\n self.assertEqual(expected_out_cons, ds_result.constraints)", "def pre_database_node_create(self, resource_dict):\n pass", "def create_tables(self):\n if not self.is_enabled(Subsystem.database):\n raise RuntimeError(\"Database subsystem was not enabled\")\n\n Base.metadata.create_all(self.engine)" ]
[ "0.6433206", "0.6371944", "0.6202041", "0.61849165", "0.60638314", "0.5852797", "0.580371", "0.5792183", "0.5743237", "0.5713558", "0.5528236", "0.5432233", "0.54164076", "0.5352115", "0.5333362", "0.5304936", "0.52447927", "0.5226724", "0.52159977", "0.52028906", "0.5177918", "0.51768345", "0.51553905", "0.51210314", "0.5118915", "0.5118286", "0.5114869", "0.50867444", "0.50867444", "0.50867444", "0.50867444", "0.50867444", "0.50833327", "0.5080407", "0.50796217", "0.5064261", "0.5053959", "0.50513977", "0.50513977", "0.5047625", "0.5044848", "0.5031597", "0.50238246", "0.5020486", "0.5012776", "0.5011942", "0.5009241", "0.5009241", "0.4990686", "0.49874055", "0.49873638", "0.49861166", "0.49819392", "0.49803182", "0.49643046", "0.495394", "0.49347922", "0.49271744", "0.4924157", "0.4916718", "0.49069428", "0.4903045", "0.48969543", "0.4896464", "0.48948598", "0.48929358", "0.4888368", "0.48866692", "0.48780018", "0.4877153", "0.48725364", "0.48714048", "0.4871308", "0.48706442", "0.4863335", "0.48595682", "0.4855364", "0.48437744", "0.483928", "0.4839191", "0.48301318", "0.48292667", "0.4828595", "0.48168877", "0.48140103", "0.48131076", "0.48108727", "0.48098972", "0.4808456", "0.4808193", "0.48070052", "0.48069108", "0.48049206", "0.48037234", "0.479842", "0.47931126", "0.4792066", "0.47875252", "0.47845593", "0.47816607" ]
0.6969729
0
Process the overwrite flag on tables copy.
def ProcessTableCopyOverwrite(ref, args, request): del ref # Unused if args.overwrite: request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE' return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ProcessTableOverwrite(ref, args, request):\n dataset_id = ref.datasetId\n table_id = ref.Name()\n project_id = ref.projectId\n\n if args.overwrite:\n if _TableExists(dataset_id, table_id, project_id):\n _TryDeleteTable(dataset_id, table_id, project_id)\n\n return request", "def process_overrides(self, db, dest, kvargs, lines):\n logging.info(\"process_overrides db:{} dest:{} kvargs:{} \".format(db.name,dest,kvargs))\n keyword = kvargs['keyword']\n db.create_overrides(keyword)\n return True", "def is_overwrite_all(self):\n return self._tag == 'overwrite_all'", "def test_overwrites(self):\n\n extra_con = set([Constraint('fake', ['OVERWRITE'])])\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/%fake%/%file%/%pattern%.txt',\n 'echo', extra_constraints=extra_con)\n\n ds_result = the_process_unit.execute(simulate=True)\n\n expected_in_cons = set([Constraint('fake', ['fake_1']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n expected_out_cons = set([Constraint('fake', ['OVERWRITE']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n\n self.assertEqual(expected_in_cons, self.a_pattern_ds.constraints)\n self.assertEqual(expected_out_cons, ds_result.constraints)", "def needPartitionTableUpdate(self):\n n_table=list()\n d_table=self.destination.getPartitionTable()\n s_table=self.source.getPartitionTable()\n for i in range(len(s_table)):\n n_table.append(re.sub(self.source.getDeviceName(), \\\n self.destination.getDeviceName(), \\\n s_table[i]))\n if d_table == n_table:\n return False\n else:\n return True", "def no_overwrite_example():", "def _apply(self):\n s = [(iptables_save, iptables_restore, self.ipv4)]\n if self.use_ipv6:\n s += [(ip6tables_save, ip6tables_restore, self.ipv6)]\n\n for save, restore, tables in s:\n all_tables, _err = save()\n all_lines = all_tables.split('\\n')\n for table_name, table in six.iteritems(tables):\n start, end = self._find_table(all_lines, table_name)\n all_lines[start:end] = self._modify_rules(\n all_lines[start:end], table, table_name)\n table.dirty = False\n restore('\\n'.join(all_lines))", "def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))", "def ProcessDatasetOverwrite(ref, args, request):\n del ref\n dataset_id = request.dataset.datasetReference.datasetId\n project_id = request.projectId\n\n if args.overwrite:\n if _DatasetExists(dataset_id, project_id):\n _TryDeleteDataset(dataset_id, project_id)\n\n return request", "def pre_osc_check(self):\n # Make sure temporary table we will use during copy doesn't exist\n self.table_check()\n self.decide_pk_for_filter()\n\n # Check if we can have indexes in new table to efficiently look up\n # current old pk combinations\n if not self.validate_post_alter_pk():\n self.table_size = self.get_table_size(self.table_name)\n if self.skip_pk_coverage_check:\n log.warning(\n \"Indexes on new table cannot cover current PK of \"\n \"the old schema, which will make binary logs replay \"\n \"in an inefficient way.\"\n )\n elif self.table_size < self.pk_coverage_size_threshold:\n log.warning(\n \"No index on new table can cover old pk. 
Since this is \"\n \"a small table: {}, we fallback to a full table dump\".format(\n self.table_size\n )\n )\n # All columns will be chosen if we are dumping table without\n # chunking, this means all columns will be used as a part of\n # the WHERE condition when replaying\n self.is_full_table_dump = True\n self._pk_for_filter = [col.name for col in self._old_table.column_list]\n self._pk_for_filter_def = self._old_table.column_list.copy()\n elif self.is_full_table_dump:\n log.warning(\n \"Skipping coverage index test, since we are doing \"\n \"full table dump\"\n )\n else:\n old_pk_names = \", \".join(\n \"`{}`\".format(col.name)\n for col in self._old_table.primary_key.column_list\n )\n raise OSCError(\"NO_INDEX_COVERAGE\", {\"pk_names\": old_pk_names})\n\n log.info(\n \"PK filter for replaying changes later: {}\".format(self._pk_for_filter)\n )\n\n self.foreign_key_check()\n self.trigger_check()\n self.init_range_variables()\n self.get_table_chunk_size()\n self.make_chunk_size_odd()\n self.check_disk_size()\n self.ts_bootstrap_check()\n self.drop_columns_check()", "def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def test_merge_overwrite_missing_source_key(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"D\"] = \"new\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"D\": \"new\"})\n self.assertEqual(mdict, ret)", "def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name())\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.projectId',\n destination_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.destinationTable.tableId',\n destination_ref.Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.datasetId',\n source_ref.Parent().Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.projectId',\n source_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.tableId',\n source_ref.Name())\n return request", "def sectional_overwrite_check(self):\n\n for rule in self.options['sectional_overwrite']:\n if self.lineage_test(rule):\n return True\n return False", "def run(self):\n if not (self.table and self.columns):\n raise Exception(\"table and columns need to be specified\")\n\n connection = self.output().connect()\n\n # attempt to copy the data into mysql\n # if it fails because the target table doesn't exist\n # try to create it by running self.create_table\n for attempt in range(2):\n try:\n cursor = connection.cursor()\n print(\"caling init copy...\")\n self.init_copy(connection)\n self.copy(cursor)\n self.post_copy(connection)\n if self.enable_metadata_columns:\n self.post_copy_metacolumns(cursor)\n except Error as err:\n if err.errno == errorcode.ER_NO_SUCH_TABLE and attempt == 0:\n # if first attempt fails with \"relation not found\", try creating table\n # logger.info(\"Creating table %s\", self.table)\n connection.reconnect()\n self.create_table(connection)\n else:\n raise\n else:\n break\n\n # mark as complete in same transaction\n self.output().touch(connection)\n connection.commit()\n 
connection.close()", "def onUndo(self):\n pass", "def overwrite_all ( self ):\n return self.value == self.OV_ALL", "def undo(self):\n LOG.debug(\"In the undo method, will attempt to restore\")\n\n # validate detected nothing to do for this, nothing was done\n # for execute, so simply return\n if self.no_op:\n return\n\n if not self.source_dev or not self.target_dev:\n return\n LOG.debug(\"The source dictionary is: %s\", self.source_dict_restore)\n LOG.debug(\"The target dictionary is: %s\", self.target_dict_restore)\n\n # In scenario where no source IP Address...\n if self.source_dict_restore:\n self.commandex.send_ifcfg(self.source_dev,\n self.source_dict_restore)\n\n # May have failed because the ifcfg didn't even exist, nothing\n # to roll back then\n if self.target_dict_restore:\n self.commandex.send_ifcfg(self.target_dev,\n self.target_dict_restore)", "def abstract_write(self, *params): \n section = self.table_section_from_parameter(*params)\n row_args = self.table_row_args_from_parameter(*params)\n new_row = self.table_row_class(*row_args)\n # IMPORTANTE! ASUMO QUE UN BLOQUE INVALIDO ES IGUAL A UNO VACIO!\n # TO DO: PREGUNTAR EN ISSUE\n found_index = False\n for index, table_row in enumerate(section.copy()): \n if table_row is None or not table_row.valid:\n found_index = True\n overwrite_index = index\n break\n\n if not found_index:\n overwrite_index = self.next_replace(section)\n\n replace_index = self.index_mapping_from_parameter(overwrite_index, *params)\n\n\n old_line = self.table[replace_index]\n #print(f\"{self.__class__.__name__} Replace -> Index: {replace_index}\")\n\n # Perfom the actual write\n self.table[replace_index] = new_row", "def touch(self):\n if self.marker_table_bound is None:\n self.create_marker_table()\n\n table = self.marker_table_bound\n id_exists = self.exists()\n with self.engine.begin() as conn:\n if not id_exists:\n ins = table.insert().values(\n ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n else:\n ins = table.update().where(sqlalchemy.and_(\n table.c.ParquetSource == self.parquet_source,\n table.c.Environment == self.environment,\n table.c.TargetTable == self.target_table)).\\\n values(ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n conn.execute(ins)\n assert self.exists()", "def process_dump_files(self):\n if not self.open_workbooks():\n exit()\n\n self.message('*****************************************************************************')\n self.message('Only columns that exist in both the dump and destination file will be synced.')\n self.message('They must also match exactly including spelling and capitalization.')\n self.message('*****************************************************************************')\n\n self.wb_destination.active = self.wb_destination['Hypercare Incidents']\n self.parse_dump_file(self.wb_incident.active, self.wb_destination.active, self.fn_incident)\n\n self.wb_destination.active = self.wb_destination['Hypercare Defects']\n self.parse_dump_file(self.wb_defect.active, self.wb_destination.active, self.fn_defect)\n\n self.wb_destination.active = self.wb_destination['Hypercare Enhancements']\n self.parse_dump_file(self.wb_enhancement.active, self.wb_destination.active, self.fn_enhancement)\n\n self.wb_destination.active = self.wb_destination['ALM Defects']\n 
self.parse_dump_file(self.wb_alm.active, self.wb_destination.active, self.fn_alm)", "def process_overrides(recipes, args, production_cat, pkginfo_template):\n for recipe in recipes:\n print SEPARATOR\n\n if recipe in RECIPE_EXCLUSIONS:\n print_error(\"Not overriding %s because it is in the list of \"\n \"exclusions.\" % recipe)\n continue\n if recipe.startswith(\"local\"):\n print_error(\"Not overriding %s because it _is_ an override.\" %\n recipe)\n continue\n\n override_path = make_override(recipe, args.override_dir)\n if override_path is None:\n continue\n\n # Copy just-generated override's Input section to Input_Original.\n override = FoundationPlist.readPlist(override_path)\n override[\"Input_Original\"] = override[\"Input\"]\n override[\"Input\"] = {}\n override[\"Input\"][\"pkginfo\"] = {}\n\n current_version = get_current_production_version(\n production_cat, override, args)\n apply_current_or_orig_values(override, current_version, args)\n\n if not args.suppress_subdir:\n copy_package_path_to_input(override, current_version, args)\n\n if pkginfo_template:\n apply_pkginfo_template(override, pkginfo_template)\n\n FoundationPlist.writePlist(override, override_path)", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def copy_db(src=FRESHDB, dst=[APPDB]):\n for dest in dst:\n try:\n x = shutil.copy2(src, dest)\n print('File copied to {}'.format(x))\n except shutil.SameFileError:\n print('Both source and destination are identical.')", "def test_database_object_overwrite_parameter_is_set(self):\n database = generate_database_object(overwrite=True)\n\n self.assertEqual(\n True,\n database.overwrite == True,\n \"Database object did not have an overwrite flag, despite being created with one.\"\n )", "def copy_file_check(self):\n pass", "def run_copy(self, src, dst):\n pass", "def set_file_ingested(self, original_name, ingested, tablename):\n if ingested:\n prep_stmt = self.session.prepare(\n 'INSERT INTO {0} ({1}) VALUES (?, ?, ?)'.format(\n tablename, \",\".join(COLUMNS_META)\n ))\n bound = prep_stmt.bind([int(time.time()) * 1000, self.who, original_name])\n else:\n prep_stmt = self.session.prepare(\n 'DELETE FROM {0} WHERE {1}=?'.format(tablename, COLUMNS_META[2])\n )\n bound = prep_stmt.bind([original_name])\n # This is not asynchronous since this will be sent once per large file.\n self.session.execute(bound)", "def load_into_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def test_upload_overwrite(self):\n self.request.access.allow_overwrite = [\"everyone\"]\n name, filename = \"a\", \"a-1.tar.gz\"\n self.db.upload(filename, BytesIO(b\"old\"), name)\n self.db.upload(filename, BytesIO(b\"new\"), name)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 1)", "def test_overwrite_on_tape(overwrite_on_tape_topology, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert 
request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']", "def _postprocess_staging_data(self):\n super()._postprocess_staging_data()\n with self._conn, self._conn.cursor() as cursor:\n cursor.execute(sql.SQL(\"\"\"UPDATE {0} SET rat_bitmask = translate_bands_to_rat_bitmask(bands)\"\"\")\n .format(self._staging_tbl_identifier))", "def is_complete_overwrite(self) -> Optional[bool]:\n return pulumi.get(self, \"is_complete_overwrite\")", "def test_overwrite_hops(overwrite_on_tape_topology, caches_mock, did_factory, file_factory):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3),\n 'rse': rse_core.get_rse_name(rse2_id),\n 'did_scope': did1['scope'].external,\n 'did_name': did1['name'],\n 'no_register': True,\n }\n ]\n )\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n fts_schema_version = FTS3Transfertool(external_host=TEST_FTS_HOST).version()['schema']['major']\n if fts_schema_version >= 8:\n # Newer fts version will honor the overwrite_hop\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n else:\n # FTS only recently introduced the overwrite_hops parameter. 
It will be ignored on old\n # fts versions and the first hop will fail with the file exists error\n # TODO: remove this else after FTS 3.12 release and after updating rucio/fts container with the new release\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']", "def update_mapping(self, flush=False, override_chunks=[]):\n\n # get set of filenames from File objects that have already been mapped\n already_mapped_inputs = set(map(lambda x: x.get_name(),self.get_inputs(flatten=True)))\n already_mapped_outputs = map(lambda x: x.get_index(),self.get_outputs())\n nextidx = 1\n if already_mapped_outputs:\n nextidx = max(already_mapped_outputs)+1\n original_nextidx = nextidx+0\n # if dataset is \"closed\" and we already have some inputs, then\n # don't bother doing get_files() again (wastes a DBS query)\n if (len(already_mapped_inputs) > 0 and not self.open_dataset):\n files = []\n else:\n files = [f for f in self.sample.get_files() if f.get_name() not in already_mapped_inputs]\n self.queried_nevents = self.sample.get_nevents()\n\n flush = (not self.open_dataset) or flush\n prefix, suffix = self.output_name.rsplit(\".\",1)\n if self.split_within_files:\n if self.total_nevents < 1 or self.events_per_output < 1:\n raise Exception(\"If splitting within files (presumably for LHE), need to specify total_nevents and events_per_output\")\n nchunks = int(self.total_nevents / self.events_per_output)\n chunks = [files for _ in range(nchunks)]\n leftoverchunk = []\n else:\n chunks, leftoverchunk = Utils.file_chunker(files, events_per_output=self.events_per_output, files_per_output=self.files_per_output, flush=flush)\n if len(override_chunks) > 0:\n self.logger.info(\"Manual override to have {0} chunks\".format(len(override_chunks)))\n chunks = override_chunks\n leftoverchunk = []\n for chunk in chunks:\n if not chunk: continue\n output_path = \"{0}/{1}_{2}.{3}\".format(self.get_outputdir(),prefix,nextidx,suffix)\n output_file = EventsFile(output_path)\n nevents_in_output = sum(map(lambda x: x.get_nevents(), chunk))\n output_file.set_nevents(nevents_in_output)\n self.io_mapping.append([chunk, output_file])\n nextidx += 1\n if (nextidx-original_nextidx > 0):\n self.logger.info(\"Updated mapping to have {0} more entries\".format(nextidx-original_nextidx))", "def post_migrate_function(obj):\n obj.a = obj.a + u\"-modified\"\n return True", "def update_table(table_name):\n for filename in table_name_to_funcs[table_name][\"filename\"]:\n choose_file_to_get(table_name_to_funcs[table_name][\"file_type\"], filename)\n\n for process_func in table_name_to_funcs[table_name][\"process\"]:\n process_func()\n for to_sql_func in table_name_to_funcs[table_name][\"to_sql\"]:\n to_sql_func(update=True)", "async def test_cached_previous_overwrites(self):\n overwrite_json = (\n '{\"send_messages\": true, \"add_reactions\": false, \"create_private_threads\": true, '\n '\"create_public_threads\": false, \"send_messages_in_threads\": true}'\n )\n await self.cog._set_silence_overwrites(self.text_channel)\n self.cog.previous_overwrites.set.assert_awaited_once_with(self.text_channel.id, overwrite_json)", "def create_copy_table(self):\n tmp_sql_obj = deepcopy(self._new_table)\n tmp_sql_obj.name = self.new_table_name\n 
if self.rm_partition:\n tmp_sql_obj.partition = self._old_table.partition\n tmp_sql_obj.partition_config = self._old_table.partition_config\n tmp_table_ddl = tmp_sql_obj.to_sql()\n log.info(\"Creating copy table using: {}\".format(tmp_table_ddl))\n self.execute_sql(tmp_table_ddl)\n self.partitions[self.new_table_name] = self.fetch_partitions(\n self.new_table_name\n )\n self.add_drop_table_entry(self.new_table_name)\n\n # Check whether the schema is consistent after execution to avoid\n # any implicit conversion\n if self.fail_for_implicit_conv:\n obj_after = self.fetch_table_schema(self.new_table_name)\n obj_after.name = self._new_table.name\n # Ignore partition difference, since there will be no implicit\n # conversion here\n obj_after.partition = self._new_table.partition\n obj_after.partition_config = self._new_table.partition_config\n self.populate_charset_collation(obj_after)\n if self.mysql_version.is_mysql8:\n # Remove 'USING HASH' in keys on 8.0, when present in 5.6, as 8.0\n # removes it by default\n self.remove_using_hash_for_80()\n if self.is_myrocks_table:\n log.warning(\n f\"Ignore BTREE indexes in table `{self._new_table.name}` on RocksDB\"\n )\n for idx in self._new_table.indexes:\n if idx.using == \"BTREE\":\n idx.using = None\n if obj_after != self._new_table:\n raise OSCError(\n \"IMPLICIT_CONVERSION_DETECTED\",\n {\"diff\": str(SchemaDiff(self._new_table, obj_after))},\n )", "def update_table_in_file(table, source_file):\n with open(source_file, 'r') as source, \\\n tempfile.NamedTemporaryFile('w', delete=False) as temp:\n source_lines = source.readlines()\n\n table_start = index_tag_in_lines(source_lines, tag='Table Start')\n table_end = index_tag_in_lines(source_lines, tag='Table End')\n print(f'Found table_start tag at line no: {table_start}')\n print(f'Found table_end tag at line no: {table_end}')\n assert table_end > table_start, 'Table End must be after Table Start'\n\n table_written = False\n for line_no, line in enumerate(source_lines):\n if line_no <= table_start or line_no >= table_end:\n temp.write(line)\n elif not table_written: # write table once\n temp.writelines(table)\n table_written = True\n\n backup_file = source_file.with_suffix('.md.bkp')\n os.rename(source_file, backup_file)\n print(f'Original file backed up at: {backup_file}')\n\n shutil.copy(temp.name, source_file)", "def overwrite_cmd(run, batch, source, lines, basename='xrb'):\n for i, line in enumerate(lines):\n if '\\n' not in line:\n lines[i] += '\\n'\n\n filepath = grid_strings.cmd_filepath(run, batch, source=source, basename=basename)\n print(f'Writing: {filepath}')\n with open(filepath, 'w') as f:\n f.writelines(lines)", "def update_policy(self):\n self._sess.run(self._hard_copy_to_target_op);", "def copy():\n copy2(per, per_old)", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def sync_table_partitions(self) -> None:\n log.info(\"== Stage 5.1: Check table partitions are up-to-date ==\")\n\n # we're using partitions in the ddl file, skip syncing anything\n if not self.rm_partition:\n return\n # not a partitioned table, 
nothing to do\n if not self.partitions:\n return\n\n # only apply this logic to RANGE partitioning, as other types\n # are usually static\n partition_method = self.get_partition_method(\n self._current_db, self.new_table_name\n )\n if partition_method != \"RANGE\":\n return\n\n try:\n new_tbl_parts = self.list_partition_names(self.new_table_name)\n orig_tbl_parts = self.list_partition_names(self.table_name)\n\n parts_to_drop = set(new_tbl_parts) - set(orig_tbl_parts)\n parts_to_add = set(orig_tbl_parts) - set(new_tbl_parts)\n\n # information schema literally has the string None for\n # non-partitioned tables. Previous checks *should* prevent us\n # from hitting this.\n if \"None\" in parts_to_add or \"None\" in parts_to_drop:\n log.warning(\n \"MySQL claims either %s or %s are not partitioned\",\n self.new_table_name,\n self.table_name,\n )\n return\n\n if parts_to_drop:\n log.info(\n \"Partitions missing from source table \"\n \"to drop from new table %s: %s\",\n self.new_table_name,\n \", \".join(parts_to_drop),\n )\n if parts_to_add:\n log.info(\n \"Partitions in source table to add to new table %s: %s\",\n self.new_table_name,\n \", \".join(parts_to_add),\n )\n self.apply_partition_differences(parts_to_drop, parts_to_add)\n except Exception:\n log.exception(\n \"Unable to sync new table %s with orig table %s partitions\",\n self.new_table_name,\n self.table_name,\n )", "def _bmaction(old, new):\n if not old:\n return b'export'\n elif not new:\n return b'delete'\n return b'update'", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n print('staging', query)\n cur.execute(query)\n conn.commit()", "def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n \n except psycopg2.Error as e:\n print(e)", "def test_setOverwrite(self):\n fp = FilePath(self.mktemp())\n fp.setContent(b\"I love contributing to Twisted!\")\n protocol = 
self.makeConnectedDccFileReceive(fp.path, overwrite=True)\n\n self.allDataReceivedForProtocol(protocol, b\"Twisted rocks!\")\n\n self.assertEqual(fp.getContent(), b\"Twisted rocks!\")", "def need_checksum_for_changes(self):\n # We don't need to run checksum for changes, if we don't want checksum\n # at all\n if not self.need_checksum():\n return False\n if self.is_full_table_dump:\n log.warning(\n \"We're adding new primary key to the table. Skip running \"\n \"checksum for changes, because that's inefficient\"\n )\n return False\n return True", "def overwrite(self, timestamp, write_data):\n\n if (self.data.dtype != write_data.dtype ):\n logger.error(\"Conflicting data types for overwrite\")\n raise SpatialDBError (\"Conflicting data types for overwrite\")\n \n self.data[timestamp,:] = overwriteDense_ctype(self.data[timestamp,:], write_data)", "def _copy_output(src: Graph, dst: Graph):\n for n_src, n_dst in zip(src.nodes, dst.nodes):\n if n_src.op == 'output':\n n_dst.meta = n_src.meta", "async def test_preserved_other_overwrites_voice(self):\n for overwrite_json in ('{\"connect\": true, \"speak\": true}', None):\n with self.subTest(overwrite_json=overwrite_json):\n self.cog.previous_overwrites.get.return_value = overwrite_json\n\n prev_overwrite_dict = dict(self.voice_overwrite)\n await self.cog._unsilence(self.voice_channel)\n new_overwrite_dict = dict(self.voice_overwrite)\n\n # Remove these keys because they were modified by the unsilence.\n del prev_overwrite_dict[\"connect\"]\n del prev_overwrite_dict[\"speak\"]\n del new_overwrite_dict[\"connect\"]\n del new_overwrite_dict[\"speak\"]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)", "def backup_overrides(overrides, action='debug'):\n backup_path = os.path.join(BACKUP_PATH, action)\n if not os.path.exists(backup_path):\n os.makedirs(backup_path)\n field = 'user_overrides'\n for chart in overrides:\n name = chart['name']\n if name not in oidc_charts:\n LOG.warning(\"oidc-auth-apps: mismatch chart name '%s'\", name)\n if chart[field]:\n document = yaml.safe_load(chart[field])\n if not document:\n LOG.debug(\"oidc-auth-apps: %s empty document\", name)\n continue\n backup_f = '_'.join([name, field])\n backup_f = '.'.join([backup_f, 'yaml'])\n backup_f = os.path.join(backup_path, backup_f)\n try:\n with open(backup_f, 'w') as file:\n yaml.dump(document, file, default_flow_style=False)\n except IOError as e:\n LOG.error(\"oidc-auth-apps: IOError: %s; file: %s\", e, backup_f)\n return 1\n LOG.info(\"oidc-auth-apps: user_overrides backed up to %s\", backup_path)\n return 0", "def _undo_action(self):\n pass", "def need_checksum(self):\n if self.skip_checksum:\n log.warning(\"Skip checksum because --skip-checksum is specified\")\n return False\n # There's no point running a checksum compare for selective dump\n if self.where:\n log.warning(\"Skip checksum because --where is given\")\n return False\n # If the collation of primary key column has been changed, then\n # it's high possible that the checksum will mis-match, because\n # the returning sequence after order by primary key may be vary\n # for different collations\n for pri_column in self._pk_for_filter:\n old_column_tmp = [\n col for col in self._old_table.column_list if col.name == pri_column\n ]\n if old_column_tmp:\n old_column = old_column_tmp[0]\n new_column_tmp = [\n col for col in self._new_table.column_list if col.name == pri_column\n ]\n if new_column_tmp:\n new_column = new_column_tmp[0]\n if old_column and new_column:\n if not is_equal(old_column.collate, 
new_column.collate):\n log.warning(\n \"Collation of primary key column {} has been \"\n \"changed. Skip checksum \".format(old_column.name)\n )\n return False\n # There's no way we can run checksum by chunk if the primary key cannot\n # be covered by any index of the new schema\n if not self.validate_post_alter_pk():\n if self.skip_pk_coverage_check:\n log.warning(\n \"Skipping checksuming because there's no unique index \"\n \"in new table schema can perfectly cover old primary key \"\n \"combination for search\".format(old_column.name)\n )\n return False\n else:\n # Though we have enough coverage for primary key doesn't\n # necessarily mean we can use it for checksum, it has to be an\n # unique index as well. Skip checksum if there's no such index\n if not self.find_coverage_index():\n log.warning(\n \"Skipping checksuming because there's no unique index \"\n \"in new table schema can perfectly cover old primary key \"\n \"combination for search\".format(old_column.name)\n )\n return False\n return True", "def _maybe_copy_entry_to_image(instance):\n if instance.compo.is_imagefile_copied:\n name = str('th_' + os.path.basename(instance.entryfile.name))\n instance.imagefile_original.save(name, instance.entryfile)", "def update_model_output(self):\n warnings.warn(\"Please ensure that the column names of the new file accurately corresponds to the relevant column names in the exisitng file\")\n column_names_new = self.new_data.head()\n column_names_old = self.existing_data.head()\n for column_name in column_names_new:\n if column_name in column_names_old:\n self.existing_data[column_name] = self.new_data[column_name]\n \n self.existing_data.to_csv(filename_main, index = False)", "def overwrite_original_file(self):\n return self.__overwrite_original_file", "def undo():", "def do_before_dump(self):\n self.checksummer.prepare_checksums()", "def scheduleTableAsyncCopy(self, hiveFilterDB, hiveFilterTable, copyDestination):\n\t\tlogging.debug(\"Executing copy_operations.scheduleTableAsyncCopy()\")\n\n\t\tif self.checkDBImportInstance(instance = copyDestination) == False:\n\t\t\tlogging.error(\"The specified remote DBImport instance does not exist.\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)\n\n\t\thiveFilterDB = hiveFilterDB.replace('*', '%').strip()\n\t\thiveFilterTable = hiveFilterTable.replace('*', '%').strip()\n\n\t\tlocalSession = self.configDBSession()\n\t\timportTables = aliased(configSchema.importTables)\n\t\tcopyASyncStatus = aliased(configSchema.copyASyncStatus)\n\n\t\t# Check if there are any tabled in this DAG that is marked for copy against the specified destination\n\t\tresult = (localSession.query(\n\t\t\t\tcopyASyncStatus\n\t\t\t)\n\t\t\t.filter(copyASyncStatus.hive_db.like(hiveFilterDB))\n\t\t\t.filter(copyASyncStatus.hive_table.like(hiveFilterTable))\n\t\t\t.filter(copyASyncStatus.destination == copyDestination)\n\t\t\t.count())\n\n\t\tif result > 0:\n\t\t\tlogging.error(\"There is already tables that matches this DAG's filter that is scheduled for copy against\")\n\t\t\tlogging.error(\"the specified destination. 
This operation cant continue until all the current copies are completed\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)\n\n\t\t# Calculate the source and target HDFS directories\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\t\n\t\tsourceHDFSaddress = self.common_operations.hdfs_address \n\t\tsourceHDFSbasedir = self.common_operations.hdfs_basedir \n\n\t\trow = (localSession.query(\n\t\t\t\tdbimportInstances.hdfs_address,\n\t\t\t\tdbimportInstances.hdfs_basedir\n\t\t\t)\n\t\t\t.filter(dbimportInstances.name == copyDestination)\n\t\t\t.one())\n\t\t\n\t\ttargetHDFSaddress = row[0]\n\t\ttargetHDFSbasedir = row[1]\n\n\t\tlogging.debug(\"sourceHDFSaddress: %s\"%(sourceHDFSaddress))\n\t\tlogging.debug(\"sourceHDFSbasedir: %s\"%(sourceHDFSbasedir))\n\t\tlogging.debug(\"targetHDFSaddress: %s\"%(targetHDFSaddress))\n\t\tlogging.debug(\"targetHDFSbasedir: %s\"%(targetHDFSbasedir))\n\n\t\t# Fetch a list of tables that match the database and table filter\n\t\tresult = pd.DataFrame(localSession.query(\n\t\t\t\timportTables.table_id,\n\t\t\t\timportTables.hive_db,\n\t\t\t\timportTables.hive_table,\n\t\t\t\timportTables.dbalias\n\t\t\t)\n\t\t\t.filter(importTables.hive_db.like(hiveFilterDB))\n\t\t\t.filter(importTables.hive_table.like(hiveFilterTable))\n\t\t\t)\n\n\t\tfor index, row in result.iterrows():\n\t\t\tlogging.info(\"Schedule asynchronous copy for %s.%s\"%(row['hive_db'], row['hive_table']))\n\t\t\tself.import_config.Hive_DB = row['hive_db']\n\t\t\tself.import_config.Hive_Table = row['hive_table']\n\n\t\t\ttry:\n\t\t\t\tself.import_config.getImportConfig()\n\t\t\texcept invalidConfiguration as errMsg:\n\t\t\t\tlogging.error(errMsg)\n\t\t\t\tself.import_config.remove_temporary_files()\n\t\t\t\tsys.exit(1)\n\n\t\t\tlogging.debug(\"table_id: %s\"%(self.import_config.table_id))\n\t\t\tlogging.debug(\"import_is_incremental: %s\"%(self.import_config.import_is_incremental))\n\n\t\t\tif self.import_config.import_is_incremental == True and includeIncrImports == False:\n\t\t\t\tlogging.warning(\"Asynchronous copy for incremental table %s.%s skipped\"%(row['hive_db'], row['hive_table']))\n\t\t\t\tcontinue\n\n\t\t\tsourceHDFSdir = (sourceHDFSbasedir + \"/\"+ row['hive_db'] + \"/\" + row['hive_table']).replace('$', '').replace(' ', '')\n\t\t\ttargetHDFSdir = (targetHDFSbasedir + \"/\"+ row['hive_db'] + \"/\" + row['hive_table']).replace('$', '').replace(' ', '')\n\t\t\tlogging.debug(\"sourceHDFSdir: %s\"%(sourceHDFSdir))\n\t\t\tlogging.debug(\"targetHDFSdir: %s\"%(targetHDFSdir))\n\n\t\t\tresult = (localSession.query(\n\t\t\t\t\tcopyASyncStatus\n\t\t\t\t)\n\t\t\t\t.filter(copyASyncStatus.table_id == row['table_id'])\n\t\t\t\t.filter(copyASyncStatus.destination == copyDestination)\n\t\t\t\t.count())\n\n\t\t\tif result == 0:\n\t\t\t\tnewcopyASyncStatus = configSchema.copyASyncStatus(\n\t\t\t\t\ttable_id = row['table_id'],\n\t\t\t\t\thive_db = row['hive_db'],\n\t\t\t\t\thive_table = row['hive_table'],\n\t\t\t\t\tdestination = copyDestination,\n\t\t\t\t\thdfs_source_path = \"%s%s\"%(sourceHDFSaddress, sourceHDFSdir),\n\t\t\t\t\thdfs_target_path = \"%s%s\"%(targetHDFSaddress, targetHDFSdir),\n\t\t\t\t\tcopy_status = 0)\n\t\t\t\tlocalSession.add(newcopyASyncStatus)\n\t\t\t\tlocalSession.commit()\n\n\t\tlocalSession.close()\n\n\n\t\tlogging.debug(\"Executing copy_operations.scheduleTableAsyncCopy() - Finished\")", "def copy_and_replace_keys(self, table, key_callback):\n client = self.bq_client\n t = client.get_table(table)\n\n cross_joins = []\n\n # begin query generation process\n q 
= f'CREATE OR REPLACE TABLE `{table}` AS (\\nSELECT \\n'\n for field in t.schema:\n q += process_field(field, None, key_callback)\n cross_joins.extend(process_cross_joins(field, \"copy_table\"))\n q = q.strip(\",\\n\")\n q += f\"\\nFROM\\n `{table}` copy_table\"\n\n for cross_join in cross_joins:\n q += cross_join\n q += \")\"\n\n return q", "def swap_tables(self):\n if self.stop_before_swap:\n return True\n log.info(\"== Stage 6: Swap table ==\")\n self.stop_slave_sql()\n self.execute_sql(sql.set_session_variable(\"autocommit\"), (0,))\n self.start_transaction()\n stage_start_time = time.time()\n self.lock_tables((self.new_table_name, self.table_name, self.delta_table_name))\n log.info(\"Final round of replay before swap table\")\n self.checksum_required_for_replay = False\n self.replay_changes(single_trx=True, holding_locks=True)\n # We will not run delta checksum here, because there will be an error\n # like this, if we run a nested query using `NOT EXISTS`:\n # SQL execution error: [1100] Table 't' was not locked with LOCK TABLES\n if self.mysql_version.is_mysql8:\n # mysql 8.0 supports atomic rename inside WRITE locks\n self.execute_sql(\n sql.rename_all_tables(\n orig_name=self.table_name,\n old_name=self.renamed_table_name,\n new_name=self.new_table_name,\n )\n )\n self.table_swapped = True\n self.add_drop_table_entry(self.renamed_table_name)\n log.info(\n \"Renamed {} TO {}, {} TO {}\".format(\n self.table_name,\n self.renamed_table_name,\n self.new_table_name,\n self.table_name,\n )\n )\n else:\n self.execute_sql(sql.rename_table(self.table_name, self.renamed_table_name))\n log.info(\n \"Renamed {} TO {}\".format(self.table_name, self.renamed_table_name)\n )\n self.table_swapped = True\n self.add_drop_table_entry(self.renamed_table_name)\n self.execute_sql(sql.rename_table(self.new_table_name, self.table_name))\n log.info(\"Renamed {} TO {}\".format(self.new_table_name, self.table_name))\n\n log.info(\"Table has successfully swapped, new schema takes effect now\")\n self._cleanup_payload.remove_drop_table_entry(\n self._current_db, self.new_table_name\n )\n self.commit()\n self.unlock_tables()\n self.stats[\"time_in_lock\"] = self.stats.setdefault(\"time_in_lock\", 0) + (\n time.time() - stage_start_time\n )\n self.execute_sql(sql.set_session_variable(\"autocommit\"), (1,))\n self.start_slave_sql()\n self.stats[\"swap_table_progress\"] = \"Swap table finishes\"", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as e:\n print(e)", "def reset_data():\n shutil.copy2(\n 'data/one_producer_many_consumers.ORIG.json',\n 'data/one_producer_many_consumers.json'\n )", "def copy_table_after(table, paragraph):\n\n\ttbl, p = table._tbl, paragraph._p\n\tnew_tbl = deepcopy(tbl)\n\tp.addnext(new_tbl)", "def handle_copy_rows(request):\n\n request_body_json = json.loads(request.body)\n username = request_body_json['username']\n print(username)\n overwrite = request_body_json['overwrite']\n if overwrite == 'true':\n overwrite = True\n elif overwrite == 'false':\n overwrite = False\n\n usecases = UseCase.objects.all()\n uses = []\n json_resp = {'message':'Ok'}\n for el in usecases:\n if el.name not in uses:\n uses.append(el.name)\n if username is not None:\n try:\n for el in uses:\n copy_rows(el, username,request.session['username'],overwrite)\n\n except Exception as e:\n json_resp = {'error':'error'}\n return JsonResponse(json_resp,status=500)\n else:\n return JsonResponse(json_resp)", "def copy_files(self, 
new_base_dir='.', overwrite=False,\n file_type='any', mode='copy', **kwargs ):\n #== retrieve query results\n file_list = self.bids_layout.get(**kwargs)\n #== select for reg/no-reg\n if file_type == 'reg':\n file_list = [file.filename for file in file_list if hasattr(file, 'registration')]\n elif file_type == 'noreg':\n file_list = [file.filename for file in file_list if (not hasattr(file, 'registration'))]\n else:\n file_list = [file.filename for file in file_list]\n\n if not file_type=='noreg':\n print(\"=== WARNING: directory structure for registered files is not handled correctly -- verify results!\")\n\n #== generate new paths\n for old_path in file_list:\n new_path_rel = self.bids_layout.build_path(old_path, self.bids_layout.path_patterns)\n new_path_abs = os.path.join(new_base_dir, new_path_rel)\n gt.ensure_dir_exists(new_path_abs)\n if mode=='copy':\n print(\"Preparing to copy '%s' to '%s'\"%(old_path, new_path_abs))\n elif mode=='move':\n print(\"Preparing to move '%s' to '%s'\" % (old_path, new_path_abs))\n if os.path.exists(new_path_abs):\n if overwrite:\n os.remove(new_path_abs)\n shutil.copy(old_path, new_path_abs)\n else:\n print(\"File '%s' already exists ... skipping.\"%(new_path_abs))\n else:\n shutil.copy(old_path, new_path_abs)\n if mode=='move':\n os.remove(old_path)\n try:\n os.rmdir(old_path)\n except:\n pass\n\n if not file_type=='noreg':\n print(\"=== WARNING: directory structure for registered files is not handled correctly -- verify results!\")", "def merge_both_tables():\n old = Table.read('data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n wanted = Table.read('data/scocen_candidates_300k_only_spatial_cut.fits')\n additional = Table.read('data/scocen_candidates_300k_only_spatial_cut_200k_to_determine_bg_ols.fits')\n\n d_old = dict(zip(old['source_id'], old['background_log_overlap']))\n d_add = dict(zip(additional['source_id'], additional['background_log_overlap']))\n d_old.update(d_add)\n dct = d_old\n\n ln_bg_ols = [dct[source_id] for source_id in wanted['source_id']]\n print\n len(ln_bg_ols), len(wanted)\n\n wanted['background_log_overlap'] = ln_bg_ols\n print\n wanted\n\n wanted.write('data/scocen_candidates_300k_only_spatial_cut.fits', overwrite=True, format='fits')", "def post_combine(self, target):\n target_extra_files = self.target_extra_files\n if target_extra_files:\n if self.disable_cleanup:\n self.stderr.write(\"Cleanup operations disabled by user.\\n\")\n else:\n self.stderr.write(\"Found extra files not part of source tree(s): \"\n f\"{len(target_extra_files)} files.\\n\")\n\n keep_existing = create_filtered_list(\"splunk\", default=False)\n # splglob_simple: Either full paths, or simple file-only match\n keep_existing.feedall(self.keep_existing, filter=splglob_simple)\n for dest_fn in target_extra_files:\n if keep_existing.match_path(dest_fn):\n self.stderr.write(f\"Keep existing file {dest_fn}\\n\")\n elif self.disable_cleanup:\n self.stderr.write(f\"Skip cleanup of unwanted file {dest_fn}\\n\")\n else:\n self.stderr.write(f\"Remove unwanted file {dest_fn}\\n\")\n os.unlink(os.path.join(target, dest_fn))", "def copymode(src, dest):\n import shutil\n\n shutil.copymode(src, dest)", "def copyBooks(self):\n skipMods = set(('Morrowind.esm',self.fileInfo.name))\n for id,(record,modName) in (self.srcBooks.items() + self.altBooks.items()):\n if modName not in skipMods:\n self.setRecord(copy.copy(record))", "def update(self, mode=\"all\"):\n\n self._check_mode(mode)\n\n mode = [\"prod\", \"staging\"] if mode == 
\"all\" else [mode]\n for m in mode:\n\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n\n # if m == \"staging\":\n\n table.description = self._render_template(\n Path(\"table/table_description.txt\"), self.table_config\n )\n\n # save table description\n with open(\n self.metadata_path\n / self.dataset_id\n / self.table_id\n / \"table_description.txt\",\n \"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(table.description)\n\n # when mode is staging the table schema already exists\n table.schema = self._load_schema(m)\n fields = [\"description\", \"schema\"] if m == \"prod\" else [\"description\"]\n self.client[f\"bigquery_{m}\"].update_table(table, fields=fields)\n\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"updated\",\n )", "def testOnOverwrite(self, widget):\n spy_signal = QtSignalSpy(widget, widget.modelModified)\n\n # check the default\n assert not widget.chkOverwrite.isChecked()\n\n # Change the state\n widget.chkOverwrite.setChecked(True)\n\n # Check the signal\n assert spy_signal.count() == 1\n\n # model dict updated\n assert widget.model['overwrite']", "async def test_preserved_other_overwrites_text(self):\n for overwrite_json in ('{\"send_messages\": true, \"add_reactions\": null}', None):\n with self.subTest(overwrite_json=overwrite_json):\n self.cog.previous_overwrites.get.return_value = overwrite_json\n\n prev_overwrite_dict = dict(self.text_overwrite)\n await self.cog._unsilence(self.text_channel)\n new_overwrite_dict = dict(self.text_overwrite)\n\n # Remove these keys because they were modified by the unsilence.\n del prev_overwrite_dict[\"send_messages\"]\n del prev_overwrite_dict[\"add_reactions\"]\n del new_overwrite_dict[\"send_messages\"]\n del new_overwrite_dict[\"add_reactions\"]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)", "def _copy_all(entry_list: List[Path], target_dir: Path, on_duplicate: str):\n for entry in entry_list:\n target_entry = target_dir / entry.name\n if not target_entry.exists() or on_duplicate == 'overwrite':\n if entry.is_file():\n shutil.copy(entry, target_entry, follow_symlinks=False)\n else:\n shutil.copytree(entry, target_entry, symlinks=True)\n elif on_duplicate == 'exception':\n raise ValueError(f\"'{target_entry}' already exists (src {entry})\")\n else: # ignore\n continue", "def restore_copy_tbl(orig_tblname):\n dd = mg.DATADETS_OBJ\n dd.con.commit()\n getdata.force_sofa_tbls_refresh(sofa_default_db_cur=dd.cur)\n SQL_rename_tbl = ('ALTER TABLE '\n f'{getdata.tblname_qtr(mg.DBE_SQLITE, mg.TMP_TBLNAME2)} RENAME TO '\n f'{getdata.tblname_qtr(mg.DBE_SQLITE, orig_tblname)}')\n dd.cur.execute(SQL_rename_tbl)", "def update_tmp_table_location_changes(file_type, database_columns, unique_identifier, fiscal_year):\n le_loc_columns_distinct = \" OR \".join(\n [\n \"website.{column} IS DISTINCT FROM broker.{column}\".format(column=column)\n for column in database_columns\n if column[:12] == \"legal_entity\"\n ]\n )\n\n pop_loc_columns_distinct = \" OR \".join(\n [\n \"website.{column} IS DISTINCT FROM broker.{column}\".format(column=column)\n for column in database_columns\n if column[:13] == \"place_of_perf\"\n ]\n )\n\n sql_statement = \"\"\"\n -- Include columns to determine whether we need a place of performance change or recipient location\n ALTER TABLE {file_type}_transactions_to_update_{fiscal_year}\n ADD COLUMN place_of_performance_change boolean, add COLUMN recipient_change boolean;\n\n UPDATE 
{file_type}_transactions_to_update_{fiscal_year} broker\n SET\n recipient_change = (\n CASE WHEN\n {le_loc_columns_distinct}\n THEN TRUE ELSE FALSE END\n ),\n place_of_performance_change = (\n CASE WHEN\n {pop_loc_columns_distinct}\n THEN TRUE ELSE FALSE END\n )\n FROM transaction_{file_type} website\n WHERE broker.{unique_identifier} = website.{unique_identifier};\n\n\n -- Delete rows where there is no transaction in the table\n DELETE FROM {file_type}_transactions_to_update_{fiscal_year}\n WHERE place_of_performance_change IS NULL\n AND recipient_change IS NULL;\n\n -- Adding index to table to improve speed on update\n CREATE INDEX {file_type}_le_loc_idx ON {file_type}_transactions_to_update_{fiscal_year}(recipient_change);\n CREATE INDEX {file_type}_pop_idx ON\n {file_type}_transactions_to_update_{fiscal_year}(place_of_performance_change);\n ANALYZE {file_type}_transactions_to_update_{fiscal_year};\n \"\"\".format(\n file_type=file_type,\n unique_identifier=unique_identifier,\n fiscal_year=fiscal_year,\n le_loc_columns_distinct=le_loc_columns_distinct,\n pop_loc_columns_distinct=pop_loc_columns_distinct,\n )\n return sql_statement", "def _copy_chunk(self, last_pk):\n self.execute(self.commands.copy_chunk(\n self.name,\n self._join_cols(self.intersection.dest_columns),\n self._qualify(self.source.name, self.intersection.origin_columns),\n self.source.name,\n self.primary_key_column,\n last_pk,\n self.chunk_size\n ))\n self.commit()", "def flushUndo(*args, **kwargs)->None:\n pass", "def touch(self, connection=None):\n self.create_marker_table()\n\n if connection is None:\n connection = self.connect()\n connection.autocommit = True # if connection created here, we commit it here\n\n connection.cursor().execute(\n \"\"\"INSERT INTO {marker_table} (update_id, target_table)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n update_id = VALUES(update_id)\n \"\"\".format(marker_table=self.marker_table),\n (self.update_id, self.table)\n )\n # make sure update is properly marked\n assert self.exists(connection)", "def _do_action_tables_create(self):\n\n schema_shell = os.path.join(self.bento_home, \"schema-shell\", \"bin\", \"kiji-schema-shell\")\n assert os.path.isfile(schema_shell), schema_shell\n\n # Delete the table first!\n cmd = (\n \"kiji delete --target={kiji_uri} --interactive=false; \" +\n \"kiji install --kiji={kiji_uri}\" ).format(kiji_uri=self.kiji_uri)\n self._run_kiji_job(cmd)\n\n for ddl in self.ddls:\n ddl_full_path = os.path.join(self.movie_advisor_home, ddl)\n assert os.path.isfile(ddl_full_path)\n cmd = \"{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}\".format(\n schema_shell=schema_shell,\n kiji_uri=self.kiji_uri,\n ddl_full_path=ddl_full_path)\n self._run_kiji_job(cmd)", "def process(self):\n try:\n # self.alter_columns()\n self.collect_drop_fk()\n self.update_table()\n self.create_tables()\n self.db_operations.create_fk_constraint(self.fk_constraints, self.contraints_columns)\n return True\n except Exception as err:\n logger.error(\"create_tables [error] -> %s\" % err)", "def pre_modify(self):\n return 0", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def write_merged(self, content = '', table=''):\n\n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True) \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n\n '''\n if os.path.isfile('dic_obstab_attributes.npy'):\n attrs_dic = np.load('dic_obstab_attributes.npy' , allow_pickle = True).item()\n else:\n 
attrs_dic = {}\n '''\n attrs_dic = {}\n\n \"\"\" Retrieving the attributes \"\"\"\n if content in ['observations_table','header_table','era5fb', 'station_configuration']:\n for var in table.keys():\n if var == 'comments':\n continue \n\n attrs_dic[var] = {}\n try:\n attrs_dic[var]['description'] = bytes( self.dic_type_attributes[content][var]['description'] , 'utf-8' )\n except:\n attrs_dic[var]['description'] = bytes( 'missing' , 'utf-8' )\n #print(' FFF FAILING WITH DESCRIPTION: ', var , ' ' , self.dic_type_attributes[content][var]['description']) # FFF CHECK WHY SOME ARE FAILING\n\n try:\n attrs_dic[var]['external_table'] = bytes( self.dic_type_attributes[content][var]['external_table'] , 'utf-8' )\n except:\n attrs_dic[var]['external_table'] = bytes( 'missing' , 'utf-8' )\n #print(' FFF FAILING WITH EXTERNAL TABLE : ', var ) # FFF CHECK WHY SOME ARE FAILING \n\n\n if content == 'recordindex': # writing the recordindex, recordtimestamp, dateindex\n #logging.info('Writing the merged record indices to the netCDF output ')\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n\n elif content == 'cdm_tables':\n for k in data['cdm_tables'].keys():\n table = data['cdm_tables'][k]\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = k)\n #logging.info('Writing the cdm table %s to the netCDF output ', k)\n \n elif content == 'source_configuration': \n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = content)\n #logging.info('Writing the source_configuration table to the netCDF output ')\n\n elif content == 'station_configuration':\n for k in table.keys(): \n if k == 'station_name':\n print(0)\n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n try:\n table[k] = table[k].astype( var_type ) \n print('Done station_conf' , k )\n except:\n if k == 'secondary_id':\n table[k] = table[k].astype( bytes ) \n\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n dic = {k:table[k]} \n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n \n \n # Writing the observations_table, header_table, era5fb \n elif content in ['observations_table', 'era5fb', 'header_table']: \n\n shape = ''\n for k in table.keys(): \n if k == 'index' or k == 'hdrlen' or 'string' in k :\n continue\n if k == 'station_name':\n print(0)\n \n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n\n if k == 'hdrlen': \n continue\n try:\n #table[k] = table[k].astype( bytes ) \n table[k] = table[k].astype( var_type ) \n \n except:\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n #print('*** Writing the table ', content, ' variable ', k)\n #if k == 'duplicates':\n # table[k] = table[k].astype( bytes ) \n \n \n dic = {k:table[k]} # making a 1 colum dictionary\n shape = table[k].shape\n #print('SHAPE IS FFF ', table[k].shape )\n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n\n if content == 'observations_table' and not self.obstab_nans_filled :\n missing_cdm_var = [ v for v in self.dic_type_attributes[content].keys() if v 
not in self.observations_table_vars] # variables to be filled with nans \n for k in missing_cdm_var:\n if k not in ['advanced_assimilation_feedback']:\n var_type = self.dic_type_attributes[content][k]['type']\n if var_type == np.int32 :\n nan = np.int32(-2147483648)\n else:\n nan = np.float32(np.nan) \n logging.debug('Adding missing cdm colum with empty values: %s ' , k )\n dic={k:np.empty(shape,dtype=np.dtype(nan))}\n dic[k].fill(nan)\n write_dict_h5(out_name, dic, 'observations_table', self.encodings['observations_table'], var_selection=[], mode='a', attrs = attrs_dic ) ### TO DO\n self.obstab_nans_filled = True\n\n elif content == 'observations_table' and self.obstab_nans_filled:\n return", "def before_update(mapper, conn, target):\n\n assert bool(target.ref), \"File.ref can't be null (before_update)\"", "def begin_not_undoable_action(self):\n self.not_undoable_action = True", "def modify_tbl(self):\n debug = False\n dd = mg.DATADETS_OBJ\n orig_tblname = dd.tbl\n ## other (i.e. not the sofa_id) field details\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n if debug:\n print('oth_name_types to feed into '\n f'make_strict_typing_tbl {oth_name_types}')\n try: ## 1 way or other must do strict_cleanup()\n make_strict_typing_tbl(\n orig_tblname, oth_name_types, self.settings_data)\n except sqlite.IntegrityError as e: #@UndefinedVariable\n if debug: print(b.ue(e))\n strict_cleanup(restore_tblname=orig_tblname)\n raise FldMismatchException\n except Exception as e:\n strict_cleanup(restore_tblname=orig_tblname)\n raise Exception('Problem making strictly-typed table.'\n f'\\nCaused by error: {b.ue(e)}')\n copy_orig_tbl(orig_tblname)\n wipe_tbl(orig_tblname)\n final_name = self.tblname_lst[0] ## may have been renamed\n try:\n make_redesigned_tbl(final_name, oth_name_types)\n strict_cleanup(restore_tblname=final_name)\n dd.set_tbl(tbl=final_name)\n except Exception as e:\n strict_cleanup(restore_tblname=orig_tblname)\n restore_copy_tbl(orig_tblname) ## effectively removes tmp_tbl 2\n dd.set_tbl(tbl=orig_tblname)\n raise Exception('Problem making redesigned table.'\n f'\\nCaused by error: {b.ue(e)}')\n wipe_tbl(mg.TMP_TBLNAME2)", "def _copy(self):\n for d in self._current_chunk:\n self.out.write(d)", "def test_backup_merge_with_unmerged(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 2\n self.log.info(\"Merging existing incremental backups\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.log.info(\"Taking more backups\")\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 3\n self.log.info(\"Merging new backups into already merged backup\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.log.info(\"Successfully merged new backups with already merged backup\")", "def on_ok(self, _event):\n dd = mg.DATADETS_OBJ\n if self.read_only:\n self.exiting = True\n self.Destroy()\n else:\n ## NB any changes defined in recode are already done\n new_tbl, tblname_changed, data_changed = self.get_change_status()\n if new_tbl or tblname_changed or data_changed:\n try:\n if not new_tbl:\n orig_tblname = self.tblname_lst[0]\n dd.set_tbl(tbl=orig_tblname)\n else:\n dd.set_tbl(tbl=None)\n self.make_changes()\n self.exiting = True\n self.Destroy()\n 
self.SetReturnCode(mg.RET_CHANGED_DESIGN)\n except FldMismatchException:\n wx.MessageBox(\n _('Unable to modify table. Some data does not match the'\n ' column type. Please edit and try again.'))\n return\n except Exception as e:\n wx.MessageBox(\n _(\"Unable to modify table.\\nCaused by error: %s\")\n % b.ue(e))\n return\n elif self.changes_made: ## not in tableconf. Must've been in recoding\n self.exiting = True\n self.Destroy()\n self.SetReturnCode(mg.RET_CHANGED_DESIGN)\n return\n else:\n wx.MessageBox(_('No changes to update.'))\n return", "def transfer_missing_elements(target_dict, source_dict, transfer_type=None):\r\n\r\n if transfer_type is None:\r\n transfer_type = source_dict.get(\"_transfer_type_\", \"recursive\")\r\n\r\n for key_, val_ in source_dict.items():\r\n # print(key_,isinstance(val_, dict), val_)\r\n if isinstance(val_, dict):\r\n if key_ not in target_dict:\r\n target_dict[key_] = EasyDict()\r\n if transfer_type is None:\r\n transfer_type = val_.get(\"_transfer_type_\", \"recursive\")\r\n # print(\"*********** \",transfer_type)\r\n\r\n if transfer_type == \"recursive\":\r\n transfer_missing_elements(target_dict[key_], val_, transfer_type)\r\n elif transfer_type == \"update\":\r\n target_dict[key_].update(val_)\r\n elif transfer_type == \"overwrite\":\r\n target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n # target_dict[key_] = val_\r\n\r\n elif key_ not in target_dict:\r\n target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n # target_dict[key_] = val_\r\n # else :\r\n # target_dict[key_] = val_\r\n # target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n\r\n\r\n # if isinstance(source_dict[key_],list) and isinstance(source_dict[key_][0],dict):\r\n # if key_ not in target_dict:\r\n # target_dict[key_] = []\r\n # for src_ in source_dict[key_]:\r\n # if not isinstance(src_,dict):\r\n # continue\r\n # match = False\r\n # for tar_ in target_dict[key_]:\r\n # # TODO make a list of bool with ID keys loaded from odb and check if any(matches):\r\n # if key_matches(\"pth_full\", src_, tar_) or key_matches(\"pth_alias\", src_, tar_) :\r\n # match = True\r\n # if not match:\r\n # temp = EasyDict()\r\n # target_dict[key_].append(temp)\r\n # transfer_missing_elements(temp, src_)\r" ]
[ "0.67875683", "0.5678444", "0.5599842", "0.55489415", "0.53554213", "0.52686346", "0.52098215", "0.5204137", "0.51888007", "0.51499987", "0.5148369", "0.5104553", "0.5098129", "0.5079654", "0.50565994", "0.50067544", "0.49729812", "0.4921561", "0.49208447", "0.49190444", "0.49049547", "0.4876093", "0.48642325", "0.48454264", "0.48426992", "0.4805531", "0.47853872", "0.47764587", "0.47709787", "0.47685695", "0.47651505", "0.47421384", "0.4719523", "0.47189656", "0.46698436", "0.46601954", "0.46508545", "0.4648953", "0.4646259", "0.46388894", "0.46244058", "0.46165442", "0.4613741", "0.4610374", "0.46092814", "0.46092814", "0.46092814", "0.46092814", "0.46075898", "0.46075276", "0.46059236", "0.46030006", "0.45977128", "0.4596791", "0.45965713", "0.45929635", "0.45655447", "0.45653617", "0.45456892", "0.45455638", "0.45434755", "0.45401084", "0.45399258", "0.45389456", "0.45365837", "0.45261776", "0.45256174", "0.45215175", "0.4515388", "0.45140523", "0.4510521", "0.45051414", "0.45020434", "0.45010355", "0.44965118", "0.44958654", "0.44958392", "0.44957772", "0.44937748", "0.44931486", "0.44910946", "0.44890088", "0.4488855", "0.44835627", "0.4482444", "0.44815952", "0.44809628", "0.4478462", "0.4478174", "0.44729036", "0.44699356", "0.4466202", "0.44642127", "0.44627175", "0.44572547", "0.44546086", "0.44545653", "0.4453769", "0.44385532", "0.4437149" ]
0.8015918
0
Build JobConfigurationTableCopy from request resource args.
def ProcessTableCopyConfiguration(ref, args, request): del ref # Unused source_ref = args.CONCEPTS.source.Parse() destination_ref = args.CONCEPTS.destination.Parse() arg_utils.SetFieldInMessage( request, 'job.configuration.copy.destinationTable.datasetId', destination_ref.Parent().Name()) arg_utils.SetFieldInMessage( request, 'job.configuration.copy.destinationTable.projectId', destination_ref.projectId) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.destinationTable.tableId', destination_ref.Name()) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.sourceTable.datasetId', source_ref.Parent().Name()) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.sourceTable.projectId', source_ref.projectId) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.sourceTable.tableId', source_ref.Name()) return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetTableCopyResourceArgs():\n table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')\n arg_specs = [\n resource_args.GetResourcePresentationSpec(\n verb='to copy from', name='source', required=True, prefixes=True,\n attribute_overrides={'table': 'source'}, positional=False,\n resource_data=table_spec_data.GetData()),\n resource_args.GetResourcePresentationSpec(\n verb='to copy to', name='destination',\n required=True, prefixes=True,\n attribute_overrides={'table': 'destination'}, positional=False,\n resource_data=table_spec_data.GetData())]\n fallthroughs = {\n '--source.dataset': ['--destination.dataset'],\n '--destination.dataset': ['--source.dataset']\n }\n return [concept_parsers.ConceptParser(arg_specs, fallthroughs)]", "def _create_jobs_table_data(self, dist_src, sectors, \r\n building_types_and_ids_and_home_based): \r\n res_sample = []; jobs_table = []\r\n no_samples = []\r\n\r\n for key in sectors.keys():\r\n sector_num = key\r\n sector = sectors[key]\r\n for zone in sector.keys():\r\n for building_type, number_of_jobs in sector[zone]:\r\n for type, id, home_based in building_types_and_ids_and_home_based:\r\n if building_type == id: \r\n dist = dist_src[type]\r\n break\r\n else:\r\n raise TypeError, (\"Invalid building type: %s\" \r\n % building_type)\r\n \r\n try:\r\n samples = self._zip_and_sample(dist[zone], \r\n int(number_of_jobs))\r\n if samples is None: \r\n no_samples += [(zone, building_type)]\r\n raise\r\n \r\n except:\r\n pass\r\n \r\n else:\r\n for type, id, home_based in building_types_and_ids_and_home_based:\r\n if building_type == id and home_based == True:\r\n home = 1 \r\n else: home = 0 \r\n \r\n for grid_id in samples:\r\n jobs_table += [{'sector':sector_num, \r\n 'home':home, \r\n 'building':building_type, \r\n 'grid':grid_id}]\r\n \r\n if len(no_samples) > 0:\r\n print ('No job samples created for (zone, building_type): %s!' 
\r\n % no_samples)\r\n \r\n return jobs_table", "def _copy_to_head_args(args: Namespace) -> Namespace:\n\n _head_args = copy.deepcopy(args)\n _head_args.polling = args.polling\n _head_args.port = args.port\n _head_args.host = args.host[0]\n _head_args.uses = args.uses\n _head_args.pod_role = PodRoleType.HEAD\n _head_args.runtime_cls = 'HeadRuntime'\n _head_args.replicas = 1\n\n if args.name:\n _head_args.name = f'{args.name}/head'\n else:\n _head_args.name = f'head'\n\n return _head_args", "def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request", "def test_create_copy(self):\n\n config = {\n 'version': '2.0',\n 'input_files': {\n 'INPUT_1': [{\n 'id': 1234,\n 'type': 'PRODUCT',\n 'workspace_name': 'wksp-name',\n 'workspace_path': 'the/workspace/path/file.json',\n 'local_file_name': 'file_abcdfeg.json',\n 'is_deleted': False,\n }]\n },\n 'output_workspaces': {\n 'OUTPUT_1': 'WORKSPACE_1'\n },\n 'tasks': [\n {\n 'task_id': 'task-1234',\n 'type': 'main',\n 'resources': {'cpu': 1.0},\n 'args': 'foo ${INPUT_1} ${JOB_OUTPUT_DIR}',\n 'env_vars': {'ENV_VAR_NAME': 'ENV_VAR_VALUE'},\n 'workspaces': {'WORKSPACE_NAME': {'mode': 'ro'}},\n 'mounts': {'MOUNT_NAME': 'MOUNT_VOLUME_NAME'},\n 'settings': {'SETTING_NAME': 'SETTING_VALUE'},\n 'volumes': {\n 'VOLUME_NAME_1': {\n 'container_path': '/the/container/path',\n 'mode': 'ro',\n 'type': 'host',\n 'host_path': '/the/host/path'\n },\n 'VOLUME_NAME_2': {\n 'container_path': '/the/other/container/path',\n 'mode': 'rw',\n 'type': 'volume',\n 'driver': 'SUPER_DRIVER_5000',\n 'driver_opts': {'turbo': 'yes-pleez'}\n }\n },\n 'docker_params': [{'flag': 'hello', 'value': 'scale'}]\n }\n ]\n }\n exe_config = ExecutionConfiguration(config)\n\n copy = exe_config.create_copy()\n self.assertDictEqual(copy.get_dict(), config)", "def _GetMigrationJob(\n self,\n source_ref,\n destination_ref,\n conversion_workspace_ref,\n cmek_key_ref,\n args,\n ):\n migration_job_type = self.messages.MigrationJob\n labels = labels_util.ParseCreateArgs(\n args, self.messages.MigrationJob.LabelsValue\n )\n type_value = self._GetType(migration_job_type, args.type)\n source = source_ref.RelativeName()\n destination = destination_ref.RelativeName()\n params = {}\n if args.IsSpecified('peer_vpc'):\n params['vpcPeeringConnectivity'] = self._GetVpcPeeringConnectivity(args)\n elif args.IsSpecified('vm_ip'):\n params['reverseSshConnectivity'] = self._GetReverseSshConnectivity(args)\n elif args.IsSpecified('static_ip'):\n params['staticIpConnectivity'] = self._GetStaticIpConnectivity()\n\n migration_job_obj = migration_job_type(\n labels=labels,\n displayName=args.display_name,\n state=migration_job_type.StateValueValuesEnum.CREATING,\n type=type_value,\n dumpPath=args.dump_path,\n source=source,\n destination=destination,\n **params)\n if conversion_workspace_ref is not None:\n migration_job_obj.conversionWorkspace = self._GetConversionWorkspaceInfo(\n conversion_workspace_ref, args\n )\n if cmek_key_ref is not None:\n migration_job_obj.cmekKeyName = cmek_key_ref.RelativeName()\n\n if args.IsKnownAndSpecified('filter'):\n args.filter, server_filter = filter_rewrite.Rewriter().Rewrite(\n args.filter\n )\n migration_job_obj.filter = server_filter\n\n if args.IsKnownAndSpecified('dump_parallel_level'):\n migration_job_obj.performanceConfig = self._GetPerformanceConfig(args)\n\n return migration_job_obj", "def infocalypse_copy(ui_, repo, **opts):\n params, stored_cfg = 
get_config_info(ui_, opts)\n\n insert_uri = opts['inserturi']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --inserturi.\\n\")\n return\n\n request_uri = opts['requesturi']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --requesturi option.\\n\")\n return\n\n params['INSERT_URI'] = insert_uri\n params['REQUEST_URI'] = request_uri\n execute_copy(ui_, repo, params, stored_cfg)", "def __get_datatables_args():\n\n table_args = dict()\n\n #\n # Common Arguments\n #\n\n table_args['column-count'] = 0\n table_args['sort-col-count'] = 0\n\n if request.args.get('draw'):\n table_args['sequence'] = request.args.get('draw')\n\n if request.args.get('start'):\n table_args['offset'] = int(request.args.get('start'))\n\n if request.args.get('length'):\n table_args['limit'] = int(request.args.get('length'))\n\n if request.args.get('search[value]'):\n table_args['filter'] = request.args.get('search[value]')\n\n if request.args.get('search[regex]'):\n table_args['filter-regex'] = request.args.get('search[regex]')\n\n #\n # Custom Arguments\n #\n\n if request.args.get('time_filter'):\n table_args['time_filter'] = request.args.get('time_filter')\n\n i = 0\n while True:\n if request.args.get('columns[%d][data]' % i):\n table_args['column-count'] += 1\n table_args['mDataProp_%d' % i] = request.args.get('columns[%d][data]' % i)\n else:\n break\n\n #\n # Column Search\n #\n\n if request.args.get('columns[%d][searchable]' % i):\n table_args['bSearchable_%d' % i] = request.args.get('columns[%d][searchable]' % i)\n\n if request.args.get('columns[%d][search][value]' % i):\n table_args['sSearch_%d' % i] = request.args.get('columns[%d][search][value]' % i)\n\n if request.args.get('columns[%d][search][regex]' % i):\n table_args['bRegex_%d' % i] = request.args.get('columns[%d][search][regex]' % i)\n\n #\n # Column Sort\n #\n\n if request.args.get('columns[%d][orderable]' % i):\n table_args['bSortable_%d' % i] = request.args.get('columns[%d][orderable]' % i)\n\n if request.args.get('order[%d][column]' % i):\n table_args['sort-col-count'] += 1\n table_args['iSortCol_%d' % i] = int(request.args.get('order[%d][column]' % i))\n\n if request.args.get('order[%d][dir]' % i):\n table_args['sSortDir_%d' % i] = request.args.get('order[%d][dir]' % i)\n\n i += 1\n\n return table_args", "def initiate_build(self, config: Union[TableConfig, str, UUID],\n version: Union[str, UUID] = None) -> JobSubmissionResponse:\n if isinstance(config, TableConfig):\n if version is not None:\n logger.warning('Ignoring version {} since config object was provided.'\n .format(version))\n if config.version_number is None:\n raise ValueError('Cannot build table from config which has no version. '\n 'Try registering the config before building.')\n if config.config_uid is None:\n raise ValueError('Cannot build table from config which has no uid. 
'\n 'Try registering the config before building.')\n uid = config.config_uid\n version = config.version_number\n else:\n if version is None:\n raise ValueError('Version must be specified when building by config uid.')\n uid = config\n job_id = uuid4()\n logger.info('Building table from config {} version {} with job ID {}...'\n .format(uid, version, job_id))\n path = 'projects/{}/ara-definitions/{}/versions/{}/build'.format(\n self.project_id, uid, version\n )\n response = self.session.post_resource(\n path=path,\n json={},\n params={\n 'job_id': job_id\n }\n )\n submission = JobSubmissionResponse.build(response)\n logger.info('Build job submitted with job ID {}.'.format(submission.job_id))\n return submission", "def __gen_datatable__(self):\n # | - __generate_data_table\n rows_list = []\n for Job_i in self.Job_list:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop, value in Job_i.job_params.items():\n entry_param_dict[prop] = value\n\n entry_param_dict[\"Job\"] = Job_i\n entry_param_dict[\"path\"] = Job_i.full_path\n entry_param_dict[\"max_revision\"] = Job_i.max_revision\n entry_param_dict[\"revision_number\"] = Job_i.revision_number\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def __init__(self, job_template_name, job_input, device_list,\n api_server_config, logger, amqp_client,\n transaction_id, transaction_descr, args):\n self._job_template_name = job_template_name\n self._job_input = job_input\n self._device_list = device_list\n self._api_server_config = api_server_config\n self._logger = logger\n self._job_id = None\n self._job_status = None\n self._amqp_client = amqp_client\n self._transaction_id = transaction_id\n self._transaction_descr = transaction_descr\n self._args = args\n super(JobHandler, self).__init__()", "def get_merged_args(args):\n config_dict = load_config(args.config)\n\n args_dict = {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": args.wastebasket_path\n }\n\n for arg, value in args_dict.iteritems():\n if not value:\n args_dict[arg] = config_dict[arg]\n\n if args_dict[\"cleaning_policy\"] == POLICY:\n args_dict[\"cleaning_policy\"] = config_dict[\"cleaning_policy\"]\n\n if args_dict[\"storage_time\"] == STORAGE_TIME:\n args_dict[\"storage_time\"] = config_dict[\"storage_time\"]\n\n if args_dict[\"max_size\"] == MAX_SIZE:\n args_dict[\"max_size\"] = config_dict[\"max_size\"]\n\n return args_dict", "def __copy__(self):\n from bn.distribs.distribution_builder import MultivariateTableBuilder\n builder = MultivariateTableBuilder()\n for assignment in self._table.keys():\n builder.add_row(copy(assignment), self._table[assignment])\n\n return builder.build()", "def CopyTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('copyTapSettings', payload=payload, response_object=None)", "def __copy__(self):\n # prepare unnamed arguments\n args = [getattr(self, arg) for arg in self._copy_conf['args']]\n\n # prepare named arguments\n kwargs = {}\n 
for arg in self._copy_conf['kwargs']:\n # if arg is a tuple, the first entry will be the named kwargs, and\n # the second will be the name of the attribute to copy\n name = arg\n if isinstance(arg, tuple):\n name, arg = arg\n if hasattr(self, arg):\n kwargs[name] = getattr(self, arg)\n\n # create the new instance\n new_copy = self.__class__(*args, **kwargs)\n\n # then copy attributes\n for attr_name in self._copy_conf['attrs']:\n if hasattr(self, attr_name):\n setattr(new_copy, attr_name, getattr(self, attr_name))\n\n return new_copy", "def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if element.escape:\n parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n 
bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n value=element.time_format,\n type_=sa.String,\n ))\n\n if element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)", "def create_job_configuration(start_time: str) -> ItemsJobConfig:\n # Create job configuration\n config = {\n 'source_url': os.getenv(\"ITEMS_SOURCE_URL\", default=\"\"),\n 'dest_new_url': os.getenv(\"ITEMS_DEST_NEW_URL\", default=\"\"),\n 'dest_updates_url': os.getenv(\"ITEMS_DEST_UPDATES_URL\", default=\"\"),\n 'caiasoft_api_key': os.getenv('CAIASOFT_API_KEY', default=\"\"),\n 'storage_dir': os.getenv('ITEMS_STORAGE_DIR', default=\"\"),\n 'last_success_lookup': os.getenv('ITEMS_LAST_SUCCESS_LOOKUP', default=\"\")\n }\n\n job_id_prefix = \"caia.items\"\n\n job_config = ItemsJobConfig(config, job_id_prefix, start_time)\n logger.info(f\"Job Id: {job_config['job_id']}\")\n logger.debug(f\"job_config={job_config}\")\n\n return job_config", "def __generate_data_table__(self):\n # | - __generate_data_table__\n rows_list = []\n for job in self.job_var_lst:\n revisions = self.job_revision_number(job)\n for revision in range(revisions + 1)[1:]:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop in job:\n entry_param_dict[prop[\"property\"]] = prop[\"value\"]\n\n entry_param_dict[\"variable_list\"] = job\n entry_param_dict[\"path\"] = self.var_lst_to_path(job)\n\n entry_param_dict[\"max_revision\"] = revisions\n entry_param_dict[\"revision_number\"] = revision\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def from_mapping(context: CreateCommandsContext, dry_run):\n if dry_run:\n logger.info(\"** Dry run, nothing will be sent to server **\")\n\n # Make sure no jobs are actually created\n context.client_tool.create_path_job = mock_create\n 
context.client_tool.create_pacs_job = mock_create\n\n job_sets = extract_job_sets(\n context.default_parameters(), context.get_mapping()\n )\n\n # inspect project name and destination to present the next question to the user\n project_names = set()\n destination_paths = set()\n for job_set in job_sets:\n project_names.add(job_set.get_param_by_type(Project).value)\n destination_paths.add(job_set.get_param_by_type(DestinationPath).value)\n\n question = (\n f\"This will create {len(job_sets)} jobs on \"\n f\"{context.get_active_server().name},\"\n f\" for projects '{list(project_names)}', writing data to \"\n f\"'{[str(x) for x in destination_paths]}'. Are you sure?\"\n )\n if not click.confirm(question):\n logger.info(\"Cancelled\")\n return\n\n created_job_ids = create_jobs(context, job_sets)\n\n if created_job_ids:\n context.add_to_batch(created_job_ids)\n\n logger.info(\"Done\")", "def _prepare(self):\n logging.info('-> copy configuration...')\n path_cofig = self.params['path_config_bUnwarpJ']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_SIFT' in self.params:\n path_cofig = self.params['path_config_IJ_SIFT']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_MOPS' in self.params:\n path_cofig = self.params['path_config_IJ_MOPS']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n copy: Optional[pulumi.Input[pulumi.InputType['JobCopyArgs']]] = None,\n extract: Optional[pulumi.Input[pulumi.InputType['JobExtractArgs']]] = None,\n job_id: Optional[pulumi.Input[str]] = None,\n job_timeout_ms: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n load: Optional[pulumi.Input[pulumi.InputType['JobLoadArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n query: Optional[pulumi.Input[pulumi.InputType['JobQueryArgs']]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['copy'] = copy\n __props__['extract'] = extract\n if job_id is None:\n raise TypeError(\"Missing required property 'job_id'\")\n __props__['job_id'] = job_id\n __props__['job_timeout_ms'] = job_timeout_ms\n __props__['labels'] = labels\n __props__['load'] = load\n __props__['location'] = location\n __props__['project'] = project\n __props__['query'] = query\n __props__['job_type'] = None\n __props__['user_email'] = None\n super(Job, __self__).__init__(\n 'gcp:bigquery/job:Job',\n resource_name,\n __props__,\n opts)", "def build_from_config(self, config: Union[TableConfig, str, UUID], *,\n version: 
Union[str, int] = None,\n timeout: float = 15 * 60) -> GemTable:\n job = self.initiate_build(config, version)\n return self.get_by_build_job(job, timeout=timeout)", "def _GetUpdatedMigrationJob(\n self, migration_job, source_ref, destination_ref, args):\n update_fields = self._GetUpdateMask(args)\n if args.IsSpecified('display_name'):\n migration_job.displayName = args.display_name\n if args.IsSpecified('type'):\n migration_job.type = self._GetType(self.messages.MigrationJob, args.type)\n if args.IsSpecified('dump_path'):\n migration_job.dumpPath = args.dump_path\n if args.IsSpecified('source'):\n migration_job.source = source_ref.RelativeName()\n if args.IsSpecified('destination'):\n migration_job.destination = destination_ref.RelativeName()\n if args.IsKnownAndSpecified('dump_parallel_level'):\n migration_job.performanceConfig = self._GetPerformanceConfig(args)\n self._UpdateConnectivity(migration_job, args)\n self._UpdateLabels(args, migration_job, update_fields)\n return migration_job, update_fields", "def __init__(self, job_spec: JobSpec):\n self.job_spec = job_spec\n \n self.merge_rules = spark.read.csv(job_spec.mapping_document_path, header='true').toPandas()\n\n self.join_map = JoinMap()\n \n # defined externally\n self.merge_action_map = merge_action_map", "def _init_from_config(self):\n self.arch = self.job_config.get('arch', 'x86_64')\n self.os_type = self.job_config.get(\"os_type\")\n self.flavor = self.job_config.get(\"flavor\")\n self.codename = self.job_config.get(\"codename\")\n self.os_version = self._get_version()\n # if os_version is given, prefer version/codename derived from it\n if self.os_version:\n self.os_version, self.codename = \\\n OS.version_codename(self.os_type, self.os_version)\n self.branch = self.job_config.get(\"branch\")\n self.tag = self.job_config.get(\"tag\")\n self.ref = self.job_config.get(\"ref\")\n self.distro = self._get_distro(\n distro=self.os_type,\n version=self.os_version,\n codename=self.codename,\n )\n self.pkg_type = \"deb\" if self.os_type.lower() in (\n \"ubuntu\",\n \"debian\",\n ) else \"rpm\"\n\n if not getattr(self, 'flavor'):\n # avoiding circular imports\n from teuthology.suite.util import get_install_task_flavor\n # when we're initializing from a full teuthology config, not just a\n # task config we need to make sure we're looking at the flavor for\n # the install task\n self.flavor = get_install_task_flavor(self.job_config)", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def create_compilation_job(CompilationJobName=None, RoleArn=None, InputConfig=None, OutputConfig=None, StoppingCondition=None):\n pass", "def prepare_pr_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index, data_files, rank='0', extraArgs=''):\n ############\n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n input_files_string = ', '\n for data_file in data_files:\n input_files_string += (data_file + ', ')\n input_files_string = input_files_string.rstrip(', ')\n\n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles=input_files_string,\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = 
pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config", "def copy(self) -> pulumi.Output[Optional['outputs.JobCopy']]:\n return pulumi.get(self, \"copy\")", "def get_template_args(csv_directory, id_field, address_field, zone_field, upload_bucket, results_bucket, upload=True):\n job_csvs = [f for f in listdir(csv_directory) if isfile(join(csv_directory, f))]\n job_template_args = []\n for job_num, job_csv in enumerate(job_csvs):\n if upload:\n upload_blob(UPLOAD_BUCKET, join(csv_directory, job_csv), job_csv)\n print(job_csv, 'uploaded')\n job_template_args.append({\n 'job_number': job_num,\n 'csv_name': job_csv,\n 'id_field': id_field,\n 'address_field': address_field,\n 'zone_field': zone_field,\n 'upload_bucket': upload_bucket,\n 'results_bucket': results_bucket\n })\n return job_template_args", "def __init__(self, settings, ui_id, job_id):\n # Call the original constructor\n super().__init__(settings, ui_id, job_id)\n\n # Set the local template\n self.local_template = 'settings/bilby_local.sh'\n # Set our 
job parameter path\n self.job_parameter_file = os.path.join(self.get_working_directory(), 'json_params.json')\n # Set the job output directory\n self.job_output_directory = os.path.join(self.get_working_directory(), 'output')", "def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n if sim is not None:\n raise ValueError(\"Found 'sim' argument on AnalyzeExtension_SG config.\")\n if targets_yaml is None:\n return job_configs\n\n targets = load_yaml(targets_yaml)\n config_yaml = 'config.yaml'\n\n base_config = dict(roi_baseline=args['roi_baseline'],\n make_plots=args['make_plots'])\n\n for target_name, target_list in targets.items():\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n fullpath=True)\n target_dir = NAME_FACTORY.targetdir(**name_keys)\n config_path = os.path.join(target_dir, config_yaml)\n logfile = make_nfs_path(os.path.join(\n target_dir, \"%s_%s.log\" % (self.linkname, target_name)))\n job_config = base_config.copy()\n job_config.update(dict(config=config_path,\n logfile=logfile))\n job_configs[target_name] = job_config\n\n return job_configs", "def __init__(self, cfg_index, conditions, pars_dir, step_title, use_defaults, input_cfg_json_data):\n super().__init__(cfg_index, conditions, pars_dir, step_title, use_defaults, input_cfg_json_data)\n if input_cfg_json_data:\n self._read_custom_pars()\n else:\n self._combine_conditions()\n\n # Mike's new stuff\n self.outpars = self._flatten_dict(self.outpars, '', {})", "def create_arg_config(environment, region, template, parameters):\r\n raw_config = {\r\n 'Environment': environment,\r\n 'Region': region\r\n }\r\n if template:\r\n raw_config['Template'] = template\r\n if parameters:\r\n raw_config['Parameters'] = dict(parameters)\r\n return Config(raw_config)", "def deploy_features_table():\n\n logging.info(\"copy table\")\n target_dataset_ref = bigquery.DatasetReference(GCP_PROJECT, BQ_TARGET_DATASET)\n target_table_ref = bigquery.TableReference(target_dataset_ref, 'features')\n copyjob_config = bigquery.CopyJobConfig()\n copyjob_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED\n copyjob_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n bq.copy_table(temp_table_ref, target_table_ref, job_config=copyjob_config)", "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n pass", "def test_copy_options_from_config_file(self):\n tempfile = self.get_temp_file()\n self.prepare(nodes=1)\n\n logger.debug('Running stress')\n stress_table = 'keyspace1.standard1'\n self.node1.stress(['write', 'n=1K', 'no-warmup', '-rate', 'threads=50'])\n\n def create_config_file(config_lines):\n config_file = self.get_temp_file()\n logger.debug('Creating config file {}'.format(config_file.name))\n\n with open(config_file.name, 'w') as config:\n for line in config_lines:\n config.write(line + os.linesep)\n config.close()\n\n return config_file.name\n\n def extract_options(out):\n prefix = 'Using options: '\n for l in out.split('\\n'):\n if l.startswith(prefix):\n return l[len(prefix):].strip().strip(\"'\").replace(\"'\", \"\\\"\")\n return ''\n\n def check_options(out, expected_options):\n opts = extract_options(out)\n logger.debug('Options: {}'.format(opts))\n d = json.loads(opts)\n for k, v in expected_options:\n assert v == d[k]\n\n def do_test(config_lines, expected_options):\n config_file = 
create_config_file(config_lines)\n\n cmd = \"COPY {} {} '{}'\".format(stress_table, direction, tempfile.name)\n if not use_default:\n cmd += \" WITH CONFIGFILE = '{}'\".format(config_file)\n\n cqlsh_options = []\n if use_default:\n cqlsh_options.append('--cqlshrc={}'.format(config_file))\n\n logger.debug('{} with options {}'.format(cmd, cqlsh_options))\n out, _, _ = self.run_cqlsh(cmds=cmd, cqlsh_options=cqlsh_options, skip_cqlshrc=True)\n logger.debug(out)\n check_options(out, expected_options)\n\n for use_default in [True, False]:\n for direction in ['TO', 'FROM']:\n do_test(['[copy]', 'header = True', 'maxattempts = 10'],\n [('header', 'True'), ('maxattempts', '10')])\n\n do_test(['[copy]', 'header = True', 'maxattempts = 10',\n '[copy:{}]'.format(stress_table), 'maxattempts = 9'],\n [('header', 'True'), ('maxattempts', '9')])\n\n do_test(['[copy]', 'header = True', 'maxattempts = 10',\n '[copy-from]', 'maxattempts = 9',\n '[copy-to]', 'maxattempts = 8'],\n [('header', 'True'), ('maxattempts', '8' if direction == 'TO' else '9')])\n\n do_test(['[copy]', 'header = True', 'maxattempts = 10',\n '[copy-from]', 'maxattempts = 9',\n '[copy-to]', 'maxattempts = 8',\n '[copy:{}]'.format(stress_table), 'maxattempts = 7'],\n [('header', 'True'), ('maxattempts', '7')])\n\n do_test(['[copy]', 'header = True', 'maxattempts = 10',\n '[copy-from]', 'maxattempts = 9',\n '[copy-to]', 'maxattempts = 8',\n '[copy:{}]'.format(stress_table), 'maxattempts = 7',\n '[copy-from:{}]'.format(stress_table), 'maxattempts = 6',\n '[copy-to:{}]'.format(stress_table), 'maxattempts = 5'],\n [('header', 'True'), ('maxattempts', '5' if direction == 'TO' else '6')])", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n copy: Optional[pulumi.Input[pulumi.InputType['JobCopyArgs']]] = None,\n extract: Optional[pulumi.Input[pulumi.InputType['JobExtractArgs']]] = None,\n job_id: Optional[pulumi.Input[str]] = None,\n job_timeout_ms: Optional[pulumi.Input[str]] = None,\n job_type: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n load: Optional[pulumi.Input[pulumi.InputType['JobLoadArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n query: Optional[pulumi.Input[pulumi.InputType['JobQueryArgs']]] = None,\n user_email: Optional[pulumi.Input[str]] = None) -> 'Job':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"copy\"] = copy\n __props__[\"extract\"] = extract\n __props__[\"job_id\"] = job_id\n __props__[\"job_timeout_ms\"] = job_timeout_ms\n __props__[\"job_type\"] = job_type\n __props__[\"labels\"] = labels\n __props__[\"load\"] = load\n __props__[\"location\"] = location\n __props__[\"project\"] = project\n __props__[\"query\"] = query\n __props__[\"user_email\"] = user_email\n return Job(resource_name, opts=opts, __props__=__props__)", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def main(args):\n try:\n config_path = project_path + \"/\" + args.config\n input_data_path = project_path + \"/\" + args.input\n output_data_path = project_path + \"/\" + args.output\n\n config = load_config(config_path)\n df = 
read_csv(input_data_path)\n product = product_dim(df, **config['product_dim'])\n\n # Write to output file\n save_csv(product, output_data_path)\n except Exception as e:\n logger.error(\"Unexpected error occurred when creating object table for products: \" + str(e))", "def constructObjectJob(self, xml_node):\n name = os.path.join(self.dir, xml_node.find(\"name\").text)\n output_name = name + \".o\"\n object_job = CopyFile(name, output_name, self.dir)\n object_job.output = output_name\n return object_job", "def __init__(\n self,\n workspace=None,\n profile=None,\n extend_path=None,\n devel_layout=None,\n install=False,\n isolate_install=False,\n cmake_args=None,\n make_args=None,\n jobs_args=None,\n use_internal_make_jobserver=True,\n use_env_cache=False,\n catkin_make_args=None,\n space_suffix=None,\n buildlist=None,\n skiplist=None,\n authors=None,\n maintainers=None,\n licenses=None,\n extends=None,\n key_origins={},\n **kwargs\n ):\n self.__locked = False\n\n # Handle deprecated arguments\n if 'whitelist' in kwargs:\n buildlist = kwargs['whitelist']\n del kwargs['whitelist']\n if 'blacklist' in kwargs:\n skiplist = kwargs['blacklist']\n del kwargs['blacklist']\n\n # Validation is done on assignment\n self.workspace = workspace\n\n self.extend_path = extend_path if extend_path else None\n self.key_origins = key_origins\n\n self.profile = profile\n\n # Handle *space assignment and defaults\n for space, space_dict in Context.SPACES.items():\n key_name = space + '_space'\n default = space_dict['default']\n value = kwargs.pop(key_name, default)\n if value == default and space_suffix and space != 'source':\n value += space_suffix\n setattr(self, key_name, value)\n\n # Check for unhandled context options\n if len(kwargs) > 0:\n print('Warning: Unhandled config context options: {}'.format(kwargs), file=sys.stderr)\n\n self.destdir = os.environ.get('DESTDIR', None)\n\n # Handle package buildlist/skiplist\n self.buildlist = buildlist or []\n self.skiplist = skiplist or []\n\n # Handle default authors/maintainers\n self.authors = authors or []\n self.maintainers = maintainers or []\n self.licenses = licenses or ['TODO']\n\n # Handle build options\n self.devel_layout = devel_layout if devel_layout else 'linked'\n self.install = install\n self.isolate_install = isolate_install\n\n # Handle additional cmake and make arguments\n self.cmake_args = cmake_args or []\n self.make_args = make_args or []\n self.jobs_args = jobs_args or []\n self.use_internal_make_jobserver = use_internal_make_jobserver\n self.use_env_cache = use_env_cache\n self.catkin_make_args = catkin_make_args or []\n\n # List of packages in the workspace is set externally\n self.packages = []\n\n # List of warnings about the workspace is set internally\n self.warnings = []\n\n # Initialize environment settings set by load_env\n self.manual_cmake_prefix_path = None\n self.cached_cmake_prefix_path = None\n self.env_cmake_prefix_path = None\n self.cmake_prefix_path = None\n\n self.extends = extends", "def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(\n args, require_sim_name=True)\n if targets_yaml is None:\n return job_configs\n\n specconfig = NAME_FACTORY.resolve_specconfig(args)\n\n astro_priors = args['astro_priors']\n write_full = args.get('write_full', False)\n\n targets = load_yaml(targets_yaml)\n base_config = dict(nsims=args['nsims'],\n seed=args['seed'],\n specconfig=specconfig)\n\n for target_name, profile_list in list(targets.items()):\n for 
profile in profile_list:\n for astro_prior in astro_priors:\n if is_null(astro_prior):\n astro_prior = 'none'\n full_key = \"%s:%s:%s:%s\" % (\n target_name, profile, sim, astro_prior)\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n sim_name=sim,\n profile=profile,\n astro_prior=astro_prior,\n fullpath=True)\n limitfile = NAME_FACTORY.sim_dmlimitsfile(**name_keys)\n first = args['seed']\n last = first + args['nsims'] - 1\n outfile = limitfile.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' %\n (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = limitfile.replace(\n '_SEED.fits', '_summary_%06i_%06i.fits' %\n (first, last))\n job_config = base_config.copy()\n job_config.update(dict(limitfile=limitfile,\n astro_prior=astro_prior,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs", "def copyPetscArguments(jobdir, argsfile):\n shutil.copyfile(argsfile, os.path.join(jobdir, \"petsc_commandline_arg\"))", "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n\n job_spec = {\n 'display_name': job_id,\n 'job_spec': training_input,\n 'labels': job_labels,\n }\n return job_spec", "def build_input_table(cls, name='inputTableName', input_name='input'):\n obj = cls(name)\n obj.exporter = 'get_input_table_name'\n obj.input_name = input_name\n return obj", "def __init__(__self__, *,\n include_cluster_scope_resources: pulumi.Input[bool],\n object_type: pulumi.Input[str],\n snapshot_volumes: pulumi.Input[bool],\n excluded_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n excluded_resource_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n included_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n included_resource_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n label_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"include_cluster_scope_resources\", include_cluster_scope_resources)\n pulumi.set(__self__, \"object_type\", 'KubernetesClusterBackupDatasourceParameters')\n pulumi.set(__self__, \"snapshot_volumes\", snapshot_volumes)\n if excluded_namespaces is not None:\n pulumi.set(__self__, \"excluded_namespaces\", excluded_namespaces)\n if excluded_resource_types is not None:\n pulumi.set(__self__, \"excluded_resource_types\", excluded_resource_types)\n if included_namespaces is not None:\n pulumi.set(__self__, \"included_namespaces\", included_namespaces)\n if included_resource_types is not None:\n pulumi.set(__self__, \"included_resource_types\", included_resource_types)\n if label_selectors is not None:\n pulumi.set(__self__, \"label_selectors\", label_selectors)", "def input_config_to_job_input(input_batch_id, job_name, job_level, input_config):\n JobInput = namedtuple(\n \"JobInput\",\n [\"input_manifest_s3_uri\", \"label_attribute_name\", \"label_category_s3_uri\"],\n )\n\n input_manifest_s3_uri = input_config.get(\"inputManifestS3Uri\")\n if input_manifest_s3_uri is not None:\n return JobInput(\n input_manifest_s3_uri=input_manifest_s3_uri,\n label_attribute_name=None,\n label_category_s3_uri=None,\n )\n\n chain_to_job_name = job_name\n chain_from_job_name = input_config[\"chainFromJobName\"]\n\n # Only support jobs within the current batch for now.\n if job_level == 1:\n raise Exception(\"can't 
chain in job_level 1\")\n\n batches = chainable_batches(input_batch_id, job_level)\n if len(batches) == 0:\n raise Exception(\"no chainable batches found\")\n\n processed_job_level_batch = next(\n iter(\n db.get_batch_metadata_by_labeling_job_name(\n chain_to_job_name, BatchMetadataType.PROCESS_LEVEL\n )\n ),\n None,\n )\n\n prev_level_jobs = []\n for batch in batches:\n prev_level_jobs += db.get_child_batch_metadata(\n batch[\"BatchId\"], BatchMetadataType.JOB_LEVEL\n )\n\n for job in prev_level_jobs:\n if job[BatchMetadataTableAttributes.LABELING_JOB_NAME] == chain_from_job_name:\n # If available, use the downsampled manifest file as input to the new job\n if processed_job_level_batch:\n processed_data_location = processed_job_level_batch[\n BatchMetadataTableAttributes.JOB_INPUT_LOCATION\n ]\n else:\n processed_data_location = None\n\n batch_output_location = (\n processed_data_location or job[BatchMetadataTableAttributes.JOB_OUTPUT_LOCATION]\n )\n\n return JobInput(\n input_manifest_s3_uri=batch_output_location,\n label_attribute_name=job[BatchMetadataTableAttributes.LABEL_ATTRIBUTE_NAME],\n label_category_s3_uri=job[BatchMetadataTableAttributes.LABEL_CATEGORY_CONFIG],\n )\n\n raise Exception(f\"chain job {chain_from_job_name} not found\")", "def _create_job_spec(\n self,\n job_id: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:\n\n job_spec = {\n 'jobId': job_id,\n 'trainingInput': training_input,\n 'labels': job_labels,\n }\n return job_spec", "def svn_client_copy4(svn_commit_info_t_commit_info_p, apr_array_header_t_sources, char_dst_path, svn_boolean_t_copy_as_child, svn_boolean_t_make_parents, apr_hash_t_revprop_table, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def _deep_copy_arg_dict(input_arg_dict):\n output_arg_dict = {}\n for name, param in input_arg_dict.items():\n output_arg_dict[name] = param.copy()\n return output_arg_dict", "def test_args_copy():\n args = cli.parse_args(['-c'])\n assert args.copy\n args = cli.parse_args(['--copy'])\n assert args.copy", "def svn_client_copy5(svn_commit_info_t_commit_info_p, apr_array_header_t_sources, char_dst_path, svn_boolean_t_copy_as_child, svn_boolean_t_make_parents, svn_boolean_t_ignore_externals, apr_hash_t_revprop_table, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def copy(self):\n r = PredictionJobRequest()\n r.__dict__.update(self.__dict__)\n\n return r", "def synthesize_employment_data(self, config):\r\n jobs_by_zone_by_sector_table_name = config['jobs_by_zone_by_sector']\r\n gridcells_table_name = config['gridcells']\r\n jobs_table_name = config['jobs']\r\n gridcells_output_table_name = config['gridcells_output']\r\n jobs_output_table_name = config['jobs_output']\r\n \r\n input_db_name = config['db_config'].database_name\r\n output_db_name = config['output_database_name']\r\n \r\n sectors = config['sector_names_and_ids']\r\n building_types_and_ids_and_home_based = config[\r\n 'building_type_column_names_and_ids_and_home_based']\r\n \r\n building_types = []\r\n building_ids = []\r\n home_based = [] \r\n for type, id, home in building_types_and_ids_and_home_based:\r\n building_types += [type]\r\n building_ids += [id]\r\n home_based += [home]\r\n \r\n \r\n from_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = input_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = 
config['db_config'].password \r\n )\r\n to_database_configuration = ScenarioDatabaseConfiguration(\r\n database_name = output_db_name,\r\n host_name = config['db_config'].host_name,\r\n user_name = config['db_config'].user_name,\r\n password = config['db_config'].password \r\n )\r\n\r\n FlattenScenarioDatabaseChain().copy_scenario_database(\r\n from_database_configuration = from_database_configuration, \r\n to_database_configuration = to_database_configuration,\r\n tables_to_copy = [gridcells_table_name, jobs_table_name])\r\n \r\n db_server = DatabaseServer(to_database_configuration) \r\n output_database = db_server.get_database(output_db_name)\r\n \r\n sector_name = 0; sector_id = 1\r\n \r\n sector = {}\r\n for entry in sectors:\r\n name = entry[sector_name]\r\n id = entry[sector_id]\r\n sector[id] = self._get_jobs_per_building_type_in_sector_by_zone(\r\n output_database, jobs_by_zone_by_sector_table_name, \r\n jobs_table_name, name, id)\r\n\r\n results = self._get_building_type_proportion_by_zone(output_database, \r\n gridcells_table_name)\r\n \r\n grid_id = 0; zone_id = 1\r\n dist = {}\r\n \r\n type_index = {}\r\n \r\n for name in building_types:\r\n for i in range(len(results[0])):\r\n column_name = results[0][i]\r\n if name == column_name:\r\n type_index[name] = i\r\n break;\r\n else:\r\n raise KeyError, ('No column by the name of \\'%s\\' found in '\r\n 'the database.' % name) \r\n\r\n for name in building_types:\r\n dist[name] = {}\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] = []\r\n \r\n for row in results[1:]:\r\n for name in building_types:\r\n dist[name][row[zone_id]] += [(row[grid_id], \r\n row[type_index[name]])]\r\n \r\n jobs_table_data = self._create_jobs_table_data(dist, sector,\r\n building_types_and_ids_and_home_based)\r\n \r\n output_database.execute('USE %(out_db)s' % {'out_db':output_db_name})\r\n \r\n output_database.execute(\"\"\"\r\n CREATE TABLE %(jobs_out)s (\r\n JOB_ID INT AUTO_INCREMENT, PRIMARY KEY(JOB_ID),\r\n GRID_ID INT, HOME_BASED INT, SECTOR_ID INT, BUILDING_TYPE INT);\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n \r\n if len(jobs_table_data) > 0:\r\n output_prefix = (\r\n \"\"\"INSERT INTO %(jobs_out)s \r\n (GRID_ID, HOME_BASED, SECTOR_ID, BUILDING_TYPE) VALUES\r\n \"\"\" % {'jobs_out':jobs_output_table_name})\r\n output_postfix = ';'\r\n \r\n step = 1000\r\n length = len(jobs_table_data)\r\n iterations = int(length/step) + 1\r\n \r\n for i in range(iterations):\r\n low = i*step\r\n high = (i+1)*step\r\n \r\n if high > length: high = length\r\n \r\n output_body = \"\"\r\n \r\n for j in range(low, high):\r\n output_body += (\r\n '(%(grid)s, %(home)s, %(sector)s, %(building)s),\\n' \r\n % jobs_table_data[j])\r\n \r\n output_query = \"%s%s%s\" % (output_prefix, \r\n output_body[:-2], \r\n output_postfix)\r\n\r\n output_database.execute(output_query)\r\n \r\n \r\n ### TODO: \r", "def _map_arguments(self, args):\n config_yaml = args['config']\n config_dict = load_yaml(config_yaml)\n\n dry_run = args.get('dry_run', False)\n\n data = config_dict.get('data')\n comp = config_dict.get('comp')\n library = config_dict.get('library')\n models = config_dict.get('models')\n scratch = config_dict.get('scratch')\n\n self._set_link('prepare', SplitAndBinChain,\n comp=comp, data=data,\n ft1file=config_dict.get('ft1file'),\n hpx_order_ccube=config_dict.get('hpx_order_ccube'),\n hpx_order_expcube=config_dict.get('hpx_order_expcube'),\n scratch=scratch,\n dry_run=dry_run)\n\n self._set_link('diffuse-comp', 
DiffuseCompChain,\n comp=comp, data=data,\n library=library,\n make_xml=config_dict.get('make_diffuse_comp_xml', False),\n outdir=config_dict.get('merged_gasmap_dir', 'merged_gasmap'),\n dry_run=dry_run)\n\n self._set_link('catalog-comp', CatalogCompChain,\n comp=comp, data=data,\n library=library,\n make_xml=config_dict.get('make_catalog_comp_xml', False),\n nsrc=config_dict.get('catalog_nsrc', 500),\n dry_run=dry_run)\n \n self._set_link('assemble-model', AssembleModelChain,\n comp=comp, data=data,\n library=library,\n models=models,\n hpx_order=config_dict.get('hpx_order_fitting'),\n dry_run=dry_run)", "def build_parms(args):\r\n readDir=args.dir\r\n #target_date=args.target_date\r\n target_date=args.target_date\r\n outdir=args.outdir \r\n parms = {\"readDir\":readDir,\r\n \"target_date\":target_date,\r\n \"outdir\":outdir}\r\n \r\n return(parms)", "def build(self, data: dict) -> GemTable:\n table = GemTable.build(data)\n table.project_id = self.project_id\n table.session = self.session\n return table", "def from_dict(data: Dict) -> 'JobConfig':\n job_name = list(data.keys())[0]\n return JobConfig(job_name=job_name,\n job_type=data[job_name].get('job_type', None),\n properties=data[job_name].get('properties', {}))", "def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(\n args, require_sim_name=True)\n if roster_yaml is None:\n return job_configs\n\n specconfig = NAME_FACTORY.resolve_specconfig(args)\n\n astro_priors = args['astro_priors']\n write_full = args['write_full']\n first = args['seed']\n last = first + args['nsims'] - 1\n\n base_config = dict(nsims=args['nsims'],\n seed=args['seed'])\n\n roster_dict = load_yaml(roster_yaml)\n for roster_name in list(roster_dict.keys()):\n for astro_prior in astro_priors:\n if is_null(astro_prior):\n astro_prior = 'none'\n full_key = \"%s:%s:%s\" % (roster_name, sim, astro_prior)\n name_keys = dict(target_type=ttype,\n roster_name=roster_name,\n sim_name=sim,\n astro_prior=astro_prior,\n fullpath=True)\n\n limitfile = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)\n outfile = limitfile.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' %\n (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = limitfile.replace('_SEED.fits', '_summary.fits')\n\n job_config = base_config.copy()\n job_config.update(dict(limitfile=limitfile,\n specconfig=specconfig,\n astro_prior=astro_prior,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n column_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n column_wildcard: Optional[pulumi.Input[pulumi.InputType['DataCellsFilterColumnWildcardArgs']]] = None,\n database_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n row_filter: Optional[pulumi.Input[pulumi.InputType['DataCellsFilterRowFilterArgs']]] = None,\n table_catalog_id: Optional[pulumi.Input[str]] = None,\n table_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def svn_client_copy(svn_client_commit_info_t_commit_info_p, char_src_path, svn_opt_revision_t_src_revision, char_dst_path, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def Copy(self, name, **kargs):\n if \"TOOLCHAIN\" in kargs and not 
kargs[\"TOOLCHAIN\"] is None:\n self.set_toolchain(kargs[\"TOOLCHAIN\"])\n if \"STATIC\" in kargs:\n self.static = kargs[\"STATIC\"]\n else:\n self.static = 0\n if \"TESTENV\" in kargs:\n self.testenv = kargs[\"TESTENV\"]\n else:\n self.testenv = 0\n \n new_self = Environment.Copy(self, **kargs)\n new_self.libs = copy.copy(self.libs)\n new_self.apps = copy.copy(self.apps)\n new_self.cpp_path = copy.copy(self.cpp_path)\n new_self.files = copy.copy(self.files)\n new_self.name = name\n #new_self.build_libs(*libs)\n return new_self", "def getCloneArgs(self):\n\n values = {\n \"dest\": self.subnode_dest.makeClone()\n if self.subnode_dest is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def copy_config(cfg):\n res= dict(cfg)\n #model_param = dict(cfg['model_param'])\n model_param = dict(cfg.get('model_param', {}))\n res['model_param'] = model_param\n return res", "def copy_unified_job(self, _eager_fields=None, **new_prompts):\n unified_job_class = self.__class__\n unified_jt_class = self._get_unified_job_template_class()\n parent_field_name = self._get_parent_field_name()\n fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])\n\n create_data = {}\n if _eager_fields:\n create_data = _eager_fields.copy()\n create_data[\"launch_type\"] = \"relaunch\"\n\n prompts = self.launch_prompts()\n if self.unified_job_template and (prompts is not None):\n prompts.update(new_prompts)\n prompts['_eager_fields'] = create_data\n unified_job = self.unified_job_template.create_unified_job(**prompts)\n else:\n unified_job = copy_model_by_class(self, unified_job_class, fields, {})\n for fd, val in create_data.items():\n setattr(unified_job, fd, val)\n unified_job.save()\n\n # Labels copied here\n from awx.main.signals import disable_activity_stream\n\n with disable_activity_stream():\n copy_m2m_relationships(self, unified_job, fields)\n\n return unified_job", "def parse_args(args):\n parser = argparse.ArgumentParser(description='Parse Mysql Copy account you want',add_help=False)\n connect_setting = parser.add_argument_group('connect setting')\n connect_setting.add_argument('-h','--host',dest='host',type=str,help='Host the MySQL database server located',default='127.0.0.1')\n connect_setting.add_argument('-u','--user',dest='user',type=str,help='MySQL Username to log in as',default='root')\n connect_setting.add_argument('-p','--password',dest='password',type=str,help='MySQL Password to use', default='')\n connect_setting.add_argument('-P', '--port', dest='port', type=int,help='MySQL port to use', default=3306)\n copy_user = parser.add_argument_group('copy user')\n copy_user.add_argument('--src-user',dest='srcuser',type=str,help='copy from the user',nargs='*',default='*')\n copy_user.add_argument('--src-host',dest='srchost',type=str,help='copy from the host',nargs='*',default='*')\n copy_user.add_argument('--dest-user',dest='destuser',type=str,help='copy to the user',nargs='*',default='')\n copy_user.add_argument('--dest-host',dest='desthost',type=str,help='copy to the host',nargs='*',default='')\n\n parser.add_argument('--help', dest='help', action='store_true', help='help infomation', default=False)\n\n return parser", "def _generate_ipt_args_settings_in_construct(ipt_args_in_construct, settings):\n if settings and settings.op_ipt_type:\n input_type = settings.op_ipt_type\n if input_type == InputType.TENSOR.value:\n ipt_args_settings_in_construct = ipt_args_in_construct\n elif input_type == InputType.LIST.value:\n ipt_args_settings_in_construct = 
f\"({ipt_args_in_construct},)\"\n else:\n raise NodeInputTypeNotSupportError(f\"Input type[{input_type}] is not supported now.\")\n else:\n ipt_args_settings_in_construct = ipt_args_in_construct\n\n if settings and settings.op_extra_input:\n settings_value = settings.op_extra_input\n if settings_value:\n settings_in_construct = ', '.join([f\"{setting_val}\" for _, setting_val in settings_value.items()])\n ipt_args_settings_in_construct = ', '.join((ipt_args_settings_in_construct, settings_in_construct))\n\n return ipt_args_settings_in_construct", "def __init__(self, *args, **kwargs):\n super(ColumnShiftTable, self).__init__(*args, **kwargs)\n # Override default template\n if hasattr(self, \"template_name\"):\n self.template_name = self.shifter_template\n else:\n self.template = self.shifter_template", "def prepare_pr_optimal_model_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index, data_files, rank='0', extraArgs=''):\n ############\n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = ''\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n input_files_string = ', '\n for data_file in data_files:\n input_files_string += (data_file + ', ')\n input_files_string = input_files_string.rstrip(', ')\n\n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles=input_files_string,\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def templateargs(self, target_jar, confs=None):\r\n raise NotImplementedError()", "def load_params():\n # start arg parser\n parser = argparse.ArgumentParser(description='Apero Copy')\n # add arguments\n parser.add_argument('yaml', help='yaml file to use', type=str,\n default=None)\n parser.add_argument('--overwrite', help='overwrite existing files',\n action='store_true', default=False)\n parser.add_argument('--test', help='run in test mode (no copy or remove)',\n action='store_true', default=False)\n parser.add_argument('--symlinks', action='store_true', default=False,\n help='Copy in symlink mode (not recommended)')\n # get arguments\n args = vars(parser.parse_args())\n # ------------------------------------------------------------------\n # deal with getting yaml file\n # ------------------------------------------------------------------\n # deal with no yaml file set\n if args['yaml'] is None:\n emsg = 'yaml file must be set'\n raise AperoCopyError(emsg)\n # deal with bad path\n if not os.path.exists(args['yaml']):\n emsg = 'yaml file {0} does not exist'\n eargs = [args['yaml']]\n raise AperoCopyError(emsg.format(*eargs))\n # otherwise we read the yaml file\n params = read_yaml(args['yaml'])\n # add args to params\n for arg in args:\n if arg not in params:\n params[arg] = args[arg]\n # ------------------------------------------------------------------\n # return the parameters\n return params", "def prepare_row(task, full):\n\n # Would like to include 
the Job ID in the default set of columns, but\n # it is a long value and would leave little room for status and update time.\n\n row_spec = collections.namedtuple('row_spec',\n ['key', 'optional', 'default_value'])\n\n # pyformat: disable\n default_columns = [\n row_spec('job-name', False, None),\n row_spec('task-id', True, None),\n row_spec('last-update', False, None)\n ]\n short_columns = default_columns + [\n row_spec('status-message', False, None),\n ]\n full_columns = default_columns + [\n row_spec('job-id', False, None),\n row_spec('user-id', False, None),\n row_spec('status', False, None),\n row_spec('status-detail', False, None),\n row_spec('create-time', False, None),\n row_spec('end-time', False, 'NA'),\n row_spec('internal-id', False, None),\n row_spec('logging', False, None),\n row_spec('inputs', False, {}),\n row_spec('outputs', False, {}),\n row_spec('envs', False, {}),\n row_spec('labels', False, {}),\n ]\n # pyformat: enable\n\n columns = full_columns if full else short_columns\n\n row = {}\n for col in columns:\n key, optional, default = col\n\n value = task.get_field(key, default)\n if not optional or value:\n row[key] = value\n\n return row", "def init_jobs(self):\n self.jobsTableWidget.clear()\n self.jobsTableWidget.setColumnCount(6)\n self.jobsTableWidget.setHorizontalHeaderLabels(['Job Id', 'Description/Error', 'Submission Date', 'Status',\n 'Execute', 'Display'])\n header = self.jobsTableWidget.horizontalHeader()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)\n header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(4, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(5, QtWidgets.QHeaderView.ResizeToContents)", "def from_dict(cls, dikt) -> 'JobOutputRequest':\n return util.deserialize_model(dikt, cls)", "def load_raw_to_bq(event, context):\n\n import os\n\n\n print(f\"Processing .....\")\n\n file = event\n project = os.environ.get('ENV_PROJECT')\n dataset = os.environ.get('ENV_DATASET')\n bucket = file.get(\"bucket\")\n tableCsv = file.get(\"name\")\n tableDestList = tableCsv.split(\".\")\n tableDest = tableDestList[0]\n table_id = f'{project}.{dataset}.{tableDest}'\n\n from Configuration import Configuration\n\n Configuration(tableCsv,bucket,table_id)\n\n\n print(f\"End Process.\")", "def __init__(__self__,\n resource_name: str,\n args: Optional[TransferConfigArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def clone(source, destination):\n\t\treturn \"CREATE DATABASE {0} WITH TEMPLATE {1};\".format(destination, source)", "def __init__(self, table_name='casbin_rule', **kwargs):\n self.table_name = table_name\n self.dynamodb = boto3.client('dynamodb', **kwargs)\n try:\n\n self.dynamodb.create_table(\n TableName=self.table_name,\n\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n except self.dynamodb.exceptions.ResourceInUseException:\n pass", "def __init__(self, postgresRunner, tablename, table_columns):\n self.postgresRunner = postgresRunner\n self.tablename = tablename\n self.table_config = table_columns\n self.table_keys = [column.get(\"name\") for column in table_columns]\n self.__run(\n 
_PostgresCrud__create_table_command(tablename, table_columns)\n )", "def build_data(cmd, rel_new_path, new_md5, founded_path=None):\n data = {'cmd': cmd}\n if cmd == 'copy':\n data['file'] = {'src': founded_path,\n 'dst': rel_new_path,\n 'md5': new_md5,\n }\n else:\n data['file'] = {'filepath': rel_new_path,\n 'md5': new_md5,\n }\n return data", "def __init__(self, jobArgs):\n logging.info(\"Constructing job with args %r\" % jobArgs)\n self.args = jobArgs\n\n self._setup()", "def __init__(self, job_id=None, version=None, status=None, exit_status=None, exit_message=None, create_time=None, start_time=None, end_time=None, last_updated=None):\n self.swagger_types = {\n 'job_id': str,\n 'version': str,\n 'status': str,\n 'exit_status': str,\n 'exit_message': str,\n 'create_time': datetime,\n 'start_time': datetime,\n 'end_time': datetime,\n 'last_updated': datetime\n }\n\n self.attribute_map = {\n 'job_id': 'job_id',\n 'version': 'version',\n 'status': 'status',\n 'exit_status': 'exit_status',\n 'exit_message': 'exit_message',\n 'create_time': 'create_time',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'last_updated': 'last_updated'\n }\n\n self._job_id = job_id\n self._version = version\n self._status = status\n self._exit_status = exit_status\n self._exit_message = exit_message\n self._create_time = create_time\n self._start_time = start_time\n self._end_time = end_time\n self._last_updated = last_updated", "def constructBundleJob(self, xml_node):\n name = os.path.join(self.dir, xml_node.find(\"name\").text)\n output_name = name + \".o\"\n xar_job = BitcodeBundle(self.arch, name, output_name)\n return xar_job", "async def copy(self, _id: str, dst_id: str, *,\n rev: Optional[str] = None,\n dst_rev: Optional[str] = None,\n batch: Optional[bool] = None) -> dict:\n\n headers = dict(\n Destination=f'{dst_id}?rev={dst_rev}' if dst_rev else dst_id\n )\n\n params = dict(\n rev=rev,\n batch=\"ok\" if batch else None,\n )\n\n return await self.__connection.query('COPY', self._get_path(_id), params=params, headers=headers)", "def make_job_config_json(self, job_description):\n bench_name = job_description[0]\n bench_type = job_description[1]\n bench_preference = job_description[2]\n config_file = self._node_mgr_path / f'{bench_name}_{bench_type}_{bench_preference}' / 'job.json'\n\n # FIXME: hard coded\n # Dict[str, Dict[str, Any]]\n output = dict()\n config = dict()\n config[\"name\"] = bench_name\n config[\"type\"] = bench_type\n config[\"num_of_threads\"] = 2\n if self._node_type == NodeType.IntegratedGPU:\n config[\"binding_cores\"] = \"0,3-5\"\n elif self._node_type == NodeType.CPU:\n config[\"binding_cores\"] = \"0-3\"\n config[\"numa_nodes\"] = \"0\"\n config[\"cpu_freq\"] = 2.1\n config[\"cpu_percent\"] = 100\n if self._node_type == NodeType.IntegratedGPU:\n config[\"gpu_freq\"] = 1300500000\n\n output[\"workloads\"] = config\n\n with config_file.open('w') as fp:\n fp.seek(0)\n json.dump(output, fp, indent=4)\n return config_file", "def AddJobResourceArg(parser):\n\n return concept_parsers.ConceptParser.ForResource(\n 'job_name',\n GetJobResourceSpec(),\n 'Transcoder Job name',\n required=True).AddToParser(parser)", "def getCloneArgs(self):\n\n values = {\n \"condition\": self.subnode_condition.makeClone(),\n \"yes_branch\": self.subnode_yes_branch.makeClone()\n if self.subnode_yes_branch is not None\n else None,\n \"no_branch\": self.subnode_no_branch.makeClone()\n if self.subnode_no_branch is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def 
build_job_configs(self, args):\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n\n datafile = args['data']\n if datafile is None or datafile == 'None':\n return job_configs\n NAME_FACTORY.update_base_dict(args['data'])\n outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')\n\n inputfiles = create_inputlist(args['ft1file'])\n num_files = len(inputfiles)\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n\n mktimelist = copy.copy(comp.mktimefilters)\n if not mktimelist:\n mktimelist.append('none')\n evtclasslist_keys = copy.copy(comp.evtclasses)\n if not evtclasslist_keys:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n else:\n evtclasslist_vals = copy.copy(evtclasslist_keys)\n\n for mktimekey in mktimelist:\n for evtclassval in evtclasslist_vals:\n fullkey = comp.make_key(\n '%s_%s_{ebin_name}_%s_{evtype_name}' %\n (evtclassval, zcut, mktimekey))\n\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime=mktimekey,\n evclass=evtclassval,\n fullpath=True)\n\n ccube_name = os.path.basename(NAME_FACTORY.ccube(**name_keys))\n outfile = os.path.join(outdir_base, ccube_name)\n infiles = _make_input_file_list(outfile, num_files)\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[fullkey] = dict(args=infiles,\n output=outfile,\n logfile=logfile)\n\n return job_configs", "def layout_copycat(source_network, target_network, source_column='name', target_column='name',\n grid_unmapped=True, select_unmapped=True, base_url=DEFAULT_BASE_URL):\n source_network = networks.get_network_name(source_network)\n target_network = networks.get_network_name(target_network)\n res = commands.commands_post(\n 'layout copycat sourceNetwork=\"' + source_network + '\" targetNetwork=\"' + target_network +\n '\" sourceColumn=\"' + source_column + '\" targetColumn=\"' + target_column +\n '\" gridUnmapped=\"' + str(grid_unmapped) + '\" selectUnmapped=\"' + str(select_unmapped),\n base_url=base_url)\n return res", "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. 
OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "def copy(CopySource=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, SourceClient=None, Config=None):\n pass", "def _kwargs(self):\n dict = DAG._kwargs(self) \n if (self.job): \n dict[\"inputpaths\"] = self.job.inputpaths\n dict[\"outputpath\"] = self.job.outputpath\n dict[\"job\"] = \"%s()\" % self.job.__class__.__name__\n return dict", "def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs", "def assert_task_args(cls, args: \"DictConfig\", shared_state: \"SharedTaskState\") -> None:\n assert isinstance(\n shared_state, SharedStaticTaskState\n ), \"Cannot assert args on a non-static state\"\n super().assert_task_args(args, shared_state)\n\n found_task_source = args.blueprint.task_source\n assert (\n found_task_source is not None\n ), \"Must provide a path to a javascript bundle in `task_source`\"\n\n found_task_path = os.path.expanduser(found_task_source)\n assert os.path.exists(\n found_task_path\n ), f\"Provided task source {found_task_path} does not exist.\"\n\n link_task_source = args.blueprint.link_task_source\n current_architect = args.architect._architect_type\n allowed_architects = [\"local\"]\n assert link_task_source == False or (\n link_task_source == True and current_architect in allowed_architects\n ), f\"`link_task_source={link_task_source}` is not compatible with architect type: 
{args.architect._architect_type}. Please check your task configuration.\"\n\n if link_task_source == False and current_architect in allowed_architects:\n logger.info(\n \"If you want your server to update on reload whenever you make changes to your webapp, then make sure to set \\n\\nlink_task_source: [blue]true[/blue]\\n\\nin your task's hydra configuration and run \\n\\n[purple]cd[/purple] webapp [red]&&[/red] [green]npm[/green] run dev:watch\\n\\nin a separate terminal window. For more information check out:\\nhttps://mephisto.ai/docs/guides/tutorials/custom_react/#12-launching-the-task\\n\",\n extra={\"markup\": True},\n )", "def prepare_jobs_data(self, ecosystem, package, version):\n return \\\n {\n \"flow_arguments\": [\n {\n \"ecosystem\": ecosystem,\n \"name\": package,\n \"version\": version,\n \"force\": True,\n \"force_graph_sync\": True,\n \"recursive_limit\": 0\n }\n ],\n \"flow_name\": \"bayesianApiFlow\"\n }", "def main(conn, label_config, table_name, start_date, end_date,\r\n preprocessing_prefix):\r\n label_sql = label_config['query']\r\n label_sql = label_sql.replace('{prefix}', preprocessing_prefix)\r\n label_sql = label_sql.replace('{start_date}', start_date)\r\n label_sql = label_sql.replace('{end_date}', end_date)\r\n drop_sql = f'drop table if exists {table_name};'\r\n create_sql = f'create table {table_name} as ({label_sql});'\r\n sql.run_sql_from_string(conn, drop_sql)\r\n sql.run_sql_from_string(conn, create_sql)", "def Copy(self, copy):\n return _table.Table_Copy(self, copy)", "def __prepare_job(self, job_wrapper, job_destination):\n command_line = None\n client = None\n remote_job_config = None\n compute_environment = None\n try:\n client = self.get_client_from_wrapper(job_wrapper)\n tool = job_wrapper.tool\n remote_job_config = client.setup(tool.id, tool.version)\n rewrite_parameters = LwrJobRunner.__rewrite_parameters( client )\n prepare_kwds = {}\n if rewrite_parameters:\n compute_environment = LwrComputeEnvironment( client, job_wrapper, remote_job_config )\n prepare_kwds[ 'compute_environment' ] = compute_environment\n job_wrapper.prepare( **prepare_kwds )\n self.__prepare_input_files_locally(job_wrapper)\n remote_metadata = LwrJobRunner.__remote_metadata( client )\n dependency_resolution = LwrJobRunner.__dependency_resolution( client )\n metadata_kwds = self.__build_metadata_configuration(client, job_wrapper, remote_metadata, remote_job_config)\n remote_command_params = dict(\n working_directory=remote_job_config['working_directory'],\n metadata_kwds=metadata_kwds,\n dependency_resolution=dependency_resolution,\n )\n remote_working_directory = remote_job_config['working_directory']\n # TODO: Following defs work for LWR, always worked for LWR but should be\n # calculated at some other level.\n remote_job_directory = os.path.abspath(os.path.join(remote_working_directory, os.path.pardir))\n remote_tool_directory = os.path.abspath(os.path.join(remote_job_directory, \"tool_files\"))\n container = self._find_container(\n job_wrapper,\n compute_working_directory=remote_working_directory,\n compute_tool_directory=remote_tool_directory,\n compute_job_directory=remote_job_directory,\n )\n command_line = build_command(\n self,\n job_wrapper=job_wrapper,\n container=container,\n include_metadata=remote_metadata,\n include_work_dir_outputs=False,\n remote_command_params=remote_command_params,\n )\n except Exception:\n job_wrapper.fail( \"failure preparing job\", exception=True )\n log.exception(\"failure running job %d\" % job_wrapper.job_id)\n\n # If we were able to 
get a command line, run the job\n if not command_line:\n job_wrapper.finish( '', '' )\n\n return command_line, client, remote_job_config, compute_environment" ]
[ "0.6806518", "0.51471066", "0.5135233", "0.5034788", "0.4957393", "0.49131292", "0.483293", "0.4809419", "0.47891185", "0.47865072", "0.4757659", "0.46871853", "0.46683812", "0.46534342", "0.46357578", "0.46097738", "0.45914286", "0.45865327", "0.457555", "0.45609608", "0.45531315", "0.4541542", "0.45230606", "0.45217237", "0.45121175", "0.44751364", "0.44724846", "0.44585148", "0.44583514", "0.44578895", "0.44328374", "0.4430723", "0.44290823", "0.4424437", "0.43942782", "0.43902308", "0.4380478", "0.43767503", "0.4376245", "0.43692726", "0.43640503", "0.43627232", "0.43527898", "0.43488112", "0.4348266", "0.43480918", "0.43472457", "0.43357152", "0.43352816", "0.4329406", "0.43147004", "0.42907143", "0.42858192", "0.4280023", "0.42786917", "0.4275506", "0.42655027", "0.4262821", "0.42623588", "0.426164", "0.42548752", "0.42503175", "0.42485717", "0.42431003", "0.42278922", "0.42273858", "0.42170283", "0.42044568", "0.41984707", "0.4198079", "0.4197644", "0.41960672", "0.41902086", "0.41797945", "0.4178592", "0.41717142", "0.4162135", "0.4161416", "0.41609934", "0.4154808", "0.41535458", "0.4153215", "0.4151624", "0.41487232", "0.4144681", "0.41438156", "0.41397274", "0.41393596", "0.41336727", "0.4129702", "0.41267553", "0.4119714", "0.4118392", "0.41181016", "0.4117269", "0.41152474", "0.41130465", "0.41129538", "0.4112596", "0.41043875" ]
0.73063713
0
Process schema updates (additions/mode changes) for the request. Retrieves the current table schema for ref and attempts to merge in the schema provided in the request. This is necessary since the API backend does not handle PATCH semantics for schema updates (i.e., it cannot process only the deltas), so we must always send the fully updated schema in the request.
def ProcessSchemaUpdate(ref, args, request):
  table = request.table
  relaxed_columns = args.relax_columns
  if not table.schema and not relaxed_columns:  # if not updating schema,
    return request  # then just return.

  original_schema = _TryGetCurrentSchema(ref.Parent().Name(), ref.Name(),
                                         ref.projectId)
  new_schema_columns = table.schema
  updated_fields = _GetUpdatedSchema(original_schema, new_schema_columns,
                                     relaxed_columns)

  table_schema_type = GetApiMessage('TableSchema')
  request.table.schema = table_schema_type(fields=updated_fields)

  return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_schema_updates(self):\n data = self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/schema-update\" % (self.project_key, self.recipe_name))\n return RequiredSchemaUpdates(self, data)", "async def upgradeSchema(self) -> None:", "def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. 
We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry", "def resolve_schema_in_request_body(self, request_body):\n content = request_body[\"content\"]\n for content_type in content:\n schema = content[content_type][\"schema\"]\n content[content_type][\"schema\"] = self.openapi.resolve_schema_dict(schema)", "def process(*, schemas: types.Schemas) -> None:\n # Retrieve back references\n backrefs = process_helper.get_artifacts(\n schemas=schemas, get_schema_artifacts=_get_schema_backrefs\n )\n # Map to a schema for each grouped back references\n backref_schemas = process_helper.calculate_outputs(\n artifacts=backrefs, calculate_output=_backrefs_to_schema\n )\n # Convert to list to resolve iterator\n backref_schema_list = list(backref_schemas)\n # Add backreferences to schemas\n for name, backref_schema in backref_schema_list:\n schemas[name] = {\"allOf\": [schemas[name], backref_schema]}", "def _GetUpdatedSchema(\n original_schema,\n new_columns=None,\n relaxed_columns=None):\n orig_field_map = (\n {f.name: f for f in original_schema.fields} if original_schema else {})\n\n if relaxed_columns:\n orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map)\n\n if new_columns:\n orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map)\n\n return sorted(orig_field_map.values(), key=lambda x: x.name)", "def resolve_schema(self, data):\n if not isinstance(data, dict):\n return\n\n # OAS 2 component or OAS 3 header\n if \"schema\" in data:\n data[\"schema\"] = self.openapi.resolve_schema_dict(data[\"schema\"])\n # OAS 3 component except header\n if self.openapi_version.major >= 3:\n if \"content\" in data:\n for content_type in data[\"content\"]:\n schema = data[\"content\"][content_type][\"schema\"]\n data[\"content\"][content_type][\n \"schema\"\n ] = self.openapi.resolve_schema_dict(schema)", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)", "def update_schema(self, 
engine_name, schema):\n endpoint = \"engines/{}/schema\".format(engine_name)\n data = json.dumps(schema)\n return self.swiftype_session.request('post', endpoint, data=data)", "def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)", "def test_compare_schemas_happypath(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.base_schema\n )\n\n assert status == schema_utils.Update.no_update", "def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['status']\n new_mode = new_info['mode']\n new_name = new_info['name']\n new_type = new_info['type']\n new_status = new_schema_entry['status']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # If the old field is a REQUIRED primitive (which could only have come\n # from an existing schema), the new field can be either a\n # NULLABLE(filled) or a NULLABLE(unfilled).\n if old_mode == 'REQUIRED' and new_mode == 'NULLABLE':\n # If the new field is filled, then retain the REQUIRED.\n if new_schema_entry['filled']:\n return old_mode\n else:\n # The new field is not filled (i.e. an empty or null field).\n # If --infer_mode is active, then we allow the REQUIRED to\n # revert back to NULLABLE.\n if self.infer_mode:\n return new_mode\n else:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode.'\n ' cannot convert to NULLABLE because infer_schema not'\n ' set:'\n f' old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n elif old_mode != new_mode:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode: '\n f'old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n return old_mode", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "def _update(self, schema: 'Schema'):\n for method in schema._get_methods():\n if method.id in self:\n raise ValueError(\n f\"Duplicate method id for {method.method} id: {method.id}\"\n )\n\n for combinator in schema._get_combinators():\n if combinator.id in self:\n raise ValueError(\n f\"Duplicate combinator id for {combinator.predicate} \" +\n f\"id: {combinator.id}\"\n )\n\n self.constructors += schema.constructors\n self.functions += schema.functions\n\n self._build_schema_data()", "def preprocess_schema(schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces=None, qualified=False):\n\n from .simplexml import SimpleXMLElement # here to avoid recursive imports\n\n # analyze the namespaces used in this schema\n local_namespaces = {}\n for k, v in schema[:]:\n if k.startswith(\"xmlns\"):\n local_namespaces[get_local_name(k)] = v\n if k == 'targetNamespace':\n # URI namespace reference for this schema\n if v == \"urn:DefaultNamespace\":\n v = global_namespaces[None]\n local_namespaces[None] = v\n if k == 'elementFormDefault':\n qualified = (v == \"qualified\")\n # add schema namespaces to the global namespace dict = 
{URI: ns prefix}\n for ns in local_namespaces.values():\n if ns not in global_namespaces:\n global_namespaces[ns] = 'ns%s' % len(global_namespaces)\n \n for element in schema.children() or []:\n if element.get_local_name() in ('import', 'include',):\n schema_namespace = element['namespace']\n schema_location = element['schemaLocation']\n if schema_location is None:\n log.debug('Schema location not provided for %s!' % schema_namespace)\n continue\n if schema_location in imported_schemas:\n log.debug('Schema %s already imported!' % schema_location)\n continue\n imported_schemas[schema_location] = schema_namespace\n log.debug('Importing schema %s from %s' % (schema_namespace, schema_location))\n # Open uri and read xml:\n xml = fetch(schema_location, http, cache, force_download, wsdl_basedir)\n\n # Parse imported XML schema (recursively):\n imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)\n preprocess_schema(imported_schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces, qualified)\n\n element_type = element.get_local_name()\n if element_type in ('element', 'complexType', \"simpleType\"):\n namespace = local_namespaces[None] # get targetNamespace\n element_ns = global_namespaces[ns] # get the prefix\n element_name = element['name']\n log.debug(\"Parsing Element %s: %s\" % (element_type, element_name))\n if element.get_local_name() == 'complexType':\n children = element.children()\n elif element.get_local_name() == 'simpleType':\n children = element('restriction', ns=xsd_uri)\n elif element.get_local_name() == 'element' and element['type']:\n children = element\n else:\n children = element.children()\n if children:\n children = children.children()\n elif element.get_local_name() == 'element':\n children = element\n if children:\n process_element(elements, element_name, children, element_type, xsd_uri, dialect, namespace, qualified)", "def modify_schema(setup_path, names, lp, creds, reporter, ldif, msg):\n\n return provision_schema(setup_path, names, lp, creds, reporter, ldif, msg, True)", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "async def update_db(appname, schema, table, action):\n loop = asyncio.get_event_loop()\n if args.action == 'info':\n await schema_info(args.appname, args.schema)\n sys.exit()\n\n dbstate = await schema_state(args.appname, args.schema, args.table)\n modstate = model_state(args.appname, args.schema, args.table)\n updiff, downdiff = compare_state(args.appname, args.schema, args.table,\n dbstate, modstate)\n if args.action == 'upgrade':\n await set_state(args.appname, updiff)\n elif args.action == 'downgrade':\n await set_state(args.appname, downdiff)\n else:\n print(\"upgrade diff looks like\")\n print(updiff)\n print(\"downgrade diff looks like\")\n print(downdiff)", "def _Dynamic_GetSchema(self, req, schema, request_id=None):\n # This is not used, but it is required for the method signature.\n del request_id\n\n app_str = req.app()\n self.__ValidateAppId(app_str)\n schema.set_more_results(False)", "def status_changes_schema():\n schema = endpoint_schema(\"status_changes\")\n items = schema[\"properties\"][\"data\"][\"properties\"][\"status_changes\"][\"items\"]\n\n # merge the state machine definitions and transition combinations rule\n state_machine_defs, transitions = common.vehicle_state_machine(\"vehicle_state\", \"event_types\")\n schema[\"definitions\"].update(state_machine_defs)\n items[\"allOf\"].append(transitions)\n\n 
trip_id_ref = common.load_definitions(\"trip_id_reference\")\n items[\"allOf\"].append(trip_id_ref)\n\n # verify and return\n return common.check_schema(schema)", "def update(self, schema: 'Schema'):\n self._update(schema)", "def test_schema_updates(self):\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.ContentLibraryIndexer.SCHEMA_VERSION\",\n new=0):\n result = self._create_library(slug=\"test-lib-schemaupdates-1\", title=\"Title 1\", description=\"Description\")\n library_key = LibraryLocatorV2.from_string(result['id'])\n assert len(ContentLibraryIndexer.get_items([library_key])) == 1\n\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.ContentLibraryIndexer.SCHEMA_VERSION\",\n new=1):\n assert len(ContentLibraryIndexer.get_items([library_key])) == 0\n\n call_command(\"reindex_content_library\", all=True, force=True)\n\n assert len(ContentLibraryIndexer.get_items([library_key])) == 1", "def process_updates():\n print \"[{x}] Processing Requests\".format(x=dates.now())\n WorkflowApi.process_requests()\n WorkflowApi.process_enhancements()", "def fetch_schema(self) -> None:\n if self.schema_file:\n logger.info(\"Loaded schema from file '%s'\", self.schema_file)\n self._schema = load_schema_file(self.schema_file)\n else:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})", "def ensure_internal_schema_updated(self):\n if self._internal_schema_updated:\n return\n if internalmigrations.needs_upgrading(self):\n assert not self._in_transaction\n with self.lock():\n internalmigrations.upgrade(self)\n self.connection.commit()\n self._internal_schema_updated = True", "def rebuild(self, dframe, overwrite=False):\n current_schema = self\n new_schema = schema_from_dframe(dframe, self)\n\n if current_schema and not overwrite:\n # merge new schema with existing schema\n current_schema.update(new_schema)\n new_schema = current_schema\n\n return new_schema", "def test_schema_updates(self):\n lib = self._create_library(slug=\"test-lib--block-schemaupdates-1\", title=\"Title 1\", description=\"Description\")\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.LibraryBlockIndexer.SCHEMA_VERSION\",\n new=0):\n block = self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 1\n\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.LibraryBlockIndexer.SCHEMA_VERSION\",\n new=1):\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 0\n\n call_command(\"reindex_content_library\", all=True, force=True)\n\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 1", "def test_compare_schemas_major(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.major_removed_value\n )\n\n assert status == schema_utils.Update.major", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del 
columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))", "async def schemas(\n self,\n *,\n query_params: Optional[Dict[str, any]] = None,\n headers: Optional[Dict[str, str]] = None,\n ) -> AuditLogsResponse:\n return await self.api_call(\n path=\"schemas\",\n query_params=query_params,\n headers=headers,\n )", "def parse_schema(schemaurl, schema_dir=None):\n if schema_dir:\n try:\n # attempts to open .schema file in directory schema_dir\n local_schema_path = schema_dir + \"/\" + \\\n schemaurl[schemaurl.rfind('/') + 1:-1] + \".schema\"\n print \"Looking for schema in file %s\" % (local_schema_path)\n schema = json.load(open(local_schema_path))\n except Exception as e:\n print \"Couldn't load schema %s from file %s\\n%s\" % (\n schemaurl, schema_dir, str(e))\n return None\n else:\n # load the schema directly from schemaurl, i.e., from the web\n try:\n schema = json.load(urllib2.urlopen(schemaurl))\n except Exception as e:\n print \"Couldn't load schema %s\\n%s\" % (schemaurl, str(e))\n return None\n\n if 'extends' in schema and '$ref' in schema['extends']:\n\n parent_schema = json.load(urllib2.urlopen(schema['extends']['$ref']))\n while (True): # exits loop when no additional extensions (break below)\n for key in sorted(parent_schema.keys()):\n if key not in schema:\n schema[key] = parent_schema[key]\n # need to merge these keys individually\n if key == 'properties':\n for key in sorted(parent_schema['properties'].keys()):\n if key not in schema['properties']:\n schema['properties'][key] = parent_schema[\n 'properties'][key]\n if 'extends' in parent_schema:\n parent_schema = json.load(\n urllib2.urlopen(parent_schema['extends']['$ref']))\n else:\n break\n # essentially a do while loop (exit condition)\n\n return schema", "def 
deduce_schema_for_record(self, json_object, schema_map, base_path=None):\n for key, value in json_object.items():\n # The canonical key is the lower-cased version of the sanitized key\n # so that the case of the field name is preserved when generating\n # the schema but we don't create invalid, duplicate, fields since\n # BigQuery is case insensitive\n canonical_key = self.sanitize_name(key).lower()\n schema_entry = schema_map.get(canonical_key)\n new_schema_entry = self.get_schema_entry(\n key=key,\n value=value,\n base_path=base_path\n )\n schema_map[canonical_key] = self.merge_schema_entry(\n old_schema_entry=schema_entry,\n new_schema_entry=new_schema_entry,\n base_path=base_path\n )", "def update_all():\n req_data = request.get_json()\n jobs = JobModel.get_one_job(job_id)\n if not jobs:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n data, error = job_schema.load(req_data, partial=True)\n if error:\n return custom_response(error, 400)\n\n for job in jobs:\n job.update(data)\n job_message = job_schema.dump(job)\n\n return custom_response(job_message, 200)", "def test_json_merge_patch():\n schemas = {}\n\n basenames = (\n 'record-package-schema.json',\n 'release-package-schema.json',\n 'release-schema.json',\n 'versioned-release-validation-schema.json',\n )\n\n if ocds_version or not use_development_version:\n url_pattern = ocds_schema_base_url + ocds_tag + '/{}'\n else:\n url_pattern = development_base_url + '/{}'\n\n for basename in basenames:\n schemas[basename] = http_get(url_pattern.format(basename)).json()\n\n if basename == 'release-schema.json':\n path = os.path.join(extensiondir, 'extension.json')\n with open(path) as f:\n metadata = json.load(f, object_pairs_hook=rejecting_dict)\n schemas[basename] = extend_schema(basename, schemas[basename], metadata, codelists=external_codelists)\n\n # This loop is somewhat unnecessary, as repositories contain at most one of each schema file.\n for path, name, text, data in walk_json_data(patch):\n if is_json_merge_patch(data):\n if name in basenames:\n unpatched = deepcopy(schemas[name])\n try:\n patched = merge(unpatched, data)\n except Exception as e:\n assert False, f'Exception: {e} {path}'\n\n # All metadata should be present.\n validate_json_schema(path, name, patched, metaschemas()['metaschema'], full_schema=True)\n\n # Empty patches aren't allowed. 
json_merge_patch mutates `unpatched`, so `schemas[name]` is tested.\n assert patched != schemas[name]", "def forwards_func(apps, schema_editor):\n judge_file = apps.get_model(\"judges\", \"JudgeFile\")\n db_alias = schema_editor.connection.alias\n judge_file.objects.using(db_alias).filter(\n file_type='ruling').update(file_type='rulings')\n judge_file.objects.using(db_alias).filter(\n file_type='verdict').update(file_type='verdicts')\n judge_file.objects.using(db_alias).filter(\n file_type='transcript').update(file_type='transcripts')\n judge_file.objects.using(db_alias).filter(\n file_type='image').update(file_type='campaigns')", "def update_all(self, request):\n\n schema = self.session.info['schema']\n\n for item in self.query().filter_by(schema=schema):\n self.session.delete(item)\n\n for item in ElectionCollection(self.session).query():\n self.update(item, request)\n\n for item in ElectionCompoundCollection(self.session).query():\n self.update(item, request)\n\n for item in VoteCollection(self.session).query():\n self.update(item, request)", "def _get_and_validate_schema_mapping(schema1, schema2, strict=False):\n\n len_schema1 = len(schema1)\n len_schema2 = len(schema2)\n\n # If both non-empty, must be same length\n if 0 < len_schema1 != len_schema2 > 0:\n raise ValueError(\"Attempted to merge profiles with different \"\n \"numbers of columns\")\n\n # In the case of __add__ with one of the schemas not initialized\n if strict and (len_schema1 == 0 or len_schema2 == 0):\n raise ValueError(\"Cannot merge empty profiles.\")\n\n # In the case of _update_from_chunk with uninitialized schema\n if not strict and len_schema2 == 0:\n return {col_ind: col_ind for col_ind_list in schema1.values()\n for col_ind in col_ind_list}\n\n # Map indices in schema1 to indices in schema2\n schema_mapping = dict()\n\n for key in schema1:\n # Pandas columns are int by default, but need to fuzzy match strs\n if isinstance(key, str):\n key = key.lower()\n if key not in schema2:\n raise ValueError(\"Columns do not match, cannot update \"\n \"or merge profiles.\")\n\n elif len(schema1[key]) != len(schema2[key]):\n raise ValueError(f\"Different number of columns detected for \"\n f\"'{key}', cannot update or merge profiles.\")\n\n is_duplicate_col = len(schema1[key]) > 1\n for schema1_col_ind, schema2_col_ind in zip(schema1[key],\n schema2[key]):\n if is_duplicate_col and (schema1_col_ind != schema2_col_ind):\n raise ValueError(f\"Different column indices under \"\n f\"duplicate name '{key}', cannot update \"\n f\"or merge unless schema is identical.\")\n schema_mapping[schema1_col_ind] = schema2_col_ind\n\n return schema_mapping", "def _process_json(self, json_content):\n if self._ns_sqlcon.connection is None:\n LOG.error(f'failed to open connection to DB')\n return\n entries = [entry for entry in json_content]\n LOG.info('started updating DB')\n num_of_entries = len(entries)\n for x in range(num_of_entries):\n entry = entries[x]\n try:\n self._ns_sqlcon.update_plugins_table(entry['_source'])\n except AttributeError:\n LOG.exception(f'malformed entry: {entry}')\n if x % 2000 != 0:\n continue\n LOG.info(f'Updated {x} records')\n\n LOG.info(f'Updated {num_of_entries} records')\n try:\n LOG.info('Commit started')\n self._ns_sqlcon.session.commit()\n LOG.info('Commit finished')\n except sqlalchemy.exc.IntegrityError:\n LOG.exception('failed committing updates to DB')\n self._ns_sqlcon.session.rollback()\n\n LOG.info('Finished updating DB')", "def formatSchema(self):\n schema = json.loads(self.schemaSource)\n stack = 
[schema]\n # Strip out all the docs\n while len(stack) > 0:\n elm = stack.pop()\n if \"doc\" in elm:\n elm[\"doc\"] = \"\"\n for value in elm.values():\n if isinstance(value, dict):\n stack.append(value)\n elif isinstance(value, list):\n for dic in value:\n if isinstance(dic, dict):\n stack.append(dic)\n jsonData = json.dumps(schema)\n output = \"\\n\".join(textwrap.wrap(jsonData)) + \"\\n\"\n return output", "def schema_helper(self, name, _, schema=None, **kwargs):\n if schema is None:\n return None\n\n schema_instance = resolve_schema_instance(schema)\n\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n\n return json_schema", "def _TryGetCurrentSchema(dataset_id, table_id, project_id):\n client = GetApiClient()\n service = client.tables\n get_request_type = GetApiMessage('BigqueryTablesGetRequest')\n get_request = get_request_type(datasetId=dataset_id,\n tableId=table_id,\n projectId=project_id)\n try:\n table = service.Get(get_request)\n if not table or table.type != 'TABLE':\n raise SchemaUpdateError('Schema modifications only supported '\n 'on TABLE objects received [{}]'.format(\n table))\n except apitools_exceptions.HttpNotFoundError:\n raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(\n project_id, dataset_id, table_id))\n\n return table.schema", "def generate_update(stmt, schema, path, rpc=None):\n if path:\n path_params = get_input_path_parameters(path)\n put = {}\n generate_api_header(stmt, put, 'Update', path)\n # Input parameters\n if path:\n put['parameters'] = create_parameter_list(path_params)\n else:\n put['parameters'] = []\n put['parameters'].append(create_body_dict(stmt.arg, schema))\n # Responses\n if rpc:\n response = create_responses(stmt.arg, rpc)\n else:\n response = create_responses(stmt.arg)\n put['responses'] = response\n return put", "def update(self, mode=\"all\"):\n\n self._check_mode(mode)\n\n mode = [\"prod\", \"staging\"] if mode == \"all\" else [mode]\n for m in mode:\n\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n\n # if m == \"staging\":\n\n table.description = self._render_template(\n Path(\"table/table_description.txt\"), self.table_config\n )\n\n # save table description\n with open(\n self.metadata_path\n / self.dataset_id\n / self.table_id\n / \"table_description.txt\",\n \"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(table.description)\n\n # when mode is staging the table schema already exists\n table.schema = self._load_schema(m)\n fields = [\"description\", \"schema\"] if m == \"prod\" else [\"description\"]\n self.client[f\"bigquery_{m}\"].update_table(table, fields=fields)\n\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"updated\",\n )", "def deduce_schema(self, input_data, *, schema_map=None):\n\n if self.input_format == 'csv':\n if self.csv_dialect:\n reader = csv.DictReader(input_data, dialect=self.csv_dialect)\n else:\n reader = csv.DictReader(input_data)\n elif self.input_format == 'json' or self.input_format is None:\n reader = json_reader(input_data)\n elif self.input_format == 'dict':\n reader = input_data\n else:\n raise Exception(f\"Unknown input_format '{self.input_format}'\")\n\n if schema_map is None:\n schema_map = OrderedDict()\n\n try:\n for json_object in reader:\n\n # Print a progress message periodically.\n self.line_number += 
1\n if self.line_number % self.debugging_interval == 0:\n logging.info(f'Processing line {self.line_number}')\n\n # Deduce the schema from this given data record.\n if isinstance(json_object, dict):\n self.deduce_schema_for_record(\n json_object=json_object,\n schema_map=schema_map,\n )\n elif isinstance(json_object, Exception):\n self.log_error(\n f'Record could not be parsed: Exception: {json_object}'\n )\n if not self.ignore_invalid_lines:\n raise json_object\n else:\n self.log_error(\n 'Record should be a JSON Object '\n f'but was a {type(json_object)}'\n )\n if not self.ignore_invalid_lines:\n raise Exception(f'Record must be a JSON Object '\n f'but was a {type(json_object)}')\n finally:\n logging.info(f'Processed {self.line_number} lines')\n\n return schema_map, self.error_logs", "def ProcessTableOverwrite(ref, args, request):\n dataset_id = ref.datasetId\n table_id = ref.Name()\n project_id = ref.projectId\n\n if args.overwrite:\n if _TableExists(dataset_id, table_id, project_id):\n _TryDeleteTable(dataset_id, table_id, project_id)\n\n return request", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def test_compare_schemas_minor(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.minor_change\n )\n\n assert status == schema_utils.Update.minor", "def update():\n as3s = AS3Schema()\n as3s.updateschemas()\n as3s_new = AS3Schema()\n\n if as3s.version != as3s_new.version:\n click.echo(\n f\"Updated AS3 JSON Schemas from version:{as3s.version} to:{as3s_new.version}\",\n )\n else:\n click.echo(\n f\"AS3 JSON Schemas are up-to-date, current version:{as3s.version}\",\n )", "def get_schema(self, schema_versions_info):\n schema = None\n version = api_version_request.APIVersionRequest(VOLUME_MICROVERSION)\n for items in schema_versions_info:\n min_version = api_version_request.APIVersionRequest(items['min'])\n max_version = api_version_request.APIVersionRequest(items['max'])\n # This is case where COMPUTE_MICROVERSION is None, which means\n # request without microversion So select base v2.1 schema.\n if version.is_null() and items['min'] is None:\n schema = items['schema']\n break\n # else select appropriate schema as per COMPUTE_MICROVERSION\n elif version.matches(min_version, max_version):\n schema = items['schema']\n break\n if schema is None:\n raise exceptions.JSONSchemaNotFound(\n version=version.get_string(),\n schema_versions_info=schema_versions_info)\n return schema", "def make_schema_changes(self, session, namespace='ns1'):\n debug(\"make_schema_changes() \" + str(namespace))\n session.execute('USE ks_%s' % namespace)\n # drop keyspace\n session.execute('DROP KEYSPACE ks2_%s' % namespace)\n wait(2)\n\n # create keyspace\n self.create_ks(session, \"ks3_%s\" % namespace, 2)\n session.execute('USE ks_%s' % namespace)\n\n wait(2)\n # drop column family\n session.execute(\"DROP COLUMNFAMILY cf2_%s\" % namespace)\n\n # create column family\n query = \"\"\"\n 
CREATE TABLE cf3_%s (\n col1 uuid PRIMARY KEY,\n col2 text,\n col3 text,\n col4 text\n );\n \"\"\" % (namespace)\n session.execute(query)\n\n # alter column family\n query = \"\"\"\n ALTER COLUMNFAMILY cf_%s\n ADD col4 text;\n \"\"\" % namespace\n session.execute(query)\n\n # add index\n session.execute(\"CREATE INDEX index2_%s ON cf_%s(col3)\"%(namespace, namespace))\n\n # remove an index\n session.execute(\"DROP INDEX index_%s\" % namespace)", "def finalizeSchema(schema, folderish=False, moveDiscussion=True):\n schema.moveField('businessOldLocation', after='workLocations')\n schema.moveField('foldermanagers', after='businessOldLocation')\n# schema.moveField('bound_licences', after='isTransferOfLicence')\n schema.moveField('rubrics', after='folderCategory')\n schema.moveField('description', after='additionalLegalConditions')\n schema.moveField('referenceFT', after='referenceDGATLP')\n schema.moveField('isTransferOfLicence', after='referenceFT')\n\n return schema", "def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default", "def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)", "def flatten_schema_map(\n schema_map,\n keep_nulls=False,\n sorted_schema=True,\n infer_mode=False,\n input_format='json',\n):\n if not isinstance(schema_map, dict):\n raise Exception(\n f\"Unexpected type '{type(schema_map)}' for schema_map\"\n )\n\n # Build the BigQuery schema from the internal 'schema_map'.\n schema = []\n map_items = sorted(schema_map.items()) if sorted_schema \\\n else schema_map.items()\n for name, meta in map_items:\n # Skip over fields which have been explicitly removed\n if not meta:\n continue\n\n status = meta['status']\n filled = meta['filled']\n info = meta['info']\n\n # Schema entries with a status of 'soft' are caused by 'null' or\n # empty fields. Don't print those out if the 'keep_nulls' flag is\n # False.\n if status == 'soft' and not keep_nulls:\n continue\n\n # Copy the 'info' dictionary into the schema dict, preserving the\n # ordering of the 'field', 'mode', 'name', 'type' elements. 
'bq load'\n # keeps these sorted, so we created them in sorted order using an\n # OrderedDict, so they should preserve order here too.\n new_info = OrderedDict()\n for key, value in info.items():\n if key == 'fields':\n if not value:\n # Create a dummy attribute for an empty RECORD to make\n # the BigQuery importer happy.\n new_value = [\n OrderedDict([\n ('mode', 'NULLABLE'),\n ('name', '__unknown__'),\n ('type', 'STRING'),\n ])\n ]\n else:\n # Recursively flatten the sub-fields of a RECORD entry.\n new_value = flatten_schema_map(\n schema_map=value,\n keep_nulls=keep_nulls,\n sorted_schema=sorted_schema,\n infer_mode=infer_mode,\n input_format=input_format\n )\n elif key == 'type' and value in ['QINTEGER', 'QFLOAT', 'QBOOLEAN']:\n # Convert QINTEGER -> INTEGER, similarly for QFLOAT and QBOOLEAN\n new_value = value[1:]\n elif key == 'mode':\n # 'infer_mode' to set a field as REQUIRED is supported for only\n # input_format = 'csv' because the header line gives us the\n # complete list of fields to be expected in the CSV file. In\n # JSON data files, certain fields will often be completely\n # missing instead of being set to 'null' or \"\". If the field is\n # not even present, then it becomes incredibly difficult (not\n # impossible, but more effort than I want to expend right now)\n # to figure out which fields are missing so that we can mark the\n # appropriate schema entries with 'filled=False'.\n #\n # The --infer_mode option is activated only for\n # input_format == 'csv' in this function, which allows us to\n # overload the --infer_mode flag to mean that a REQUIRED mode of\n # an existing schema can transition to a NULLABLE mode.\n if (infer_mode and value == 'NULLABLE' and filled\n and input_format == 'csv'):\n new_value = 'REQUIRED'\n else:\n new_value = value\n else:\n new_value = value\n new_info[key] = new_value\n schema.append(new_info)\n return schema", "def job_update(request):\n try:\n\n if request.method == 'GET':\n query_dict = request.GET\n else:\n query_dict = json.loads(request.body)\n\n update = {}\n p_update = {}\n\n for key in ['t_id', 'file_link']:\n if key in query_dict:\n update['job.' + key] = query_dict[key]\n if 'status' in query_dict:\n p_update['job.status'] = {\n 'status': query_dict['status'],\n 'time': datetime.now()\n }\n\n for key in ['customer_count', 'sms_sent', 'sms_failed', 'errors']:\n if key in query_dict:\n update['job.report.' 
+ key] = query_dict[key]\n\n if 'id' not in query_dict or not (update or p_update):\n return jsonResponse({\"success\": False, \"query\": query_dict, \"update\": update, \"p_update\": p_update})\n else:\n oid = query_dict['id']\n if oid.endswith('_segment'):\n oid = oid.replace('_segment', '')\n collection = db.segment_jobs\n else:\n collection = db.jobs\n\n final_update = {}\n if update:\n final_update[\"$set\"] = update\n if p_update:\n final_update[\"$push\"] = p_update\n\n collection.update_one({\"_id\": ObjectId(oid)}, final_update)\n return jsonResponse({\"success\": True})\n except Exception, e:\n return basic_error(e)", "def process_schema(self, hed_schema, save_merged=False):\n self._save_lib = False\n self._save_base = False\n if hed_schema.with_standard:\n self._save_lib = True\n if save_merged:\n self._save_base = True\n else:\n # Saving a standard schema or a library schema without a standard schema\n save_merged = True\n self._save_lib = True\n self._save_base = True\n self._save_merged = save_merged\n\n\n self._output_header(hed_schema.get_save_header_attributes(self._save_merged), hed_schema.prologue)\n self._output_tags(hed_schema.all_tags)\n self._output_units(hed_schema.unit_classes)\n self._output_section(hed_schema, HedSectionKey.UnitModifiers)\n self._output_section(hed_schema, HedSectionKey.ValueClasses)\n self._output_section(hed_schema, HedSectionKey.Attributes)\n self._output_section(hed_schema, HedSectionKey.Properties)\n self._output_footer(hed_schema.epilogue)\n\n return self.output", "def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)", "def update(self):\n\n if not self.db: self.validate()\n\n self.logging.debug( \"update(%s)\" % (self.db) )\n\n for name in self.tables:\n self.dbs_tables[name]['md5'] = get_md5( self.dbs_tables[name]['path'] )\n\n self._get_magnitudes()\n self._get_events()", "def _prepare_schema(self):\n schema = DaskSchema(self.schema_name)\n\n if not self.tables:\n logger.warning(\"No tables are registered.\")\n\n for name, dc in self.tables.items():\n table = DaskTable(name)\n df = dc.df\n logger.debug(\n f\"Adding table '{name}' to schema with columns: {list(df.columns)}\"\n )\n for column in df.columns:\n data_type = df[column].dtype\n sql_data_type = python_to_sql_type(data_type)\n\n table.addColumn(column, sql_data_type)\n\n schema.addTable(table)\n\n if not self.functions:\n logger.debug(\"No custom functions defined.\")\n\n for function_description in self.function_list:\n name = function_description.name\n sql_return_type = python_to_sql_type(function_description.return_type)\n if function_description.aggregation:\n logger.debug(f\"Adding function '{name}' to schema as aggregation.\")\n dask_function = DaskAggregateFunction(name, sql_return_type)\n else:\n logger.debug(f\"Adding function '{name}' to schema as scalar function.\")\n dask_function = DaskScalarFunction(name, sql_return_type)\n\n dask_function = self._add_parameters_from_description(\n function_description, dask_function\n )\n\n schema.addFunction(dask_function)\n\n return schema", "def merge_updates(self, merge_updates_query):\n\n logging.info('{0:s} Merging updates from {1:s} into {2:s}.'.format(\n str(datetime.datetime.now()), self.temp_updates_table_id,\n self.final_table_id))\n\n merge_updates_job_config = 
bigquery.QueryJobConfig()\n merge_updates_job_config.use_legacy_sql = False\n\n merge_updates_query_job = self.bq_client.query(\n query=merge_updates_query,\n location='US',\n job_config=merge_updates_job_config)\n merge_updates_query_job.result()\n logging.info('{0:s} Successfully merged updates into {1:s}.'.format(\n str(datetime.datetime.now()), self.final_table_id))", "def _sync_delta(cursor, user, namespace):\n\n schemas = _fetch_hive_schemas(cursor, namespace.lower())\n if namespace.lower() not in schemas:\n _print_info('No corresponding schema for namespace: \\'{}\\'. Skipping.'.format(namespace))\n\n # sync all for wildcard user\n if user == PUBLIC_READ_USER:\n _print_debug('Syncing all for public read user')\n _sync_all(cursor)\n\n has_permission = _has_read_permission_user_namespace(user, namespace)\n\n # create user schema if does not already exist\n _create_user_schema(cursor, user)\n\n if has_permission:\n _grant_select_privilege(cursor, user, namespace)\n else:\n _revoke_select_privilege(cursor, user, namespace)", "def merge_schemas(self, old_schm, new_schm):\n\n old_schm_cols = [x['name'] for x in old_schm]\n\n for col in new_schm:\n if type(col) == dict:\n if col['name'] not in old_schm_cols:\n old_schm.append(col)\n \n for count, old_col in enumerate(old_schm):\n for meta in old_col:\n if type(old_col[meta]) == list:\n if old_col['name'] in [pot_new_col['name'] for pot_new_col in new_schm]:\n new_col = [pot_new_col for pot_new_col in new_schm if pot_new_col['name'] == old_col['name']][0]\n if meta in new_col:\n old_schm[count][meta] = self.merge_schemas(old_col[meta], new_col[meta])\n \n return old_schm", "def resolve_schema_references(self, definition):\n # type: (Generator, Dict) -> None\n if \"$ref\" in definition:\n schema_reference = definition.pop(\"$ref\")\n section, name = schema_reference.split(\"/\")[-2:]\n referenced_definition = self.parser.specification[section][name]\n definition.update(referenced_definition)\n\n for value in definition.values():\n if isinstance(value, dict):\n self.resolve_schema_references(value)", "def update_table(table_name):\n for filename in table_name_to_funcs[table_name][\"filename\"]:\n choose_file_to_get(table_name_to_funcs[table_name][\"file_type\"], filename)\n\n for process_func in table_name_to_funcs[table_name][\"process\"]:\n process_func()\n for to_sql_func in table_name_to_funcs[table_name][\"to_sql\"]:\n to_sql_func(update=True)", "def _read_schema(self):\n schema = self.SCHEMA[self.action]\n assert_keys_match(self.op.keys(), schema, allow_missing=False)\n if 'community' in schema: self._read_community()\n if 'account' in schema: self._read_account()\n if 'permlink' in schema: self._read_permlink()\n if 'role' in schema: self._read_role()\n if 'notes' in schema: self._read_notes()\n if 'title' in schema: self._read_title()\n if 'props' in schema: self._read_props()", "def _generate_schema(self):\n\n response = self._request('GET', CosmoSim.SCHEMA_URL,\n auth=(self.username, self.password),\n headers={'Accept': 'application/json'},\n cache=False)\n data = response.json()\n self.db_dict = {}\n for i in range(len(data['databases'])):\n self.db_dict[str(data['databases'][i]['name'])] = {}\n\n sstr = str(data['databases'][i]['name'])\n sid = str(data['databases'][i]['id'])\n self.db_dict[sstr]['id'] = sid\n sdesc = str(data['databases'][i]['description'])\n self.db_dict[sstr]['description'] = sdesc\n self.db_dict[sstr]['tables'] = {}\n for j in range(len(data['databases'][i]['tables'])):\n sstr2 = 
str(data['databases'][i]['tables'][j]['name'])\n self.db_dict[sstr]['tables'][sstr2] = {}\n sdata = data['databases'][i]['tables'][j]['id']\n self.db_dict[sstr]['tables'][sstr2]['id'] = sdata\n sdesc2 = data['databases'][i]['tables'][j]['description']\n self.db_dict[sstr]['tables'][sstr2]['description'] = sdesc2\n self.db_dict[sstr]['tables'][sstr2]['columns'] = {}\n tmpval = len(data['databases'][i]['tables'][j]['columns'])\n for k in range(tmpval):\n sdata2 = data['databases'][i]['tables'][j]['columns'][k]\n sdata2_id = sdata2['id']\n sstr3 = str(sdata2['name'])\n\n sdesc3 = sdata2['description']\n self.db_dict[sstr]['tables'][sstr2]['columns'][sstr3] = {\n 'id': sdata2_id,\n 'description': sdesc3}\n return response", "def process(self):\n try:\n # self.alter_columns()\n self.collect_drop_fk()\n self.update_table()\n self.create_tables()\n self.db_operations.create_fk_constraint(self.fk_constraints, self.contraints_columns)\n return True\n except Exception as err:\n logger.error(\"create_tables [error] -> %s\" % err)", "def get_schema() -> Dict[str, type]:\n schema: Dict[str, type] = {}\n\n # Add all columns from pipeline configs\n for pipeline in get_pipelines():\n schema.update(pipeline.schema)\n\n # Add new columns from adapter\n for col_old, col_new in OUTPUT_COLUMN_ADAPTER.items():\n if col_old in schema and col_new is not None:\n schema[col_new] = schema[col_old]\n\n return schema", "def upgrade():\n pass\n # op.execute(\"\"\"\n # INSERT INTO context_implications (\n # source_context_id, source_context_scope, context_id, context_scope\n # )\n # SELECT DISTINCT rs.context_id as source_context_id,\n # 'Audit' AS source_context_scope,\n # p.context_id,\n # 'Program' AS context_scope\n # FROM relationships r\n # INNER JOIN responses rs\n # ON rs.id = r.source_id\n # AND r.source_type IN ('Response', 'DocumentationResponse',\n # 'InterviewResponse')\n # INNER JOIN programs p\n # ON p.id = r.destination_id\n # AND r.destination_type = 'Program'\n # WHERE p.private = 1\n # AND (SELECT count(*) from context_implications\n # WHERE source_context_id = rs.context_id\n # AND context_id = p.context_id) < 1\n # \"\"\")\n\n # op.execute(\"\"\"\n # INSERT INTO context_implications (\n # source_context_id, source_context_scope, context_id, context_scope\n # )\n # SELECT DISTINCT sp.context_id as source_context_id,\n # 'Program' AS source_context_scope,\n # p.context_id,\n # 'Program' AS context_scope\n # FROM relationships r\n # INNER JOIN responses rs\n # ON rs.id = r.source_id\n # AND r.source_type IN ('Response', 'DocumentationResponse',\n # 'InterviewResponse')\n # INNER JOIN requests rqs\n # ON rqs.id = rs.request_id\n # INNER JOIN audits a\n # ON a.id = rqs.audit_id\n # INNER JOIN programs sp\n # ON sp.id = a.program_id\n # INNER JOIN programs p\n # ON p.id = r.destination_id\n # AND r.destination_type = 'Program'\n # WHERE p.private = 1\n # AND (SELECT count(*) from context_implications\n # WHERE source_context_id = sp.context_id\n # AND context_id = p.context_id) < 1\n # \"\"\")", "def BqTableSchemaFileProcessor(file_arg):\n table_schema_type = GetApiMessage('TableSchema')\n schema_field_type = GetApiMessage('TableFieldSchema')\n\n try:\n schema_json = yaml.load(file_arg)\n schema_json = schema_json.get('schema', None)\n\n if not schema_json or not isinstance(schema_json, list):\n raise SchemaFileError(\n 'Error parsing schema file: no schema field list defined in file')\n\n all_fields = []\n for field in schema_json:\n new_field = schema_field_type(name=field['name'],\n 
type=field['type'],\n mode=field.get('mode', 'NULLABLE'))\n all_fields.append(new_field)\n\n return table_schema_type(fields=sorted(all_fields, key=lambda x: x.name))\n except yaml.YAMLParseError as ype:\n raise SchemaFileError('Error parsing schema file [{}]'.format(ype))\n except (AttributeError, KeyError) as e:\n raise SchemaFileError(\n 'Error parsing schema file, invalid field definition [{}]'.format(e))", "def process(self, conn):\n batch = msgpack.unpackb(self._request(conn), raw=False)\n ids = list(batch.keys())\n self.logger.debug(f'Received job ids: {ids}')\n\n # validate request\n validated = []\n errors = []\n for i, byte in enumerate(batch.values()):\n try:\n data = self._unpack(byte)\n obj = self.req_schema.parse_obj(data)\n validated.append(obj)\n self.logger.debug(f'{obj} passes the validation')\n except ValidationError as err:\n errors.append((i, self._pack(err.errors())))\n self.logger.info(\n f'Job {ids[i]} validation error',\n extra={'Validation': err.errors()}\n )\n except (json.JSONDecodeError,\n msgpack.ExtraData, msgpack.FormatError, msgpack.StackError) as err:\n errors.append((i, self._pack(str(err))))\n self.logger.info(f'Job {ids[i]} error: {err}')\n\n # inference\n self.logger.debug(f'Validated: {validated}, Errors: {errors}')\n result = []\n if validated:\n result = self.infer(validated)\n assert len(result) == len(validated), (\n 'Wrong number of inference results. '\n f'Expcet {len(validated)}, get{len(result)}.'\n )\n\n # validate response\n for data in result:\n self.resp_schema.parse_obj(data)\n\n # add errors information\n err_ids = ''\n result = [self._pack(data) for data in result]\n for index, err_msg in errors:\n err_ids += ids[index]\n result.insert(index, err_msg)\n\n # build batch job table\n resp = dict(zip(ids, result))\n if err_ids:\n resp['error_ids'] = err_ids\n self._response(conn, resp)", "def _update_per_namespace():\n\n for acc in account.Account.query().iter():\n try:\n acc.refresh_devices()\n acc.put()\n except:\n logging.error('Error refreshing account %s',\n acc.key.string_id(), exc_info=sys.exc_info())\n\n for _room in room.Room.query().iter():\n try:\n _room.update_lights()\n except:\n logging.error('Error updating room %s',\n _room.name, exc_info=sys.exc_info())", "def run(\n self,\n input_file=sys.stdin,\n output_file=sys.stdout,\n schema_map=None,\n ):\n schema_map, error_logs = self.deduce_schema(\n input_file, schema_map=schema_map\n )\n\n for error in error_logs:\n logging.info(\n f\"Problem on line {error['line_number']}: {error['msg']}\"\n )\n\n if self.debugging_map:\n json.dump(schema_map, output_file, indent=2)\n print(file=output_file)\n else:\n schema = self.flatten_schema(schema_map)\n json.dump(schema, output_file, indent=2)\n print(file=output_file)", "def has_desired_schema(self):\n if self._new_table == self._old_table:\n if not self.rebuild:\n log.info(\"Table already has the desired schema. \")\n return True\n else:\n log.info(\n \"Table already has the desired schema. 
However \"\n \"--rebuild is specified, doing a rebuild instead\"\n )\n return False\n return False", "async def write_schema_definition(controller, schema_definition_request):\n write_schema_resp = await controller.schema.write_schema(\n schema_definition_request.schema_name,\n schema_definition_request.schema_attrs,\n schema_definition_request.schema_version,\n )\n\n if not write_schema_resp or write_schema_resp == {}:\n raise HTTPException(\n status_code=404,\n detail=f\"Something went wrong.\\n Could not write schema to ledger.\\n{schema}\",\n )\n return write_schema_resp", "def update_updated_data_sqlite_db(self, table_name: str):\n # go through indicators and get updated data in dataframe\n print('start downloading queries')\n df = self.__get_updated_data(table_name)\n print('api download completed')\n\n # get list of sql queries to insert to sqlite db\n print('start creating queries')\n q_list = self.__get_sql_insert_query_list(df, table_name)\n\n # insert data to sqlite\n print('start inserting data')\n AccessDB().run_insert_query(q_list)\n return 'Process Completed'", "def main():\n cur, conn = connect('dwh.cfg')\n \n set_schema = schema_queries[1]\n cur.execute(set_schema)\n \n print('Loading Staging Tables.')\n load_staging_tables(cur, conn)\n \n print('Inserting Rows.')\n insert_tables(cur, conn)\n\n \n conn.close()", "def update_tables(tables):\n # First we ensure we can find the file format version number\n # in top-level metadata. Then we proceed to fix up the tables as necessary.\n if not (isinstance(tables.metadata, dict) and 'SLiM' in tables.metadata):\n # Old versions kept information in provenance, not top-level metadata.\n # Note this uses defaults on keys not present in provenance,\n # which prior to 0.5 was everything but generation and model_type.\n values = default_slim_metadata('tree_sequence')['SLiM']\n prov = None\n file_version = 'unknown'\n # use only the last SLiM provenance\n for p in tables.provenances:\n is_slim, this_file_version = slim_provenance_version(p) \n if is_slim:\n prov = p\n file_version = this_file_version\n values['file_version'] = file_version\n try:\n record = json.loads(prov.record)\n if file_version == \"0.1\":\n values['model_type'] = record['model_type']\n values['tick'] = record['generation']\n values['cycle'] = record['generation']\n else:\n if 'generation' in record['slim']:\n values['tick'] = record['slim']['generation']\n values['cycle'] = record['slim']['generation']\n for k in values:\n if k in record['parameters']:\n values[k] = record['parameters'][k]\n if k in record['slim']:\n values[k] = record['slim'][k]\n except:\n raise ValueError(\"Failed to obtain metadata from provenance.\")\n set_tree_sequence_metadata(tables, **values)\n\n file_version = tables.metadata['SLiM']['file_version']\n if file_version != slim_file_version:\n warnings.warn(\"This is a version {} SLiM tree sequence.\".format(file_version) +\n \" When you write this out, \" +\n \"it will be converted to version {}.\".format(slim_file_version))\n\n # the only tables to have metadata schema changed thus far\n # are populations, individuals, mutations, and top-level:\n old_schema = _old_metadata_schema(\"tree_sequence\", file_version)\n if old_schema is not None:\n md = tables.metadata\n new_schema = slim_metadata_schemas[\"tree_sequence\"]\n new_properties = new_schema.asdict()['properties']['SLiM']['required']\n tables.metadata_schema = new_schema\n defaults = default_slim_metadata(\"tree_sequence\")\n for k in new_properties:\n if k not in md['SLiM']:\n if k == 
\"tick\":\n md['SLiM']['tick'] = md['SLiM']['generation']\n md['SLiM']['cycle'] = md['SLiM']['generation']\n else:\n md['SLiM'][k] = defaults['SLiM'][k]\n tables.metadata = md\n\n old_schema = _old_metadata_schema(\"population\", file_version)\n if old_schema is not None:\n pops = tables.populations.copy()\n tables.populations.clear()\n if pops.metadata_schema == tskit.MetadataSchema(None):\n pops.metadata_schema = old_schema\n new_schema = slim_metadata_schemas[\"population\"]\n tables.populations.metadata_schema = new_schema\n defaults = default_slim_metadata(\"population\")\n # just needs recoding\n for pop in pops:\n tables.populations.append(pop)\n\n old_schema = _old_metadata_schema(\"individual\", file_version)\n if old_schema is not None:\n inds = tables.individuals.copy()\n tables.individuals.clear()\n if inds.metadata_schema == tskit.MetadataSchema(None):\n inds.metadata_schema = old_schema\n new_schema = slim_metadata_schemas[\"individual\"]\n tables.individuals.metadata_schema = new_schema\n defaults = default_slim_metadata(\"individual\")\n d = {}\n for k in [\"pedigree_p1\", \"pedigree_p2\"]:\n d[k] = defaults[k]\n for ind in inds:\n md = ind.metadata\n md.update(d)\n tables.individuals.append(ind.replace(metadata=md))\n\n old_schema = _old_metadata_schema(\"mutation\", file_version)\n if old_schema is not None:\n muts = tables.mutations.copy()\n tables.mutations.clear()\n if muts.metadata_schema == tskit.MetadataSchema(None):\n muts.metadata_schema = old_schema\n tables.mutations.metadata_schema = slim_metadata_schemas[\"mutation\"]\n for mut in muts:\n md = mut.metadata\n for ml in md['mutation_list']:\n ml['nucleotide'] = -1\n tables.mutations.append(mut.replace(metadata=md))\n\n if file_version == \"0.1\":\n # shift times\n slim_generation = tables.metadata['SLiM']['tick']\n node_times = tables.nodes.time + slim_generation\n tables.nodes.set_columns(\n flags=tables.nodes.flags,\n time=node_times,\n population=tables.nodes.population,\n individual=tables.nodes.individual,\n metadata=tables.nodes.metadata,\n metadata_offset=tables.nodes.metadata_offset)\n migration_times = tables.migrations.time + slim_generation\n tables.migrations.set_columns(\n left=tables.migrations.left,\n right=tables.migrations.right,\n node=tables.migrations.node,\n source=tables.migrations.source,\n dest=tables.migrations.dest,\n time=migration_times)\n\n new_record = {\n \"schema_version\": \"1.0.0\",\n \"software\": {\n \"name\": \"pyslim\",\n \"version\": pyslim_version,\n },\n \"parameters\": {\n \"command\": [\"updrade_tables\"],\n \"old_file_version\": file_version,\n \"new_file_version\": slim_file_version,\n },\n \"environment\": get_environment(),\n }\n tskit.validate_provenance(new_record)\n tables.provenances.add_row(json.dumps(new_record))\n\n set_metadata_schemas(tables)\n md = tables.metadata\n md['SLiM']['file_version'] = slim_file_version\n tables.metadata = md", "def serializeSchemaContext(schema_context, event=None):\n # find the FTI and model\n fti = schema_context.fti\n schemaName = schema_context.schemaName\n schema = schema_context.schema\n model = fti.lookupModel()\n\n # synchronize changes to the model\n syncSchema(schema, model.schemata[schemaName], overwrite=True)\n fti.model_source = serializeModel(model)", "async def send_schema(self, schema_data_json: str) -> str: # issuer\n\n req_json = await ledger.build_schema_request(self.did, schema_data_json)\n resp_json = await ledger.sign_and_submit_request(self.pool.handle, self.wallet_handle, self.did, req_json)\n resp = 
(json.loads(resp_json))['result']\n return await self.get_schema(resp['identifier'], resp['data']['name'], resp['data']['version'])", "def fix_all(self):\n\n altered_tables = {}\n\n for ingestible_db_conf in self.ingestible_db_conf_repo.get_ingestible_dbs():\n target_db= ingestible_db_conf.target_db_name\n db_type = ingestible_db_conf.db_type\n self.logger.info(\"Fixing consistency for DB Type: %s, Target DB: %s\" % (db_type, target_db))\n self.prepare_database(target_db)\n consistency_checker = HiveConsistencyChecker(target_db, db_type)\n\n unused_tables = consistency_checker.get_unused_tables()\n self.remove_unused_tables(unused_tables)\n\n new_tables = consistency_checker.get_new_tables()\n self.create_new_tables(new_tables)\n\n inconsistent_tables = consistency_checker.get_inconsistent_tables()\n self.fix_inconsistent_tables(inconsistent_tables, db_type)\n\n # Combine lists of inconsistent and unused tables\n altered_tables[db_type] = map(lambda qualified_table: qualified_table.split(\".\")[1],\n inconsistent_tables.keys() + unused_tables)\n\n self.logger.debug(\"Altered Tables: %s\" % altered_tables)\n return altered_tables", "def test_compare_schemas_empty(self):\n status = schema_utils.compare_schemas(\n {},\n self.base_schema,\n )\n\n assert status == schema_utils.Update.first_run", "def schema_handler(self, schema):\n dict_for_render = schema.get('properties', {}).items()\n if schema.get('$ref', None):\n def_name = schema.get('$ref').split('/')[-1]\n dict_for_render = self.definitions[def_name].get('properties', {}).items()\n elif schema.get('properties', None) is None:\n return ''\n\n answer_dict = {}\n json_dict = {}\n for opt_name, opt_value in dict_for_render:\n var_type = opt_value.get('format', None) or opt_value.get('type', None) or 'object'\n json_name = self.indent + f':jsonparameter {var_type} {opt_name}:'\n json_dict[json_name] = self.get_json_props_for_response(var_type, opt_value)\n\n answer_dict[opt_name] = self.get_response_example(opt_name, var_type, opt_value)\n if var_type == 'string':\n answer_dict[opt_name] = answer_dict[opt_name].format(opt_name)\n\n self.write('')\n for line in json.dumps(answer_dict, indent=4).split('\\n'):\n self.write(line, self.indent_depth)\n\n self.write('')\n for json_param_name, json_param_value in json_dict.items():\n desc = f'{json_param_value[\"title\"]}{json_param_value[\"props_str\"]}' or 'None'\n self.write(json_param_name + ' ' + desc)", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n 
\n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} 
(\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n 
#################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': 
pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def match_schemas(w_schema, r_schema):\n if isinstance(w_schema, dict) and isinstance(r_schema, dict):\n # Array, Map, Enum, Fixed, Record, Error\n w_type = w_schema['type']\n r_type = r_schema['type']\n if w_type != r_type:\n return False\n if w_type == 'array':\n # 'Both schemas are arrays whose item types match'\n return match_schemas(w_schema['items'], r_schema['items'])\n elif w_type == 'map':\n # 'Both schemas are maps whose value types match'\n return match_schemas(w_schema['values'], r_schema['values'])\n elif w_type in ('enum', 'record', 'error'):\n # 'Both schemas are enums whose names match'\n # 'Both schemas are records with the same name'\n # Note: Futher checks must be applied after data is read in\n # `read_enum()` and `read_record()`\n return w_schema['name'] == r_schema['name']\n elif w_type == 'fixed':\n # 'Both schemas are fixed whose sizes and names match'\n return (\n w_schema['name'] == r_schema['name'] and\n w_schema['size'] == r_schema['size']\n )\n elif w_type == r_type:\n # Unknown type - just return True\n return True\n\n elif isinstance(w_schema, list) or isinstance(r_schema, list):\n # 'Either schema is a union'\n if isinstance(w_schema, list):\n # If the writer is a union, the check is applied in `read_union()`\n # when the correct schema is known.\n return True\n else:\n # If the reader is a union, ensure at least one of the schemas in\n # the reader's union matches the writer's schema.\n return any(match_schemas(w_schema, s) for s in r_schema)\n\n elif w_schema == r_schema:\n return True\n\n # Promotion cases:\n elif w_schema == 'int' and r_schema in ('long', 'float', 'double'):\n return True\n elif w_schema == 'long' and r_schema in ('float', 'double'):\n return True\n elif w_schema == 'float' and r_schema == 'double':\n return True\n elif w_schema == 'string' and r_schema == 'bytes':\n return True\n elif w_schema == 'bytes' and r_schema == 'string':\n return True\n\n return False", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD 
COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def _get_schema(self):\n\n schema = ProtocolSchema()\n\n schema.id = self.id\n schema.type = type(self).__name__\n\n for input_path in self.required_inputs:\n\n if not (input_path.start_protocol is None or (input_path.start_protocol == self.id and\n input_path.start_protocol == input_path.last_protocol)):\n\n continue\n\n # Always make sure to only pass a copy of the input. Changing the schema\n # should NOT change the protocol.\n schema.inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))\n\n return schema", "def process(rec, conn):\n try:\n # Changes members from distinguished name to next_id for roles\n if \"members\" in rec[\"data\"]:\n rec = translate_field_to_next(rec, \"members\")\n if \"owners\" in rec[\"data\"]:\n rec = translate_field_to_next(rec, \"owners\")\n\n add_transaction(rec)\n if \"batch\" not in rec or not rec[\"batch\"]:\n r.table(\"inbound_queue\").get(rec[\"id\"]).delete().run(conn)\n rec[\"sync_direction\"] = \"inbound\"\n r.table(\"sync_errors\").insert(rec).run(conn)\n return\n\n batch = batch_pb2.Batch()\n batch.ParseFromString(rec[\"batch\"])\n batch_list = batch_to_list(batch=batch)\n client = ClientSync()\n status = client.send_batches_get_status(batch_list=batch_list)\n while status[0][\"status\"] == \"PENDING\":\n LOGGER.info(\"Batch status is %s\", status)\n status = client.status_recheck(batch_list)\n if status[0][\"status\"] == \"COMMITTED\":\n if rec[\"data_type\"] == \"user\":\n insert_to_user_mapping(rec)\n if \"metadata\" in rec and rec[\"metadata\"]:\n data = {\n \"address\": rec[\"address\"],\n \"object_type\": rec[\"object_type\"],\n \"object_id\": rec[\"object_id\"],\n \"provider_id\": rec[\"provider_id\"],\n \"created_at\": r.now(),\n \"updated_at\": r.now(),\n **rec[\"metadata\"],\n }\n\n query = (\n r.table(\"metadata\")\n .get(rec[\"address\"])\n .replace(\n lambda doc: r.branch(\n # pylint: disable=singleton-comparison\n (doc == None), # noqa\n r.expr(data),\n doc.merge(\n {\"metadata\": rec[\"metadata\"], \"updated_at\": r.now()}\n ),\n )\n )\n )\n result = query.run(conn)\n if (not result[\"inserted\"] and not result[\"replaced\"]) or result[\n \"errors\"\n ] > 0:\n LOGGER.warning(\n \"error updating metadata record:\\n%s\\n%s\", result, query\n )\n rec[\"sync_direction\"] = \"inbound\"\n r.table(\"changelog\").insert(rec).run(conn)\n r.table(\"inbound_queue\").get(rec[\"id\"]).delete().run(conn)\n else:\n rec[\"error\"] = get_status_error(status)\n rec[\"sync_direction\"] = \"inbound\"\n r.table(\"sync_errors\").insert(rec).run(conn)\n r.table(\"inbound_queue\").get(rec[\"id\"]).delete().run(conn)\n\n except Exception as err: # pylint: disable=broad-except\n LOGGER.exception(\n \"%s exception processing inbound record:\\n%s\", type(err).__name__, rec\n )\n LOGGER.exception(err)", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. 
Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def resolve_nested_schema(self, schema):\n try:\n schema_instance = resolve_schema_instance(schema)\n # If schema is a string and is not found in registry,\n # assume it is a schema reference\n except marshmallow.exceptions.RegistryError:\n return schema\n schema_key = make_schema_key(schema_instance)\n if schema_key not in self.refs:\n name = self.schema_name_resolver(schema)\n if not name:\n try:\n json_schema = self.schema2jsonschema(schema_instance)\n except RuntimeError:\n raise APISpecError(\n \"Name resolver returned None for schema {schema} which is \"\n \"part of a chain of circular referencing schemas. Please\"\n \" ensure that the schema_name_resolver passed to\"\n \" MarshmallowPlugin returns a string for all circular\"\n \" referencing schemas.\".format(schema=schema)\n )\n if getattr(schema, \"many\", False):\n return {\"type\": \"array\", \"items\": json_schema}\n return json_schema\n name = get_unique_schema_name(self.spec.components, name)\n self.spec.components.schema(name, schema=schema)\n return self.get_ref_dict(schema_instance)", "def decide_schema(self, json_data):\n pass", "def decide_schema(self, json_data):\n pass", "def check_schema_existence_api_call(context, schema, version):\n check_schema_existence(context, schema, version, \"api\")", "def _AddNewColsToSchema(new_fields, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for new_field in new_fields:\n if new_field.name in orig_schema_map:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n updated_schema_map[new_field.name] = new_field\n return updated_schema_map", "def do_export_schema(self):\n export_schema = self.get_arg_value(\"export_schema\")\n\n if export_schema:\n row = {\"schemas\": self.final_schemas}\n self.write_rows(rows=row)\n del row", "def patch(self):\n if not self.validate_complaint_document(\"update\"):\n return\n if apply_patch(self.request, src=self.request.context.serialize()):\n update_file_content_type(self.request)\n self.LOGGER.info(\n \"Updated tender award complaint document {}\".format(self.request.context.id),\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"tender_award_complaint_document_patch\"}),\n )\n return {\"data\": self.request.context.serialize(\"view\")}", "def load(self, base_schema):\n if base_schema == []:\n _base = []\n else:\n _base = base_schema or BASE_SCHEMA or []\n\n _base_schema = []\n for _sc in _base:\n if _sc == \"schema\" or _sc == \"schema.org\":\n self.schema_org_version = get_schemaorg_version()\n _base_schema.append(\n load_schemaorg(version=self.schema_org_version, verbose=self.verbose)\n )\n continue\n elif self.is_a_dde_schema(_sc):\n _base_schema.append(self.load_dde_schemas(_sc))\n\n _base_schema = merge_schema(*_base_schema)\n return _base_schema", "def __matchSchema(self, featureClass):\n fClassFields = []\n for field in arcpy.ListFields(featureClass):\n fieldName = field.name.lower()\n if fieldName == 'objectid' or fieldName == 'oid' or 'shape' in fieldName or field.name in self.userFields:\n pass\n else:\n fClassFields.append(field.name)\n fClassFields.insert(0, 'Shape@')\n objFields = [f['name'] for f in self.updateFields]\n return sorted(fClassFields) == sorted(objFields)" ]
[ "0.65370333", "0.61016726", "0.59257406", "0.59054583", "0.57786834", "0.5593872", "0.5549331", "0.5481114", "0.5472658", "0.54690826", "0.5401272", "0.5361555", "0.53470576", "0.5315268", "0.5254957", "0.52487254", "0.5139043", "0.50770354", "0.5074396", "0.5061819", "0.5033071", "0.502315", "0.5017254", "0.50041014", "0.5001959", "0.50011426", "0.49769256", "0.49501354", "0.49407235", "0.4929252", "0.49250436", "0.49121267", "0.4900479", "0.4880573", "0.48635983", "0.48530206", "0.48388177", "0.48343465", "0.4824842", "0.48192585", "0.4817013", "0.47990775", "0.47986832", "0.47926039", "0.4791585", "0.4790972", "0.47862694", "0.47694156", "0.47690782", "0.47657046", "0.47607344", "0.4751095", "0.47428063", "0.4735867", "0.47179085", "0.47115967", "0.4699601", "0.46832496", "0.46828026", "0.46818277", "0.46787435", "0.46737728", "0.46716136", "0.46694294", "0.46316704", "0.46195805", "0.46052155", "0.4562152", "0.45620155", "0.4556141", "0.4555454", "0.4551104", "0.454958", "0.4547384", "0.45462012", "0.45443198", "0.45382723", "0.4530371", "0.45241508", "0.45235947", "0.45217818", "0.4521111", "0.45162898", "0.450814", "0.45065898", "0.45054102", "0.44995424", "0.44988978", "0.44957325", "0.44954717", "0.4494832", "0.4493811", "0.44930992", "0.44930992", "0.44914076", "0.44845492", "0.44745573", "0.4471194", "0.44623452", "0.44609377" ]
0.8179473
0
Try to retrieve the current BigQuery TableSchema for a table_ref. Tries to fetch the schema of an existing table. Raises SchemaUpdateError if table is not found or if table is not of type 'TABLE'.
def _TryGetCurrentSchema(dataset_id, table_id, project_id):
  client = GetApiClient()
  service = client.tables
  get_request_type = GetApiMessage('BigqueryTablesGetRequest')
  get_request = get_request_type(datasetId=dataset_id, tableId=table_id,
                                 projectId=project_id)
  try:
    table = service.Get(get_request)
    if not table or table.type != 'TABLE':
      raise SchemaUpdateError('Schema modifications only supported '
                              'on TABLE objects received [{}]'.format(
                                  table))
  except apitools_exceptions.HttpNotFoundError:
    raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(
        project_id, dataset_id, table_id))

  return table.schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n return bg_table.schema", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def _get_stored_schema(self, table: str) -> Optional[TableSchema]:\n try:\n with open(self.schemas / (table + '.json'), 'r') as f:\n return json.load(f)\n except FileNotFoundError:\n return None", "def _get_table_reflection(self, schema: str, table: str) -> Table:\n return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def CreateTableFromJson(self, table_name, schema_json):\n try:\n schema = json.JSONDecoder().decode(schema_json)\n except ValueError, e:\n raise SchemaError('Could not parse fields:\\n%s\\n%s' %\n (schema_json, str(e)))\n\n conn = self._Connect()\n result = conn.Call(\n dict(method='bigquery.tables.insert',\n collection='tables',\n operation=bq.REST.INSERT,\n params=dict(name=table_name, fields=schema)))\n return result", "def create_tables(client: bigquery.Client, tableSchemas: dict) -> dict:\n ds = create_dataset(client, f'FusionTable_Autoimport_{datetime.now()}')\n\n def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n \"\"\"Create a SchemaField from the dict\"\"\"\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )\n\n def _table_from_ft(ft_schema: dict) -> bigquery.Table:\n \"\"\"Create a local representation of a BigQuery table\"\"\"\n # A \"TableSchema\" is just a sequence of SchemaFields https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.table.Table.html\n schema = list(map(_create_field_schema, ft_schema['columns']))\n table = bigquery.Table(\n bigquery.TableReference(ds, to_safe_name(ft_schema['name'])),\n schema\n )\n table.description = ft_schema.get('description', '')\n return table\n\n return {\n ftId: client.create_table(_table_from_ft(ftSchema))\n for (ftId, ftSchema) in tableSchemas.items()\n }", "def get_schema(self, repo, table):\n return self.user_con.get_schema(repo=repo, table=table)", "def create_bq_table(client, dataset, table, schema):\n \n print('Creating table %s.%s' % (dataset, table))\n exists = client.check_table(dataset, table)\n if exists:\n raise AssertionError(\"Table already exists: %s.%s\" % (dataset,table))\n created = client.create_table(dataset, table, schema)\n # Check that the empty 
table was created\n exists = client.check_table(dataset, table)\n if not exists:\n raise RuntimeError('Table creation failed: %s.%s' % (dataset, table))", "def __check_table(input_table):\n\n try:\n table = TABLE_TYPES[input_table]\n return table\n except KeyError:\n raise InvalidTableType(input_table)", "def CreateTableFromFile(self, table_name, schema_path):\n try:\n schema_file = open(schema_path)\n schema_json = schema_file.read()\n schema_file.close()\n except IOError, e:\n raise SchemaError('Could not read file (%s):\\n%s' %\n (schema_path, str(e)))\n return self.CreateTableFromJson(table_name, schema_json)", "def DescribeTable(self, table_name):\n conn = self._Connect()\n cursor = conn.cursor()\n return cursor.bq_get_table_metadata(table_name)", "def getTableSchema(self,tableName):\n\tif not self.schemaDict.has_key(tableName):\n\t if self.dbType==\"sqlite\":\n\t query = \"SELECT * FROM sqlite_master WHERE name='%s'\"%tableName\n\t tup = self.fetchOne(query)\n\t schema= tup[4]\n\t else: # MySQL \n\t query = \"DESCRIBE %s\"%tableName\n\t tup = self.fetchAll(query)\n\t schema= \"CREATE TABLE %s (\"%tableName\n\t for item in tup:\n\t name = item[0]\n\t\t type = item[1]\n\t\t priKey = item[3]\n\t\t autoInc = item[5] \n\t schema+=name+' '+type+' '+priKey+' '+autoInc\n\t\t if item!=tup[-1]:\n\t\t schema+=','\n\t schema+=\" )\"\n\t return schema\n\telse:\n\t return self.schemaDict[tableName]", "def _table_from_ft(ft_schema: dict) -> bigquery.Table:\n # A \"TableSchema\" is just a sequence of SchemaFields https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.table.Table.html\n schema = list(map(_create_field_schema, ft_schema['columns']))\n table = bigquery.Table(\n bigquery.TableReference(ds, to_safe_name(ft_schema['name'])),\n schema\n )\n table.description = ft_schema.get('description', '')\n return table", "def get_schema(db, sourcename):\n try:\n schema = db[\"tables\"][sourcename]\n schema[\"type\"] = constants.TABLE\n except KeyError:\n try:\n schema = db[\"views\"][sourcename]\n schema[\"type\"] = constants.VIEW\n except KeyError:\n raise ValueError(\"no such table/view\")\n return schema", "def get_tables_in_schema(self, conn, schema_name):\n return conn.get_tables(schema_name)['table_name']", "def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def _create_table_if_not_exists(self) -> bigquery.Table:\n table = self.client.create_table(\n table=bigquery.Table(table_ref=self._table_ref, schema=Schema),\n exists_ok=True,\n )\n logging.info(\"table %s already exists.\", table.full_table_id)\n return table", "def getTableSchema(self, lsstLevel, dbName, tableName):\n return self._doRequest(self.httpClient.getTableSchema, lsstLevel, dbName, tableName)", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def get_schema(self, get_stats=False):\n query = \"schema {}\"\n\n results = 
self.run_dgraph_query_raw(query)\n\n schema = {}\n\n for row in results[\"schema\"]:\n table_name = row[\"predicate\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n return list(schema.values())", "def schema_ref(schema, table):\n return schema + '.' + table", "def get_table_exists(schema_name, table_name):\n sql = (\"SELECT * FROM sys.objects so JOIN sys.schemas ss on (so.schema_id = ss.schema_id) \"\n \"WHERE so.type = 'U' AND so.name = ? and ss.name = ?\")\n\n row = fetch_row(sql, [table_name, schema_name])\n\n return True if row else False", "def ensure_schema(client, table_name):\n query = ''.join([\n 'CREATE TABLE {cf} ',\n '(\"lockId\" ascii, \"claimId\" timeuuid, PRIMARY KEY(\"lockId\", \"claimId\"));'])\n\n def errback(failure):\n failure.trap(InvalidRequestException)\n\n return client.execute(query.format(cf=table_name),\n {}, ConsistencyLevel.QUORUM).addErrback(errback)", "def BqTableSchemaFileProcessor(file_arg):\n table_schema_type = GetApiMessage('TableSchema')\n schema_field_type = GetApiMessage('TableFieldSchema')\n\n try:\n schema_json = yaml.load(file_arg)\n schema_json = schema_json.get('schema', None)\n\n if not schema_json or not isinstance(schema_json, list):\n raise SchemaFileError(\n 'Error parsing schema file: no schema field list defined in file')\n\n all_fields = []\n for field in schema_json:\n new_field = schema_field_type(name=field['name'],\n type=field['type'],\n mode=field.get('mode', 'NULLABLE'))\n all_fields.append(new_field)\n\n return table_schema_type(fields=sorted(all_fields, key=lambda x: x.name))\n except yaml.YAMLParseError as ype:\n raise SchemaFileError('Error parsing schema file [{}]'.format(ype))\n except (AttributeError, KeyError) as e:\n raise SchemaFileError(\n 'Error parsing schema file, invalid field definition [{}]'.format(e))", "def validate_table(self, table, table_struct, verbose=True):\n \n assert(self.connected)\n try: \n assert(self.check_table(table, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n GET_SCHEMA_INFORMATION_COMMAND = \"SELECT ORDINAL_POSITION, COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE, COLUMN_KEY, EXTRA \" \\\n \t \"FROM INFORMATION_SCHEMA.COLUMNS \" \\\n \t \"WHERE TABLE_NAME='{0}' ORDER BY ORDINAL_POSITION\".format(table)\n \n GET_SCHEMA_FK_INFORMATION_COMMAND = \"SELECT COLUMN_NAME, CONSTRAINT_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME \" \\\n \"FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE \" \\\n \"WHERE REFERENCED_TABLE_SCHEMA = '{0}' AND TABLE_NAME = '{1}' AND COLUMN_NAME = '{2}'\"\n \n CHANGE_TYPE_COMMAND = \"ALTER TABLE {0} MODIFY {1} {2} {3}\"\n \n ADD_FK_COMMAND = \"ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})\" \n \n DROP_FK_CONSTRAINT_COMMAND = \"ALTER TABLE {0} DROP FOREIGN KEY {1}\" \n \n \n self.cursor.execute(GET_SCHEMA_INFORMATION_COMMAND)\n \n # load all column info from the database \n columns = {}\n for c in self.cursor:\n columns[c[1]] = c\n \n for column,db_col in zip(table_struct,columns):\n \n # load parameter values from the DB \n (ord_pos, name, col_type, isnull, key_type, extra) = columns[db_col]\n \n isnull = isnull == 'YES'\n auto_increment = extra == 'auto_increment'\n foreign_key = key_type == 'MUL'\n \n # parse new parameter values\n struct_type = table_struct[column][0]\n parameters = table_struct[column][1] if ( len(table_struct[column]) > 1) else None\n \n # get parameters values in boolean format\n if (parameters == None):\n new_isnull = True\n new_auto_increment = False\n new_foreign_key = 
False\n else:\n if 'not_null' in parameters: new_isnull = not parameters['not_null']\n else: new_isnull = True\n \n if 'auto_increment' in parameters: new_auto_increment = parameters['auto_increment']\n else: new_auto_increment = False\n \n if 'foreign_key' in parameters: new_foreign_key = parameters['foreign_key']\n else: new_foreign_key = False\n \n \n \n \n if verbose: \n print(\"\\n---\\n\\nChecking column '{0}'...\".format(column))\n \n # check name, type and each parameter \n if name == column:\n \n # if something doesn't match, change within the database\n if ( col_type != struct_type ): \n if verbose:\n print(\"Column '{0}' found in the correct position with the incorrect type.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, col_type, struct_type),)\n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), '')\n \n if verbose: print(\"\\t\" + cmd)\n \n self.cursor.execute(cmd) \n \n if ( isnull != new_isnull ):\n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"NOT NULLABLE\" if new_isnull else \"NULLABLE\", \"NULLABLE\" if new_isnull else \"NOT NULLABLE\"))\n \n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), \"NOT NULL\" if not new_isnull else \"\" )\n \n if verbose: print(\"\\t\" + cmd)\n \n \n self.cursor.execute(cmd)\n \n if ( auto_increment != new_auto_increment ):\n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"AUTO INCREMENT\" if new_auto_increment else \"none\", \"none\" if new_auto_increment else \"AUTO INCREMENT\"))\n \n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), \"AUTO INCREMENT\" if new_auto_increment else \"\" )\n \n if verbose: print(\"\\t\" + cmd)\n \n \n self.cursor.execute(cmd)\n \n \n if ( foreign_key != new_foreign_key ):\n \n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"FOREIGN KEY\" if new_auto_increment else \"none\", \"none\" if new_auto_increment else \"FOREIGN KEY\"))\n \n \n \n if ('foreign_key' in parameters and parameters['foreign_key']):\n \n referenced_table = parameters['references'].split('(')[0]\n referenced_column = parameters['references'].split('(')[1][:-1] \n \n \n if (not self.check_table(referenced_table, verbose=False)):\n raise(TableNotFoundError)\n \n \n if (not self.check_column(referenced_column, referenced_table, verbose=False)):\n raise(ColumnNotFoundError)\n \n \n cmd = ADD_FK_COMMAND.format(table,column,referenced_table, referenced_column)\n\n \n \n if verbose: print(\"\\t\" + cmd)\n \n try:\n self.cursor.execute(cmd) \n except:\n print(\" > Error: Cannot add foreign key constraint to column '{0}' in the table '{1}'. 
You must remove all data from\\n > this column using the clear_column() command first.\".format(column, table))\n \n else:\n \n # check if column has a foreign key constraint\n \n cmd = GET_SCHEMA_FK_INFORMATION_COMMAND.format(self.config['database'], table, column)\n \n self.cursor.execute(cmd)\n \n fk_name = None\n for row in self.cursor:\n fk_name = row[1]\n break\n \n if fk_name != None:\n cmd = DROP_FK_CONSTRAINT_COMMAND.format(table, fk_name)\n \n if verbose: \n print(\"Column '{0}' involved in foreign key constraint '{1}'\".format(column, fk_name))\n print(\"Dropping foreign key constraint '{0}'\".format(fk_name))\n print(\"\\t\" + cmd)\n \n self.cursor.execute(cmd)\n\n \n \n if verbose: print(\"Done.\")\n \n \n if (len(columns) > len(table_struct)):\n \n if verbose: print(\"\\n---\\n\\nExtra columns found in database\")\n \n for col in columns:\n if (col not in table_struct): \n \n if verbose:\n print(\"Column '{0}' found in the database but not found in the configuration.\".format(col))\n \n self.delete_column(col, table)\n \n \n elif(len(table_struct) > len(columns)):\n \n if verbose: print(\"\\n---\\n\\nExtra columns found in configuration. \")\n\n for col in table_struct:\n if col not in columns:\n if verbose: print(\"Column '{0}' found in configuration but not in database\".format(col))\n self.insert_column(col, table_struct[col][0], table, params = table_struct[col][1] if ( len(table_struct[col]) > 1) else None)", "def _load_bigquery_schemas(self):\n logger.info(\"Reading BigQuery schema files...\")\n for table_name in self.tables + self.type_tables:\n logger.info(f\"Reading schema file for table '{table_name}'...\")\n schema_json = resource_stream('sotorrent_pipeline',\n f'bigquery_schemas/{table_name}.json').read().decode()\n self.bigquery_schemas[table_name] = json.loads(schema_json)\n self.bigquery_schemas_with_fields[table_name] = json.loads('{\"fields\":' + schema_json + '}')\n logger.info(f\"Read {len(self.bigquery_schemas)} schema file(s).\")", "def is_table_exist(table_name: str, schema: str) -> str:\n\n return f\"\"\"\n SELECT\n table_name\n FROM\n information_schema.tables\n WHERE\n table_name = '{table_name}' AND\n table_schema='{schema}'\n LIMIT 1\n \"\"\"", "def read_schema_from_db(cur, table):\n num_rows = cur.execute(\"\"\"DESCRIBE {}\"\"\".format(table))\n tbl_schema = []\n for i in range(num_rows):\n row = cur.fetchone()\n tbl_schema.append([row[0], row[1]])\n return tbl_schema", "def delete_bq_table(client, dataset, table):\n \n exists = client.check_table(dataset, table)\n if exists:\n print('WARNING: Deleting existing table %s.%s' % (dataset, table))\n deleted = client.delete_table(dataset, table)\n if not deleted:\n raise RuntimeError('Table deletion failed: %s.%s' % (dataset, table))", "def get_schemas(self, conn):\n return conn.get_schemas()['table_schema']", "async def schema_state(appname, schema, table):\n base = sprout.cfg.db_str(appname)\n con = await asyncpg.connect(base)\n req = ', '.join([\n 'table_catalog',\n 'table_schema',\n 'table_name',\n 'column_name',\n 'data_type'\n ])\n q = f\"\"\"select {req} from information_schema.columns\nwhere table_schema='{schema}' and table_name='{table}';\"\"\"\n try:\n ret = await con.fetch(q)\n df = pd.DataFrame.from_records(ret, columns=ret[0].keys())\n return df\n except Exception as e:\n sprout.cfg.log.error(f\"fetching current table state failed: {e}\")\n finally:\n await con.close()", "def ReadSchemaFile(schema_file, bigquery_messages):\n\n if os.path.exists(schema_file):\n with open(schema_file, 
mode='r') as f:\n try:\n def UpperOrNone(string):\n return string and string.upper()\n field_schemas = [\n bigquery_messages.TableFieldSchema(\n name=json_object.get('name'),\n type=json_object.get('type').upper(),\n mode=UpperOrNone(json_object.get('mode')))\n for json_object in json.load(f)]\n return bigquery_messages.TableSchema(fields=field_schemas)\n except ValueError as e:\n raise bigquery.SchemaError(\n 'Error decoding JSON schema from file {0}: {1}.'.format(\n schema_file, e))\n else:\n raise bigquery.SchemaError(\n 'Error reading schema: File \"{0}\" was not found.'.format(schema_file))", "def table_reference(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference':\n return pulumi.get(self, \"table_reference\")", "async def add_table(self, table: TableSchema) -> None:\n name = table['name']\n if not self._schema_valid_prod(table):\n if self.update_schema: # Interactively create or upgrade table schema\n stored_schema = self._get_stored_schema(name)\n if stored_schema: # Generate migrations from previous schema\n table, requests = update_table_schema(stored_schema, table)\n self._create_migration(table, requests)\n self._schema_write(table) # Write new schema to disk\n elif self.prod_mode: # Crash if production mode is on\n raise MigrationException(f\"in prod, and table {name} has outdated schema\")\n\n current_level = await self._get_migration_level(name)\n if current_level is None: # New table\n self._new_table_queue.append(table)\n elif self._needs_migrations(name, current_level): # Needs migration\n self._migration_queue.append(table)\n # else: no need to do anything for this table", "def _TryDeleteTable(dataset_id, table_id, project_id):\n client = GetApiClient()\n service = client.tables\n delete_request_type = GetApiMessage('BigqueryTablesDeleteRequest')\n delete_request = delete_request_type(datasetId=dataset_id, tableId=table_id,\n projectId=project_id)\n service.Delete(delete_request)\n log.info('Deleted table [{}:{}:{}]'.format(project_id, dataset_id, table_id))", "def schema(self):\n return self.table_info.schema", "def _TableExists(dataset_id, table_id, project_id):\n client = GetApiClient()\n service = client.tables\n get_request_type = GetApiMessage('BigqueryTablesGetRequest')\n get_request = get_request_type(datasetId=dataset_id, tableId=table_id,\n projectId=project_id)\n try:\n service.Get(get_request)\n return True\n except apitools_exceptions.HttpNotFoundError:\n log.info('Table with id [{}:{}:{}] not found.'.format(\n project_id, dataset_id, table_id))\n\n return False", "def verify_table(self):\n metadata = MetaData()\n metadata.reflect(bind = StatusSource.engine)\n mine = str(self.table.columns)\n verified = str(metadata.tables[self.tablename].columns)\n if mine != verified:\n raise DbException(\"Table '%s' in the database has schema %s whereas the query's schema is %s\" % (self.tablename, verified, mine))", "def tableExists(self, schema, table):\r\n r = self.fetchSqlRecords(\r\n \"SELECT to_regclass('{}.{}')\".format(schema, table))\r\n return r[0][0]", "def get_schema_entry(self, key, value, base_path=None):\n value_mode, value_type = self.infer_bigquery_type(value)\n if not value_mode or not value_type:\n return None\n sanitized_key = self.sanitize_name(key)\n\n # yapf: disable\n if value_type == 'RECORD':\n new_base_path = json_full_path(base_path, key)\n # recursively figure out the RECORD\n fields = OrderedDict()\n if value_mode == 'NULLABLE':\n self.deduce_schema_for_record(\n json_object=value,\n 
schema_map=fields,\n base_path=new_base_path,\n )\n else:\n for val in value:\n self.deduce_schema_for_record(\n json_object=val,\n schema_map=fields,\n base_path=new_base_path,\n )\n\n schema_entry = OrderedDict([\n ('status', 'hard'),\n ('filled', True),\n ('info', OrderedDict([\n ('fields', fields),\n ('mode', value_mode),\n ('name', sanitized_key),\n ('type', value_type),\n ])),\n ])\n elif value_type == '__null__':\n schema_entry = OrderedDict([\n ('status', 'soft'),\n ('filled', False),\n ('info', OrderedDict([\n ('mode', 'NULLABLE'),\n ('name', sanitized_key),\n ('type', 'STRING'),\n ])),\n ])\n elif value_type == '__empty_array__':\n schema_entry = OrderedDict([\n ('status', 'soft'),\n ('filled', False),\n ('info', OrderedDict([\n ('mode', 'REPEATED'),\n ('name', sanitized_key),\n ('type', 'STRING'),\n ])),\n ])\n elif value_type == '__empty_record__':\n schema_entry = OrderedDict([\n ('status', 'soft'),\n ('filled', False),\n ('info', OrderedDict([\n ('fields', OrderedDict()),\n ('mode', value_mode),\n ('name', sanitized_key),\n ('type', 'RECORD'),\n ])),\n ])\n else:\n # Empty fields are returned as empty strings, and must be treated as\n # a (soft String) to allow clobbering by subsquent non-empty fields.\n if value == \"\" and self.input_format == 'csv':\n status = 'soft'\n filled = False\n else:\n status = 'hard'\n filled = True\n schema_entry = OrderedDict([\n ('status', status),\n ('filled', filled),\n ('info', OrderedDict([\n ('mode', value_mode),\n ('name', sanitized_key),\n ('type', value_type),\n ])),\n ])\n # yapf: enable\n return schema_entry", "def use_table(self):\n connection = self._get_connection()\n cursor = connection.cursor()\n cursor.execute(\n 'select exists(select * from information_schema.tables where table_name=%s)',\n (self.table,),\n )\n if cursor.fetchone()[0]:\n self.logger.info('Using existing table')\n else:\n try:\n cursor.execute(\n f'CREATE TABLE {self.table} ( \\\n ID VARCHAR PRIMARY KEY, \\\n DOC BYTEA);'\n )\n self.logger.info('Successfully created table')\n except (Exception, psycopg2.Error) as error:\n self.logger.error('Error while creating table!')\n connection.commit()\n self._close_connection(connection)", "def clean_table(self, a_schema, a_table):\n \n self._conn.execute(\"delete from %s.%s;\" %(a_schema, a_table))", "def create_staging_schema(cursor,table_schema):\n create_schema = \"CREATE SCHEMA IF NOT EXISTS \" + table_schema + \";\"\n cursor.execute(create_schema)", "def GetTable(self, table_id):\n for table in self.tables:\n if table.table_id == table_id:\n return table\n\n return None", "def raise_exception_for_table(table_reference):\n if table_reference.table_id == non_existing_table_id:\n raise cloud.exceptions.NotFound('')", "def verify_table(self):\r\n metadata = MetaData()\r\n metadata.reflect(bind = DbInsertStatusHandler.engine)\r\n mine = str(self.table.columns)\r\n verified = str(metadata.tables[self.tablename].columns)\r\n if mine != verified:\r\n raise DbException(\"Table '%s' in the database has schema %s whereas the query's schema is %s\" % (self.tablename, verified, mine))", "def flatten_schema_map(\n schema_map,\n keep_nulls=False,\n sorted_schema=True,\n infer_mode=False,\n input_format='json',\n):\n if not isinstance(schema_map, dict):\n raise Exception(\n f\"Unexpected type '{type(schema_map)}' for schema_map\"\n )\n\n # Build the BigQuery schema from the internal 'schema_map'.\n schema = []\n map_items = sorted(schema_map.items()) if sorted_schema \\\n else schema_map.items()\n for name, meta in map_items:\n # 
Skip over fields which have been explicitly removed\n if not meta:\n continue\n\n status = meta['status']\n filled = meta['filled']\n info = meta['info']\n\n # Schema entries with a status of 'soft' are caused by 'null' or\n # empty fields. Don't print those out if the 'keep_nulls' flag is\n # False.\n if status == 'soft' and not keep_nulls:\n continue\n\n # Copy the 'info' dictionary into the schema dict, preserving the\n # ordering of the 'field', 'mode', 'name', 'type' elements. 'bq load'\n # keeps these sorted, so we created them in sorted order using an\n # OrderedDict, so they should preserve order here too.\n new_info = OrderedDict()\n for key, value in info.items():\n if key == 'fields':\n if not value:\n # Create a dummy attribute for an empty RECORD to make\n # the BigQuery importer happy.\n new_value = [\n OrderedDict([\n ('mode', 'NULLABLE'),\n ('name', '__unknown__'),\n ('type', 'STRING'),\n ])\n ]\n else:\n # Recursively flatten the sub-fields of a RECORD entry.\n new_value = flatten_schema_map(\n schema_map=value,\n keep_nulls=keep_nulls,\n sorted_schema=sorted_schema,\n infer_mode=infer_mode,\n input_format=input_format\n )\n elif key == 'type' and value in ['QINTEGER', 'QFLOAT', 'QBOOLEAN']:\n # Convert QINTEGER -> INTEGER, similarly for QFLOAT and QBOOLEAN\n new_value = value[1:]\n elif key == 'mode':\n # 'infer_mode' to set a field as REQUIRED is supported for only\n # input_format = 'csv' because the header line gives us the\n # complete list of fields to be expected in the CSV file. In\n # JSON data files, certain fields will often be completely\n # missing instead of being set to 'null' or \"\". If the field is\n # not even present, then it becomes incredibly difficult (not\n # impossible, but more effort than I want to expend right now)\n # to figure out which fields are missing so that we can mark the\n # appropriate schema entries with 'filled=False'.\n #\n # The --infer_mode option is activated only for\n # input_format == 'csv' in this function, which allows us to\n # overload the --infer_mode flag to mean that a REQUIRED mode of\n # an existing schema can transition to a NULLABLE mode.\n if (infer_mode and value == 'NULLABLE' and filled\n and input_format == 'csv'):\n new_value = 'REQUIRED'\n else:\n new_value = value\n else:\n new_value = value\n new_info[key] = new_value\n schema.append(new_info)\n return schema", "def get_table_meta(self, table_name):\n table = self._metadata['tables'].get(table_name)\n if table is None:\n raise ValueError('Table \"{}\" does not exist'.format(table_name))\n\n return copy.deepcopy(table)", "def decode_fusionTable_schema(tables: list) -> dict:\n def _map_col(col: dict) -> dict:\n col_schema = {k: col.get(k, '') for k in ('name', 'columnId', 'description')}\n if col['name'] != 'Comment':\n col_schema['mode'] = 'REQUIRED'\n # We can actually use int/float now!\n col_type: str = col['type']\n if col_type == 'NUMBER':\n col_type = 'INT64' if col['formatPattern'] == 'NUMBER_INTEGER' else 'FLOAT64'\n col_schema['type'] = col_type\n return col_schema\n\n def _map_table(table: dict) -> dict:\n table_schema = {k: table.get(k, '') for k in ('name', 'tableId', 'description')}\n table_schema['columns'] = list(map(_map_col, table['columns']))\n return table_schema\n\n return dict((s.get('tableId'), s) for s in map(_map_table, tables))", "def read_sql_table(table_name, engine, schema=None, meta=None, index_col=None,\n coerce_float=True, parse_dates=None, columns=None,\n chunksize=None):\n if meta is None:\n meta = MetaData(engine, 
schema=schema)\n \n try:\n meta.reflect(only=[table_name])\n \n except exc.InvalidRequestError:\n raise ValueError(\"Table %s not found\" % table_name)\n\n pandas_sql = SQLDatabase(engine, meta=meta)\n \n table = pandas_sql.read_table(table_name, index_col=index_col, coerce_float=coerce_float,\n parse_dates=parse_dates, columns=columns, chunksize=chunksize)\n\n if table is not None:\n return table\n else:\n raise ValueError(\"Table %s not found\" % table_name, con)", "def does_table_exist(table_name):\n result = call_bq(\n ['show', table_name], project='khan-academy', raise_exception=False)\n\n if \"Not found: Table\" in result:\n return False\n else:\n return True", "def create_schema(cursor, connection, table):\n error_message = []\n error_rc = 0\n utils.logit(\"info\", \"In create_schema function\", 0)\n if (table == \"all\"):\n tables = tls_tables.keys()\n utils.logit(\"info\", \"Request to create entire schema\", 0)\n else:\n utils.logit(\"info\", \"Request to create table --> {}\".format(table), 0)\n tables = table if type(table) is list else [table]\n\n for table_name in tables:\n utils.logit(\"info\", \"Creating table --> {}\".format(table_name), 1)\n (create_sql, *args) = tls_tables.get(table_name, fail)(table_name)\n try:\n cursor.execute(create_sql)\n error_detail = [\"Successfully created table --> {}\".format(table_name)]\n utils.logit(\"info\", error_detail, 0)\n connection.commit()\n except Exception as e:\n error_rc += 1\n error_detail = [\"ERROR creating table --> {}\".format(table_name)]\n utils.logit(\"warning\", error_detail, 0)\n utils.logit(\"warning\", \"SQL Error --> {}\".format(e), 0)\n error_detail.append(e)\n\n # sql_stmt = cursor.mogrify(create_sql)\n utils.logit(\"info\", \"SQL Statement --> {}\".format(create_sql), 0)\n error_message.append(error_detail)\n error_message.append(create_sql)\n\n return(error_message, error_rc)", "def fetch_schema(self) -> None:\n if self.schema_file:\n logger.info(\"Loaded schema from file '%s'\", self.schema_file)\n self._schema = load_schema_file(self.schema_file)\n else:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})", "def parse_schema(schemaurl, schema_dir=None):\n if schema_dir:\n try:\n # attempts to open .schema file in directory schema_dir\n local_schema_path = schema_dir + \"/\" + \\\n schemaurl[schemaurl.rfind('/') + 1:-1] + \".schema\"\n print \"Looking for schema in file %s\" % (local_schema_path)\n schema = json.load(open(local_schema_path))\n except Exception as e:\n print \"Couldn't load schema %s from file %s\\n%s\" % (\n schemaurl, schema_dir, str(e))\n return None\n else:\n # load the schema directly from schemaurl, i.e., from the web\n try:\n schema = json.load(urllib2.urlopen(schemaurl))\n except Exception as e:\n print \"Couldn't load schema %s\\n%s\" % (schemaurl, str(e))\n return None\n\n if 'extends' in schema and '$ref' in schema['extends']:\n\n parent_schema = json.load(urllib2.urlopen(schema['extends']['$ref']))\n while (True): # exits loop when no additional extensions (break below)\n for key in sorted(parent_schema.keys()):\n if key not in schema:\n schema[key] = parent_schema[key]\n # need to merge these keys individually\n if key == 'properties':\n for key in sorted(parent_schema['properties'].keys()):\n if key not in schema['properties']:\n schema['properties'][key] = parent_schema[\n 'properties'][key]\n if 'extends' in parent_schema:\n parent_schema = 
json.load(\n urllib2.urlopen(parent_schema['extends']['$ref']))\n else:\n break\n # essentially a do while loop (exit condition)\n\n return schema", "def check_table(table_name = None):\n\n if table_name is None:\n table_name = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n \n result = None\n\n try:\n cur = conn.cursor()\n cur.execute(\"\"\"\n USE %s\n \"\"\"%(config['db'], ))\n\n cur.execute(\"\"\"\n SHOW TABLES;\n \"\"\")\n \n all_tables = cur.fetchall()\n if (table_name,) in all_tables:\n result = True\n else:\n result = False\n except Exception as e:\n print(\"check_table FAILED\")\n print(e)\n\n conn.close()\n tunnel.close()\n return result", "def column_to_bq_schema(self):\n kwargs = {}\n if len(self.fields) > 0:\n fields = [field.column_to_bq_schema() for field in self.fields]\n kwargs = {\"fields\": fields}\n\n return google.cloud.bigquery.SchemaField(self.name, self.dtype,\n self.mode, **kwargs)", "def has_table(table_name, engine, schema=None):\n pandas_sql = SQLDatabase(engine, schema=schema, meta=None)\n return pandas_sql.has_table(table_name)", "def _add_table_schema(table_desc, table_name, schema):\n table_desc['TableName'] = table_name\n table_desc['AttributeDefinitions'] = [{\n 'AttributeName': item['name'],\n 'AttributeType': DynamoStubber._encode_type(item['type'])\n } for item in schema]\n table_desc['KeySchema'] = [{\n 'AttributeName': item['name'],\n 'KeyType': item['key_type']\n } for item in schema]", "def _get_or_create_table(self):\n\n table_schema = self._get_table_schema()\n try:\n table_description = self.client.create_table(**table_schema)\n logging.info('DynamoDB Table %s did not exist, creating.',self.table_name)\n\n # In case we created the table, wait until it becomes available.\n self._wait_for_table_status('ACTIVE')\n logging.info('DynamoDB Table %s is now available.',self.table_name)\n\n self.client.update_time_to_live(\n TableName=self.table_name,\n TimeToLiveSpecification={\n 'Enabled': True,\n 'AttributeName': self._expiry_field.name\n }\n )\n logging.info('DynamoDB Table %s now expires items',self.table_name)\n\n return table_description\n\n except ClientError as e:\n error_code = e.response['Error'].get('Code', 'Unknown')\n # If table exists, do not fail, just return the description.\n if error_code == 'ResourceInUseException':\n return self.client.describe_table(TableName=self.table_name)\n else:\n raise e", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def ReadSchema(schema, bigquery_messages):\n\n return bigquery_messages.TableSchema(\n fields=[\n _TableFieldSchemaForEntry(entry, bigquery_messages)\n for entry in schema.split(',')])", "def check_schema(\n self, client: Any, dataframe: DataFrame, table_name: str, database: str = None\n ) -> DataFrame:", "def setup_table(table_name = None, reconstruct = False):\n \n if table_name is None:\n table_name = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n try:\n cur = conn.cursor()\n cur.execute(\"\"\"\n USE %s\n \"\"\"%(config['db'], ))\n\n if reconstruct:\n 
cur.execute(\"\"\"\n DROP TABLE IF EXISTS `%s`;\n \"\"\"%(table_name,))\n cur.execute(\"\"\"CREATE TABLE `%s` (\n `id` INT UNSIGNED AUTO_INCREMENT,\n `actual value` INT UNSIGNED,\n `predict value` INT UNSIGNED,\n PRIMARY KEY(`id`)\n )\n ;\"\"\"%(table_name,))\n conn.commit()\n\n cur.execute(\"\"\"\n SHOW TABLES;\n \"\"\")\n conn.commit()\n \n all_tables = cur.fetchall()\n assert((table_name,) in all_tables)\n print(\"setup_table PASSED\")\n except Exception as e:\n print(\"setup_table FAILED\")\n print(e)\n\n conn.close()\n tunnel.close()", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def generate_bq_schema(self, file_name, schema_file_name=None):\n if not schema_file_name:\n schema_file_name = f'{self.directory}/schema_temp.json'\n os.system(f\"generate-schema --keep_nulls < {file_name} > {schema_file_name}\")\n\n schema = open(schema_file_name, 'r').read()\n\n os.remove(schema_file_name)\n\n return json.loads(schema)", "def check_if_table_exists(self, table_name):\n cursor = self.conn.cursor()\n cursor.execute(\"SELECT EXISTS(SELECT * FROM information_schema.tables WHERE table_name=%s)\", (table_name,)), \n self.conn.commit()\n return cursor.fetchone()[0]", "def check_db_schema(self):\n if not self.db.get_tables():\n self.create_db_schema()", "def _find_table(name):\n tables = Base.metadata.tables\n table = tables.get(name, None)\n if table is not None:\n return table\n else:\n raise NameError('Unable to locate table: %s' % name)", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def create_temporary_table(self, table_name, custom_sql, schema_name=None):\n\n ###\n # NOTE: 20200310 - The update to support snowflake transient table creation revealed several\n # import cases that are not fully handled.\n # The snowflake-related change updated behavior to allow both custom_sql and schema to be specified. But\n # the underlying incomplete handling of schema remains.\n #\n # Several cases we need to consider:\n #\n # 1. Distributed backends (e.g. Snowflake and BigQuery) often use a `<database>.<schema>.<table>`\n # syntax, but currently we are biased towards only allowing schema.table\n #\n # 2. In the wild, we see people using several ways to declare the schema they want to use:\n # a. In the connection string, the original RFC only specifies database, but schema is supported by some\n # backends (Snowflake) as a query parameter.\n # b. As a default for a user (the equivalent of USE SCHEMA being provided at the beginning of a session)\n # c. As part of individual queries.\n #\n # 3. 
We currently don't make it possible to select from a table in one query, but create a temporary table in\n # another schema, except for with BigQuery and (now) snowflake, where you can specify the table name (and\n # potentially triple of database, schema, table) in the batch_kwargs.\n #\n # The SqlAlchemyDataset interface essentially predates the batch_kwargs concept and so part of what's going\n # on, I think, is a mismatch between those. I think we should rename custom_sql -> \"temp_table_query\" or\n # similar, for example.\n ###\n\n engine_dialect = self.sql_engine_dialect.name.lower()\n # handle cases where dialect.name.lower() returns a byte string (e.g. databricks)\n if isinstance(engine_dialect, bytes):\n engine_dialect = str(engine_dialect, \"utf-8\")\n\n if engine_dialect == \"bigquery\":\n stmt = \"CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}\".format(\n table_name=table_name, custom_sql=custom_sql\n )\n elif engine_dialect == \"databricks\":\n stmt = \"CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}\".format(\n table_name=table_name, custom_sql=custom_sql\n )\n elif engine_dialect == \"snowflake\":\n table_type = \"TEMPORARY\" if self.generated_table_name else \"TRANSIENT\"\n\n logger.info(\"Creating temporary table %s\" % table_name)\n if schema_name is not None:\n table_name = schema_name + \".\" + table_name\n stmt = \"CREATE OR REPLACE {table_type} TABLE {table_name} AS {custom_sql}\".format(\n table_type=table_type, table_name=table_name, custom_sql=custom_sql\n )\n elif self.sql_engine_dialect.name == \"mysql\":\n # Note: We can keep the \"MySQL\" clause separate for clarity, even though it is the same as the generic case.\n stmt = \"CREATE TEMPORARY TABLE {table_name} AS {custom_sql}\".format(\n table_name=table_name, custom_sql=custom_sql\n )\n elif self.sql_engine_dialect.name == \"mssql\":\n # Insert \"into #{table_name}\" in the custom sql query right before the \"from\" clause\n # Split is case sensitive so detect case.\n # Note: transforming custom_sql to uppercase/lowercase has uninteded consequences (i.e., changing column names), so this is not an option!\n if \"from\" in custom_sql:\n strsep = \"from\"\n else:\n strsep = \"FROM\"\n custom_sqlmod = custom_sql.split(strsep, maxsplit=1)\n stmt = (\n custom_sqlmod[0] + \"into {table_name} from\" + custom_sqlmod[1]\n ).format(table_name=table_name)\n elif engine_dialect == \"awsathena\":\n stmt = \"CREATE TABLE {table_name} AS {custom_sql}\".format(\n table_name=table_name, custom_sql=custom_sql\n )\n elif engine_dialect == \"oracle\":\n stmt = \"CREATE GLOBAL TEMPORARY TABLE {table_name} ON COMMIT PRESERVE ROWS AS {custom_sql}\".format(\n table_name=table_name, custom_sql=custom_sql\n )\n else:\n stmt = 'CREATE TEMPORARY TABLE \"{table_name}\" AS {custom_sql}'.format(\n table_name=table_name, custom_sql=custom_sql\n )\n\n self.engine.execute(stmt)", "def upload_table_data(client: bigquery.Client, tableRef: bigquery.Table, fusionFile: str) -> bigquery.LoadJob:\n with open(fusionFile, mode='rb') as file:\n job = client.load_table_from_file(file, tableRef)\n return job", "def get_table(self, command, schema_name):\n self.log.debug(\"command:%s, schema_name:%s\" % (command, schema_name))\n if command is None:\n self.log.debug(\"No command provided: %s\" % command)\n return common.status_codes.FAILURE\n\n response = self.run_cli(command)\n if response is common.status_codes.FAILURE:\n return response\n schema_class = self.get_cli_schema_class(schema_name)\n self.set_schema_class(schema_class)\n 
schema_object = self.get_schema_object()\n response = self.preprocess_response(response, schema_name)\n #skip the command in response\n n = response.index('\\n')\n response = response[n:]\n schema_object.set_data_raw(response)\n\n return schema_object.table", "def write_to_bq(self, \n table_name, \n file_name, \n append=True, \n ignore_unknown_values=False, \n bq_schema_autodetect=False):\n table_name = table_name.lower().replace(\"-\",\"_\")\n self.log.info(f\"Writing {table_name} to BQ from file {file_name}\")\n dataset_ref = self.bq_client.dataset(self.dataset_id)\n table_ref = dataset_ref.table(table_name)\n\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = 'NEWLINE_DELIMITED_JSON'\n\n if bq_schema_autodetect == False:\n # prepare for schema manipulation\n current_tables = [x.table_id for x in self.bq_client.list_tables(dataset_ref)]\n new_schm = self.generate_bq_schema(file_name)\n\n # if table exists, edit schema. otherwise, use new_schm\n if table_name in current_tables:\t\n table = self.bq_client.get_table(table_ref)\t\n new_schm = self.merge_schemas(table.to_api_repr()['schema']['fields'], new_schm)\n\n # move new_schm into job_config through the api_repr options\n api_repr = job_config.to_api_repr()\n api_repr['load']['schema'] = {'fields': new_schm}\n job_config = job_config.from_api_repr(api_repr)\n else:\n job_config.autodetect = True\n \n # handle write options\n if append == False:\n job_config.write_disposition = \"WRITE_TRUNCATE\"\n else:\n job_config.write_disposition = \"WRITE_APPEND\"\n job_config.schema_update_options = ['ALLOW_FIELD_ADDITION']\n\n if ignore_unknown_values:\n job_config.ignore_unknown_values = True\n\n # send to BQ\n with open(file_name, 'rb') as source_file:\n job = self.bq_client.load_table_from_file(\n source_file,\n table_ref,\n job_config=job_config) # API request\n \n try:\n job.result() # Waits for table load to complete.\n except: \n self.log.info(job.errors)\n job.result()", "def check_table(self, table_name: str) -> bool:\n try:\n if self.engine.dialect.has_table(self.engine.connect(), table_name):\n return self.get_input(table_name)\n return False\n except Exception as err:\n logger.error(\"check_table [error] -> %s\" % err)\n return False", "def json_schema(schema_file=None, output=\"-\"):\n schemas = read_yaml(schema_file)\n dump_yaml(output, JsonSchemaConverterFromAccessSchema.convert_schemas(schemas))", "def import_table(self,\n db_type: t.Union[DBType, str],\n table: str,\n input_file: t.Union[Path, str],\n schema: str = 'public'):\n db_type = DBType(db_type)\n db_engine = self.get_db_engine(db_type=db_type)\n input_file = Path(input_file).resolve()\n\n if not check_if_table_or_view_exists(db_engine=db_engine, table_or_view=table, schema=schema):\n raise LookupError(f\"Can't load into '{schema}.{table}' - does not exist in '{db_type.value}' db.\")\n\n import_from_csv(\n db_engine=db_engine,\n input_file=input_file,\n table=table,\n schema=schema\n )", "def get_schema(self, engine, frame, name, keys=None):\n pandas_sql = SQLDatabase(engine, schema=None, meta=None)\n return pandas_sql._create_sql_schema(frame, name, keys=keys)", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def truncateTable(self, schema, table):\r\n return self.runSql('TRUNCATE TABLE {}'.format(self.encodeTableName(schema, table)))", "def _create_table_if_not_exists(self) -> None:\n COLUMN_DEFINITIONS = 'definitions'\n COLUMN_TYPE = 'type'\n\n KEY_REF = '$ref'\n\n 
TYPE_LOOKUP = {\n 'string': 'VARCHAR(255)',\n 'integer': 'INTEGER',\n 'boolean': 'BOOLEAN',\n 'number': 'INTEGER',\n }\n\n def ref_lookup(\n property: Dict[str, Any], fields: Dict[str, Any]\n ) -> Dict[str, Any]:\n ref = property[KEY_REF]\n property_lookup_name = ref[ref.rfind('/') + 1 :]\n return fields[COLUMN_DEFINITIONS][property_lookup_name]\n\n field_queries = []\n fields = json.loads(self.schema.schema_json())\n\n del fields[Keywords.Properties.value][\n Keywords.ID.value\n ] # Remove primary key field. It is handled with auto increment below.\n\n for property_name, property in fields[Keywords.Properties.value].items():\n if KEY_REF in property:\n property = ref_lookup(property, fields)\n field_queries.append(\n f'{property_name} {TYPE_LOOKUP[property[COLUMN_TYPE]]}'\n )\n table_columns = ', '.join(field_queries)\n\n with connect(**BaseModel.db_settings) as connection:\n cursor = connection.cursor()\n cursor.execute(\n f'CREATE TABLE IF NOT EXISTS {self.table_name} (ID INTEGER PRIMARY KEY AUTO_INCREMENT, {table_columns})'\n )\n self._table_created[self.table_name] = True", "def get_table(self, table, format=\"FITS\", verbose=False):\n # make sure the table exists\n try:\n results = self.quick(\"select top 0 * from {}\".format(table),context=\"MYDB\")\n except Exception as e:\n # raise ValueError(\"table MyDB.{} not found\".format(table)) from None\n raise_from(ValueError(\"table MyDB.{} not found\".format(table)), None)\n # first try to get it as a quick request, which is much faster if it works\n try:\n return self.quick(\"select * from {}\".format(table),context=\"MYDB\",astropy=True)\n except Exception as e:\n pass\n \n # sigh, have to go through output queue\n t0 = time.time()\n format = format.upper()\n if format not in [\"FITS\",\"CSV\"]:\n # just force a good value\n format = \"FITS\"\n if verbose:\n print(\"Making output request for {}-format data\".format(format))\n job_id = self.request_output(table,format)\n status = self.monitor(job_id)\n if status[0] != 5:\n raise Exception(\"Output request failed.\")\n job_info = self.job_info(jobid=job_id)[0]\n url = job_info[\"OutputLoc\"]\n if format == \"FITS\":\n fh = fits.open(url)\n # TDIM keywords in the Casjobs FITS header are simply wrong\n # Have to delete them to avoid bad problems in astropy.io.fits\n del fh[1].header['TDIM*']\n tab = Table(fh[1].data)\n fh.close()\n else:\n r = requests.get(url)\n r.raise_for_status()\n tab = ascii.read(MastCasJobs.replacenull(r.text),format='csv')\n if verbose:\n print(\"{:.1f} s: Retrieved {} row {} table\".format(time.time()-t0,len(tab),format))\n return tab", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in 
columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def generate_url_schema():\n json_str = json.dumps({'fields': [\n {'name': 'url', 'type': 'STRING', 'mode': 'NULLABLE'},\n {'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}]})\n return parse_table_schema_from_json(json_str)", "def get_sql_create_table(schema, if_not_exists=False):\n if not schema.get(\"name\"):\n raise ValueError(\"no table name defined\")\n if not schema.get(\"columns\"):\n raise ValueError(\"no columns defined\")\n names = set()\n for column in schema[\"columns\"]:\n if utils.name_in_nocase(column[\"name\"], names):\n raise ValueError(\"column name %s repeated\" % column[\"name\"])\n if column[\"name\"] == \"rowid\":\n raise ValueError(\"column name 'rowid' is reserved by the system\")\n names.add(column[\"name\"])\n # Collect columns forming primary key.\n primarykey = []\n for column in schema[\"columns\"]:\n if column.get(\"primarykey\"):\n primarykey.append(column[\"name\"])\n # Column definitions, including column constraints.\n clauses = []\n for column in schema[\"columns\"]:\n coldef = [f\"\"\"\"{column['name']}\" {column['type']}\"\"\"]\n if column[\"name\"] in primarykey:\n column[\"notnull\"] = True\n if len(primarykey) == 1:\n coldef.append(\"PRIMARY KEY\")\n if column.get(\"notnull\"):\n coldef.append(\"NOT NULL\")\n clauses.append(\" \".join(coldef))\n # Primary key when more than one column.\n if len(primarykey) >= 2:\n 
clauses.append(\"PRIMARY KEY (%s)\" % \",\".join(['\"%s\"' for k in primarykey]))\n # Foreign keys.\n for foreignkey in schema.get(\"foreignkeys\", []):\n clauses.append(\n 'FOREIGN KEY (%s) REFERENCES \"%s\" (%s)'\n % (\n \",\".join([f'\"{c}\"' for c in foreignkey[\"columns\"]]),\n foreignkey[\"ref\"],\n \",\".join([f'\"{c}\"' for c in foreignkey[\"refcolumns\"]]),\n )\n )\n sql = [\"CREATE TABLE\"]\n if if_not_exists:\n sql.append(\"IF NOT EXISTS\")\n sql.append('\"%s\"' % schema[\"name\"])\n sql.append(\"(%s)\" % \", \".join(clauses))\n return \" \".join(sql)", "def check_schema_uri(self):\n import asdf\n\n if self.schema_uri is not None:\n with log.augment_exception(\"Invalid ASDF schema URI:\", self.schema_uri):\n asdf.schema.load_schema(self.schema_uri)", "def get_schema(self, schema_versions_info):\n schema = None\n version = api_version_request.APIVersionRequest(VOLUME_MICROVERSION)\n for items in schema_versions_info:\n min_version = api_version_request.APIVersionRequest(items['min'])\n max_version = api_version_request.APIVersionRequest(items['max'])\n # This is case where COMPUTE_MICROVERSION is None, which means\n # request without microversion So select base v2.1 schema.\n if version.is_null() and items['min'] is None:\n schema = items['schema']\n break\n # else select appropriate schema as per COMPUTE_MICROVERSION\n elif version.matches(min_version, max_version):\n schema = items['schema']\n break\n if schema is None:\n raise exceptions.JSONSchemaNotFound(\n version=version.get_string(),\n schema_versions_info=schema_versions_info)\n return schema", "def create_table(self, schema: str, table: str, col_types: dict, non_null_columns: List[str]):\n return", "def has_desired_schema(self):\n if self._new_table == self._old_table:\n if not self.rebuild:\n log.info(\"Table already has the desired schema. \")\n return True\n else:\n log.info(\n \"Table already has the desired schema. However \"\n \"--rebuild is specified, doing a rebuild instead\"\n )\n return False\n return False", "def check_table_exists(self, table_name):\n query_success, query_resp = self.run_athena_query(\n query='SHOW TABLES LIKE \\'{}\\';'.format(table_name),\n database=self.DATABASE_STREAMALERT\n )\n\n if query_success and query_resp['ResultSet']['Rows']:\n return True\n\n LOGGER.info('The streamalert table \\'%s\\' does not exist. 
'\n 'For alert buckets, create it with the following command: \\n'\n '$ python stream_alert_cli.py athena create-table '\n '--type alerts --bucket s3.bucket.id',\n table_name)\n return False", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table", "def get_schema_by_name(parent_schema, full_name):\n\n # If no proper schema was provided, return None.\n if parent_schema is None:\n return None\n\n # Find a schema in the full schema that has a full name that\n # matches the provided one.\n # If the provided schema is a union schema, search the subschemas.\n if parent_schema.type == Utils._AVRO_UNION_TYPE:\n for schema in parent_schema.schemas:\n target_schema = Utils.get_schema_by_name(schema, full_name)\n if target_schema is not None:\n return target_schema\n\n # If the provided schema has a nested record definition, search that.\n elif parent_schema.type == Utils._AVRO_RECORD_TYPE:\n\n # If our parent schema has the name we are looking for, return it.\n if parent_schema.fullname == full_name:\n return parent_schema\n\n for field in parent_schema.fields:\n # For whatever reason, the schema of a record is stored in\n # a field named type.\n schema = field.type\n target_schema = Utils.get_schema_by_name(schema, full_name)\n if target_schema is not None:\n return target_schema\n\n # If no matching schema was found, return None.\n return None", "def check_schema(self, response):\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result", "def get_default_schema(self):\n schema = self._connection.settings.get(\"schema\")\n if schema:\n res = (\n self.sql(_SELECT_SCHEMA_NAME_QUERY.format(escape(schema)))\n .execute()\n .fetch_all()\n )\n try:\n if res[0][0] == schema:\n return Schema(self, schema)\n except IndexError:\n raise ProgrammingError(\n f\"Default schema '{schema}' does not exists\"\n ) from None\n return None", "def to_json_schema(dataframe_schema):\n empty = pd.DataFrame(columns=dataframe_schema.columns.keys()).astype(\n {k: v.type for k, v in dataframe_schema.dtypes.items()}\n )\n table_schema = pd.io.json.build_table_schema(empty)\n\n def _field_json_schema(field):\n return {\n \"type\": \"array\",\n \"items\": {\"type\": field[\"type\"]},\n }\n\n return {\n \"title\": dataframe_schema.name or \"pandera.DataFrameSchema\",\n \"type\": \"object\",\n \"properties\": {\n field[\"name\"]: _field_json_schema(field)\n for field in table_schema[\"fields\"]\n },\n }", "def getTable(self, tablename):\n tablename = self.prefix + tablename\n if not tablename in self.tables:\n self.tables[tablename] = Table( tablename, self.metadata, \\\n autoload=True, autoload_with=self.conn )\n\n return self.tables[tablename]" ]
[ "0.73889893", "0.6379297", "0.6305416", "0.6051959", "0.58043814", "0.57696646", "0.56733495", "0.5627315", "0.5534374", "0.5378048", "0.5363961", "0.53382576", "0.5309689", "0.5298764", "0.5259146", "0.52387893", "0.52369916", "0.52243495", "0.52111673", "0.5200395", "0.5174603", "0.5162272", "0.5160721", "0.51297754", "0.5118883", "0.5115819", "0.5082074", "0.5081547", "0.50572026", "0.5016008", "0.498506", "0.49842352", "0.49795562", "0.49772573", "0.49673092", "0.491708", "0.4915413", "0.4913952", "0.4895537", "0.4884174", "0.4873055", "0.48575428", "0.48561898", "0.4832509", "0.48121166", "0.4802197", "0.47910953", "0.47888026", "0.47582287", "0.47488788", "0.47448787", "0.47356552", "0.4732936", "0.47316298", "0.47284755", "0.47191116", "0.4717631", "0.47126135", "0.46999872", "0.46964434", "0.46911573", "0.46806735", "0.4666518", "0.46501893", "0.4640006", "0.4635662", "0.46354452", "0.46347308", "0.46346077", "0.46314654", "0.46275786", "0.46191004", "0.4615498", "0.4600725", "0.4593944", "0.4592192", "0.4581459", "0.45786935", "0.45763713", "0.45721313", "0.45705548", "0.45664695", "0.45610282", "0.45532814", "0.45438793", "0.45363635", "0.4511637", "0.44847524", "0.44781804", "0.44760057", "0.44756955", "0.44748446", "0.44651833", "0.4457048", "0.44540232", "0.44483903", "0.44442904", "0.44186062", "0.4416051", "0.44065487" ]
0.74110204
0
Update original_schema by adding and/or relaxing mode on columns.
def _GetUpdatedSchema( original_schema, new_columns=None, relaxed_columns=None): orig_field_map = ( {f.name: f for f in original_schema.fields} if original_schema else {}) if relaxed_columns: orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map) if new_columns: orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map) return sorted(orig_field_map.values(), key=lambda x: x.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GetRelaxedCols(relaxed_columns, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for col in relaxed_columns:\n if col in orig_schema_map:\n updated_schema_map[col].mode = 'NULLABLE'\n else:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n return updated_schema_map", "async def upgradeSchema(self) -> None:", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def _AddNewColsToSchema(new_fields, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for new_field in new_fields:\n if new_field.name in orig_schema_map:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n updated_schema_map[new_field.name] = new_field\n return updated_schema_map", "def rebuild(self, dframe, overwrite=False):\n current_schema = self\n new_schema = schema_from_dframe(dframe, self)\n\n if current_schema and not overwrite:\n # merge new schema with existing schema\n current_schema.update(new_schema)\n new_schema = current_schema\n\n return new_schema", "def ensure_internal_schema_updated(self):\n if self._internal_schema_updated:\n return\n if internalmigrations.needs_upgrading(self):\n assert not self._in_transaction\n with self.lock():\n internalmigrations.upgrade(self)\n self.connection.commit()\n self._internal_schema_updated = True", "def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "def _ensure_schema_has_covariates(self, x_underscore_columns):\n previous_rename = self.covariate_rename\n if set(x_underscore_columns) == set(previous_rename.values()):\n return\n # Only rewrite schema if the x_<integer> list has changed.\n # because the schema depends on the number of covariates, not\n # their names.\n covariate_columns = list(x_underscore_columns)\n # ASCII sorting isn't correct b/c x_11 is before x_2.\n covariate_columns.sort(key=lambda x: int(x[2:]))\n for create_name in [\"data\", \"avgint\"]:\n empty = self.dismod_file.empty_table(create_name)\n without = [c for c in empty.columns if not c.startswith(\"x_\")]\n # The wrapper needs these columns to have a dtype of Real.\n empty = empty[without].assign(**{cname: np.empty((0,), dtype=np.float) for cname in covariate_columns})\n self.dismod_file.update_table_columns(create_name, empty)\n if getattr(self.dismod_file, create_name).empty:\n CODELOG.debug(f\"Writing empty {create_name} table with columns {covariate_columns}\")\n setattr(self.dismod_file, create_name, empty)\n else:\n CODELOG.debug(f\"Adding to {create_name} table schema the columns 
{covariate_columns}\")", "def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)", "def make_schema_changes(self, session, namespace='ns1'):\n debug(\"make_schema_changes() \" + str(namespace))\n session.execute('USE ks_%s' % namespace)\n # drop keyspace\n session.execute('DROP KEYSPACE ks2_%s' % namespace)\n wait(2)\n\n # create keyspace\n self.create_ks(session, \"ks3_%s\" % namespace, 2)\n session.execute('USE ks_%s' % namespace)\n\n wait(2)\n # drop column family\n session.execute(\"DROP COLUMNFAMILY cf2_%s\" % namespace)\n\n # create column family\n query = \"\"\"\n CREATE TABLE cf3_%s (\n col1 uuid PRIMARY KEY,\n col2 text,\n col3 text,\n col4 text\n );\n \"\"\" % (namespace)\n session.execute(query)\n\n # alter column family\n query = \"\"\"\n ALTER COLUMNFAMILY cf_%s\n ADD col4 text;\n \"\"\" % namespace\n session.execute(query)\n\n # add index\n session.execute(\"CREATE INDEX index2_%s ON cf_%s(col3)\"%(namespace, namespace))\n\n # remove an index\n session.execute(\"DROP INDEX index_%s\" % namespace)", "def recreate_db(self, run=False):\n if run:\n db_schema = open(self.db_schema_file).read().splitlines()\n for s in db_schema:\n t = s.strip()\n if len(t):\n self.cur.execute(t)", "def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))", "def update(self, schema: 'Schema'):\n self._update(schema)", "def modify_tbl(self):\n debug = False\n dd = mg.DATADETS_OBJ\n orig_tblname = dd.tbl\n ## other (i.e. 
not the sofa_id) field details\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n if debug:\n print('oth_name_types to feed into '\n f'make_strict_typing_tbl {oth_name_types}')\n try: ## 1 way or other must do strict_cleanup()\n make_strict_typing_tbl(\n orig_tblname, oth_name_types, self.settings_data)\n except sqlite.IntegrityError as e: #@UndefinedVariable\n if debug: print(b.ue(e))\n strict_cleanup(restore_tblname=orig_tblname)\n raise FldMismatchException\n except Exception as e:\n strict_cleanup(restore_tblname=orig_tblname)\n raise Exception('Problem making strictly-typed table.'\n f'\\nCaused by error: {b.ue(e)}')\n copy_orig_tbl(orig_tblname)\n wipe_tbl(orig_tblname)\n final_name = self.tblname_lst[0] ## may have been renamed\n try:\n make_redesigned_tbl(final_name, oth_name_types)\n strict_cleanup(restore_tblname=final_name)\n dd.set_tbl(tbl=final_name)\n except Exception as e:\n strict_cleanup(restore_tblname=orig_tblname)\n restore_copy_tbl(orig_tblname) ## effectively removes tmp_tbl 2\n dd.set_tbl(tbl=orig_tblname)\n raise Exception('Problem making redesigned table.'\n f'\\nCaused by error: {b.ue(e)}')\n wipe_tbl(mg.TMP_TBLNAME2)", "def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)", "def update(self, force=False):\n if (self.__fields is None) or force:\n self.__fields = [f for f in self.dbms.fields(self.table, self.db)]", "def reset_schema_defs():\n SCHEMA_DEFS.clear()\n SCHEMA_DEFS.update((typ, typ) for typ in PRIMITIVE_TYPES)", "def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['status']\n new_mode = new_info['mode']\n new_name = new_info['name']\n new_type = new_info['type']\n new_status = new_schema_entry['status']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # If the old field is a REQUIRED primitive (which could only have come\n # from an existing schema), the new field can be either a\n # NULLABLE(filled) or a NULLABLE(unfilled).\n if old_mode == 'REQUIRED' and new_mode == 'NULLABLE':\n # If the new field is filled, then retain the REQUIRED.\n if new_schema_entry['filled']:\n return old_mode\n else:\n # The new field is not filled (i.e. 
an empty or null field).\n # If --infer_mode is active, then we allow the REQUIRED to\n # revert back to NULLABLE.\n if self.infer_mode:\n return new_mode\n else:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode.'\n ' cannot convert to NULLABLE because infer_schema not'\n ' set:'\n f' old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n elif old_mode != new_mode:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode: '\n f'old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n return old_mode", "def initialize_schema(self, dry_run=False):\n if not dry_run:\n self.flush()", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. 
It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry", "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def fix_incremental(meta, bind):\n meta.create_all(bind=bind, checkfirst=True)\n ref = inspect(bind)\n for table in meta.sorted_tables:\n orm_cols = set(col.name for col in table.c)\n ref_cols = set(col['name'] for col in ref.get_columns(table.name))\n col_to_create = orm_cols - ref_cols\n col_to_delete = ref_cols - orm_cols\n if col_to_create:\n print table.name, 'has diff to create', col_to_create\n with bind.begin() as conn:\n for col_name in col_to_create:\n col = table.c.get(col_name)\n column_sql = CreateColumn(col).compile(bind).string\n sql = 'ALTER TABLE {} ADD COLUMN {}'.format(table.name, column_sql)\n if col.default:\n sql += ' DEFAULT {!r}'.format(col.default.arg) # can break when a pickle type has callable default.\n if not col.nullable:\n sql += ' NOT NULL'\n print 'executing sql: ' + sql\n conn.execute(sql)\n\n # Workaround to ensure updated DBs start with \"False\" in ignore column\n if list(col_to_create)[0] == 'ignore':\n sessionmaker = get_sessionmaker(bind.url.database)\n session = sessionmaker()\n query_object = {'dttrialdff0s': DTTrialDff0, 'trials': Trial}[table.name]\n items = session.query(query_object).all()\n for item in items:\n item.ignore = False\n session.flush()\n\n if col_to_delete:\n print table.name, 'has diff to delete', col_to_delete, 'maybe later version.'\n \"\"\"\n BEGIN TRANSACTION;\n CREATE TEMPORARY TABLE t1_backup(a,b);\n INSERT INTO t1_backup SELECT a,b FROM t1;\n DROP TABLE t1;\n CREATE TABLE t1(a,b);\n INSERT INTO t1 SELECT a,b FROM t1_backup;\n DROP TABLE t1_backup;\n COMMIT;\n \"\"\"", "def compute_schema_updates(self):\n data = self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/schema-update\" % (self.project_key, self.recipe_name))\n return RequiredSchemaUpdates(self, data)", "def has_desired_schema(self):\n if self._new_table == self._old_table:\n if not self.rebuild:\n log.info(\"Table already has the desired schema. \")\n return True\n else:\n log.info(\n \"Table already has the desired schema. 
However \"\n \"--rebuild is specified, doing a rebuild instead\"\n )\n return False\n return False", "def modify_schema(setup_path, names, lp, creds, reporter, ldif, msg):\n\n return provision_schema(setup_path, names, lp, creds, reporter, ldif, msg, True)", "def merge_schema(self, schema):\n super(BooleanAttributeSchema, self).merge_schema(schema)\n self.values.update(schema.values)", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def schema(self, schema):\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def upgrade():\n op.execute(\n f\"\"\"\n ALTER TABLE\n {config.CLEAN_SCHEMA}.forecasts\n RENAME COLUMN\n training_horizon\n TO\n train_horizon;\n \"\"\",\n ) # noqa:WPS355", "def update_schema(self, engine_name, schema):\n endpoint = \"engines/{}/schema\".format(engine_name)\n data = json.dumps(schema)\n return self.swiftype_session.request('post', endpoint, data=data)", "def filter_schema(schema):\n for column, column_schema in schema.iteritems():\n if column_schema.get(CARDINALITY):\n del column_schema[CARDINALITY]\n schema[column] = column_schema\n\n return schema", "def set_schema(self, schema, set_num_columns=True):\n update_dict = {self.SCHEMA: schema}\n\n if set_num_columns:\n update_dict.update({self.NUM_COLUMNS: len(schema.keys())})\n\n self.update(update_dict)", "def finalizeSchema(schema, folderish=False, moveDiscussion=True):\n schema.moveField('businessOldLocation', after='workLocations')\n schema.moveField('foldermanagers', after='businessOldLocation')\n# schema.moveField('bound_licences', after='isTransferOfLicence')\n schema.moveField('rubrics', after='folderCategory')\n schema.moveField('description', after='additionalLegalConditions')\n schema.moveField('referenceFT', after='referenceDGATLP')\n schema.moveField('isTransferOfLicence', after='referenceFT')\n\n return schema", "def _alter_field(\n self,\n model,\n old_field,\n new_field,\n old_type,\n new_type,\n old_db_params,\n new_db_params,\n strict=False,\n ):\n\n super()._alter_field(\n model,\n old_field,\n new_field,\n old_type,\n new_type,\n old_db_params,\n new_db_params,\n strict,\n )\n\n # If the pkey was dropped in the previous distribute migration,\n # foreign key constraints didn't previously exists so django does not\n # recreated them.\n # Here we test if we are in this case\n if isinstance(new_field, TenantForeignKey) and new_field.db_constraint:\n from_model = get_model_by_db_table(model._meta.db_table)\n fk_names = self._constraint_names(\n model, [new_field.column], foreign_key=True\n ) + self._constraint_names(\n model,\n [new_field.column, get_tenant_column(from_model)],\n foreign_key=True,\n )\n if not fk_names:\n self.execute(\n self._create_fk_sql(\n model, new_field, 
\"_fk_%(to_table)s_%(to_column)s\"\n )\n )", "def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)", "def setAllColumns(self, newAllColumns):\n \n pass", "def _clean_up_columns(\n self):\n self.log.debug('starting the ``_clean_up_columns`` method')\n\n tableName = self.dbTableName\n\n print \"cleaning up %(tableName)s columns\" % locals()\n\n sqlQuery = u\"\"\"\n set sql_mode=\"STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\";\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n sqlQuery = u\"\"\"\n update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;\n update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = \"\";\n update %(tableName)s set notes = null where notes = \"\";\n update %(tableName)s set redshift = null where redshift = 0;\n update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = \"\";\n update %(tableName)s set hubble_const = null where hubble_const = 0;\n update %(tableName)s set lmc_mod = null where lmc_mod = 0;\n update %(tableName)s set master_row = 0;\n update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n self.log.debug('completed the ``_clean_up_columns`` method')\n return None", "def apply_to(self, df: pd.DataFrame) -> pd.DataFrame:\n schema_names = self.names\n data_columns = df.columns\n\n assert len(schema_names) == len(\n data_columns\n ), \"schema column count does not match input data column count\"\n\n for column, dtype in zip(data_columns, self.types):\n pandas_dtype = dtype.to_pandas()\n\n col = df[column]\n col_dtype = col.dtype\n\n try:\n not_equal = pandas_dtype != col_dtype\n except TypeError:\n # ugh, we can't compare dtypes coming from pandas,\n # assume not equal\n not_equal = True\n\n if not_equal or not dtype.is_primitive():\n new_col = convert(col_dtype, dtype, col)\n else:\n new_col = col\n df[column] = new_col\n\n # return data with the schema's columns which may be different than the\n # input columns\n df.columns = schema_names\n return df", "def generate_altered_fields(self):\n result = super(MigrationAutodetector, self).generate_altered_fields()\n self.generate_sql_changes()\n return result", "def apply_patch():\n assert BaseDatabaseSchemaEditor is not None\n\n def _create_unique_sql(self, *args, **kwargs):\n from django.db.backends.ddl_references import IndexName\n\n statement = orig_create_unique_sql(self, *args, **kwargs)\n\n if statement is not None:\n index_name = statement.parts['name']\n\n if (isinstance(index_name, IndexName) and\n index_name.create_index_name == self._create_index_name):\n # The result will be unquoted. 
Let's quote it.\n index_name.create_index_name = lambda *args, **kwargs: \\\n self.quote_name(self._create_index_name(*args, **kwargs))\n\n return statement\n\n orig_create_unique_sql = BaseDatabaseSchemaEditor._create_unique_sql\n BaseDatabaseSchemaEditor._create_unique_sql = _create_unique_sql", "def upgrade():\n with op.batch_alter_table(\"slot_pool\") as batch_op:\n batch_op.add_column(sa.Column(\"include_deferred\", sa.Boolean))\n # Different databases support different literal for FALSE. This is fine.\n op.execute(sa.text(f\"UPDATE slot_pool SET include_deferred = {sa.false().compile(op.get_bind())}\"))\n with op.batch_alter_table(\"slot_pool\") as batch_op:\n batch_op.alter_column(\"include_deferred\", existing_type=sa.Boolean, nullable=False)", "def db_schema_32():\n with old_db_schema(\"32\"):\n yield", "def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)", "def set_schema(self, schema):\r\n self.__schema = schema", "def extend_schema(\n schema: GraphQLSchema,\n document_ast: DocumentNode,\n assume_valid=False,\n assume_valid_sdl=False,\n) -> GraphQLSchema:\n assert_schema(schema)\n\n if not isinstance(document_ast, DocumentNode):\n \"Must provide valid Document AST\"\n\n if not (assume_valid or assume_valid_sdl):\n from ..validation.validate import assert_valid_sdl_extension\n\n assert_valid_sdl_extension(document_ast, schema)\n\n # Collect the type definitions and extensions found in the document.\n type_defs: List[TypeDefinitionNode] = []\n type_exts_map: Dict[str, Any] = defaultdict(list)\n\n # New directives and types are separate because a directives and types can have the\n # same name. 
For example, a type named \"skip\".\n directive_defs: List[DirectiveDefinitionNode] = []\n\n schema_def: Optional[SchemaDefinitionNode] = None\n # Schema extensions are collected which may add additional operation types.\n schema_exts: List[SchemaExtensionNode] = []\n\n for def_ in document_ast.definitions:\n if isinstance(def_, SchemaDefinitionNode):\n schema_def = def_\n elif isinstance(def_, SchemaExtensionNode):\n schema_exts.append(def_)\n elif isinstance(def_, TypeDefinitionNode):\n type_defs.append(def_)\n elif isinstance(def_, TypeExtensionNode):\n extended_type_name = def_.name.value\n type_exts_map[extended_type_name].append(def_)\n elif isinstance(def_, DirectiveDefinitionNode):\n directive_defs.append(def_)\n\n # If this document contains no new types, extensions, or directives then return the\n # same unmodified GraphQLSchema instance.\n if (\n not type_exts_map\n and not type_defs\n and not directive_defs\n and not schema_exts\n and not schema_def\n ):\n return schema\n\n # Below are functions used for producing this schema that have closed over this\n # scope and have access to the schema, cache, and newly defined types.\n\n # noinspection PyTypeChecker,PyUnresolvedReferences\n def replace_type(type_: GraphQLType) -> GraphQLType:\n if is_list_type(type_):\n return GraphQLList(replace_type(type_.of_type)) # type: ignore\n if is_non_null_type(type_):\n return GraphQLNonNull(replace_type(type_.of_type)) # type: ignore\n return replace_named_type(type_) # type: ignore\n\n def replace_named_type(type_: GraphQLNamedType) -> GraphQLNamedType:\n return type_map[type_.name]\n\n def get_maybe_type_by_name(type_name: Optional[str]) -> Optional[GraphQLNamedType]:\n return type_map[type_name] if type_name else None\n\n def get_merged_directives() -> List[GraphQLDirective]:\n if not schema.directives:\n raise TypeError(\"schema must have default directives\")\n\n return list(\n chain(\n map(extend_directive, schema.directives),\n map(ast_builder.build_directive, directive_defs),\n )\n )\n\n def extend_named_type(type_: GraphQLNamedType) -> GraphQLNamedType:\n if is_introspection_type(type_) or is_specified_scalar_type(type_):\n # Builtin types are not extended.\n return type_\n if is_scalar_type(type_):\n type_ = cast(GraphQLScalarType, type_)\n return extend_scalar_type(type_)\n if is_object_type(type_):\n type_ = cast(GraphQLObjectType, type_)\n return extend_object_type(type_)\n if is_interface_type(type_):\n type_ = cast(GraphQLInterfaceType, type_)\n return extend_interface_type(type_)\n if is_union_type(type_):\n type_ = cast(GraphQLUnionType, type_)\n return extend_union_type(type_)\n if is_enum_type(type_):\n type_ = cast(GraphQLEnumType, type_)\n return extend_enum_type(type_)\n if is_input_object_type(type_):\n type_ = cast(GraphQLInputObjectType, type_)\n return extend_input_object_type(type_)\n\n # Not reachable. 
All possible types have been considered.\n raise TypeError(f\"Unexpected type: '{inspect(type_)}'.\") # pragma: no cover\n\n def extend_directive(directive: GraphQLDirective) -> GraphQLDirective:\n kwargs = directive.to_kwargs()\n return GraphQLDirective( # type: ignore\n **{\n **kwargs,\n \"args\": {name: extend_arg(arg) for name, arg in kwargs[\"args\"].items()},\n }\n )\n\n def extend_input_object_type(\n type_: GraphQLInputObjectType\n ) -> GraphQLInputObjectType:\n kwargs = type_.to_kwargs()\n extensions = type_exts_map.get(kwargs[\"name\"], [])\n field_nodes = chain.from_iterable(node.fields or [] for node in extensions)\n\n return GraphQLInputObjectType(\n **{\n **kwargs,\n \"fields\": lambda: {\n **{\n name: GraphQLInputField( # type: ignore\n **{**field.to_kwargs(), \"type_\": replace_type(field.type)}\n )\n for name, field in kwargs[\"fields\"].items()\n },\n **{\n field.name.value: ast_builder.build_input_field(field)\n for field in field_nodes\n },\n },\n \"extension_ast_nodes\": kwargs[\"extension_ast_nodes\"] + extensions,\n }\n )\n\n def extend_enum_type(type_: GraphQLEnumType) -> GraphQLEnumType:\n kwargs = type_.to_kwargs()\n extensions = type_exts_map.get(kwargs[\"name\"], [])\n value_nodes = chain.from_iterable(node.values or [] for node in extensions)\n\n return GraphQLEnumType(\n **{\n **kwargs,\n \"values\": {\n **kwargs[\"values\"],\n **{\n value.name.value: ast_builder.build_enum_value(value)\n for value in value_nodes\n },\n },\n \"extension_ast_nodes\": kwargs[\"extension_ast_nodes\"] + extensions,\n }\n )\n\n def extend_scalar_type(type_: GraphQLScalarType) -> GraphQLScalarType:\n kwargs = type_.to_kwargs()\n extensions = type_exts_map.get(kwargs[\"name\"], [])\n\n return GraphQLScalarType(\n **{\n **kwargs,\n \"extension_ast_nodes\": kwargs[\"extension_ast_nodes\"] + extensions,\n }\n )\n\n def extend_object_type(type_: GraphQLObjectType) -> GraphQLObjectType:\n kwargs = type_.to_kwargs()\n extensions = type_exts_map.get(kwargs[\"name\"], [])\n interface_nodes = chain.from_iterable(\n node.interfaces or [] for node in extensions\n )\n field_nodes = chain.from_iterable(node.fields or [] for node in extensions)\n\n return GraphQLObjectType(\n **{\n **kwargs,\n \"interfaces\": lambda: [\n replace_named_type(interface) for interface in kwargs[\"interfaces\"]\n ]\n # Note: While this could make early assertions to get the correctly\n # typed values, that would throw immediately while type system\n # validation with validate_schema will produce more actionable results.\n + [ast_builder.get_named_type(node) for node in interface_nodes],\n \"fields\": lambda: {\n **{\n name: extend_field(field)\n for name, field in kwargs[\"fields\"].items()\n },\n **{\n node.name.value: ast_builder.build_field(node)\n for node in field_nodes\n },\n },\n \"extension_ast_nodes\": kwargs[\"extension_ast_nodes\"] + extensions,\n }\n )\n\n def extend_interface_type(type_: GraphQLInterfaceType) -> GraphQLInterfaceType:\n kwargs = type_.to_kwargs()\n extensions = type_exts_map.get(kwargs[\"name\"], [])\n field_nodes = chain.from_iterable(node.fields or [] for node in extensions)\n\n return GraphQLInterfaceType(\n **{\n **kwargs,\n \"fields\": lambda: {\n **{\n name: extend_field(field)\n for name, field in kwargs[\"fields\"].items()\n },\n **{\n node.name.value: ast_builder.build_field(node)\n for node in field_nodes\n },\n },\n \"extension_ast_nodes\": kwargs[\"extension_ast_nodes\"] + extensions,\n }\n )\n\n def extend_union_type(type_: GraphQLUnionType) -> GraphQLUnionType:\n kwargs = 
type_.to_kwargs()\n extensions = type_exts_map.get(kwargs[\"name\"], [])\n type_nodes = chain.from_iterable(node.types or [] for node in extensions)\n\n return GraphQLUnionType(\n **{\n **kwargs,\n \"types\": lambda: [\n replace_named_type(member_type) for member_type in kwargs[\"types\"]\n ]\n # Note: While this could make early assertions to get the correctly\n # typed values, that would throw immediately while type system\n # validation with validate_schema will produce more actionable results.\n + [ast_builder.get_named_type(node) for node in type_nodes],\n \"extension_ast_nodes\": kwargs[\"extension_ast_nodes\"] + extensions,\n }\n )\n\n def extend_field(field: GraphQLField) -> GraphQLField:\n return GraphQLField( # type: ignore\n **{\n **field.to_kwargs(),\n \"type_\": replace_type(field.type),\n \"args\": {name: extend_arg(arg) for name, arg in field.args.items()},\n }\n )\n\n def extend_arg(arg: GraphQLArgument) -> GraphQLArgument:\n return GraphQLArgument( # type: ignore\n **{**arg.to_kwargs(), \"type_\": replace_type(arg.type)}\n )\n\n # noinspection PyShadowingNames\n def resolve_type(type_name: str) -> GraphQLNamedType:\n type_ = type_map.get(type_name)\n if not type_:\n raise TypeError(f\"Unknown type: '{type_name}'.\")\n return type_\n\n ast_builder = ASTDefinitionBuilder(\n assume_valid=assume_valid, resolve_type=resolve_type\n )\n\n type_map = {node.name.value: ast_builder.build_type(node) for node in type_defs}\n for existing_type_name, existing_type in schema.type_map.items():\n type_map[existing_type_name] = extend_named_type(existing_type)\n\n # Get the extended root operation types.\n operation_types: Dict[OperationType, Optional[str]] = {\n OperationType.QUERY: schema.query_type.name if schema.query_type else None,\n OperationType.MUTATION: schema.mutation_type.name\n if schema.mutation_type\n else None,\n OperationType.SUBSCRIPTION: schema.subscription_type.name\n if schema.subscription_type\n else None,\n }\n\n if schema_def:\n for operation_type in schema_def.operation_types:\n operation = operation_type.operation\n operation_types[operation] = operation_type.type.name.value\n\n # Then, incorporate schema definition and all schema extensions.\n for schema_ext in schema_exts:\n if schema_ext.operation_types:\n for operation_type in schema_ext.operation_types:\n operation = operation_type.operation\n operation_types[operation] = operation_type.type.name.value\n\n # Then produce and return a Schema with these types.\n return GraphQLSchema( # type: ignore\n # Note: While this could make early assertions to get the correctly\n # typed values, that would throw immediately while type system\n # validation with validateSchema() will produce more actionable results.\n query=get_maybe_type_by_name(operation_types[OperationType.QUERY]),\n mutation=get_maybe_type_by_name(operation_types[OperationType.MUTATION]),\n subscription=get_maybe_type_by_name(\n operation_types[OperationType.SUBSCRIPTION]\n ),\n types=list(type_map.values()),\n directives=get_merged_directives(),\n ast_node=schema_def or schema.ast_node,\n extension_ast_nodes=(\n (\n schema.extension_ast_nodes\n or cast(FrozenList[SchemaExtensionNode], FrozenList())\n )\n + schema_exts\n )\n or None,\n )", "def test_compare_schemas_happypath(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.base_schema\n )\n\n assert status == schema_utils.Update.no_update", "def correct_db_schema_precision(\n instance: Recorder,\n table_object: type[DeclarativeBase],\n schema_errors: set[str],\n) -> None:\n 
table_name = table_object.__tablename__\n\n if f\"{table_name}.double precision\" in schema_errors:\n from ..migration import ( # pylint: disable=import-outside-toplevel\n _modify_columns,\n )\n\n precision_columns = _get_precision_column_types(table_object)\n # Attempt to convert timestamp columns to µs precision\n session_maker = instance.get_session\n engine = instance.engine\n assert engine is not None, \"Engine should be set\"\n _modify_columns(\n session_maker,\n engine,\n table_name,\n [f\"{column} {DOUBLE_PRECISION_TYPE_SQL}\" for column in precision_columns],\n )", "def _update(self, schema: 'Schema'):\n for method in schema._get_methods():\n if method.id in self:\n raise ValueError(\n f\"Duplicate method id for {method.method} id: {method.id}\"\n )\n\n for combinator in schema._get_combinators():\n if combinator.id in self:\n raise ValueError(\n f\"Duplicate combinator id for {combinator.predicate} \" +\n f\"id: {combinator.id}\"\n )\n\n self.constructors += schema.constructors\n self.functions += schema.functions\n\n self._build_schema_data()", "def upgrade():\n op.add_column(\n 'wish', sa.Column(\n 'admin_votes_num', sa.Integer(), server_default='0',\n nullable=True))\n op.add_column(\n 'wish', sa.Column('real_votes_num', sa.Integer(), nullable=True))\n op.execute('UPDATE wish SET real_votes_num = votes_num')\n op.drop_column('wish', 'votes_num')", "def merge_schemas(self, old_schm, new_schm):\n\n old_schm_cols = [x['name'] for x in old_schm]\n\n for col in new_schm:\n if type(col) == dict:\n if col['name'] not in old_schm_cols:\n old_schm.append(col)\n \n for count, old_col in enumerate(old_schm):\n for meta in old_col:\n if type(old_col[meta]) == list:\n if old_col['name'] in [pot_new_col['name'] for pot_new_col in new_schm]:\n new_col = [pot_new_col for pot_new_col in new_schm if pot_new_col['name'] == old_col['name']][0]\n if meta in new_col:\n old_schm[count][meta] = self.merge_schemas(old_col[meta], new_col[meta])\n \n return old_schm", "def update_2xto30():\r\n if db_version > 300:\r\n log.debug('Database newer than schema version 300.')\r\n return # No update needed\r\n\r\n log.debug('Updating to schema 300.')\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old') # temporary table\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_300)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')", "def _migrate_if_necessary(self, entries):\r\n entries = [\r\n self._migrate[entry.get('schema', 0)](self, entry)\r\n for entry in entries\r\n ]\r\n return entries", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def fix_all(self):\n\n altered_tables = {}\n\n for ingestible_db_conf in self.ingestible_db_conf_repo.get_ingestible_dbs():\n target_db= ingestible_db_conf.target_db_name\n db_type = ingestible_db_conf.db_type\n self.logger.info(\"Fixing consistency for DB Type: %s, Target DB: %s\" % (db_type, target_db))\n self.prepare_database(target_db)\n consistency_checker = HiveConsistencyChecker(target_db, db_type)\n\n unused_tables = consistency_checker.get_unused_tables()\n self.remove_unused_tables(unused_tables)\n\n new_tables = 
consistency_checker.get_new_tables()\n self.create_new_tables(new_tables)\n\n inconsistent_tables = consistency_checker.get_inconsistent_tables()\n self.fix_inconsistent_tables(inconsistent_tables, db_type)\n\n # Combine lists of inconsistent and unused tables\n altered_tables[db_type] = map(lambda qualified_table: qualified_table.split(\".\")[1],\n inconsistent_tables.keys() + unused_tables)\n\n self.logger.debug(\"Altered Tables: %s\" % altered_tables)\n return altered_tables", "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def schema_upgrades():\n op.create_table('vpp_licenses',\n sa.Column('license_id', sa.Integer(), nullable=False),\n sa.Column('adam_id', sa.String(), nullable=True),\n sa.Column('product_type', sa.Enum('Software', 'Application', 'Publication', name='vppproducttype'), nullable=True),\n sa.Column('product_type_name', sa.String(), nullable=True),\n sa.Column('pricing_param', sa.Enum('StandardQuality', 'HighQuality', name='vpppricingparam'), nullable=True),\n sa.Column('is_irrevocable', sa.Boolean(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('client_user_id', commandment.dbtypes.GUID(), nullable=True),\n sa.Column('its_id_hash', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['client_user_id'], ['vpp_users.client_user_id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['vpp_users.user_id'], ),\n sa.PrimaryKeyConstraint('license_id')\n )", "def same_schema(self):\n return self._same_schema", "def enforce_constraints(self):\n self.session.flush()\n try:\n self.session.execute('SET CONSTRAINTS ALL IMMEDIATE')\n except DatabaseError:\n handle_sqlalchemy_database_error()", "def resolve_schema(self, data):\n if not isinstance(data, dict):\n return\n\n # OAS 2 component or OAS 3 header\n if \"schema\" in data:\n data[\"schema\"] = self.openapi.resolve_schema_dict(data[\"schema\"])\n # OAS 3 component except header\n if self.openapi_version.major >= 3:\n if \"content\" in data:\n for content_type in data[\"content\"]:\n schema = data[\"content\"][content_type][\"schema\"]\n data[\"content\"][content_type][\n \"schema\"\n ] = self.openapi.resolve_schema_dict(schema)", "def finalizeSchema(schema, folderish=False, moveDiscussion=True):\n schema.moveField('description', after='architects')\n return schema", "def downgrade():\n op.execute(\n f\"\"\"\n ALTER TABLE\n {config.CLEAN_SCHEMA}.forecasts\n RENAME COLUMN\n train_horizon\n TO\n training_horizon;\n \"\"\",\n ) # noqa:WPS355", "def clone_schema(base_schema_name: str, new_schema_name: str, dry_run: bool = False):\n check_schema_name(new_schema_name)\n cursor = connection.cursor()\n\n # check if the clone_schema function already exists in the db\n try:\n cursor.execute(\n \"SELECT 'public.clone_schema(text, text, public.cloneparms[])'::regprocedure\"\n )\n except ProgrammingError: # pragma: no cover\n _create_clone_schema_function()\n transaction.commit()\n\n try:\n with transaction.atomic():\n cursor.callproc(\"clone_schema\", [base_schema_name, new_schema_name, \"DATA\"])\n cursor.close()\n if dry_run:\n raise DryRunException\n except DryRunException:\n cursor.close()", "def regenerate_constraints(self):\n\n # Let us not forget to remove fields that migh be empty by now\n if hasattr(self, '_cons_kinds'):\n for k in self._cons_kinds:\n attrname = camel2underscores(k)\n try:\n delattr(self, attrname)\n except AttributeError:\n pass # The attribute may not have been set up yet\n\n _cons_kinds = defaultdict(DictList)\n\n for k, v in 
self._cons_dict.items():\n _cons_kinds[v.__class__.__name__].append(v)\n\n for k in _cons_kinds:\n attrname = camel2underscores(k)\n setattr(self, attrname, _cons_kinds[k])\n\n self._cons_kinds = _cons_kinds", "def add_cols(self, source) :\n\n cols = source.get_cols()\n types = source.get_types()\n\n new_cols = []\n new_types = []\n for i in range(len(cols)) :\n if cols[i] not in self.cols :\n new_cols.append(cols[i])\n new_types.append(types[i])\n self.cols.extend(new_cols)\n self.types.extend(new_types)\n\n self._alter_table(new_cols, new_types)\n\n row_ids = self.get_values('__ROWID')\n \n for col in new_cols :\n new_vals = source.get_values(col)\n if len(row_ids) == 0 :\n for val in new_vals :\n self._insert_internal(['__ROWID', col], [0, val])\n\n row_ids = self.get_values('__ROWID')\n\n else :\n binds = zip(new_vals, row_ids)\n q = self._quoter(col)\n sql_base = 'UPDATE \"%s\" SET \"%s\" = %s WHERE \"__ROWID\" = %%d' % (self.name, col, q)\n cur = self.con.cursor()\n for bind in binds :\n if bind[0] :\n update_sql = sql_base % (str(bind[0]), bind[1])\n cur.execute(update_sql)\n\n self.version += 1", "def upgrade_to_14():\n config.db.singletons.find_one_and_update(\n {'_id': 'config', 'persistent.schema_path': {'$exists': True}},\n {'$unset': {'persistent.schema_path': ''}})", "def upgrade_to_13():\n config.db.singletons.find_one_and_update(\n {'_id': 'config', 'persistent.schema_path': {'$exists': True}},\n {'$unset': {'persistent.schema_path': ''}})", "def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)", "def deferred_to_columns_cb(self, target, model, fields):\n table = model._meta.db_table\n if table not in target:\n target[table] = set()\n for field in fields:\n if not hasattr(field.column, \"columns\"):\n target[table].add(field.column)\n else:\n target[table].update(field.column.columns)", "def upgrade_to_6():\n\n colls = config.db.collections.find({'modified': {'$type': 2}}) # type string\n for c in colls:\n fixed_mod = dateutil.parser.parse(c['modified'])\n config.db.collections.update_one({'_id': c['_id']}, {'$set': {'modified': fixed_mod}})", "def _refactor_time_columns(write_cursor: 'DBCursor') -> None:\n log.debug('Enter _refactor_time_columns')\n write_cursor.execute('ALTER TABLE timed_balances RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE timed_location_data RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE trades RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE asset_movements RENAME COLUMN time TO timestamp')\n log.debug('Exit _refactor_time_columns')", "def repair(self):\n # self.add_cons_vars([x.constraint for x in self._cons_dict.values()])\n # self.add_cons_vars([x.variable for x in self._var_dict.values()])\n self._push_queue()\n Model.repair(self)\n self.regenerate_constraints()\n self.regenerate_variables()", "def strict_startup(self):\n self.load_up_initial_db(TIMESTAMP_PARSE_DICT)\n self.clean()\n self.add_numeric_cols()", "def upkeep(self) -> None:\n if self.atype in ['REINFORCE', 'A2C']:\n self._db.reset()", "def unmodify_schema(setup_path, names, lp, creds, reporter, ldif, msg):\n\n return deprovision_schema(setup_path, names, lp, creds, reporter, ldif, msg, True)", "def modify(self, fields=None, **fields_kwargs):\n modified_fields = set()\n fields = self.make_dict(fields, fields_kwargs)\n fields = self._modify(fields)\n for field_name, field_val in fields.items():\n in_schema = field_name in 
self.schema.fields\n if in_schema:\n setattr(self, field_name, field_val)\n modified_fields.add(field_name)\n\n return modified_fields", "def reset_modified(self):\n self.modified_fields = set()\n\n # compensate for us not having knowledge of certain fields changing\n for field_name, field in self.schema.normal_fields.items():\n if isinstance(field, ObjectField):\n self.modified_fields.add(field_name)", "def migrate(env):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' % backend)\n warnings.warn(message)\n else:\n getattr(registry, backend).initialize_schema()", "def ibis_schema_apply_to(schema, df):\n\n for column, dtype in schema.items():\n pandas_dtype = dtype.to_pandas()\n if isinstance(dtype, dt.Interval):\n df[column] = df[column].values.astype(pandas_dtype)\n else:\n df[column] = df[column].astype(pandas_dtype, errors='ignore')\n\n if PY2 and dtype == dt.string:\n df[column] = df[column].str.decode('utf-8', errors='ignore')\n\n return df", "def _rewrite_project(self, node: saldag.Project):\n\n selected_cols = node.selected_cols\n\n for in_col, out_col in zip(selected_cols, node.out_rel.columns):\n out_col.coll_sets |= copy.deepcopy(in_col.coll_sets)", "def update(self, schema: ISchema) -> ISchema:\n schema = super().update(schema)\n\n schema = cast(EventSchema, schema)\n if not schema.Active and schema.Sport > 0:\n self.select('ID', 'Sport', 'Active').filter(\n 'Sport', Operators.Equals, schema.Sport,\n ).filter('Active', Operators.Equals, 1)\n result = self.execute()\n if not result:\n sm = SportModel()\n sport = sm.find(schema.Sport)\n sport = cast(SportSchema, sport)\n sport.Active = False\n sm.update(sport)\n return schema", "def test_can_update_risk_type_schema(self):\n risk_type = self.create_risk_type()\n for r in risk_type.schema:\n if r['field_type'] == 'text':\n r['field_type'] = 'textarea'\n break\n\n response = self.client.put(\n f'/api/v0/risk-types/{risk_type.id}/',\n data={\n 'type_name': 'New Risk Type',\n 'schema': risk_type.schema,\n }, format='json')\n risk_type.refresh_from_db()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(risk_type.schema[0].get('field_type'), 'textarea')", "def upgrade():\n op.add_column(\n 'assessments',\n sa.Column(\n 'assessment_type',\n sa.String(length=250),\n nullable=False,\n server_default=\"Control\",\n )\n )\n # Change CA help text \"Assessment type\" to \"Assessment Category\"\n op.execute(\n 'UPDATE custom_attribute_definitions '\n 'SET helptext = \"Assessment Category\" '\n 'WHERE helptext = \"Assessment type\" '\n 'AND definition_type = \"assessment\" AND title = \"Type\";'\n )", "def migrate(env, dry_run=False):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' 
% backend)\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)", "def _prepare_schema(self):\n schema = DaskSchema(self.schema_name)\n\n if not self.tables:\n logger.warning(\"No tables are registered.\")\n\n for name, dc in self.tables.items():\n table = DaskTable(name)\n df = dc.df\n logger.debug(\n f\"Adding table '{name}' to schema with columns: {list(df.columns)}\"\n )\n for column in df.columns:\n data_type = df[column].dtype\n sql_data_type = python_to_sql_type(data_type)\n\n table.addColumn(column, sql_data_type)\n\n schema.addTable(table)\n\n if not self.functions:\n logger.debug(\"No custom functions defined.\")\n\n for function_description in self.function_list:\n name = function_description.name\n sql_return_type = python_to_sql_type(function_description.return_type)\n if function_description.aggregation:\n logger.debug(f\"Adding function '{name}' to schema as aggregation.\")\n dask_function = DaskAggregateFunction(name, sql_return_type)\n else:\n logger.debug(f\"Adding function '{name}' to schema as scalar function.\")\n dask_function = DaskScalarFunction(name, sql_return_type)\n\n dask_function = self._add_parameters_from_description(\n function_description, dask_function\n )\n\n schema.addFunction(dask_function)\n\n return schema", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def _create_clone_schema_function():\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"clone_schema.sql\")) as f:\n CLONE_SCHEMA_FUNCTION = (\n f.read()\n .replace(\"RAISE NOTICE ' source schema\", \"RAISE EXCEPTION ' source schema\")\n .replace(\"RAISE NOTICE ' dest schema\", \"RAISE EXCEPTION ' dest schema\")\n )\n\n cursor = connection.cursor()\n cursor.execute(CLONE_SCHEMA_FUNCTION)\n cursor.close()", "def test_compare_schemas_major(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.major_removed_value\n )\n\n assert status == schema_utils.Update.major", "def extend_schema(schema, documentAST=None):\n\n assert isinstance(schema, GraphQLSchema), \"Must provide valid GraphQLSchema\"\n assert documentAST and isinstance(\n documentAST, ast.Document\n ), \"Must provide valid Document AST\"\n\n # Collect the type definitions and extensions found in the document.\n type_definition_map = {}\n type_extensions_map = defaultdict(list)\n\n for _def in documentAST.definitions:\n if isinstance(\n _def,\n (\n ast.ObjectTypeDefinition,\n ast.InterfaceTypeDefinition,\n ast.EnumTypeDefinition,\n ast.UnionTypeDefinition,\n ast.ScalarTypeDefinition,\n ast.InputObjectTypeDefinition,\n ),\n ):\n # Sanity check that none of the defined types conflict with the\n # schema's existing types.\n type_name = _def.name.value\n if schema.get_type(type_name):\n raise GraphQLError(\n (\n 'Type \"{}\" already exists in the schema. 
It cannot also '\n + \"be defined in this type definition.\"\n ).format(type_name),\n [_def],\n )\n\n type_definition_map[type_name] = _def\n elif isinstance(_def, ast.TypeExtensionDefinition):\n # Sanity check that this type extension exists within the\n # schema's existing types.\n extended_type_name = _def.definition.name.value\n existing_type = schema.get_type(extended_type_name)\n if not existing_type:\n raise GraphQLError(\n (\n 'Cannot extend type \"{}\" because it does not '\n + \"exist in the existing schema.\"\n ).format(extended_type_name),\n [_def.definition],\n )\n if not isinstance(existing_type, GraphQLObjectType):\n raise GraphQLError(\n 'Cannot extend non-object type \"{}\".'.format(extended_type_name),\n [_def.definition],\n )\n\n type_extensions_map[extended_type_name].append(_def)\n\n # Below are functions used for producing this schema that have closed over\n # this scope and have access to the schema, cache, and newly defined types.\n\n def get_type_from_def(type_def):\n type = _get_named_type(type_def.name)\n assert type, \"Invalid schema\"\n return type\n\n def get_type_from_AST(astNode):\n type = _get_named_type(astNode.name.value)\n if not type:\n raise GraphQLError(\n (\n 'Unknown type: \"{}\". Ensure that this type exists '\n + \"either in the original schema, or is added in a type definition.\"\n ).format(astNode.name.value),\n [astNode],\n )\n return type\n\n # Given a name, returns a type from either the existing schema or an\n # added type.\n def _get_named_type(typeName):\n cached_type_def = type_def_cache.get(typeName)\n if cached_type_def:\n return cached_type_def\n\n existing_type = schema.get_type(typeName)\n if existing_type:\n type_def = extend_type(existing_type)\n type_def_cache[typeName] = type_def\n return type_def\n\n type_ast = type_definition_map.get(typeName)\n if type_ast:\n type_def = build_type(type_ast)\n type_def_cache[typeName] = type_def\n return type_def\n\n # Given a type's introspection result, construct the correct\n # GraphQLType instance.\n def extend_type(type):\n if isinstance(type, GraphQLObjectType):\n return extend_object_type(type)\n if isinstance(type, GraphQLInterfaceType):\n return extend_interface_type(type)\n if isinstance(type, GraphQLUnionType):\n return extend_union_type(type)\n return type\n\n def extend_object_type(type):\n return GraphQLObjectType(\n name=type.name,\n description=type.description,\n interfaces=lambda: extend_implemented_interfaces(type),\n fields=lambda: extend_field_map(type),\n )\n\n def extend_interface_type(type):\n return GraphQLInterfaceType(\n name=type.name,\n description=type.description,\n fields=lambda: extend_field_map(type),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_union_type(type):\n return GraphQLUnionType(\n name=type.name,\n description=type.description,\n types=list(map(get_type_from_def, type.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_implemented_interfaces(type):\n interfaces = list(map(get_type_from_def, type.interfaces))\n\n # If there are any extensions to the interfaces, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for namedType in extension.definition.interfaces:\n interface_name = namedType.name.value\n if any([_def.name == interface_name for _def in interfaces]):\n raise GraphQLError(\n (\n 'Type \"{}\" already implements \"{}\". 
'\n + \"It cannot also be implemented in this type extension.\"\n ).format(type.name, interface_name),\n [namedType],\n )\n interfaces.append(get_type_from_AST(namedType))\n\n return interfaces\n\n def extend_field_map(type):\n new_field_map = OrderedDict()\n old_field_map = type.fields\n for field_name, field in old_field_map.items():\n new_field_map[field_name] = GraphQLField(\n extend_field_type(field.type),\n description=field.description,\n deprecation_reason=field.deprecation_reason,\n args=field.args,\n resolver=cannot_execute_client_schema,\n )\n\n # If there are any extensions to the fields, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for field in extension.definition.fields:\n field_name = field.name.value\n if field_name in old_field_map:\n raise GraphQLError(\n (\n 'Field \"{}.{}\" already exists in the '\n + \"schema. It cannot also be defined in this type extension.\"\n ).format(type.name, field_name),\n [field],\n )\n new_field_map[field_name] = GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n\n return new_field_map\n\n def extend_field_type(type):\n if isinstance(type, GraphQLList):\n return GraphQLList(extend_field_type(type.of_type))\n if isinstance(type, GraphQLNonNull):\n return GraphQLNonNull(extend_field_type(type.of_type))\n return get_type_from_def(type)\n\n def build_type(type_ast):\n _type_build = {\n ast.ObjectTypeDefinition: build_object_type,\n ast.InterfaceTypeDefinition: build_interface_type,\n ast.UnionTypeDefinition: build_union_type,\n ast.ScalarTypeDefinition: build_scalar_type,\n ast.EnumTypeDefinition: build_enum_type,\n ast.InputObjectTypeDefinition: build_input_object_type,\n }\n func = _type_build.get(type(type_ast))\n if func:\n return func(type_ast)\n\n def build_object_type(type_ast):\n return GraphQLObjectType(\n type_ast.name.value,\n interfaces=lambda: build_implemented_interfaces(type_ast),\n fields=lambda: build_field_map(type_ast),\n )\n\n def build_interface_type(type_ast):\n return GraphQLInterfaceType(\n type_ast.name.value,\n fields=lambda: build_field_map(type_ast),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_union_type(type_ast):\n return GraphQLUnionType(\n type_ast.name.value,\n types=list(map(get_type_from_AST, type_ast.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_scalar_type(type_ast):\n return GraphQLScalarType(\n type_ast.name.value,\n serialize=lambda *args, **kwargs: None,\n # Note: validation calls the parse functions to determine if a\n # literal value is correct. Returning null would cause use of custom\n # scalars to always fail validation. 
Returning false causes them to\n # always pass validation.\n parse_value=lambda *args, **kwargs: False,\n parse_literal=lambda *args, **kwargs: False,\n )\n\n def build_enum_type(type_ast):\n return GraphQLEnumType(\n type_ast.name.value,\n values={v.name.value: GraphQLEnumValue() for v in type_ast.values},\n )\n\n def build_input_object_type(type_ast):\n return GraphQLInputObjectType(\n type_ast.name.value,\n fields=lambda: build_input_values(type_ast.fields, GraphQLInputObjectField),\n )\n\n def build_implemented_interfaces(type_ast):\n return list(map(get_type_from_AST, type_ast.interfaces))\n\n def build_field_map(type_ast):\n return {\n field.name.value: GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n for field in type_ast.fields\n }\n\n def build_input_values(values, input_type=GraphQLArgument):\n input_values = OrderedDict()\n for value in values:\n type = build_field_type(value.type)\n input_values[value.name.value] = input_type(\n type, default_value=value_from_ast(value.default_value, type)\n )\n return input_values\n\n def build_field_type(type_ast):\n if isinstance(type_ast, ast.ListType):\n return GraphQLList(build_field_type(type_ast.type))\n if isinstance(type_ast, ast.NonNullType):\n return GraphQLNonNull(build_field_type(type_ast.type))\n return get_type_from_AST(type_ast)\n\n # If this document contains no new types, then return the same unmodified\n # GraphQLSchema instance.\n if not type_extensions_map and not type_definition_map:\n return schema\n\n # A cache to use to store the actual GraphQLType definition objects by name.\n # Initialize to the GraphQL built in scalars and introspection types. All\n # functions below are inline so that this type def cache is within the scope\n # of the closure.\n\n type_def_cache = {\n \"String\": GraphQLString,\n \"Int\": GraphQLInt,\n \"Float\": GraphQLFloat,\n \"Boolean\": GraphQLBoolean,\n \"ID\": GraphQLID,\n \"__Schema\": __Schema,\n \"__Directive\": __Directive,\n \"__DirectiveLocation\": __DirectiveLocation,\n \"__Type\": __Type,\n \"__Field\": __Field,\n \"__InputValue\": __InputValue,\n \"__EnumValue\": __EnumValue,\n \"__TypeKind\": __TypeKind,\n }\n\n # Get the root Query, Mutation, and Subscription types.\n query_type = get_type_from_def(schema.get_query_type())\n\n existing_mutation_type = schema.get_mutation_type()\n mutationType = (\n existing_mutation_type and get_type_from_def(existing_mutation_type) or None\n )\n\n existing_subscription_type = schema.get_subscription_type()\n subscription_type = (\n existing_subscription_type\n and get_type_from_def(existing_subscription_type)\n or None\n )\n\n # Iterate through all types, getting the type definition for each, ensuring\n # that any type not directly referenced by a field will get created.\n types = [get_type_from_def(_def) for _def in schema.get_type_map().values()]\n\n # Do the same with new types, appending to the list of defined types.\n types += [get_type_from_AST(_def) for _def in type_definition_map.values()]\n\n # Then produce and return a Schema with these types.\n return GraphQLSchema(\n query=query_type,\n mutation=mutationType,\n subscription=subscription_type,\n # Copy directives.\n directives=schema.get_directives(),\n types=types,\n )", "def _swapcolumns(self):\n return self.reindex_axis([self.columns[1], self.columns[0]], axis=1)", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n 
columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))", "async def reload_database(self, schema='conf/schema.sql'):\n with open(schema) as schema:\n await self.dao.build((schema.read()))", "def upgrade():\n op.execute(\n \"\"\"\n insert ignore into relationships (\n modified_by_id,\n created_at,\n updated_at,\n source_id,\n source_type,\n destination_id,\n destination_type\n )\n select\n modified_by_id,\n created_at,\n updated_at,\n parent_id as source_id,\n parent_type as source_type,\n child_id as destination_id,\n child_type as destination_type\n from snapshots\n \"\"\"\n )", "def upgrade_to_py3(self):\n # Rewrite database metadata pickles to force to version 3.\n for name, (id, attrs, idx) in self._object_types.items():\n self._db_query(\"UPDATE types SET attrs_pickle=?, idx_pickle=? WHERE id=?\", (self._pickle(attrs), self._pickle(idx), id))\n for name, ivtidx in self._inverted_indexes.items():\n self._db_query(\"UPDATE inverted_indexes SET value=? WHERE name=? AND attr=?\", (self._pickle(ivtidx), name, 'definition'))\n self.commit()\n self._readonly = False" ]
[ "0.7231359", "0.7089279", "0.65710247", "0.6482866", "0.64562935", "0.61638325", "0.61030746", "0.5923907", "0.5808623", "0.5721755", "0.56536114", "0.56280774", "0.56276286", "0.5590315", "0.5581879", "0.55440784", "0.5543209", "0.55420613", "0.5536027", "0.55306125", "0.55029225", "0.54575", "0.5431088", "0.54255265", "0.5394251", "0.5375789", "0.5367753", "0.5357313", "0.5336318", "0.5328095", "0.5278523", "0.52771056", "0.52656645", "0.52656645", "0.52656645", "0.52209383", "0.5218055", "0.52002794", "0.51970035", "0.51930255", "0.51924634", "0.5184683", "0.51577127", "0.5151599", "0.51475465", "0.51405257", "0.5139874", "0.513891", "0.51383835", "0.5136204", "0.5116184", "0.50896347", "0.508401", "0.5082808", "0.50731707", "0.5038522", "0.49948877", "0.49718288", "0.4968792", "0.49658614", "0.49541888", "0.49411947", "0.49195436", "0.49037525", "0.49031138", "0.48992842", "0.4892197", "0.48897704", "0.48810467", "0.48694217", "0.486147", "0.4859146", "0.48515552", "0.483281", "0.4822995", "0.4821257", "0.48161238", "0.48121506", "0.48110282", "0.48067328", "0.48044786", "0.48014674", "0.4797127", "0.47816378", "0.47792447", "0.47722372", "0.47709516", "0.47657466", "0.47617412", "0.4757309", "0.47540647", "0.47444832", "0.47422335", "0.47422227", "0.47393706", "0.47351882", "0.47344276", "0.47135434", "0.47040495", "0.46960893" ]
0.6559085
3
Change mode to `NULLABLE` for columns in an existing schema. Tries to set the mode of existing columns in orig_schema_map to `NULLABLE`. Raises SchemaUpdateError if a column is not found in orig_schema_map.
def _GetRelaxedCols(relaxed_columns, orig_schema_map):
    updated_schema_map = orig_schema_map.copy()
    for col in relaxed_columns:
        if col in orig_schema_map:
            updated_schema_map[col].mode = 'NULLABLE'
        else:
            raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
    return updated_schema_map
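A minimal usage sketch for the function above. The _Field class, SchemaUpdateError, and _INVALID_SCHEMA_UPDATE_MESSAGE below are hypothetical stand-ins for the real BigQuery field messages and error constants, and are assumed to be defined in the same module as _GetRelaxedCols:

class _Field:
    """Hypothetical stand-in for a BigQuery TableFieldSchema message."""
    def __init__(self, name, field_type, mode='REQUIRED'):
        self.name = name
        self.field_type = field_type
        self.mode = mode

class SchemaUpdateError(Exception):
    """Stand-in for the error raised on an invalid schema update."""

_INVALID_SCHEMA_UPDATE_MESSAGE = 'Invalid schema update: column not found'

orig_schema_map = {
    'id': _Field('id', 'INTEGER'),
    'name': _Field('name', 'STRING'),
}

updated = _GetRelaxedCols(['name'], orig_schema_map)
print(updated['name'].mode)  # NULLABLE
print(updated['id'].mode)    # REQUIRED (not in relaxed_columns, so left alone)

# Relaxing a column that is absent from orig_schema_map raises:
# _GetRelaxedCols(['missing'], orig_schema_map)  # SchemaUpdateError

Note that dict.copy() is a shallow copy, so the relaxed field objects in the returned map are the same objects held by orig_schema_map.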
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['status']\n new_mode = new_info['mode']\n new_name = new_info['name']\n new_type = new_info['type']\n new_status = new_schema_entry['status']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # If the old field is a REQUIRED primitive (which could only have come\n # from an existing schema), the new field can be either a\n # NULLABLE(filled) or a NULLABLE(unfilled).\n if old_mode == 'REQUIRED' and new_mode == 'NULLABLE':\n # If the new field is filled, then retain the REQUIRED.\n if new_schema_entry['filled']:\n return old_mode\n else:\n # The new field is not filled (i.e. an empty or null field).\n # If --infer_mode is active, then we allow the REQUIRED to\n # revert back to NULLABLE.\n if self.infer_mode:\n return new_mode\n else:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode.'\n ' cannot convert to NULLABLE because infer_schema not'\n ' set:'\n f' old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n elif old_mode != new_mode:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode: '\n f'old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n return old_mode", "def nullable(self):\n _columns = []\n if not isinstance(self._last_column, list):\n _columns = [self._last_column]\n\n for column in _columns:\n column.nullable()\n return self", "def filter_schema(schema):\n for column, column_schema in schema.iteritems():\n if column_schema.get(CARDINALITY):\n del column_schema[CARDINALITY]\n schema[column] = column_schema\n\n return schema", "def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return 
new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. 
We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry", "def remove_null_fields(self):\n with open(self.schema_path, 'r') as file_obj:\n schema_data = yaml.safe_load(file_obj)\n schema_fields = schema_data.get('mapping').keys()\n for field in schema_fields:\n # We want to keep 'false' and 0 values, and avoid removing fields that are required in the schema.\n if field in self.data and self.data[field] in (None, '', [], {}) and \\\n not schema_data.get('mapping', {}).get(field, {}).get('required'):\n self.data.pop(field)", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "def drop_null(self, how: Literal[\"any\", \"all\"] = \"any\"):\n # TODO only flat columns supported...\n assert self._dtype is not None\n res = Scope._EmptyColumn(self._dtype.constructor(nullable=False))\n if how == \"any\":\n for i in self:\n if not self._has_any_null(i):\n res._append(i)\n elif how == \"all\":\n for i in self:\n if not self._has_all_null(i):\n res._append(i)\n return res._finalize()", "def correct_db_schema_precision(\n instance: Recorder,\n table_object: type[DeclarativeBase],\n schema_errors: set[str],\n) -> None:\n table_name = table_object.__tablename__\n\n if f\"{table_name}.double precision\" in schema_errors:\n from ..migration import ( # pylint: disable=import-outside-toplevel\n _modify_columns,\n )\n\n precision_columns = _get_precision_column_types(table_object)\n # Attempt to convert timestamp columns to µs precision\n session_maker = instance.get_session\n engine = instance.engine\n assert engine is not None, \"Engine should be set\"\n _modify_columns(\n session_maker,\n engine,\n table_name,\n [f\"{column} {DOUBLE_PRECISION_TYPE_SQL}\" for column in precision_columns],\n )", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def is_nullable(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return schema_obj.is_nullable\n return False", "def setNullAllowed(self, onlyNullAllowed):\n self._onlyNullAllowed = onlyNullAllowed", "def nullify(self):\n 
self._original_values.clear()\n self._modified_values.clear()\n self._extra_record_data.clear()\n self._references.clear()\n for mtm in self._mtm_referencelist:\n self._mtm_referencelist[mtm].parentobjectid = None\n for chl in self._child_referencelist:\n self._child_referencelist[chl].clear() \n self._ismodified = False\n self._hasdata = False\n self._astxt = \"(null)\"\n if self._table: \n for f in self._table:\n self._original_values[f.name] = None", "def set_sql_mode(self):\n self.execute_sql(\n sql.set_session_variable(\"sql_mode\"),\n (\"STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO\",),\n )", "def is_nullable(self) -> bool: # pragma: no cover\n pass", "def _AddNewColsToSchema(new_fields, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for new_field in new_fields:\n if new_field.name in orig_schema_map:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n updated_schema_map[new_field.name] = new_field\n return updated_schema_map", "def fillna_mode(data, columns, verbose=True):\n for col in columns:\n fill_val = data[col].mode()[0]\n if verbose: print('Filling ' + col + ' with: ' + fill_val)\n data[col].fillna(fill_val, inplace=True)", "def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)", "def _copy_custom_attributes(self, column):\n\n column._fk = self._fk\n column._fk_on_update = self._fk_on_update\n column._fk_on_delete = self._fk_on_delete\n\n super()._copy_custom_attributes(column)", "async def upgradeSchema(self) -> None:", "def _GetUpdatedSchema(\n original_schema,\n new_columns=None,\n relaxed_columns=None):\n orig_field_map = (\n {f.name: f for f in original_schema.fields} if original_schema else {})\n\n if relaxed_columns:\n orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map)\n\n if new_columns:\n orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map)\n\n return sorted(orig_field_map.values(), key=lambda x: x.name)", "def reset_schema_defs():\n SCHEMA_DEFS.clear()\n SCHEMA_DEFS.update((typ, typ) for typ in PRIMITIVE_TYPES)", "def replace(self, dictionary):\n for column in self.__table__.columns.keys():\n setattr(self, column, None)\n self.from_dict(dictionary)", "def _fillna_meta_cols(self):\n for col_name, fill_value in self._fillna.items():\n if col_name in self._hybrid_meta.columns:\n self._hybrid_meta[col_name].fillna(fill_value, inplace=True)\n else:\n self.__warn_missing_col(col_name, action='fill')\n\n self._hybrid_meta[self.__solar_rpi_n].fillna(-1, inplace=True)\n self._hybrid_meta[self.__wind_rpi_n].fillna(-1, inplace=True)", "def update_records(cursor,table_schema,table_name,column_name,value):\n update_records = \"UPDATE 
\" + table_schema + \".\" + table_name + \" SET \" + column_name + \"='\" + value + \"' WHERE COALESCE(\" + column_name + \",'')='';\"\n cursor.execute(update_records)", "def _set_editable_mode(self):\n dist = self.distribution\n build = dist.get_command_obj(\"build\")\n for cmd_name in build.get_sub_commands():\n cmd = dist.get_command_obj(cmd_name)\n if hasattr(cmd, \"editable_mode\"):\n cmd.editable_mode = True\n elif hasattr(cmd, \"inplace\"):\n cmd.inplace = True # backward compatibility with distutils", "def test_null_update_deletes_column(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, None if i == 3 else str(i))", "def update(self, force=False):\n if (self.__fields is None) or force:\n self.__fields = [f for f in self.dbms.fields(self.table, self.db)]", "def _encode_nullable(data_type, obj, alias_validators, old_style, for_msgpack):\n if obj is not None:\n return _json_compat_obj_encode_helper(\n data_type.validator, obj, alias_validators, old_style, for_msgpack)\n else:\n return None", "def reinitToNull(self):\n for sAttr in self.getDataAttributes():\n setattr(self, sAttr, None);\n return self;", "def set_null(self, /, *defaults: Any, **kwargs: Any) -> \"fn\":\n return self._mod.set_null(self._func, *defaults, **kwargs)", "def get_nullable():\n changes = True\n while changes:\n changes = False\n for p in PRODUCTION_LIST:\n if not symbol_for_str(p.left).is_nullable:\n if p.right[0] == 'null':\n symbol_for_str(p.left).is_nullable = True\n changes = True\n continue\n else:\n right_is_nullable = symbol_for_str(p.right[0]).is_nullable\n # For X -> Y1 ... YN, Nullable(X) = Nullable(Y1) &\n # Nullable(Y2) ... 
& Nullable(YN)\n for r in p.right[1:]:\n if r.startswith('P'):\n continue\n right_is_nullable = right_is_nullable & symbol_for_str(\n r).is_nullable\n\n if right_is_nullable:\n changes = True\n symbol_for_str(p.left).is_nullable = True", "def __null_check(self, record_attribute, attribute_schema):\n if attribute_schema[NULLABLE_KEY]:\n return True\n elif record_attribute is not None:\n return True\n else:\n IS_VALID_FILE = False\n return False", "def _prepend_none_dimension(features):\n if features:\n modified_features = dict(features) # Create a copy to modify\n for key, feature in features.items():\n if isinstance(feature, FixedLenSequenceFeature):\n if not feature.allow_missing:\n raise ValueError(\"Unsupported: FixedLenSequenceFeature requires \"\n \"allow_missing to be True.\")\n modified_features[key] = FixedLenSequenceFeature(\n [None] + list(feature.shape),\n feature.dtype,\n feature.allow_missing,\n feature.default_value)\n return modified_features\n else:\n return features", "def add_schema_attribute(self):\n schema_id = self.file.options['schema_id_attr']\n if self.sdef['df'] and self.file.options['include_schema_id']:\n # Normal defined entity\n ns = self.sdef['ns']\n id = self.sdef['id']\n schema = ns + \":\" + id\n self.attributes[schema_id] = {'value': schema}\n elif self.file.options['flag_custom_nodes']:\n self.attributes[schema_id] = {'value': 'custom'}", "def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))", "def fix_incremental(meta, bind):\n meta.create_all(bind=bind, checkfirst=True)\n ref = inspect(bind)\n for table in meta.sorted_tables:\n orm_cols = set(col.name for col in table.c)\n ref_cols = set(col['name'] for col in ref.get_columns(table.name))\n col_to_create = orm_cols - ref_cols\n col_to_delete = ref_cols - orm_cols\n if col_to_create:\n print table.name, 'has diff to create', col_to_create\n with bind.begin() as conn:\n for col_name in col_to_create:\n col = table.c.get(col_name)\n column_sql = CreateColumn(col).compile(bind).string\n sql = 'ALTER TABLE {} ADD COLUMN {}'.format(table.name, column_sql)\n if col.default:\n sql += ' DEFAULT {!r}'.format(col.default.arg) # can break when a pickle type has callable default.\n if not col.nullable:\n sql += ' NOT NULL'\n print 'executing sql: ' + sql\n conn.execute(sql)\n\n # Workaround to ensure updated DBs start with \"False\" in ignore column\n if list(col_to_create)[0] == 'ignore':\n sessionmaker = get_sessionmaker(bind.url.database)\n session = sessionmaker()\n query_object = {'dttrialdff0s': DTTrialDff0, 'trials': Trial}[table.name]\n items = session.query(query_object).all()\n for item in items:\n item.ignore = False\n session.flush()\n\n if col_to_delete:\n print table.name, 'has diff to delete', col_to_delete, 'maybe later version.'\n \"\"\"\n BEGIN TRANSACTION;\n CREATE TEMPORARY TABLE t1_backup(a,b);\n INSERT INTO t1_backup SELECT a,b FROM t1;\n DROP TABLE t1;\n CREATE TABLE t1(a,b);\n INSERT INTO t1 SELECT a,b FROM t1_backup;\n DROP TABLE t1_backup;\n COMMIT;\n \"\"\"", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def reformat(self, newformat):\n # check whether the column is defined\n if self._defined:\n # get the appropriate null-format\n nullformat = self._get_nullformat(newformat)\n # set the new formats\n self._format = [newformat, nullformat]\n else:\n # first the column type must be defined\n raise 
Exception('The data type of this column is not yet defined!')", "def _copy_custom_attributes(self, column):\n\n column.min_length = self.min_length\n column.max_length = self.max_length\n column.allow_blank = self.allow_blank\n column.allow_whitespace = self.allow_whitespace\n\n super()._copy_custom_attributes(column)", "def allows_auto_pk_0(self):\n return \"NO_AUTO_VALUE_ON_ZERO\" in self.connection.sql_mode", "def correct_db_schema_utf8(\n instance: Recorder, table_object: type[DeclarativeBase], schema_errors: set[str]\n) -> None:\n table_name = table_object.__tablename__\n if (\n f\"{table_name}.4-byte UTF-8\" in schema_errors\n or f\"{table_name}.utf8mb4_unicode_ci\" in schema_errors\n ):\n from ..migration import ( # pylint: disable=import-outside-toplevel\n _correct_table_character_set_and_collation,\n )\n\n _correct_table_character_set_and_collation(table_name, instance.get_session)", "def apply_patch():\n assert BaseDatabaseSchemaEditor is not None\n\n def _create_unique_sql(self, *args, **kwargs):\n from django.db.backends.ddl_references import IndexName\n\n statement = orig_create_unique_sql(self, *args, **kwargs)\n\n if statement is not None:\n index_name = statement.parts['name']\n\n if (isinstance(index_name, IndexName) and\n index_name.create_index_name == self._create_index_name):\n # The result will be unquoted. Let's quote it.\n index_name.create_index_name = lambda *args, **kwargs: \\\n self.quote_name(self._create_index_name(*args, **kwargs))\n\n return statement\n\n orig_create_unique_sql = BaseDatabaseSchemaEditor._create_unique_sql\n BaseDatabaseSchemaEditor._create_unique_sql = _create_unique_sql", "def to_json_schema(\n schema: Dict[str, Any], *, nullable_name: str, copy: bool = True, is_response_schema: bool = False\n) -> Dict[str, Any]:\n if copy:\n schema = fast_deepcopy(schema)\n if schema.get(nullable_name) is True:\n del schema[nullable_name]\n schema = {\"anyOf\": [schema, {\"type\": \"null\"}]}\n schema_type = schema.get(\"type\")\n if schema_type == \"file\":\n schema[\"type\"] = \"string\"\n schema[\"format\"] = \"binary\"\n if schema_type == \"object\":\n if is_response_schema:\n # Write-only properties should not occur in responses\n rewrite_properties(schema, is_write_only)\n else:\n # Read-only properties should not occur in requests\n rewrite_properties(schema, is_read_only)\n return schema", "def test_build_schema_no_update(self):\n metadata = schema_utils.build_metadata(\n self.dummy_schema,\n self.fake_metadata,\n schema_utils.Update.no_update,\n )\n assert metadata == self.fake_metadata", "def test_columns_set_to_all_columns_when_none(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=None)\n\n x.columns_set_or_check(X=df)\n\n h.assert_equal_dispatch(\n expected=list(df.columns.values),\n actual=x.columns,\n msg=\"x.columns set when None\",\n )", "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema 
only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))", "def initialize_schema(self, dry_run=False):\n if not dry_run:\n self.flush()", "def _decode_nullable(\n data_type, obj, alias_validators, strict, old_style, for_msgpack):\n if obj is not None:\n return _json_compat_obj_decode_helper(\n data_type.validator, obj, alias_validators, strict, old_style,\n for_msgpack)\n else:\n return None", "def _parse_fixed_length_data(self, original_record, column, null_table):\n column_name = column.col_name_str\n # Boolean fields are encoded in the null table\n if column.type == TYPE_BOOLEAN:\n if column.column_id > len(null_table):\n logging.error(f\"Failed to parse bool field, Column not found in null_table column: {column_name} ,\"\n f\" column id: {column.column_id} , null_table: {null_table}\")\n return\n\n parsed_type = null_table[column.column_id]\n else:\n\n if column.fixed_offset > len(original_record):\n logging.error(f\"Column offset is bigger than the length of the record {column.fixed_offset}\")\n return\n record = original_record[column.fixed_offset:]\n parsed_type = parse_type(column.type, record, version=self.version)\n\n if not null_table[column.column_id] and column.type != TYPE_BOOLEAN:\n self.parsed_table[column_name].append(\"\")\n else:\n self.parsed_table[column_name].append(parsed_type)", "def rev_ensure_column(v,initial_shape):\n if initial_shape: # check that the tuple is nonempty\n v.shape = initial_shape \n \n return v", "def test_null_update_deletes_column(self):\r\n partition = uuid4()\r\n for i in range(5):\r\n TestQueryUpdateModel.create(partition=partition, cluster=i, 
count=i, text=str(i))\r\n\r\n # sanity check\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == str(i)\r\n\r\n # perform update\r\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\r\n\r\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\r\n assert row.cluster == i\r\n assert row.count == i\r\n assert row.text == (None if i == 3 else str(i))", "def test_update_risk_field_to_null(self, field, field_name):\n risk = factories.RiskFactory()\n\n response = self.api.put(risk, risk.id, {\n field: None,\n })\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertIsNotNone(risk.external_id)", "def _base_schema(self, data_schema: Callable[[bool], StructType]) -> StructType:\n return StructType([\n StructField(\"id\", StringType(), False),\n StructField(\"op\", StringType(), False),\n StructField(\"ts\", LongType(), False),\n StructField(\"data\", data_schema(False), True),\n StructField(\"set\", data_schema(True), True),\n ])", "def flatten_schema_map(\n schema_map,\n keep_nulls=False,\n sorted_schema=True,\n infer_mode=False,\n input_format='json',\n):\n if not isinstance(schema_map, dict):\n raise Exception(\n f\"Unexpected type '{type(schema_map)}' for schema_map\"\n )\n\n # Build the BigQuery schema from the internal 'schema_map'.\n schema = []\n map_items = sorted(schema_map.items()) if sorted_schema \\\n else schema_map.items()\n for name, meta in map_items:\n # Skip over fields which have been explicitly removed\n if not meta:\n continue\n\n status = meta['status']\n filled = meta['filled']\n info = meta['info']\n\n # Schema entries with a status of 'soft' are caused by 'null' or\n # empty fields. Don't print those out if the 'keep_nulls' flag is\n # False.\n if status == 'soft' and not keep_nulls:\n continue\n\n # Copy the 'info' dictionary into the schema dict, preserving the\n # ordering of the 'field', 'mode', 'name', 'type' elements. 'bq load'\n # keeps these sorted, so we created them in sorted order using an\n # OrderedDict, so they should preserve order here too.\n new_info = OrderedDict()\n for key, value in info.items():\n if key == 'fields':\n if not value:\n # Create a dummy attribute for an empty RECORD to make\n # the BigQuery importer happy.\n new_value = [\n OrderedDict([\n ('mode', 'NULLABLE'),\n ('name', '__unknown__'),\n ('type', 'STRING'),\n ])\n ]\n else:\n # Recursively flatten the sub-fields of a RECORD entry.\n new_value = flatten_schema_map(\n schema_map=value,\n keep_nulls=keep_nulls,\n sorted_schema=sorted_schema,\n infer_mode=infer_mode,\n input_format=input_format\n )\n elif key == 'type' and value in ['QINTEGER', 'QFLOAT', 'QBOOLEAN']:\n # Convert QINTEGER -> INTEGER, similarly for QFLOAT and QBOOLEAN\n new_value = value[1:]\n elif key == 'mode':\n # 'infer_mode' to set a field as REQUIRED is supported for only\n # input_format = 'csv' because the header line gives us the\n # complete list of fields to be expected in the CSV file. In\n # JSON data files, certain fields will often be completely\n # missing instead of being set to 'null' or \"\". 
If the field is\n # not even present, then it becomes incredibly difficult (not\n # impossible, but more effort than I want to expend right now)\n # to figure out which fields are missing so that we can mark the\n # appropriate schema entries with 'filled=False'.\n #\n # The --infer_mode option is activated only for\n # input_format == 'csv' in this function, which allows us to\n # overload the --infer_mode flag to mean that a REQUIRED mode of\n # an existing schema can transition to a NULLABLE mode.\n if (infer_mode and value == 'NULLABLE' and filled\n and input_format == 'csv'):\n new_value = 'REQUIRED'\n else:\n new_value = value\n else:\n new_value = value\n new_info[key] = new_value\n schema.append(new_info)\n return schema", "def is_field_nullable(\n nullable: Optional[bool],\n default: Any,\n server_default: Any,\n pydantic_only: Optional[bool],\n) -> bool:\n if nullable is None:\n return (\n default is not None\n or server_default is not None\n or (pydantic_only is not None and pydantic_only)\n )\n return nullable", "def update_writables(self, record_id, fields, typecast=False):\n \n cols = self.writable_columns\n cols_dict = { col['name']: col for col in cols }\n writable_fields = { k : v for k, v in fields.items() if k in cols_dict}\n \n Airtable.update(self, record_id, writable_fields, typecast)", "def before_update(mapper, conn, target):\n if isinstance(target, Column):\n raise TypeError('Got a column instead of a table')\n\n if target.id_ is None:\n dataset_id = ObjectNumber.parse(target.d_id)\n target.id_ = str(TableNumber(dataset_id, target.sequence_id))", "def _is_nullable(self) -> bool:\n return self.__nullable", "def set_schema(self, schema):\r\n self.__schema = schema", "def _set_default_edit_fields(self):\n default_edit_fields = []\n\n for col in self.table.get_column_names():\n req = False\n if col[-3:].lower() == '_id':\n # foreign key\n req = True\n \n dict_template = self._get_field_list_dict()\n \n dict_template['name'] = '{}'.format(col)\n dict_template['label'] = '{}'.format(col).replace('_',' ').title()\n dict_template['type'] = 'text'\n try:\n dict_template['type'] = '{}'.format(self.table.get_column_type(col))\n except KeyError:\n pass\n dict_template['req'] = req\n \n default_edit_fields.append(dict_template)\n \n return default_edit_fields", "def set_schema(self, schema, set_num_columns=True):\n update_dict = {self.SCHEMA: schema}\n\n if set_num_columns:\n update_dict.update({self.NUM_COLUMNS: len(schema.keys())})\n\n self.update(update_dict)", "def _clean_up_columns(\n self):\n self.log.debug('starting the ``_clean_up_columns`` method')\n\n tableName = self.dbTableName\n\n print \"cleaning up %(tableName)s columns\" % locals()\n\n sqlQuery = u\"\"\"\n set sql_mode=\"STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\";\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n sqlQuery = u\"\"\"\n update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;\n update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = \"\";\n update %(tableName)s set notes = null where notes = \"\";\n update %(tableName)s set redshift = null where redshift = 0;\n update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = \"\";\n update %(tableName)s set hubble_const = null where hubble_const = 0;\n update %(tableName)s set lmc_mod = null where lmc_mod = 0;\n update %(tableName)s set master_row = 0;\n update 
%(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n self.log.debug('completed the ``_clean_up_columns`` method')\n return None", "def test_can_update_risk_type_schema(self):\n risk_type = self.create_risk_type()\n for r in risk_type.schema:\n if r['field_type'] == 'text':\n r['field_type'] = 'textarea'\n break\n\n response = self.client.put(\n f'/api/v0/risk-types/{risk_type.id}/',\n data={\n 'type_name': 'New Risk Type',\n 'schema': risk_type.schema,\n }, format='json')\n risk_type.refresh_from_db()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(risk_type.schema[0].get('field_type'), 'textarea')", "def test_updates_from_none(self):\r\n m = TestMapModel.create(int_map=None)\r\n expected = {1: uuid4()}\r\n m.int_map = expected\r\n m.save()\r\n\r\n m2 = TestMapModel.get(partition=m.partition)\r\n assert m2.int_map == expected\r\n\r\n m2.int_map = None\r\n m2.save()\r\n m3 = TestMapModel.get(partition=m.partition)\r\n assert m3.int_map != expected", "def convertToParamNull(self):\n for sAttr in self.getDataAttributes():\n oValue = getattr(self, sAttr);\n oNewValue = self._convertAttributeToParamNull(sAttr, oValue);\n if oValue != oNewValue:\n setattr(self, sAttr, oNewValue);\n return self;", "def _write_local_schema_file(self, cursor):\n schema = []\n tmp_schema_file_handle = NamedTemporaryFile(delete=True)\n if self.schema is not None and isinstance(self.schema, string_types):\n schema = self.schema\n tmp_schema_file_handle.write(schema)\n else:\n if self.schema is not None and isinstance(self.schema, list):\n schema = self.schema\n else:\n for field in cursor.description:\n # See PEP 249 for details about the description tuple.\n field_name = field[0]\n field_type = self.type_map(field[1])\n # Always allow TIMESTAMP to be nullable. MySQLdb returns None types\n # for required fields because some MySQL timestamps can't be\n # represented by Python's datetime (e.g. 0000-00-00 00:00:00).\n if field[6] or field_type == 'TIMESTAMP':\n field_mode = 'NULLABLE'\n else:\n field_mode = 'REQUIRED'\n schema.append({\n 'name': field_name,\n 'type': field_type,\n 'mode': field_mode,\n })\n s = json.dumps(schema, tmp_schema_file_handle, sort_keys=True)\n if PY3:\n s = s.encode('utf-8')\n tmp_schema_file_handle.write(s)\n\n self.log.info('Using schema for %s: %s', self.schema_filename, schema)\n return {self.schema_filename: tmp_schema_file_handle}", "def modify_tbl(self):\n debug = False\n dd = mg.DATADETS_OBJ\n orig_tblname = dd.tbl\n ## other (i.e. 
not the sofa_id) field details\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n if debug:\n print('oth_name_types to feed into '\n f'make_strict_typing_tbl {oth_name_types}')\n try: ## 1 way or other must do strict_cleanup()\n make_strict_typing_tbl(\n orig_tblname, oth_name_types, self.settings_data)\n except sqlite.IntegrityError as e: #@UndefinedVariable\n if debug: print(b.ue(e))\n strict_cleanup(restore_tblname=orig_tblname)\n raise FldMismatchException\n except Exception as e:\n strict_cleanup(restore_tblname=orig_tblname)\n raise Exception('Problem making strictly-typed table.'\n f'\\nCaused by error: {b.ue(e)}')\n copy_orig_tbl(orig_tblname)\n wipe_tbl(orig_tblname)\n final_name = self.tblname_lst[0] ## may have been renamed\n try:\n make_redesigned_tbl(final_name, oth_name_types)\n strict_cleanup(restore_tblname=final_name)\n dd.set_tbl(tbl=final_name)\n except Exception as e:\n strict_cleanup(restore_tblname=orig_tblname)\n restore_copy_tbl(orig_tblname) ## effectively removes tmp_tbl 2\n dd.set_tbl(tbl=orig_tblname)\n raise Exception('Problem making redesigned table.'\n f'\\nCaused by error: {b.ue(e)}')\n wipe_tbl(mg.TMP_TBLNAME2)", "def generate_altered_fields(self):\n result = super(MigrationAutodetector, self).generate_altered_fields()\n self.generate_sql_changes()\n return result", "def dim_col_imputation(self, columns):\n for column in columns:\n if column in self.col_with_nulls:\n if not self._pandas_flag:\n mode = self.data_frame.select(column).toPandas().mode().values[0][0]\n self.data_frame = self.data_frame.fillna({ column:mode })\n else:\n self.data_frame[column] = self.mode_impute(self.data_frame[column])\n self.data_change_dict['ModeImputeCols'].append(column)", "def test_update_from_none(self):\r\n ctx = {}\r\n col = columns.Set(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement({1, 2, 3, 4}, None, ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == {1, 2, 3, 4}\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])", "def _migrate_0(self, entry):\r\n if 'name' not in entry['_id']:\r\n entry_id = entry['_id']\r\n entry_id = bson.son.SON([\r\n ('org', entry_id['org']),\r\n ('course', entry_id['course']),\r\n ])\r\n self.location_map.remove({'_id': entry_id})\r\n return None\r\n\r\n # add schema, org, offering, etc, remove old fields\r\n entry['schema'] = 0\r\n entry.pop('course_id', None)\r\n entry.pop('lower_course_id', None)\r\n old_course_id = SlashSeparatedCourseKey(entry['_id']['org'], entry['_id']['course'], entry['_id']['name'])\r\n entry['org'] = old_course_id.org\r\n entry['offering'] = old_course_id.offering.replace('/', '+')\r\n return self._migrate_1(entry, True)", "def is_nullable_type(self):\n raise exceptions.NotImplementedError()", "def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)", "def deferred_to_columns_cb(self, target, model, fields):\n table = model._meta.db_table\n if table not in target:\n target[table] = set()\n for field in fields:\n if not hasattr(field.column, \"columns\"):\n target[table].add(field.column)\n else:\n target[table].update(field.column.columns)", "def _null_set_rel_pos(self, rel_pos):\n self.get_sp_rel_pos = self._null_get_rel_pos", "def convertFromParamNull(self):\n for sAttr in self.getDataAttributes():\n oValue = 
getattr(self, sAttr);\n oNewValue = self._convertAttributeFromParamNull(sAttr, oValue);\n if oValue != oNewValue:\n setattr(self, sAttr, oNewValue);\n return self;", "def test_update_field_to_null(self, field, field_name):\n control = factories.ControlFactory()\n\n response = self.api.put(control, control.id, {field: None})\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n control = db.session.query(all_models.Control).get(control.id)\n self.assertIsNotNone(control.external_id)", "def DEADempty_fix_table():\n sqlnuke = \"delete from fix\"\n conf.Cur.execute(sqlnuke)\n conf.Con.commit()", "def _migrate_if_necessary(self, entries):\r\n entries = [\r\n self._migrate[entry.get('schema', 0)](self, entry)\r\n for entry in entries\r\n ]\r\n return entries", "def column_empty_like_same_mask(column, dtype):\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)", "def update(self, schema: ISchema) -> ISchema:\n schema = super().update(schema)\n\n schema = cast(EventSchema, schema)\n if not schema.Active and schema.Sport > 0:\n self.select('ID', 'Sport', 'Active').filter(\n 'Sport', Operators.Equals, schema.Sport,\n ).filter('Active', Operators.Equals, 1)\n result = self.execute()\n if not result:\n sm = SportModel()\n sport = sm.find(schema.Sport)\n sport = cast(SportSchema, sport)\n sport.Active = False\n sm.update(sport)\n return schema", "def isNullable(self):\n if self.isPrimaryKey():\n return False\n else:\n return self._nullable", "def apply_to(self, df: pd.DataFrame) -> pd.DataFrame:\n schema_names = self.names\n data_columns = df.columns\n\n assert len(schema_names) == len(\n data_columns\n ), \"schema column count does not match input data column count\"\n\n for column, dtype in zip(data_columns, self.types):\n pandas_dtype = dtype.to_pandas()\n\n col = df[column]\n col_dtype = col.dtype\n\n try:\n not_equal = pandas_dtype != col_dtype\n except TypeError:\n # ugh, we can't compare dtypes coming from pandas,\n # assume not equal\n not_equal = True\n\n if not_equal or not dtype.is_primitive():\n new_col = convert(col_dtype, dtype, col)\n else:\n new_col = col\n df[column] = new_col\n\n # return data with the schema's columns which may be different than the\n # input columns\n df.columns = schema_names\n return df", "def columns_setup(self):\n self.required = None\n self.addition = None\n self.deletion = None\n self.retention = None\n self.rename = None", "def test_empty_cols_allowed(self):\n self.test_table.allow_empty_columns = True\n self.test_table.change_header(Path=1, SectionType=3, Value=4)\n self.assertEqual(self.test_table._header, [\"Path\", None, \"SectionType\",\n \"Value\"])", "def on_doctype_update():\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_defkey_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_defkey_index(parent, defkey)\"\"\")\n\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_parenttype_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_parenttype_index(parent, parenttype)\"\"\")", "def change_col_type(df,schema):\n d = 
{'int':IntegerType(),'str':StringType(),'float':FloatType(),'bool':BooleanType()}\n \n for c,t in schema.items():\n df = df.withColumn(c,col(c).cast(d[t]))\n return df", "def is_nullable(self):\n return self._is_nullable", "def getSchemaMap(self):\n\t\trSchemaMap = {}\n\t\tfor schemaName in self.schemaList:\n\t\t\tmodeList = [str(tKey) for tKey in self.schemaList[schemaName].modes.keys()]\n\n\t\t\tif len(modeList) == 1 and None in self.schemaList[schemaName].modes:\n\t\t\t\trSchemaMap[schemaName] = 0\n\t\t\telse:\n\t\t\t\trSchemaMap[schemaName] = modeList\n\n\t\treturn rSchemaMap", "def before_insert(mapper, conn, target):\n\n #from identity import ObjectNumber\n #assert not target.fk_vid or not ObjectNumber.parse(target.fk_vid).revision\n\n if target.sequence_id is None:\n # In case this happens in multi-process mode\n conn.execute(\"BEGIN IMMEDIATE\")\n sql = text(\n '''SELECT max(c_sequence_id)+1 FROM columns WHERE c_t_id = :tid''')\n\n max_id, = conn.execute(sql, tid=target.t_id).fetchone()\n\n if not max_id:\n max_id = 1\n\n target.sequence_id = max_id\n\n Column.before_update(mapper, conn, target)", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def reset_modified(self):\n self.modified_fields = set()\n\n # compensate for us not having knowledge of certain fields changing\n for field_name, field in self.schema.normal_fields.items():\n if isinstance(field, ObjectField):\n self.modified_fields.add(field_name)", "def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))", "def standardize(self, options: ParameterSetSchema) -> \"Parameters\":\n replace_dict = {}\n for key, val in asdict(self).items():\n dimensions = (val.shape[0], getattr(options, key).degree + 1)\n val_new = np.zeros(dimensions)\n max_dimension = (np.minimum(val.shape[1], dimensions[1]))\n val_new[:, :max_dimension] = val[:, :max_dimension]\n replace_dict.update({key: val_new})\n return replace(self, **replace_dict)", "def changenonetoNone(s):\r\n if s=='None':\r\n return None\r\n else:\r\n return s" ]
[ "0.581769", "0.5178056", "0.5142687", "0.5107475", "0.50148743", "0.4938137", "0.49304643", "0.4709507", "0.4704251", "0.4693166", "0.46439534", "0.460573", "0.45949084", "0.4564342", "0.45579243", "0.44939494", "0.4487079", "0.44671857", "0.44571728", "0.44475183", "0.44284815", "0.44273856", "0.4420121", "0.44103774", "0.4398482", "0.4389953", "0.43886152", "0.43481353", "0.4338426", "0.43361387", "0.43354636", "0.4328191", "0.432072", "0.43179303", "0.43146068", "0.43080887", "0.43002936", "0.42834532", "0.42733145", "0.42574638", "0.4256291", "0.42553374", "0.42515603", "0.42507502", "0.42487434", "0.42222852", "0.4220983", "0.42181864", "0.4215106", "0.4207957", "0.42077914", "0.42056474", "0.42030367", "0.41983145", "0.4197902", "0.4193015", "0.4184039", "0.41691354", "0.4166196", "0.4161701", "0.41562364", "0.4149698", "0.41378924", "0.41359726", "0.41308078", "0.41205308", "0.41057402", "0.4103443", "0.40973568", "0.4094938", "0.40752518", "0.40581706", "0.40564007", "0.40423664", "0.40393743", "0.4038854", "0.4034126", "0.40301055", "0.40212595", "0.4020144", "0.40148473", "0.40145954", "0.40142488", "0.40111858", "0.4004962", "0.3997657", "0.39896274", "0.39865515", "0.39862368", "0.3969516", "0.39657027", "0.39650917", "0.39629957", "0.39604303", "0.39579844", "0.39570943", "0.39489138", "0.39434963", "0.39393175", "0.39359477" ]
0.637235
0
Add new columns to an existing schema. Tries add new fields to an existing schema. Raises SchemaUpdateError if column already exists in the orig_schema_map.
def _AddNewColsToSchema(new_fields, orig_schema_map): updated_schema_map = orig_schema_map.copy() for new_field in new_fields: if new_field.name in orig_schema_map: raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE) updated_schema_map[new_field.name] = new_field return updated_schema_map
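The document above merges new fields into a copy of the original schema map and rejects any field whose name already exists. A minimal, self-contained usage sketch follows; the simple Field stand-in class, the SchemaUpdateError definition, and the _INVALID_SCHEMA_UPDATE_MESSAGE text are assumptions added for illustration only — they are not part of this dataset record or of the original BigQuery tooling.

# Minimal sketch of how _AddNewColsToSchema is called.
# Field, SchemaUpdateError and _INVALID_SCHEMA_UPDATE_MESSAGE below are
# illustrative stand-ins (assumptions), not the real apitools/bq types.

_INVALID_SCHEMA_UPDATE_MESSAGE = 'Invalid schema update: column already exists.'


class SchemaUpdateError(Exception):
    pass


class Field(object):
    """Stand-in for a TableFieldSchema-style message; only .name is required here."""

    def __init__(self, name, field_type='STRING', mode='NULLABLE'):
        self.name = name
        self.field_type = field_type
        self.mode = mode


def _AddNewColsToSchema(new_fields, orig_schema_map):
    """Add new columns to an existing schema map, rejecting duplicates."""
    updated_schema_map = orig_schema_map.copy()
    for new_field in new_fields:
        if new_field.name in orig_schema_map:
            raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
        updated_schema_map[new_field.name] = new_field
    return updated_schema_map


if __name__ == '__main__':
    original = {'id': Field('id', 'INTEGER', 'REQUIRED')}

    # Adding a genuinely new column succeeds.
    updated = _AddNewColsToSchema([Field('created_at', 'TIMESTAMP')], original)
    print(sorted(updated))    # ['created_at', 'id']
    print(sorted(original))   # ['id'] -- caller's map is untouched

    # Re-adding an existing column raises SchemaUpdateError.
    try:
        _AddNewColsToSchema([Field('id')], original)
    except SchemaUpdateError as e:
        print('rejected duplicate:', e)

Note that the function works on orig_schema_map.copy(), so the caller's map is left unchanged whether or not the update succeeds.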
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def _GetRelaxedCols(relaxed_columns, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for col in relaxed_columns:\n if col in orig_schema_map:\n updated_schema_map[col].mode = 'NULLABLE'\n else:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n return updated_schema_map", "def _GetUpdatedSchema(\n original_schema,\n new_columns=None,\n relaxed_columns=None):\n orig_field_map = (\n {f.name: f for f in original_schema.fields} if original_schema else {})\n\n if relaxed_columns:\n orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map)\n\n if new_columns:\n orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map)\n\n return sorted(orig_field_map.values(), key=lambda x: x.name)", "def add_cols(self, source) :\n\n cols = source.get_cols()\n types = source.get_types()\n\n new_cols = []\n new_types = []\n for i in range(len(cols)) :\n if cols[i] not in self.cols :\n new_cols.append(cols[i])\n new_types.append(types[i])\n self.cols.extend(new_cols)\n self.types.extend(new_types)\n\n self._alter_table(new_cols, new_types)\n\n row_ids = self.get_values('__ROWID')\n \n for col in new_cols :\n new_vals = source.get_values(col)\n if len(row_ids) == 0 :\n for val in new_vals :\n self._insert_internal(['__ROWID', col], [0, val])\n\n row_ids = self.get_values('__ROWID')\n\n else :\n binds = zip(new_vals, row_ids)\n q = self._quoter(col)\n sql_base = 'UPDATE \"%s\" SET \"%s\" = %s WHERE \"__ROWID\" = %%d' % (self.name, col, q)\n cur = self.con.cursor()\n for bind in binds :\n if bind[0] :\n update_sql = sql_base % (str(bind[0]), bind[1])\n cur.execute(update_sql)\n\n self.version += 1", "def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)", "def test_add_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.add_columns.statements) == 1)\n self.assertEqual(\n schema_differ.add_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='genre', 
db_column_name='genre', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def add_columns(self, table, col_data, col_type):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n for data, typ in zip(col_data, col_type):\n c.execute(\"ALTER TABLE {tn} ADD COLUMN {cn} {ct}\".\n format(tn=table, cn=data, ct=typ))\n conn.commit() \n conn.close()", "def add_table_column(self, schema, column):\n if not column[\"name\"] or not constants.NAME_RX.match(column[\"name\"]):\n raise ValueError(\"invalid column name\")\n if utils.name_in_nocase(column[\"name\"], [c[\"name\"] for c in schema[\"columns\"]]):\n raise ValueError(\"non-unique column name\")\n if column[\"type\"] not in constants.COLUMN_TYPES:\n raise ValueError(\"invalid column type\")\n sql = (\n f'''ALTER TABLE \"{schema['name']}\"'''\n f\"\"\" ADD COLUMN \"{column['name']}\" {column['type']}\"\"\"\n )\n if column.get(\"notnull\"):\n notnull = [\"NOT NULL\"]\n if column[\"type\"] == constants.INTEGER:\n notnull.append(\"DEFAULT 0\")\n elif column[\"type\"] == constants.REAL:\n notnull.append(\"DEFAULT 0.0\")\n elif column[\"type\"] in (constants.TEXT, constants.BLOB):\n notnull.append(\"DEFAULT ''\")\n sql += \" \" + \" \".join(notnull)\n self.dbcnx.execute(sql)\n schema[\"columns\"].append(column)\n self.update_table(schema)", "def add_column_to_staging_table(cursor,table_schema,table_name,column_name):\n if not check_if_column_exists(cursor, table_schema, table_name, column_name):\n add_column = \"ALTER TABLE \" + table_schema + \".\" + table_name + \" ADD COLUMN \" + column_name + \" text;\"\n cursor.execute(add_column)", "def add_schema_fields(self, fields):\n if not fields:\n return\n\n data = json.dumps(fields)\n\n try:\n return self.client.post(\n self._get_collection_url('schema/fields'),\n body=data\n )\n except solr_errors.SolrError as e:\n raise solr_errors.SolrSchemaUpdateError(fields, message=e.args[0])", "def add_column_into_target_sf(self, tap_type, table, new_column):\n self.run_query_target_snowflake(\n f'ALTER TABLE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table} ADD {new_column[\"name\"]} int'\n )\n self.run_query_target_snowflake(\n f'UPDATE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table}'\n f' SET {new_column[\"name\"]}={new_column[\"value\"]} WHERE 1=1'\n )", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def add_feature_columns(self, feature_columns: typing.List[str]):\n self.feature_columns += feature_columns", "def _addColumn(self, table, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + table + \" ADD COLUMN \" + str(column) + \" \" 
+ getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)", "def newColumn (layer,FieldName,DataType):\n # Check if field already exists\n if layer.fields().indexFromName(FieldName)==-1:\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes([QgsField(FieldName,DataType)])\n print(\"New field \\\"{}\\\" added\".format(FieldName))\n # Update to propagate the changes\n layer.updateFields()\n else:\n print(\"Field \\\"{}\\\" already exists.\".format(FieldName))", "def rename_column(self, original_column_name, new_column_name):\n self.renames.append((original_column_name, new_column_name))\n if not self.column_exists(new_column_name):\n super(MigrationTable, self).rename_column(original_column_name, new_column_name)", "def _add_hybrid_cols(self):\n for new_col_name, method in HYBRID_METHODS.items():\n out = method(self)\n if out is not None:\n try:\n self._hybrid_meta[new_col_name] = out\n except ValueError as e:\n msg = (\"Unable to add {!r} column to hybrid meta. The \"\n \"following exception was raised when adding \"\n \"the data output by '{}': {!r}.\")\n w = msg.format(new_col_name, method.__name__, e)\n logger.warning(w)\n warn(w, OutputWarning)", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def add_column_into_source(self, tap_type, table, new_column):\n run_query_method = getattr(self, f'run_query_tap_{tap_type}')\n run_query_method(\n f'ALTER TABLE {table} ADD {new_column[\"name\"]} int'\n )\n run_query_method(\n f'UPDATE {table} set {new_column[\"name\"]}={new_column[\"value\"]} where 1=1'\n )", "def update(self, other):\n # Check if any columns will remain with their original length. 
If so\n # also check if the lengths of the tables are the same.\n self._dirty = True\n nrows = other.number_of_rows()\n\n if (other._columns and\n set(self._columns) - set(other._columns) and\n other.number_of_rows() != self.number_of_rows()):\n\n raise ValueError('Can not add columns of length {}'\n ' to table of length {}'.format(\n other.number_of_rows(),\n self.number_of_rows()))\n\n for name, column in other._columns.items():\n self._set_column_column_nocheck(name, column, nrows)\n\n self.set_table_attributes(other.get_table_attributes())\n self.set_name(other.get_name())", "def create_column(self, new_column, dtype):\n self.logger.debug(\"[%u] Ready to add column %s\" %\n (os.getpid(), new_column))\n ddl = \"\"\"\n ALTER TABLE {schema}.{table}\n ADD COLUMN IF NOT EXISTS {col} {type}\n \"\"\"\n # TODO Replace by execute_ddl func and test it\n with get_sink_connection_string(self) as conn:\n with conn.cursor() as cursor:\n cursor.execute(ddl.format(schema=self.config['schema'],\n table=self.config['table'],\n col=new_column,\n type=dtype))\n self.logger.debug(\"[%u] Column %s has been added\" %\n (os.getpid(), new_column))", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "def add_col(self, colname, n_batch=5000, debug=False):\n\n if debug: print(\"Create new column {col}\".format(col=colname))\n # Alter table add column\n #\n alter_query = '''\n ALTER TABLE \"{tablename}\"\n ADD COLUMN \"{colname}\" {datatype};\n '''.format(tablename=self.get_carto_tablename(),\n colname=colname,\n datatype=datatype_map(str(self.dtypes[colname])))\n if debug: print(alter_query)\n\n # add column\n resp = self.carto_sql_client.send(alter_query)\n if debug: print(resp)\n\n # update all the values in that column\n #\n # NOTE: fails if colval is 'inf' or some other exceptional Python\n # or NumPy type\n n_items = len(self[colname])\n update_query = '''\n UPDATE \"{tablename}\"\n SET \"{colname}\" = {colval}\n WHERE \"cartodb_id\" = {cartodb_id};\n '''\n queries = []\n\n for row_num, item in enumerate(self[colname].iteritems()):\n # if debug: print(item)\n pgtype = dtype_to_pgtype(self[colname].dtype, colname)\n temp_query = update_query.format(\n tablename=self.get_carto_tablename(),\n colname=colname,\n colval=numpy_val_to_pg_val(item[1], pgtype),\n cartodb_id=item[0]).strip()\n queries.append(temp_query)\n if (len(queries) == n_batch) or (row_num == n_items - 1):\n output_query = '\\n'.join(queries)\n if debug: print(output_query)\n if debug: print(\"Num chars in query: {}\".format(len(output_query)))\n resp = self.carto_sql_client.send(output_query)\n queries = []\n\n return None", "def AddColumns(sqlite_file, table_name):\r\n columns = ['cf_direct_parent','cf_kingdom','cf_superclass',\\\r\n 'cf_class','cf_subclass','cf_intermediate_0','cf_intermediate_1',\\\r\n 'cf_intermediate_2','cf_intermediate_3','cf_intermediate_4',\\\r\n 'cf_intermediate_5','cf_molecular_framework','cf_alternative_parents',\\\r\n 'cf_substituents', 'cf_description']\r\n column_type = 'TEXT'\r\n # Connecting to the database file\r\n conn = sqlite3.connect(sqlite_file) # Connecting to the database\r\n c = conn.cursor() # Adding a cursor to interact with the database\r\n # Adding new column, if it does not exist yet, without a row value\r\n for new_column_name in columns:\r\n try:\r\n c.execute(\"ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}\"\\\r\n .format(tn=table_name, cn=new_column_name, ct=column_type))\r\n print(\"Column created: {cn}\".format(cn=new_column_name))\r\n except 
sqlite3.OperationalError:\r\n print(\"Column already exists: {cn}\".format(cn=new_column_name))\r\n conn.commit()\r\n conn.close()\r\n return None", "def rename_columns(self, col):\n try:\n self.cleaned_data.columns = col\n except Exception as e:\n raise e", "def addCommonExtraColumn(self, req, study_id, found_extra_table, column_name, data_type, description):\n debug = False\n common_extra_table_name = None\n min_column_count = None\n quoted_column_name = '\"{0}\"'.format(column_name.upper())\n \n if 'SAMPLE' in found_extra_table:\n common_extra_table_name = 'COMMON_EXTRA_SAMPLE'\n min_column_count = 2\n elif 'PREP' in found_extra_table:\n common_extra_table_name = 'COMMON_EXTRA_PREP'\n min_column_count = 3\n \n if common_extra_table_name == None:\n raise Exception('Error: Could not determine the common extra table name. The found extra table is: %s' % found_extra_table)\n \n # Set the database data type:\n database_data_type = ''\n if data_type == 'text' or database_data_type == 'range':\n database_data_type = 'varchar2(4000)'\n elif data_type == 'numeric':\n database_data_type = 'int'\n elif data_type == 'date':\n database_data_type = 'date'\n \n if database_data_type == '':\n raise Exception('Could not determine common extra column data type.')\n\n # Create the column if it doesn't already exist\n statement = \"\"\"\n select count(*) \n from all_tab_columns \n where column_name = '{0}' \n and table_name = '{1}'\n \"\"\".format(column_name.upper(), common_extra_table_name)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con = self.getMetadataDatabaseConnection()\n results = con.cursor().execute(statement).fetchone()\n if results[0] == 0:\n statement = 'alter table %s add %s %s' % (common_extra_table_name, quoted_column_name, database_data_type)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Copy the data found in the found extra_table\n if common_extra_table_name == 'COMMON_EXTRA_SAMPLE':\n statement = \"\"\"\n MERGE INTO common_extra_sample e\n USING (\n SELECT sample_id, {0}\n FROM {1}\n ) x\n ON (e.sample_id = x.sample_id)\n WHEN MATCHED THEN \n UPDATE SET e.{0} = x.{0}\n WHEN NOT MATCHED THEN \n INSERT (e.sample_id, e.{0})\n VALUES (x.sample_id, x.{0})\n \"\"\".format(quoted_column_name, found_extra_table)\n else:\n statement = \"\"\"\n MERGE INTO common_extra_prep e\n USING (\n SELECT sample_id, row_number, {0}\n FROM {1}\n ) x\n ON (e.sample_id = x.sample_id and e.row_number = x.row_number)\n WHEN MATCHED THEN \n UPDATE SET e.{0} = x.{0}\n WHEN NOT MATCHED THEN \n INSERT (e.sample_id, e.row_number, e.{0})\n VALUES (x.sample_id, x.row_number, x.{0})\n \"\"\".format(quoted_column_name, found_extra_table)\n \n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n statement = 'commit'\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Remove the column from the found extra table. 
If it's the last custom column in the table, remove the table\n statement = \"select count(*) from all_tab_columns where table_name = '%s'\" % (found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n results = con.cursor().execute(statement).fetchone()\n if results[0] <= min_column_count:\n statement = 'drop table %s' % (found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n else:\n statement = 'alter table %s drop column %s' % (found_extra_table, quoted_column_name)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Clean up references in study_actual_columns\n extra_table_study_id = found_extra_table.split('_')[2]\n\n statement = \"\"\"\n update study_actual_columns \n set table_name = '\"{0}\"' \n where study_id = {1} \n and table_name = '\"{2}\"'\n \"\"\".format(common_extra_table_name, extra_table_study_id, found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n statement = 'commit'\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)", "def addSchemaFile(self, newSchemaFile):\n\t\tself.schemaFile.append(newSchemaFile)", "def test_rename_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n title_column = Varchar()\n title_column._meta.name = \"title\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[title_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.rename_columns.statements) == 1)\n self.assertEqual(\n schema_differ.rename_columns.statements[0],\n \"manager.rename_column(table_class_name='Band', tablename='band', old_column_name='title', new_column_name='name', old_db_column_name='title', new_db_column_name='name')\", # noqa\n )", "def _alter_table(self, names, types) :\n\n cur = self.con.cursor()\n for i in range(min(len(names), len(types))) :\n alter_sql = 'ALTER TABLE \"%s\" ADD COLUMN \"%s\" %s' % (self.name, names[i], types[i])\n cur.execute(alter_sql)", "def _check_columns(\n schema_errors: set[str],\n stored: Mapping,\n expected: Mapping,\n columns: Iterable[str],\n table_name: str,\n supports: str,\n) -> None:\n for column in columns:\n if stored[column] == expected[column]:\n continue\n schema_errors.add(f\"{table_name}.{supports}\")\n _LOGGER.error(\n \"Column %s in database table %s does not support %s (stored=%s != expected=%s)\",\n column,\n table_name,\n supports,\n stored[column],\n expected[column],\n )", "def add_column(self, table_name: str, column) -> None:\n sql = 'ALTER TABLE ' + table_name + ' ADD COLUMN ' + column.to_sql()\n self.cursor.execute(sql)", "def addcolumn(self, column):\n if column not in self.headersindex:\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n cur = database.get_connection().cursor()\n cur.execute(\"ALTER TABLE \\'%s\\' ADD COLUMN %s\" % (self.name, column.to_declaration()))", "def __update_feature_table_columns(self):\n self.__init_table()\n\n feature_dict_sorted_keys = feature_extractor_definition.keys()\n feature_dict_sorted_keys.sort()\n for key in feature_dict_sorted_keys:\n if not self.__has_feature_column(key):\n 
self.__add_feature_column(key, feature_extractor_definition[key])", "def add_new_cols(cat, prefix=\"\", floatcols=None, boolcols=None):\n\t\n\tif floatcols != None:\n\t\tfor col in floatcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=float, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)\n\tif boolcols != None:\n\t\tfor col in boolcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=bool, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)", "def _add_necessary_columns(args, custom_columns):\n # we need to add the variant's chrom, start and gene if \n # not already there.\n if custom_columns.find(\"gene\") < 0:\n custom_columns += \", gene\"\n if custom_columns.find(\"start\") < 0:\n custom_columns += \", start\"\n \n return custom_columns", "def save_column_mappings(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n organization = import_file.import_record.super_organization\n mappings = body.get('mappings', [])\n for mapping in mappings:\n dest_field, raw_field = mapping\n if dest_field == '':\n dest_field = None\n\n dest_cols = _column_fields_to_columns(dest_field, organization)\n raw_cols = _column_fields_to_columns(raw_field, organization)\n try:\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n except ColumnMapping.MultipleObjectsReturned:\n # handle the special edge-case where remove dupes doesn't get\n # called by ``get_or_create``\n ColumnMapping.objects.filter(\n super_organization=organization,\n column_raw__in=raw_cols,\n ).delete()\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n\n # Clear out the column_raw and column mapped relationships.\n column_mapping.column_raw.clear()\n column_mapping.column_mapped.clear()\n\n # Add all that we got back from the interface back in the M2M rel.\n [column_mapping.column_raw.add(raw_col) for raw_col in raw_cols]\n if dest_cols is not None:\n [\n column_mapping.column_mapped.add(dest_col)\n for dest_col in dest_cols\n ]\n\n column_mapping.user = request.user\n column_mapping.save()\n\n return {'status': 'success'}", "def columns(self, new_columns: ColumnT) -> None:\n new_columns2: ndarray = init.check_column_validity(new_columns)\n len_new: int = len(new_columns2)\n len_old: int = len(self._columns)\n if len_new != len_old:\n raise ValueError(f'There are {len_old} columns in the DataFrame. 
'\n f'You provided {len_new}.')\n\n new_column_info: ColInfoT = {}\n for old_col, new_col in zip(self._columns, new_columns2):\n new_column_info[new_col] = utils.Column(*self._column_info[old_col].values)\n\n self._column_info = new_column_info\n self._columns = new_columns2", "def add_column(self, colspec):\n if colspec.name == DEFAULT_COLUMN_NAME or colspec.name in self.columns.keys():\n raise Exception(\"Column {} already exists.\".format(colspec.name))\n\n self.info.add_column(colspec.name, colspec.video, colspec.dtype)", "def test_add_column(self):\n self.spy_on(DataGrid.add_column)\n\n DataGridColumnsHook(extension=self.extension,\n datagrid_cls=DataGrid,\n columns=[Column(id='sandbox')])\n\n self.assertTrue(DataGrid.add_column.called)", "def modify(self, fields=None, **fields_kwargs):\n modified_fields = set()\n fields = self.make_dict(fields, fields_kwargs)\n fields = self._modify(fields)\n for field_name, field_val in fields.items():\n in_schema = field_name in self.schema.fields\n if in_schema:\n setattr(self, field_name, field_val)\n modified_fields.add(field_name)\n\n return modified_fields", "def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)", "def _addcolumns(self, columnname, columndata=\"\"):\n self[columnname] = columndata", "def parse_migration_columns(\n self, tablename: str, migration_columns: ColumnParametersSchema\n ):\n try:\n update = self.check_table(tablename)\n\n for col in migration_columns:\n self.source_column = col.sourceColumn\n self.destination_column = col.destinationColumn\n self.dest_options = col.destinationColumn.options.dict()\n\n self._parse_fk(tablename, self.dest_options.pop(\"foreign_key\"))\n column_type = self._parse_column_type()\n\n col = Column(self.destination_column.name, column_type, **self.dest_options)\n if update:\n if not self.check_column(tablename, self.destination_column.name):\n # self.add_alter_column(tablename, {\"column_name\": self.destination_column.name,\"type\":column_type,\"options\":{**self.dest_options}})\n # else:\n self.add_updated_table(tablename, col)\n else:\n self.add_created_table(tablename, col)\n except Exception as err:\n logger.error(\"parse_migration_columns [error] -> %s\" % err)", "def add_col2tab(con_db, cur_db, tab_name, col_name, col_type):\n\n # Iterate through all existing column names of the database table using\n # the PRAGMA table_info command\n for row in cur_db.execute(f'PRAGMA table_info({tab_name})'):\n\n # If the column exists: exit the function\n if row[1] == col_name:\n break\n\n # If the column is not existing yet, add the new column\n else:\n cur_db.execute(f'ALTER TABLE {tab_name} ' \\\n f'ADD COLUMN {col_name} {col_type}')\n con_db.commit()", "async def upgradeSchema(self) -> None:", "def AddColumn(self, column):\n self.columns.append(column)\n self.column_dict[column.column_id] = column", "def append_columns(cls, columns, grid=None, grid_url=None):\n grid_id = parse_grid_id_args(grid, grid_url)\n\n grid_ops.ensure_uploaded(grid_id)\n\n # Verify unique column names\n column_names = [c.name for c in columns]\n if grid:\n existing_column_names = [c.name for c in grid]\n column_names.extend(existing_column_names)\n duplicate_name = utils.get_first_duplicate(column_names)\n if duplicate_name:\n err = exceptions.NON_UNIQUE_COLUMN_MESSAGE.format(duplicate_name)\n raise exceptions.InputError(err)\n\n # This is sorta gross, we need to double-encode this.\n 
body = {\"cols\": _json.dumps(columns, cls=PlotlyJSONEncoder)}\n fid = grid_id\n response = v2.grids.col_create(fid, body)\n parsed_content = response.json()\n\n cls._fill_in_response_column_ids(columns, parsed_content[\"cols\"], fid)\n\n if grid:\n grid.extend(columns)", "def _format_meta_pre_merge(self):\n self.__col_name_map = {\n ColNameFormatter.fmt(c): c\n for c in self.data.solar_meta.columns.values\n }\n\n self._rename_cols(self.data.solar_meta, prefix=SOLAR_PREFIX)\n self._rename_cols(self.data.wind_meta, prefix=WIND_PREFIX)\n\n self._save_rep_prof_index_internally()", "def rename_columns(columns, mapper, keep_original):\n for name, rename in mapper.items():\n if name in columns:\n columns[rename] = org_copy.deepcopy(columns[name])\n if 'parent' in columns[name]:\n parents = columns[name]['parent']\n else:\n parents = {}\n if not keep_original: del columns[name]\n columns[rename]['name'] = rename\n for parent_name, parent_spec in list(parents.items()):\n new_parent_map = {}\n if parent_name in mapper:\n new_name = mapper[parent_name]\n new_parent_map[new_name] = parent_spec\n columns[rename]['parent'] = new_parent_map\n if columns[rename].get('values'):\n values = columns[rename]['values']\n if isinstance(values, str):\n if values in mapper:\n columns[rename]['values'] = mapper[values]", "def _ensure_schema_has_covariates(self, x_underscore_columns):\n previous_rename = self.covariate_rename\n if set(x_underscore_columns) == set(previous_rename.values()):\n return\n # Only rewrite schema if the x_<integer> list has changed.\n # because the schema depends on the number of covariates, not\n # their names.\n covariate_columns = list(x_underscore_columns)\n # ASCII sorting isn't correct b/c x_11 is before x_2.\n covariate_columns.sort(key=lambda x: int(x[2:]))\n for create_name in [\"data\", \"avgint\"]:\n empty = self.dismod_file.empty_table(create_name)\n without = [c for c in empty.columns if not c.startswith(\"x_\")]\n # The wrapper needs these columns to have a dtype of Real.\n empty = empty[without].assign(**{cname: np.empty((0,), dtype=np.float) for cname in covariate_columns})\n self.dismod_file.update_table_columns(create_name, empty)\n if getattr(self.dismod_file, create_name).empty:\n CODELOG.debug(f\"Writing empty {create_name} table with columns {covariate_columns}\")\n setattr(self.dismod_file, create_name, empty)\n else:\n CODELOG.debug(f\"Adding to {create_name} table schema the columns {covariate_columns}\")", "def add_column(self, name, type):\n raise NotImplementedError(\n \"Please implement the 'add_column' method in a derived class.\")", "def merge(self, other: Schema) -> Schema:\n if duplicates := self.keys() & other.keys():\n raise IntegrityError(f'Duplicate column name(s): {duplicates}')\n return self.__class__({**self, **other})", "def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))", "def upgrade_alter_table(self, db, tablename, colname, coltype):\n try:\n cursor = db.cursor()\n cursor.execute(\"select %s from %s\" % (colname,tablename))\n except Exception, e:\n self.log.debug(\"upgrade_alter_table: %s\", e)\n cursor = db.cursor()\n alter = \"ALTER TABLE %s add column %s %s\" % (tablename, colname,\n coltype)\n cursor.execute(alter)", "def merge_schemas(self, old_schm, new_schm):\n\n old_schm_cols = [x['name'] for x in old_schm]\n\n for col in new_schm:\n if type(col) == dict:\n if col['name'] not in 
old_schm_cols:\n old_schm.append(col)\n \n for count, old_col in enumerate(old_schm):\n for meta in old_col:\n if type(old_col[meta]) == list:\n if old_col['name'] in [pot_new_col['name'] for pot_new_col in new_schm]:\n new_col = [pot_new_col for pot_new_col in new_schm if pot_new_col['name'] == old_col['name']][0]\n if meta in new_col:\n old_schm[count][meta] = self.merge_schemas(old_col[meta], new_col[meta])\n \n return old_schm", "def _get_and_validate_schema_mapping(schema1, schema2, strict=False):\n\n len_schema1 = len(schema1)\n len_schema2 = len(schema2)\n\n # If both non-empty, must be same length\n if 0 < len_schema1 != len_schema2 > 0:\n raise ValueError(\"Attempted to merge profiles with different \"\n \"numbers of columns\")\n\n # In the case of __add__ with one of the schemas not initialized\n if strict and (len_schema1 == 0 or len_schema2 == 0):\n raise ValueError(\"Cannot merge empty profiles.\")\n\n # In the case of _update_from_chunk with uninitialized schema\n if not strict and len_schema2 == 0:\n return {col_ind: col_ind for col_ind_list in schema1.values()\n for col_ind in col_ind_list}\n\n # Map indices in schema1 to indices in schema2\n schema_mapping = dict()\n\n for key in schema1:\n # Pandas columns are int by default, but need to fuzzy match strs\n if isinstance(key, str):\n key = key.lower()\n if key not in schema2:\n raise ValueError(\"Columns do not match, cannot update \"\n \"or merge profiles.\")\n\n elif len(schema1[key]) != len(schema2[key]):\n raise ValueError(f\"Different number of columns detected for \"\n f\"'{key}', cannot update or merge profiles.\")\n\n is_duplicate_col = len(schema1[key]) > 1\n for schema1_col_ind, schema2_col_ind in zip(schema1[key],\n schema2[key]):\n if is_duplicate_col and (schema1_col_ind != schema2_col_ind):\n raise ValueError(f\"Different column indices under \"\n f\"duplicate name '{key}', cannot update \"\n f\"or merge unless schema is identical.\")\n schema_mapping[schema1_col_ind] = schema2_col_ind\n\n return schema_mapping", "def ensure_internal_schema_updated(self):\n if self._internal_schema_updated:\n return\n if internalmigrations.needs_upgrading(self):\n assert not self._in_transaction\n with self.lock():\n internalmigrations.upgrade(self)\n self.connection.commit()\n self._internal_schema_updated = True", "def _refactor_time_columns(write_cursor: 'DBCursor') -> None:\n log.debug('Enter _refactor_time_columns')\n write_cursor.execute('ALTER TABLE timed_balances RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE timed_location_data RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE trades RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE asset_movements RENAME COLUMN time TO timestamp')\n log.debug('Exit _refactor_time_columns')", "def test_new_columns(self):\n rename = '{}*'\n expected = (list(self.df.columns) +\n list(rename.format(f) for f in self.formants))\n actual = self.normalizer(rename=rename, **self.kwargs).normalize(\n self.df).columns\n\n expected = sorted(expected)\n actual = sorted(actual)\n self.assertListEqual(actual, expected)", "def addcolumn(self, colname, coldata):\n if len(coldata) != len(self):\n raise ValueError,\"Column length must match catalog length\"\n\n #Most of the bookkeeping is the same as for an empty column\n self.addemptycolumn(colname,coldata.dtype)\n\n #and then we reset the column to contain the actual data\n setattr(self,colname,coldata)", "def insert_column(self, tb_name, column_name, data_type):\n sentences = f\"\"\"\n 
ALTER TABLE {tb_name} ADD COLUMN {column_name} {data_type};\n \"\"\"\n print(sentences)\n self.commit(sentences)", "def get_column_definitions(self, schema, table, connection=None):\n raise NotImplementedError", "def addColumn(self, name, column):\n self.columnNames.append(name)\n self.addColumnValues(column)", "def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)", "def update_column(self, xmldata):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n headers = managers.request_manager.get_request().session().value(\"headers\")\n if not columns:\n return False\n if xmldata:\n # Parsing of column declaration\n dom = parseString(xmldata.encode(\"UTF-8\"))\n column = dom.getElementsByTagName(\"column\")[0]\n name = un_quote(column.getAttribute(\"name\"))\n if not name:\n return False\n declaration = name\n constraints = {}\n cid = column.getAttribute(\"id\")\n type = column.getAttribute(\"type\")\n if not type or type == \"INTEGER\" or type == \"REAL\" or type == \"TEXT\" or type == \"BLOB\":\n constraints[\"type\"] = type\n if column.getAttribute(\"notnull\") == \"true\":\n constraints[\"not null\"] = True\n if column.getAttribute(\"primary\") == \"true\":\n if column.getAttribute(\"autoincrement\") == \"true\":\n constraints[\"primary key\"] = \"autoincrement\"\n else:\n constraints[\"primary key\"] = True\n if column.getAttribute(\"unique\") == \"true\":\n constraints[\"unique\"] = True\n\n if column.getAttribute(\"default\") and column.getAttribute(\"default\") != \"\" and column.getAttribute(\"default\") != \"NULL\":\n constraints[\"default\"] = column.getAttribute(\"default\")\n\n column_obj = VDOM_db_column(name, constraints)\n column_obj.id = cid\n\n # praparing SQL code\n old_column = None\n for col in columns:\n if columns[col].id == cid:\n old_column = columns[col]\n break\n if not old_column:\n return False\n\n newtable = \"%s_new(\" % self.name\n oldtable = \"%s(\" % self.name\n for col in headers:\n if oldtable[-1] != \"(\":\n oldtable += \", \"\n oldtable += columns[col].to_declaration()\n\n if columns[col].id == cid:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += column_obj.to_declaration()\n\n else:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += columns[col].to_declaration()\n newtable += \")\"\n if newtable[-2] == \"(\":\n return False\n newcols = []\n newcols.extend(headers)\n newcols.remove(old_column.name)\n newcols_decl = \"\"\n for ctr in newcols:\n newcols_decl += \", `%s`\" % ctr\n\n sql = \"\"\"BEGIN TRANSACTION;\nCREATE TABLE %(newtable)s;\nINSERT INTO `%(newtablename)s` (%(newcols)s) SELECT %(newcols)s FROM `%(oldtablename)s`;\nDROP TABLE `%(oldtablename)s`;\nALTER TABLE `%(newtablename)s` RENAME TO `%(oldtablename)s`;\nEND TRANSACTION;\"\"\" % {\"newtable\": newtable, \"newtablename\": self.name + \"_new\", \"oldtablename\": self.name, \"newcols\": newcols_decl[2:]}\n query = VDOM_sql_query(self.owner_id, self.database_id, sql, None, True)\n query.commit()\n columns.pop(old_column.name)\n columns[column_obj.name] = column_obj\n managers.request_manager.get_request().session().value(\"columns\", columns)\n self.restore_structure()\n return True", "def addStudyActualColumn(self, study_id, column_name, table_name):\n try:\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.add_study_actual_column', [study_id, column_name, table_name])\n except Exception, e: \n raise Exception('Exception caught in 
addStudyActualColumns(): %s.\\nThe error is: %s' % (type(e), e))", "def add_column(self, col_name, definition):\n if not self.column_exists(col_name):\n self.execute(self.commands.add_column(self.name, col_name, definition))", "def addcolumn_from_xml(self, xmldata):\n num_added = 0\n if xmldata:\n dom = parseString(xmldata.encode(\"UTF-8\"))\n for column in dom.getElementsByTagName(\"column\"):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n if not columns:\n columns = self.parse_declaration()\n name = un_quote(column.getAttribute(\"name\"))\n if not name:\n continue\n if name in columns:\n continue # column already exists\n declaration = name\n constraints = {}\n cid = column.getAttribute(\"id\")\n type = column.getAttribute(\"type\")\n if not type or type == \"INTEGER\" or type == \"REAL\" or type == \"TEXT\" or type == \"BLOB\":\n constraints[\"type\"] = type\n if column.getAttribute(\"notnull\") == \"true\":\n constraints[\"not null\"] = True\n if column.getAttribute(\"primary\") == \"true\":\n if column.getAttribute(\"autoincrement\") == \"true\":\n constraints[\"primary key\"] = \"autoincrement\"\n else:\n constraints[\"primary key\"] = True\n if column.getAttribute(\"unique\") == \"true\":\n constraints[\"unique\"] = True\n\n if column.getAttribute(\"default\") and column.getAttribute(\"default\") != \"\" and column.getAttribute(\"default\") != \"NULL\":\n constraints[\"default\"] = column.getAttribute(\"default\")\n\n column_obj = VDOM_db_column(name, constraints)\n column_obj.id = cid\n columns[name] = column_obj\n self.addcolumn(column_obj)\n managers.request_manager.get_request().session().value(\"columns\", columns)\n num_added += 1\n return num_added", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def add_latclonc_to_db(self):\n \n # add new columns\n try:\n command =\"ALTER TABLE {tb} ADD COLUMN latc TEXT\".format(tb=self.table_name) \n self.conn.cursor.execute(command)\n except:\n # pass if the column latc exists\n pass\n try:\n command =\"ALTER TABLE {tb} ADD COLUMN lonc TEXT\".format(tb=self.table_name) \n self.conn.cursor.execute(command)\n except:\n # pass if the column lonc exists\n pass\n\n # iterate through tvals of the self.sites\n sdtm = self.stm\n for ii, st in enumerate(self.sites):\n if ii == len(self.sites)-1:\n edtm = self.etm\n else:\n edtm = st.tval\n command = \"SELECT rowid, slist, vel, frang, rsep, datetime FROM {tb} WHERE (DATETIME(datetime)>'{sdtm}' and\\\n DATETIME(datetime)<='{edtm}') ORDER BY datetime\".format(tb=self.table_name,\\\n sdtm=str(sdtm), edtm=str(edtm))\n self.conn.cursor.execute(command)\n rows = self.conn.cursor.fetchall() \n if rows != []:\n rowid, slist, vel, frang_old, 
rsep_old, date_time_old = rows[0]\n\n # calculate latc_all and lonc_all in 'geo' coords\n latc_all, lonc_all = calc_latc_lonc(self.sites[ii], self.bmnum, frang_old, rsep_old, \n altitude=300., elevation=None, coord_alt=0.,\n coords=\"geo\", date_time=None)\n for row in rows:\n rowid, slist, vel, frang, rsep, date_time = row\n if (frang, rsep) != (frang_old, rsep_old):\n latc_all, lonc_all = calc_latc_lonc(self.sites[ii], self.bmnum, frang, rsep, \n altitude=300., elevation=None, coord_alt=0.,\n coords=\"geo\", date_time=None)\n\n \n frang_old, rsep_old = frang, rsep\n\n # convert from string to float\n slist = [int(float(x)) for x in slist.split(\",\")]\n vel = [float(x) for x in vel.split(\",\")]\n\n # exclude the slist values beyond maxgate and their correspinding velocities\n vel = [vel[i] for i in range(len(vel)) if slist[i] < st.maxgate]\n slist = [s for s in slist if s < st.maxgate]\n\n # extract latc and lonc values\n latc = [latc_all[s] for s in slist]\n lonc = [lonc_all[s] for s in slist]\n\n # convert to comma seperated text\n slist = \",\".join([str(x) for x in slist])\n vel = \",\".join([str(round(x,2)) for x in vel])\n latc = \",\".join([str(round(x,2)) for x in latc])\n lonc = \",\".join([str(round(x,2)) for x in lonc])\n\n # update the table\n command = \"UPDATE {tb} SET slist='{slist}', vel='{vel}',\\\n latc='{latc}', lonc='{lonc}' WHERE rowid=={rowid}\".\\\n format(tb=self.table_name, slist=slist, vel=vel,\\\n latc=latc, lonc=lonc, rowid=rowid)\n self.conn.cursor.execute(command)\n\n # update sdtm\n sdtm = edtm\n\n # commit the data into the db\n self.conn._commit()\n\n # close db connection\n self.conn._close_connection()\n \n return", "def _insert_into_new_columns(self, commit, mapping_index):\n for i in range(self.num_new_columns):\n if self.new_columns[i].commit == commit:\n self.mapping[mapping_index] = i\n return mapping_index + 2\n\n # This commit isn't already in new_columns. Add it.\n column = Column(commit, self._find_commit_color(commit))\n self.new_columns[self.num_new_columns] = column\n self.mapping[mapping_index] = self.num_new_columns\n self.num_new_columns += 1\n return mapping_index + 2", "def setAllColumns(self, newAllColumns):\n \n pass", "def deferred_to_columns_cb(self, target, model, fields):\n table = model._meta.db_table\n if table not in target:\n target[table] = set()\n for field in fields:\n if not hasattr(field.column, \"columns\"):\n target[table].add(field.column)\n else:\n target[table].update(field.column.columns)", "def addTableColumn(self, tablename, columnname, columntype):\n\n # Check if the table exists\n if tablename in self.getTableNames():\n\n # Check that the column does not already exist\n if columnname not in self.getColumnNames(tablename):\n\n #Allow columnames with spaces\n columnname = '`'+columnname+'`'\n\n \"\"\"# Fit characters to the allowed format if necessary\n fmt = ''\n if (self.connector == 'mysql' and\n ('TEXT' in columntype or 'VARCHAR' in columntype) and\n not ('CHARACTER SET' in columntype or\n 'utf8mb4' in columntype)):\n\n # We enforze utf8mb4 for mysql\n fmt = ' CHARACTER SET utf8mb4'\n\n\n sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +\n columnname + ' ' + columntype + fmt)\"\"\"\n sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +\n columnname + ' ' + columntype) \n self._c.execute(sqlcmd)\n\n # Commit changes\n self._conn.commit()\n\n else:\n print((\"WARNING: Column {0} already exists in table {1}.\"\n ).format(columnname, tablename))\n\n else:\n print('Error adding column to table. 
Please, select a valid ' +\n 'table name from the list')\n print(self.getTableNames())\n\n return", "def _insert_column(self, column_name, column_type, table, params=None, overwrite=False, after_col=None, verbose=True):\n \n not_null = ''\n auto_increment = ''\n \n if params != None and 'not_null' in params:\n not_null = 'NOT NULL'\n \n \n if params != None and 'auto_increment' in params:\n auto_increment = \"AUTO_INCREMENT\"\n \n \n ADD_COLUMN_COMMAND = \"ALTER TABLE {0} ADD {1} {2} {3} {4}\".format(table, column_name, column_type, not_null, auto_increment)\n \n if (after_col != None and type(after_col) is str):\n ADD_COLUMN_COMMAND += \" AFTER {0} \".format(after_col)\n \n \n self.cursor.execute(ADD_COLUMN_COMMAND)\n \n if verbose: \n print(\"Adding the column '{0}' to the table '{1}'...\".format(column_name, table))\n print(\"\\t\" + ADD_COLUMN_COMMAND) \n \n \n if params != None and 'foreign_key' in params:\n \n if 'references' not in params:\n raise InvalidParameterError\n \n referenced_table = params['references'].split('(')[0]\n referenced_column = params['references'].split('(')[1][:-1] \n \n \n if (not self.check_table(referenced_table, verbose=False)):\n raise(TableNotFoundError)\n \n \n if (not self.check_column(referenced_column, referenced_table, verbose=False)):\n raise(ColumnNotFoundError)\n \n \n ADD_FOREIGN_KEY_COMMAND = \"ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})\".format(table, column_name, referenced_table, referenced_column)\n \n \n if verbose: \n print(\"\\t\" + ADD_FOREIGN_KEY_COMMAND) \n \n self.cursor.execute(ADD_FOREIGN_KEY_COMMAND)", "def addExtraColumnMetadata(self, study_id, table_level, column_name, description, data_type):\n try:\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.extra_column_metadata_insert', [study_id, table_level, \n column_name, description, data_type])\n except Exception, e: \n raise Exception('Exception caught in addExtraColumnMetadata(): {0}.\\nThe error is: {1}.\\nstudy_id: \\\n {2}\\ntable_level: {3}\\ncolumn_name: {4}\\ndescription: {5}\\n data_type: {6}'.format(type(e), e, \\\n str(study_id), str(table_level), str(column_name), str(description), str(data_type)))", "def test_dummydb_add_data_to_table_wrong_column_name(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", four=1)", "def generate_altered_fields(self):\n result = super(MigrationAutodetector, self).generate_altered_fields()\n self.generate_sql_changes()\n return result", "def add_col(self):\r\n reader = csv.reader(open(self.in_csvfile, newline=''))\r\n rows = list(reader)\r\n rows[0].append(self.col_name)\r\n for i in range(1, len(rows)):\r\n rows[i].append(self.cell_filler(rows[i]))\r\n writer = csv.writer(open(self.out_csvfile, 'w', newline=''))\r\n writer.writerows(rows)", "def has_desired_schema(self):\n if self._new_table == self._old_table:\n if not self.rebuild:\n log.info(\"Table already has the desired schema. \")\n return True\n else:\n log.info(\n \"Table already has the desired schema. 
However \"\n \"--rebuild is specified, doing a rebuild instead\"\n )\n return False\n return False", "def _add_cols(df: pandas.DataFrame, scope = (globals(), locals())) -> None:\n command : str = input(\"\\nAdd a column:\\n\")\n if command.lower() in ['n', 'no', 'quit()', 'exit', 'return']:\n return\n\n col_name : str = command[ \\\n re.search(r'[\\w\\.\\(\\)]+', command).start(): \\\n re.search(r'[\\w\\.\\(\\)]+', command).end() \\\n ]\n # new column's name\n\n arg : str = command[re.search(r'[=,;]', command).end():]\n # the new column's \"function\"\n ref_cols = re.findall(r'(?<=\\{)\\w[\\w\\.\\(\\)]*(?=\\})', arg)\n # df column names that are referenced to create new columns\n\n for i in range(len(ref_cols)):\n arg = re.sub(\n f'{{{ref_cols[i]}}}',\n f'df[\\'{ref_cols[i]}\\']',\n arg\n )\n # substituting references\n\n scope[0].update(globals())\n scope[1].update(locals())\n\n col_arg = eval(arg, scope[0], scope[1])\n # pandas.Series for type checking\n df[col_name] = col_arg\n # creating column\n\n more : str = input(\"\\nWould you like to add more columns?\\n\")\n if more.lower() in ['y', 'yes', 'continue', 'true']:\n return _add_cols(df)\n return", "def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 
'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry", "def build_column_mapping(raw_columns, dest_columns, previous_mapping=None, map_args=None,\n default_mappings=None, thresh=0):\n\n return MappingColumns(raw_columns, dest_columns, previous_mapping=previous_mapping,\n map_args=map_args, default_mappings=default_mappings,\n threshold=thresh).final_mappings", "def _modify_fields(usecols, dtype, badcols):\n for col in badcols:\n usecols = [badcols[col] if uc == col else uc for uc in usecols]\n try:\n dtype[badcols[col]] = dtype.pop(col)\n except KeyError:\n pass\n return usecols, dtype", "def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return None", "def addColumnsToFile(filename, columns, add_before_original=True):\n\ttempFile = \"TEMP.tmp\"\n\toutFile = open(tempFile, \"w\")\n\tinFile = open(filename, \"r\")\n\tcolumns = listR.toList(columns)\n\tindex = 0\n\taLine = inFile.readline()\n\twhile aLine:\n\t\tif add_before_original:\n\t\t\toutFile.write(columns[index] + aLine)\n\t\telse:\n\t\t\toutFile.write(aLine + columns[index])\n\t\tindex = listR.next(columns, index)\n\t\taLine = inFile.readline()\n\tinFile.close\n\toutFile.close()\n\tcopy(tempFile, filename)", "def rename_column(self, table_name, old, new):\r\n self._remake_table(table_name, renames={old: new})", "def _prepare_schema(self):\n schema = DaskSchema(self.schema_name)\n\n if not self.tables:\n logger.warning(\"No tables are registered.\")\n\n for name, dc in self.tables.items():\n table = DaskTable(name)\n df = dc.df\n logger.debug(\n f\"Adding table '{name}' to schema with columns: {list(df.columns)}\"\n )\n for column in df.columns:\n data_type = df[column].dtype\n sql_data_type = python_to_sql_type(data_type)\n\n table.addColumn(column, sql_data_type)\n\n schema.addTable(table)\n\n if not self.functions:\n logger.debug(\"No custom functions defined.\")\n\n for function_description in self.function_list:\n name = function_description.name\n sql_return_type = python_to_sql_type(function_description.return_type)\n if function_description.aggregation:\n 
logger.debug(f\"Adding function '{name}' to schema as aggregation.\")\n dask_function = DaskAggregateFunction(name, sql_return_type)\n else:\n logger.debug(f\"Adding function '{name}' to schema as scalar function.\")\n dask_function = DaskScalarFunction(name, sql_return_type)\n\n dask_function = self._add_parameters_from_description(\n function_description, dask_function\n )\n\n schema.addFunction(dask_function)\n\n return schema", "def apply_patch():\n assert BaseDatabaseSchemaEditor is not None\n\n def _create_unique_sql(self, *args, **kwargs):\n from django.db.backends.ddl_references import IndexName\n\n statement = orig_create_unique_sql(self, *args, **kwargs)\n\n if statement is not None:\n index_name = statement.parts['name']\n\n if (isinstance(index_name, IndexName) and\n index_name.create_index_name == self._create_index_name):\n # The result will be unquoted. Let's quote it.\n index_name.create_index_name = lambda *args, **kwargs: \\\n self.quote_name(self._create_index_name(*args, **kwargs))\n\n return statement\n\n orig_create_unique_sql = BaseDatabaseSchemaEditor._create_unique_sql\n BaseDatabaseSchemaEditor._create_unique_sql = _create_unique_sql" ]
[ "0.6689222", "0.6531303", "0.60537446", "0.6039341", "0.60350144", "0.5883875", "0.5845651", "0.5813085", "0.5690668", "0.5641946", "0.56057465", "0.54915494", "0.54909086", "0.5434344", "0.542425", "0.5340224", "0.53072643", "0.5284575", "0.52605635", "0.5229555", "0.52235705", "0.5215396", "0.51270014", "0.5114374", "0.5099848", "0.5091163", "0.50850743", "0.5079301", "0.5066776", "0.5061453", "0.5058868", "0.50537986", "0.50251365", "0.50063413", "0.5003117", "0.49837986", "0.49823472", "0.4980061", "0.49774334", "0.49630207", "0.49608415", "0.49564832", "0.49248683", "0.49137408", "0.49054983", "0.4888428", "0.48686206", "0.48596603", "0.48417622", "0.48306552", "0.48175353", "0.47841254", "0.47764885", "0.47700122", "0.47664386", "0.47572923", "0.47393766", "0.47371304", "0.4725905", "0.4722962", "0.47181255", "0.47132292", "0.4712522", "0.47057188", "0.47056517", "0.46860245", "0.46845675", "0.46844116", "0.46684757", "0.46683952", "0.46643776", "0.46638516", "0.46638516", "0.46638516", "0.46638516", "0.46638516", "0.46638516", "0.46638516", "0.46638516", "0.46638516", "0.4663825", "0.4663445", "0.46563223", "0.4647813", "0.46442434", "0.4643868", "0.46406853", "0.46243957", "0.46223035", "0.4617757", "0.46147448", "0.46147442", "0.46130097", "0.45996097", "0.45981187", "0.45927855", "0.45847002", "0.4576712", "0.4568765", "0.45627815" ]
0.82155335
0
Validate a resource of the given type with specified ID already exists.
def _DatasetExists(dataset_id, project_id): client = GetApiClient() service = client.datasets get_request_type = GetApiMessage('BigqueryDatasetsGetRequest') get_request = get_request_type(datasetId=dataset_id, projectId=project_id) try: service.Get(get_request) return True except apitools_exceptions.HttpNotFoundError: log.info('Dataset with id [{}:{}] not found.'.format( project_id, dataset_id)) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_id_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'id': 1, 'name': 'New Type', 'units': 'Kilo-Frobnicate',\n 'description': 'A new filter type.'})", "def check_id(self, id):", "def testValidateId(self):\n #create a different person and try to use their id\n self.directory.invokeFactory(type_name=\"FSDPerson\",id=\"def456\",firstName=\"Joe\",lastName=\"Blow\")\n self.failUnless('def456' in self.person.validate_id('def456'))\n #create a different content object and try to use its id\n self.directory.invokeFactory(\"Document\", \"mydoc\")\n self.failUnless('mydoc' in self.person.validate_id('mydoc'))", "def check_repost_exists(type, id):\n \n try:\n soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))\n return True\n except HTTPError as e:\n if e.response.status_code == 404:\n db.mark_as_deleted(type, id)\n return False\n else:\n raise", "def test_resource_id(self):\n resource_id = '1234-1234-1234'\n\n # resource id does not exist\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 404)\n\n # create a resource ID\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # resource id exists\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # cannot create twice\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 409)\n\n # delete resource id\n resp = self.app.delete(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # resource id does not exist\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 404)", "def ResourceExists(self, name):\n pass", "def assert_has(self, type_id, message=''):\n if self.first_by_id(type_id):\n return\n raise AssertionError(message)", "def does_resource_exist(resource):\n try:\n resource.load()\n return True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'ValidationError':\n return False\n else:\n raise e", "def check_action_type_exists(\n action_type_id: int\n) -> None:\n if not db.session.query(db.exists().where(models.ActionType.id == action_type_id)).scalar():\n raise errors.ActionTypeDoesNotExistError()", "def check_id_existence(self, id:str):\n\n oc_prefix = id[:(id.index(':')+1)]\n\n if oc_prefix == 'doi:':\n vldt = doi.DOIManager() # you can use removeprefix(oc_prefix) from Python 3.9+\n return vldt.exists(id.replace(oc_prefix, '', 1)) # todo: use id.replace(oc_prefix, '', 1) for Python < v.3.9\n if oc_prefix == 'isbn:':\n vldt = isbn.ISBNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'issn:':\n vldt = issn.ISSNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'orcid:':\n vldt = orcid.ORCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmcid:':\n vldt = pmcid.PMCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmid:':\n vldt = pmid.PMIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'ror:':\n vldt = ror.RORManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if 
oc_prefix == 'url:':\n vldt = url.URLManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'viaf:':\n vldt = viaf.ViafManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'wikidata:':\n vldt = wikidata.WikidataManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'wikipedia:':\n vldt = wikipedia.WikipediaManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))", "def test_create_id_type(self):\n self.assertIsInstance(Rectangle.create().id, int)", "def test_duplicate_flavorids_fail(self):\n flavorid = 'flavor1'\n instance_types.create('name one', 256, 1, 120, 200, flavorid)\n self.assertRaises(exception.InstanceTypeIdExists,\n instance_types.create,\n 'name two', 256, 1, 120, 200, flavorid)", "def test_difference_id(self):\n self.assertFalse(\n self.factory.create_type('iphone') is self.factory.create_type(\n 'iphone'))", "def validate_id(cls, id: str) -> ObjectId:\n return ObjectId(id)", "def test_id_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_id(-1)", "def check_unique_element_id(cls, instance, element_id):\n if not element_id:\n return\n try:\n element_ids = instance.placeholder.page.cascadepage.glossary['element_ids'][instance.language]\n except (AttributeError, KeyError, ObjectDoesNotExist):\n return\n else:\n for key, value in element_ids.items():\n if str(key) != str(instance.pk) and element_id == value:\n msg = _(\"The element ID '{}' is not unique for this page.\")\n raise ValidationError(msg.format(element_id))", "def test_duplicate_id(self):\n with self.assertRaises(ValueError):\n REANATemplate(\n workflow_spec={},\n parameters=[\n pd.parameter_declaration('A', index=1),\n pd.parameter_declaration('B'),\n pd.parameter_declaration('C'),\n pd.parameter_declaration('A', index=2),\n pd.parameter_declaration('E', index=1)\n ],\n validate=True\n )", "def test_create_resource(self):\n test_resource = ResourceTypeName.get() # the name of the resource type to create\n\n # the resource type should not exist yet\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 404)\n\n # create the resource type\n resp = self.app.post(f'/v1/resource/{test_resource}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # the resource type exists\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # delete the resource type\n resp = self.app.delete(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # the resource type should not exist\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 404)", "def _validate_create(context, db_api, create_data, model_name):\n ipaddrlist = utils.get_addresses(create_data['ip_address'])\n\n if not ipaddrlist:\n errors = (_(\"Failed to register (%s)\" +\n \". 
The (%s) IP Address (%s) could not \"\n \"be resolved.\")\n % (model_name, model_name, create_data['ip_address']))\n raise exception.AddressResolutionFailure(reason=errors)\n LOG.info(\"IP/FQDN for the \" + model_name + \" %s is %s\" % (\n create_data['ip_address'],\n ipaddrlist))\n try:\n get_all = getattr(db_api, \"get_all_%ss\" % model_name)\n res_data = get_all(context)\n if not res_data:\n # No registered resources\n LOG.info(\"No registered %s\" % model_name)\n return\n except Exception:\n errors = (_(\"Failed to retrieve data for (%s) %s\")\n % (model_name, create_data.get('ip_address')))\n raise exception.InternalFailure(reason=errors)\n name = create_data.get(\"name\")\n valid_name = _validate_duplicate_names(res_data, name)\n if not valid_name:\n msg = (_(\"Two different (%s) with same \"\n \"name cannot be registered\") % model_name)\n raise exception.ResourceExists(reason=msg)\n registered_data = []\n for data in res_data:\n registered_data.append(data['ip_address'])\n\n if set(ipaddrlist).intersection(set(registered_data)):\n errors = (_(\"(%s) by ip_address (%s) already exists.\")\n % (model_name, create_data['ip_address']))\n raise exception.ResourceExists(reason=errors)", "def test_will_not_get_instance_type_with_bad_id(self):\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, 'asdf')", "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def resourceExists(node, name = None, resourceType = PROP_RESOURCE_TYPE_RESOURCE):\n \n exists = False\n if not node:\n return exists\n try:\n myResourceType = \"\"\n if name:\n # make sure it's unicode:\n if not isinstance(name, str):\n name = name.decode(sys.getfilesystemencoding())\n url = node.url\n if url.endswith(\"/\"):\n url = url + name\n else:\n url = url + \"/\" + name\n newNode = ResourceStorer(url, node.connection)\n element = newNode.readProperty(NS_DAV, PROP_RESOURCE_TYPE)\n else: # name is \"None\":\n element = node.readProperty(NS_DAV, PROP_RESOURCE_TYPE)\n \n if len(element.children) > 0:\n myResourceType = element.children[0].name\n if resourceType == myResourceType or resourceType == PROP_RESOURCE_TYPE_RESOURCE:\n exists = True\n else:\n exists = False\n except WebdavError as wderr:\n if wderr.code == CODE_NOT_FOUND:\n # node doesn't exist -> exists = False:\n exists = False\n else:\n # another exception occured -> \"re-raise\" it:\n raise\n return exists", "def is_resource_id(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return is_resource_id(schema_obj.metadata)\n elif isinstance(schema_obj, schema.Struct):\n return schema_obj.full_name == 'weave.common.ResourceId'\n return False", "def is_existing_object(did):\n if not d1_gmn.app.did.is_existing_object(did):\n raise d1_common.types.exceptions.NotFound(\n 0,\n \"Identifier is {}. Expected a Persistent ID (PID) for an existing \"\n 'object. 
id=\"{}\"'.format(d1_gmn.app.did.classify_identifier(did), did),\n identifier=did,\n )", "def test_will_not_get_instance_type_by_unknown_id(self):\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, 10000)", "def validate_id(ctx, param, value):\r\n try:\r\n if value is None:\r\n return None\r\n obj = optiga.Object(int(value, base=16))\r\n obj = obj.meta\r\n return int(value, base=16)\r\n except (ValueError, TypeError, OSError) as no_object:\r\n raise click.BadParameter(\"Object ID doesn't exist. Please align with the Objects map\") from no_object", "def taco_test_put_error_requires_id(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '501'))", "def _create_resource(resource, **options):\n global _existing\n\n if _existing[resource]:\n print('{r} {k}:{v} already exists with id {i}.'.format(\n r=resource,\n k=args.tag,\n v=args.role,\n i=_existing[resource].id\n ))\n return True\n\n print('{v} a {r} with parameters: {p}...'.format(\n v='Would create' if dry else 'Creating',\n r=resource,\n p=str(options)\n ))\n\n if dry:\n return True\n\n # All easy cases out of the way, we now need to actually create something.\n r = None\n try:\n r = getattr(ec2, definitions[resource].create)(** options)\n # In some cases (instance) a list is returned instead of one item. Quack!\n try:\n r = r[0]\n except:\n pass\n _tag_resource(r)\n print('... {r} id {i} created.'.format(\n r=resource,\n i=r.id\n ))\n _existing[resource] = r\n return True\n except Exception as e:\n if r is None:\n print('Could not create resource {r}.'.format(\n r=resource\n ))\n traceback.print_exc()\n else:\n print('Could not tag resource {r}, id {i}.'.format(\n r=resource,\n i=r.id\n ))\n traceback.print_exc()\n _destroy_resource(resource)\n return False", "def validate_identifier(self, identifier):\n pass", "def needs_unique_instance(type_):\n return type_ in unique_instance_types", "def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. Please, choose another')", "def identify_id(id: str) -> bool:\n return validate_handle(id)", "def test_create_id_identity(self):\n self.assertIs(Rectangle.create(id=True).id, True)\n self.assertIs(Rectangle.create(id=type).id, type)\n self.assertIs(Rectangle.create(id=None).id, None)", "def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value", "def validate(self):\n # Validate all mandatory keys are present\n if not self.mandatory_keys.issubset(set(self.resource)):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] miss a \"\n \"mandatory key. Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource does not contains extra keys\n if not set(self.resource).issubset(self.keys):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] contains \"\n \"extra keys. 
Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource value type\n for key, value in self.resource.items():\n if not isinstance(value, self.__class__.MODEL[key][0]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data type (expected: %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][0]))\n # For str type validate the content as according the regex\n if self.__class__.MODEL[key][0] is str:\n if not re.match(self.__class__.MODEL[key][1], value):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))\n # For list type validate the content as according the regex\n if self.__class__.MODEL[key][0] is list:\n if not all([re.match(self.__class__.MODEL[key][1], v)\n for v in value]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))", "def test_check_input_id(self):\n r3 = Rectangle(10, 2, 0, 0, 12)\n self.assertEqual(r3.id, 12)", "def resource(\n name: str,\n *,\n mime_type: str,\n parent_folder_id: drive_api.ResourceID,\n drive_service: Optional[discovery.Resource] = None,\n) -> drive_api.ResourceID:\n command = request(\n name,\n mime_type=mime_type,\n parent_folder_id=parent_folder_id,\n drive_service=drive_service,\n )\n result = command.execute()\n new_id: drive_api.ResourceID = result.get(\"id\")\n return new_id", "def test_bad_id(self):\n r1 = Square(10, 2)\n self.assertEqual(r1.id, 1)\n\n r2 = Square(2, 10)\n self.assertEqual(r2.id, 2)", "def validatePredefinedType(self, type: int) -> bool:\n ...", "def validate_resources(self, folder, resources):\r\n self.validate_files_exist(folder, resources)\r\n self.validate_no_duplicate_paths(resources)", "def __validateResourceStateEntry(self, resource: Dict[str, str]):\n if AZ_RESOURCE_ID not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' % (\n self.fullName, AZ_RESOURCE_ID))\n if SID not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' % (self.fullName, SID))\n if ARM_TYPE not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' 
% (self.fullName, ARM_TYPE))", "def validate_id(self, value):\n try:\n Tutor.objects.get(pk=value)\n except Tutor.DoesNotExist:\n raise serializers.ValidationError('Tutor object with id \\'{}\\' does not exist.'.format(value))\n return value", "def test_readable_id_invalid(readable_id_value):\n program = ProgramFactory.build(readable_id=readable_id_value)\n with pytest.raises(ValidationError):\n program.save()\n course = CourseFactory.build(program=None, readable_id=readable_id_value)\n with pytest.raises(ValidationError):\n course.save()", "def test_api_update_book_with_id_does_not_exist(self):\n\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'book with id 1 does not exist')\n\t\tself.assertTrue(book_res['status'] == 'error')", "def test_check_id(self):\n r1 = Rectangle(2, 10)\n r2 = Rectangle(2, 10)\n r3 = Rectangle(2, 10)\n self.assertGreater(r2.id, r1.id)\n self.assertGreater(r3.id, r2.id)", "def create_resource(self, resource, resource_data, resource_type, **kwargs):\n if not isinstance(resource, Resource):\n raise KeyError(\"'resource' should be an instance of Resource\")\n if not isinstance(resource_data, ResourceData) or not resource_data.value:\n raise KeyError(\n \"'resource_data' should be ResourceData with 'value' attribute\")\n if not isinstance(resource_type, ResourceType):\n raise KeyError(\"'resource_type' should be an instance of ResourceType\")\n if not kwargs or 'feed_id' not in kwargs:\n raise KeyError('Variable \"feed_id\" id mandatory field!')\n\n resource_id = urlquote(resource.id, safe='')\n r = self._post('entity/f;{}/resource'.format(kwargs['feed_id']),\n data={\"name\": resource.name, \"id\": resource.id,\n \"resourceTypePath\": \"rt;{}\"\n .format(resource_type.path.resource_type_id)})\n if r:\n r = self._post('entity/f;{}/r;{}/data'\n .format(kwargs['feed_id'], resource_id),\n data={'role': 'configuration', \"value\": resource_data.value})\n else:\n # if resource or it's data was not created correctly, delete resource\n self._delete('entity/f;{}/r;{}'.format(kwargs['feed_id'], resource_id))\n return r", "def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])", "def resource_exists(self, resource):\n products = Product.select(self.env, where={'name' : resource.id})\n return bool(products)", "def test_cannot_add_existing_asset_code(self):\n self.assertEqual(self.all_assets.count(), 1)\n asset_code = \"IC002\"\n new_asset = Asset(asset_code, \"SN001\")\n with self.assertRaises(ValidationError):\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 1)", "def send_error_missing_id(message, obj_type):\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Missing id',\n \"object_type\": obj_type,\n \"description\": message\n }}), 400)", "def test_init_id_identity(self):\n self.assertIs(Rectangle(1, 1, id=True).id, True)\n self.assertIs(Rectangle(1, 1, id=type).id, type)", "def id_exists(host_id: int,\n database_connection: mysql.connector.connect) -> bool:\n return validate_id(host_id, database_connection)", "def 
_validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def validate(self):\n try:\n # update _resource to have default values from the schema\n self._resource = self.schema(self._resource)\n except MultipleInvalid as e:\n errors = [format_error(err, self.resource_type) for err in e.errors]\n raise exceptions.ValidationError({'errors': errors})\n\n yield self.check_unique()", "def _uniqueness_check(self, cls, unique_in = None, **attr):\n # under the same datasource, only 1 subsystem, 1 neuropil, 1 tract of the name can exist\n # under the same neuropil, only 1 neuron of the name can exist\n # multiple (collections of) synapses can exist between two neurons\n if cls == 'Species':\n tmp = self.sql_query(\n \"\"\"select from Species where (name = \"{name}\" or \"{name}\" in synonyms) and stage = \"{stage}\" and sex = \"{sex}\" \"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Species {name} at {stage} stage ({sex}) already exists with rid = {rid}\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Species {name} (as its synonym) at {stage} stage ({sex}) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = obj._id, formalname = obj.name))\n elif cls == 'DataSource':\n objs = self.find_objs('DataSource', name=attr['name'], version=attr['version'])\n #if self.exists(cls, name = attr['name'], version = attr['version']):\n if len(objs):\n raise NodeAlreadyExistError(\"\"\"{} Node with attributes {} already exists with rid = {}\"\"\".format(\n cls, ', '.join([\"\"\"{} = {}\"\"\".format(key, value) \\\n for key, value in attr.items()]), objs[0]._id))\n elif cls == 'Neurotransmitter':\n tmp = self.sql_query(\n \"\"\"select from Neurotransmitter where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n name = attr['name']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Neurotransmitter {name} already exists with rid = {rid}\"\"\".format(\n name = attr['name'], rid = objs[0]._id))\n return objs\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Neurotransmitter {name} (as its synonym) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], rid = obj._id, formalname = obj.name))\n elif cls in ['Subsystem', 'Neuropil', 'Subregion', 'Tract']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if 
len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'], formalname = obj.name,\n ds = unique_in.name,\n version = unique_in.version, rid = obj._id))\n # Alternatively, try:\n # tmp = self.sql_query(\n # \"\"\"select from {cls} where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n # cls = cls, name = attr['name']))\n # ds = tmp.owned_by(cls = 'DataSource').has(rid = datasource)\n # if len(ds):\n # tmp1 = tmp.has(name = attr['name'])\n # if len(tmp1.owned_by(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = datasource.name,\n # version = datasource.version))\n # else:\n # all_synonym_objs = (tmp - tmp1).node_objs\n # for obj in objs:\n # if len(QueryWrapper.from_rids(obj._id).has(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = datasource.name,\n # version = datasource.version))\n\n # Alternatively 2, try: (will be slow when it has a lot of Owns edges)\n # tmp = sql_query(\n # \"\"\"\n # select from (select expand(out('Owns')[@class = \"{cls}\"]) from {rid}) where name = \"{name}\" or \"{name}\" in synonyms\n # \"\"\"\n # )\n # elif cls in ['Subregion']:\n # if not isinstance(unique_in, models.Neuropil):\n # raise TypeError('To check the uniqueness of a {} instance, unique_in must be a Neuropil object'.format(cls))\n # tmp = self.sql_query(\n # \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='ucls' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n # rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n # if len(tmp):\n # objs = tmp.node_objs\n # if attr['name'] in [obj.name for obj in objs]:\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = unique_in.name))\n # else:\n # for obj in objs:\n # if name in obj.synonyms:\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = unique_in.name))\n elif cls in ['Neuron', 'NeuronFragment']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where uname = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 
1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists with rid = {rid}, under DataSource {ds} version {version}\"\"\".format(\n cls = cls, name = attr['name'], rid = objs[0]._id,\n ds = unique_in.name,\n version = unique_in.version))\n elif cls == 'Circuit':\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n elif cls == 'ArborizationData':\n if not isinstance(unique_in, (models.Neuron, models.Synapse)):\n raise TypeError('To check the uniqueness of a ArborizationData instance, unique_in must be a Neuron or a Synapse object')\n tmp = self.sql_query(\n \"\"\"select from (select expand(out(HasData)) from {rid}) where @class = 'ArborizationData' \"\"\".format(rid = unique_in._id))\n if len(tmp):\n raise NodeAlreadyExistError(\"\"\"ArborizationData already exists for {node} {uname} with rid = {rid}. Use NeuroArch.update_{node}_arborization to update the record\"\"\".format(\n node = unique_in.element_type.lower(), rid = tmp.node_objs[0]._id, uname = unique_in.uname))\n else:\n raise TypeError('Model type not understood.')\n return True", "def is_resource(space, w_obj):\n return space.wrap(space.is_resource(w_obj))", "def is_valid_pid_for_create(did):\n if not d1_gmn.app.did.is_valid_pid_for_create(did):\n raise d1_common.types.exceptions.IdentifierNotUnique(\n 0,\n 'Identifier is already in use as {}. 
did=\"{}\"'.format(\n d1_gmn.app.did.classify_identifier(did), did\n ),\n identifier=did,\n )", "def test_validation_idaa_valid_and_exists(self):\n expected_discrepancies = 0\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][self.create_idaa_program_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertTrue(upload_program.is_valid())\n self.assertTrue(upload_program.tola_program_exists)\n self.assertEquals(upload_program.discrepancy_count, expected_discrepancies)", "def _check_for_preexisting_identifier(self, doi: Doi):\n # The database expects each field to be a list.\n query_criterias = {\"ids\": [doi.pds_identifier]}\n\n # Query database for rows with given id value.\n columns, rows = self._database_obj.select_latest_rows(query_criterias)\n\n for row in rows:\n existing_record = dict(zip(columns, row))\n\n if doi.doi != existing_record[\"doi\"]:\n raise IllegalDOIActionException(\n f\"There is already a DOI {existing_record['doi']} associated \"\n f\"with PDS identifier {doi.pds_identifier} \"\n f\"(status={existing_record['status']}).\\n\"\n f\"You cannot modify a DOI for an existing PDS identifier.\"\n )", "def existence_validation(self, request, *args, **kwargs):\n lection_id = self.kwargs.get('lection_id')\n course_id = self.kwargs.get('course_id')\n lection = Lection.objects.filter(Q(course_id=course_id) & Q(id=lection_id)).first()\n error = {'error': f\"Either lection with ID{lection_id} doesn't belong to course with ID{course_id}, \"\n f\"or doesn't exist.\"}\n if lection:\n return method(self, request, *args, **kwargs)\n return Response(error, status=status.HTTP_403_FORBIDDEN)", "def test_init_id_type(self):\n self.assertIsInstance(Rectangle(1, 1).id, int)\n self.assertIsInstance(Rectangle(1, 1, id=None).id, int)", "def add_existing_key_fail(self, data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)", "def test_get_by_id_wrong_type(self):\n assert ExampleUserModel.get_by_id(\"xyz\") is None", "def test_get_event_type_by_id_invalid_id(self):\n\t\trequest = self.client.get('/api/event_type/esper/0', follow=True)\n\t\tself.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)", "def test_resource_exists(self):\r\n\t\tself.assertTrue(self._configuration_.resources().has_key(\"AddWordTaskRepeat\") and self._configuration_.resources().has_key(\"RemoveWordTaskRepeat\"))", "def test_register_subscription_existing_type(self):\n mock_type = Mock()\n bus = event_bus._event_bus\n bus._subscriptions[mock_type] = [\n EventSubscription(mock_type, lambda _: None)]\n new_subscription = EventSubscription(mock_type, lambda _: True)\n\n reg_id = event_bus.register_subscription(new_subscription)\n\n self.assertTrue(new_subscription in bus._subscriptions[mock_type])\n self.assertTrue(reg_id in bus._registration_id_map.keys())", "def test_add_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertNotIn(b1, s1.catalogue)\n s1.add_resource(b1)\n self.assertIn(b1, s1.catalogue)\n s1.add_resource(b1)\n self.assertEqual(len(s1.catalogue), 1)", "def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID 
{self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")", "def _resource_name_check(self, resource_name):\n return self._name_check(resource_name, 'resources')", "def validate(self, entity_id):\n\n safe_entity = self.find_by_id(entity_id)\n if not safe_entity:\n raise EntityNotFound(\"The param_id = {} is not valid.\".format(entity_id))\n\n return safe_entity", "def check_status(self, id):\n raise NotImplementedError()", "def test_cannot_add_existing_serial_number(self):\n self.assertEqual(self.all_assets.count(), 1)\n serial_number = \"SN001\"\n new_asset = Asset(serial_number, \"SN001\")\n with self.assertRaises(ValidationError):\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 1)", "def delete_does_not_exist_fail(self, id_):\n assert is_404(self.get((id_, self.delete_url)))\n self.delete_fail(id_, 'does not exist')", "def test_readable_id_valid(readable_id_value):\n program = ProgramFactory.build(readable_id=readable_id_value)\n program.save()\n assert program.id is not None\n course = CourseFactory.build(program=None, readable_id=readable_id_value)\n course.save()\n assert course.id is not None", "def validated_id(cls, name):\n if name:\n if name in cls._ids:\n return cls._ids[name]\n if cls.validated_name(name):\n if Accounts.exists(name):\n return cls.get_id(name)\n return None", "def ResourceExists(resource_name, search_user_paths=True):\n try:\n ResourcePath(resource_name, search_user_paths)\n return True\n except ResourceNotFound:\n return False", "def test_get_event_type_by_id_not_found(self):\n\t\tevent_type = EventType.objects.get(name=\"asd\")\n\t\trequest = self.client.get('/api/event_type/esper/' + str(event_type.id + 1), follow=True)\n\t\tself.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)", "def test_create_with_bad_id(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n post_data = {'source_type': 'spotify', 'source_id': 00}\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n data = json.loads(resp.content)\n new_records_count = Track.objects.all().count()\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(existing_records_count, new_records_count)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def is_resource(self, path):\n # type: (Text) -> bool\n raise FileNotFoundError", "def validate_new_person(self, person_id):\n\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Person WHERE id == %s\"\"\", (person_id,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n return False\n return True", "async def test_create_invalid_field(self):\n data = {'id': 33, 'value': 'foo'}\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.create(data)\n self.assertEqual(\n 'Error: \"id\": 33 is not a string: {\\'id\\': \\'\\'}',\n str(cm.exception))", "def update_object_type(self, object_type=None):\n # Return Value\n # ------------\n # {success: true}\n #\n if not is_basic_identifier(object_type.name):\n raise BadRequest(\"Invalid object_type name: %s\" % object_type.name)\n if not is_yaml_string_valid(object_type.definition):\n raise BadRequest(\"Invalid YAML definition\")\n object_id, version = self.clients.resource_registry.update(object_type)\n return object_id", "def find_by_id(cls, id):\n\t\tif id:\n\t\t\treturn cls.query.filter_by(id=id).first()\n\t\treturn 
{\n\t\t\t'message': 'id field is required',\n\t\t\t'status': 'Failed'\n\t\t\t}, 400", "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def test_id_uniqueness(self):\n user_2 = User()\n self.assertNotEqual(self.user_1.id, user_2.id)", "def sanity_check(cls, data, ticket_id=None):\n if ticket_id is not None:\n ticket_id = int(ticket_id)\n ticket = DB_TICKET_TABLE.get(doc_id=ticket_id)\n if not ticket:\n flask_restful.abort(404, message=f\"ticket '{ticket_id}' not found!\")\n else:\n ticket = None\n\n # A custom validator to make sure the aportio ID remains unique\n def validate_aportio_id(aportio_id):\n if not isinstance(aportio_id, str):\n raise ValueError(\"expected string type for aportio ID\")\n TicketQuery = Query()\n ticket_with_aportio_id = DB_TICKET_TABLE.get(\n TicketQuery.aportio_id == aportio_id)\n if ticket_with_aportio_id and (ticket_with_aportio_id.doc_id != ticket_id):\n raise ValueError(f\"a ticket with aportio ID '{aportio_id}' \"\n f\"exists already\")\n return aportio_id\n\n # A custom validator for the short title\n def validate_short_title(text):\n return _str_len_check(text, cls.SHORT_TITLE_MIN_LEN, cls.SHORT_TITLE_MAX_LEN)\n\n # A custom validator for the long text\n def validate_long_text(text):\n return _str_len_check(text, cls.LONG_TEXT_MIN_LEN, cls.LONG_TEXT_MAX_LEN)\n\n # Perform some sanity checking of the provided attributes\n data = _dict_sanity_check(data,\n mandatory_keys = [\n (\"aportio_id\", validate_aportio_id),\n (\"customer_id\", Customer.exists),\n (\"short_title\", validate_short_title),\n (\"user_id\", User.exists),\n (\"status\", Ticket.valid_status),\n (\"classification\", Ticket.valid_classification),\n (\"long_text\", validate_long_text)\n ],\n optional_keys = [\n (\"custom_fields\", dict)\n ],\n obj=ticket)\n # Now check whether this user is even associated with that customer\n cust_id = data['customer_id']\n user_id = data['user_id']\n assoc_q = Query()\n assoc_data = DB_USER_CUSTOMER_RELS_TABLE.search((assoc_q.customer_id == cust_id) &\n (assoc_q.user_id == user_id))\n if not assoc_data:\n flask_restful.abort(400, message=f\"Bad Request - user '{user_id}' is not \"\n f\"associated with customer '{cust_id}'\")\n return data, ticket", "def test_exists(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n BaseSample.exists('SKM7.640188', SampleTemplate(1))", "def test_validation_idaa_valid_and_does_not_exists(self):\n idaa_index = 1\n expected_discrepancies = 0\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertTrue(upload_program.is_valid())\n self.assertFalse(upload_program.tola_program_exists)\n self.assertEquals(upload_program.discrepancy_count, expected_discrepancies)", "def _check_id(self, keyword):\n if keyword not in self.request.data:\n return '{} 
parameter is missing'.format(keyword)\n \"\"\" Check if <keyword> parameter is not None \"\"\"\n if self.request.data[keyword] == '':\n return '{} ID cannot be None'.format(keyword)\n \"\"\" Check if <keyword> parameter is > 0 \"\"\"\n if int(self.request.data[keyword]) < 1:\n return '{} ID must be an integer > 0'.format(keyword)", "def exists(self, obj):\n return False", "def _check_subject_id(subject_id):\n if (subject_id and\n len(subject_id) > models.Subject.id.property.columns[\n 0].type.length):\n raise exception.SubjectNotFound()", "def test_validate_post_existing_resource(client):\n response = client.post(\n '/user/',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result", "def validate_item_id(idx):\n item = ItemModel.query.get(idx)\n if item is None:\n raise NotFound(f'Item with id {idx} not found.')\n return item", "def test_data_type_id(self):\n self.assertTrue(self.tester.data_type(ret_id=True), 2)", "def check_person_existence(self, searched_person_id):\n self.__load_persons_from_file_into_memory()\n return super().check_person_existence(searched_person_id)", "def check_instance(self, class_name, inst_id, stored_objects):\n '''get '<class_name>.id' to FileStorage.__objects key format'''\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n \"\"\"given id does not exist\"\"\"\n print(\"** no instance found **\")\n instance = False\n return instance", "def test_create_already_existing_campaign_fails(self):\n test_campaign = return_canned_campaign()\n self.test_org.campaign = [test_campaign]\n test_campaign.save()\n response = self.client.post(\n self.endpoint_url,\n json={\n \"logo\": test_campaign.logo,\n \"name\": test_campaign.name,\n \"organisations\": [self.test_org.id],\n \"url\": test_campaign.url,\n },\n headers={\"Authorization\": self.session_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 409)\n self.assertEqual(response_body[\"Error\"], \" Campaign name already exists\")\n self.assertEqual(response_body[\"SubCode\"], \"NameExists\")", "def exists(self, Search_ID):\n if self.get_id(Search_ID) is None:\n return False\n else:\n return True", "def validateID(self, id : int) -> int:\n # If ID is a string, ensure it can be casted to an int before casting and returning.\n if type(id) == str:\n if not lib.stringTyping.isInt(id):\n raise TypeError(\"user ID must be either int or string of digits\")\n return int(id)\n # If ID is not a string, nor an int, throw an error.\n elif type(id) != int:\n raise TypeError(\"user ID must be either int or string of digits\")\n # ID must be an int, so return it.\n return id", "def id_exists(show_id: int,\n database_connection: mysql.connector.connect) -> bool:\n return validate_id(show_id, database_connection)" ]
[ "0.6422405", "0.64173734", "0.6142269", "0.61312246", "0.5939884", "0.5908327", "0.5880398", "0.5806814", "0.577634", "0.5734848", "0.5730072", "0.56382006", "0.55637", "0.5501549", "0.5488068", "0.5485431", "0.5476124", "0.5466836", "0.54339", "0.5411698", "0.5398409", "0.5372584", "0.53484315", "0.53477323", "0.5336371", "0.5327411", "0.5316241", "0.5294655", "0.5292505", "0.52906764", "0.5290138", "0.52719474", "0.52706325", "0.52505064", "0.5224689", "0.5216522", "0.5177424", "0.5173158", "0.5164601", "0.51274157", "0.51106805", "0.5106305", "0.50903237", "0.5088144", "0.50764173", "0.5063056", "0.5062557", "0.50492555", "0.50405514", "0.5026922", "0.5023985", "0.5008982", "0.50059426", "0.5004298", "0.5000435", "0.49945658", "0.49936256", "0.49927726", "0.4989894", "0.49898884", "0.49880224", "0.49869823", "0.49819273", "0.49790868", "0.49758255", "0.49716154", "0.49641553", "0.49623126", "0.49579415", "0.49545392", "0.49482954", "0.49476528", "0.49418265", "0.49415585", "0.4931851", "0.49189654", "0.49133888", "0.4912833", "0.49104878", "0.49034798", "0.4903424", "0.4892134", "0.48883384", "0.4884316", "0.48806715", "0.48744503", "0.48673403", "0.48648918", "0.48645955", "0.48611632", "0.48581317", "0.4855877", "0.48537856", "0.48529413", "0.48418126", "0.48340088", "0.482996", "0.4826713", "0.48135507", "0.4812498", "0.48092806" ]
0.0
-1
Validate a resource of the given type with specified ID already exists.
def _TableExists(dataset_id, table_id, project_id):
  client = GetApiClient()
  service = client.tables
  get_request_type = GetApiMessage('BigqueryTablesGetRequest')
  get_request = get_request_type(datasetId=dataset_id,
                                 tableId=table_id,
                                 projectId=project_id)
  try:
    service.Get(get_request)
    return True
  except apitools_exceptions.HttpNotFoundError:
    log.info('Table with id [{}:{}:{}] not found.'.format(
        project_id, dataset_id, table_id))
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_id_already_exists(self) -> None:\n with pytest.raises(IntegrityError):\n ObservationType.add({'id': 1, 'name': 'New Type', 'units': 'Kilo-Frobnicate',\n 'description': 'A new filter type.'})", "def check_id(self, id):", "def testValidateId(self):\n #create a different person and try to use their id\n self.directory.invokeFactory(type_name=\"FSDPerson\",id=\"def456\",firstName=\"Joe\",lastName=\"Blow\")\n self.failUnless('def456' in self.person.validate_id('def456'))\n #create a different content object and try to use its id\n self.directory.invokeFactory(\"Document\", \"mydoc\")\n self.failUnless('mydoc' in self.person.validate_id('mydoc'))", "def check_repost_exists(type, id):\n \n try:\n soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))\n return True\n except HTTPError as e:\n if e.response.status_code == 404:\n db.mark_as_deleted(type, id)\n return False\n else:\n raise", "def test_resource_id(self):\n resource_id = '1234-1234-1234'\n\n # resource id does not exist\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 404)\n\n # create a resource ID\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # resource id exists\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # cannot create twice\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 409)\n\n # delete resource id\n resp = self.app.delete(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # resource id does not exist\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 404)", "def ResourceExists(self, name):\n pass", "def assert_has(self, type_id, message=''):\n if self.first_by_id(type_id):\n return\n raise AssertionError(message)", "def does_resource_exist(resource):\n try:\n resource.load()\n return True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'ValidationError':\n return False\n else:\n raise e", "def check_action_type_exists(\n action_type_id: int\n) -> None:\n if not db.session.query(db.exists().where(models.ActionType.id == action_type_id)).scalar():\n raise errors.ActionTypeDoesNotExistError()", "def check_id_existence(self, id:str):\n\n oc_prefix = id[:(id.index(':')+1)]\n\n if oc_prefix == 'doi:':\n vldt = doi.DOIManager() # you can use removeprefix(oc_prefix) from Python 3.9+\n return vldt.exists(id.replace(oc_prefix, '', 1)) # todo: use id.replace(oc_prefix, '', 1) for Python < v.3.9\n if oc_prefix == 'isbn:':\n vldt = isbn.ISBNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'issn:':\n vldt = issn.ISSNManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'orcid:':\n vldt = orcid.ORCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmcid:':\n vldt = pmcid.PMCIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'pmid:':\n vldt = pmid.PMIDManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'ror:':\n vldt = ror.RORManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if 
oc_prefix == 'url:':\n vldt = url.URLManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'viaf:':\n vldt = viaf.ViafManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'wikidata:':\n vldt = wikidata.WikidataManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))\n if oc_prefix == 'wikipedia:':\n vldt = wikipedia.WikipediaManager()\n return vldt.exists(id.replace(oc_prefix, '', 1))", "def test_create_id_type(self):\n self.assertIsInstance(Rectangle.create().id, int)", "def test_duplicate_flavorids_fail(self):\n flavorid = 'flavor1'\n instance_types.create('name one', 256, 1, 120, 200, flavorid)\n self.assertRaises(exception.InstanceTypeIdExists,\n instance_types.create,\n 'name two', 256, 1, 120, 200, flavorid)", "def test_difference_id(self):\n self.assertFalse(\n self.factory.create_type('iphone') is self.factory.create_type(\n 'iphone'))", "def validate_id(cls, id: str) -> ObjectId:\n return ObjectId(id)", "def test_id_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_id(-1)", "def check_unique_element_id(cls, instance, element_id):\n if not element_id:\n return\n try:\n element_ids = instance.placeholder.page.cascadepage.glossary['element_ids'][instance.language]\n except (AttributeError, KeyError, ObjectDoesNotExist):\n return\n else:\n for key, value in element_ids.items():\n if str(key) != str(instance.pk) and element_id == value:\n msg = _(\"The element ID '{}' is not unique for this page.\")\n raise ValidationError(msg.format(element_id))", "def test_duplicate_id(self):\n with self.assertRaises(ValueError):\n REANATemplate(\n workflow_spec={},\n parameters=[\n pd.parameter_declaration('A', index=1),\n pd.parameter_declaration('B'),\n pd.parameter_declaration('C'),\n pd.parameter_declaration('A', index=2),\n pd.parameter_declaration('E', index=1)\n ],\n validate=True\n )", "def test_create_resource(self):\n test_resource = ResourceTypeName.get() # the name of the resource type to create\n\n # the resource type should not exist yet\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 404)\n\n # create the resource type\n resp = self.app.post(f'/v1/resource/{test_resource}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # the resource type exists\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # delete the resource type\n resp = self.app.delete(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n # the resource type should not exist\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=admin_headers)\n self.assertEqual(resp.status_code, 404)", "def _validate_create(context, db_api, create_data, model_name):\n ipaddrlist = utils.get_addresses(create_data['ip_address'])\n\n if not ipaddrlist:\n errors = (_(\"Failed to register (%s)\" +\n \". 
The (%s) IP Address (%s) could not \"\n \"be resolved.\")\n % (model_name, model_name, create_data['ip_address']))\n raise exception.AddressResolutionFailure(reason=errors)\n LOG.info(\"IP/FQDN for the \" + model_name + \" %s is %s\" % (\n create_data['ip_address'],\n ipaddrlist))\n try:\n get_all = getattr(db_api, \"get_all_%ss\" % model_name)\n res_data = get_all(context)\n if not res_data:\n # No registered resources\n LOG.info(\"No registered %s\" % model_name)\n return\n except Exception:\n errors = (_(\"Failed to retrieve data for (%s) %s\")\n % (model_name, create_data.get('ip_address')))\n raise exception.InternalFailure(reason=errors)\n name = create_data.get(\"name\")\n valid_name = _validate_duplicate_names(res_data, name)\n if not valid_name:\n msg = (_(\"Two different (%s) with same \"\n \"name cannot be registered\") % model_name)\n raise exception.ResourceExists(reason=msg)\n registered_data = []\n for data in res_data:\n registered_data.append(data['ip_address'])\n\n if set(ipaddrlist).intersection(set(registered_data)):\n errors = (_(\"(%s) by ip_address (%s) already exists.\")\n % (model_name, create_data['ip_address']))\n raise exception.ResourceExists(reason=errors)", "def test_will_not_get_instance_type_with_bad_id(self):\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, 'asdf')", "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def resourceExists(node, name = None, resourceType = PROP_RESOURCE_TYPE_RESOURCE):\n \n exists = False\n if not node:\n return exists\n try:\n myResourceType = \"\"\n if name:\n # make sure it's unicode:\n if not isinstance(name, str):\n name = name.decode(sys.getfilesystemencoding())\n url = node.url\n if url.endswith(\"/\"):\n url = url + name\n else:\n url = url + \"/\" + name\n newNode = ResourceStorer(url, node.connection)\n element = newNode.readProperty(NS_DAV, PROP_RESOURCE_TYPE)\n else: # name is \"None\":\n element = node.readProperty(NS_DAV, PROP_RESOURCE_TYPE)\n \n if len(element.children) > 0:\n myResourceType = element.children[0].name\n if resourceType == myResourceType or resourceType == PROP_RESOURCE_TYPE_RESOURCE:\n exists = True\n else:\n exists = False\n except WebdavError as wderr:\n if wderr.code == CODE_NOT_FOUND:\n # node doesn't exist -> exists = False:\n exists = False\n else:\n # another exception occured -> \"re-raise\" it:\n raise\n return exists", "def is_resource_id(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return is_resource_id(schema_obj.metadata)\n elif isinstance(schema_obj, schema.Struct):\n return schema_obj.full_name == 'weave.common.ResourceId'\n return False", "def is_existing_object(did):\n if not d1_gmn.app.did.is_existing_object(did):\n raise d1_common.types.exceptions.NotFound(\n 0,\n \"Identifier is {}. Expected a Persistent ID (PID) for an existing \"\n 'object. 
id=\"{}\"'.format(d1_gmn.app.did.classify_identifier(did), did),\n identifier=did,\n )", "def test_will_not_get_instance_type_by_unknown_id(self):\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, 10000)", "def validate_id(ctx, param, value):\r\n try:\r\n if value is None:\r\n return None\r\n obj = optiga.Object(int(value, base=16))\r\n obj = obj.meta\r\n return int(value, base=16)\r\n except (ValueError, TypeError, OSError) as no_object:\r\n raise click.BadParameter(\"Object ID doesn't exist. Please align with the Objects map\") from no_object", "def taco_test_put_error_requires_id(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '501'))", "def _create_resource(resource, **options):\n global _existing\n\n if _existing[resource]:\n print('{r} {k}:{v} already exists with id {i}.'.format(\n r=resource,\n k=args.tag,\n v=args.role,\n i=_existing[resource].id\n ))\n return True\n\n print('{v} a {r} with parameters: {p}...'.format(\n v='Would create' if dry else 'Creating',\n r=resource,\n p=str(options)\n ))\n\n if dry:\n return True\n\n # All easy cases out of the way, we now need to actually create something.\n r = None\n try:\n r = getattr(ec2, definitions[resource].create)(** options)\n # In some cases (instance) a list is returned instead of one item. Quack!\n try:\n r = r[0]\n except:\n pass\n _tag_resource(r)\n print('... {r} id {i} created.'.format(\n r=resource,\n i=r.id\n ))\n _existing[resource] = r\n return True\n except Exception as e:\n if r is None:\n print('Could not create resource {r}.'.format(\n r=resource\n ))\n traceback.print_exc()\n else:\n print('Could not tag resource {r}, id {i}.'.format(\n r=resource,\n i=r.id\n ))\n traceback.print_exc()\n _destroy_resource(resource)\n return False", "def validate_identifier(self, identifier):\n pass", "def needs_unique_instance(type_):\n return type_ in unique_instance_types", "def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. Please, choose another')", "def identify_id(id: str) -> bool:\n return validate_handle(id)", "def test_create_id_identity(self):\n self.assertIs(Rectangle.create(id=True).id, True)\n self.assertIs(Rectangle.create(id=type).id, type)\n self.assertIs(Rectangle.create(id=None).id, None)", "def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value", "def validate(self):\n # Validate all mandatory keys are present\n if not self.mandatory_keys.issubset(set(self.resource)):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] miss a \"\n \"mandatory key. Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource does not contains extra keys\n if not set(self.resource).issubset(self.keys):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] contains \"\n \"extra keys. 
Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource value type\n for key, value in self.resource.items():\n if not isinstance(value, self.__class__.MODEL[key][0]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data type (expected: %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][0]))\n # For str type validate the content as according the regex\n if self.__class__.MODEL[key][0] is str:\n if not re.match(self.__class__.MODEL[key][1], value):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))\n # For list type validate the content as according the regex\n if self.__class__.MODEL[key][0] is list:\n if not all([re.match(self.__class__.MODEL[key][1], v)\n for v in value]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))", "def test_check_input_id(self):\n r3 = Rectangle(10, 2, 0, 0, 12)\n self.assertEqual(r3.id, 12)", "def resource(\n name: str,\n *,\n mime_type: str,\n parent_folder_id: drive_api.ResourceID,\n drive_service: Optional[discovery.Resource] = None,\n) -> drive_api.ResourceID:\n command = request(\n name,\n mime_type=mime_type,\n parent_folder_id=parent_folder_id,\n drive_service=drive_service,\n )\n result = command.execute()\n new_id: drive_api.ResourceID = result.get(\"id\")\n return new_id", "def test_bad_id(self):\n r1 = Square(10, 2)\n self.assertEqual(r1.id, 1)\n\n r2 = Square(2, 10)\n self.assertEqual(r2.id, 2)", "def validatePredefinedType(self, type: int) -> bool:\n ...", "def validate_resources(self, folder, resources):\r\n self.validate_files_exist(folder, resources)\r\n self.validate_no_duplicate_paths(resources)", "def __validateResourceStateEntry(self, resource: Dict[str, str]):\n if AZ_RESOURCE_ID not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' % (\n self.fullName, AZ_RESOURCE_ID))\n if SID not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' % (self.fullName, SID))\n if ARM_TYPE not in resource:\n raise ValueError(\n '[%s] %s is not present in the armMapping.' 
% (self.fullName, ARM_TYPE))", "def validate_id(self, value):\n try:\n Tutor.objects.get(pk=value)\n except Tutor.DoesNotExist:\n raise serializers.ValidationError('Tutor object with id \\'{}\\' does not exist.'.format(value))\n return value", "def test_readable_id_invalid(readable_id_value):\n program = ProgramFactory.build(readable_id=readable_id_value)\n with pytest.raises(ValidationError):\n program.save()\n course = CourseFactory.build(program=None, readable_id=readable_id_value)\n with pytest.raises(ValidationError):\n course.save()", "def test_api_update_book_with_id_does_not_exist(self):\n\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'book with id 1 does not exist')\n\t\tself.assertTrue(book_res['status'] == 'error')", "def test_check_id(self):\n r1 = Rectangle(2, 10)\n r2 = Rectangle(2, 10)\n r3 = Rectangle(2, 10)\n self.assertGreater(r2.id, r1.id)\n self.assertGreater(r3.id, r2.id)", "def create_resource(self, resource, resource_data, resource_type, **kwargs):\n if not isinstance(resource, Resource):\n raise KeyError(\"'resource' should be an instance of Resource\")\n if not isinstance(resource_data, ResourceData) or not resource_data.value:\n raise KeyError(\n \"'resource_data' should be ResourceData with 'value' attribute\")\n if not isinstance(resource_type, ResourceType):\n raise KeyError(\"'resource_type' should be an instance of ResourceType\")\n if not kwargs or 'feed_id' not in kwargs:\n raise KeyError('Variable \"feed_id\" id mandatory field!')\n\n resource_id = urlquote(resource.id, safe='')\n r = self._post('entity/f;{}/resource'.format(kwargs['feed_id']),\n data={\"name\": resource.name, \"id\": resource.id,\n \"resourceTypePath\": \"rt;{}\"\n .format(resource_type.path.resource_type_id)})\n if r:\n r = self._post('entity/f;{}/r;{}/data'\n .format(kwargs['feed_id'], resource_id),\n data={'role': 'configuration', \"value\": resource_data.value})\n else:\n # if resource or it's data was not created correctly, delete resource\n self._delete('entity/f;{}/r;{}'.format(kwargs['feed_id'], resource_id))\n return r", "def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])", "def resource_exists(self, resource):\n products = Product.select(self.env, where={'name' : resource.id})\n return bool(products)", "def test_cannot_add_existing_asset_code(self):\n self.assertEqual(self.all_assets.count(), 1)\n asset_code = \"IC002\"\n new_asset = Asset(asset_code, \"SN001\")\n with self.assertRaises(ValidationError):\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 1)", "def send_error_missing_id(message, obj_type):\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Missing id',\n \"object_type\": obj_type,\n \"description\": message\n }}), 400)", "def test_init_id_identity(self):\n self.assertIs(Rectangle(1, 1, id=True).id, True)\n self.assertIs(Rectangle(1, 1, id=type).id, type)", "def id_exists(host_id: int,\n database_connection: mysql.connector.connect) -> bool:\n return validate_id(host_id, database_connection)", "def 
_validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def validate(self):\n try:\n # update _resource to have default values from the schema\n self._resource = self.schema(self._resource)\n except MultipleInvalid as e:\n errors = [format_error(err, self.resource_type) for err in e.errors]\n raise exceptions.ValidationError({'errors': errors})\n\n yield self.check_unique()", "def _uniqueness_check(self, cls, unique_in = None, **attr):\n # under the same datasource, only 1 subsystem, 1 neuropil, 1 tract of the name can exist\n # under the same neuropil, only 1 neuron of the name can exist\n # multiple (collections of) synapses can exist between two neurons\n if cls == 'Species':\n tmp = self.sql_query(\n \"\"\"select from Species where (name = \"{name}\" or \"{name}\" in synonyms) and stage = \"{stage}\" and sex = \"{sex}\" \"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Species {name} at {stage} stage ({sex}) already exists with rid = {rid}\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Species {name} (as its synonym) at {stage} stage ({sex}) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], stage = attr['stage'], sex = attr['sex'], rid = obj._id, formalname = obj.name))\n elif cls == 'DataSource':\n objs = self.find_objs('DataSource', name=attr['name'], version=attr['version'])\n #if self.exists(cls, name = attr['name'], version = attr['version']):\n if len(objs):\n raise NodeAlreadyExistError(\"\"\"{} Node with attributes {} already exists with rid = {}\"\"\".format(\n cls, ', '.join([\"\"\"{} = {}\"\"\".format(key, value) \\\n for key, value in attr.items()]), objs[0]._id))\n elif cls == 'Neurotransmitter':\n tmp = self.sql_query(\n \"\"\"select from Neurotransmitter where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n name = attr['name']))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"Neurotransmitter {name} already exists with rid = {rid}\"\"\".format(\n name = attr['name'], rid = objs[0]._id))\n return objs\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"Neurotransmitter {name} (as its synonym) already exists with rid = {rid}, use name {formalname} instead\"\"\".format(\n name = attr['name'], rid = obj._id, formalname = obj.name))\n elif cls in ['Subsystem', 'Neuropil', 'Subregion', 'Tract']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if 
len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n else:\n for obj in objs:\n if attr['name'] in obj.synonyms:\n raise NodeAlreadyExistError(\n \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'], formalname = obj.name,\n ds = unique_in.name,\n version = unique_in.version, rid = obj._id))\n # Alternatively, try:\n # tmp = self.sql_query(\n # \"\"\"select from {cls} where name = \"{name}\" or \"{name}\" in synonyms\"\"\".format(\n # cls = cls, name = attr['name']))\n # ds = tmp.owned_by(cls = 'DataSource').has(rid = datasource)\n # if len(ds):\n # tmp1 = tmp.has(name = attr['name'])\n # if len(tmp1.owned_by(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = datasource.name,\n # version = datasource.version))\n # else:\n # all_synonym_objs = (tmp - tmp1).node_objs\n # for obj in objs:\n # if len(QueryWrapper.from_rids(obj._id).has(cls = 'DataSource').has(rid = datasource)):\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under DataSource {ds} version {version}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = datasource.name,\n # version = datasource.version))\n\n # Alternatively 2, try: (will be slow when it has a lot of Owns edges)\n # tmp = sql_query(\n # \"\"\"\n # select from (select expand(out('Owns')[@class = \"{cls}\"]) from {rid}) where name = \"{name}\" or \"{name}\" in synonyms\n # \"\"\"\n # )\n # elif cls in ['Subregion']:\n # if not isinstance(unique_in, models.Neuropil):\n # raise TypeError('To check the uniqueness of a {} instance, unique_in must be a Neuropil object'.format(cls))\n # tmp = self.sql_query(\n # \"\"\"select from (select from {cls} where name = \"{name}\" or \"{name}\" in synonyms) let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='ucls' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n # rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n # if len(tmp):\n # objs = tmp.node_objs\n # if attr['name'] in [obj.name for obj in objs]:\n # raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'],\n # ds = unique_in.name))\n # else:\n # for obj in objs:\n # if name in obj.synonyms:\n # raise NodeAlreadyExistError(\n # \"\"\"{cls} {name} already exists as a synonym of {cls} {formalname} under Neuropil {ds}\"\"\".format(\n # cls = cls, name = attr['name'], formalname = obj.name,\n # ds = unique_in.name))\n elif cls in ['Neuron', 'NeuronFragment']:\n # TODO: synonyms are not checked against existing names and synonyms\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where uname = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 
1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists with rid = {rid}, under DataSource {ds} version {version}\"\"\".format(\n cls = cls, name = attr['name'], rid = objs[0]._id,\n ds = unique_in.name,\n version = unique_in.version))\n elif cls == 'Circuit':\n if not isinstance(unique_in, models.DataSource):\n raise TypeError('To check the uniqueness of a {} instance, unique_in must be a DataSource object'.format(cls))\n tmp = self.sql_query(\n \"\"\"select from (select from {cls} where name = \"{name}\") let $q = (select from (select expand($parent.$parent.current.in('Owns'))) where @class='{ucls}' and @rid = {rid}) where $q.size() = 1\"\"\".format(\n rid = unique_in._id, cls = cls, name = attr['name'], ucls = unique_in.element_type))\n if len(tmp):\n objs = tmp.node_objs\n if attr['name'] in [obj.name for obj in objs]:\n raise NodeAlreadyExistError(\"\"\"{cls} {name} already exists under DataSource {ds} version {version}, rid = {rid}\"\"\".format(\n cls = cls, name = attr['name'],\n ds = unique_in.name,\n version = unique_in.version, rid = objs[0]._id))\n elif cls == 'ArborizationData':\n if not isinstance(unique_in, (models.Neuron, models.Synapse)):\n raise TypeError('To check the uniqueness of a ArborizationData instance, unique_in must be a Neuron or a Synapse object')\n tmp = self.sql_query(\n \"\"\"select from (select expand(out(HasData)) from {rid}) where @class = 'ArborizationData' \"\"\".format(rid = unique_in._id))\n if len(tmp):\n raise NodeAlreadyExistError(\"\"\"ArborizationData already exists for {node} {uname} with rid = {rid}. Use NeuroArch.update_{node}_arborization to update the record\"\"\".format(\n node = unique_in.element_type.lower(), rid = tmp.node_objs[0]._id, uname = unique_in.uname))\n else:\n raise TypeError('Model type not understood.')\n return True", "def is_resource(space, w_obj):\n return space.wrap(space.is_resource(w_obj))", "def is_valid_pid_for_create(did):\n if not d1_gmn.app.did.is_valid_pid_for_create(did):\n raise d1_common.types.exceptions.IdentifierNotUnique(\n 0,\n 'Identifier is already in use as {}. 
did=\"{}\"'.format(\n d1_gmn.app.did.classify_identifier(did), did\n ),\n identifier=did,\n )", "def test_validation_idaa_valid_and_exists(self):\n expected_discrepancies = 0\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][self.create_idaa_program_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertTrue(upload_program.is_valid())\n self.assertTrue(upload_program.tola_program_exists)\n self.assertEquals(upload_program.discrepancy_count, expected_discrepancies)", "def _check_for_preexisting_identifier(self, doi: Doi):\n # The database expects each field to be a list.\n query_criterias = {\"ids\": [doi.pds_identifier]}\n\n # Query database for rows with given id value.\n columns, rows = self._database_obj.select_latest_rows(query_criterias)\n\n for row in rows:\n existing_record = dict(zip(columns, row))\n\n if doi.doi != existing_record[\"doi\"]:\n raise IllegalDOIActionException(\n f\"There is already a DOI {existing_record['doi']} associated \"\n f\"with PDS identifier {doi.pds_identifier} \"\n f\"(status={existing_record['status']}).\\n\"\n f\"You cannot modify a DOI for an existing PDS identifier.\"\n )", "def existence_validation(self, request, *args, **kwargs):\n lection_id = self.kwargs.get('lection_id')\n course_id = self.kwargs.get('course_id')\n lection = Lection.objects.filter(Q(course_id=course_id) & Q(id=lection_id)).first()\n error = {'error': f\"Either lection with ID{lection_id} doesn't belong to course with ID{course_id}, \"\n f\"or doesn't exist.\"}\n if lection:\n return method(self, request, *args, **kwargs)\n return Response(error, status=status.HTTP_403_FORBIDDEN)", "def test_init_id_type(self):\n self.assertIsInstance(Rectangle(1, 1).id, int)\n self.assertIsInstance(Rectangle(1, 1, id=None).id, int)", "def add_existing_key_fail(self, data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)", "def test_get_by_id_wrong_type(self):\n assert ExampleUserModel.get_by_id(\"xyz\") is None", "def test_get_event_type_by_id_invalid_id(self):\n\t\trequest = self.client.get('/api/event_type/esper/0', follow=True)\n\t\tself.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)", "def test_resource_exists(self):\r\n\t\tself.assertTrue(self._configuration_.resources().has_key(\"AddWordTaskRepeat\") and self._configuration_.resources().has_key(\"RemoveWordTaskRepeat\"))", "def test_register_subscription_existing_type(self):\n mock_type = Mock()\n bus = event_bus._event_bus\n bus._subscriptions[mock_type] = [\n EventSubscription(mock_type, lambda _: None)]\n new_subscription = EventSubscription(mock_type, lambda _: True)\n\n reg_id = event_bus.register_subscription(new_subscription)\n\n self.assertTrue(new_subscription in bus._subscriptions[mock_type])\n self.assertTrue(reg_id in bus._registration_id_map.keys())", "def test_add_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertNotIn(b1, s1.catalogue)\n s1.add_resource(b1)\n self.assertIn(b1, s1.catalogue)\n s1.add_resource(b1)\n self.assertEqual(len(s1.catalogue), 1)", "def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID 
{self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")", "def _resource_name_check(self, resource_name):\n return self._name_check(resource_name, 'resources')", "def validate(self, entity_id):\n\n safe_entity = self.find_by_id(entity_id)\n if not safe_entity:\n raise EntityNotFound(\"The param_id = {} is not valid.\".format(entity_id))\n\n return safe_entity", "def check_status(self, id):\n raise NotImplementedError()", "def test_cannot_add_existing_serial_number(self):\n self.assertEqual(self.all_assets.count(), 1)\n serial_number = \"SN001\"\n new_asset = Asset(serial_number, \"SN001\")\n with self.assertRaises(ValidationError):\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 1)", "def delete_does_not_exist_fail(self, id_):\n assert is_404(self.get((id_, self.delete_url)))\n self.delete_fail(id_, 'does not exist')", "def test_readable_id_valid(readable_id_value):\n program = ProgramFactory.build(readable_id=readable_id_value)\n program.save()\n assert program.id is not None\n course = CourseFactory.build(program=None, readable_id=readable_id_value)\n course.save()\n assert course.id is not None", "def validated_id(cls, name):\n if name:\n if name in cls._ids:\n return cls._ids[name]\n if cls.validated_name(name):\n if Accounts.exists(name):\n return cls.get_id(name)\n return None", "def ResourceExists(resource_name, search_user_paths=True):\n try:\n ResourcePath(resource_name, search_user_paths)\n return True\n except ResourceNotFound:\n return False", "def test_get_event_type_by_id_not_found(self):\n\t\tevent_type = EventType.objects.get(name=\"asd\")\n\t\trequest = self.client.get('/api/event_type/esper/' + str(event_type.id + 1), follow=True)\n\t\tself.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)", "def test_create_with_bad_id(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n post_data = {'source_type': 'spotify', 'source_id': 00}\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n data = json.loads(resp.content)\n new_records_count = Track.objects.all().count()\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(existing_records_count, new_records_count)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def is_resource(self, path):\n # type: (Text) -> bool\n raise FileNotFoundError", "def validate_new_person(self, person_id):\n\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Person WHERE id == %s\"\"\", (person_id,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n return False\n return True", "async def test_create_invalid_field(self):\n data = {'id': 33, 'value': 'foo'}\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.create(data)\n self.assertEqual(\n 'Error: \"id\": 33 is not a string: {\\'id\\': \\'\\'}',\n str(cm.exception))", "def update_object_type(self, object_type=None):\n # Return Value\n # ------------\n # {success: true}\n #\n if not is_basic_identifier(object_type.name):\n raise BadRequest(\"Invalid object_type name: %s\" % object_type.name)\n if not is_yaml_string_valid(object_type.definition):\n raise BadRequest(\"Invalid YAML definition\")\n object_id, version = self.clients.resource_registry.update(object_type)\n return object_id", "def find_by_id(cls, id):\n\t\tif id:\n\t\t\treturn cls.query.filter_by(id=id).first()\n\t\treturn 
{\n\t\t\t'message': 'id field is required',\n\t\t\t'status': 'Failed'\n\t\t\t}, 400", "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def test_id_uniqueness(self):\n user_2 = User()\n self.assertNotEqual(self.user_1.id, user_2.id)", "def sanity_check(cls, data, ticket_id=None):\n if ticket_id is not None:\n ticket_id = int(ticket_id)\n ticket = DB_TICKET_TABLE.get(doc_id=ticket_id)\n if not ticket:\n flask_restful.abort(404, message=f\"ticket '{ticket_id}' not found!\")\n else:\n ticket = None\n\n # A custom validator to make sure the aportio ID remains unique\n def validate_aportio_id(aportio_id):\n if not isinstance(aportio_id, str):\n raise ValueError(\"expected string type for aportio ID\")\n TicketQuery = Query()\n ticket_with_aportio_id = DB_TICKET_TABLE.get(\n TicketQuery.aportio_id == aportio_id)\n if ticket_with_aportio_id and (ticket_with_aportio_id.doc_id != ticket_id):\n raise ValueError(f\"a ticket with aportio ID '{aportio_id}' \"\n f\"exists already\")\n return aportio_id\n\n # A custom validator for the short title\n def validate_short_title(text):\n return _str_len_check(text, cls.SHORT_TITLE_MIN_LEN, cls.SHORT_TITLE_MAX_LEN)\n\n # A custom validator for the long text\n def validate_long_text(text):\n return _str_len_check(text, cls.LONG_TEXT_MIN_LEN, cls.LONG_TEXT_MAX_LEN)\n\n # Perform some sanity checking of the provided attributes\n data = _dict_sanity_check(data,\n mandatory_keys = [\n (\"aportio_id\", validate_aportio_id),\n (\"customer_id\", Customer.exists),\n (\"short_title\", validate_short_title),\n (\"user_id\", User.exists),\n (\"status\", Ticket.valid_status),\n (\"classification\", Ticket.valid_classification),\n (\"long_text\", validate_long_text)\n ],\n optional_keys = [\n (\"custom_fields\", dict)\n ],\n obj=ticket)\n # Now check whether this user is even associated with that customer\n cust_id = data['customer_id']\n user_id = data['user_id']\n assoc_q = Query()\n assoc_data = DB_USER_CUSTOMER_RELS_TABLE.search((assoc_q.customer_id == cust_id) &\n (assoc_q.user_id == user_id))\n if not assoc_data:\n flask_restful.abort(400, message=f\"Bad Request - user '{user_id}' is not \"\n f\"associated with customer '{cust_id}'\")\n return data, ticket", "def test_exists(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n BaseSample.exists('SKM7.640188', SampleTemplate(1))", "def test_validation_idaa_valid_and_does_not_exists(self):\n idaa_index = 1\n expected_discrepancies = 0\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertTrue(upload_program.is_valid())\n self.assertFalse(upload_program.tola_program_exists)\n self.assertEquals(upload_program.discrepancy_count, expected_discrepancies)", "def _check_id(self, keyword):\n if keyword not in self.request.data:\n return '{} 
parameter is missing'.format(keyword)\n \"\"\" Check if <keyword> parameter is not None \"\"\"\n if self.request.data[keyword] == '':\n return '{} ID cannot be None'.format(keyword)\n \"\"\" Check if <keyword> parameter is > 0 \"\"\"\n if int(self.request.data[keyword]) < 1:\n return '{} ID must be an integer > 0'.format(keyword)", "def exists(self, obj):\n return False", "def _check_subject_id(subject_id):\n if (subject_id and\n len(subject_id) > models.Subject.id.property.columns[\n 0].type.length):\n raise exception.SubjectNotFound()", "def test_validate_post_existing_resource(client):\n response = client.post(\n '/user/',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result", "def validate_item_id(idx):\n item = ItemModel.query.get(idx)\n if item is None:\n raise NotFound(f'Item with id {idx} not found.')\n return item", "def test_data_type_id(self):\n self.assertTrue(self.tester.data_type(ret_id=True), 2)", "def check_person_existence(self, searched_person_id):\n self.__load_persons_from_file_into_memory()\n return super().check_person_existence(searched_person_id)", "def check_instance(self, class_name, inst_id, stored_objects):\n '''get '<class_name>.id' to FileStorage.__objects key format'''\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n \"\"\"given id does not exist\"\"\"\n print(\"** no instance found **\")\n instance = False\n return instance", "def test_create_already_existing_campaign_fails(self):\n test_campaign = return_canned_campaign()\n self.test_org.campaign = [test_campaign]\n test_campaign.save()\n response = self.client.post(\n self.endpoint_url,\n json={\n \"logo\": test_campaign.logo,\n \"name\": test_campaign.name,\n \"organisations\": [self.test_org.id],\n \"url\": test_campaign.url,\n },\n headers={\"Authorization\": self.session_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 409)\n self.assertEqual(response_body[\"Error\"], \" Campaign name already exists\")\n self.assertEqual(response_body[\"SubCode\"], \"NameExists\")", "def exists(self, Search_ID):\n if self.get_id(Search_ID) is None:\n return False\n else:\n return True", "def validateID(self, id : int) -> int:\n # If ID is a string, ensure it can be casted to an int before casting and returning.\n if type(id) == str:\n if not lib.stringTyping.isInt(id):\n raise TypeError(\"user ID must be either int or string of digits\")\n return int(id)\n # If ID is not a string, nor an int, throw an error.\n elif type(id) != int:\n raise TypeError(\"user ID must be either int or string of digits\")\n # ID must be an int, so return it.\n return id", "def id_exists(show_id: int,\n database_connection: mysql.connector.connect) -> bool:\n return validate_id(show_id, database_connection)" ]
[ "0.6422405", "0.64173734", "0.6142269", "0.61312246", "0.5939884", "0.5908327", "0.5880398", "0.5806814", "0.577634", "0.5734848", "0.5730072", "0.56382006", "0.55637", "0.5501549", "0.5488068", "0.5485431", "0.5476124", "0.5466836", "0.54339", "0.5411698", "0.5398409", "0.5372584", "0.53484315", "0.53477323", "0.5336371", "0.5327411", "0.5316241", "0.5294655", "0.5292505", "0.52906764", "0.5290138", "0.52719474", "0.52706325", "0.52505064", "0.5224689", "0.5216522", "0.5177424", "0.5173158", "0.5164601", "0.51274157", "0.51106805", "0.5106305", "0.50903237", "0.5088144", "0.50764173", "0.5063056", "0.5062557", "0.50492555", "0.50405514", "0.5026922", "0.5023985", "0.5008982", "0.50059426", "0.5004298", "0.5000435", "0.49945658", "0.49936256", "0.49927726", "0.4989894", "0.49898884", "0.49880224", "0.49869823", "0.49819273", "0.49790868", "0.49758255", "0.49716154", "0.49641553", "0.49623126", "0.49579415", "0.49545392", "0.49482954", "0.49476528", "0.49418265", "0.49415585", "0.4931851", "0.49189654", "0.49133888", "0.4912833", "0.49104878", "0.49034798", "0.4903424", "0.4892134", "0.48883384", "0.4884316", "0.48806715", "0.48744503", "0.48673403", "0.48648918", "0.48645955", "0.48611632", "0.48581317", "0.4855877", "0.48537856", "0.48529413", "0.48418126", "0.48340088", "0.482996", "0.4826713", "0.48135507", "0.4812498", "0.48092806" ]
0.0
-1
Try to delete a dataset, propagating error on failure.
def _TryDeleteDataset(dataset_id, project_id):
  client = GetApiClient()
  service = client.datasets
  delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')
  delete_request = delete_request_type(datasetId=dataset_id,
                                       projectId=project_id,
                                       deleteContents=True)
  service.Delete(delete_request)
  log.info('Deleted dataset [{}:{}]'.format(project_id, dataset_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_dataset(self, dataset):\n raise NotImplementedError('delete_dataset')", "def _handle_creation_failure(session: Session, stub_dataset: Dataset, error: str):\n try:\n dataset.delete(session, stub_dataset)\n except requests.HTTPError:\n raise CreationFailure(\n f\"Created dataset did not delete after an earlier error: {error}\"\n )\n raise CreationFailure(error)", "def _delete_dataset_netex(dataset_id):\n try:\n logging.info(\"Going to delete the netex file of the dataset %s\", dataset_id)\n community_resource = _find_community_resources(dataset_id)\n _delete_community_resources(dataset_id, community_resource)\n logging.info(\"deleted community resource for the dataset %s\", dataset_id)\n except requests.HTTPError as err:\n logging.warning(\n \"Unable to delete to the dataset %s. Http Error %s\", dataset_id, err\n )\n except Exception as err:\n logging.warning(\n \"Unable to delete to the dataset %s. Generic Error %s\", dataset_id, err\n )", "def delete_dataset(self, dataset: DatasetDB):\n try:\n self._es.delete_index(dataset_records_index(dataset.id))\n finally:\n self._es.delete_document(index=DATASETS_INDEX_NAME, doc_id=dataset.id)", "def test_dataset_deltitem(train_dataset):\n with pytest.raises(Exception):\n del train_dataset[0]", "def delete(_id):\n dataset = ESDataset.get(id=_id, ignore=404)\n\n if not dataset:\n raise NoEntityError(f\"dataset {_id} does not exist.\")\n\n dataset.delete()\n\n return dataset.name", "def delete(log, args):\n log('dataset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete dataset command coming soon.')", "def delete_dataset(self, identifier):\n # Delete the dataset directory if it exists. Otherwise return False\n dataset_dir = self.get_dataset_dir(identifier)\n if not os.path.isdir(dataset_dir):\n return False\n shutil.rmtree(dataset_dir)\n return True", "def delete_dataset(dataset_path):\n force_rmtree(dataset_path)", "def DeleteDataset(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def delete_dataset(request):\n body = json.loads(request.body)\n dataset_id = body.get('dataset_id', '')\n organization_id = body.get('organization_id')\n # check if user has access to the dataset\n d = ImportRecord.objects.filter(\n super_organization_id=organization_id, pk=dataset_id\n )\n if not d.exists():\n return {\n 'status': 'error',\n 'message': 'user does not have permission to delete dataset',\n }\n d = d[0]\n d.delete()\n return {\n 'status': 'success',\n }", "def delete(self, dataset_name=None, dataset_id=None, sure=False, really=False):\n if sure and really:\n dataset = self.get(dataset_name=dataset_name, dataset_id=dataset_id)\n success, response = self._client_api.gen_request(req_type='delete',\n path='/datasets/{}'.format(dataset.id))\n if not success:\n raise exceptions.PlatformException(response)\n logger.info('Dataset {} was deleted successfully'.format(dataset.name))\n return True\n else:\n raise exceptions.PlatformException(\n error='403',\n message='Cant delete dataset from SDK. Please login to platform to delete')", "def delete(self, ds, del_raw_data=False, **kwargs):\n self.logger.warning('ds_id already exists: {}. 
Deleting'.format(ds.id))\n self._del_iso_images(ds)\n self._es.delete_ds(ds.id)\n self._db.alter('DELETE FROM dataset WHERE id=%s', ds.id)\n if del_raw_data:\n self.logger.warning('Deleting raw data: {}'.format(ds.input_path))\n wd_man = WorkDirManager(ds.id)\n wd_man.del_input_data(ds.input_path)\n if self.mode == 'queue':\n self._queue.publish({'ds_id': ds.id, 'status': DatasetStatus.DELETED}, SM_DS_STATUS)", "def test_that_when_dataset_is_deleted_the_account_is_still_there(self):\n test_dataset = Dataset.objects.get(\n dataset_slug=\"google-geojson-example\")\n test_dataset.delete()\n with self.assertRaises(ObjectDoesNotExist):\n Dataset.objects.get(dataset_slug=\"google-geojson-example\")\n Account.objects.get(account_slug=\"test_user\")", "def delete(self, name):\n assert name, \"Must input a valid dataset name.\"\n self.manager.delete_data(name)", "def _try_delete_resource(self, delete_callable, *args, **kwargs):\n try:\n delete_callable(*args, **kwargs)\n # if resource is not found, this means it was deleted in the test\n except exceptions.NotFound:\n pass", "def delete_datasetreplica(self, dataset_replica):\n raise NotImplementedError('delete_datasetreplica')", "def delete(self, ds, del_raw_data=False):\n self._post_sm_msg(ds=ds, action=DatasetAction.DELETE, priority=DatasetActionPriority.HIGH)", "def test_dataset_exists(client, to_delete):\n DATASET_ID = \"get_table_dataset_{}\".format(_millis())\n dataset_ref = client.dataset(DATASET_ID)\n dataset = bigquery.Dataset(dataset_ref)\n dataset = client.create_dataset(dataset)\n to_delete.append(dataset)\n\n assert dataset_exists(client, dataset_ref)\n assert not dataset_exists(client, client.dataset(\"dataset doesnot exist\"))", "def delete_dataset_target_bigquery(self, dataset):\n return db.delete_dataset_bigquery(\n dataset, project=self.get_conn_env_var('TARGET_BIGQUERY', 'PROJECT')\n )", "def purge_dataset(request):\n data = json.loads(request.body.decode('utf-8'))\n if not request.user.is_staff:\n return JsonResponse({'error': 'Only admin can purge dataset'}, status=403)\n try:\n dset = models.Dataset.objects.get(pk=data['item_id'])\n except models.Dataset.DoesNotExist:\n return JsonResponse({'error': 'Dataset does not exist'}, status=403)\n purgemsg = delete_dataset_from_cold(dset)\n if purgemsg['state'] == 'error':\n return JsonResponse(purgemsg, status=500)\n else:\n return JsonResponse(purgemsg)", "def raise_exception_for_dataset(dataset_reference):\n if dataset_reference.dataset_id == non_existing_dataset_id:\n raise cloud.exceptions.NotFound('')", "def delete_bigquery_dataset(self, shared_state: Dict[str, Any]):\n\n yield\n\n # Get the bigquery dataset id used for testing and wipe it\n bigquery_dataset = shared_state[\"bigquery_dataset\"]\n bigquery_client = shared_state[\"bigquery_client\"]\n bigquery_client.delete_dataset(\n bigquery_dataset.dataset_id, delete_contents=True, not_found_ok=True\n ) # Make an API request.", "def test_delete_unexpected_error(self, requests_mock, capsys):\n requests_mock.delete(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.delete(data_url)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out", "def delete(dtype, name, rootdir=None):\n # type and the name\n # delete them\n num_deleted = 0\n for dataset in FreezableAPI.datasets(dtype,name,rootdir=rootdir,fullpath=True):\n # delete it\n shutil.rmtree(dataset)\n num_deleted += 1\n return num_deleted", "def delete_temp_dataset():\n\n bq.delete_dataset(temp_dataset_ref, 
delete_contents=True, not_found_ok=True)", "def delete(self, identifier, dataset):\n # Fix identifier because SQlAlchemy can't parse RDF Literals\n identifier = str(identifier)\n\n #self._load_config()\n self.log( 'Removing resource %s in dataset %s' % (identifier, dataset))\n\n # Remove it\n data = self.es_instance.delete_document(identifier, dataset)\n\n self.log( 'Registro %s removido com sucesso' % identifier)\n\n return data", "def delete_task(dataset):\n Observation.delete_all(dataset)\n super(dataset.__class__, dataset).delete({DATASET_ID: dataset.dataset_id})", "def do_delete(self, node_id):\n _error_code, _msg = RAMSTKDataModel.do_delete(self, node_id)\n\n # pylint: disable=attribute-defined-outside-init\n # It is defined in RAMSTKDataModel.__init__\n if _error_code != 0:\n _error_code = 1\n _msg = _msg + (\n '\\n RAMSTK ERROR: Attempted to delete non-existent '\n 'Allocation ID {0:d}.').format(node_id)\n else:\n self.last_id = max(self.tree.nodes.keys())\n\n return _error_code, _msg", "def delete_ds(self, dt):\n\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n if F not in data[k].keys():\n continue \n max_date = data[k][F]['max_date'] \n \"\"\" Deleting unecessary ds \"\"\"\n if dt > max_date : # check max date and check if data is still loaded\n print(blue + 'Memory used before deleting : ' , process.memory_info().rss/1000000000 , cend) \n del data[k][F] \n print(\"*** Erasing dataset: \" , k , ' ' , F ) \n print(blue + 'Memory used after deleting : ' , process.memory_info().rss/1000000000 , cend) \n \n else:\n continue", "def dataset(bf):\n ds = bf.create_dataset(\"test_dataset_{}\".format(uuid4()))\n\n yield ds\n\n bf._api.datasets.delete(ds)", "def destroyDataset(request):\n\n if request.method=='DELETE':\n json_data=json.loads(request.body)\n name=json_data['name']\n try:\n dataset=Dataset.nodes.get(name=name)\n dataset.delete()\n return JsonResponse({\"success\": \"Dataset deleted\"}, safe=False)\n except:\n return JsonResponse({\"error\":\"Error occurred\"}, safe=False)", "def test_delete_voice_dataset(self):\n pass", "def delete_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n self.data[\"dataset\"].pop(name)\n self.update_categories()\n self.write_data_cache(self.data)\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def test_deletion_fail(self):\n\n # Assert that a RelaxNoPipeError occurs when the data pipe does not exist.\n self.assertRaises(RelaxNoPipeError, pipes.delete, 'x')", "def test_delete_http_error(self, requests_mock, capsys):\n requests_mock.delete(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.delete(data_url)\n assert 'HTTP error: 300' in capsys.readouterr().out", "def test_delete_error(self):\n with self.assertRaises(QiitaDBExecutionError):\n PrepTemplate.delete(1)", "def delete_datasets(self, base_url):\n response = requests.get(base_url + '/testdata')\n for index in range(len(response.json()['testdata'])):\n self.delete_dataset(base_url, response.json()['testdata'][index]['dataset'])", "def test_delete(self):\n data_columns = ['id', 'column_string', 'column_float']\n data_values = [[1, 'string1', 456.956], [2, 'string2', 38.905]]\n data = pd.DataFrame(data_values, columns=data_columns)\n data.name = 'test_delete'\n my_conn = MySQL(*self.conn_params)\n my_conn.insert(data)\n table = my_conn.get_table(data.name)\n expected = 2\n current = my_conn.engine.scalar(\n select([func.count('*')]).select_from(table)\n )\n 
self.assertEqual(current, expected)\n\n # delete from operation\n # the None argument in delete DML is included to avoid pylint E1120\n table.delete(None).where(table.c.id == 2).execute()\n\n expected = 1\n current = my_conn.engine.scalar(\n select([func.count('*')]).select_from(table)\n )\n self.assertEqual(current, expected)\n my_conn.drop(data.name)", "def delData(self, ide = None):\r\n try:\r\n if ide is not None:\r\n self.cursor.execute(\"DELETE FROM DATAS WHERE ID = %s;\" %(ide))\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False", "def delete():", "def delete(self, data):\r\n pass", "def delete_upload_data(self, id=None, name=None) -> None:\r\n # get the dataset\r\n dataset = self.get_upload_data(id=id, name=name)\r\n\r\n try:\r\n self.session.delete(dataset)\r\n self.session.commit()\r\n except Exception as e:\r\n self.session.rollback()\r\n raise e", "def delete_run(self, run_id):\n ds = self.datastorage\n ds.get_metrics_dao().delete(run_id)\n # TODO: implement\n # ds.get_artifact_dao().delete(run_id)\n # ds.get_resource_dao().delete(run_id)\n ds.get_run_dao().delete(run_id)", "def test_07_datastore_delete(self, Mock):\r\n html_request = FakeRequest(json.dumps({}), 200,\r\n {'content-type': 'application/json'})\r\n\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! datastore_delete failed\" == type, type", "def delete_row(self, identifier, rowid, datastore):\n # Get dataset. 
Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Delete the row at the given index position\n df = vizual.delete_rows(dataset.to_dataframe(), rowids=[rowid])\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations.filter(rows=list(df.index))\n )\n return VizualApiResult(ds)", "def dataset_delete(self, name, deep=False):\n\n # Checks inputs\n check_type(value=name, allowed_types=str, var_name=\"name\", raise_exception=True)\n check_is_valid_ds_name(value=name, raise_exception=True)\n check_type(value=deep, allowed_types=bool, var_name=\"deep\", raise_exception=True)\n\n template = 'dataset_remove'\n if deep:\n template = 'dataset_deep_remove'\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.DELETE,\n template=TEMPLATES[template],\n uri_params={\n 'name': name\n })\n\n if response.status_code == 404:\n raise IkatsNotFoundError(\"Dataset %s not found in database\" % name)\n return response.text", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete(self):\n SampleTemplate.create(self.metadata, self.new_study)\n SampleTemplate.delete(2)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.study_sample_columns WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")", "def delete(self, identifier):\n self.get(identifier)\n conn = self.get_connector()\n cursor = conn.cursor()\n\n query = \"delete from {0} where {2}={1}\".format(\n self.ressource_config[\"table\"],\n identifier,\n self.model.pk_field.name)\n try:\n cursor.execute(query)\n except sqlite3.IntegrityError, e:\n message = \"\"\n if \"foreign\" in e.message:\n message = \"\"\"another ressource depends on this\n object. 
Cloud not delete before all ressources\n depending on it are also deleted\"\"\"\n\n raise BadRequest(message)\n\n conn.commit()\n conn.close()", "def delete_dataset_without_original_url():\n logging.warning(\n \"*** deleting all netex files created by transport.data.gouv.fr ***\"\n )\n r = requests.get(\"https://transport.data.gouv.fr/api/datasets\")\n r.raise_for_status()\n datasets = r.json()\n\n print_resource = lambda r: f\"\\n\\t*[url = {r['url']} | extras = {r.get('extras')}]\"\n print_resources = lambda rs: [print_resource(r) for r in rs]\n\n for d in datasets:\n dataset_name = d[\"title\"]\n if d[\"type\"] != \"public-transit\":\n continue\n\n dataset_id = d[\"id\"]\n\n community_resources = _find_community_resources(dataset_id)\n logging.info(\"community ressources : %s\", print_resources(community_resources))\n old_community_resources = [\n r\n for r in community_resources\n if \"transport:original_resource_url\" not in r.get(\"extras\", {})\n ]\n if old_community_resources:\n logging.info(\n \"old community ressources : %s\",\n print_resources(old_community_resources),\n )\n _delete_community_resources(dataset_id, old_community_resources)\n logging.info(\"deleted community resource for the dataset %s\", dataset_id)", "def test_do_delete_non_existent_id(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_delete(300)\r\n\r\n assert _error_code == 2005\r\n assert _msg == (\"RAMSTK ERROR: Attempted to delete non-existent \"\r\n \"Function ID 300.\")", "def test_delete_datasource(\n in_memory_runtime_context: EphemeralDataContext,\n) -> None:\n context = in_memory_runtime_context\n\n name = context.list_datasources()[0][\"name\"]\n context.delete_datasource(name)\n\n assert name not in context.datasources", "def test_delete(self):\n\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res_id = self.metadata.get_by_id(\n entity=Dashboard, entity_id=str(res_name.id.__root__)\n )\n\n # Delete\n self.metadata.delete(\n entity=Dashboard, entity_id=str(res_id.id.__root__), recursive=True\n )\n\n # Then we should not find it\n res = self.metadata.list_entities(entity=Dashboard)\n assert not next(\n iter(\n ent\n for ent in res.entities\n if ent.fullyQualifiedName == self.entity.fullyQualifiedName\n ),\n None,\n )", "def _try_delete_and_return_permissions_error(component_url):\n try:\n delete_object_task.DeleteObjectTask(component_url, verbose=False).execute()\n except api_errors.CloudApiError as e:\n status = getattr(e, 'status_code', None)\n if status == 403:\n return e\n raise", "async def test_delete_invalid(database,valid_data):\n test_valid_insert(database,valid_data)\n N = 10\n for idx in range(N+1,N*2):\n try:\n await database.delete(_id=idx,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def del_data(self, name):\n raise NotImplementedError('Do I want to delete data from a df?')", "def delete_datasource_instance(connection, id, error_msg=None):\n url = f\"{connection.base_url}/api/datasources/{id}\"\n response = 
connection.session.delete(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = f\"Error deleting Datasource Instance with ID: {id}\"\n response_handler(response, error_msg)\n return response", "def test_delete_run(self):\n pass", "def delete(self, hostname):\n self.not_supported()", "def do_delete(self, arg):\n \treturn False", "def test_delete_data(self):\n data = Data.objects.create(\n name='Test data',\n contributor=self.user,\n process=self.proc,\n )\n\n data.output = {'json_field': {'foo': 'bar'}}\n data.status = Data.STATUS_DONE\n data.save()\n\n self.assertEqual(Storage.objects.count(), 1)\n\n data.delete()\n self.assertEqual(Storage.objects.count(), 0)", "def test_wrong_delete_param(self):\n def close_conn():\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n LDAPConnection(cli).delete(\"cn=dummy\")\n self.assertRaises(ClosedConnection, close_conn)\n self.assertRaises(TypeError, lambda: self.conn.delete(0))", "def testDeleteOpInvalidDN(self):\n self.assertRaises(AssertionError, delta.DeleteOp, 0)", "def delete(self, ssubject: str, predicate: str, obj: str) -> None:\n raise NotImplementedError(\"The RDF graph is read-only: DELETE DATA queries are not allowed\")", "def test_delete_failure(self):\r\n problem_url_name = 'H1P1'\r\n location = InstructorTaskModuleTestCase.problem_location(problem_url_name)\r\n self.define_option_problem(problem_url_name)\r\n self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])\r\n\r\n expected_message = \"bad things happened\"\r\n with patch('courseware.models.StudentModule.delete') as mock_delete:\r\n mock_delete.side_effect = ZeroDivisionError(expected_message)\r\n instructor_task = self.delete_problem_state('instructor', location)\r\n self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)", "def test_handle_delete_github_error(self):\n self.db.query.side_effect = GithubAPIException(\"error\")\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (\"Team delete was unsuccessful with \"\n \"the following error: \"\n \"error\", 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def delete_datafile(self, save_instance=True):\n if self.datafile:\n logger.debug(\"Deleting datafile '%s'\", self.datafile.name)\n try:\n self.datafile.delete(save=save_instance)\n except OSError as exc:\n logger.error(\"Error deleting file '%s': %s\",\n self.datafile.name, exc)\n return False\n logger.info(\"Deleted datafile of '%s'\", self)", "async def test_delete_batch_invalid(database,valid_data):\n test_valid_insert_batch(database,valid_data)\n N = 10\n batch_id = 1\n for idx in range(N+1,N*2):\n try:\n await database.delete_batch(batch_id=batch_id,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):\n\n name = self._validate_name(name)\n rdtype = dns.rdatatype.RdataType.make(rdtype)\n if covers is not None:\n covers = dns.rdatatype.RdataType.make(covers)\n node = self.get_node(name)\n if node is not None:\n node.delete_rdataset(self.rdclass, rdtype, covers)\n if len(node) == 0:\n self.delete_node(name)", "def delete(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(\n context, id_)\n\n if db_resource_data['type'] == (eon_const.\n EON_RESOURCE_TYPE_ESX_CLUSTER):\n msg = _(\"Delete operation not supported for type %s\"\n % db_resource_data['type'])\n raise exception.DeleteException(err=msg)\n\n 
_resource_data = _make_response(\n db_resource_data)\n _resource_data_log = deepcopy(_resource_data)\n _resource_data_log.pop(\"meta_data\", None)\n LOG.info(\"Details for the ID %s is: %s\" % (\n id_, logging.mask_password(_resource_data_log)))\n driver_obj = driver.load_resource_driver(\n db_resource_data['type'])\n driver_obj.validate_delete(db_resource_data)\n driver_obj.delete(context, id_)\n self.db_api.delete_resource(context, id_)\n # delete the data from hlm input model\n try:\n LOG.info(\"[%s] remove resource from input model\" % id_)\n hux_obj = HLMFacadeWrapper(context)\n resource_id = db_resource_data[eon_const.EON_RESOURCE_ID]\n hux_obj.delete_server(resource_id)\n hux_obj.commit_changes(resource_id, \"Delete compute resource\")\n except facade_excep.NotFound:\n # log and do nothing\n LOG.warn(\"[%s] resource not found in hlm input model\" % id_)\n LOG.info(\"[%s]: Deleted resource from eon\" % id_)\n # Notify the message to consumers\n try:\n message = {\"resource_id\": id_,\n \"resource_state\": eon_const.EON_RESOURCE_STATE_REMOVED,\n \"resource_details\": _resource_data,\n }\n message_notifier.notify(context,\n message_notifier.EVENT_PRIORITY_INFO,\n message_notifier.EVENT_TYPE[\n 'removed'],\n message)\n except Exception as ex:\n LOG.exception(\n \"Exception while notifying the message : %s\" % ex)\n except exception.NotFound as e:\n msg = (\"Failed to delete resource %s. Error: %s\") % (\n _resource_data['name'], e.message)\n LOG.exception(msg)\n raise e", "def delete(self):\n self.logger.debug(\"In delete.\")\n\n if self._id is None:\n self.logger.warn(\"Attempt to delete a %s with no ID.\", __name__)\n raise Exception(\"{} does not have an ID.\".format(__name__))\n\n id = self._id\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n # Assume failure\n success = False\n\n try:\n self.logger.info(\"Deleting %s with ID %s.\", __name__, id)\n session.get_osdf().delete_node(id)\n success = True\n except Exception as delete_exception:\n self.logger.exception(delete_exception)\n self.logger.error(\"An error occurred when deleting %s.\", self)\n\n return success", "def test_sg_delete_non_associated(self):\n\n # Add a faked storage group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n storage_group = storage_group_mgr.find(name=faked_storage_group.name)\n\n # Execute the code to be tested.\n storage_group.delete()\n\n # Check that the storage group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=faked_storage_group.name)", "def check_dataset_exists(dataset):\n result = subprocess.call(['das_client.py', '--query', 'dataset dataset=%s' % dataset])\n return result == 0", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate.delete(5)", "def s3_delete_data(self):\n\n self.k.delete()", "def delete(self, *args, **kwargs):\n raise NotImplementedError()", "def delete(self, error_id):\n error = self.db.session.query(models.Error).get(error_id)\n if error:\n self.db.session.delete(error)\n self.db.session.commit()\n else:\n raise InvalidErrorReference(\"No error with id %s\" % str(error_id))", "def delete_dataset(key=None) -> DeleteDatasetResponse:\n config = ThreatExchangeConfig.getx(str(key))\n hmaconfig.delete_config(config)\n return DeleteDatasetResponse(response=\"The privacy group is deleted\")", "def clean(args):\n with_dataset(args, Dataset._clean)", 
"def delete(self, predicate, successHandler=None, failHandler=None):\n queryDelete = \"DELETE FROM `article_article` \" \\\n \"WHERE `{}` {} %s LIMIT 1;\".format(\n predicate[0],\n predicate[1]\n )\n\n queryDeleteParams = []\n if isinstance(predicate[2], datetime):\n queryDeleteParams.append(predicate[2].strftime(\"%Y-%m-%d %H:%M:%S\"))\n else:\n queryDeleteParams.append(predicate[2])\n\n def failCallback(error):\n errorMessage = str(error)\n if isinstance(error, Failure):\n errorMessage = error.getErrorMessage()\n\n self.log.error(\n \"[Default.Article] delete() database error: {errorMessage}\",\n errorMessage=errorMessage\n )\n\n if failHandler is not None:\n reactor.callInThread(failHandler, [\"DatabaseError\"])\n\n def successCallback(results):\n if successHandler is not None:\n reactor.callInThread(successHandler)\n\n operation = self.dbService.runOperation(\n queryDelete,\n tuple(queryDeleteParams)\n )\n operation.addCallbacks(successCallback, failCallback)", "def delete(self):\n raise NotImplementedError", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.delete(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate.delete(5)", "def remove_dataset(self, name):\n payload = {\"name\": name}\n r = self.request(\n \"delete\", url_path_join(USER_DATASET_RESOURCE_URL, self.owner), payload=payload\n )\n self.check_and_raise(r)", "async def delete_dataset(\n self,\n request: Optional[\n Union[data_labeling_service.DeleteDatasetRequest, dict]\n ] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> None:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = data_labeling_service.DeleteDatasetRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.delete_dataset,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=30.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.DeadlineExceeded,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=30.0,\n ),\n default_timeout=30.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )", "def test_delete_edge_case_with_write_concern_0_return_None(self):\n p1 = self.Person(name=\"User Z\", age=20).save()\n del_result = 
p1.delete(w=0)\n assert del_result is None", "def delete_table_data():\n try:\n print 'delete existing data'\n sql = 'delete from document'\n sql1 = 'delete from clean_keywords'\n sql2 = 'delete from keywords'\n util.executeSQL(conn, sql) # delete the existing data.\n util.executeSQL(conn, sql1)\n util.executeSQL(conn, sql2)\n except Exception as e:\n print e", "def test_data_object_del(self):\n pass", "def test_raise_when_no_id(self):\n model = self.Test({}, False)\n self.assertRaises(errors.PersistenceError, model.delete)", "def delete(self, *args, **kwargs):\n return 0", "def delete_file(self, filename=None):\n return self._service.delete_object(self._datasets_id, filename)", "def test_delete(self, init_db, audit):\n audit.delete()\n assert Audit.get(audit.id) == None", "def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')", "def _TryDeleteTable(dataset_id, table_id, project_id):\n client = GetApiClient()\n service = client.tables\n delete_request_type = GetApiMessage('BigqueryTablesDeleteRequest')\n delete_request = delete_request_type(datasetId=dataset_id, tableId=table_id,\n projectId=project_id)\n service.Delete(delete_request)\n log.info('Deleted table [{}:{}:{}]'.format(project_id, dataset_id, table_id))", "def test_delete(self):\n pt = PrepTemplate.create(self.metadata, self.new_raw_data,\n self.test_study, self.data_type_id)\n PrepTemplate.delete(pt.id)\n\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_template WHERE prep_template_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.common_prep_info WHERE prep_template_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_columns WHERE prep_template_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_2\")", "def delete(self):\n raise NotImplementedError()", "def delete(self):\n raise NotImplementedError()", "def test_delete_without_partition_key(self):\r\n with self.assertRaises(query.QueryException):\r\n TestModel.objects(attempt_id=0).delete()" ]
[ "0.7605005", "0.7107175", "0.70523417", "0.69809973", "0.6883148", "0.6740322", "0.6667521", "0.65653944", "0.6536053", "0.65328515", "0.65034896", "0.6410687", "0.6386324", "0.6328574", "0.63253295", "0.62904114", "0.6277353", "0.62735856", "0.6252367", "0.61857146", "0.6168082", "0.6162157", "0.613244", "0.6089961", "0.60710067", "0.60618997", "0.6054307", "0.60175395", "0.59812534", "0.59728926", "0.5945504", "0.5898049", "0.58455795", "0.5814105", "0.58104396", "0.5775425", "0.5733373", "0.5725115", "0.570578", "0.5701192", "0.5630272", "0.56293786", "0.56126684", "0.56120926", "0.5611404", "0.56092745", "0.5607267", "0.5595084", "0.5575618", "0.5560845", "0.55180013", "0.55004615", "0.54946786", "0.5494068", "0.5493824", "0.54772246", "0.5475572", "0.54748356", "0.54659593", "0.54646385", "0.546054", "0.545929", "0.5447527", "0.5440744", "0.54403776", "0.5428439", "0.5425052", "0.5412754", "0.53937244", "0.53930616", "0.5389392", "0.53804857", "0.537615", "0.53478956", "0.5340608", "0.533264", "0.53314257", "0.5328683", "0.53236055", "0.5322863", "0.5313439", "0.53118443", "0.5311221", "0.5309877", "0.530916", "0.53082687", "0.52888113", "0.5282388", "0.52746874", "0.52719414", "0.5265507", "0.5262418", "0.5255371", "0.52480537", "0.52463824", "0.52461874", "0.5243807", "0.5236309", "0.5236309", "0.52332956" ]
0.74737805
1
Try to delete a dataset, propagating error on failure.
def _TryDeleteTable(dataset_id, table_id, project_id): client = GetApiClient() service = client.tables delete_request_type = GetApiMessage('BigqueryTablesDeleteRequest') delete_request = delete_request_type(datasetId=dataset_id, tableId=table_id, projectId=project_id) service.Delete(delete_request) log.info('Deleted table [{}:{}:{}]'.format(project_id, dataset_id, table_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_dataset(self, dataset):\n raise NotImplementedError('delete_dataset')", "def _TryDeleteDataset(dataset_id, project_id):\n client = GetApiClient()\n service = client.datasets\n delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')\n delete_request = delete_request_type(datasetId=dataset_id,\n projectId=project_id,\n deleteContents=True)\n service.Delete(delete_request)\n log.info('Deleted dataset [{}:{}]'.format(project_id, dataset_id))", "def _handle_creation_failure(session: Session, stub_dataset: Dataset, error: str):\n try:\n dataset.delete(session, stub_dataset)\n except requests.HTTPError:\n raise CreationFailure(\n f\"Created dataset did not delete after an earlier error: {error}\"\n )\n raise CreationFailure(error)", "def _delete_dataset_netex(dataset_id):\n try:\n logging.info(\"Going to delete the netex file of the dataset %s\", dataset_id)\n community_resource = _find_community_resources(dataset_id)\n _delete_community_resources(dataset_id, community_resource)\n logging.info(\"deleted community resource for the dataset %s\", dataset_id)\n except requests.HTTPError as err:\n logging.warning(\n \"Unable to delete to the dataset %s. Http Error %s\", dataset_id, err\n )\n except Exception as err:\n logging.warning(\n \"Unable to delete to the dataset %s. Generic Error %s\", dataset_id, err\n )", "def delete_dataset(self, dataset: DatasetDB):\n try:\n self._es.delete_index(dataset_records_index(dataset.id))\n finally:\n self._es.delete_document(index=DATASETS_INDEX_NAME, doc_id=dataset.id)", "def test_dataset_deltitem(train_dataset):\n with pytest.raises(Exception):\n del train_dataset[0]", "def delete(_id):\n dataset = ESDataset.get(id=_id, ignore=404)\n\n if not dataset:\n raise NoEntityError(f\"dataset {_id} does not exist.\")\n\n dataset.delete()\n\n return dataset.name", "def delete(log, args):\n log('dataset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete dataset command coming soon.')", "def delete_dataset(self, identifier):\n # Delete the dataset directory if it exists. 
Otherwise return False\n dataset_dir = self.get_dataset_dir(identifier)\n if not os.path.isdir(dataset_dir):\n return False\n shutil.rmtree(dataset_dir)\n return True", "def delete_dataset(dataset_path):\n force_rmtree(dataset_path)", "def DeleteDataset(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def delete_dataset(request):\n body = json.loads(request.body)\n dataset_id = body.get('dataset_id', '')\n organization_id = body.get('organization_id')\n # check if user has access to the dataset\n d = ImportRecord.objects.filter(\n super_organization_id=organization_id, pk=dataset_id\n )\n if not d.exists():\n return {\n 'status': 'error',\n 'message': 'user does not have permission to delete dataset',\n }\n d = d[0]\n d.delete()\n return {\n 'status': 'success',\n }", "def delete(self, dataset_name=None, dataset_id=None, sure=False, really=False):\n if sure and really:\n dataset = self.get(dataset_name=dataset_name, dataset_id=dataset_id)\n success, response = self._client_api.gen_request(req_type='delete',\n path='/datasets/{}'.format(dataset.id))\n if not success:\n raise exceptions.PlatformException(response)\n logger.info('Dataset {} was deleted successfully'.format(dataset.name))\n return True\n else:\n raise exceptions.PlatformException(\n error='403',\n message='Cant delete dataset from SDK. Please login to platform to delete')", "def delete(self, ds, del_raw_data=False, **kwargs):\n self.logger.warning('ds_id already exists: {}. Deleting'.format(ds.id))\n self._del_iso_images(ds)\n self._es.delete_ds(ds.id)\n self._db.alter('DELETE FROM dataset WHERE id=%s', ds.id)\n if del_raw_data:\n self.logger.warning('Deleting raw data: {}'.format(ds.input_path))\n wd_man = WorkDirManager(ds.id)\n wd_man.del_input_data(ds.input_path)\n if self.mode == 'queue':\n self._queue.publish({'ds_id': ds.id, 'status': DatasetStatus.DELETED}, SM_DS_STATUS)", "def test_that_when_dataset_is_deleted_the_account_is_still_there(self):\n test_dataset = Dataset.objects.get(\n dataset_slug=\"google-geojson-example\")\n test_dataset.delete()\n with self.assertRaises(ObjectDoesNotExist):\n Dataset.objects.get(dataset_slug=\"google-geojson-example\")\n Account.objects.get(account_slug=\"test_user\")", "def delete(self, name):\n assert name, \"Must input a valid dataset name.\"\n self.manager.delete_data(name)", "def _try_delete_resource(self, delete_callable, *args, **kwargs):\n try:\n delete_callable(*args, **kwargs)\n # if resource is not found, this means it was deleted in the test\n except exceptions.NotFound:\n pass", "def delete_datasetreplica(self, dataset_replica):\n raise NotImplementedError('delete_datasetreplica')", "def delete(self, ds, del_raw_data=False):\n self._post_sm_msg(ds=ds, action=DatasetAction.DELETE, priority=DatasetActionPriority.HIGH)", "def test_dataset_exists(client, to_delete):\n DATASET_ID = \"get_table_dataset_{}\".format(_millis())\n dataset_ref = client.dataset(DATASET_ID)\n dataset = bigquery.Dataset(dataset_ref)\n dataset = client.create_dataset(dataset)\n to_delete.append(dataset)\n\n assert dataset_exists(client, dataset_ref)\n assert not dataset_exists(client, client.dataset(\"dataset doesnot exist\"))", "def delete_dataset_target_bigquery(self, dataset):\n return db.delete_dataset_bigquery(\n dataset, project=self.get_conn_env_var('TARGET_BIGQUERY', 'PROJECT')\n )", "def purge_dataset(request):\n data = 
json.loads(request.body.decode('utf-8'))\n if not request.user.is_staff:\n return JsonResponse({'error': 'Only admin can purge dataset'}, status=403)\n try:\n dset = models.Dataset.objects.get(pk=data['item_id'])\n except models.Dataset.DoesNotExist:\n return JsonResponse({'error': 'Dataset does not exist'}, status=403)\n purgemsg = delete_dataset_from_cold(dset)\n if purgemsg['state'] == 'error':\n return JsonResponse(purgemsg, status=500)\n else:\n return JsonResponse(purgemsg)", "def raise_exception_for_dataset(dataset_reference):\n if dataset_reference.dataset_id == non_existing_dataset_id:\n raise cloud.exceptions.NotFound('')", "def delete_bigquery_dataset(self, shared_state: Dict[str, Any]):\n\n yield\n\n # Get the bigquery dataset id used for testing and wipe it\n bigquery_dataset = shared_state[\"bigquery_dataset\"]\n bigquery_client = shared_state[\"bigquery_client\"]\n bigquery_client.delete_dataset(\n bigquery_dataset.dataset_id, delete_contents=True, not_found_ok=True\n ) # Make an API request.", "def test_delete_unexpected_error(self, requests_mock, capsys):\n requests_mock.delete(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.delete(data_url)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out", "def delete(dtype, name, rootdir=None):\n # type and the name\n # delete them\n num_deleted = 0\n for dataset in FreezableAPI.datasets(dtype,name,rootdir=rootdir,fullpath=True):\n # delete it\n shutil.rmtree(dataset)\n num_deleted += 1\n return num_deleted", "def delete_temp_dataset():\n\n bq.delete_dataset(temp_dataset_ref, delete_contents=True, not_found_ok=True)", "def delete(self, identifier, dataset):\n # Fix identifier because SQlAlchemy can't parse RDF Literals\n identifier = str(identifier)\n\n #self._load_config()\n self.log( 'Removing resource %s in dataset %s' % (identifier, dataset))\n\n # Remove it\n data = self.es_instance.delete_document(identifier, dataset)\n\n self.log( 'Registro %s removido com sucesso' % identifier)\n\n return data", "def delete_task(dataset):\n Observation.delete_all(dataset)\n super(dataset.__class__, dataset).delete({DATASET_ID: dataset.dataset_id})", "def do_delete(self, node_id):\n _error_code, _msg = RAMSTKDataModel.do_delete(self, node_id)\n\n # pylint: disable=attribute-defined-outside-init\n # It is defined in RAMSTKDataModel.__init__\n if _error_code != 0:\n _error_code = 1\n _msg = _msg + (\n '\\n RAMSTK ERROR: Attempted to delete non-existent '\n 'Allocation ID {0:d}.').format(node_id)\n else:\n self.last_id = max(self.tree.nodes.keys())\n\n return _error_code, _msg", "def delete_ds(self, dt):\n\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n if F not in data[k].keys():\n continue \n max_date = data[k][F]['max_date'] \n \"\"\" Deleting unecessary ds \"\"\"\n if dt > max_date : # check max date and check if data is still loaded\n print(blue + 'Memory used before deleting : ' , process.memory_info().rss/1000000000 , cend) \n del data[k][F] \n print(\"*** Erasing dataset: \" , k , ' ' , F ) \n print(blue + 'Memory used after deleting : ' , process.memory_info().rss/1000000000 , cend) \n \n else:\n continue", "def dataset(bf):\n ds = bf.create_dataset(\"test_dataset_{}\".format(uuid4()))\n\n yield ds\n\n bf._api.datasets.delete(ds)", "def destroyDataset(request):\n\n if request.method=='DELETE':\n json_data=json.loads(request.body)\n name=json_data['name']\n try:\n dataset=Dataset.nodes.get(name=name)\n dataset.delete()\n return JsonResponse({\"success\": \"Dataset 
deleted\"}, safe=False)\n except:\n return JsonResponse({\"error\":\"Error occurred\"}, safe=False)", "def test_delete_voice_dataset(self):\n pass", "def delete_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n self.data[\"dataset\"].pop(name)\n self.update_categories()\n self.write_data_cache(self.data)\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def test_deletion_fail(self):\n\n # Assert that a RelaxNoPipeError occurs when the data pipe does not exist.\n self.assertRaises(RelaxNoPipeError, pipes.delete, 'x')", "def test_delete_http_error(self, requests_mock, capsys):\n requests_mock.delete(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.delete(data_url)\n assert 'HTTP error: 300' in capsys.readouterr().out", "def test_delete_error(self):\n with self.assertRaises(QiitaDBExecutionError):\n PrepTemplate.delete(1)", "def delete_datasets(self, base_url):\n response = requests.get(base_url + '/testdata')\n for index in range(len(response.json()['testdata'])):\n self.delete_dataset(base_url, response.json()['testdata'][index]['dataset'])", "def test_delete(self):\n data_columns = ['id', 'column_string', 'column_float']\n data_values = [[1, 'string1', 456.956], [2, 'string2', 38.905]]\n data = pd.DataFrame(data_values, columns=data_columns)\n data.name = 'test_delete'\n my_conn = MySQL(*self.conn_params)\n my_conn.insert(data)\n table = my_conn.get_table(data.name)\n expected = 2\n current = my_conn.engine.scalar(\n select([func.count('*')]).select_from(table)\n )\n self.assertEqual(current, expected)\n\n # delete from operation\n # the None argument in delete DML is included to avoid pylint E1120\n table.delete(None).where(table.c.id == 2).execute()\n\n expected = 1\n current = my_conn.engine.scalar(\n select([func.count('*')]).select_from(table)\n )\n self.assertEqual(current, expected)\n my_conn.drop(data.name)", "def delData(self, ide = None):\r\n try:\r\n if ide is not None:\r\n self.cursor.execute(\"DELETE FROM DATAS WHERE ID = %s;\" %(ide))\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False", "def delete():", "def delete(self, data):\r\n pass", "def delete_upload_data(self, id=None, name=None) -> None:\r\n # get the dataset\r\n dataset = self.get_upload_data(id=id, name=name)\r\n\r\n try:\r\n self.session.delete(dataset)\r\n self.session.commit()\r\n except Exception as e:\r\n self.session.rollback()\r\n raise e", "def delete_run(self, run_id):\n ds = self.datastorage\n ds.get_metrics_dao().delete(run_id)\n # TODO: implement\n # ds.get_artifact_dao().delete(run_id)\n # ds.get_resource_dao().delete(run_id)\n ds.get_run_dao().delete(run_id)", "def test_07_datastore_delete(self, Mock):\r\n html_request = FakeRequest(json.dumps({}), 200,\r\n {'content-type': 'application/json'})\r\n\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n out = self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n err_msg = \"It should return True\"\r\n assert out is True, err_msg\r\n # Check the error\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.datastore_delete(name='task',\r\n resource_id=self.task_resource_id)\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! 
datastore_delete failed\" == type, type", "def delete_row(self, identifier, rowid, datastore):\n # Get dataset. Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Delete the row at the given index position\n df = vizual.delete_rows(dataset.to_dataframe(), rowids=[rowid])\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations.filter(rows=list(df.index))\n )\n return VizualApiResult(ds)", "def dataset_delete(self, name, deep=False):\n\n # Checks inputs\n check_type(value=name, allowed_types=str, var_name=\"name\", raise_exception=True)\n check_is_valid_ds_name(value=name, raise_exception=True)\n check_type(value=deep, allowed_types=bool, var_name=\"deep\", raise_exception=True)\n\n template = 'dataset_remove'\n if deep:\n template = 'dataset_deep_remove'\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.DELETE,\n template=TEMPLATES[template],\n uri_params={\n 'name': name\n })\n\n if response.status_code == 404:\n raise IkatsNotFoundError(\"Dataset %s not found in database\" % name)\n return response.text", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete(self):\n SampleTemplate.create(self.metadata, self.new_study)\n SampleTemplate.delete(2)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.study_sample_columns WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")", "def delete(self, identifier):\n self.get(identifier)\n conn = self.get_connector()\n cursor = conn.cursor()\n\n query = \"delete from {0} where {2}={1}\".format(\n self.ressource_config[\"table\"],\n identifier,\n self.model.pk_field.name)\n try:\n cursor.execute(query)\n except sqlite3.IntegrityError, e:\n message = \"\"\n if \"foreign\" in e.message:\n message = \"\"\"another ressource depends on this\n object. 
Cloud not delete before all ressources\n depending on it are also deleted\"\"\"\n\n raise BadRequest(message)\n\n conn.commit()\n conn.close()", "def delete_dataset_without_original_url():\n logging.warning(\n \"*** deleting all netex files created by transport.data.gouv.fr ***\"\n )\n r = requests.get(\"https://transport.data.gouv.fr/api/datasets\")\n r.raise_for_status()\n datasets = r.json()\n\n print_resource = lambda r: f\"\\n\\t*[url = {r['url']} | extras = {r.get('extras')}]\"\n print_resources = lambda rs: [print_resource(r) for r in rs]\n\n for d in datasets:\n dataset_name = d[\"title\"]\n if d[\"type\"] != \"public-transit\":\n continue\n\n dataset_id = d[\"id\"]\n\n community_resources = _find_community_resources(dataset_id)\n logging.info(\"community ressources : %s\", print_resources(community_resources))\n old_community_resources = [\n r\n for r in community_resources\n if \"transport:original_resource_url\" not in r.get(\"extras\", {})\n ]\n if old_community_resources:\n logging.info(\n \"old community ressources : %s\",\n print_resources(old_community_resources),\n )\n _delete_community_resources(dataset_id, old_community_resources)\n logging.info(\"deleted community resource for the dataset %s\", dataset_id)", "def test_do_delete_non_existent_id(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_delete(300)\r\n\r\n assert _error_code == 2005\r\n assert _msg == (\"RAMSTK ERROR: Attempted to delete non-existent \"\r\n \"Function ID 300.\")", "def test_delete_datasource(\n in_memory_runtime_context: EphemeralDataContext,\n) -> None:\n context = in_memory_runtime_context\n\n name = context.list_datasources()[0][\"name\"]\n context.delete_datasource(name)\n\n assert name not in context.datasources", "def test_delete(self):\n\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res_id = self.metadata.get_by_id(\n entity=Dashboard, entity_id=str(res_name.id.__root__)\n )\n\n # Delete\n self.metadata.delete(\n entity=Dashboard, entity_id=str(res_id.id.__root__), recursive=True\n )\n\n # Then we should not find it\n res = self.metadata.list_entities(entity=Dashboard)\n assert not next(\n iter(\n ent\n for ent in res.entities\n if ent.fullyQualifiedName == self.entity.fullyQualifiedName\n ),\n None,\n )", "def _try_delete_and_return_permissions_error(component_url):\n try:\n delete_object_task.DeleteObjectTask(component_url, verbose=False).execute()\n except api_errors.CloudApiError as e:\n status = getattr(e, 'status_code', None)\n if status == 403:\n return e\n raise", "async def test_delete_invalid(database,valid_data):\n test_valid_insert(database,valid_data)\n N = 10\n for idx in range(N+1,N*2):\n try:\n await database.delete(_id=idx,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def del_data(self, name):\n raise NotImplementedError('Do I want to delete data from a df?')", "def delete_datasource_instance(connection, id, error_msg=None):\n url = f\"{connection.base_url}/api/datasources/{id}\"\n response = 
connection.session.delete(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = f\"Error deleting Datasource Instance with ID: {id}\"\n response_handler(response, error_msg)\n return response", "def test_delete_run(self):\n pass", "def delete(self, hostname):\n self.not_supported()", "def do_delete(self, arg):\n \treturn False", "def test_delete_data(self):\n data = Data.objects.create(\n name='Test data',\n contributor=self.user,\n process=self.proc,\n )\n\n data.output = {'json_field': {'foo': 'bar'}}\n data.status = Data.STATUS_DONE\n data.save()\n\n self.assertEqual(Storage.objects.count(), 1)\n\n data.delete()\n self.assertEqual(Storage.objects.count(), 0)", "def test_wrong_delete_param(self):\n def close_conn():\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n LDAPConnection(cli).delete(\"cn=dummy\")\n self.assertRaises(ClosedConnection, close_conn)\n self.assertRaises(TypeError, lambda: self.conn.delete(0))", "def testDeleteOpInvalidDN(self):\n self.assertRaises(AssertionError, delta.DeleteOp, 0)", "def delete(self, ssubject: str, predicate: str, obj: str) -> None:\n raise NotImplementedError(\"The RDF graph is read-only: DELETE DATA queries are not allowed\")", "def test_delete_failure(self):\r\n problem_url_name = 'H1P1'\r\n location = InstructorTaskModuleTestCase.problem_location(problem_url_name)\r\n self.define_option_problem(problem_url_name)\r\n self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])\r\n\r\n expected_message = \"bad things happened\"\r\n with patch('courseware.models.StudentModule.delete') as mock_delete:\r\n mock_delete.side_effect = ZeroDivisionError(expected_message)\r\n instructor_task = self.delete_problem_state('instructor', location)\r\n self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)", "def test_handle_delete_github_error(self):\n self.db.query.side_effect = GithubAPIException(\"error\")\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (\"Team delete was unsuccessful with \"\n \"the following error: \"\n \"error\", 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def delete_datafile(self, save_instance=True):\n if self.datafile:\n logger.debug(\"Deleting datafile '%s'\", self.datafile.name)\n try:\n self.datafile.delete(save=save_instance)\n except OSError as exc:\n logger.error(\"Error deleting file '%s': %s\",\n self.datafile.name, exc)\n return False\n logger.info(\"Deleted datafile of '%s'\", self)", "async def test_delete_batch_invalid(database,valid_data):\n test_valid_insert_batch(database,valid_data)\n N = 10\n batch_id = 1\n for idx in range(N+1,N*2):\n try:\n await database.delete_batch(batch_id=batch_id,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):\n\n name = self._validate_name(name)\n rdtype = dns.rdatatype.RdataType.make(rdtype)\n if covers is not None:\n covers = dns.rdatatype.RdataType.make(covers)\n node = self.get_node(name)\n if node is not None:\n node.delete_rdataset(self.rdclass, rdtype, covers)\n if len(node) == 0:\n self.delete_node(name)", "def delete(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(\n context, id_)\n\n if db_resource_data['type'] == (eon_const.\n EON_RESOURCE_TYPE_ESX_CLUSTER):\n msg = _(\"Delete operation not supported for type %s\"\n % db_resource_data['type'])\n raise exception.DeleteException(err=msg)\n\n 
_resource_data = _make_response(\n db_resource_data)\n _resource_data_log = deepcopy(_resource_data)\n _resource_data_log.pop(\"meta_data\", None)\n LOG.info(\"Details for the ID %s is: %s\" % (\n id_, logging.mask_password(_resource_data_log)))\n driver_obj = driver.load_resource_driver(\n db_resource_data['type'])\n driver_obj.validate_delete(db_resource_data)\n driver_obj.delete(context, id_)\n self.db_api.delete_resource(context, id_)\n # delete the data from hlm input model\n try:\n LOG.info(\"[%s] remove resource from input model\" % id_)\n hux_obj = HLMFacadeWrapper(context)\n resource_id = db_resource_data[eon_const.EON_RESOURCE_ID]\n hux_obj.delete_server(resource_id)\n hux_obj.commit_changes(resource_id, \"Delete compute resource\")\n except facade_excep.NotFound:\n # log and do nothing\n LOG.warn(\"[%s] resource not found in hlm input model\" % id_)\n LOG.info(\"[%s]: Deleted resource from eon\" % id_)\n # Notify the message to consumers\n try:\n message = {\"resource_id\": id_,\n \"resource_state\": eon_const.EON_RESOURCE_STATE_REMOVED,\n \"resource_details\": _resource_data,\n }\n message_notifier.notify(context,\n message_notifier.EVENT_PRIORITY_INFO,\n message_notifier.EVENT_TYPE[\n 'removed'],\n message)\n except Exception as ex:\n LOG.exception(\n \"Exception while notifying the message : %s\" % ex)\n except exception.NotFound as e:\n msg = (\"Failed to delete resource %s. Error: %s\") % (\n _resource_data['name'], e.message)\n LOG.exception(msg)\n raise e", "def delete(self):\n self.logger.debug(\"In delete.\")\n\n if self._id is None:\n self.logger.warn(\"Attempt to delete a %s with no ID.\", __name__)\n raise Exception(\"{} does not have an ID.\".format(__name__))\n\n id = self._id\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n # Assume failure\n success = False\n\n try:\n self.logger.info(\"Deleting %s with ID %s.\", __name__, id)\n session.get_osdf().delete_node(id)\n success = True\n except Exception as delete_exception:\n self.logger.exception(delete_exception)\n self.logger.error(\"An error occurred when deleting %s.\", self)\n\n return success", "def test_sg_delete_non_associated(self):\n\n # Add a faked storage group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n storage_group = storage_group_mgr.find(name=faked_storage_group.name)\n\n # Execute the code to be tested.\n storage_group.delete()\n\n # Check that the storage group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=faked_storage_group.name)", "def check_dataset_exists(dataset):\n result = subprocess.call(['das_client.py', '--query', 'dataset dataset=%s' % dataset])\n return result == 0", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate.delete(5)", "def s3_delete_data(self):\n\n self.k.delete()", "def delete(self, *args, **kwargs):\n raise NotImplementedError()", "def delete(self, error_id):\n error = self.db.session.query(models.Error).get(error_id)\n if error:\n self.db.session.delete(error)\n self.db.session.commit()\n else:\n raise InvalidErrorReference(\"No error with id %s\" % str(error_id))", "def delete_dataset(key=None) -> DeleteDatasetResponse:\n config = ThreatExchangeConfig.getx(str(key))\n hmaconfig.delete_config(config)\n return DeleteDatasetResponse(response=\"The privacy group is deleted\")", "def clean(args):\n with_dataset(args, Dataset._clean)", 
"def delete(self, predicate, successHandler=None, failHandler=None):\n queryDelete = \"DELETE FROM `article_article` \" \\\n \"WHERE `{}` {} %s LIMIT 1;\".format(\n predicate[0],\n predicate[1]\n )\n\n queryDeleteParams = []\n if isinstance(predicate[2], datetime):\n queryDeleteParams.append(predicate[2].strftime(\"%Y-%m-%d %H:%M:%S\"))\n else:\n queryDeleteParams.append(predicate[2])\n\n def failCallback(error):\n errorMessage = str(error)\n if isinstance(error, Failure):\n errorMessage = error.getErrorMessage()\n\n self.log.error(\n \"[Default.Article] delete() database error: {errorMessage}\",\n errorMessage=errorMessage\n )\n\n if failHandler is not None:\n reactor.callInThread(failHandler, [\"DatabaseError\"])\n\n def successCallback(results):\n if successHandler is not None:\n reactor.callInThread(successHandler)\n\n operation = self.dbService.runOperation(\n queryDelete,\n tuple(queryDeleteParams)\n )\n operation.addCallbacks(successCallback, failCallback)", "def delete(self):\n raise NotImplementedError", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.delete(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete_unkonwn_id_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate.delete(5)", "def remove_dataset(self, name):\n payload = {\"name\": name}\n r = self.request(\n \"delete\", url_path_join(USER_DATASET_RESOURCE_URL, self.owner), payload=payload\n )\n self.check_and_raise(r)", "async def delete_dataset(\n self,\n request: Optional[\n Union[data_labeling_service.DeleteDatasetRequest, dict]\n ] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> None:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = data_labeling_service.DeleteDatasetRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.delete_dataset,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=30.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.DeadlineExceeded,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=30.0,\n ),\n default_timeout=30.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )", "def test_delete_edge_case_with_write_concern_0_return_None(self):\n p1 = self.Person(name=\"User Z\", age=20).save()\n del_result = 
p1.delete(w=0)\n assert del_result is None", "def delete_table_data():\n try:\n print 'delete existing data'\n sql = 'delete from document'\n sql1 = 'delete from clean_keywords'\n sql2 = 'delete from keywords'\n util.executeSQL(conn, sql) # delete the existing data.\n util.executeSQL(conn, sql1)\n util.executeSQL(conn, sql2)\n except Exception as e:\n print e", "def test_data_object_del(self):\n pass", "def test_raise_when_no_id(self):\n model = self.Test({}, False)\n self.assertRaises(errors.PersistenceError, model.delete)", "def delete(self, *args, **kwargs):\n return 0", "def delete_file(self, filename=None):\n return self._service.delete_object(self._datasets_id, filename)", "def test_delete(self, init_db, audit):\n audit.delete()\n assert Audit.get(audit.id) == None", "def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')", "def test_delete(self):\n pt = PrepTemplate.create(self.metadata, self.new_raw_data,\n self.test_study, self.data_type_id)\n PrepTemplate.delete(pt.id)\n\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_template WHERE prep_template_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.common_prep_info WHERE prep_template_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_columns WHERE prep_template_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_2\")", "def delete(self):\n raise NotImplementedError()", "def delete(self):\n raise NotImplementedError()", "def test_delete_without_partition_key(self):\r\n with self.assertRaises(query.QueryException):\r\n TestModel.objects(attempt_id=0).delete()" ]
[ "0.7605005", "0.74737805", "0.7107175", "0.70523417", "0.69809973", "0.6883148", "0.6740322", "0.6667521", "0.65653944", "0.6536053", "0.65328515", "0.65034896", "0.6410687", "0.6386324", "0.6328574", "0.63253295", "0.62904114", "0.6277353", "0.62735856", "0.6252367", "0.61857146", "0.6168082", "0.6162157", "0.613244", "0.6089961", "0.60710067", "0.60618997", "0.6054307", "0.60175395", "0.59812534", "0.59728926", "0.5945504", "0.5898049", "0.58455795", "0.5814105", "0.58104396", "0.5775425", "0.5733373", "0.5725115", "0.570578", "0.5701192", "0.5630272", "0.56293786", "0.56126684", "0.56120926", "0.5611404", "0.56092745", "0.5607267", "0.5595084", "0.5575618", "0.5560845", "0.55180013", "0.55004615", "0.54946786", "0.5494068", "0.5493824", "0.54772246", "0.5475572", "0.54748356", "0.54659593", "0.54646385", "0.546054", "0.545929", "0.5447527", "0.5440744", "0.54403776", "0.5428439", "0.5425052", "0.5412754", "0.53937244", "0.53930616", "0.5389392", "0.53804857", "0.537615", "0.53478956", "0.5340608", "0.533264", "0.53314257", "0.5328683", "0.53236055", "0.5322863", "0.5313439", "0.53118443", "0.5311221", "0.5309877", "0.530916", "0.53082687", "0.52888113", "0.5282388", "0.52746874", "0.52719414", "0.5265507", "0.5262418", "0.5255371", "0.52480537", "0.52463824", "0.5243807", "0.5236309", "0.5236309", "0.52332956" ]
0.52461874
96
Get Table resource args (source, destination) for copy command.
def GetTableCopyResourceArgs(): table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table') arg_specs = [ resource_args.GetResourcePresentationSpec( verb='to copy from', name='source', required=True, prefixes=True, attribute_overrides={'table': 'source'}, positional=False, resource_data=table_spec_data.GetData()), resource_args.GetResourcePresentationSpec( verb='to copy to', name='destination', required=True, prefixes=True, attribute_overrides={'table': 'destination'}, positional=False, resource_data=table_spec_data.GetData())] fallthroughs = { '--source.dataset': ['--destination.dataset'], '--destination.dataset': ['--source.dataset'] } return [concept_parsers.ConceptParser(arg_specs, fallthroughs)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name())\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.projectId',\n destination_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.destinationTable.tableId',\n destination_ref.Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.datasetId',\n source_ref.Parent().Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.projectId',\n source_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.tableId',\n source_ref.Name())\n return request", "def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if 
element.escape:\n parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n value=element.time_format,\n type_=sa.String,\n ))\n\n if element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)", "def CopyFile(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"copyFile\", payload=payload, response_object=None)", "def getCloneArgs(self):\n\n values = {\n \"dest\": self.subnode_dest.makeClone()\n if self.subnode_dest is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def test_args_copy():\n args = cli.parse_args(['-c'])\n assert args.copy\n args = cli.parse_args(['--copy'])\n assert args.copy", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def command_copy(args):\n sources = args.sources\n destpath = args.destpath\n source_files = []\n for file_ in sources:\n if \"*\" in file_:\n selected = glob(file_)\n source_files.extend(selected)\n elif os.path.isfile(file_):\n 
source_files.append(file_)\n\n if destpath.endswith(\"/\") or os.path.isdir(destpath) or len(sources) > 1:\n # -- DESTDIR-MODE: Last argument is a directory.\n destdir = destpath\n else:\n # -- DESTFILE-MODE: Copy (and rename) one file.\n assert len(source_files) == 1\n destdir = os.path.dirname(destpath)\n\n # -- WORK-HORSE: Copy one or more files to destpath.\n if not os.path.isdir(destdir):\n sys.stdout.write(\"copy: Create dir %s\\n\" % destdir)\n os.makedirs(destdir)\n for source in source_files:\n destname = os.path.join(destdir, os.path.basename(source))\n sys.stdout.write(\"copy: %s => %s\\n\" % (source, destname))\n shutil.copy(source, destname)\n return 0", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n \"subscribed\": self.subnode_subscribed.makeClone(),\n \"subscript\": self.subnode_subscript.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def CopyObjsCommand(self, args, sub_opts=None, headers=None, debug=0,\n command='cp'):\n # Expand wildcards and containers in source StorageUris.\n src_uri_expansion = self.ExpandWildcardsAndContainers(\n args[0:len(args)-1], sub_opts, headers, debug)\n\n # Check for various problems and determine base_dst_uri based for request.\n (base_dst_uri, multi_src_request) = self.ErrorCheckCopyRequest(\n src_uri_expansion, args[-1], headers, debug, command)\n # Rewrite base_dst_uri and create dest dir as needed for multi-source copy.\n if multi_src_request:\n base_dst_uri = self.HandleMultiSrcCopyRequst(src_uri_expansion,\n base_dst_uri)\n\n # Now iterate over expanded src URIs, and perform copy operations.\n total_elapsed_time = total_bytes_transferred = 0\n for src_uri in iter(src_uri_expansion):\n for exp_src_uri in src_uri_expansion[src_uri]:\n print 'Copying %s...' 
% exp_src_uri\n dst_uri = self.ConstructDstUri(src_uri, exp_src_uri, base_dst_uri)\n (elapsed_time, bytes_transferred) = self.PerformCopy(\n exp_src_uri, dst_uri, sub_opts, headers, debug)\n total_elapsed_time += elapsed_time\n total_bytes_transferred += bytes_transferred\n if debug == 3:\n # Note that this only counts the actual GET and PUT bytes for the copy\n # - not any transfers for doing wildcard expansion, the initial HEAD\n # request boto performs when doing a bucket.get_key() operation, etc.\n if total_bytes_transferred != 0:\n print 'Total bytes copied=%d, total elapsed time=%5.3f secs (%sps)' % (\n total_bytes_transferred, total_elapsed_time,\n MakeHumanReadable(float(total_bytes_transferred) /\n float(total_elapsed_time)))", "def doTheCopy(argpath,argdest):\n print(\"To copy:\"+argpath)\n shutil.copy(argpath,argdest)", "def supported_table_args(self) -> t.Tuple[str, ...]:", "def cp(self, copy_from, copy_to, **kwargs):\n return self.exec_command('cp %s %s' % (copy_from, copy_to), **kwargs)", "def getCloneArgs(self):\n\n values = {\n \"dest\": self.subnode_dest.makeClone()\n if self.subnode_dest is not None\n else None,\n \"value\": self.subnode_value.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def args():\n\n useDB = docopt(__doc__)['--from-db']\n snapFile = docopt(__doc__)['-i']\n # csvFile = docopt(__doc__)['-o']\n # utils.askErase(csvFile)\n\n return [snapFile, useDB]", "def copyData(self, src_schema, src_table, src_columns, dest_schema, dest_table, dest_columns):\r\n sql = 'INSERT INTO {} ( {} ) SELECT {} FROM {}'.format(self.encodeTableName(dest_schema, dest_table), ','.join(dest_columns),\r\n ','.join(src_columns), self.encodeTableName(src_schema, src_table))\r\n return self.runSql(sql)", "def copy(CopySource=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, SourceClient=None, Config=None):\n pass", "def svn_client_copy_source_t_path_get(svn_client_copy_source_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_source_system_profile_params(argv):\n with get_audit_db(argv) as audit_db:\n if audit_db is None:\n if not argv.tablelist:\n return []\n\n if len(argv.tablelist) == 1:\n # A file containing table names\n if os.path.isfile(argv.tablelist[0]):\n with open(argv.tablelist[0]) as f:\n return [(argv.sourceschema,\n t.strip(),\n argv.targetschema,\n None) for t in f]\n\n return [(argv.sourceschema, table, argv.targetschema, None)\n for table in argv.tablelist]\n\n sql = \"\"\"\n SELECT source_region, object_name, target_region, query_condition\n FROM {audit_schema}.source_system_profile\n WHERE profile_name = %s\n AND version = %s\n AND active_ind = 'Y'\n ORDER BY object_seq\"\"\".format(audit_schema=argv.auditschema)\n\n bind_values = [argv.profilename, argv.profileversion]\n result = audit_db.execute_query(sql, argv.arraysize, bind_values)\n\n return [(row[0], row[1], row[2], row[3]) for row in result]", "def infocalypse_copy(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n insert_uri = opts['inserturi']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --inserturi.\\n\")\n return\n\n request_uri = opts['requesturi']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --requesturi option.\\n\")\n return\n\n params['INSERT_URI'] = insert_uri\n 
params['REQUEST_URI'] = request_uri\n execute_copy(ui_, repo, params, stored_cfg)", "def svn_fs_copy(*args):\r\n return _fs.svn_fs_copy(*args)", "def destination(self) -> str:\n for a in self.args:\n if a[0] != '-':\n return a\n try:\n return self.kwargs['dest']\n except KeyError:\n for a in self.args:\n if a.startswith('--'):\n dest = a.lstrip('-').replace('-', '_')\n if dest.isidentifier():\n return dest\n raise AttributeError(F'The argument with these values has no destination: {self!r}')", "def _copy_to_head_args(args: Namespace) -> Namespace:\n\n _head_args = copy.deepcopy(args)\n _head_args.polling = args.polling\n _head_args.port = args.port\n _head_args.host = args.host[0]\n _head_args.uses = args.uses\n _head_args.pod_role = PodRoleType.HEAD\n _head_args.runtime_cls = 'HeadRuntime'\n _head_args.replicas = 1\n\n if args.name:\n _head_args.name = f'{args.name}/head'\n else:\n _head_args.name = f'head'\n\n return _head_args", "def copyCommand(self):\n\n selection = self.selectedIndexes()\n\n if selection:\n rows = [index.row() for index in selection]\n columns = [index.column() for index in selection]\n if len(rows) == 4:\n model = self.proxyModel.sourceModel()\n row = rows[3]\n column = columns[3]\n command = model.dataset.data[row][column].cell\n QApplication.clipboard().setText(command)", "def destination(self) -> pulumi.Input['DestinationArgs']:\n return pulumi.get(self, \"destination\")", "def test_args_combination():\n args = cli.parse_args(['-cp'])\n assert args.copy\n assert args.paste\n args = cli.parse_args(['-c', '-p'])\n assert args.copy\n assert args.paste", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n \"expression\": self.subnode_expression.makeClone(),\n \"lower\": self.subnode_lower.makeClone()\n if self.subnode_lower is not None\n else None,\n \"upper\": self.subnode_upper.makeClone()\n if self.subnode_upper is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n \"expression\": self.subnode_expression.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def _copy_file ( self, source, dest ):\n return", "def print_dry_run_copy_info(source, dest):\n\n def shorten_home(path):\n expanded_home = os.path.expanduser(\"~\")\n path = str(path)\n if path.startswith(expanded_home):\n return path.replace(expanded_home, \"~\")\n return path\n\n def truncate_middle(path: str, acceptable_len: int):\n \"\"\"Middle truncate a string\n https://www.xormedia.com/string-truncate-middle-with-ellipsis/\n \"\"\"\n if len(path) <= acceptable_len:\n return path\n # half of the size, minus the 3 .'s\n n_2 = int(acceptable_len / 2 - 3)\n # whatever's left\n n_1 = int(acceptable_len - n_2 - 3)\n return f\"{path[:n_1]}...{path[-n_2:]}\"\n\n trimmed_source = shorten_home(source)\n trimmed_dest = shorten_home(dest)\n longest_allowed_path_len = 87\n if len(trimmed_source) + len(trimmed_dest) > longest_allowed_path_len:\n trimmed_source = truncate_middle(trimmed_source, longest_allowed_path_len)\n trimmed_dest = truncate_middle(trimmed_dest, longest_allowed_path_len)\n print(\n Fore.YELLOW + Style.BRIGHT + trimmed_source + Style.NORMAL,\n \"->\",\n Style.BRIGHT + trimmed_dest + Style.RESET_ALL,\n )", "def getCloneArgs(self):\n\n values = {\n \"subscribed\": self.subnode_subscribed.makeClone(),\n \"subscript\": self.subnode_subscript.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def 
CopyTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('copyTapSettings', payload=payload, response_object=None)", "def get_args():\n parser = argparse.ArgumentParser(description='Rsync like thingy')\n parser.add_argument('--dry-run', required=False,\n action='store_true', default=False, dest='dry_run',\n help='Does not do anything on the destination filesystem')\n\n parser.add_argument('-v', '--verbose', required=False,\n action='count', default=0, dest='vlevel',\n help='Log level')\n\n parser.add_argument('--dump-delta', required=False,\n default=None, metavar='DELTA_PATH', dest='delta_path',\n help='If a filename is specified, the delta is dumped into the file as JSON')\n\n parser.add_argument('--max-depth', required=False,\n type=int, default=10, dest='max_depth',\n help='Max depth for both local and remote')\n\n parser.add_argument('--adb-batch-size', required=False,\n type=int, default=5, dest='adb_batch_size',\n help='Maximum number of adb push to run in parallel')\n\n parser.add_argument('--command-batch-size', required=False,\n type=int, default=100, dest='command_batch_size',\n help='Maximum number of arguments / chained commands to run at once')\n\n parser.add_argument('--delete', required=False,\n action='store_true', default=False, dest='delete_files',\n help='Creates a \"perfect mirror\" of local at remote')\n \n parser.add_argument('source', help='The source')\n parser.add_argument('destination', help='The destination')\n return parser.parse_args()", "def svn_client_copy(svn_client_commit_info_t_commit_info_p, char_src_path, svn_opt_revision_t_src_revision, char_dst_path, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def __get_datatables_args():\n\n table_args = dict()\n\n #\n # Common Arguments\n #\n\n table_args['column-count'] = 0\n table_args['sort-col-count'] = 0\n\n if request.args.get('draw'):\n table_args['sequence'] = request.args.get('draw')\n\n if request.args.get('start'):\n table_args['offset'] = int(request.args.get('start'))\n\n if request.args.get('length'):\n table_args['limit'] = int(request.args.get('length'))\n\n if request.args.get('search[value]'):\n table_args['filter'] = request.args.get('search[value]')\n\n if request.args.get('search[regex]'):\n table_args['filter-regex'] = request.args.get('search[regex]')\n\n #\n # Custom Arguments\n #\n\n if request.args.get('time_filter'):\n table_args['time_filter'] = request.args.get('time_filter')\n\n i = 0\n while True:\n if request.args.get('columns[%d][data]' % i):\n table_args['column-count'] += 1\n table_args['mDataProp_%d' % i] = request.args.get('columns[%d][data]' % i)\n else:\n break\n\n #\n # Column Search\n #\n\n if request.args.get('columns[%d][searchable]' % i):\n table_args['bSearchable_%d' % i] = request.args.get('columns[%d][searchable]' % i)\n\n if request.args.get('columns[%d][search][value]' % i):\n table_args['sSearch_%d' % i] = request.args.get('columns[%d][search][value]' % i)\n\n if request.args.get('columns[%d][search][regex]' % i):\n table_args['bRegex_%d' % i] = request.args.get('columns[%d][search][regex]' % i)\n\n #\n # Column Sort\n #\n\n if request.args.get('columns[%d][orderable]' % i):\n table_args['bSortable_%d' % i] = request.args.get('columns[%d][orderable]' % i)\n\n if request.args.get('order[%d][column]' % i):\n 
table_args['sort-col-count'] += 1\n table_args['iSortCol_%d' % i] = int(request.args.get('order[%d][column]' % i))\n\n if request.args.get('order[%d][dir]' % i):\n table_args['sSortDir_%d' % i] = request.args.get('order[%d][dir]' % i)\n\n i += 1\n\n return table_args", "def copyFlexor(*args, **kwargs)->AnyStr:\n pass", "def copy_db(src=FRESHDB, dst=[APPDB]):\n for dest in dst:\n try:\n x = shutil.copy2(src, dest)\n print('File copied to {}'.format(x))\n except shutil.SameFileError:\n print('Both source and destination are identical.')", "def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request", "def _copyTexture(self, src, dest):\n self.copyTextureNode.setShaderInput(\"src\", src)\n self.copyTextureNode.setShaderInput(\"dest\", dest)\n self._executeShader(\n self.copyTextureNode,\n (src.getXSize() + 15) / 16,\n (src.getYSize() + 15) / 16)", "def copy_source_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"copy_source_arn\")", "def getCloneArgs(self):\n\n values = {}\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"key\": self.subnode_key.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def copy(self, src_path: str, tgt_path: str) -> None:", "async def copy(self, _id: str, dst_id: str, *,\n rev: Optional[str] = None,\n dst_rev: Optional[str] = None,\n batch: Optional[bool] = None) -> dict:\n\n headers = dict(\n Destination=f'{dst_id}?rev={dst_rev}' if dst_rev else dst_id\n )\n\n params = dict(\n rev=rev,\n batch=\"ok\" if batch else None,\n )\n\n return await self.__connection.query('COPY', self._get_path(_id), params=params, headers=headers)", "def getCloneArgs(self):\n\n values = {\n \"source_code\": self.subnode_source_code.makeClone(),\n \"globals_arg\": self.subnode_globals_arg.makeClone()\n if self.subnode_globals_arg is not None\n else None,\n \"locals_arg\": self.subnode_locals_arg.makeClone()\n if self.subnode_locals_arg is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"condition\": self.subnode_condition.makeClone(),\n \"yes_branch\": self.subnode_yes_branch.makeClone()\n if self.subnode_yes_branch is not None\n else None,\n \"no_branch\": self.subnode_no_branch.makeClone()\n if self.subnode_no_branch is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def parse_args():\n\n copy_help = 'Copy files back to original target directory? 
Default False'\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', help=copy_help, action='store_true',\n required=False)\n arguments = parser.parse_args()\n return arguments", "def run_copy(self, src, dst):\n pass", "def get(self):\n copies = db.session.query(models.Copy)\n args = copy_query_parser.parse_args()\n copy_id = args['copy_id']\n if copy_id is not None:\n copies = copies.filter_by(id=copy_id)\n book_id = args['book_id']\n if book_id is not None:\n copies = copies.filter_by(book=book_id)\n\n username = args['user']\n if username is not None:\n copies = copies.filter_by(user=username)\n status = args[\"status\"]\n if status is not None:\n copies = copies.filter_by(status=status)\n if copy_id is None and book_id is None and username is None and status is None:\n return 'Please proved searching parameter', 400\n return [copy.serialize() for copy in copies], 200", "def getCloneArgs(self):\n\n values = {\n \"value\": self.subnode_value.makeClone(),\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"key\": self.subnode_key.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def svn_client_copy_source_t_revision_get(svn_client_copy_source_t_self): # real signature unknown; restored from __doc__\n pass", "def parse_args(args):\n parser = argparse.ArgumentParser(description='Parse Mysql Copy account you want',add_help=False)\n connect_setting = parser.add_argument_group('connect setting')\n connect_setting.add_argument('-h','--host',dest='host',type=str,help='Host the MySQL database server located',default='127.0.0.1')\n connect_setting.add_argument('-u','--user',dest='user',type=str,help='MySQL Username to log in as',default='root')\n connect_setting.add_argument('-p','--password',dest='password',type=str,help='MySQL Password to use', default='')\n connect_setting.add_argument('-P', '--port', dest='port', type=int,help='MySQL port to use', default=3306)\n copy_user = parser.add_argument_group('copy user')\n copy_user.add_argument('--src-user',dest='srcuser',type=str,help='copy from the user',nargs='*',default='*')\n copy_user.add_argument('--src-host',dest='srchost',type=str,help='copy from the host',nargs='*',default='*')\n copy_user.add_argument('--dest-user',dest='destuser',type=str,help='copy to the user',nargs='*',default='')\n copy_user.add_argument('--dest-host',dest='desthost',type=str,help='copy to the host',nargs='*',default='')\n\n parser.add_argument('--help', dest='help', action='store_true', help='help infomation', default=False)\n\n return parser", "def svn_client_copy2(svn_commit_info_t_commit_info_p, char_src_path, svn_opt_revision_t_src_revision, char_dst_path, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def copy_table(self, source_table_name, dest_table_name):\n logger.info(f\"Creating {dest_table_name} from {source_table_name}\")\n try:\n self._submit_single_q(COPY_TABLE_DDL.format(new_table_name=dest_table_name, old_table_name=source_table_name))\n except connector.errors.ProgrammingError as e:\n logger.error(f\"Table {dest_table_name} already exists. 
Exiting.\")\n return\n except Exception as e:\n logger.error(\"Failed creating table\")\n raise\n\n try:\n q = COPY_TABLE_DATA_DML.format(new_table_name=dest_table_name, old_table_name=source_table_name)\n self._submit_single_q(COPY_TABLE_DATA_DML.format(new_table_name=dest_table_name, old_table_name=source_table_name))\n except Exception as e:\n logger.error(\"Failed inserting data\")\n raise e\n\n # print(self._submit_single_q(\"checksum table xfrm_product, staging_product\")", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def CopyTo(self, *args, **kwargs):\n pass", "def copyPetscArguments(jobdir, argsfile):\n shutil.copyfile(argsfile, os.path.join(jobdir, \"petsc_commandline_arg\"))", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"Extract a sample of the data.\")\n\n parser.add_argument(\"userid\", help=\"User ID\")\n parser.add_argument(\"start\", help=\"Start\")\n parser.add_argument(\"end\", help=\"End\")\n parser.add_argument(\"source\", help=\"Source data file\")\n parser.add_argument(\"dest\", help=\"Destination file\")\n\n return parser.parse_args()", "def copy(ctx, source, dest, force=False):\n # print \"COPY:\", locals()\n # print \"COPY:\", ctx.force, ctx.verbose\n if source == dest:\n return dest\n\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = \"\"\n if sys.platform == 'win32':\n if force:\n flags += \" /Y\"\n # print 'copy {flags} {source} {dest}'.format(**locals())\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else: # pragma: nocover\n if force:\n flags += \" --force\"\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest", "def getCloneArgs(self):\n\n values = {\n \"iterator\": self.subnode_iterator.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def copyAttr(*args, attribute: Union[AnyStr, List[AnyStr]]=\"\", containerParentChild: bool=True,\n inConnections: bool=True, keepSourceConnections: bool=True, outConnections:\n bool=True, renameTargetContainer: bool=True, values: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def get_arguments():\n\tparser.add_argument('-i', '--interface', help='interface to affect')\n\tparser.add_argument('-m','--mac', help='mac to allocate')\n\n\targs = parser.parse_args()\n\tinterface = args.interface\n\tmac = args.mac\n\treturn (interface, mac)", "def args(self) -> typing.Tuple[str, typing.List[str]]:\n func = inspect.stack()[1][3]\n command = func[len(self.CMD_PREFIX):]\n return ('{} {}'.format(sys.argv[0], command),\n sys.argv[2:])", "def copy_continuous_backups_properties(events: dict, context: dict) -> List:\n if 'SourceTableName' not in events:\n raise KeyError('Requires SourceTableName')\n if 'TargetTableName' not in events:\n raise KeyError('Requires TargetTableName')\n\n source_table_name: str = events['SourceTableName']\n target_table_name: str = events['TargetTableName']\n continuous_backups_settings = _describe_continuous_backups(table_name=source_table_name)\n continuous_backups_status = 
continuous_backups_settings\\\n .get('ContinuousBackupsDescription', {})\\\n .get('PointInTimeRecoveryDescription', {})\\\n .get('PointInTimeRecoveryStatus', '')\n\n if continuous_backups_status in ['ENABLED', 'ENABLING']:\n _enable_continuous_backups(table_name=target_table_name)\n\n return continuous_backups_status", "def get_clipboard(*args, **kwargs):\n return G.DEVICE.get_clipboard(*args, **kwargs)", "def copy_resource_tags(events: dict, context: dict) -> dict:\n if 'SourceTableName' not in events:\n raise KeyError('Requires SourceTableName')\n if 'TargetTableName' not in events:\n raise KeyError('Requires TargetTableName')\n if 'Region' not in events:\n raise KeyError('Requires Region')\n if 'Account' not in events:\n raise KeyError('Requires Account')\n\n source_table_name = events['SourceTableName']\n region = events['Region']\n account = events['Account']\n resource_arn = f'arn:aws:dynamodb:{region}:{account}:table/{source_table_name}'\n tags = list(_list_tags(resource_arn=resource_arn))\n if tags:\n target_table_name = events['TargetTableName']\n resource_arn = f'arn:aws:dynamodb:{region}:{account}:table/{target_table_name}'\n _update_tags(resource_arn=resource_arn, tags=tags)\n\n return {\n \"Tags\": tags\n }", "def _FormSCPCommandLine(self, src, dst, extra_scp_args=None):\n assert not self.local, \"Cannot use SCP on local target.\"\n\n args = ['scp', '-P', str(self._ssh_port)] + self._ssh_args\n if self._ssh_identity:\n args.extend(['-i', self._ssh_identity])\n if extra_scp_args:\n args.extend(extra_scp_args)\n args += [src, dst]\n return args", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def getCloneArgs(self):\n\n values = {\n \"statements\": tuple(v.makeClone() for v in self.subnode_statements),\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"value\": self.subnode_value.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def test_source_copy_round_trip(self):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testcopyto (\n a int,\n b text,\n c float,\n d uuid,\n PRIMARY KEY (a, b)\n )\"\"\")\n\n insert_statement = self.session.prepare(\"INSERT INTO testcopyto (a, b, c, d) VALUES (?, ?, ?, ?)\")\n args = [(i, str(i), float(i) + 0.5, uuid4()) for i in range(1000)]\n execute_concurrent_with_args(self.session, insert_statement, args)\n\n results = list(self.session.execute(\"SELECT * FROM testcopyto\"))\n\n tempfile = self.get_temp_file()\n logger.debug('Exporting to csv file: {name}'.format(name=tempfile.name))\n\n commandfile = self.get_temp_file()\n with open(commandfile.name, 'w') as f:\n f.write('USE ks;\\n')\n f.write(\"COPY ks.testcopyto TO '{name}' WITH HEADER=false;\".format(name=tempfile.name))\n\n self.run_cqlsh(cmds=\"SOURCE '{name}'\".format(name=commandfile.name))\n\n # import the CSV file with COPY FROM\n self.session.execute(\"TRUNCATE ks.testcopyto\")\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n\n commandfile = self.get_temp_file()\n with open(commandfile.name, 'w') as f:\n f.write('USE ks;\\n')\n f.write(\"COPY ks.testcopyto FROM '{name}' WITH HEADER=false;\".format(name=tempfile.name))\n\n self.run_cqlsh(cmds=\"SOURCE '{name}'\".format(name=commandfile.name))\n new_results = list(self.session.execute(\"SELECT * FROM testcopyto\"))\n assert sorted(results) == sorted(new_results)", "def command_args(self):\n return self._command_args", "def 
clone(source, destination):\n\t\treturn \"CREATE DATABASE {0} WITH TEMPLATE {1};\".format(destination, source)", "def get_args_from_console(args):\n return {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": args.wastebasket_path\n }", "def copy_file_to_table(self, schema, table, filepath):\n fields = \", \".join(self.schemas[schema][table][0])\n sql = f'set role {self.write_role}; ' \\\n f'COPY {schema}.{table}( {fields} ) FROM stdin WITH DELIMITER \\',\\' CSV header;'\n return sql, open(filepath, 'r')", "def getCopyDestinations(self, hiveDB, hiveTable):\n\n\t\tsession = self.configDBSession()\n\t\tcopyTables = aliased(configSchema.copyTables)\n\n\t\tself.copyDestinations = []\n\n\t\tcopyTablesResult = pd.DataFrame(session.query(copyTables.copy_id, copyTables.hive_filter, copyTables.destination, copyTables.data_transfer).all()).fillna('')\n\t\tfor index, row in copyTablesResult.iterrows():\n\t\t\tif ';' in row['destination']:\n\t\t\t\tlogging.error(\"';' not supported in the Copy Destination name\")\n\t\t\t\tself.remove_temporary_files()\n\t\t\t\tsys.exit(1)\n\n\t\t\tfor hiveFilterSplit in row['hive_filter'].split(';'):\n\n\t\t\t\tif '.' not in hiveFilterSplit:\n\t\t\t\t\tlogging.warning(\"The filter in table 'copy_tables' with copy_id = %s contains an invalid filter. Missing a . in the database.table format\"%(row['copy_id']))\n\t\t\t\telse:\n\t\t\t\t\tfilterDB = hiveFilterSplit.split('.')[0]\n\t\t\t\t\tfilterTable = hiveFilterSplit.split('.')[1]\n\n\t\t\t\t\tif fnmatch.fnmatch(hiveDB, filterDB) and fnmatch.fnmatch(hiveTable, filterTable):\n\t\t\t\t\t\tdestString = \"%s;%s\"%(row['destination'], row['data_transfer'])\n\t\t\t\t\t\tdestStringASync = \"%s;Asynchronous\"%(row['destination'])\n\t\t\t\t\t\tdestStringSync = \"%s;Synchronous\"%(row['destination'])\n\t\t\t\t\t\tif destStringSync in self.copyDestinations and destString == destStringASync:\n\t\t\t\t\t\t\t# ASync have priority. 
So if sync is already in there, we remove it and add async\n\t\t\t\t\t\t\tself.copyDestinations.remove(destStringSync)\n\t\t\t\t\t\tif destStringASync not in self.copyDestinations and destStringSync not in self.copyDestinations:\n\t\t\t\t\t\t\tself.copyDestinations.append(destString)\n\t\n\t\tif self.copyDestinations == []:\n\t\t\tself.copyDestinations = None\n\t\treturn self.copyDestinations", "def svn_client_copy4(svn_commit_info_t_commit_info_p, apr_array_header_t_sources, char_dst_path, svn_boolean_t_copy_as_child, svn_boolean_t_make_parents, apr_hash_t_revprop_table, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def cp(self, src, dest):\r\n return self._call(\"-cp\", src, dest, suppress_output=True)", "def _FormSCPCommandLine(self, src, dst, extra_scp_args=None):\n assert not self.local, \"Cannot use SCP on local target.\"\n\n # -C enables compression.\n args = ['scp', '-C', '-P', str(self._ssh_port)] + self._ssh_args\n if self._ssh_identity:\n args.extend(['-i', self._ssh_identity])\n if extra_scp_args:\n args.extend(extra_scp_args)\n args += [src, dst]\n return args", "def getCloneArgs(self):\n\n values = {\n \"iterated_length\": self.subnode_iterated_length.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def test_copy_sources(self):\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Source\", first(metric_copy[\"sources\"].values())[\"name\"])", "def getPlainCopyPrefices():\n\n oldPrefix = \"\"\n newPrefix = \"\"\n\n # get the copyprefices\n copyprefix = readpar('copyprefixin')\n if copyprefix == \"\":\n copyprefix = readpar('copyprefix')\n\n if \"^\" in copyprefix:\n prefices = copyprefix.split(\"^\")\n oldPrefix = prefices[0]\n newPrefix = prefices[1]\n else:\n tolog(\"!!WARNING!!4444!! 
Unexpected copyprefix[in] format: %s\" % (copyprefix))\n\n return oldPrefix, newPrefix", "def args(self):\n return self._args.copy()", "def create_copyfile_dict(child):\n\n attrs = ['src', 'dest']\n values = [child.get(attr) for attr in attrs]\n\n if None in values:\n raise InvalidManifest(\n 'Missing required attribute in copyfile element'\n )\n\n return dict(zip(attrs, values))", "def getCloneArgs(self):\n\n values = {\n \"set_arg\": self.subnode_set_arg.makeClone(),\n \"value\": self.subnode_value.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def getCopyRef(self):\n return self.base.get(\"copy_ref\", [])", "def getCloneArgs(self):\n\n values = {\n \"tried\": self.subnode_tried.makeClone(),\n \"except_handler\": self.subnode_except_handler.makeClone()\n if self.subnode_except_handler is not None\n else None,\n \"break_handler\": self.subnode_break_handler.makeClone()\n if self.subnode_break_handler is not None\n else None,\n \"continue_handler\": self.subnode_continue_handler.makeClone()\n if self.subnode_continue_handler is not None\n else None,\n \"return_handler\": self.subnode_return_handler.makeClone()\n if self.subnode_return_handler is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def copy_table(source_table, destination_table, db='default'):\n try:\n with connections[db].cursor() as cursor:\n cursor.execute('CREATE TABLE IF NOT EXISTS %s LIKE %s;' % (destination_table, source_table))\n except:\n pass", "def get_args(self):\n req_argv = self._ptr.contents.argv\n args = []\n if bool(req_argv):\n i = 0\n while 1:\n s = bytestostr(req_argv[i])\n i += 1\n if s == None:\n break\n args.append(s)\n return args", "def svn_client_commit_item2_t_copyfrom_url_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def drag_dest_info(self):\n return DdTargets.URI_LIST", "def svn_fs_copied_from(*args):\r\n return _fs.svn_fs_copied_from(*args)", "def dest_columns(self):\n return self.intersection + self.dest_renames" ]
[ "0.62132674", "0.56889355", "0.55710876", "0.55588037", "0.5540921", "0.54608697", "0.54608697", "0.54608697", "0.53417224", "0.5325953", "0.52964944", "0.52829", "0.5282849", "0.52788526", "0.52047265", "0.52009565", "0.51657414", "0.51652837", "0.516145", "0.5143834", "0.5141003", "0.5089321", "0.5060838", "0.5059258", "0.5040755", "0.50163513", "0.4990004", "0.49761227", "0.4948509", "0.4938476", "0.49368966", "0.49062234", "0.48965007", "0.4858598", "0.48385695", "0.4830435", "0.48218018", "0.48083597", "0.48037088", "0.4801186", "0.48010105", "0.4800827", "0.47903466", "0.478679", "0.47862244", "0.4776404", "0.476859", "0.47649565", "0.47568598", "0.474825", "0.4725044", "0.47191182", "0.47161132", "0.47158703", "0.47028", "0.46928236", "0.46928236", "0.46928236", "0.46928236", "0.46928236", "0.46928236", "0.46928236", "0.4689384", "0.4687183", "0.46865472", "0.46853408", "0.46819425", "0.46801308", "0.46769735", "0.46666905", "0.4666626", "0.4666013", "0.4645687", "0.46397057", "0.46252763", "0.46245512", "0.46213064", "0.4617944", "0.4614827", "0.46081105", "0.46067435", "0.4602058", "0.4600361", "0.45927164", "0.45917934", "0.4591577", "0.4589844", "0.45893788", "0.45889303", "0.45842716", "0.45831826", "0.45789352", "0.45689532", "0.4568338", "0.4566945", "0.4565554", "0.45598337", "0.45584893", "0.4558324", "0.45500827" ]
0.8508633
0
Print a simple greeting to each user in the list.
def greet_users(names):
    for name in names:
        msg = "Hello, " + name.title() + "!"
        print(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def greet_users(names):\n for name in names:\n print(f\"Hello, {name.title()}!\")", "def greeting(list_of_guests):\r\n for i in list_of_guests: \r\n print('Witaj ' + i)", "def greet_users(names):\n for name in names:\n msg = f\"Hello, {name.title()}\"\n print(msg)", "def greet_users(names):\n for name in names:\n msg = f\"Hello, {name.title()}!\"\n print(msg)", "def greet_users(names):\n for name in names:\n msg = f\"Hello, {name.title()}!\"\n print(msg)", "def greet_user(names):\n\n for name in names:\n msg = f\"Hello, {name.title()} !\"\n print(msg)", "def print_users(self):\n for i, item in enumerate(self.users):\n print(\"{}. {}\".format(i, item.name))", "def greet_user(self):\n print(\"Hello, \" + self.first_name.title() + \" \" + self.last_name.title() + \"!\")", "def greet_user(self):\n print(\"Hello \" + self.first_name.title() + \" \" +\n self.last_name.title() + \", welcome back!\")", "def greet_user(self):\n print(\"Welcome, \" + self.first_name.title() + \"!\")", "def greet_user(self):\r\n\r\n print(\"Hello \" + self.first_name.title() + \" \" + self.last_name.title() + \".\")", "def greet_user(self):\n greeting = f\"Hi {self.first_name.title()}, welcome back!\\n\"\n print(greeting)", "def greet_user(self):\n print(\"\\nWelcome, \" + self.username + \"!\")", "def greet_user(self):\n print(\"\\nWelcome, \" + self.username + \"!\")", "def greet_user(self):\r\n\t\tprint(f\"\\nHello {self.first_name}!\\n\")", "def greet_user(self):\n print(\"Hello \" + self.f_name.title() + \"!, hope you're well today!\")", "def greet_user(username):\r\n print(f\"Hello, {username.title()}!\")", "def greet_user(self):\n\t\tprint(f\"How are you doing {self.first_name.title()}?\")", "def greet_user():\r\n print('Hi,' + FirstName + ' ' + LastName + ' thanks for joining us inside the beer app!')", "async def _welcome(self, ctx, *, users: discord.User = None):\n if not users:\n await ctx.send(f\"Welcome {ctx.author.mention} to {ctx.guild.name}!\")\n else:\n if len(users) > 1:\n users = humanize_list(users)\n else:\n users = users[0].mention\n await ctx.send(f\"Welcome {users} to {ctx.guild.name}!\")", "def print_users(self):\n for user in self.users.values():\n print(user)", "def greet_user(self):\n print(f\"Hiya {self.username}!\")", "def greet_user(username):\n print(\"Hello, \" + username.title() + \"!\")", "def greet_user(username):\n print(\"Hello, \" + username.title() + \"!\")", "def greet_user(username):\r\n print(\"Hello, \" + username + \"!\")", "def greeter_user(username):\n print(f\"Hello {username}\")", "def greet_user(username):\n print(\"Hello, \" + username + \"!\")", "def greet_user(self):\n print(\"Greatings \"+ self.first_name + \" \" + self.last_name +\", welcome to the matrix.\")", "def greet_user():\n print(\"Hello\")", "def greeting(players_name):\n print \"\\nGreat! Welcome, \" + players_name + \". 
The purpose of this game is to fill in the blanks for all the sentences provided.\"", "def greet_user():\n username = get_user()\n\n if username:\n print(f\"Välkommen tillbaks {username.title()}!!\")\n else:\n username = create_new_user()\n print(f\"Vi kommer ihåg dig när du återvänder {username}\")", "def greet_username(username):\n print(\"Hello, \" + username.title() + \"!\")", "async def greeting(self, ctx):\n await self.bot.say('hello {0.name}'.format(ctx.message.author))", "def greet_user():\n print(\"Hello!\")", "def greet_user():\n print(\"Hello!\")", "def greet_user():\n print(\"Hello!\")", "def greet(*names):\n print(names)\n for name in names:\n print(' Hello {0}!'.format(name))", "def list_users(item):\n users = User.load_all_users(item)\n for user in users:\n print(user.username)", "def show_greeting(self):\n self.output(' ------------------------ ')\n self.output('You are now playing ' + self.name)\n self.output(self.greeting)\n self.output(' ------------------------ ')", "def welcome_user():\n welcome_string = 'Welcome to the brain Brain Games!\\n'\n print(welcome_string)\n name = prompt.string('May I have your name? ')\n print('{0}, {1}!'.format('Hello', name))", "def greet_user():\r\n print(\"hello!\")", "def greet_user():\n username = get_stored_username()\n if username:\n print(\"Welcome back, \" + username['Name'] + \"!\")\n else:\n username = get_new_username()\n print(\"We'll remember you when you come back, \" + username + \"!\")", "def __list_all_users(self):\n\n usernames_dict = get_data.get_usernames_dict()\n if len(usernames_dict) > 0:\n first_str = 'user'\n second_str = 'posts scraped'\n descriptor = '{:<40} {}'\n print('')\n print(descriptor.format(first_str, second_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-'))\n for number, username in usernames_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + username\n second = str(get_data.get_user_post_count(username))\n print(descriptor.format(first, second))\n else:\n print('no users found in the database')", "def greeting(self):\n print(\"Hello! My name is {name}.\".format(name=self.name))", "def greeting(self):\n print(\"Hello! My name is {name}.\".format(name=self.name))", "def greet_user():\n username = load_user_data()\n if username != None:\n print(\"Welcome back, \" + username)\n else:\n register_user()", "def greeting(name, message):\n print(f'Hello {name}!')\n print(message)", "def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())", "def list_users():\n\n users = User.query.order_by(\"last_name\").all()\n return render_template(\"users/user_list.html\", users=users)", "def list_users():\n users = User.query.order_by(User.last_name, User.first_name).all()\n return render_template('index.html', users=users)", "def show_users():\n return 'hehe'", "def home_list():\n\tui.lst1()\n\tui.lst2()\n\tx = db.get_users()\n\tfor i in x:\n\t\tz = len(i[1])\n\t\tf = 16-z\n\t\tl = \" \"*f\n\t\tprint i[1], l, i[4],l,i[5]\n\traw_input(\"Press [enter] to goto homescreen\")\n\thome()", "def greet_user(name = 'Menoetius'):\n print(\"Hello, my name is \" + name.title() + \"!\")", "def welcome():\n name = prompt.string('May I have your name? 
')\n print(f'Hello, {name}!') # noqa: WPS421", "def greeting(name):\n print(\"\")\n print(\n \"Welcome to Mister Sweet's Mad Lib Story Telling Journey \" + name +\n \" :)\")\n print(\"\")", "def show():\n return render_template(\n 'listUsers.html',\n title='List Users',\n message='These are the users in our system'\n )", "def greet_user():\n\tusername = get_stored_username()\n\tif username:\n\t\tprint(\"Welcome back, \" + username + \"!\")\n\telse:\n\t\tusername = get_new_username()\n\t\tprint(\"We'll remember you when you come back, \" + username + \"!\")", "def do_list_users(self, line):\n users = self.protocol.cmd_list_users(exp=line, return_list=True)\n t = PrettyTable(users[0])\n for u in users[1:]:\n t.add_row(u)\n print(t)", "def greet_guest():\n print('Welcome')", "def greeting():\n print('\\n',\n '=' * 45,\n \"\\n Welcome to this Module-Creation-Test-Program!!!\",\n '\\n',\n '=' * 45)", "def _list_users(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows:\")\n for i in users:\n print(users[i][\"name\"])\n self._list_user_settings(users)", "def user_list():\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "def display_list(the_list):\n print(\"\\n===================================\")\n for person in the_list:\n print(\"{name:12s}\\t\\t{phone}\".format(name=person.name, phone=person.phone))\n if the_list == []:\n print(\"\\nNo entries found!\\n\")\n print(\"===================================\\n\")", "def printusers(self, irc, msg, args):\n irc.reply(self.user_report(), prefixNick=False)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list_all(message=''):\n return Response(render_template('employee/user/list.html',\n users=User.query.all(),\n message=message),\n mimetype='text/html')", "def list_users(self, user=None):\n from expfactory.database.models import Participant\n\n participants = Participant.query.all()\n users = []\n for user in participants:\n users.append(self.print_user(user))\n return users", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def guestlist_handler(userdata, *args):\n\t\tfor guest in userdata[\"guestlist\"]:\n\t\t\tprint(shlex.quote(guest), end=\" \")\n\t\t\n\t\tprint()", "def user_list():\n\n users = User.query.all()\n \n return render_template(\"user_list.html\", users=users)", "async def tod_list(self, ctx, *args):\n message = \"__Currently Playing__\\n\"\n if len(self.players) == 0:\n message = \"There are currently no users playing.\"\n for player in self.players:\n message += f\"> {str(player)[:-5]}\\n\"\n await ctx.send(message)", "def do_user_list(cs, args):\n _, users = cs.users.list()\n fields = ['user_id', 'username', 'email', 'realname', 'comment']\n utils.print_list(users, fields, sortby=args.sortby)", "def print_user_host(host_list, user, ul):\n \n print('\\nHost-group info pertaining to {0}:'.format(user))\n print('-'.center(80, 
'-'))\n if len(ul) == 0:\n print('User is not a member of any user-lists.')\n else:\n print('User belongs to the following user-lists:')\n for u in ul:\n print(u)\n print()\n print('User has access to the following host-groups:')\n for hg in host_list:\n print(hg)\n print('-'.center(80, '-') + '\\n')\n return", "def user_list():\n\n users = User.query.all()\n return render_template(\"/user_list.html\", users=users)", "async def list_users(self, ctx):\n \n path = \"Users\"\n headers = {\n 'accept': 'application/json'\n }\n response = send_request(method=\"get\", path=path, headers=headers)\n users = []\n for page in response:\n users.append(f\"**{page['Name']}**: ``{page['Id']}``\")\n log.debug(users)\n\n embed = embeds.make_embed(ctx=ctx, title=\"List Users\", image_url=\"https://emby.media/resources/logowhite_1881.png\")\n\n await LinePaginator.paginate([line for line in users], ctx, embed, restrict_to_user=ctx.author)", "def describe_user(self):\n\t\tprint(f\"\\n{self.first_name.title()} {self.last_name.title()} \"\n\t\t\tf\"is from {self.location.title()}.\")", "def greet_user():\n\tusername = get_stored_username()\n\tif username:\n\t\tprint(f\"WElcome back {username}!\")\n\telse:\n\t\tusername = get_new_username()\n\t\tprint(f\"We'll remember you when you come back again {username}! \")", "def list( self, mess, args):\n user = self.get_sender_username(mess)\n args = args.replace(' ', '_')\n if user in self.users:\n user_list = 'All these users are subscribed - \\n'\n user_list += '\\n'.join(['%s :: %s' %(u, self.users[u]) for u in sorted(self.users)])\n if self.invited.keys():\n user_list += '\\n The following users are invited - \\n'\n user_list += '\\n'.join(self.invited.keys())\n self.log.info( '%s checks list of users.' % user)\n return user_list", "def user_list(message=''):\n target_users = User.query.filter_by(name=request.form['name']).all()\n\n if not any(target_users):\n return search_user(\"Unknown user.\")\n\n return Response(render_template('employee/user/list.html',\n users=target_users,\n message=message),\n mimetype='text/html')", "def do_list(self, arg):\n print('The roster includes the following members:\\n')\n lines = formatTable(\n map(self.memberToList, self.roster),\n [\n ColumnFormat('id', 4),\n ColumnFormat('name', 30),\n ColumnFormat('introduced', 12)\n ]) \n for line in lines: \n print(line)", "def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")", "def welcome_user():\n print('Welcome to the Brain Games!')\n name = prompt.string('May I have your name? 
')\n print('Hello, {0}!'.format(name)) # noqa: WPS421\n return name", "def list_users():\n\n db_users = User.query.all()\n\n return render_template(\"list_users.html\", headline=\"Blogly Users\", users=db_users)", "def greeting_message(name):\n print('My name is ' + name)", "def user_list(message=''):\n return Response(render_template('admin/user/list.html',\n users=User.query.all(),\n message=message),\n mimetype='text/html')", "def welcome(self):\n print(\"\\t\\t\\t\\t\\t============================\")\n print(\"\\t\\t\\tWelcome to Shakespeare's Phrase Hunter!\"\n \"\\n\\t\\t\\tYou will be tested on your Shakespeare knowledge with various \"\n \"\\n\\t\\t\\tphrases from his writings.\")\n print(\"\\t\\t\\t\\t\\t============================\")", "def say_hi(self):\n print(\"Hi there, everyone!\")", "def __ui_list_all_persons(self):\n persons_list = self.__person_service.service_get_persons_list()\n\n if len(persons_list) == 0:\n print(\"The list of persons is empty!\")\n else:\n print(\"The list of persons in your agenda:\")\n for person in persons_list:\n print(\" \" + str(person))\n print(\"\")", "def view_users(stdscr):\n stdscr.clear()\n safe_put(stdscr, \"* marks a user online at last update. Hit any key to return to menu.\", (2, 1))\n row = 4\n for user in taunet.users.all():\n if user.is_on:\n safe_put(stdscr, \"*\", (row, 1))\n safe_put(stdscr, user.name, (row, 3))\n row += 1\n stdscr.refresh()\n\n # Wait for any key, then clear and return to menu.\n stdscr.getch()\n stdscr.clear()\n stdscr.refresh()", "def main():\n data = get_db()\n myCursor = data.cursor()\n myCursor.execute(\"SELECT * FROM users\")\n description = [desc[0] for desc in myCursor.description]\n\n logger = get_logger()\n\n for user in myCursor:\n userInfo = \"\".join(\n f'{des}={str(usr)}; ' for usr, des in zip(user, description)\n )\n logger.info(userInfo)\n\n myCursor.close()\n data.close()", "def describe_user(self):\r\n print('\\nFirst Name: ' + self.first_name.title(), end='\\n',)\r\n print('Last Name: ' + self.last_name.title(), end='\\n')\r\n print('Address: ' + self.address.title(), end='\\n',)\r\n print('State: ' + self.state.title(), end='\\n',)\r\n print('Country: ' + self.country.title())", "def show_users():\r\n users = User.query.order_by(User.last_name,User.first_name).all()\r\n return render_template('list.html', users=users)", "def greet_player(name):\n\t\n\tprint \"How are are you doing %s?\" % name", "def greet(*args):\n print(greet.__doc__)\n # names is a tuple with arguments\n for name in args:\n print(\"Hello\", name)" ]
[ "0.7734112", "0.77260417", "0.77145106", "0.76987046", "0.76987046", "0.7657923", "0.7309932", "0.7257354", "0.72450185", "0.71396", "0.7127663", "0.71238464", "0.70934796", "0.70934796", "0.7048947", "0.70248073", "0.69579583", "0.69458425", "0.6898827", "0.6775238", "0.67534596", "0.6752124", "0.67099774", "0.67099774", "0.6647687", "0.6625254", "0.66171527", "0.66072917", "0.6599232", "0.6530941", "0.6511075", "0.6386513", "0.63414574", "0.6332193", "0.6332193", "0.6332193", "0.63153094", "0.6304812", "0.62947744", "0.62882507", "0.6280475", "0.6264375", "0.6261962", "0.6204842", "0.6204842", "0.61728466", "0.6123615", "0.61222816", "0.6119334", "0.6117244", "0.61068803", "0.61064", "0.6101039", "0.60837436", "0.6071284", "0.60652494", "0.6060082", "0.6052348", "0.6016096", "0.6013506", "0.60024095", "0.6000968", "0.600021", "0.5997099", "0.59944695", "0.59944695", "0.59944695", "0.59944695", "0.59944695", "0.59944695", "0.5994234", "0.59913105", "0.59907746", "0.5987951", "0.59814095", "0.5979029", "0.59762615", "0.59697515", "0.59508884", "0.59490275", "0.59419936", "0.59373695", "0.5933945", "0.5927176", "0.59161186", "0.59079194", "0.59035516", "0.59019226", "0.5887828", "0.58837277", "0.585537", "0.5853326", "0.5848565", "0.5838953", "0.58369875", "0.58303624", "0.581508", "0.58148783", "0.58062875" ]
0.7734813
1
Return a full name, neatly formatted.
def get_formatted_name(first_name, last_name, middle_name=''):
    if middle_name:
        full_name = first_name + ' ' + middle_name + ' ' + last_name
    else:
        full_name = first_name + ' ' + last_name
    return full_name.title()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()", "def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()", "def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()", "def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()", "def full_name(self) -> str:\r\n\t\tname = f'{self.last_name} {self.first_name}'\r\n\t\tif self.middle_name:\r\n\t\t\tname += ' ' + self.middle_name\r\n\t\treturn name", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % 
(self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\r\n full_name = '%s' % (self.name)\r\n return full_name.strip()", "def get_formatted_name(first, last):\n\tfull_name = first + ' ' + last\n\treturn full_name.title()", "def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def get_full_name(self):\n return \"%s %s\" % (self._first_name, self._last_name)", "def get_full_name(self):\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_formatted_name(first_name, last_name): \r\n full_name = f\"{first_name} {last_name}\"\r\n return full_name.title()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def full_name(self):\n \tif self.first_name and self.last_name:\n \t\treturn \"{} {}\".format(self.first_name, self.last_name)", "def get_formatted_name(first, last):\n full_name = first + ' ' + last\n return full_name.title()", "def get_full_name(self):\n full_name = \"%s %s\" % (self.firstname, self.lastname)\n return full_name.strip()", "def get_full_name(self):\n full_name = '{} {}'.format(self.first_name, self.last_name)\n return full_name.strip()", "def get_formatted_name(first_name, last_name):\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name,last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_full_name(self):\n return u'%s %s' % (self.first_name, self.last_name)", "def get_formatted_name(first_name,last_name):\n\tfull_name=first_name+ ' ' +last_name\n\treturn full_name.title()", "def get_formatted_name(first, last):\n full_name = f\"{first} {last}\"\n return full_name.title()", "def get_full_name(self):\n if self.patronymic_name:\n return '{} {} {}'.format(\n self.first_name,\n self.patronymic_name,\n self.last_name,\n )\n\n return '{} {}'.format(\n self.first_name,\n self.last_name,\n )", "def get_full_name(self):\n return \"{} {}\".format(self.first_name, self.last_name)", "def get_formatted_name(first_name, last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first, last):\n full_name = f'{first} {last}'\n return full_name.title()", "def _get_full_name(self):\n if self.middle_name:\n return u'%s %s %s' % (self.first_name, self.middle_name,\n self.last_name)\n else:\n return u'%s %s' % (self.first_name, self.last_name)", "def get_formatted_name(first_name,last_name):\n full_name= first_name + \" \"+last_name\n return full_name.title()", "def full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def full_name(self):\n return 
f\"{self.first_name} {self.last_name}\"", "def full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def get_full_name(self):\n return f'{self.first_name} {self.last_name}'", "def get_formatted_name(first_name, middle_name, last_name):\n full_name = f\"{first_name} {middle_name} {last_name}\"\n return full_name.title()", "def get_full_name(self):\n return self.name+self.last_name", "def full_name(self):\n return self.first_name + \" \" + self.last_name", "def get_full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_surname)", "def get_full_name(self) -> str:\n return f\"{self.first_name} {self.last_name}\"", "def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def get_formatted_name(self):\n\n return '%s %s' % (self.last_name, self.first_initial)", "def full_name(first_name, last_name):\n\t\n\treturn first_name + \" \" + last_name", "def get_full_name(self):\n return self.first_name + ' ' + self.last_name", "def full_name(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name)", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )", "def get_formatted_name(first_name, last_name, middle_name=''):\r\n if middle_name:\r\n full_name = f\"{first_name} {middle_name} {last_name}\"\r\n else:\r\n full_name = f\"{first_name} {last_name}\"\r\n return full_name.title()", "def get_full_name(self):\n return self.last_name + self.first_name", "def get_formatted_name(first_name, last_name, middle_name = ''):\n if middle_name:\n full_name = f\"{first_name} {middle_name} {last_name}\"\n else:\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def full_name_short(self):\n return \"{}. 
{}\".format(str(self.user.first_name)[:1], self.user.last_name)", "def full_name(first_name, last_name):\n return first_name + \" \" + last_name", "def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def get_formated_name(first_name,last_name):\n\tfull_name = first_name + '' + last_name\n\treturn full_name.title()", "def get_formatted_name(first_name, last_name, middle_name = ''):\n if middle_name:\n full_name = first_name + ' ' + middle_name + ' ' + last_name\n else:\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_full_name(self):\n return self.name + \" \" + self.email", "def formatted_name(first_name, last_name, middle_name = ''):\n full_name = first_name + ' ' + middle_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n # Describe the function\n # The names are joined into full name\n formatted_name = first_name + ' ' + last_name\n # return the value, don't do anything with it yet\n return formatted_name.title()", "def short_name(self) -> str:\r\n\t\treturn f'{self.last_name} {self.first_name}'", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def get_full_name(self):\n return self.first_name+\" \"+self.last_name", "def get_formatted_name(first,last,middle = ''):\n if middle:\n full_name = first + ' ' + middle + ' ' + last\n else:\n full_name = first + ' ' + last\n return full_name.title()", "def fullname(self):\n return '{} {}'.format(self.fname,self.lname)", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username", "def get_full_name(self):\n return '{}, {}'.format(self.last_name, self.first_name)", "def fullname(self):\n return self.fname() + ' ' + self.lname()", "def get_full_name(self):\n return self.name" ]
[ "0.87166595", "0.86399925", "0.860218", "0.860218", "0.8595642", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.8579464", "0.85681736", "0.8555431", "0.85347563", "0.85347563", "0.85340965", "0.8530991", "0.85229343", "0.85161304", "0.85101086", "0.8504636", "0.848572", "0.8475814", "0.84587014", "0.84312713", "0.84312713", "0.84312713", "0.8427818", "0.8427174", "0.8419169", "0.84167993", "0.84123874", "0.8409802", "0.84084505", "0.84084505", "0.84084505", "0.8394749", "0.83689845", "0.8364981", "0.83552843", "0.83552843", "0.83552843", "0.834448", "0.83326125", "0.8326628", "0.83254", "0.83004296", "0.8295617", "0.8280516", "0.8280516", "0.8263089", "0.8261442", "0.8251206", "0.82287455", "0.82184637", "0.82184035", "0.820879", "0.819135", "0.8184172", "0.8176628", "0.8160043", "0.81207067", "0.81086946", "0.80960333", "0.8095", "0.80775666", "0.8075149", "0.8074684", "0.806589", "0.80025256", "0.7995147", "0.79878163", "0.79397106", "0.792853", "0.78728807", "0.7862347", "0.7800958", "0.77908313" ]
0.8130383
83
Return a dictionary of information about a person.
def build_person(first_name, last_name, age=''):
    person = {'first': first_name, 'last': last_name}
    if age:
        person['age'] = age
    return person
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_person(self):\n person_dict = {\n 'firstname': self.__firstname,\n 'lastname': self.__lastname,\n 'height': self.__height,\n 'weight': self.__weight,\n 'age': self.__age\n }\n return person_dict", "def who_am_i():\n return {'name': 'Jonathan Martinez', 'id': '201095569', 'email': '[email protected]'}", "def get_person(self, url: str) -> dict:\n person = self.read_html(url)\n\n return {\n # TODO: There's a better way of doing this.\n self._columns[0]: person.select_one(\"h1\").text.strip(),\n self._columns[1]: person.select_one(\".party-membership--party\").text,\n self._columns[2]: \"; \".join(\n [a.text for a in person.select('[href^=\"tel:\"]')]\n ),\n self._columns[3]: \"; \".join(\n [a.text for a in person.select(\".email-address a\")]\n ),\n self._columns[4]: \"; \".join(\n [a.text.strip() for a in person.select(\".contact-actions__twitter\")]\n ),\n }", "def parse_person(person):\n person_name = person['name']\n person_id = person['id']\n\n pages = get_pages_from_wiki_search(person_name)\n page = select_page_like_person(pages, person)\n\n wiki_person = {\n 'id': person_id, 'fullname': person_name,\n 'url': page.get('fullurl', None), \n 'declarator_profile': page.get('declarator_profile', None),\n 'words_intersection': page.get('words_intersection', None),\n }\n\n # if page with image\n if 'pageimage' in page:\n try:\n image = page['pageimage']\n url = page['original']['source']\n license = get_license_from_wiki(image, is_file=True)\n\n photo = {'title': image, 'url': url, 'license': license}\n wiki_person['photo'] = photo\n except KeyError as e:\n message = \"person_id: %i, Not found key: %s\" % (person_id, e)\n Querylog.error(message)\n return wiki_person", "def get_person(self, id):\n try:\n person = Person.get(Person.id == id)\n data = model_to_dict(person)\n except DoesNotExist:\n response.status = 404\n data = \"Not found\"\n return dict(name='Person', data=data)", "def get_person_like_json(self):\n return json.dumps(self.get_person())", "def display_person(person):\n name = person['name']\n followers = person['follower_count']\n description = person['description']\n country = person['country']\n print(f'{name}, a(n) {description}, from {country}.')\n return followers", "def showInfo(p,personDict):\n info1 = personDict['EnterpriseID'][p[0]]\n info2 = personDict['EnterpriseID'][p[1]]\n print (\"Person A:\",info1)\n print (\"Person B:\",info2)", "def getData(self):\r\n return personData(\r\n self.title.getVal(),\r\n self.first.getVal(),\r\n self.middle.getVal(),\r\n self.last.getVal(),\r\n self.suffix.getVal(),\r\n self.phone.getVal(),\r\n self.ext.getVal(),\r\n self.email.getVal(),\r\n self.affiliation.getVal())", "def get_user_info_by_name(self, username: str) -> dict:", "def info() -> Dict[str, Any]:", "def get_persons(self):\n response = self.do_request('/misc/user/export/json')\n if response:\n return response.json()", "def current_person(self):\n d = self.people_table_data[self.row_i]\n\n # \"fullname\", \"lunaid\", \"age\", \"dob\", \"sex\", \"lastvisit\", \"maxdrop\", \"studies\",\n info = dict(zip(self.person_columns, d))\n info[\"pid\"] = d[8] # pid not shown\n\n # dont get fname and lname from table\n # could word split, but need to be accurate at least for edit module\n if self.sql:\n res = self.sql.query.get_name(pid=info[\"pid\"])\n info[\"fname\"] = res[0][0]\n info[\"lname\"] = res[0][1]\n return info\n # # main model\n # self.checkin_button.setEnabled(False)\n # print('people table: subject selected: %s' % d[8])\n # self.render_person(pid=d[8], 
fullname=d[0], age=d[2],\n # sex=d[4], lunaid=d[1])\n # self.render_schedule(ScheduleFrom.PERSON)", "def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.update(data)\n return dct", "def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}", "def get_info(self) -> Optional[Dict[str, Any]]:", "def find(self, person):\n page = self.find_page(person)\n try:\n entity_id = self.get_entity_id(page.title)\n entity = self.get_entity(entity_id)\n person.dob = self.get_birthday(entity)\n person.occupation = self.get_occupation(entity)\n person.nationality = self.get_country_of_citizenship(entity)\n res_domicile = self.get_domicile(entity)\n if res_domicile:\n person.domicile = res_domicile\n elif person.nationality == self.get_birthcountry(entity):\n person.domicile = person.nationality # this is an assumption!\n birth_name = self.get_birth_name(entity)\n person.middle_name = self.get_middle_name(birth_name, person)\n if page:\n person.is_famous = 'True'\n else:\n person.is_famous = ''\n person.net_worth = self.get_networth(entity)\n person.description = page.summary\n person.set_raw()\n except:\n pass", "def to_dict(self) -> dict:\n return {\n 'author_id': self.id,\n 'fullname': self.fullname\n }", "def all_persons(self):\n all_persons = {}\n all_persons.update(self.staff)\n all_persons.update(self.fellows)\n return all_persons", "def get_personal_info(self):\n self.get(\"INFO\",\"GetPersonalInfo\")\n response = self.send()\n return response", "def get_person(self, requestId):\n return self.get_json('/verification/%s/person' % str(requestId))", "def get_persons(self):\n response = self.do_request('/management/persons/export/json/')\n if response:\n return response.json()", "def info(self) -> dict:", "def fetch_extra_data(resource):\n person_id = resource.get(\"cern_person_id\")\n return dict(person_id=person_id)", "def get_people(self):\n url = self.base_url + 'memberships'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()", "def get_donor_info(self):\n name = self.get_donor()\n if name in self.all_donors:\n person = self.r.hgetall(name)\n print(f\"Person: {name}\")\n for key, value in person.items():\n print(f\"{key}: {value}\")\n else:\n print(\"Name not in database.\")", 
"def _to_dict(self):\n\t\treturn {'id': self.id,\n\t\t\t\t'name': self.name,\n\t\t\t\t'surname': self.surname}", "def get_info_dict(self):\n return {\n 'bidi': self.bidi,\n 'code': self.code,\n 'name': self.name,\n 'name_local': self.name_local\n }", "def describe(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'age': calculate_current_age(self.birth_date),\n 'gender': self.gender,\n 'filmography': [appearance.movies.title for appearance in\n Appearance.query.filter(Appearance.actor_id == self.id).all()]\n }", "def info():\n if g.party_id is None:\n # No party is configured for the current site.\n abort(404)\n\n party = party_service.get_party(g.party_id)\n\n return {\n 'party': party,\n }", "def get_contact_info(self):\n outputDict = {\"USERNAME\": consts.USERNAME,\n \"IP\": consts.IPADDRESS, \n \"MACHINE\": consts.HOSTNAME, \n \"EMAIL\": '[email protected]', \n \"PHONE\": '203-722-6620'} # ::: TO DO::: dynamically get phone and email info automatically\n return outputDict", "def get_user_info_by_id(self, user_id: int) -> dict:", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def get_details_for_email(email):\n try:\n result = dict(clearbit.Enrichment.find(email=email, stream=True))\n except HTTPError as e:\n logging.info('Skipping clearbit.com services. REASON %s', str(e))\n\n return {}\n\n return {\n 'first_name': result.get('person', {}).get('name', {}).get('givenName') or '',\n 'last_name': result.get('person', {}).get('name', {}).get('familyName') or '',\n 'gender': result.get('person', {}).get('gender') or '',\n 'bio': result.get('person', {}).get('bio') or ''\n }", "def select_person():\r\n body = request.get_json()\r\n\r\n try:\r\n SELECT_PERSON_SCHEMA.validate(body)\r\n except SchemaError as err:\r\n raise ServiceBodyError(str(err))\r\n\r\n with sqlite_client:\r\n message = get_person(sqlite_client, body.get('id'))\r\n\r\n return jsonify({'name': message[0][1], 'cpf': message[0][2]})", "def to_cff_person(person: pybtex.database.Person) -> dict:\n # Map BibTeX to CFF fields\n name_fields = {\n \"last\": \"family-names\",\n \"bibtex_first\": \"given-names\",\n \"prelast\": \"name-particle\",\n \"lineage\": \"name-suffix\",\n }\n result = {\n cff_field: \" \".join(person.get_part(bibtex_field))\n for bibtex_field, cff_field in name_fields.items()\n if person.get_part(bibtex_field)\n }\n # Use CFF \"entity\" format if BibTex has no first & last names\n if list(result.keys()) == [\"family-names\"]:\n return {\"name\": result[\"family-names\"]}\n return result", "def person(self, person_id):\r\n return persons.Person(self, person_id)", "def person_object_factory():\n person = {\n 'lastName': rl_fake().last_name(),\n 'gender': random.choice(('M', 'F'))\n }\n\n # Make the person's name match their gender.\n person['firstName'] = rl_fake().first_name_male() if person['gender'] == 'M' else rl_fake().first_name_female()\n\n # These are all optional in the DB. 
Over time, we'll try all possibilities.\n if flip():\n person['birthday'] = rl_fake().date_of_birth(minimum_age=18).strftime('%Y-%m-%d')\n if flip():\n person['phone'] = rl_fake().phone_number()\n if flip():\n person['email'] = rl_fake().email()\n return person", "def get_author_info(self, author: str):\n for writer_word in self._writer_words:\n data = json.loads(requests.get(WIKIDATA_SEARCH + \"&srsearch=\" + author + \" \" + writer_word).text)\n pages = data.get(\"query\").get(\"search\")\n if len(pages) >= 1:\n pageid = pages[0].get(\"title\")\n author_details = self._reference.author_map.get(author)\n if author_details:\n return author_details\n if pageid == -1:\n continue\n\n else:\n response = requests.get(WIKIDATA_PARSE + pageid + \".json\")\n data = json.loads(response.text)\n if author.lower() not in data.get(\"entities\").get(pageid).get(\"labels\").get(\"en\").get(\"value\").lower():\n continue\n else:\n try:\n id = data.get(\"entities\").get(pageid).get(\"claims\").get(\"P31\")[0].get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n if str(id) != \"Q5\": # the id for human\n continue\n except IndexError:\n continue\n properties = data.get(\"entities\").get(pageid).get(\"claims\")\n author_details = {\"id\": pageid, \"gender\": self.get_gender(properties)}\n country_details = self.get_country(properties)\n author_details[\"country\"] = country_details\n self._reference.author_map[author] = author_details\n return author_details\n return {\"id\": \"Unknown\", \"gender\": \"Unknown\", \"country\": [{\"name\": \"Unknown\", \"region\": \"Unknown\"}]}", "def build_person(first_name, last_name, age=''):\r\n person = {'first':first_name, 'last':last_name}\r\n if age:\r\n person['age'] = age\r\n return person", "def __getstate__(self):\n\n # Get data in long format\n long = self.long_format()\n return {\"date\": self.date,\n \"persons\": long\n }", "def get_persons():\n resp = requests.get(API_URL).content\n persons = json.loads(resp)\n return persons", "def get_info(self):\n return {}", "def _to_dict(self):\n\t\tauthor = Author.query.get(self.authors_id)\n\t\treturn {'id': self.id,\n\t\t\t\t'name': self.name,\n\t\t\t\t'author_id': self.authors_id,\n\t\t\t\t'author_name': author.name,\n\t\t\t\t'author_surname': author.surname}", "def build_person(first_name, last_name, middle_name='', age=None): \n person = {'first': first_name, 'middle': middle_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def get_person(request):\n\n email = request.args.get(\"email\", None, str)\n # log_info(\"email is \" + email)\n\n if not email:\n log_info(\"get_person was called, but no email was provided in request\")\n return None\n\n if validators.email(email) and (email_requester := auth.check_teacher(request)):\n if email_requester and validators.email(email_requester):\n db = database.Database()\n student = db.get_student(email)\n return dict(student)\n\n elif validators.email(email) and (email_requester := auth.check_login(request)):\n if email_requester and validators.email(email_requester) and email == email_requester:\n db = database.Database()\n student = db.get_student(email)\n if 'notes' in student:\n del student['notes']\n\n return dict(student)\n\n log_info(\"No person with email \" + email + \" found in database\")\n return None", "def person_fields(self):\r\n return persons.PersonFields(self)", "def 
read_person(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM person WHERE personid =?\", (person_id,))\n _person = None\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n conn.close()\n return _person\n except:\n return None", "def infos(self):\r\n\t\tname = name\r\n\t\tlast_name = last_name", "def build_person(first_name,last_name, age =''):\n person = { 'first': first_name.title(), 'last' : last_name.title()}\n if age:\n person['age'] = age\n return person", "def parse_person(person):\n parsed = person_parser(person)\n if not parsed:\n parsed = person_parser_only_name(person)\n name = parsed.group('name')\n email = None\n else:\n name = parsed.group('name')\n email = parsed.group('email')\n\n return name, email", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def details(self):\n details = ProfileDetailsParser(self.details_string())\n\n return dict(\n follows=details.follows(),\n favorites=details.favorites(),\n rating=details.rating(),\n language=details.language(),\n genres=details.genres(),\n characters=details.characters(),\n )", "def get_person(self, user_id):\n endpoint = '/user/{}'.format(user_id)\n return self.get_request(endpoint)", "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def get_user_details(self, response):\n name = response.get(\"name\")\n return {\n \"username\": str(response.get(\"account_id\")),\n \"email\": response.get(\"email\"),\n \"fullname\": name.get(\"display_name\"),\n \"first_name\": name.get(\"given_name\"),\n \"last_name\": name.get(\"surname\"),\n }", "def get_author_info_from_people_collection(info):\n # TODO: probably we will need to extract this somewhere else\n URL = ('https://cds.cern.ch/submit/get_authors?'\n 'query={0}&relative_curdir=cdslabs%2Fvideos')\n if '0' in info or not info.get('a'):\n # There is already enough information or we don't have a name to query\n return info\n author_name = info.get('a')\n if PY2:\n # In Python 3, encoded name will change type to bytes and this will\n # cause query to CDS to fail\n author_name = author_name.encode('utf-8')\n author_info = _get_http_request(url=URL.format(author_name), retry=10)\n if not author_info or len(author_info) > 1:\n # Didn't find anything or find to many matches\n return info\n\n # Prepare author name\n author_info = author_info[0]\n if 'name' not in author_info:\n author_info['name'] = '{0}, {1}'.format(author_info['lastname'],\n author_info['firstname'])\n return MementoDict([\n (k, v) for k, v in chain(info.items(), iteritems(author_info))])", "def seat_profile(first_name, last_name, **passenger_info):\n\tprofile = {}\n\tprofile['first_name'] = first\n\tprofile['last_name'] = last\n\tfor key, value in 
passenger_info.items():\n\t\tprofile[key] = value\n\treturn profile", "def to_dict(self):\n return {\n \"id\":self.id,\n \"username\":self.email,\n \"email\":self.email,\n \"firstname\":self.firstname,\n \"lastname\":self.lastname\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"company\": self.company,\n \"member_since\": self.member_since,\n \"email\": self.email,\n \"job_title\": self.job_title,\n \"website\": self.website,\n \"about\": self.about,\n \"profile_photo_path\": self.profile_photo_path,\n \"events\": self.events,\n \"saved_events\": self.saved_events,\n \"sponsorships\": self.sponsorships,\n \"role\": self.role\n }", "def get_person_name(self, person_id):\n res = requests.get(url=\"https://api.ciscospark.com/v1/people/{}\".format(person_id),\n headers=self.headers)\n\n try:\n class person(object):\n firstName = res.json()['firstName']\n lastName = res.json()['lastName']\n\n return person\n except AttributeError as e:\n print(res.text)\n return None", "def identify_user(picture: str) -> Optional[dict]:\n data = api.identify_person_api(picture)\n return data.json()", "def to_dict(self):\n result = {\n \"id\": self.get_id(),\n \"name\": self.get_name(),\n \"email\": self.get_email(),\n \"google_id\": self.get_google_id(),\n \"creation_date\": self.get_creation_date(),\n \"last_updated\": self.get_last_updated(),\n \"berechtigung\": self.get_berechtigung(),\n \"student\": self.get_student(),\n }\n return result", "def get_profile_info(self):\n\n drill_path = str(Path.home())+\"/Documents/ball_e_profiles/drill_profiles/{drill_name}/{drill_name}.csv\".format(\n drill_name=self.drill_name)\n with open(drill_path) as file:\n csv_reader = csv.reader(file, delimiter=',')\n row_count = 0\n info_dict = dict()\n for row in csv_reader:\n if row_count == 0:\n row_count += 1\n else:\n info_dict[row[0]] = [row[1], row[2], row[3]]\n row_count += 1\n\n return info_dict", "def info(self):\n return f\"{self.get_first_name}, {self.get_last_name}. {self.get_age} y.o. 
#{self.get_id_number}\"", "def _get_userinfo(self):\n if not hasattr(self, \"_userinfo\"):\n self._userinfo = {\n \"name\" : self.user_name,\n \"email\" : self.user_email\n }\n if self.user_id:\n u = self.user\n if u.email:\n self._userinfo[\"email\"] = u.email\n\n # If the user has a full name, use that for the user name.\n # However, a given user_name overrides the raw user.username,\n # so only use that if this review has no associated name.\n if u.get_full_name():\n self._userinfo[\"name\"] = self.user.get_full_name()\n elif not self.user_name:\n self._userinfo[\"name\"] = u.username\n return self._userinfo", "def __repr__(self):\r\n return {'name':self.name, 'weight':self.organ_weight_grams, 'vital organ': self.vital_organ, 'organ system': self.organ_system}", "def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None", "def get_user_details(self, response):\n fullname, first_name, last_name = self.get_user_names(\n response.get(\"fullName\"),\n response.get(\"firstName\"),\n response.get(\"lastName\"),\n )\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\") or \"\",\n \"fullname\": fullname,\n \"first_name\": first_name,\n \"last_name\": last_name,\n }", "def show_all_information(self):\n return self.__dict__\n # print(self.first_name)\n # print(self.last_name)\n # print(self.age)\n # print(self.name)\n # print(self.gender)\n # print(self.number_of_children)", "def format_person(adj, dict):\n name = dict['name']\n net = int(dict['net_worth (USD)'])\n return \"The {0} person is {1}. 
They have a net worth of ${2:,}\".format(adj, name, net)", "def seat_profile(first, last, **passenger_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in passenger_info.items():\n profile[key] = value\n return profile", "def get_details(self):\n owner = self.fake.random_element(elements=self.owners)\n return {\n 'jurisdiction_property_id': self.fake.numerify(text='#####'),\n 'pm_parent_property_id': self.fake.numerify(text='#####'),\n 'lot_number': self.fake.numerify(text='#####'),\n 'address_line_1': self.address_line_1(),\n 'city': 'Boring',\n 'state': 'Oregon',\n 'postal_code': \"970{}\".format(self.fake.numerify(text='##')),\n 'year_built': self.fake.random_int(min=1880, max=2015),\n 'site_eui': self.fake.random_int(min=50, max=600),\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'owner_telephone': owner.telephone,\n 'owner_address': owner.address,\n 'owner_city_state': owner.city_state,\n 'owner_postal_code': owner.postal_code,\n }", "def user_dict(self):\n return {\n \"user_id\": self.user_id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othernames\": self.othernames,\n \"username\": self.username,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"is_admin\": self.is_admin,\n \"password\": self.password,\n \"registered_on\": self.registered_on\n }", "def get_pers_trans(lang: Lang) -> dict:\n return read_json(f'languages/{lang}/persons')", "def user_info(self):\n response = self.query('user_info')\n return response", "def seat_profile(first, last, **passenger_info):\n\tprofile = {}\n\tprofile['first_name'] = first\n\tprofile['last_name'] = last\n\tfor key, value in passenger_info.items():\n\t\tprofile[key] = value\n\treturn profile", "def serialize(self):\n return {\n \"first_name\" : self.first_name.capitalize(),\n \"last_name\" : self.last_name.capitalize(),\n \"name\" : self.first_name.capitalize() + ' ' + self.last_name.capitalize(),\n \"user_id\" : self.id,\n }", "def person_name(self):\n return self._person_name", "def serialize(self):\n return {\n 'name': self.name,\n 'year_of_birth': self.year_of_birth,\n 'location': self.location,\n 'telephone': self.telephone,\n 'primary_publisher': self.primary_publisher,\n }", "def info(self):\n return {}", "def get_patient_dict():\r\n return common.get_dict_all(get_patient_filename(), None)", "def enumerate_person(hf=0.5, age=(18, 60), n=100):\n for i in range(n):\n hfi = random.random() <= hf\n agei = random.randint(*age)\n namei = first_names[hfi]\n yield dict(gender=(1 if hfi else 0), age=agei, name=namei, idc=uuid.uuid4())", "def get_user_details(self, response):\n\n return {\n 'email': response.get('email'),\n 'id': response.get('id'),\n 'full_name': response.get('name')\n }", "def get_details(self):\n owner = self.fake.random_element(elements=self.owners)\n return {\n 'jurisdiction_property_id': self.fake.numerify(text='#####'),\n 'pm_parent_property_id': self.fake.numerify(text='#####'),\n 'lot_number': self.fake.numerify(text='#####'),\n 'address_line_1': self.address_line_1(),\n 'city': 'Boring',\n 'state': 'Oregon',\n 'postal_code': \"970{}\".format(self.fake.numerify(text='##')),\n 'year_built': self.fake.random_int(min=1880, max=2015),\n 'site_eui': self.fake.random_int(min=50, max=600),\n 'gross_floor_area': self.fake.random_number(digits=6),\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'owner_telephone': owner.telephone,\n 'owner_address': owner.address,\n 'owner_city_state': owner.city_state,\n 'owner_postal_code': 
owner.postal_code,\n }", "def __str__(self):\n return \"Person: {}, {},{}\".format(self.get_id(),self.get_creation_date(),self.get_name())", "def get_user_details(self, response):\n first_name, last_name = response['first-name'], response['last-name']\n email = response.get('email-address', '')\n return {'username': first_name + last_name,\n 'fullname': first_name + ' ' + last_name,\n 'first_name': first_name,\n 'last_name': last_name,\n 'email': email}", "def userinfo(self, access_token: str) -> dict[str, Any]:\n data: dict[str, Any] = self.client.get(\n url=f\"{self.protocol}://{self.domain}/userinfo\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n )\n return data", "def get_person(request, person_id):\n person = get_object_or_404(Person, pk=person_id)\n\n\n return render_to_response('people/person_detail.html', {\n 'person': person,\n })", "def persons(self):\r\n return persons.Persons(self)", "def getInfo(self):\n request = self._connection.get('bookmarklet')\n userdata = self._userinfo_regex.search(request.text)\n if userdata is None: userdata = self._userinfo_regex_2.search(request.text)\n if userdata is None: raise errors.DiaspyError('cannot find user data')\n userdata = userdata.group(1)\n return json.loads(userdata)", "def get_user_details(self, response):\n values = {\n 'username': unquote(response['nick']),\n 'email': unquote(response['email']),\n 'first_name': unquote(response['first_name']),\n 'last_name': unquote(response['last_name'])\n }\n\n if values['first_name'] and values['last_name']:\n values['fullname'] = '%s %s' % (values['first_name'],\n values['last_name'])\n return values", "def _get_information(self):\n info_dict = {}\n table = self._tab.find('table', class_='details')\n for row in table.find_all('tr'):\n row_key, row_value = row.find(class_='key'), row.find(class_='value')\n key_with_emoji, value = self._get_key(row_key), self._get_value(row_value)\n split_key = key_with_emoji.split(\" \", 1)\n\n if len(split_key):\n key = split_key[-1]\n info_dict.update({key: value})\n\n return info_dict", "def get_people(self):\n return self._people", "def who_handler(self, data, suffix=''):\n # Just to show data coming in...\n assert data['requested'] == 'name'\n\n return {\n 'name': self.get_current_user_full_name(),\n 'email': self.get_current_user_emails()\n }", "def __repr__(self):\n return f\"<Person {self.first_name.title()} {self.last_name.title()}>\"", "def describe_user(self):\n print(self.first_name.title() + \" \" + self.last_name.title() +\n \" is a \" + str(self.age) + \" year old who identifies as \" +\n self.gender + \".\")" ]
[ "0.78430605", "0.69874537", "0.68447304", "0.67983556", "0.6684294", "0.6533277", "0.64916706", "0.6416313", "0.64020026", "0.63483405", "0.63206047", "0.62963533", "0.6294688", "0.6291786", "0.6261915", "0.6250636", "0.62482584", "0.62299424", "0.62139595", "0.61749566", "0.6166419", "0.6141492", "0.6128635", "0.61096424", "0.61090773", "0.60918844", "0.60778916", "0.6075775", "0.6061554", "0.6049044", "0.60467345", "0.6007558", "0.59948546", "0.59868145", "0.59776676", "0.59728974", "0.5965312", "0.59625274", "0.5961576", "0.5955198", "0.59268564", "0.5914678", "0.59141594", "0.59012234", "0.5889845", "0.5862512", "0.5857002", "0.5842631", "0.5825038", "0.5824619", "0.58236694", "0.5823201", "0.5813905", "0.581051", "0.5804377", "0.5804243", "0.5804243", "0.5793926", "0.57915103", "0.57892233", "0.5787595", "0.57847065", "0.5779743", "0.5774924", "0.5755235", "0.57475406", "0.57376105", "0.57331", "0.5732158", "0.5726344", "0.57232374", "0.5716009", "0.57027775", "0.5696364", "0.56840235", "0.5678872", "0.5663425", "0.5652523", "0.56507766", "0.56466895", "0.56264484", "0.5616449", "0.5610548", "0.56087047", "0.5607547", "0.56049436", "0.5602947", "0.55985063", "0.5598037", "0.55908924", "0.55825245", "0.55601716", "0.55574024", "0.55556625", "0.55534", "0.5550851", "0.55495816", "0.55462015", "0.55394095" ]
0.58440995
48