query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
This function shows the read window. It allows the client to read all the messages, both received and sent. | def read_messages(self, msg_num):
self.clear_screen()
user_label = Label(self.root, text="Hello " + self.username, font=self.title_font,
bg=self.bg_color, height=2)
user_label.pack(pady=5, padx=50)
lbl_msg = Label(self.root, text="Message " + str(msg_num), font=self.title_font,
bg=self.bg_color)
lbl_msg.pack(pady=5, padx=10)
self.refresh_button = Button(self.root, text="Refresh page", font=self.text_font,
bg=self.bg_color, command=lambda: self.refresh(msg_num))
self.refresh_button.pack(padx=10, pady=10)
messages_frame = Frame(self.root)
messages_frame.pack(padx=30, pady=15)
scrollbar_msg = Scrollbar(messages_frame)
scrollbar_msg.pack(side=RIGHT, fill=Y)
text_widget = Text(messages_frame, width=50, height=15, font=self.text_font,
yscrollcommand=scrollbar_msg.set)
text_widget.pack()
scrollbar_msg.config(command=text_widget.yview)
button_send = Button(self.root, text="go back", font=self.text_font,
height=2, width=20, command=self.go_back_read)
button_send.pack(pady=5, side=BOTTOM)
button_send = Button(self.root, text="see/close message\ncontrol panel",
font=self.text_font,
height=2, width=20,
command=lambda: self.new_window_messages(button_send))
button_send.pack(pady=5, side=BOTTOM)
if self.msg_list:
if msg_num < len(self.msg_list):
next_msg = Button(self.root, text="next message", font=self.text_font,
height=2, width=20,
command=lambda: self.read_messages(msg_num + 1))
next_msg.pack(pady=5, padx=5, side=RIGHT)
if msg_num > 1:
previous_msg = Button(self.root, text="previous message", font=self.text_font,
height=2, width=20,
command=lambda: self.read_messages(msg_num - 1))
previous_msg.pack(pady=5, padx=5, side=LEFT)
text_widget.insert(END, "from: " + self.msg_list[msg_num - 1][2] + "\n")
text_widget.tag_add('sender', '1.0', '1.end')
text_widget.tag_config('sender', font='none 14')
text_widget.insert(END, self.msg_list[msg_num - 1][0])
text_widget.tag_add('msg', '2.0', END)
text_widget.tag_config('msg', font='none 12')
text_widget.config(state=DISABLED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_messages(self):\n for msg in self.messages:\n print msg['text']",
"def show(self):\n self.set_text(self.read())",
"def show_messages(self):\n self.masterlog.revealme()",
"def read(self):\n from x84.bbs import getch\n from x84.bbs.session import getsession\n from x84.bbs.output import echo\n session = getsession()\n self._quit = False\n echo(self.refresh())\n while not self.quit:\n echo(self.process_keystroke(getch()))",
"def showRecvMsg(self, recvmsg):\r\n s = self.bytesToStr(recvmsg, self.chkHexShow.isChecked())\r\n self.txtRecvMsg.append(s)\r\n # self.txtRecvMsg.setPlainText(self.txtRecvMsg.toPlainText() + s)\r\n\r\n self.m_count[0] += len(s)\r\n self.showCount(self.m_count)\r\n\r\n if self.m_callback != 0:\r\n self.parseRecvMsg(msg)",
"def display_messages(self):\n\n\t\twhile self.joined:\n\t\t\tif len(self.messages) != 0:\n\t\t\t\tfor msg in self.messages:\n\t\t\t\t\t#: If the message is empty, ignore it.\n\t\t\t\t\tif msg == \"\":\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t#: If the message is close\", then the server has told the client\n\t\t\t\t\t#: to shut down, so it will. This is not an issue, as users\n\t\t\t\t\t#: messages will always have an identifier and : before their\n\t\t\t\t\t#: message, thus,the only messages that don't include an\n\t\t\t\t\t#: identifier will be from the server itself.\n\t\t\t\t\telif msg[:5] == \"close\":\n\n\t\t\t\t\t\treason = msg[6:]\n\n\t\t\t\t\t\tprint(\"This client was closed due to {}.\".format(reason))\n\t\t\t\t\t\tself.quit(True)\n\n\t\t\t\t\t#: Otherwise, print the message to the commandline.\n\t\t\t\t\telif not self.silent:\n\t\t\t\t\t\tprint('\\r' + msg, end='')\n\n\t\t\t\t\t\tprint(\"\\nYou: \", end='')\n\t\t\t\t\t\tself.displayed_you = True\n\n\t\t\t\t\t#: Remove the processed message\n\t\t\t\t\tself.messages.remove(msg)",
"def __read_message(self):\r\n\t\t# instr = QDataStream(self.__tcpSocket)\r\n\t\t# instr.setVersion(QDataStream.Qt_5_0)\r\n\t\t# if self.blockSize == 0:\r\n\t\t#\t if self.__tcpSocket.bytesAvailable() < 2:\r\n\t\t#\t\t return\r\n\t\t#\t self.blockSize = instr.readUInt16()\r\n\t\t# if self.__tcpSocket.bytesAvailable() < self.blockSize:\r\n\t\t#\t return\r\n\t\t# # Print response to terminal, we could use it anywhere else we wanted.\r\n\t\t# message = str(instr.readString(), encoding='utf8')\r\n\t\t# print(\"New message received : '{}'.\".format(message))\r\n\t\t# self.decode_message(message)\r\n\r\n\t\tinstr = self.__tcpSocket.readAll()\r\n\t\tmessage = str(instr, encoding=\"utf8\")\r\n\t\tself.decode_message(message)",
"def show(self):\n self.present(orientations=ORIENTATIONS)\n # launch a background thread\n # we can not use ui.in_background here\n # because some dialogs would not open anymoe\n thr = threading.Thread(target=self.show_messages)\n thr.daemon = True\n thr.start()",
"def show_data(self, msg):\n\n message = msg\n # self.ECGWin.append(message)\n self.getter.get(message)\n # self.ECGWin.append(msg2)\n # self.ECGWin.append(msg3)",
"def showMessage(self):",
"def readlines():\n while 1:\n line = nb_server.stdout.readline().decode(\"utf-8\").strip()\n if line:\n print(line)",
"def receiveMessage(self,size):\n self.messageReceived = self.com.getData(size)\n print('+--------------------------------+')\n print('| Mensagem Recebida |')\n print('+--------------------------------+')\n print(self.messageReceived)",
"def show_message(self, message):\n self.sense.show_message(\n message,\n scroll_speed=self.SCROLL_SPEED,\n text_colour=self.TEXT_COLOUR\n )",
"def read_for_explore_run(self):\n b_data = self.client_sock.recv(1024)\n if b_data!=None and len(b_data)!=0:\n if b_data!=\"GRID\": # AUTO mode in android, to avoid flush cmd\n print \"Received from Android: %s\" % b_data\n if b_data==\"explore\":\n print_msg(self.name, \"Setting \\\"explore\\\" flag\")\n self.explore_start = True\n elif b_data==\"run\":\n print_msg(self.name, \"Setting \\\"run\\\" flag\")\n self.run_start = True\n else:\n pass",
"def processIncoming(self):\n while (self.queue.qsize()):\n try:\n message = self.queue.get_nowait()\n \n self.terminal.insert(END,message)\n\n # Autoscroll the terminal if set\n if (self.autoscroll_value.get()):\n self.terminal.yview(END)\n\n except Queue.Empty:\n pass",
"def displayMessages(window,messages=['']):\n \n # update messages text\n message_in_line = ''\n for msg in messages:\n message_in_line += '\\n'+msg\n\n window['messages'].update(f'{message_in_line}')",
"def show(self, window):\r\n\r\n return",
"def refresh(self, msg_num):\r\n if self.messages_window is not None:\r\n self.messages_window.destroy()\r\n self.messages_window = None\r\n self.read_messages(msg_num)",
"def show_messages(self):\n if not self.messages:\n u_print(\" Queue.show_messages() ERR - There is no messages or malformed messages on queue. \")\n u_print(json.dumps(self.messages, indent=4))\n sys.exit(1)\n\n try:\n for m in self.messages:\n self.show_message(m.body)\n except:\n raise",
"def startReading(self):\n self.reading = True\n self.thread = ReadSocket(self)\n self.thread.start()",
"def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)",
"def display_data(self):\n data = self.client_sock.recv(1024)\n data_string = data.decode('utf-8')\n data_split = data_string.split(\",\")\n temperature = int(data_split[0])\n humidity = int(data_split[1])\n self.show_message(\"The temperature is %d\" % (temperature))\n self.show_message(\"The humidity is %d\" % (humidity))",
"def show_msgdialog(self):\n log_msg = log.getBufferAsString()\n if not log_msg:\n return\n\n # initialise message dialog\n msg_dialog = msgdialog.MessageDialog(None, -1, \"\")\n msg_dialog.msg_list.InsertColumn(0, \"\")\n\n # clear dialog and show new messages\n msg_dialog.msg_list.Freeze()\n msg_dialog.msg_list.DeleteAllItems()\n for line in log_msg.split('\\n'):\n msg_dialog.msg_list.Append([line, ])\n msg_dialog.msg_list.SetColumnWidth(0, -1)\n msg_dialog.msg_list.Thaw()\n msg_dialog.ShowModal()\n msg_dialog.Destroy()",
"def start(self):\n self.show_greeting()\n self.read_frame()",
"def new_window_messages(self, button_see_all_msgs):\r\n # changing the button command to closing the window\r\n button_see_all_msgs.config(command=lambda: self.close_window(button_see_all_msgs))\r\n\r\n # creating the chat Tk object\r\n self.messages_window = Tk()\r\n self.messages_window.resizable(False, False)\r\n self.messages_window.config(bg=self.bg_color)\r\n self.messages_window.protocol(\"WM_DELETE_WINDOW\",\r\n lambda: self.close_window(button_see_all_msgs))\r\n\r\n chat_label = Label(self.messages_window, text=\"Hello \" + self.username +\r\n \"\\nHere are your messages\",\r\n bg=self.bg_color, font=self.title_font)\r\n chat_label.pack(padx=20, pady=10)\r\n chat_frame = Frame(self.messages_window)\r\n chat_frame.pack(padx=15, pady=15)\r\n scrollbar_chat = Scrollbar(chat_frame)\r\n scrollbar_chat.pack(side=RIGHT, fill=Y)\r\n text_chat = Text(chat_frame, width=30, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_chat.set)\r\n text_chat.pack()\r\n scrollbar_chat.config(command=text_chat.yview)\r\n for msg, encryption_data, sender_user in self.msg_list:\r\n text_chat.insert(END, \"from: \" + sender_user + \"\\n\")\r\n text_chat.insert(END, msg + \"\\n\\n\")\r\n text_chat.config(state=DISABLED)",
"def display_messages(self, layout):",
"def show_messages(self):\n console.alert(\n \"Info\",\n \"If StaSh does not launch anymore after you changed the config, run the 'launch_stash.py' script with \\n'--no-cfgfile'.\",\n \"Ok\",\n hide_cancel_button=True,\n )\n while True:\n self.wait_modal()\n if not self.subview_open:\n break\n console.alert(\n \"Info\",\n \"Some changes may only be visible after restarting StaSh and/or Pythonista.\",\n \"Ok\",\n hide_cancel_button=True,\n )",
"def read(self):\n return self.rpc.call(MsfRpcMethod.ConsoleRead, [self.cid])",
"def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()",
"def show_data():"
] | [
"0.67919517",
"0.6346908",
"0.6337899",
"0.6265047",
"0.6182557",
"0.618034",
"0.6140841",
"0.6108053",
"0.6107602",
"0.60914344",
"0.6021179",
"0.5947622",
"0.58789885",
"0.5855277",
"0.58248854",
"0.5822408",
"0.5808994",
"0.5804789",
"0.58033186",
"0.57892084",
"0.5787142",
"0.57804567",
"0.57802457",
"0.5679505",
"0.56750065",
"0.5649629",
"0.56479144",
"0.56053996",
"0.560143",
"0.55960846"
] | 0.6980507 | 0 |
This function refreshes the read-messages page. | def refresh(self, msg_num):
if self.messages_window is not None:
self.messages_window.destroy()
self.messages_window = None
self.read_messages(msg_num) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def chat_refresh(self, label):\n room = await self.get_room(label)\n messages = await self.fetch_all_message(room)\n await self.send_json(\n return_value(ACTION_REFRESH_CHAT, label, self.user.username, MSG_MESSAGE, messages)\n )",
"def read_messages(self, msg_num):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username, font=self.title_font,\r\n bg=self.bg_color, height=2)\r\n user_label.pack(pady=5, padx=50)\r\n lbl_msg = Label(self.root, text=\"Message \" + str(msg_num), font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_msg.pack(pady=5, padx=10)\r\n self.refresh_button = Button(self.root, text=\"Refresh page\", font=self.text_font,\r\n bg=self.bg_color, command=lambda: self.refresh(msg_num))\r\n self.refresh_button.pack(padx=10, pady=10)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=15)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n text_widget = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n text_widget.pack()\r\n scrollbar_msg.config(command=text_widget.yview)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.go_back_read)\r\n button_send.pack(pady=5, side=BOTTOM)\r\n button_send = Button(self.root, text=\"see/close message\\ncontrol panel\",\r\n font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.new_window_messages(button_send))\r\n button_send.pack(pady=5, side=BOTTOM)\r\n if self.msg_list:\r\n if msg_num < len(self.msg_list):\r\n next_msg = Button(self.root, text=\"next message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num + 1))\r\n next_msg.pack(pady=5, padx=5, side=RIGHT)\r\n if msg_num > 1:\r\n previous_msg = Button(self.root, text=\"previous message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num - 1))\r\n previous_msg.pack(pady=5, padx=5, side=LEFT)\r\n text_widget.insert(END, \"from: \" + self.msg_list[msg_num - 1][2] + \"\\n\")\r\n text_widget.tag_add('sender', '1.0', '1.end')\r\n text_widget.tag_config('sender', font='none 14')\r\n\r\n text_widget.insert(END, self.msg_list[msg_num - 1][0])\r\n text_widget.tag_add('msg', '2.0', END)\r\n text_widget.tag_config('msg', font='none 12')\r\n\r\n text_widget.config(state=DISABLED)",
"def _keep_getting_new_messages(self):\n while True:\n new_messages = self.get_new_messages()\n for message in new_messages:\n self.handle(message)\n time.sleep(self.refresh_delay)",
"def refresh_chat(self):\n self.chat_container.noutrefresh()\n self.chat_win.noutrefresh()\n curses.doupdate()",
"def do_refresh(self, validated_message):\n raise NotImplementedError() # implement in child",
"def refresh(self, new_content):\n pass",
"def refresh(self):\n self.log_info(f\"Browser.refresh: Refreshing the page\")\n self.CORE.refresh()\n return",
"def Refresh(self):\n pass",
"async def poll_refresh(self) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.poll_refresh))",
"def trigger_refresh(self):\n self.get_selected()\n self.manage_loading(loading=True)\n self.current_feed.fetch_content(unread_only=self.show_unread_only)\n self.manage_actions()",
"def receive_reload_request(self, _: EmptyMsg):\n self.update()",
"def refresh(self):\n pass",
"def refresh(self):\n pass",
"def refresh_page(self):\n self.m_driver.refresh()\n time.sleep(30)",
"def refresh_screen(self):",
"def refresh(self):\n\n self._refreshed_on = time.time() * 1000",
"def refresh():\n buffer = io.StringIO()\n with mail.CaptureLogs(manage.logger, buffer):\n try:\n manage.update_aggregates()\n manage.retry_itemized()\n manage.refresh_itemized()\n manage.update_schemas()\n download.clear_bucket()\n except Exception as error:\n manage.logger.exception(error)\n try:\n mail.send_mail(buffer)\n except Exception as error:\n logger.exception(error)",
"def RefreshReport(self):\r\n report = self.data.getRefreshReport()\r\n if report: showInfo(self,report,self.data.title)",
"def refresh(self):\n self.__refresh()",
"def refresh(self) -> None:\n pass",
"def refresh(self) -> None:\n pass",
"def refresh(self) -> None:\n pass",
"def update_messages():\n\n scrollbar = Scrollbar(root)\n scrollbar.pack(side=RIGHT, fill=Y)\n listbox = Text(root, wrap =WORD, yscrollcommand=scrollbar.set, background=\"#CCFFCC\", fg=\"black\", selectbackground=\"#003300\",\n highlightcolor=\"#0033CC\")\n\n msgs = []\n run = True\n while run:\n\n time.sleep(0.1) # update every 1/10 of a second\n new_messages = c1.get_messages() # get any new messages from client\n msgs.extend(new_messages) # add to local list of messages\n\n for msg in new_messages: # display new messages\n print(msg)\n #title_label = Label(text=str(msg), bg=\"#CCFFCC\", fg=\"black\", padx=34, pady=5, font=\"comicsansms 9 bold\",borderwidth=3,wraplength=300, relief=SUNKEN)\n #title_label.pack(side=TOP)\n\n listbox.insert(END, str(msg)+'\\n\\n')\n listbox.pack(fill=BOTH, padx=36)\n scrollbar.config(command=listbox.yview)\n\n if msg == \"{quit}\":\n root.destroy()\n run = False\n break",
"def refresh(self):\n self.Refresh()",
"def refresh_view():\n pass",
"def refresh_dialog(self):\n self._client.update_elements()",
"def on_refresh(self):\n pass",
"def refresh():\n\tsocketio.emit('refresh')\n\treturn status()",
"def refresh_screen(self):\n stdscr = self.stdscr\n stdscr.refresh()",
"def refresh_all(self):\n self.stdscr.noutrefresh()\n self.chat_container.noutrefresh()\n self.chat_win.noutrefresh()\n self.prompt_win.noutrefresh()\n curses.doupdate()"
] | [
"0.6961772",
"0.67089885",
"0.66341877",
"0.65815735",
"0.6418059",
"0.6338058",
"0.6332131",
"0.63038486",
"0.6238911",
"0.6234152",
"0.62187123",
"0.621134",
"0.621134",
"0.6167453",
"0.6129225",
"0.5976966",
"0.5967419",
"0.5957173",
"0.59484446",
"0.5933708",
"0.5933708",
"0.5933708",
"0.59259754",
"0.5915373",
"0.59150225",
"0.5880287",
"0.5874371",
"0.58725965",
"0.58035195",
"0.57930416"
] | 0.76593846 | 0 |
This function makes sure that all windows work properly when going back from the read window. | def go_back_read(self):
if self.messages_window is not None:
self.messages_window.destroy()
self.messages_window = None
self.choose_path() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __window_back(self):\n pass",
"def ev_windowrestored(self, event: WindowEvent) -> None:",
"def __window_forward(self):\n pass",
"def TransferToWindow(self):\n return True",
"def TransferFromWindow(self):\n return True",
"def ev_windowrestored(self, event: tcod.event.WindowEvent) -> T | None:",
"def wm_update(self):\n readback = self.get_pvobj(\"readback\")\n show_pos = self._update_cb(0)\n show_pos()\n with CallbackContext(readback, show_pos):\n try:\n while True:\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass",
"def end(self):\n #self.manipulator_restore()\n #self.header_text_restore()\n #self.cursor_modal_restore()\n pass",
"def _restore_windows(session):\n current_window = session.window_handle\n\n for window in _windows(session, exclude=[current_window]):\n session.window_handle = window\n if len(session.window_handles) > 1:\n session.close()\n\n session.window_handle = current_window",
"def ev_windowfocuslost(self, event: WindowEvent) -> None:",
"def read(self):\n self.event, self.values = self.window.read()\n\n if self.event in (sg.WIN_CLOSED, \"Exit\", \"Close\"):\n self.close()",
"def on_exit(self, event):\r\n conf.LastText = self.edit_text.Value\r\n conf.LastLanguage = conf.Languages[self.list_lang.Selection][0]\r\n if not self.mediactrl.Tell() < 0: # Nothing loaded and 0 volume if -1\r\n conf.LastVolume = round(self.mediactrl.GetVolume(), 2)\r\n conf.WindowPosition = self.Position[:]\r\n conf.WindowSize = [-1, -1] if self.IsMaximized() else self.Size[:]\r\n conf.save()\r\n event.Skip()",
"def handleResetUI(self):\n editor = self.activeWindow()\n if editor is None:\n self.__setSbFile()\n else:\n line, pos = editor.getCursorPosition()\n enc = editor.getEncoding()\n lang = editor.getLanguage()\n eol = editor.getEolIndicator()\n zoom = editor.getZoom()\n self.__setSbFile(editor.getFileName(), line + 1, pos, enc, lang,\n eol, zoom)",
"def back_window(self):\n\n self.controller.set_new_model_running(False)\n self.controller.show_frame(\"MainWindow\")",
"def events(self, instance, keyboard):\n if keyboard in (1001, 27):\n if self.manager_open:\n self.file_manager.back()\n return True",
"def run(self):\n while True:\n event, values = self.window.read()\n if event == sg.WIN_CLOSED:\n break\n\n ev.fire(self.window, event, values)",
"def reopen():",
"def __previousChange(self):\n self.activeWindow().previousChange()",
"def backpage(self):\n self.parent.contpage = True\n self.parent.adjpages[1] = True\n if self.shortCheck.isChecked():\n self.adjdict[\"disabled\"] = False\n self.adjdict[\"stem\"] = self.stemEdit.text()\n self.adjdict[\"masc\"] = self.rumascEdit.text()\n self.adjdict[\"fem\"] = self.rufemEdit.text()\n self.adjdict[\"nuet\"] = self.runuetEdit.text()\n self.adjdict[\"plur\"] = self.ruplurEdit.text()\n self.adjdict[\"eng\"] = self.enEdit.text()\n else:\n self.adjdict[\"disabled\"] = True\n self.parent.adjdict = self.adjdict.copy()\n self.close()",
"def refresh_window(self):\n self.buf[:] = self.player.playlist()\n if self.prev_song != None:\n self.refresh_mark()",
"def set_navigation(self):\n self.close_button.controlUp(self.reader1_button)\n self.close_button.controlLeft(self.next_button)\n self.reader1_button.controlRight(self.reader2_button)\n self.reader2_button.controlRight(self.reader3_button)\n self.reader3_button.controlRight(self.reader4_button)\n self.reader4_button.controlRight(self.reader5_button)\n self.reader1_button.controlDown(self.next_button)\n self.reader2_button.controlDown(self.next_button)\n self.reader3_button.controlDown(self.next_button)\n self.reader4_button.controlDown(self.next_button)\n self.reader5_button.controlDown(self.next_button)\n self.next_button.controlUp(self.reader5_button)\n self.next_button.controlRight(self.close_button)\n self.reader5_button.controlLeft(self.reader4_button)\n self.reader4_button.controlLeft(self.reader3_button)\n self.reader3_button.controlLeft(self.reader2_button)\n self.reader2_button.controlLeft(self.reader1_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)",
"def __window_focus(self):\n pass",
"def update_ui(self):\r\n pass\r\n windowdata = self.window.get_data(self.__class__.__name__)\r\n windowdata['action_group'].get_action('UndoClose').set_sensitive(len(self.tabs_closed) > 0)\r\n windowdata['action_group'].get_action('CloseAll').set_sensitive(self.notebook.get_n_pages() > 0)\r\n windowdata['action_group'].get_action('CloseOthers').set_sensitive(self.notebook.get_n_pages() > 1)",
"def refresh(self, msg_num):\r\n if self.messages_window is not None:\r\n self.messages_window.destroy()\r\n self.messages_window = None\r\n self.read_messages(msg_num)",
"def update_window_formatting(self):\n self.update_sequence_window()\n if self.pDB_open:\n self.pDB_open.refresh_primer()\n if self.show_comp_sequence.get==1:\n self.sequ_win.refresh_DNAseq()\n return",
"def refresh_window():\n global lineno\n lineno = 0\n curses.endwin()\n win.erase()\n print_header()\n win.refresh()",
"def renderWindowClosed(self):\n i = 0\n while i < len(self.rendererWindows):\n rw = self.rendererWindows[i]\n\n if rw.closed:\n self.rendererWindows.pop(i)\n self.rendererWindowsSubWin.pop(i)\n\n else:\n i += 1\n\n for rw in self.rendererWindows:\n rw.outputDialog.imageTab.imageSequenceTab.refreshLinkedRenderers()",
"def __window_stop(self):\n pass",
"def window_tasks(self):\n if self._handle != win32gui.GetForegroundWindow():\n #print \"not in foreground\"\n self.restore_window()\n self.fix_ui()\n self.set_foreground()\n\n self._shell.AppActivate(self._handle)",
"def back(self):\n if self.index.get() != 0:\n for i in range(len(self.frame_list)):\n self.frame_list[i].pack_forget()\n\n self.index.set(self.index.get() - 1)\n self.frame_list[self.index.get()].pack(fill=\"both\", expand=True)\n\n self.work_out_pages()"
] | [
"0.66543084",
"0.64756507",
"0.6218632",
"0.61270016",
"0.6059469",
"0.5967816",
"0.59462374",
"0.5911877",
"0.5886296",
"0.5857155",
"0.5838113",
"0.5830953",
"0.5825556",
"0.58215696",
"0.5817999",
"0.5753222",
"0.5739507",
"0.5733871",
"0.57087755",
"0.5703051",
"0.56999665",
"0.56935656",
"0.56865245",
"0.56798255",
"0.56763375",
"0.56683606",
"0.5628966",
"0.5615507",
"0.56108886",
"0.55391777"
] | 0.68879884 | 0 |
Opens a new window that contains all the messages. | def new_window_messages(self, button_see_all_msgs):
# changing the button command to closing the window
button_see_all_msgs.config(command=lambda: self.close_window(button_see_all_msgs))
# creating the chat Tk object
self.messages_window = Tk()
self.messages_window.resizable(False, False)
self.messages_window.config(bg=self.bg_color)
self.messages_window.protocol("WM_DELETE_WINDOW",
lambda: self.close_window(button_see_all_msgs))
chat_label = Label(self.messages_window, text="Hello " + self.username +
"\nHere are your messages",
bg=self.bg_color, font=self.title_font)
chat_label.pack(padx=20, pady=10)
chat_frame = Frame(self.messages_window)
chat_frame.pack(padx=15, pady=15)
scrollbar_chat = Scrollbar(chat_frame)
scrollbar_chat.pack(side=RIGHT, fill=Y)
text_chat = Text(chat_frame, width=30, height=15, font=self.text_font,
yscrollcommand=scrollbar_chat.set)
text_chat.pack()
scrollbar_chat.config(command=text_chat.yview)
for msg, encryption_data, sender_user in self.msg_list:
text_chat.insert(END, "from: " + sender_user + "\n")
text_chat.insert(END, msg + "\n\n")
text_chat.config(state=DISABLED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)",
"def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = [[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in 
('0123456789'):\r\n # delete last char from input\r\n window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()",
"def _create_window(self):\n wc = win32gui.WNDCLASS()\n wc.lpfnWndProc = self._on_message\n wc.lpszClassName = self.__class__.__name__\n wc.hInstance = win32api.GetModuleHandle(None)\n class_atom = win32gui.RegisterClass(wc)\n return win32gui.CreateWindow(class_atom, self.__class__.__name__, 0, 0, 0, 0, 0, 0, 0, wc.hInstance, None)",
"def startWindow():\n\n m = mainWindow()\n\n # Show Window\n m.show()\n\n # Return to stay alive\n return m",
"def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)",
"def show(self, window):\r\n\r\n return",
"def show(self):\n self.present(orientations=ORIENTATIONS)\n # launch a background thread\n # we can not use ui.in_background here\n # because some dialogs would not open anymoe\n thr = threading.Thread(target=self.show_messages)\n thr.daemon = True\n thr.start()",
"def show_window(self):\n self.show()",
"def open_generatorWindow(self):\n self.window = generatorWindow(self)\n self.hide()",
"def handle_new_window(event):\n url = event.GetURL()\n webbrowser.open(url)",
"def open(self):\n self.state = True\n self.mainwindow.sendMessage('a')\n print(\"opening \" + self.name)",
"def open_window():\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())",
"def open_mwindow_my_subjects(self) -> None:\n self.mwindow_my_subjects.show()",
"def show_msgdialog(self):\n log_msg = log.getBufferAsString()\n if not log_msg:\n return\n\n # initialise message dialog\n msg_dialog = msgdialog.MessageDialog(None, -1, \"\")\n msg_dialog.msg_list.InsertColumn(0, \"\")\n\n # clear dialog and show new messages\n msg_dialog.msg_list.Freeze()\n msg_dialog.msg_list.DeleteAllItems()\n for line in log_msg.split('\\n'):\n msg_dialog.msg_list.Append([line, ])\n msg_dialog.msg_list.SetColumnWidth(0, -1)\n msg_dialog.msg_list.Thaw()\n msg_dialog.ShowModal()\n msg_dialog.Destroy()",
"def doMessageWindow(msg):\n _loadMsgSettings()\n if settings.has_key(msg):\n return\n global dialog\n dialog = QtGui.QDialog()\n msgDialog = ui.message.Ui_Dialog()\n msgDialog.setupUi(dialog)\n msgDialog.messageLabel.setText(msg)\n dialog.exec_()\n if msgDialog.showAgainCheckBox.isChecked():\n settings[msg] = True\n _saveMsgSettings()",
"def msg_new(self,msg):\r\n self.frame.notebook.New()\r\n self.frame.Show()\r\n self.frame.Raise()",
"def displayMessages(window,messages=['']):\n \n # update messages text\n message_in_line = ''\n for msg in messages:\n message_in_line += '\\n'+msg\n\n window['messages'].update(f'{message_in_line}')",
"def open_new_window(self, selector):\n current_window = self.driver.current_window_handle\n element = self.get_element(selector)\n element.click()\n all_handles = self.driver.window_handles\n for handle in all_handles:\n if handle != current_window:\n self.driver.switch_to.window(handle)",
"def createWindow():\n\n windowName = \"ObjectSpawner\"\n\n if cmds.window(windowName, query=True, exists=True):\n cmds.deleteUI(windowName)\n\n cmds.window(windowName)\n\n populateUI()\n enableEditorDrop()\n\n cmds.showWindow(windowName)",
"def newwindow(url):\n\n # Open the URL\n webbrowser.open_new(url)",
"def win_popup(self):\n content = BoxLayout(orientation='vertical')\n message_label = Label(text=self.win_message)\n button_layer = BoxLayout(orientation='horizontal')\n dismiss_button = Button(text='QUIT', size_hint=(1, 1))\n next_button = Button(id='next', text='NEXT ROUND', size_hint=(1, 1))\n button_layer.add_widget(dismiss_button)\n button_layer.add_widget(next_button)\n content.add_widget(message_label)\n content.add_widget(button_layer)\n popup = Popup(title=self.winner,\n content=content, size_hint=(0.3, 0.25))\n dismiss_button.bind(on_release=(lambda a: self.exit_game()),\n on_press=popup.dismiss)\n next_button.bind(on_release=(lambda a: self.next_round()),\n on_press=popup.dismiss)\n popup.open()",
"def open_mwindow_agenda(self) -> None:\n self.mwindow_agenda.show()",
"def open_mwindow_timetable(self) -> None:\n self.mwindow_timetable.show()",
"def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})",
"def build_window(self):\n\n main_frame = tk.Frame(self.root)\n main_frame.pack(fill='both')\n\n self.open_machine_learner_window_button = tk.Button(main_frame, text=\"Open Machine Learner\")\n self.open_machine_learner_window_button.bind('<Button-1>', self.open_machine_learner_window)\n self.open_machine_learner_window_button.pack(side=\"left\")\n\n self.open_web_crawler_window_button = tk.Button(main_frame, text=\"Open Web Crawler\")\n self.open_web_crawler_window_button.bind('<Button-1>', self.open_web_crawler_window)\n self.open_web_crawler_window_button.pack(side=\"left\")\n\n self.open_webpage_classifier_window_button = tk.Button(main_frame, text=\"Open WebPage Classifier\")\n self.open_webpage_classifier_window_button.bind('<Button-1>', self.open_webpage_classifier_window)\n self.open_webpage_classifier_window_button.pack(side=\"left\")\n\n self.run_steady_state_genetic_button = tk.Button(main_frame, text=\"Run Steady State\")\n self.run_steady_state_genetic_button.bind('<Button-1>', self.run_steady_state)\n self.run_steady_state_genetic_button.pack(side=\"left\")\n\n # Protocol for closing window using 'x' button\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing_event)",
"def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win",
"def message_box(subject, content):\r\n root = tk.Tk()\r\n root.attributes(\"-topmost\", True)\r\n root.withdraw()\r\n messagebox.showinfo(subject, content)\r\n try:\r\n root.destroy()\r\n except:\r\n pass",
"def show_popup(cls, content, level):\n\n current_view = sublime.active_window().active_view()\n message = cls.get_message_template(content, level)\n\n current_view.show_popup(content=message, max_width=400)",
"def build_second_window():\r\n\r\n new_window = tk.Tk()\r\n windows.append(new_window)\r\n new_window.protocol(\"WM_DELETE_WINDOW\", new_round(new_window))\r\n\r\n ask = tk.Label(new_window, text='Would You Like To Play Again?', bg='Cyan')\r\n ask.pack(fill=tk.X)\r\n\r\n frame = tk.Frame(new_window)\r\n frame.pack()\r\n\r\n yes_button = tk.Button(frame, text='Yes', bg='green',\r\n command=new_round(new_window))\r\n yes_button.pack(side=tk.LEFT)\r\n\r\n no_button = tk.Button(frame, text='No', bg='red',\r\n command=close)\r\n no_button.pack(side=tk.LEFT)",
"def createAboutWindow(self):\n if (not hasattr(self, \"about_window\")):\n self.about_window = AboutWindow(self)\n self.about_window.show()"
] | [
"0.71605724",
"0.6652545",
"0.6622106",
"0.6552646",
"0.65350586",
"0.6522257",
"0.6512286",
"0.64399976",
"0.6398846",
"0.6395453",
"0.63476604",
"0.6302807",
"0.6253829",
"0.6239314",
"0.6238414",
"0.6192727",
"0.6176429",
"0.6140522",
"0.6134743",
"0.61296725",
"0.60993296",
"0.60828215",
"0.60809666",
"0.6033043",
"0.6019643",
"0.6003147",
"0.5982593",
"0.5937445",
"0.59159017",
"0.59108084"
] | 0.7269994 | 0 |
This function recognizes the microphone input and turns it into text. The text is inserted into the text widget, and the user can then send it as a message. | def speech_recognizer_function(self, text_widget):
label_listening = Label(self.root, text="listening to input...",
font=self.text_font, bg=self.bg_color)
label_listening.pack(pady=10)
recognizer = speech_recognition.Recognizer()
microphone = speech_recognition.Microphone()
with microphone as source:
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
try:
text = recognizer.recognize_google(audio)
text += " "
except:
text = ""
text_widget.insert(END, text)
label_listening.destroy()
self.thread_speech_is_running = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mic_input():\n try:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print('Say something...')\n r.pause_threshold = 1\n r.adjust_for_ambient_noise(source, duration=1)\n audio = r.listen(source)\n try:\n command = r.recognize_google(audio).lower()\n print('You said: ' + command + '\\n')\n except sr.UnknownValueError:\n print('....')\n command = self.mic_input()\n return command\n except Exception as e:\n print(e)\n return False",
"def handle(text, mic, profile):\n messages = [\"Neustart wird eingeleitet, bis gleich. \"]\n\n message = random.choice(messages)\n\n mic.say(message)\n os.system(\"sudo reboot\")",
"def handle(text, mic, profile):\n #it heard the word Music\n mic.say(\"You said \" + text)\n\n #contact the hub requesting a file (NAMED songoptions.txt that overwrites) containg 3 random songs and numbers on the same line\n #hubmusic.getoptions()\n\n #for line in file, read out the line which will be (1 jayz - brush your shoulders off ....) \n with open(\"songoptions.txt\", \"r\") as searchfile:\n for line in searchfile:\n mic.say(line.strip())\n\n #listen for user input\n #if user chooses a valid number, send that number to the HUB and the HUB will send over that song\n #play the song\n\n #probably import hubmusic and in there function playsong. \n #rasp.toggleLamp(veraIP, text.lower())",
"def text_to_speech(entry):\n text = entry.get_text()\n if text:\n subprocess.call([\"milena_say\", text])",
"def speech_recognize_from_microphone():\n speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)\n speech_config.request_word_level_timestamps()\n speech_config.output_format = speechsdk.OutputFormat(1)\n\n speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)\n\n done = False\n\n def stop_cb(evt):\n \"\"\"callback that signals to stop continuous recognition upon receiving an event `evt`\"\"\"\n print('CLOSING on {}'.format(evt))\n nonlocal done\n done = True\n\n def recognized_cb(evt):\n \"\"\"callback for recognized event\"\"\"\n if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech:\n #print('RECOGNIZED: {}'.format(evt.result.text))\n #print('All params: {}'.format(evt.result))\n #print(evt.result.json)\n response = json.loads(evt.result.json)\n #print('All params: {}'.format(response))\n Text = response[\"DisplayText\"]\n duration = 0;\n for word in response[\"NBest\"][0][\"Words\"]:\n duration += word[\"Duration\"]\n duration = duration / 10000000\n print(\"dur :\"+str(duration)+\" text: \" + Text)\n\n # Connect callbacks to the events fired by the speech recognizer\n speech_recognizer.recognized.connect(recognized_cb)\n speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))\n speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))\n speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))\n # stop continuous recognition on either session stopped or canceled events\n speech_recognizer.session_stopped.connect(stop_cb)\n speech_recognizer.canceled.connect(stop_cb)\n\n # Start keyword recognition\n speech_recognizer.start_continuous_recognition()\n\n while not done:\n time.sleep(.5)\n\n speech_recognizer.stop_continuous_recognition()",
"def make_silence_phones_txt(self):\n raise NotImplementedError",
"def process_speak_listen(device_index, mp3_filename, text, record, flag):\n\n mp3_filename = mp3_filename + \".mp3\"\n try:\n tts = gTTS(text=text, lang='en', slow=False)\n tts.save(mp3_filename)\n playsound(mp3_filename)\n os.remove(mp3_filename)\n\n if flag != 1:\n with sr.Microphone(device_index=device_index) as source:\n record.adjust_for_ambient_noise(source, duration=1)\n print(\"Speak:\")\n os.system(\"zenity --progress --width=400 --height=200 --title='Speak Now' \"\n \"--text='Speak Now......No need to click OK button' --no-cancel &\")\n try:\n audio = record.listen(source, timeout=5)\n text = record.recognize_google(audio)\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(text)\n except LookupError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : LookupError - Could not able to understand\")\n text = None\n except speech_recognition.WaitTimeoutError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : WaitTimeoutError - Could not able to listen anything for 5 seconds\")\n text = None\n except speech_recognition.UnknownValueError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : UnknownValueError - Could not able to listen anything for 5 seconds\")\n text = None\n except gtts.tts.gTTSError:\n print(\"ERROR : Connection Error : No internet connection.\")\n exit_program()\n except PermissionError:\n print(\"ERROR : No permission\")\n exit_program()\n\n return text",
"def message(self, text):\n\n if( rpi_device ):\n self.clear()\n for char in text:\n if char == '\\n' or char == '^':\n self.cmd(0xC0) # new line\n else:\n self.cmd(ord(char),True)",
"def qr_code_text_dialogue(update: Update, _: CallbackContext) -> int:\n\n update.message.reply_text(\n 'Enter your text',\n reply_markup=ReplyKeyboardRemove(),\n )\n return QR_CODE_TEXT_INPUT",
"def takecommand():\r\n\r\n r=sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"listening....\")\r\n r.pause_threshold=1\r\n \"\"\" Pause_threshold will let you to speak with your own pace\"\"\"\r\n\r\n #r.energy_threshold=500\r\n \"\"\" energy threshold will stop hindrens from outside\"\"\"\r\n\r\n audio=r.listen(source)\r\n\r\n try:\r\n print(\"In process of recognizing..\")\r\n query=r.recognize_google(audio,language=\"en-in\")\r\n \"\"\" query will take date that has been spoken by user with the help of google API\"\"\"\r\n print(\"you said :\",query)\r\n\r\n except Exception as e:\r\n print(\"can you speak this again\")\r\n return \"none\"\r\n return query",
"def get_user_speech_input(self):\n\t\twith sr.Microphone() as source:\n\t\t\tprint \"You can speak!\"\n\t\t\taudio = self.recog.listen(source, 5)\n\t\t\t\n\t\t#WIT_AI_KEY = \"4KKA5EH6VFWPMWYZTSFHNJJZYCZHGTAQ\"\n\t\tprint \"sending it\"\n\t\ttry:\n\t\t\tprint \"Google thinks: \" + self.recog.recognize_google(audio)\n\t\texcept sr.UnknownValueError:\n\t\t\tprint(\"Google Speech Recognition could not understand audio\")\n\t\texcept sr.RequestError as e:\n\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e))",
"def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")",
"def m() -> str:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.adjust_for_ambient_noise(source)\n logger.info(\"Microphone Active! Waiting for prompt!\")\n audio = r.listen(source)\n\n s = r.recognize_google(audio) #Send the audio to google\n result = s.lower()\n return result",
"def takeCommand():\n\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n try:\n print(\"Recognizing... \")\n voice_input = r.recognize_google(audio, language=\"en-US\")\n print(f\"The user said: {voice_input}\\n\")\n except Exception as e:\n # print(e)\n print(\"Please say that again\")\n return \"None\"\n return voice_input",
"def take_command(self):\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening.....\")\r\n r.pause_threshold = 1\r\n audio = r.listen(source)\r\n try:\r\n query = r.recognize_google(audio, language=\"en-in\")\r\n print(\"Recognizing.....\")\r\n print(\"Query=\", query)\r\n except Exception as e :\r\n print(e)\r\n self.speak(\"Say that again please....\")\r\n return \"None\"\r\n return query",
"def __call__(self, text, defaultText='', failsafe=False, timeout=None, mode='Abc', orientation='portrait', delayBetweenPresses=None):\r\n if len(text):\r\n if self.phone.isFullBlackBox(): # input using hardcoded settings\r\n if self.blackBoxVkb == None:\r\n self.blackBoxVkb = BlackBoxVirtualKeyboard(self.phone)\r\n self.blackBoxVkb._writeBlackBox(text, mode=mode, orientation=orientation, delayBetweenPresses=delayBetweenPresses)\r\n else:\r\n self.write(text, defaultText,failsafe, timeout=timeout)\r\n else:\r\n self.phone.comment(\"Empty string given for input!\")",
"def takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source: #don't forget the () after microphone\n print(\"Listening ...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing..\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"User said: {query}\\n\")\n\n except Exception as e:\n print(e)\n print(\"Say that again please..\")\n return \"None\"\n return query",
"def text_cell_phone(self, sender, message):\n if self.cell_phone:\n text_message.send_sms(sender, message, self.cell_phone)",
"def send_text(self, phone_number):\n sms_params = urllib.urlencode({\n '_rnr_se': self.key,\n 'phoneNumber': phone_number,\n 'text': self.text\n })\n # Send the text, display status message \n self.response = \"true\" in self.opener.open(self.sms_url, sms_params).read()",
"def speech_callback(self, data):\n speech = data.data\n print \"RECEIVED SPEECH: \", speech\n if \"keyword detected\" in speech:\n if self.idling:\n self.control_pub.publish(\"ft go; idle stop; stt go\")\n self.behav_pub.publish(\"greet\")\n # self.behav_pub.publish(random.choice(categorized_behaviors['greeting']))\n elif \"play\" in speech:\n print \"STARTING GAME\"\n self.start_game = \"TTT\"\n elif \"bye\" in speech:\n self.control_pub.publish(\"idle go; stt go; stt_keyword go\")\n elif \"okay\" in speech:\n self.ok = True",
"def message(self, text):\n lines = str(text).split('\\n') # Split at newline(s)\n for i, line in enumerate(lines): # For each substring...\n if i > 0: # If newline(s),\n self.write_lcd(self.LCD_DATA_E1, 0xC0) # set DDRAM address to 2nd line\n self.write_lcd(self.LCD_DATA_E1, line, True) # Issue substring",
"def handle(text, mic, profile):\n os.system('if [ `cat /sys/class/gpio/gpio18/value` -eq \"0\" ]; then echo \"1\" > /sys/class/gpio/gpio18/value; else echo \"0\" > /sys/class/gpio/gpio18/value; fi')",
"def handle(text, audio):\n print 'handling light module'\n r = re.compile(r'\\bon\\b | \\boff\\b', flags=re.I | re.X)\n matchedWordArr = r.findall(text)\n arrLen = len(matchedWordArr)\n if arrLen == 1: # on or off\n action = matchedWordArr[0]\n print action\n audio.speak('I am turning' + action + ' the lights')\n elif arrLen == 0: # no action\n audio.speak('Please decide the action to perform with the lights!')\n elif arrLen >= 2: # ambigious\n audio.speak('Please decide the action first!')",
"def takeCommand():\r\n r=sr.Recognizer()\r\n\r\n with sr.Microphone() as source:\r\n print(\"Listening....\")\r\n r.pause_threshold = 1 #pause threshold is if we pause in between speaking it shouldnt consider the sentence as complete\r\n audio = r.listen(source)\r\n\r\n try:\r\n print(\"Recognizing...\")\r\n query= r.recognize_google(audio,language='en-in')\r\n print(f\"User said: {query} \\n\")\r\n\r\n except Exception as e:\r\n print(e)\r\n print(\"Please say that again...\")\r\n return \"None\"\r\n\r\n\r\n return query",
"def getTextFromSpeak(self):\n raise NotImplementedError",
"def b10_clicked(self):\n self.tts.tts(self.text2speech)\n self.text2speech = ''\n pass",
"def takeCommand():\r\n recognizer = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listenging...\")\r\n audio = recognizer.listen(source)\r\n\r\n try:\r\n print(\"LOADING...\")\r\n command = recognizer.recognize_google(audio, language=\"en-un\")\r\n print(f\"user said: {command}\")\r\n\r\n except Exception as e:\r\n speak(f\"Please say that again\")\r\n command = None\r\n return command",
"def act(self, audio_file=None):\n #file as source\n if self.src == 'file':\n if audio_file is None:\n raise ValueError(\"Please provide a audio_file\")\n return None\n elif not os.path.exists(audio_file):\n raise FileNotFoundError(\"Specified file not found\")\n return None\n else:\n file = speech_recognition.AudioFile(audio_file)\n with file:\n speech = self.recog_obj.record(file)\n \n #mic as source\n elif self.src == 'mic':\n if audio_file is not None:\n print(\"WARNING: source is set to device microphone. Audio file will be ignored\\n\")\n \n try:\n with self.mic_obj:\n print(\"Speak into the mic....\\n\")\n self.recog_obj.adjust_for_ambient_noise(self.mic_obj)\n speech = self.recog_obj.listen(self.mic_obj)\n #if microphone is not detected\n except OSError:\n print(\"Error: Microphone not detected\")\n return None\n \n \n try:\n print(\"Please wait while we transcribe...\\n\")\n text = self.recog_obj.recognize_google(speech, language='en', show_all=self.debug)\n \n #if audio is not detected\n except speech_recognition.UnknownValueError:\n print(\"Error: Sorry audio not detected by device microphone\")\n return None\n \n #if there is connection issue or api issue\n except speech_recognition.RequestError:\n print(\"Error: API for transcription is not reachable. There may be some connection issue or server side issue\")\n return None\n \n #for imposing various rules to text \n #But if debug mode is enabled, transcript variable will store a dictionary of various transcriptions \n #along with their confidence probabilities, so conversion rules are disabled meanwhile \n transcript = self.tcr.deconcat(text) if not self.debug else text\n return transcript",
"def sms_reply():\n # Start our TwiML response\n # if body.lower()==\"good\":\n message=\"Hi I'm IRIS, an Immediately Responsive Intelligent System\\nHow are you feeling today?\"\n user=request.form['Body']\n\n # message=\"Hi \"+ name+ \"\"\n # user=request.form['Body']\n\n if user==\"good\":\n message=\"Glad to hear it! I hope you continue to feel this way! Celebrate this feeling and hold onto what happened ot make you feel this way so that you can repeat it in the future!\"\n\n if user==\"sad\":\n message=\"I’m sorry to hear that. Here are some things I do to make me feel better: take a walk outside, listen to uplifting music, call or message a loved one, or watch or read something positive to take my mind off of what I’m feeling.\"\n\n if user==\"nervous\":\n message=\"It’s going to be ok! This feeling will not last forever.\"\n if user==\"lonely\":\n message=\"I’m here for you, and know that you are loved, supported, and important. The world would not be the same without you! For a loving quote respond\"\n\n if user==\"angry\":\n message=\"“Let me help you turn your anger into something positive. Here are some ways to burn off energy productively: take a long walk, remove yourself from the situation, paint of draw, listen to loud music, or take a break from what you are doing.\"\n\n if user==\"tired\":\n message=\"I understand what you are feeling well. I recommend taking a break to do an activity you enjoy, taking a nap, getting a coffee, doing 20 jumping jacks, listening to a pump-up playlist, or standing up to stretch for a bit.\"\n\n if user==\"average\":\n message=\"There are many things to look forward to!\"\n resp = MessagingResponse()\n\t # Add a message\n \n resp.message(message)\n\t # Add a picture message\n\t #msg.media(\"https://farm8.staticflickr.com/7090/6941316406_80b4d6d50e_z_d.jpg\")\n\n return str(resp)",
"def send_text(msg, up):\n try:\n client = TwilioRestClient(account=TWILIO_ACCOUNT_SID,\n token=TWILIO_AUTH_TOKEN)\n c = client.sms.messages.create(to=up.phone,\n from_=WATTTIME_PHONE,\n body=msg.msg)\n TwilioSMSEvent(user=up.user,\n msg_type=msg.msg_type,\n to_number=up.phone,\n from_number=WATTTIME_PHONE,\n body=msg.msg).save()\n\n debug(\"texted '{}' to {}\".format(msg, str(up.name)))\n return True\n except:\n print (\"Faild message\", up.phone, WATTTIME_PHONE, msg.msg)\n debug(\"failed to text '{}' to {}\".format(msg, str(up.name)))\n return False"
] | [
"0.63976574",
"0.6331272",
"0.6316408",
"0.62616754",
"0.6221507",
"0.6133672",
"0.60445386",
"0.6034115",
"0.599703",
"0.5995486",
"0.59736764",
"0.5960848",
"0.5958265",
"0.59164345",
"0.58853114",
"0.5819938",
"0.58131707",
"0.5773462",
"0.57678306",
"0.57649124",
"0.57376164",
"0.5732757",
"0.572098",
"0.569911",
"0.56899244",
"0.56863546",
"0.567684",
"0.56743395",
"0.56674254",
"0.5661662"
] | 0.72579855 | 0 |
This function colors the encrypted letter's label in the simulator for 300 milliseconds. | def color_letter(self, letter, lst_labels, plain_text_widget, encrypted_text_widget):
new_letter, txt_encryption = self.simulator_enigma.encrypt_letter(letter)
lst_encryption_letter_stages = [i[-1] for i in txt_encryption.split("\n")]
lst_encryption_letter_stages.remove(')')
self.simulator_encryption.append((txt_encryption, lst_encryption_letter_stages))
lst_labels[ord(new_letter) - 65].config(bg="yellow")
lst_labels[ord(new_letter) - 65].after(300, lambda: lst_labels[ord(new_letter) -
65].config(bg="khaki"))
plain_text_widget.config(state=NORMAL)
plain_text_widget.insert(END, letter)
plain_text_widget.config(state=DISABLED)
encrypted_text_widget.config(state=NORMAL)
encrypted_text_widget.insert(END, new_letter)
encrypted_text_widget.config(state=DISABLED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ColorizeDNA(self, text):\n if (text == 'A'):\n escape = '\\033[92m' # Green\n elif (text == 'G'):\n escape = '\\033[93m' # Yellow\n elif (text == 'T'):\n escape = '\\033[91m' # Red\n elif (text == 'C'):\n escape = '\\033[96m' # Blue\n else:\n return text\n return escape + text + '\\033[0m'",
"def colorText(s, c):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n HEAD = \"\\033[\"\n TAIL = \"m\"\n\n color = \"39;49\"\n lastDifference = 800\n\n for i in COLORS:\n diff = abs(i[0] - c[0]) + abs(i[1] - c[1]) + abs(i[2] - c[2]) #calculates difference to stock color\n if diff < lastDifference:\n lastDifference = diff #chooses closest match\n color = i[3]\n\n return HEAD+color+TAIL+s+COLOR_RESET #color code + string + reset code",
"def colorize(text, color):\n return COLOR_DICT[color] + str(text) + COLOR_DICT['end']",
"def paint(self):\r\n self.win.bkgd(\" \", COLOR_PAIR[\"con_text\"])",
"def preloop(self):\n greet = [ \" \" +color.colorize(\"_____\",color.YELLOW),\n \" __ __.\"+ color.colorize(\"__.\", color.RED) + \"__ .____________ \" + color.colorize(\" | | \", color.YELLOW),\n \"/ \\ / \\\\\"+ color.colorize(\"__|\", color.RED) + \" | __| _/\\_ ___ \\_____\" + color.colorize(\" ___| |___\", color.YELLOW),\n \"\\ \\/\\/ /\" + color.colorize(\" |\", color.YELLOW) + \" | / __ | / \\ \\/\\__ \\\\\" + color.colorize(\"\\\\_\",color.YELLOW) + color.colorize(\" _|_ \", color.RED) + color.colorize(\"__|\", color.YELLOW),\n \" \\ /\" + color.colorize(\"| |\",color.YELLOW) + \" |__/ /_/ | \\ \\____/ __ \\ \"+ color.colorize(\"|\", color.YELLOW) + color.colorize(\" | \", color.RED) + color.colorize(\"| \", color.YELLOW),\n \" \\__/\\ / \" + color.colorize(\"|__|\",color.YELLOW) + \"____/\\____ | \\_______ (____ / \"+ color.colorize(\"|\", color.YELLOW) + color.colorize(\" | \", color.RED) + color.colorize(\"| \", color.YELLOW),\n \" \\/ \\/ \\/ \\/ \" + color.colorize(\"|\", color.YELLOW) + color.colorize(\" | \", color. RED) + color.colorize(\"|\", color.YELLOW),\n \" \" + color.colorize(\"|\", color.YELLOW) + color.colorize(\" | \", color. RED) + color.colorize(\"|\", color.YELLOW),\n \" Main Commands: \" + color.colorize(\"|\", color.YELLOW) + color.colorize(\" | \", color. RED) + color.colorize(\"|\", color.YELLOW),\n \" \" + color.colorize(\"\\\\\", color.YELLOW) + color.colorize(\"|\", color.RED) + color.colorize(\"/\", color.YELLOW)\n ]\n\n self.PRINT.line('\\n'.join(greet[:len(greet) - 1]))\n # then get all command names\n names = list(set([a[3:] for a in self.get_names() if a.startswith('do_')]) - set(['EOF']))\n for name in names:\n self.PRINT.newline()\n self.PRINT.line(' ' * 8 + \"{:<38}\".format(name) + color.colorize(\"|\", color.YELLOW) + color.colorize(\" | \", color. RED) + color.colorize(\"|\", color.YELLOW))\n\n self.PRINT.newline()\n self.PRINT.line(greet[len(greet) - 1])\n self.PRINT.newline()",
"def abdul(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"1;33m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30m\"+self.A220+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.A220+self.ESC+\"1;43m\"+self.A219+self.A178+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A223+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"33m\"+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"C\"+self.A254+self.ESC+\"33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+self.ESC+\"1;43m\"+self.A177+self.A176+self.ESC+\"C\"+self.A176+self.ESC+\"C\"+self.ESC+\"37;40mSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"33mAbduls\"+self.ESC+\"CArmour \"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A177+self.ESC+\"0;32m\"+self.A220+self.ESC+\"C\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"2C\"+self.ESC+\"0;33m\"+self.A223+self.A219+self.ESC+\"C\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"33m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"33m\"+self.A220+self.A219+self.A223+self.ESC+\"37m\"+self.A220+self.ESC+\"1;47m\"+self.A176+self.ESC+\"0m\"+self.A219+self.A223+self.ESC+\"1;30;47m\"+self.A176+self.A176+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A177+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1;30m\"+self.A223+self.ESC+\"4C\"+self.ESC+\"0;33mBehind\"+self.ESC+\"Cthe\"+self.ESC+\"Cdesk\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"Carmour\"+self.ESC+\"Cshop\"+self.ESC+\"Cis\"+self.ESC+\"Can\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"37;40m\"+self.A223+self.ESC+\"1;47m\"+self.A177+self.A176+self.ESC+\"C\"+self.ESC+\"0m\"+self.A219+self.A219+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A178+self.ESC+\"C\"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"4C\"+self.ESC+\"0;33mamazingly\"+self.ESC+\"Cattractive\"+self.ESC+\"Clooking\"+self.ESC+\"Cfemale - she seems\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"33;40m\"+self.A223+self.ESC+\"1;37m\"+self.A220+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30m\"+self.A220+self.A220+self.A223+self.ESC+\"47m\"+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.ESC+\"1;30;43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;32m\"+self.A220+self.A254+self.ESC+\"3C\"+self.ESC+\"33mbusy, doing her mails but she\"+self.ESC+\"Casks\"+self.ESC+\"C\\\"\"+self.ESC+\"1mHow\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A223+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"1;37;47m\"+self.A178+self.ESC+\"40m\"+self.A222+self.A222+self.ESC+\"47m\"+self.A176+self.ESC+\"C\"+self.ESC+\"30m\"+self.A176+self.ESC+\"C\"+self.A177+self.ESC+\"40m\"+self.A220+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"47m\"+self.A219+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A223+self.ESC+\"32m\"+self.A220+self.A178+self.ESC+\"6C\"+self.ESC+\"1;33mmay\"+self.ESC+\"CI\"+self.ESC+\"Cbe\"+self.ESC+\"Cof\"+self.ESC+\"Cservice?\"+self.ESC+\"0;33m\\\"\\r\\n\"\n\t\tthismsg += self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"C\"+self.A220+self.A220+self.ESC+\"C\"+self.ESC+\"1;37m\"+self.A223+self.ESC+\"47m\"+self.A178+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A177+self.ESC+\"0m\"+self.A220+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0m\"+self.A220+self.ESC+\"1;30m\"+self.A223+self.A223+self.A220+self.ESC+\"47m\"+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.ESC+\"33;40m\"+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"1;33;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A220+self.A223+self.ESC+\"C\"+self.A220+self.ESC+\"1;43m\"+self.A176+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1;37m\"+self.A223+self.ESC+\"0m\"+self.A220+self.ESC+\"2C\"+self.A220+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.A176+self.ESC+\"40m\"+self.A223+self.ESC+\"47m\"+self.A219+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"1;30m\"+self.A220+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mB\"+self.ESC+\"0;33m]\"+self.ESC+\"1muy\"+self.ESC+\"CArmour\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A220+self.A223+self.ESC+\"1;37m\"+self.A220+self.A220+self.ESC+\"0m\"+self.A223+self.ESC+\"C\"+self.ESC+\"1;47m\"+self.A223+self.ESC+\"0m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"40m\"+self.A220+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"1;40m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mS\"+self.ESC+\"0;33m]\"+self.ESC+\"1mell\"+self.ESC+\"CArmour\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"1;37;47m\"+self.A219+self.A178+self.ESC+\"40m\"+self.A220+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.A219+self.A220+self.A223+self.ESC+\"1;30m\"+self.A220+self.A220+self.A219+self.A219+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mY\"+self.ESC+\"0;33m]\"+self.ESC+\"1mour\"+self.ESC+\"CStats\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A178+self.ESC+\"0;33m\"+self.A223+self.A220+self.A223+self.ESC+\"30;43m\"+self.A176+self.A223+self.ESC+\"1;37;40m\"+self.A223+self.A223+self.ESC+\"47m\"+self.A178+self.A177+self.A176+self.A176+self.ESC+\"0m\"+self.A219+self.A219+self.A223+self.ESC+\"1;30m\"+self.A220+self.A176+self.A177+self.A178+self.A223+self.A223+self.ESC+\"0;33m\"+self.A220+self.A219+self.ESC+\"1;30m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mR\"+self.ESC+\"0;33m]\"+self.ESC+\"1meturn\"+self.ESC+\"Cto\"+self.ESC+\"CTown\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A177+self.ESC+\"0;33m\"+self.A219+self.A220+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A220+self.ESC+\"C\"+self.ESC+\"33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A219+self.ESC+\"30;43m\"+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A220+self.A254+self.ESC+\"C\"+self.ESC+\"1;30m\"+self.A220+self.A223+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"33;43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"C\"+self.A178+self.A254+self.ESC+\"33m\"+self.A219+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A223+self.A220+self.ESC+\"1;43m\"+self.A176+self.ESC+\"0;33m\"+self.A220+self.A223+self.A220+self.A223+self.A223+self.A220+self.ESC+\"3C\"+self.ESC+\"1;30m\"+self.A177+self.A220+self.ESC+\"2C\"+self.ESC+\"33m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"33m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A220+self.A220+self.ESC+\"32m\"+self.A254+self.ESC+\"33m\"+self.A223+self.A219+self.A219+self.A220+self.A223+self.A220+self.A220+self.A223+self.A223+self.ESC+\"1;30m\"+self.A220+self.A220+self.A219+self.ESC+\"2C\"+self.A220+self.A178+self.A220+self.ESC+\"C\"+self.ESC+\"33;43m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"0m \"+self.ESC+\"3C\"+self.ESC+\"33m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += self.A223+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.A223+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg",
"def print_colored(word):\n for char in word:\n print(c.rc() + char + c.x, end='')",
"def charcolor(message):\n try:\n print(c.clear)\n while True:\n print_colored(c.clear + c.multi + \"Hello\" + \" \" + who + \"!\")\n except KeyboardInterrupt:\n exit()",
"def color(color):\n if sys.platform == \"win32\":\n if color == \"green\":\n set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"yellow\":\n set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"red\":\n set_text_attr(FOREGROUND_RED | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"blue\":\n set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"reset\":\n set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)\n else :\n if color == \"green\":\n sys.stdout.write('\\033[92m')\n elif color == \"red\":\n sys.stdout.write('\\033[91m')\n elif color == \"blue\":\n sys.stdout.write('\\033[94m')\n elif color == \"reset\":\n sys.stdout.write('\\033[0m')",
"def color(code):\n return lambda t: \"\\033[{0}{1}\\033[0;m\".format(code, t)",
"def ColorizeAA(self, text):\n if (text in ['A', 'F', 'H', 'I', 'K', 'L', 'M', 'P', 'R', 'V', 'W']):\n escape = '\\033[91m' # Red\n elif (text in ['C', 'G', 'N', 'Q', 'S', 'T', 'Y', 'B', 'Z']):\n escape = '\\033[96m' # Blue\n elif (text in ['D', 'E']):\n escape = '\\033[92m' # Green\n elif (text in ['X', '*']):\n escape = '\\033[93m' # Yellow\n else:\n return text\n return escape + text + '\\033[0m'",
"def show_text(self, txt, col=YELLOW, bg=None):\n self.textL = self.fontL.render(txt, True, col, bg)\n self.textL2 = self.fontL.render(txt, True, WHITE, bg)\n phi = 0\n t0 = time()\n while time() < t0 + 3:\n surf = pygame.transform.rotate(self.textL, phi)\n surf2 = pygame.transform.rotate(self.textL2, -phi)\n rect = surf.get_rect()\n rect.center = (self.w//2, self.h//2)\n self.screen.blit(surf, rect)\n self.screen.blit(surf2, rect) \n pygame.display.update()\n phi += 2",
"def textColor(colorNumber):\n return '\\033[%dm' % (30 + colorNumber)",
"def FlashyText(win,center,text,timing):\n winner = Text(center,text)\n winner.setFace(\"arial\")\n winner.setFill(\"black\")\n winner.setSize(30)\n for i in range(1,6):\n time.sleep(timing)\n if i % 2 == 0:\n winner.draw(win)\n else:\n winner.undraw()",
"def in_green(s: str) -> str:\n return f\"\\033[92m{str(s)}\\033[0m\"",
"def colorful_text(text, color=Fore.RESET):\n return color + text + Fore.RESET",
"def messagecleanner(cls):\n cls.message_label['text'] = \" \"\n cls.message_label['fg'] = \"green\"",
"def hash_coloured(text):\n ansi_code = int(sha256(text.encode(\"utf-8\")).hexdigest(), 16) % 230\n return colored(text, ansi_code=ansi_code)",
"def rainbow_text(x,y,ls,lc,**kw):\n t = plt.gca().transData\n fig = plt.gcf()\n plt.show()\n \n #horizontal version\n for s,c in zip(ls,lc):\n text = plt.text(x,y,\" \"+s+\" \",color=c, transform=t, **kw)\n text.draw(fig.canvas.get_renderer())\n ex = text.get_window_extent()\n t = transforms.offset_copy(text._transform, x=ex.width, units='dots')",
"def paintText(self, text):\n return '@paint '+text * 2",
"def text_color(string: str, color: str) -> str:\n return f\"\\x1b{_code(color)}{string}\\x1b[0m\"",
"def _colorstr(self, args):",
"def colorize(text, color):\n\n if not supports_color():\n return text\n\n return color + text + Colors.ENDC",
"def flash_label(self, message, color, time):\n self.set_label(message, color)\n self.after(time, self.unset_label)",
"def label(self, message, fg = None, bg = None, bold = None, blink = None):\n self.savepos()\n self.out.write(self._colorize(message, fg, bg, bold, blink))\n self.restorepos()",
"def controls(msg, color):\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, [dis_width / 3, dis_height / 3])",
"def chase(self, colours, wait_ms=50, iterations=10):\n for it in range(iterations):\n for icol in range(len(colours)):\n for i in range(self._strip.numpixels()):\n self._strip.set_element(i, colours[(i+icol)%len(colours)])\n self._strip.show()\n time.sleep(wait_ms/1000.0)",
"def colour(string: str) -> str:\n string = f\"\\033[32m{string}\\033[0m\"\n return string",
"def use_black_text(self): \n black_foreground = 0\n for color in range(curses.COLORS):\n curses.init_pair(color, black_foreground, color)",
"def showColors(self):\n\t\tcolors = ['white', 'red', 'green', 'orange', 'blue', 'purple', 'cyan', 'lightgrey',\n\t\t\t\t 'darkgrey', 'light red', 'light green', 'yellow', 'light blue', 'purple', 'cyan', 'dark white']\n\t\tmax = curses.COLORS if curses.COLORS <= 16 else 16\n\t\tself.screen.clear()\n\t\tfor c in range(0, max):\n\t\t\tself.wts(c + 2, 1, \"color \" + str(c) + ' : ' + colors[c], c)\n\t\tself.wts(18, 1, \"color 16 : red on white\", 16)\n\t\tself.wts(20, 1, 'Color demo, displaying ' + str(max) + ' colors + 1 special')\n\t\tself.screen.refresh()\n\t\tch = False\n\t\twhile not ch:\n\t\t\tch = self.screen.getch()\n\t\tself.exit('Color demo complete')"
] | [
"0.6488497",
"0.64416146",
"0.6429084",
"0.6220818",
"0.62159586",
"0.61817086",
"0.6160907",
"0.61200315",
"0.61137754",
"0.6078523",
"0.60778445",
"0.6075475",
"0.6072182",
"0.6067036",
"0.606216",
"0.60522145",
"0.602722",
"0.6022543",
"0.60124403",
"0.59898627",
"0.5970709",
"0.5939717",
"0.59361756",
"0.59237605",
"0.5923278",
"0.5912851",
"0.5887154",
"0.58803535",
"0.58767015",
"0.58687913"
] | 0.801215 | 0 |
displays the enigma simulator | def simulator(self, rotors_settings=(1, 2, 3, 'A', 'A', 'A'),
plugboard_settings=None, plain_text=""):
self.clear_screen()
user_label = Label(self.root, text="Hello " + self.username,
font=self.title_font, bg=self.bg_color, height=2)
user_label.grid(pady=10, padx=50, row=0, column=11, columnspan=5)
if plain_text == "":
self.simulator_encryption = []
if plugboard_settings is None:
self.simulator_enigma.plugboard.reset_plugboard()
self.simulator_enigma.rotors.set_rotors(rotors_settings[0], rotors_settings[1],
rotors_settings[2], rotors_settings[3],
rotors_settings[4], rotors_settings[5])
simulator_title = Label(self.root, text="Enigma Simulator",
font=self.title_font, bg=self.bg_color)
simulator_title.grid(row=0, column=2, columnspan=8, rowspan=2, pady=15, padx=5)
lst_labels = []
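# plain-text panel: scrollable, read-only Text widget showing the letters typed so far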
plain_text_frame = Frame(self.root, width=300, height=200)
plain_text_frame.grid(row=2, column=11, columnspan=5, rowspan=3, padx=10)
plain_text_label = Label(plain_text_frame, text="Plain Text",
width=12, font=self.title_font)
plain_text_label.pack(padx=5, pady=3)
text_widget_frame1 = Frame(plain_text_frame)
text_widget_frame1.pack()
scrollbar1 = Scrollbar(text_widget_frame1)
scrollbar1.pack(side=RIGHT, fill=Y)
plain_text_text = Text(text_widget_frame1, width=30, height=8, font=self.text_font,
yscrollcommand=scrollbar1.set)
plain_text_text.pack(padx=5, pady=3)
scrollbar1.config(command=plain_text_text.yview)
plain_text_text.insert(END, plain_text)
plain_text_text.config(state=DISABLED)
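# encrypted-text panel: scrollable, read-only Text widget showing the enciphered output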
encrypted_text_frame = Frame(self.root, width=300, height=200)
encrypted_text_frame.grid(row=6, column=11, columnspan=5, rowspan=3, padx=10)
encrypted_text_label = Label(encrypted_text_frame, text="Encrypted Text",
width=12, font=self.title_font)
encrypted_text_label.pack(padx=5, pady=3)
text_widget_frame2 = Frame(encrypted_text_frame)
text_widget_frame2.pack()
scrollbar2 = Scrollbar(text_widget_frame2)
scrollbar2.pack(side=RIGHT, fill=Y)
encrypted_text_text = Text(text_widget_frame2, width=30, height=8, font=self.text_font,
yscrollcommand=scrollbar2.set)
encrypted_text_text.pack(padx=5, pady=3)
scrollbar2.config(command=encrypted_text_text.yview)
encrypted_text_text.insert(END, self.simulator_enigma.decrypt_encrypt_text(plain_text))
encrypted_text_text.config(state=DISABLED)
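# lampboard: 26 letter labels arranged in three rows; the label of each encrypted letter gets flashed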
for i in range(65, 75):
letter_label = Label(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3)
letter_label.grid(row=2, column=i - 64, pady=5, padx=5)
lst_labels.append(letter_label)
for i in range(75, 85):
letter_label = Label(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3)
letter_label.grid(row=3, column=i - 74, pady=5, padx=5)
lst_labels.append(letter_label)
for i in range(85, 91):
letter_label = Label(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3)
letter_label.grid(row=4, column=i - 82, pady=5, padx=5)
lst_labels.append(letter_label)
label_line = Label(self.root, text=" ", font=self.text_font, bg=self.bg_color)
label_line.grid(row=5, column=0)
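# keyboard: 26 letter buttons in three rows; each press runs color_letter to encrypt that letter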
for i in range(65, 75):
letter_button = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
height=2, width=3, bg="sienna2",
command=lambda letter_ord=i:
self.color_letter(chr(letter_ord),
lst_labels,
plain_text_text,
encrypted_text_text))
letter_button.grid(row=6, column=i - 64, pady=5, padx=5)
for i in range(75, 85):
letter_button = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
height=2, width=3, bg="sienna2",
command=lambda letter_ord=i:
self.color_letter(chr(letter_ord),
lst_labels,
plain_text_text,
encrypted_text_text))
letter_button.grid(row=7, column=i - 74, pady=5, padx=5)
for i in range(85, 91):
letter_button = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
height=2, width=3, bg="sienna2",
command=lambda letter_ord=i:
self.color_letter(chr(letter_ord),
lst_labels,
plain_text_text,
encrypted_text_text))
letter_button.grid(row=8, column=i - 82, pady=5, padx=5)
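# bottom row of navigation and action buttons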
button_go_back = Button(self.root, text="go back to\nchoose path", font=self.text_font,
height=2, width=15, command=self.choose_path)
button_go_back.grid(row=10, column=1, columnspan=4, rowspan=2, pady=20, padx=5)
button_change_settings = Button(self.root, text="change settings", font=self.text_font,
height=2, width=15, command=self.change_settings)
button_change_settings.grid(row=10, column=5, columnspan=4, rowspan=2, pady=20, padx=5)
button_explain = Button(self.root, text="See Encryption", font=self.text_font,
height=2, width=15,
command=lambda: self.show_simulator_encryption(rotors_settings,
                                               plugboard_settings,
                                               plain_text_text.get("1.0", END).replace("\n", "")))
button_explain.grid(row=10, column=9, columnspan=4, rowspan=2, pady=20, padx=5)
plugboard_settings_to_send = [self.simulator_enigma.plugboard.plugboard1,
self.simulator_enigma.plugboard.plugboard2]
button_change_settings = Button(self.root, text="send encrypted\nmessage",
height=2, width=15, font=self.text_font,
command=lambda: self.send(plain_text_text, rotors_settings,
plugboard_settings_to_send))
button_send_message.grid(row=10, column=13, columnspan=4, rowspan=2, pady=20, padx=5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_eng(self):\n self.clear_terminal()\n self.menu_eng()\n self.handle_selection_eng()",
"def display(self):\n viewer = SpectraViewer(spectrometer=self)\n viewer.display()",
"def display(self):\n viewer = SpectraViewer(spectrometer=self)\n viewer.display()",
"def Main():\n EnigmaSim = simulation() #Creates the simulation object\n EnigmaSim.Run() #Runs the simulation",
"def show_simulator_encryption(self, rotors_settings, plugboard_settings, plain_text,\r\n letter_number=1):\r\n self.clear_screen()\r\n\r\n if len(self.simulator_encryption) > 0:\r\n user_label = Label(self.root, text=\"Hello \" + self.username,\r\n font=self.text_font, bg=self.bg_color)\r\n user_label.grid(pady=5, row=0, column=0, columnspan=5)\r\n lbl_encryption = Label(self.root,\r\n text=\"Encrypting The Letter: \" +\r\n self.simulator_encryption[letter_number - 1][1][0],\r\n font=self.text_font, bg=self.bg_color)\r\n lbl_encryption.grid(row=1, column=0, columnspan=5, pady=5, padx=10)\r\n\r\n # text widget to display the stages of the encryption written\r\n encryption_text_widget = Text(self.root, width=30, height=19,\r\n bg=\"khaki\", font=self.text_font)\r\n encryption_text_widget.grid(row=2, rowspan=7, column=0,\r\n columnspan=5, padx=10, pady=5)\r\n encryption_text_widget.insert(END, self.simulator_encryption[letter_number - 1][0])\r\n encryption_text_widget.config(state=DISABLED)\r\n\r\n # setting canvas to display the encryption visually\r\n encryption_stages_list = self.simulator_encryption[letter_number - 1][1]\r\n show_canvas = ShowEncryption(self.root, encryption_stages_list)\r\n show_canvas.set_canvas()\r\n\r\n # setting a next/previous button if necessary\r\n if len(self.simulator_encryption) > letter_number:\r\n next_button = Button(self.root, width=20, height=2,\r\n text=\"Next Letter\", font=self.text_font,\r\n command=lambda:\r\n self.show_simulator_encryption(rotors_settings,\r\n plugboard_settings,\r\n plain_text,\r\n letter_number + 1))\r\n next_button.grid(row=11, column=0, columnspan=5, padx=10, pady=5)\r\n if letter_number > 1:\r\n previous_button = Button(self.root, width=20, height=2,\r\n text=\"Previous Letter\", font=self.text_font,\r\n command=lambda:\r\n self.show_simulator_encryption(rotors_settings,\r\n plugboard_settings,\r\n plain_text,\r\n letter_number - 1))\r\n previous_button.grid(row=9, column=0, columnspan=5, padx=10, pady=5)\r\n else:\r\n # no letters were encrypted\r\n lbl_encryption = Label(self.root, text=\"No Letters Have Been Encrypted\",\r\n font=self.text_font, bg=self.bg_color)\r\n lbl_encryption.grid(row=0, column=0, columnspan=5, pady=10, padx=10)\r\n\r\n button_go_back = Button(self.root, text=\"go back to simulator\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.simulator(rotors_settings,\r\n plugboard_settings, plain_text))\r\n button_go_back.grid(row=10, column=0, columnspan=5, padx=10, pady=5)",
"def play(self):\n self.mu1=random.randrange(20,40,1)\n self.sd=round(self.mu1*(5/30))\n self.loss=random.randrange(5,20,1)\n self.alpha = 1.96 # set to 5%\n self.beta = 0.84 # set to 80%\n self.mu2= self.mu1-self.mu1*(self.loss/100)\n self.n=((self.alpha+self.beta)/((self.mu2-self.mu1)/self.sd))**2\n # clear all fields \n self.clear() \n #\n # Add instruction text to the first text window\n self.instruction_message=(\"If you have a population mean of %s \"\n \"\\nand a standard deviation of %s,\"\n \"\\nwhat sample size is required\\n\" \n \"to detect a loss of %s percent? \\n\"\n \"\\nNote: alpha = 5 percent and power = 80 percent\" %(self.mu1,self.sd,self.loss))\n \n self.text_instructions.insert(0.0,self.instruction_message)\n # disable the play button so it cannot be pressed again",
"def display(self):\n art = \"\\n\".join([\"\".join(row) for row in self.text])\n if self.args.output:\n with open(self.args.output, \"w\") as f:\n f.write(art)\n\n if self.args.verbose:\n print(art)",
"def display_eink(image):\n if epd:\n epd.display(epd.getbuffer(image))\n else:\n image.show()",
"def show(self):\n # Used for testing because there is obviously no way back\n # from VISU_Gen.SetCurrentStudy\n if not self.display:\n return\n\n # Desactivation : Load the med file in the PARAVIS component\n #import smeca_utils.visu_utils as VU\n #log.info(\"Loading Paravis module...\")\n #msg = VU.load_med_file(self.read_fname())\n #log.info(msg)",
"def display(self, message=\"\"):\n print(\"-\" * (79 - len(message)), end=\" \")\n print(message)\n if self.mat is None:\n print(\"None\")\n else:\n print(self.__repr__())\n print(\"=\" * 80)",
"def display():\r\n fill(un)\r\n ellipse(x,y,2*rayonBalle,2*rayonBalle)\r\n \r\n fill(deux)\r\n ellipse(xDeux,yDeux,2*rayonBalle,2*rayonBalle) \r\n \r\n fill(trois)\r\n ellipse(xTrois,yTrois,2*rayonBalle,2*rayonBalle)",
"def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")",
"def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)",
"def print_actuator(upperarm_length = 11, forarm_preActuator_length = 1, elbow_angle = 0, forarm_postActuator_length =4, theta=0, num_pads = 2):\r\n \r\n # pad parameters\r\n pad_length = 2.6 # length in Y\r\n pad_width = 2.6 # width in x\r\n n_meanders = 8\r\n pad_print_speed = 4.5\r\n pad_print_speed = pad_print_speed * 0.75\r\n meander_separation_dist = pad_length/n_meanders\r\n vent_stem_length = 1.5 # added 20150327\r\n vent_spot_dwell = 0.5 # added 20150327\r\n \r\n def print_actuator_pad():\r\n \"\"\"Helper funciton. Print one actuator pad here\"\"\"\r\n e3DPGlobals.g.write(\"\\n; Print actuator pad.\")\r\n e3DPGlobals.g.feed(pad_print_speed)\r\n e3DMatrixPrinting.move_x(-pad_width/2, theta) #move to the lower left corner of the pad\r\n for meander in range(n_meanders-1):\r\n e3DMatrixPrinting.move_xy(x_distance=pad_width, y_distance=meander_separation_dist,theta=theta) # horizontal across the whole pad\r\n e3DMatrixPrinting.move_x(-pad_width,theta)\r\n e3DMatrixPrinting.move_xy(x_distance=pad_width, y_distance=meander_separation_dist,theta=theta) \r\n e3DMatrixPrinting.move_x(-pad_width/2, theta) # move to the middle of the top of the pad\r\n \r\n e3DPGlobals.g.write(\"\\n; PRINT ACTUATOR.\") \r\n e3DPGlobals.g.relative() \r\n e3DPGlobals.g.feed(e3DMatrixPrinting.default_print_speed*3) # *3 added on D-52\r\n e3DPGlobals.g.feed(e3DMatrixPrinting.default_print_speed/2) # *3 added on D-52\r\n e3DPGlobals.g.feed(e3DMatrixPrinting.default_print_speed) \r\n \r\n #print the forarm up to the elbow bend\r\n e3DMatrixPrinting.move_y(upperarm_length, theta)\r\n \r\n theta+=elbow_angle # make a turn at the elbow\r\n \r\n #print the forarm after the elbow bend to the first pad\r\n e3DMatrixPrinting.move_y(forarm_preActuator_length, theta)\r\n \r\n # block below added on 2015.04.03\r\n if num_pads == 1:\r\n sign = (-1 if theta<0 else 1)\r\n theta = sign * 120\r\n print \"theta_new\"\r\n print theta\r\n e3DMatrixPrinting.move_y(1, theta) \r\n \r\n #print actuator pad 1\r\n print_actuator_pad()\r\n \r\n if num_pads == 2:\r\n #print connection stem to actuator second actuator pad\r\n e3DPGlobals.g.feed(e3DMatrixPrinting.default_print_speed*3) # *3 added on D-52\r\n e3DPGlobals.g.feed(e3DMatrixPrinting.default_print_speed/2) # *3 added on D-52\r\n e3DPGlobals.g.feed(e3DMatrixPrinting.default_print_speed) \r\n e3DMatrixPrinting.move_y(forarm_postActuator_length, theta)\r\n \r\n #print actuator pad 2\r\n print_actuator_pad()\r\n \r\n #added 20150327: print short stem to vent\r\n e3DMatrixPrinting.move_y(vent_stem_length, theta)\r\n e3DPGlobals.g.dwell(vent_spot_dwell)\r\n #added 20150501, Experiment D-104 to avoid blobs at end of actuator\r\n e3DMatrixPrinting.turn_pressure_off(com_port = 1, start_stop_dwell_time = 0)\r\n e3DPGlobals.g.dwell(1)\r\n e3DMatrixPrinting.move_y(2, theta)\r\n \r\n e3DPGlobals.g.absolute() \r\n e3DMatrixPrinting.travel_mode()\r\n e3DPGlobals.g.write(\"\\n; Done with Actuator.\\n\\n\")",
"def imprimir_menu():\n print(\"Que desea realizar en la matriz\")\n print(\"1. Presentar el nro Central \")\n print(\"2. Presentar los nros en forma espiral desde el centro \")\n print(\"3. Multiplos del nro central\")",
"def display(self, index):\n img = self.img(index)\n transcription = self.transcript(index)\n plt.imshow(self.norm_img(img), cmap='bone')\n plt.title(transcription, fontdict={'fontsize': 64})\n plt.show()",
"def display_menu(self):\n\t\t\n\t\tmenu = {\n\t\t\t'1' : self.jouerMusique,\n\t\t\t'2' : self.enregistrerMusique,\n\t\t\t'3' : self.notesCmd,\n\t\t\t'4' : self.notesGraphical,\n\t\t\t'5' : self.changeInstrument,\n\t\t\t'6' : self.switchDisplay,\n\t\t\t'7' : self.stop\n\t\t}\n\t\t\n\t\tif self.display:\n\t\t\tstatut = \"activé\"\n\t\telse:\n\t\t\tstatut = \"désactivé\"\n\t\t\n\t\tprint \"################\"\n\t\tprint \"##### MENU #####\"\n\t\tprint \"################\"\n\t\tprint\n\t\tprint \"1. Jouer une musique écrite\"\n\t\tprint \"2. Enregistrer une musique écrite\"\n\t\tprint \"3. Jouer des notes en ligne de commande\"\n\t\tprint \"4. Jouer des notes sur un clavier graphique\"\n\t\tprint \"5. Changer d'instrument\"\n\t\tprint \"6. Activer/désactiver les affichages et enregistrements. (actuellement \" + statut + \")\"\n\t\tprint \"7. Quitter\"\n\t\tprint\n\t\t\n\t\tn = str(raw_input('Choix : '))\n\t\tmenu.get(n,self.default)()",
"def show_encs():\n encs = g.encoders\n out = \"%sEncoding profiles:%s\\n\\n\" % (c.ul, c.w)\n\n for x, e in enumerate(encs):\n sel = \" (%sselected%s)\" % (c.y, c.w) if Config.ENCODER.get == x else \"\"\n out += \"%2d. %s%s\\n\" % (x, e['name'], sel)\n\n g.content = out\n message = \"Enter %sset encoder <num>%s to select an encoder\"\n g.message = message % (c.g, c.w)",
"def henhouseDisplayMenu () :\r\n print('1.Predict egg production')\r\n print('2.Display needs')\r\n print('0.Exit henhouse management')\r\n print()\r\n print('Please choose an option from the above menu')",
"def show(self):",
"def show_es():\n img = rotated_e()\n for i in range(4):\n plt.subplot(2, 2, i + 1)\n plt.imshow(img[i], cmap=plt.cm.gray, interpolation='nearest')\n plt.show()",
"def displayScene(self):\n sceneprint = \"\"\n sceneprint += \" \"*40 + Back.LIGHTRED_EX + Fore.LIGHTCYAN_EX + Style.BRIGHT + \"M A N D A L O R I A N\\n\" + RESET\n sceneprint += Fore.LIGHTBLUE_EX +\"SCORE : \" +\\\n str(self.__score) + \" \"*30 +\"TIME : \" + str(self.__remaining_time) + \" \"*30 +\\\n \"LIVES:\" + str(self.__lives)+\"\\n\"+ RESET\n if self.__start >= self.__fullwidth - self.__width:\n self.__start = self.__fullwidth - self.__width\n for i in range(0, self.__height):\n for j in range(self.__start, self.__start + self.__width):\n sceneprint += str(self.__matrix[i][j])\n sceneprint += '\\n'\n \n if self.__start + sc_span < sc_full - 5:\n self.__start = self.__start + 1\n if self.__score < 420420420:\n self.__score += 1\n pass\n\n return sceneprint",
"def show_game_mission():\n print_bold(\"Misija:\")\n print(\"\\tOdaberi kućicu u kojoj se Talion može odmoriti ...\")\n print_bold(\"SAVJET:\")\n print(\"PAZI kako biraš jer neprijatelji su blizu!\")\n print_dotted_line()",
"def display_eng_word():\n en_word = rand_word[\"English\"] # Grabs the English word of the current word\n canvas.itemconfig(card_title, text=\"English\", fill=\"white\") # Change screen title to English\n canvas.itemconfig(card_word, text=en_word, fill=\"white\") # Display the english word of the current displaying french word\n canvas.itemconfig(canvas_image, image=back_image) # Changes the background",
"def main():\n # background\n background = background_maker()\n\n # face\n face = face_maker()\n\n # eye\n eye_l = eye_maker()\n eye_r = eye_maker()\n\n # mouth\n mouth = mouth_maker()\n mouth_1 = GArc(60, 60, 290, 60)\n mouth_2 = GArc(60, 60, 190, 60)\n\n # nose\n nose = GOval(10, 10)\n nose.filled = True\n\n # ear\n ear_l = ear_maker()\n ear_r = ear_maker()\n ear_ll = ear2_maker()\n ear_rr = ear2_maker()\n\n # body\n body = body_maker()\n body2 = body2_maker()\n body3 = body3_maker()\n\n # label\n label = label_maker('Rilakkuma', 70)\n label2 = label_maker('Min', 10, font='Dialog')\n\n # arm\n arm_l = arm1_maker()\n arm_r = arm2_maker()\n\n # leg\n leg = leg_maker()\n leg2 = leg_maker()\n\n # show my draw\n window.add(background)\n window.add(leg, (window.width - leg.width) / 2 - body.width/3.7, (window.height - leg.height) / 2 + body.height*1.1)\n window.add(leg2, (window.width - leg2.width) / 2 + body.width / 3.7,\n (window.height - leg2.height) / 2 + body.height * 1.1)\n window.add(body, (window.width - body.width) / 2, (window.height - body.height) / 2 + face.height/1.4)\n window.add(body2, (window.width - body2.width) / 2,\n (window.height - body2.height) / 2 + face.height/1.4 + body.height/3.3)\n window.add(body3, (window.width - body3.width) / 2, (window.height - body3.height) / 2 + face.height/1.2)\n window.add(arm_l, (window.width - arm_l.width) / 2 - body.width / 2.9,\n (window.height - arm_l.height) / 2 + face.height / 1.5)\n window.add(arm_r, (window.width - arm_r.width) / 2 + body.width / 2.9,\n (window.height - arm_r.height) / 2 + face.height / 1.5)\n window.add(label, (window.width-label.width)/2, window.height/4)\n window.add(ear_l, (window.width - ear_l.width) / 2 - face.width / 2.25,\n (window.height - ear_l.height) / 2 - face.height / 3)\n window.add(ear_ll, (window.width - ear_ll.width) / 2 - face.width / 2.25,\n (window.height - ear_ll.height) / 2 - face.height / 3.5)\n window.add(ear_r, (window.width - ear_r.width) / 2 + face.width / 2.25,\n (window.height - ear_r.height) / 2 - face.height / 3)\n window.add(ear_rr, (window.width - ear_rr.width) / 2 + face.width / 2.25,\n (window.height - ear_rr.height) / 2 - face.height / 3.5)\n window.add(face, (window.width - face.width) / 2, (window.height - face.height) / 2)\n window.add(eye_l, (window.width - eye_l.width) / 2 - face.width / 5, (window.height - eye_l.height) / 2)\n window.add(eye_r, (window.width - eye_r.width) / 2 + face.width / 5, (window.height - eye_r.height) / 2)\n window.add(mouth, (window.width - mouth.width) / 2, (window.height - mouth.height) / 2 + face.height / 8)\n window.add(nose, (window.width - nose.width) / 2, (window.height - nose.height) / 2 + face.height / 12)\n window.add(mouth_1, (window.width - mouth_1.width) / 2 - face.width / 20,\n (window.height - mouth_1.height) / 2 + face.height / 11)\n window.add(mouth_2, (window.width - mouth_2.width) / 2 + face.width / 20,\n (window.height - mouth_2.height) / 2 + face.height / 11)\n window.add(label2, window.width-label2.width, window.height)\n\n # kuma2\n kuma2_color = '0xFFEEDD'\n face2 = face_maker(140, color=kuma2_color)\n\n eye2_l = eye_maker(size=15)\n eye2_r = eye_maker(size=15)\n\n mouth2 = mouth_maker(size=40)\n mouth2_1 = GArc(60, 60, 290, 60)\n mouth2_2 = GArc(60, 60, 190, 60)\n\n nose2 = GOval(8, 8)\n nose2.filled = True\n\n ear2_l = ear_maker(size=50, color=kuma2_color)\n ear2_r = ear_maker(size=50, color=kuma2_color)\n ear2_ll = ear2_maker(size=30, color='0xFFC1E0')\n ear2_rr = ear2_maker(size=30, color='0xFFC1E0')\n\n body_2 = 
body_maker(size=100, color=kuma2_color)\n body2_2 = body2_maker(size=85, color=kuma2_color)\n body3_2 = body3_maker(size=60)\n\n arm2_l = arm1_maker(size=40, color=kuma2_color)\n arm2_r = arm2_maker(size=40, color=kuma2_color)\n\n leg_2 = leg_maker(size=25, color=kuma2_color)\n leg2_2 = leg_maker(size=25, color=kuma2_color)\n\n buttons = GOval(15, 15)\n buttons.filled = True\n buttons.fill_color = 'red'\n\n window.add(leg_2, (window.width - leg_2.width) / 2 - face.width / 1.05 - body_2.width/3.3,\n (window.height - leg_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(leg2_2, (window.width - leg2_2.width) / 2 - face.width / 1.05 + body_2.width/3.3,\n (window.height - leg2_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(body_2, (window.width - body_2.width) / 2 - face.width/1.05,\n (window.height - body_2.height) / 2 + face.height / 1.4)\n window.add(body2_2, (window.width - body2_2.width) / 2 - face.width/1.05,\n (window.height - body2_2.height) / 2 + face.height / 1.4 + body_2.height / 3.3)\n window.add(body3_2, (window.width - body3_2.width) / 2 - face.width/1.05,\n (window.height - body3_2.height) / 2 + face.height / 1.2)\n window.add(arm2_l, (window.width - arm2_l.width) / 2 - face.width / 1.05 - body_2.width/2.9,\n (window.height - arm2_l.height) / 2 + face2.height / 1.06)\n window.add(arm2_r, (window.width - arm2_r.width) / 2 - face.width / 1.05 + body_2.width/2.9,\n (window.height - arm2_r.height) / 2 + face2.height / 1.06)\n window.add(ear2_l, (window.width - ear2_l.width) / 2 - face.width / 0.8,\n (window.height - ear2_l.height) / 2 - face2.height / 9)\n window.add(ear2_ll, (window.width - ear2_ll.width) / 2 - face.width / 0.8,\n (window.height - ear2_ll.height) / 2 - face2.height / 15)\n window.add(ear2_r, (window.width - ear2_r.width) / 2 - face.width / 1.5,\n (window.height - ear2_r.height) / 2 - face2.height / 9)\n window.add(ear2_rr, (window.width - ear2_rr.width) / 2 - face.width / 1.52,\n (window.height - ear2_rr.height) / 2 - face2.height / 15)\n window.add(face2, (window.width-face2.width)/2 - face.width/1.05, (window.height-face2.height)/2 + face2.height/4)\n window.add(eye2_l, (window.width - eye2_l.width) / 2 - face.width / 0.9,\n (window.height - eye2_l.height) / 2 + face2.height/4)\n window.add(eye2_r, (window.width - eye2_r.width) / 2 - face.width / 1.25,\n (window.height - eye2_r.height) / 2 + face2.height/4)\n window.add(mouth2, (window.width - mouth2.width) / 2 - face.width/1.05,\n (window.height - mouth2.height) / 2 + face2.height / 2.4)\n window.add(nose2, (window.width - nose2.width) / 2 - face.width/1.05,\n (window.height - nose2.height) / 2 + face2.height / 2.5)\n window.add(mouth2_1, (window.width - mouth2_1.width) / 2 - face.width / 1,\n (window.height - mouth2_1.height) / 2 + face2.height / 2.5)\n window.add(mouth2_2, (window.width - mouth2_2.width) / 2 - face.width / 1.1,\n (window.height - mouth2_2.height) / 2 + face2.height / 2.5)\n window.add(buttons, (window.width-buttons.width)/2 - face.width/1.05,\n (window.height-buttons.height)/2 + face.height/1.62)",
"def viz(analogies):\n print(\"Index\".ljust(12) + \"Analogy\".center(45) + \"Gender score\".rjust(12))\n print(\"-\" * 69)\n print(\n \"\\n\".join(\n str(i).rjust(4) + a[0].rjust(29) + \" | \" + a[1].ljust(29) + (str(a[2]))[:4]\n for i, a in enumerate(analogies)\n )\n )",
"def print_welcome():\n print(\"Welcome to Langton's ant simulator! Choose option: \")\n print(\"1 -> Create white blank picture\")\n print(\"2 -> Load file\")\n print(\"3 -> Generate picture with given probability\")",
"def abdul(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"1;33m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30m\"+self.A220+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.A220+self.ESC+\"1;43m\"+self.A219+self.A178+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A223+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"33m\"+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"C\"+self.A254+self.ESC+\"33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+self.ESC+\"1;43m\"+self.A177+self.A176+self.ESC+\"C\"+self.A176+self.ESC+\"C\"+self.ESC+\"37;40mSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"33mAbduls\"+self.ESC+\"CArmour \"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A177+self.ESC+\"0;32m\"+self.A220+self.ESC+\"C\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"2C\"+self.ESC+\"0;33m\"+self.A223+self.A219+self.ESC+\"C\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"33m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"33m\"+self.A220+self.A219+self.A223+self.ESC+\"37m\"+self.A220+self.ESC+\"1;47m\"+self.A176+self.ESC+\"0m\"+self.A219+self.A223+self.ESC+\"1;30;47m\"+self.A176+self.A176+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A177+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1;30m\"+self.A223+self.ESC+\"4C\"+self.ESC+\"0;33mBehind\"+self.ESC+\"Cthe\"+self.ESC+\"Cdesk\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"Carmour\"+self.ESC+\"Cshop\"+self.ESC+\"Cis\"+self.ESC+\"Can\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"37;40m\"+self.A223+self.ESC+\"1;47m\"+self.A177+self.A176+self.ESC+\"C\"+self.ESC+\"0m\"+self.A219+self.A219+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A178+self.ESC+\"C\"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"4C\"+self.ESC+\"0;33mamazingly\"+self.ESC+\"Cattractive\"+self.ESC+\"Clooking\"+self.ESC+\"Cfemale - she seems\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"33;40m\"+self.A223+self.ESC+\"1;37m\"+self.A220+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30m\"+self.A220+self.A220+self.A223+self.ESC+\"47m\"+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.ESC+\"1;30;43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;32m\"+self.A220+self.A254+self.ESC+\"3C\"+self.ESC+\"33mbusy, doing her mails but she\"+self.ESC+\"Casks\"+self.ESC+\"C\\\"\"+self.ESC+\"1mHow\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A223+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"1;37;47m\"+self.A178+self.ESC+\"40m\"+self.A222+self.A222+self.ESC+\"47m\"+self.A176+self.ESC+\"C\"+self.ESC+\"30m\"+self.A176+self.ESC+\"C\"+self.A177+self.ESC+\"40m\"+self.A220+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"47m\"+self.A219+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A223+self.ESC+\"32m\"+self.A220+self.A178+self.ESC+\"6C\"+self.ESC+\"1;33mmay\"+self.ESC+\"CI\"+self.ESC+\"Cbe\"+self.ESC+\"Cof\"+self.ESC+\"Cservice?\"+self.ESC+\"0;33m\\\"\\r\\n\"\n\t\tthismsg += self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"C\"+self.A220+self.A220+self.ESC+\"C\"+self.ESC+\"1;37m\"+self.A223+self.ESC+\"47m\"+self.A178+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A177+self.ESC+\"0m\"+self.A220+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0m\"+self.A220+self.ESC+\"1;30m\"+self.A223+self.A223+self.A220+self.ESC+\"47m\"+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.ESC+\"33;40m\"+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"1;33;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A220+self.A223+self.ESC+\"C\"+self.A220+self.ESC+\"1;43m\"+self.A176+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1;37m\"+self.A223+self.ESC+\"0m\"+self.A220+self.ESC+\"2C\"+self.A220+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.A176+self.ESC+\"40m\"+self.A223+self.ESC+\"47m\"+self.A219+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"1;30m\"+self.A220+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mB\"+self.ESC+\"0;33m]\"+self.ESC+\"1muy\"+self.ESC+\"CArmour\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A220+self.A223+self.ESC+\"1;37m\"+self.A220+self.A220+self.ESC+\"0m\"+self.A223+self.ESC+\"C\"+self.ESC+\"1;47m\"+self.A223+self.ESC+\"0m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"40m\"+self.A220+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"1;40m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mS\"+self.ESC+\"0;33m]\"+self.ESC+\"1mell\"+self.ESC+\"CArmour\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"1;37;47m\"+self.A219+self.A178+self.ESC+\"40m\"+self.A220+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.A219+self.A220+self.A223+self.ESC+\"1;30m\"+self.A220+self.A220+self.A219+self.A219+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mY\"+self.ESC+\"0;33m]\"+self.ESC+\"1mour\"+self.ESC+\"CStats\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A178+self.ESC+\"0;33m\"+self.A223+self.A220+self.A223+self.ESC+\"30;43m\"+self.A176+self.A223+self.ESC+\"1;37;40m\"+self.A223+self.A223+self.ESC+\"47m\"+self.A178+self.A177+self.A176+self.A176+self.ESC+\"0m\"+self.A219+self.A219+self.A223+self.ESC+\"1;30m\"+self.A220+self.A176+self.A177+self.A178+self.A223+self.A223+self.ESC+\"0;33m\"+self.A220+self.A219+self.ESC+\"1;30m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mR\"+self.ESC+\"0;33m]\"+self.ESC+\"1meturn\"+self.ESC+\"Cto\"+self.ESC+\"CTown\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A177+self.ESC+\"0;33m\"+self.A219+self.A220+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A220+self.ESC+\"C\"+self.ESC+\"33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A219+self.ESC+\"30;43m\"+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A220+self.A254+self.ESC+\"C\"+self.ESC+\"1;30m\"+self.A220+self.A223+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"33;43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"C\"+self.A178+self.A254+self.ESC+\"33m\"+self.A219+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A223+self.A220+self.ESC+\"1;43m\"+self.A176+self.ESC+\"0;33m\"+self.A220+self.A223+self.A220+self.A223+self.A223+self.A220+self.ESC+\"3C\"+self.ESC+\"1;30m\"+self.A177+self.A220+self.ESC+\"2C\"+self.ESC+\"33m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"33m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A220+self.A220+self.ESC+\"32m\"+self.A254+self.ESC+\"33m\"+self.A223+self.A219+self.A219+self.A220+self.A223+self.A220+self.A220+self.A223+self.A223+self.ESC+\"1;30m\"+self.A220+self.A220+self.A219+self.ESC+\"2C\"+self.A220+self.A178+self.A220+self.ESC+\"C\"+self.ESC+\"33;43m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"0m \"+self.ESC+\"3C\"+self.ESC+\"33m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += self.A223+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.A223+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg",
"def show(self) -> None:",
"def intro():\n print(\" ___ _ _ _ ____ \")\n print(\"|_ _|_ __ __| (_) __ _ _ __ __ _ | | ___ _ __ ___ ___ |___ \\\\ \")\n print(\" | || '_ \\\\ / _` | |/ _` | '_ \\\\ / _` | _ | |/ _ \\\\| '_ \\\\ / _ \\\\/ __| __) |\")\n print(\" | || | | | (_| | | (_| | | | | (_| | | |_| | (_) | | | | __/\\\\__ \\\\ / __/ \")\n print(\"|___|_| |_|\\\\__,_|_|\\\\__,_|_| |_|\\\\__,_| \\\\___/ \\\\___/|_| |_|\\\\___||___/ |_____|\")\n print('and his Great Python Adventure'.center(80))\n print()"
] | [
"0.6631773",
"0.6175526",
"0.6175526",
"0.61313206",
"0.6080019",
"0.5969126",
"0.5952151",
"0.5921271",
"0.5917744",
"0.59115",
"0.59061027",
"0.5887677",
"0.58850664",
"0.5868575",
"0.583884",
"0.5790793",
"0.5734754",
"0.5731992",
"0.5710725",
"0.57077515",
"0.5699957",
"0.56661326",
"0.56649774",
"0.5656126",
"0.56526923",
"0.56486535",
"0.56440806",
"0.56296104",
"0.56210434",
"0.56136036"
] | 0.6890839 | 0 |
this function lets the user change the settings of the simulator | def change_settings(self):
self.clear_screen()
# making sure the screen grid will be organized
label_line = Label(self.root, text=" ", font=self.text_font, bg=self.bg_color)
label_line.grid(row=0, column=0)
label_line = Label(self.root, text=" ", font=self.text_font, bg=self.bg_color)
label_line.grid(row=0, column=10)
user_label = Label(self.root, text="Hello " + self.username,
font=self.title_font, bg=self.bg_color, height=2)
user_label.grid(pady=10, padx=50, row=0, column=6, columnspan=4)
settings_title = Label(self.root, text="Enigma Settings",
font=self.title_font, bg=self.bg_color)
settings_title.grid(row=0, column=2, columnspan=4, pady=15)
rotor1_num, rotor2_num, rotor3_num, rotor1_letter, rotor2_letter, rotor3_letter = \
self.simulator_enigma.rotors.get_initial_setting()
lst_roman_rotor_num = ["I", "II", "III", "IV", "V"]
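# rotor slot selection: three drop-down menus choosing which rotor (I-V) sits in each position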
rotors_number = Label(self.root, text="the rotors in the enigma",
font=self.title_font, bg=self.bg_color)
rotors_number.grid(row=1, column=3, columnspan=5, pady=5)
numbers_lst = ["I", "II", "III", "IV", "V"]
first_rotor_label_num = Label(self.root, text="First Rotor",
font=self.text_font, bg=self.bg_color)
first_rotor_label_num.grid(row=2, column=1, columnspan=3)
options_rotor1 = StringVar()
options_rotor1.set(lst_roman_rotor_num[int(rotor1_num) - 1])
rotor_num1_options = OptionMenu(self.root, options_rotor1, *numbers_lst)
rotor_num1_options.grid(row=3, column=1, columnspan=3, padx=15)
second_rotor_label_num = Label(self.root, text="Second Rotor",
font=self.text_font, bg=self.bg_color)
second_rotor_label_num.grid(row=2, column=4, columnspan=3)
options_rotor2 = StringVar()
options_rotor2.set(lst_roman_rotor_num[int(rotor2_num) - 1])
rotor_num2_options = OptionMenu(self.root, options_rotor2, *numbers_lst)
rotor_num2_options.grid(row=3, column=4, columnspan=3, padx=15)
third_rotor_label_num = Label(self.root, text="Third Rotor",
font=self.text_font, bg=self.bg_color)
third_rotor_label_num.grid(row=2, column=7, columnspan=3)
options_rotor3 = StringVar()
options_rotor3.set(lst_roman_rotor_num[int(rotor3_num) - 1])
rotor_num3_options = OptionMenu(self.root, options_rotor3, *numbers_lst)
rotor_num3_options.grid(row=3, column=7, columnspan=3, padx=15)
rotors_letters = Label(self.root, text="the letters on the rotors",
font=self.title_font, bg=self.bg_color)
rotors_letters.grid(row=4, column=3, columnspan=5, pady=5)
abc_lst = [chr(i) for i in range(65, 91)]
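        # one drop-down per rotor to choose its start letter (A-Z), preset to the current letter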
first_rotor_label_letter = Label(self.root, text="first Rotor",
font=self.text_font, bg=self.bg_color)
first_rotor_label_letter.grid(row=5, column=1, columnspan=3)
options_rotor_l1 = StringVar()
options_rotor_l1.set(rotor1_letter)
rotor_l1_options = OptionMenu(self.root, options_rotor_l1, *abc_lst)
rotor_l1_options.grid(row=6, column=1, columnspan=3, padx=15)
second_rotor_label_letter = Label(self.root, text="second Rotor",
font=self.text_font, bg=self.bg_color)
second_rotor_label_letter.grid(row=5, column=4, columnspan=3)
options_rotor_l2 = StringVar()
options_rotor_l2.set(rotor2_letter)
rotor_l2_options = OptionMenu(self.root, options_rotor_l2, *abc_lst)
rotor_l2_options.grid(row=6, column=4, columnspan=3, padx=15)
third_rotor_label_letter = Label(self.root, text="Third Rotor",
font=self.text_font, bg=self.bg_color)
third_rotor_label_letter.grid(row=5, column=7, columnspan=3)
options_rotor_l3 = StringVar()
options_rotor_l3.set(rotor3_letter)
rotor_l3_options = OptionMenu(self.root, options_rotor_l3, *abc_lst)
rotor_l3_options.grid(row=6, column=7, columnspan=3, padx=15)
plugboard_title = Label(self.root, text="Plugboard settings",
font=self.title_font, bg=self.bg_color)
plugboard_title.grid(row=7, column=3, columnspan=5, pady=5)
plugboard_note = Label(self.root, text="Plugboard can contain 10 pairs max",
bg=self.bg_color, font=self.text_font)
plugboard_note.grid(row=8, column=3, columnspan=5, pady=5)
lst_buttons = []
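        # build the plugboard keyboard as three rows of letter buttons (A-I, J-R, S-Z); clicking a letter updates the plugboard pairing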
for i in range(65, 74):
plugboard_letter = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3,
command=lambda letter=chr(i):
self.add_letter_in_plugboard(letter, lst_buttons))
plugboard_letter.grid(row=9, column=i - 64, pady=5, padx=5)
lst_buttons.append(plugboard_letter)
for i in range(74, 83):
plugboard_letter = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3,
command=lambda letter=chr(i):
self.add_letter_in_plugboard(letter, lst_buttons))
plugboard_letter.grid(row=10, column=i - 73, pady=5, padx=5)
lst_buttons.append(plugboard_letter)
for i in range(83, 91):
plugboard_letter = Button(self.root, text=" " + chr(i) + " ", font=self.text_font,
bg="khaki", relief=RIDGE, height=2, width=3,
command=lambda letter=chr(i):
self.add_letter_in_plugboard(letter, lst_buttons))
plugboard_letter.grid(row=11, column=i - 82, pady=5, padx=5)
lst_buttons.append(plugboard_letter)
self.set_plugboard(lst_buttons)
button_save_settings = Button(self.root, text="save settings and go to simulator",
height=2, width=35, font=self.text_font,
command=lambda: self.save_settings(options_rotor1.get(),
options_rotor2.get(),
options_rotor3.get(),
options_rotor_l1.get(),
options_rotor_l2.get(),
options_rotor_l3.get()))
button_save_settings.grid(row=12, column=0, columnspan=10, rowspan=2, pady=20, padx=5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]",
"def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)",
"def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()",
"def test_020_change_settings(self):\n\n testflow.step(\"Modifying settings via CLI\")\n assert self.settings_cli.run(\n 'set',\n name='MESSAGE_OF_THE_DAY',\n value='Zdravicko',\n )[0], \"Failed to change MESSAGE_OF_THE_DAY setting\"\n\n testflow.step(\"Querying for modified setting\")\n show_out = self.settings_cli.run(\n 'show',\n name='MESSAGE_OF_THE_DAY',\n )\n assert show_out[0], 'Failed to run show command'\n assert 'Zdravicko' in show_out[1], 'Setting value was not changed'\n\n testflow.step(\"Modifying setting back to default\")\n assert self.settings_cli.run( # Change value back to default\n 'set',\n name='MESSAGE_OF_THE_DAY',\n value='',\n )[0], \"Failed to change MESSAGE_OF_THE_DAY setting to defaul value\"",
"def edit_settings(self):\n while True:\n os.system('cls' if os.name == 'nt' else 'clear')\n valid_numbers, number_setting_corr = self.print_settings()\n print('Which setting you want to change? Enter \"number, new value\" to modify, or \"done\" to exit.')\n print('Observe the possible values for each setting! They are case sensitive. '\n 'Inputting wrong values might break the program. \\n')\n choice = input('Input:')\n if choice == 'done':\n break\n if ',' not in choice:\n print('Invalid input. Place the number, followed by a comma, followed by its value. Eg: 1,TRUE')\n continue\n if len(choice.split(',')) != 2:\n print('Invalid input, must have only one comma')\n continue\n\n var, val = choice.split(',')\n if var not in valid_numbers:\n print('Invalid number.')\n continue\n real_var = number_setting_corr[var] # Changes from a number to the actual parameter\n if val.lower() == 'true':\n setattr(self, real_var, True)\n continue\n elif val.lower() == 'false':\n setattr(self, real_var, False)\n continue\n else:\n setattr(self, real_var, val)\n\n # todo: check for all possible values to avoid inputting wrong settings and messing everything up.\n # if val not in valid_options_nl_sorting:\n # print('Invalid nonlinear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in valid_options_lin_sorting:\n # print('Invalid linear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in models:\n # print('Invalid nonlinear fitting model. Case sensitive! Be very precise.')\n # continue\n\n print('===Final settings===')\n _, _ = self.print_settings()\n self.save_settings()\n return",
"def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))",
"def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!",
"def on_actionSettings_triggered(self):\n self.start_app(SettingsApp)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def updateSettingsUI(self):\n\n pass",
"def _edit_setting(self):\n settings = fileIO.load_json(\"settings.json\")\n self._list_settings(settings=settings)\n option = False\n while not option: #While loop until valid setting given\n option = input(\"Please type the setting you would like to change: \")\n if option not in settings:\n option = False\n newSetting = input(\"Please enter what you would like to change that setting to: \")\n command = \"edit_setting {0} {1}\".format(option, newSetting)\n return(command)",
"def settings( self, selection ):\r\n if( self.__optionsDatabase.showOptionsDatabase() ):\r\n self.main( selection )",
"def settings_load(self):\n self.ui.spinBox_ATSP.setValue(self.default['ATSP'])\n\n if self.default['serialLabel'] == 'bt':\n self.ui.btRadio.setChecked(True)\n try:\n os.system(\"blueman-manager\")\n except:\n print \"Please install 'blueman' package\"\n elif self.default['serialLabel'] == 'usb':\n self.ui.usbRadio.setChecked(True)\n else:\n self.ui.devRadio.setChecked(True)\n\n if self.default['units'] == 'metric':\n self.ui.units_metric_radio.setChecked(True)\n else:\n self.ui.units_US_radio.setChecked(True)\n\n return",
"def update_ionic_settings(self, key, value):\n if self._ionic_settings:\n if key in self._ionic_settings:\n self._ionic_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {ediff ,nsw, ibrion ,isif, isym, nblock, kblock}\")\n else:\n print(\"magnetic settings not present!\")",
"async def change(self, ctx: Context):\n\t\tawait self.send(f\"If you wish to see your settings, go on our site: https://asxlvm.github.io/#/settings • If you already saw your settings and wish to change them. What do you want to change?\", whisper=[ctx.author.id])\n\t\tawait asyncio.sleep(2)\n\t\tawait self.send(f\"Options: allowMentions [bool] • autoRejectFights [bool] • passiveMode [bool] • whisperEconomy [bool]• onJoinMsg [bool] • allowUserInteraction [bool] | [bool] = True / False\", whisper=[ctx.author.id])\n\t\twaitforevent = await self.wait_for('message', check=lambda message: ctx.author.id == message.author.id)\n\t\twfcl = waitforevent.content.lower()\n\t\tusers = await self.get_settings_data()\n\t\tuserid = ctx.author.id\n\t\tif wfcl == \"allowmentions true\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"allowMentions\"] = True\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed allowMentions to True for you.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"allowmentions false\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"allowMentions\"] = False\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed allowMentions to False for you.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"autorejectfights true\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"autoRejectFights\"] = True\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed autoRejectFights to True for you.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"autorejectfights false\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"autoRejectFights\"] = False\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed autoRejectFights to False for you.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"passivemode true\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} as there isn't economy right now, you may not change this setting.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"passivemode false\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} as there isn't economy right now, you may not change this setting.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"whispereconomy true\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} as there isn't economy right now, you may not change this setting.\")\n\t\telif wfcl == \"whispereconomy false\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} as there isn't economy right now, you may not change this setting.\")\n\t\telif wfcl == \"onjoinmsg true\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"onJoinMsg\"] = True\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed onJoinMsg to True for you.\", whisper=[ctx.author.id])\n\t\telif wfcl 
== \"onjoinmsg false\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"onJoinMsg\"] = False\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed onJoinMsg to False for you.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"allowuserinteraction true\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"allowUserInteraction\"] = True\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed allowUserInteraction to True for you.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"allowuserinteraction false\":\n\t\t\tawait self.open_settings(userid, ctx.author.username)\n\t\t\tusers[str(userid)][\"allowUserInteraction\"] = True\n\t\t\twith open(\"settings.json\", \"w\") as f:\n\t\t\t\tjson.dump(users, f)\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I have changed allowUserInteraction to True for you.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"allowmentions\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} you didn't supply a boolean, run the command again.\", whisper=[ctx.author.id])\n\n\t\telif wfcl == \"autorejectfights\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} you didn't supply a boolean, run the command again.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"passivemode\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} you didn't supply a boolean, run the command again.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"whispereconomy\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} you didn't supply a boolean, run the command again.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"onjoinmsg\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} you didn't supply a boolean, run the command again.\", whisper=[ctx.author.id])\n\t\telif wfcl == \"allowuserinteraction\":\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} you didn't supply a boolean, run the command again.\", whisper=[ctx.author.id])\n\t\telse:\n\t\t\tawait asyncio.sleep(2)\n\t\t\treturn await self.send(f\"{ctx.author.mention} I believe that is an incorrect argument, try running the command again.\", whisper=[ctx.author.id])",
"def _onSettings(self, event):\n dialog = sc.SettingsDialog(self)\n if dialog.ShowModal() == wx.ID_OK:\n dialog.saveSettings()\n dialog.Destroy()",
"def update(self):\n if self.name == \"Settings\":\n args = [\"NAME:Settings\"]\n else:\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n if self.name == \"Settings\":\n self.meshmodule.EditGlobalMeshRegion(args)\n else:\n self.meshmodule.EditMeshRegion(self.name, args)\n return True",
"def onSettings(self):\n pass",
"def change_settings(new_settings={}, file=None):\n gl = globals()\n if file is not None:\n execfile(file)\n gl.update(locals())\n gl.update(new_settings)\n # Here you can add some code to check that the new configuration\n # values are valid.",
"def configure(self,user_input):\n if user_input==\"configure formal\":\n self.configure_formality(True)\n elif user_input==\"configure informal\":\n self.configure_formality(False)\n elif user_input==\"configure delay\":\n self.configure_delay(2)\n elif user_input==\"configure no delay\":\n self.configure_delay(0)\n '''\n #%%\n '''",
"def update_settings(self):\n settings = {\n \"reference\": self,\n \"draw_tangents\": self.cbDrawTangents.isChecked(),\n }\n if self.cbShowSolarAngle.isChecked():\n settings[\"show_solar_angle\"] = self.cbSolarAngleType.currentText(), self.cbSolarBody.currentText()\n else:\n settings[\"show_solar_angle\"] = None\n\n self.view.set_remote_sensing_appearance(settings)",
"def settings_mod(r):\r\n\tif \"Speech:\" in r:\r\n\t\tsettings[0] = yesNo(r, 'Speech: on', 'Speech: off')\r\n\t\tr = \"settings\"\r\n\t\timport speech\r\n\t\tif \"off\" in settings[0]:\r\n\t\t\tspeech.speechOn = False\r\n\t\telse:\r\n\t\t\tspeech.speechOn = True\r\n\telif r == \"exit\":\r\n\t\tr = \"start\"\r\n\treturn r",
"def showSettings(self):\n self.c.show()",
"def state_chosen_do(cfg, app, win, events):",
"def setSettings(self):\r\n # 根据默认参数设置,根据是否使用config来设定参数\r\n if self.__config__[\"config\"] is False:\r\n self.json.setChecked(False)\r\n self.json_path.setEnabled(False)\r\n self.json_select.setEnabled(False)\r\n\r\n tem = [self.l_line, self.r_line, self.p_line]\r\n [x.setEnabled(True) for x in tem]\r\n\r\n for key, value in self.elements.items():\r\n key.setEnabled(True)\r\n\r\n # 设定程序或者json文件的路径\r\n if self.__config__[\"exe\"]:\r\n self.executable.setText(self.__config__[\"exe\"])\r\n else:\r\n self.executable.clear()\r\n if self.__config__[\"config_path\"]:\r\n self.json_path.setText(self.__config__[\"config_path\"])\r\n else:\r\n self.json_path.clear()\r\n \r\n # 设定其他参数\r\n if self.__config__[\"paras\"]:\r\n for key, value in self.__config__[\"paras\"].items():\r\n element = self.parameters[key]\r\n if value not in (\"::\", \"\"):\r\n element.setEnabled(True)\r\n\r\n key1 = get_key_by_value(self.elements, element)\r\n if key1:\r\n key1.setEnabled(True)\r\n key1.setChecked(True)\r\n\r\n if isinstance(element, QLineEdit):\r\n element.setText(value)\r\n elif isinstance(element, QComboBox):\r\n index = element.findText(value, Qt.MatchFixedString)\r\n if index >= 0:\r\n element.setCurrentIndex(index)",
"def changeRingSetting(self):\n #Input code to accommodate function of Ring setting",
"def on_pre_enter(self):\n Logger.info('Application: Changed to the Settings screen.')",
"def change_settings(settings, methods=['GET', 'POST']):\n message = resolve_settings(settings)\n socketio.emit('settings_update', SETTINGS)\n socketio.emit('log', message)",
"def setup_mode():\n status_label.color = WHITE\n status_label.text = \"-SET-\"\n\n ave_label.color = BLACK # Turn off average label and value display\n ave_value.color = BLACK\n\n max_value.text = str(MAX_RANGE_F) # Display maximum range value\n min_value.text = str(MIN_RANGE_F) # Display minimum range value\n\n time.sleep(0.8) # Show SET status text before setting parameters\n status_label.text = \"\" # Clear status text\n\n param_index = 0 # Reset index of parameter to set\n\n setup_state = \"SETUP\" # Set initial state\n while setup_state == \"SETUP\":\n # Select parameter to set\n setup_state = \"SELECT_PARAM\" # Parameter selection state\n while setup_state == \"SELECT_PARAM\":\n param_index = max(0, min(2, param_index))\n status_label.text = SETUP_COLORS[param_index][0]\n image_group[param_index + 226].color = BLACK\n status_label.color = BLACK\n time.sleep(0.25)\n image_group[param_index + 226].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.25)\n\n param_index -= get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_index = param_index - 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_index = param_index + 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"ADJUST_VALUE\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Adjust parameter value\n param_value = int(image_group[param_index + 230].text)\n\n while setup_state == \"ADJUST_VALUE\":\n param_value = max(32, min(157, param_value))\n image_group[param_index + 230].text = str(param_value)\n image_group[param_index + 230].color = BLACK\n status_label.color = BLACK\n time.sleep(0.05)\n image_group[param_index + 230].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.2)\n\n param_value += get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_value = param_value + 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_value = param_value - 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"SETUP\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Exit setup process\n status_label.text = \"RESUME\"\n time.sleep(0.5)\n status_label.text = \"\"\n\n # Display average label and value\n ave_label.color = YELLOW\n ave_value.color = YELLOW\n return int(alarm_value.text), int(max_value.text), int(min_value.text)"
] | [
"0.67250514",
"0.6616514",
"0.6587167",
"0.6572284",
"0.65574336",
"0.65571165",
"0.65545833",
"0.6387912",
"0.6380834",
"0.6380834",
"0.6371281",
"0.63633394",
"0.63439715",
"0.61811125",
"0.6178413",
"0.61066014",
"0.60833853",
"0.6053914",
"0.6019179",
"0.60047317",
"0.59926873",
"0.59766376",
"0.592824",
"0.59200627",
"0.5888633",
"0.5875814",
"0.5872004",
"0.5865873",
"0.5856222",
"0.5827039"
] | 0.7095382 | 0 |
this function adds a letter to the plugboard | def add_letter_in_plugboard(self, letter, lst_buttons):
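        # pass the letter to the enigma plugboard, then refresh the plugboard button display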
self.simulator_enigma.plugboard.add_letter(letter)
self.set_plugboard(lst_buttons) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_letter(self, letter):\r\n if (len(self.plugboard1) < 10 or\r\n (len(self.plugboard1) == 10 and self.plugboard2[-1] is None)) and \\\r\n letter not in self.plugboard1 and letter not in self.plugboard2:\r\n if len(self.plugboard1) == 0 or (len(self.plugboard1) < 10 and\r\n self.plugboard2[-1] is not None):\r\n self.plugboard1.append(letter)\r\n self.plugboard2.append(None)\r\n elif self.plugboard2[-1] is None:\r\n self.plugboard2[-1] = letter\r\n else:\r\n if letter in self.plugboard1:\r\n position = self.plugboard1.index(letter)\r\n self.plugboard1.remove(letter)\r\n if self.plugboard2[position] is not None:\r\n self.plugboard2.remove(self.plugboard2[position])\r\n elif letter in self.plugboard2:\r\n position = self.plugboard2.index(letter)\r\n self.plugboard2.remove(letter)\r\n self.plugboard1.remove(self.plugboard1[position])\r\n else:\r\n return \"plugboard is full\"\r\n return None",
"def addChar(self, char):\n self.guessedChars.append(char)",
"def addch(self, stdscr, y, x, text):\n stdscr.addch(y, x, text, curses.color_pair(self.i))",
"def say_letter(self, keyboard, keycode, char, modifiers):\n\n if keycode[1] in ('shift', 'rshift'):\n return # ignore.. shifted keys will have their Shift modifier set\n elif keycode[1] == 'tab':\n self.play_sound('tab')\n elif keycode[1] == 'delete':\n self.play_sound('delete')\n elif keycode[1] == 'backspace':\n self.textbox.text = self.textbox.text[:-1]\n self.play_sound('backspace')\n elif keycode[1] == 'enter':\n self.textbox.text += '\\n'\n self.play_sound('enter')\n elif char == ' ':\n self.textbox.text += ' '\n self.play_sound('space') \n elif char is None:\n self.play_sound('error')\n else:\n if 'shift' in modifiers or 'rshift' in modifiers:\n self.textbox.text += char.upper()\n else:\n self.textbox.text += char\n if RENAMED_CHAR.get(char):\n self.play_sound(RENAMED_CHAR[char])\n else: \n self.play_sound(char)",
"def update_letters_guessed(self, letter):\n self.letters_guessed = self.letters_guessed + letter",
"def insert(self, word):\n pointer = self.tries\n for i in range(len(word)):\n ascii = ord(word[i]) - ord('a')\n if pointer[ascii] == None:\n pointer[ascii] = [None] * 26\n pointer = pointer[ascii]\n pointer.append(word)",
"def add_char(self, coord, char, modify=False):\n if modify:\n range_y, range_x = self._map_dims\n new_coord = [coord[0]+range_y[0]-1, coord[1]+range_x[0]-1]\n self._screen.addch(new_coord[0], new_coord[1], char)\n self._screen.refresh()\n return new_coord\n else:\n self._screen.addch(coord[0], coord[1], char)\n self._screen.refresh()\n return coord",
"def insertChar(self, ch):\n word, cx = self.edCursor.getPos()\n string = word.string[:cx] + ch + word.string[cx:]\n word.setString(string)\n # Re-render from tline:\n self.rsubject.linify(word.tline)\n self.edCursor.setPos(word, cx+1)",
"def addch(self, posy, posx, character, color_pair):\r\n if posy < 0 or posy > self.height - 1:\r\n return\r\n if posx < 0 or posx > self.width - 1:\r\n return\r\n if posx == self.width - 1 and posy == self.height - 1:\r\n return\r\n self.win.addch(posy, posx, character, color_pair)",
"def append(self, char):\n self.sequence += char",
"def on_text(self, char: str, game: type):",
"def display():\n screen.addch(head[0],head[1],'x')",
"def display_letters(word, guesses):\n pass",
"def find_letter(self,letter):\n for i in range(0,len(self._word)):\n if self._word[i] == letter:\n self.new_string[i] = letter\n\n if letter not in self._word:\n self.letters_wrong += 1\n\n self.selected_letters += letter + ', '\n return self.print_new_word(self.new_string)",
"def color_letter(self, letter, lst_labels, plain_text_widget, encrypted_text_widget):\r\n new_letter, txt_encryption = self.simulator_enigma.encrypt_letter(letter)\r\n lst_encryption_letter_stages = [i[-1] for i in txt_encryption.split(\"\\n\")]\r\n lst_encryption_letter_stages.remove(')')\r\n self.simulator_encryption.append((txt_encryption, lst_encryption_letter_stages))\r\n lst_labels[ord(new_letter) - 65].config(bg=\"yellow\")\r\n lst_labels[ord(new_letter) - 65].after(300, lambda: lst_labels[ord(new_letter) -\r\n 65].config(bg=\"khaki\"))\r\n\r\n plain_text_widget.config(state=NORMAL)\r\n plain_text_widget.insert(END, letter)\r\n plain_text_widget.config(state=DISABLED)\r\n encrypted_text_widget.config(state=NORMAL)\r\n encrypted_text_widget.insert(END, new_letter)\r\n encrypted_text_widget.config(state=DISABLED)",
"def addChar (self, c) :\r\n # Notice the \\n so we can notice when new lines begin\r\n if (c=='\\n') :\r\n self.lineNumber_ += 1\r\n self.charNumber_ = 0\r\n \r\n # Keep the last 1024 or so characters\r\n if (self.data_.full()) :\r\n self.data_.get()\r\n self.data_.put(c)\r\n self.charNumber_ += 1",
"async def character(self, ctx, character=None):\n\n if character.lower() in [c.lower() for c in self.characters]:\n return await ctx.send(f\"`ERROR: Duplicate Character` {character} is already added.\")\n\n created_char = eqdkp.create_character(character.capitalize())\n if created_char:\n self.characters.append(created_char)\n await ctx.send(f\"{created_char.name} was created!\")\n else:\n await ctx.send(f\"Failed to create {character}. Please try again later, or create them manually.\")",
"def insert(self, index, chars, *args):\n self.config(state=NORMAL)\n Text.insert(self,index, chars, args)\n self.config(state=DISABLED)",
"def add_char(self, char):\n if self.pos >= self.line_length():\n self.buffer.append_char(char, self.line)\n else:\n self.buffer.insert_char(char, self.line, self.pos)\n \n self.pos += 1\n self.has_changes = True",
"def encode(self, letter):\n\n for plug in self.plugleads:\n if plug.pair[0] == letter or plug.pair[1] == letter:\n return plug.encode(letter)\n return letter",
"def place_in_alphabet(letters):\r\n\tfor l in letters:\r\n\t\tprint(l, ':', str(ALPHABET.index(l)+1))",
"def ranPlugboard(self):\n alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') #Creates a list of the alphabet characters\n plugboard = []\n \n while len(alphabet) != 0: #While there are characters remaining in the alphabet\n i = random.choice(alphabet) #Make a random choice\n alphabet.remove(i) #Remove it from the alphabet\n plugboard.append(i) #And add it to the plugboard array\n \n raw_plugboard = ''.join(plugboard) #Join all the characters in the plugboard array into a string\n \n plugboard = [(raw_plugboard[i:i+2]) for i in range(0, len(raw_plugboard), 2)] #Split the array of characters into groups of 2\n \n return ' '.join(plugboard) #Join each of the individual arrays to make a string",
"def add_special_character(self, roll):\n \n # Determine where to add special character, 0 means at the beginning, \n #* means new roll\n # 1st dimension -- number of letters, 2nd dimension -- dice roll\n char_pos = { 2: { '1': 1, '2': 2, '3': 0, '4': 1, '5': 2, '6': 0},\n 3: { '1': 1, '2': 2, '3': 3, '4': 0, '5': '*', '6': '*'},\n 4: { '1': 1, '2': 2, '3': 3, '4': 4, '5': 0, '6': '*'},\n 5: { '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 0},\n 6: { '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6}}\n \n # Determin which character to add\n # 1st dimension -- 1st roll, 2nd dimension -- 2nd roll\n char_table = { '1': { '1': '!', '2': '&', '3': '+', '4': '|', '5': '<', '6': '~'},\n '2': { '1': '@', '2': '*', '3': '[', '4': '`', '5': '>', '6': '_'},\n '3': { '1': '#', '2': '(', '3': ']', '4': ';', '5': '/', '6': '3'},\n '4': { '1': '$', '2': ')', '3': '{', '4': ':', '5': '?', '6': '5'},\n '5': { '1': '%', '2': '-', '3': '}', '4': '\\'', '5': '.', '6': '7'},\n '6': { '1': '^', '2': '=', '3': '\\\\', '4': '\"', '5': ',', '6': '9'}}\n \n if(self.generate):\n roll = {0: str(random.randint(1,len(self.passphrase)))}\n word = self.passphrase[int(roll[0])-1]\n elif(int(roll[0]) > len(self.passphrase)):\n print(\"No special character added -- you have to roll again and change 1st number of \\nyour special character quadruple\\n\")\n return\n else:\n word = self.passphrase[int(roll[0])-1]\n \n if(self.generate):\n # DO roll dice WHILE _p == '*'\n while True:\n _p = char_pos[len(word)][str(random.randint(1,6))]\n if(_p != '*'):\n position = _p\n break\n else:\n position = char_pos[len(word)][roll[1]]\n if(position == '*'):\n print(\"No special character added -- you have to roll again and change 2nd number of \\nyour special character quadruple\\n\")\n return\n \n if(self.generate):\n roll[2] = str(random.randint(1,6))\n roll[3] = str(random.randint(1,6))\n \n char = char_table[roll[2]][roll[3]]\n new_word = word[:position] + char + word[position:]\n \n self.passphrase[int(roll[0])-1] = new_word",
"def _put_chr_at(self, char, row, col, color, adjustment_x=.19, adjustment_y=.19):\n self._goto_piece_xy(row, col, adjustment_x, adjustment_y)\n self.pen.color(color)\n self.pen.write(char, font=(\"Courier\", round(self.square_side_size * .7),\n \"normal\"))",
"def int_21H_1(self):\r\n\r\n ascii_char = self.GUI.get_key_value() # ten do w wczytania\r\n self.registers['AX'].move_into(ascii_char, 0, is_int=True) # zapisanie kodu ascii do AXL\r",
"def add(self, name, command):",
"def make_move(self, move, letter):\n self.positions[move] = letter",
"def addKeyWord(self, kWord):\n #kWord.printKeyWord()\n self.sentence.append(kWord)",
"def addstr(self,name,string):\n\t\tself.windows[name].addstr(string)",
"def guess_letter(self):\r\n letter = input(\"# Enter a Letter :\")\r\n if not letter:\r\n print(\"Please Enter a Valid Value\")\r\n else:\r\n result = game_instance.check_letter(letter)\r\n\r\n if result == \"NOT FOUND\":\r\n print(\"WRONG. No corresponding letters found in the word. Try Again!\")\r\n else:\r\n temp = list(self.current_word)\r\n count=0;\r\n for x in result:\r\n count+=1\r\n temp[x] = letter\r\n self.current_word = \"\".join(temp)\r\n print(\"Good Job. You Found \"+str(count)+\" Letters.\")"
] | [
"0.74521935",
"0.6864215",
"0.6653494",
"0.6578328",
"0.6481865",
"0.64462173",
"0.64435834",
"0.6416639",
"0.6378046",
"0.61352384",
"0.61273557",
"0.60847396",
"0.60462296",
"0.60263854",
"0.60161275",
"0.597719",
"0.5976378",
"0.5974929",
"0.596187",
"0.59597677",
"0.5939957",
"0.5928814",
"0.59145117",
"0.5872332",
"0.5860631",
"0.5813127",
"0.5811276",
"0.5785548",
"0.5764174",
"0.57488906"
] | 0.84619296 | 0 |
clears the screen of all widgets. | def clear_screen(self):
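        # destroy every widget managed by grid, then every widget managed by pack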
lst_grid = self.root.grid_slaves()
for widget in lst_grid:
widget.destroy()
lst_pack = self.root.pack_slaves()
for widget in lst_pack:
widget.destroy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear(self) -> None:\n self.screen.clear()",
"def clear(screen):\n screen.clear()\n screen.refresh()",
"def clear_widgets(self):\n self.json_progress = None\n self.progress_message_bar = None\n self.json_progress_message_bar = None\n if self.progress_message_bar_widget:\n self.iface.messageBar().popWidget(self.progress_message_bar_widget)\n self.progress_message_bar_widget = None\n if self.json_progress_message_bar_widget:\n self.iface.messageBar().popWidget(self.json_progress_message_bar_widget)\n self.json_progress_message_bar_widget = None",
"def clear(self):\r\n\r\n # Clear the widgets list\r\n self.widgets_list = []\r\n\r\n # Refresh the scroll area\r\n self._refresh()",
"def clear_screen(self) -> None:\n assert self.screen is not None\n self.screen.clear()\n self.refresh_screen()",
"def clearScreen():\n pass",
"def reset_widgets(self):\n\n widgets = [\n self.test_input,\n self.results_input\n ]\n\n for widget in widgets:\n clear_text(widget)",
"def clear(screen=None):\n if screen is None:\n screen = lv.scr_act()\n screen.clean()",
"def clearScreen(self):\n self.removeFrame(self.frame1)\n self.removeFrame(self.frame2)\n self.separator.destroy()\n #Here, the app will lose the row and column configuration and does not\n #apply new configuration. Don't know why?. So that, I destroy the\n #parent (in this case, a frame), create a new frame and set it again.\n self.parent.destroy()\n mainFrame = tk.Frame(self.store[\"root\"], bg=\"#FFF\")\n self.parent = mainFrame\n self.parent.grid(column=0, row=0, sticky=\"nsew\")",
"def clearScreen():\n dislin.erase()",
"def ClearDisplay():\n display.fill(0)",
"def clear(self):\r\n self.delete(0, tkinter.END)",
"def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n self.display_heading()\n self.display_empty_lines()",
"def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()",
"def clear(self):\n self.command(self.LCD_CLEARDISPLAY)\n self._cursor_pos = (0, 0)\n self._content = [[0x20] * self._cols for _ in range(self._rows)]\n self._msleep(2)",
"def clear_screen():\n os.system('cls')",
"def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clearFrame(self, event=None):\n for widget in self.winfo_children():\n widget.destroy()\n del self.tiles[:]",
"def clear():\n # TODO: this should actually create a stack of output so I can test each screen\n lines.clear()",
"def clear_display(self) -> None:\n pass",
"def clear(self):\n self.command(_LCD_CLEARDISPLAY)\n self._cursor_pos = (0, 0)\n self._content = [[0x20] * self.cols for _ in range(self.rows)]\n time.sleep(2*MILLISECOND)",
"def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.selected_element = None\n self.pressed_elements.clear()",
"def clear_text(self):\n # use the .children attribute to access all widgets that are \"in\" another widget\n self.root.ids.Title.text = \"\"\n self.root.ids.Artist.text = \"\" #Empty the text boxes\n self.root.ids.Year.text = \"\"\n for instance in self.root.ids.entriesBox.children: #Normalise the button state\n instance.state = 'normal'\n self.root.ids.statusLabel2.text=\"\" #Empty the status label text box",
"def clear_canvas():\n self.parent_class.canvas.delete(\"all\")",
"def clear(self) -> None:\n\n self.screen.fill(self.bg)",
"def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if system_name().lower()==\"windows\" else \"clear\"\n\n # Action\n system_call(command)",
"def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if system_name().lower()==\"windows\" else \"clear\"\n\n # Action\n system_call(command)",
"def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if system_name().lower()==\"windows\" else \"clear\"\n\n # Action\n system_call(command)",
"def _clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')"
] | [
"0.8092894",
"0.803967",
"0.78183275",
"0.7779582",
"0.77743113",
"0.7751474",
"0.7615676",
"0.76115173",
"0.7582601",
"0.75689197",
"0.74467134",
"0.7411865",
"0.74054694",
"0.7385278",
"0.7318546",
"0.7276925",
"0.72769",
"0.72769",
"0.72497535",
"0.7242884",
"0.7230591",
"0.72142535",
"0.7156242",
"0.7150485",
"0.714945",
"0.71442276",
"0.71416587",
"0.71416587",
"0.71416587",
"0.7084573"
] | 0.86054236 | 0 |
Retrieve fastq files for the given lane, ready to process. | def get_fastq_files(directory, work_dir, item, fc_name, bc_name=None, glob_ext="_fastq.txt",
config=None, unpack=True):
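    # an explicit "files" list on the item is used when no barcode is given; otherwise fastqs are found by globbing the lane/flowcell pattern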
if "files" in item and bc_name is None:
names = item["files"]
if isinstance(names, basestring):
names = [names]
files = [x if os.path.isabs(x) else os.path.join(directory, x) for x in names]
else:
assert fc_name is not None
lane = item["lane"]
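        # build the glob pattern for this lane and flowcell, optionally narrowed to a specific barcode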
if bc_name:
glob_str = "%s_*%s_%s_*%s" % (lane, fc_name, bc_name, glob_ext)
else:
glob_str = "%s_*%s*%s" % (lane, fc_name, glob_ext)
files = glob.glob(os.path.join(directory, glob_str))
# Include gzipped files
glob_str = "%s.gz" % glob_str
files.extend(glob.glob(os.path.join(directory, glob_str)))
files.sort()
if len(files) > 2 or len(files) == 0:
raise ValueError("Did not find correct files for %s %s %s %s" %
(directory, lane, fc_name, files))
ready_files = []
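    # gunzip gzipped fastqs (when unpack is True) and convert BAM input to fastq before returning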
for fname in files:
if fname.endswith(".gz") and unpack:
# TODO: Parallelize using pgzip
ready_name = os.path.splitext(fname)[0]
ready_files.append(ready_name)
if not os.path.exists(ready_name):
cl = ["gunzip", fname]
subprocess.check_call(cl)
elif fname.endswith(".bam"):
ready_files = convert_bam_to_fastq(fname, work_dir, config)
else:
assert os.path.exists(fname), fname
ready_files.append(fname)
ready_files = [x for x in ready_files if x is not None]
return ready_files[0], (ready_files[1] if len(ready_files) > 1 else None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_fastqs(location, project_id, sample_id, lane=None):\n basename = '*.fastq.gz'\n if lane:\n basename = '*L00' + str(lane) + basename\n\n pattern = os.path.join(location, project_id, sample_id, basename)\n fastqs = find_files(pattern)\n app_logger.debug('Found %s fastq files for %s', len(fastqs), pattern)\n return fastqs",
"def get_fastqc_files(sample, unit, pairs, config, pre):\n if config[\"preprocessing\"][\"fastqc\"]:\n files = expand(config[\"paths\"][\"results\"]+\"/intermediate/fastqc/{sample}_{unit}_{pair}{PREPROCESS}_fastqc.zip\",\n sample=sample, unit=unit, pair=pairs, PREPROCESS=pre)\n return files\n return []",
"def find_all_fastqs(location):\n fastqs = []\n for name, dirs, files in os.walk(location):\n fastqs.extend(os.path.join(name, f) for f in files if f.endswith('.fastq.gz'))\n app_logger.debug('Found %s fastqs in %s', len(fastqs), location)\n return fastqs",
"def _extract_fastqs_from_fast5(self):\n\t\tfor id, h5path in fastq_paths[self.version].iteritems():\n\t\t\ttry:\n\t\t\t\ttable = self.hdf5file[h5path % self.group]\n\t\t\t\tfq = formats.Fastq(table['Fastq'][()])\n\t\t\t\tfq.name += \" \" + self.filename\n\t\t\t\tself.fastqs[id] = fq\n\t\t\texcept Exception, e:\n\t\t\t\tpass",
"def get_result_files_fastqc(config):\n res_zip = []\n res_html = []\n for path in get_result_files_demux(config):\n ext = \".fastq.gz\"\n if path.endswith(ext):\n folder = os.path.dirname(path)\n base = os.path.basename(path)[: -len(ext)]\n res_zip.append(os.path.join(folder, \"qc\", \"fastqc\", base + \"_fastqc.zip\"))\n res_html.append(os.path.join(folder, \"qc\", \"fastqc\", base + \"_fastqc.html\"))\n return {\"zip\": res_zip, \"html\": res_html}",
"def download_fastq():\n\n mkdir(FASTQ_DIR)\n\n template = \"\"\"fastq-dump --split-files --gzip {}\"\"\"\n\n printp(\"\"\"\\n#\\n# download all the fastq files\\n#\"\"\")\n printp(\"\"\"\\n# drmr:label fastq-download\"\"\")\n printp(\"\"\"\\n# drmr:job time_limit=2h working_directory={}\"\"\".format(FASTQ_DIR))\n\n for library, info in DATA.items():\n printp(template.format(get_srr(library)))\n printp(template.format(get_input_control_srr(library)))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")",
"def get_files(self, step):\n dht = get_remote_node(self.dht_ip, self.dht_port)\n files = dht.get(get_hash(filestep + \"|\" + str(step)))\n return files",
"def to_fastq(self, prefix='', threads=1):\n # Write to uncompressed FASTQ for speed\n fastqs = [\n f'{prefix}_{i+1}.fastq.gz' if prefix else f'{i+1}.fastq.gz'\n for i in range(self.technology.n_files)\n ]\n logger.info(f'Splitting BAM file into FASTQs {\", \".join(fastqs)}')\n logger.warning('All quality scores will be converted to F')\n files = []\n lengths = [0, 0, 0]\n for substring in self.technology.barcode_positions + self.technology.umi_positions:\n lengths[substring.file\n ] = max(lengths[substring.file], substring.stop)\n\n try:\n for fastq in fastqs:\n files.append(open_as_text(fastq, 'w'))\n\n # Count total number only if the bam is local\n parse = urlparse(self.path)\n if not parse.scheme:\n with pysam.AlignmentFile(self.path, 'rb', threads=threads) as f:\n count = f.count(until_eof=True)\n logger.info(f'Detected {count} BAM entries')\n else:\n logger.warning((\n 'Skip counting total BAM entries in remote BAM. '\n 'This means a progress bar can not be displayed.'\n ))\n\n with pysam.AlignmentFile(self.path, 'rb', threads=threads) as f,\\\n tqdm() if parse.scheme else tqdm(total=count) as pbar:\n for item in f.fetch(until_eof=True):\n reads = ['N' * l for l in lengths] # noqa\n barcodes, umis, sequence = BAM.EXTRACT_FUNCTIONS[\n self.technology.name](item) # noqa\n\n # Set sequence.\n reads[self.technology.reads_file.file] = sequence\n\n # Barcode and UMI\n for barcode, substring in zip(\n barcodes, self.technology.barcode_positions):\n bc = reads[substring.file]\n reads[\n substring.file\n ] = f'{bc[:substring.start]}{barcode}{bc[substring.stop:]}'\n for umi, substring in zip(umis,\n self.technology.umi_positions):\n u = reads[substring.file]\n reads[\n substring.file\n ] = f'{u[:substring.start]}{umi}{u[substring.stop:]}'\n\n # Write to each file.\n for file, read in zip(files, reads):\n file.write(f'@{item.query_name}\\n')\n file.write(f'{read.upper()}\\n')\n file.write('+\\n')\n file.write(f'{\"F\" * len(read)}\\n')\n\n pbar.update(1)\n\n finally:\n for file in files:\n file.close()\n\n return fastqs, [\n OrderedTechnology(self.technology, tuple(range(len(fastqs))))\n ]",
"def find_and_download_files(context):\n\n\n input_path = 'input/'\n if os.path.isdir(input_path):\n log.debug('Path already exists: ' + input_path)\n else:\n log.debug('Creating: ' + input_path)\n os.mkdir(input_path)\n\n fw = context.client\n\n if 'classification_measurement' in context.config:\n class_meas = context.config['classification_measurement'].split()\n else:\n class_meas = ['T1']\n\n # session and acquisition include/exclude lists can come from:\n # project info metadata,\n # subject info metadata, and\n # config options\n # The last one wins (how about getting it from an input file also, eh?)\n ses_exclude_list = None\n ses_include_list = None\n acq_exclude_list = None\n acq_include_list = None\n\n fs = 'freesurfer_longitudinal_'\n where = 'Found in project info'\n # check for exclude/include lists of regexs for sessions in project info\n sel = context.gear_dict['project'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['project'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in project info\n ael = context.gear_dict['project'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['project'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in subject info'\n # check for exclude/include lists of regexs for sessions in subject info\n sel = context.gear_dict['subject'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['subject'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in subject info\n ael = context.gear_dict['subject'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['subject'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in config'\n # set up exclude/include lists of reegexs for sessions in config\n if 'session_excludelist' in context.config:\n ses_exclude_list = context.config['session_excludelist'].split()\n log.info(where+' session_excludelist: \"'+str(ses_exclude_list)+'\"')\n if 'session_includelist' in context.config:\n ses_include_list = context.config['session_includelist'].split()\n log.info(where+' session_includelist: \"'+str(ses_include_list)+'\"')\n\n # set up exclude/include lists of reegexs for acquisitions in config\n if 'acquisition_excludelist' in context.config:\n acq_exclude_list = context.config['acquisition_excludelist'].split()\n log.info(where+' acquisition_excludelist: \"'+str(acq_exclude_list)+'\"')\n if 'acquisition_includelist' in context.config:\n acq_include_list = context.config['acquisition_includelist'].split()\n log.info(where+' acquisition_includelist: \"'+str(acq_include_list)+'\"')\n\n # go through all sessions, 
acquisitions to find files\n for session in context.gear_dict['subject'].sessions():\n\n lemme_out = False\n if ses_exclude_list:\n for regex in ses_exclude_list:\n if re.search(regex, session.label): # if excluded, skip\n log.info('Session \"' + session.label + '\" matches ' + \\\n 'exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if ses_include_list:\n match = False\n for regex in ses_include_list:\n if not re.search(regex, session.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Session \"' + session.label + '\" matches ' \\\n 'an inclusion regex, keeping it')\n\n for acquisition in fw.get_session_acquisitions(session.id):\n\n lemme_out = False\n if acq_exclude_list:\n for regex in acq_exclude_list:\n if re.search(regex, acquisition.label): # if excluded, skip\n log.info('Acquisition \"' + acquisition.label + \\\n '\" matches exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if acq_include_list:\n match = False\n for regex in acq_include_list:\n if not re.search(regex, acquisition.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Acquisition \"' + acquisition.label + '\" ' + \\\n 'matches an inclusion regex, keeping it')\n\n for afile in acquisition.files:\n\n # Scan must be nifti\n if afile.type == 'nifti':\n\n found_one = False\n for cm in class_meas:\n if 'Measurement' in afile.classification:\n if cm in afile.classification['Measurement']:\n found_one = True\n log.info('Found ' + cm + ' file')\n\n if found_one:\n download_it(fw, acquisition, afile.name, input_path)\n context.gear_dict['visits'].append(\n make_file_name_safe(session.label, '_'))\n else:\n log.info('Ignoring ' + afile.name)",
"def _extract_fastas_from_fast5(self):\n\t\tfor id, h5path in fastq_paths[self.version].iteritems():\n\t\t\ttry:\n\t\t\t\ttable = self.hdf5file[h5path % self.group]\n\t\t\t\tfa = formats.Fasta(table['Fastq'][()])\n\t\t\t\tfa.name += \" \" + self.filename\n\t\t\t\tself.fastas[id] = fa\n\t\t\texcept Exception, e:\n\t\t\t\tpass",
"def get_fastq_files(self) -> List[Path]:\n return list(self.sequence_data_paths.fastq_path.glob(\"*.fastq.gz\")) # type: ignore",
"def get_fastq_info(output_dir, flowcell_id, storage_directory):\n filenames = os.listdir(output_dir)\n\n # Filter for gzipped fastq files\n filenames = filter(lambda x: \".fastq.gz\" in x, filenames)\n\n # Remove undetermined fastqs\n filenames = filter(lambda x: \"Undetermined\" not in x, filenames)\n\n # Check that the path actually has fastq files\n if not filenames:\n raise Exception(\"no fastq files in output directory {}\".format(output_dir))\n\n # Cell info keyed by dlp library id\n cell_info = {}\n\n # Fastq filenames and info keyed by fastq id, read end\n fastq_file_info = []\n\n for filename in filenames:\n match = re.match(\n r\"^(\\w+)-(\\w+)-R(\\d+)-C(\\d+)_S(\\d+)(_L(\\d+))?_R([12])_001.fastq.gz$\",\n filename,\n )\n\n if match is None:\n raise Exception(\n \"unrecognized fastq filename structure for {}\".format(filename)\n )\n\n filename_fields = match.groups()\n\n # primary_sample_id = filename_fields[0]\n library_id = filename_fields[1]\n row = int(filename_fields[2])\n column = int(filename_fields[3])\n lane_number = filename_fields[6]\n if lane_number is not None:\n lane_number = int(lane_number)\n read_end = int(filename_fields[7])\n\n if library_id not in cell_info:\n cell_info[library_id] = query_colossus_dlp_cell_info(library_id)\n\n index_sequence = cell_info[library_id][row, column][\"index_sequence\"]\n sample_id = cell_info[library_id][row, column][\"sample_id\"]\n\n fastq_path = os.path.join(output_dir, filename)\n\n if not fastq_path.startswith(storage_directory):\n raise Exception(\n \"file {} expected in directory {}\".format(fastq_path, storage_directory)\n )\n fastq_filename = fastq_path.replace(storage_directory, \"\")\n fastq_filename = filename.lstrip(\"/\")\n\n fastq_file_info.append(\n dict(\n dataset_type=\"FQ\",\n sample_id=sample_id,\n library_id=library_id,\n library_type=BRC_LIBRARY_TYPE,\n index_format=BRC_INDEX_FORMAT,\n sequence_lanes=[\n dict(\n flowcell_id=flowcell_id,\n lane_number=lane_number,\n sequencing_centre=BRC_SEQ_CENTRE,\n sequencing_instrument=BRC_INSTRUMENT,\n read_type=BRC_READ_TYPE,\n )\n ],\n size=os.path.getsize(fastq_path),\n created=pd.Timestamp(\n time.ctime(os.path.getmtime(fastq_path)), tz=\"Canada/Pacific\"\n ),\n file_type=\"FQ\",\n read_end=read_end,\n index_sequence=index_sequence,\n compression=\"GZIP\",\n filename=fastq_filename,\n )\n )\n\n return fastq_file_info",
"def get_queue(lane):\n\tvehicles_in_lane = lane.Vehs\n\t# Collecte the attribute in lane of the vehicle of the lane and sum them\n\tqueue_in_lane = np.sum([vehicle.AttValue('InQueue') for vehicle in vehicles_in_lane])\n\treturn(queue_in_lane)",
"def get_fastqs(self, choice):\n\t\tif self.have_fastqs is False:\n\t\t\tself._extract_fastqs_from_fast5()\n\t\t\tself.have_fastqs = True\n\n\t\tfqs = []\n\t\tif choice == \"all\":\n\t\t\tfor fastq in self.fastqs:\n\t\t\t\tfqs.append(self.fastqs[fastq])\n\t\telif choice == \"fwd\":\n\t\t\t\tfqs.append(self.fastqs.get('template'))\n\t\telif choice == \"rev\":\n\t\t\t\tfqs.append(self.fastqs.get('complement'))\n\t\telif choice == \"2D\":\n\t\t\t\tfqs.append(self.fastqs.get('twodirections'))\n\t\telif choice == \"fwd,rev\":\n\t\t\t\tfqs.append(self.fastqs.get('template'))\n\t\t\t\tfqs.append(self.fastqs.get('complement'))\n\t\telif choice == \"best\":\n\t\t\t\tfqs.append(self.fastqs.get(self.get_best_type()))\n\n\t\treturn fqs",
"def all_lq_fq(self):\n return op.join(self.combined_dir, 'all.polished_lq.fastq')",
"def get_fastq_files(wildcards):\n return expand(os.path.join(fastq_dir, \"{sample}_{readpair}.fastq\"), readpair=[1, 2], **wildcards)",
"def poretools_fastq():\n dirs = os.listdir(my_dir)\n for folder in dirs:\n path_to_folder = os.path.join(my_dir, folder)\n subprocess.check_output(\"poretools fastq --type fwd {}//*.fast5 > {}_poretools.fq\"\n .format(path_to_folder, path_to_folder), shell=True)\n print(\"Finished folder {}\".format(folder))\n print(\"Finished extractions of FASTQs.\")",
"def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)",
"def list_of_expected_arrow_fq_files(self):\n def iter_script_to_get_fq(script_filename):\n for line in open(script_filename):\n # line might be like:\n # bash <arrow_dir>/c0to9.sh\n sh_file = line.strip().split()[-1]\n assert sh_file.endswith('.sh')\n yield sh_file[:-3] + '.arrowed.fastq'\n\n\n sge_ids = []\n submitted = {} # expected fq --> (\"local\" or SGE jobid, script used to get this)\n for line in open(self.arrow_submission_run_file):\n jobid, script = line.strip().split('\\t')\n # read the script to see which c<i>to<j>.sh files are associated with this\n for fq in iter_script_to_get_fq(script):\n submitted[fq] = (jobid, script)\n if jobid!='local':\n sge_ids.append(jobid)\n\n return sge_ids, submitted",
"def start_download_queue(self, n_threads=5):\n return _loader.start_queue(self.process_queue_entry, n_threads=n_threads, thread_prefix=\"DriveDownloader\")",
"async def queue_file_hashes(self, search):\n async with vt.Client(self.apikey) as client:\n it = client.iterator(\n \"/intelligence/search\", params={\"query\": search}, limit=self.num_files\n )\n async for file_obj in it:\n await self.queue.put(file_obj.sha256)",
"def download_fastq_files(fastq1_s3_path, fastq2_s3_path, working_dir):\n fastq_folder = os.path.join(working_dir, 'fastq')\n\n try:\n os.mkdir(fastq_folder)\n except Exception as e:\n pass\n\n local_fastq1_path = download_file(fastq1_s3_path, fastq_folder)\n local_fastq2_path = download_file(fastq2_s3_path, fastq_folder)\n\n # Isaac requires the fastqs to be symlinked as lane1_read1.fastq.gz and lane1_read2.fastq.gz\n os.symlink(local_fastq1_path, os.path.join(fastq_folder, 'lane1_read1.fastq.gz'))\n os.symlink(local_fastq2_path, os.path.join(fastq_folder, 'lane1_read2.fastq.gz'))\n\n return fastq_folder",
"def vpn_file_queue(folder):\n files = glob.glob(folder + '/*.ovpn')\n jobs = Queue(maxsize=0)\n for f in files:\n jobs.put(f)\n return jobs",
"def query_file(self):\n print(\"Start sending Query requests of av after AV upload for file {}\".format(self.file_name))\n request = copy.deepcopy(self.request_template)\n request['request'][0]['md5'] = self.md5\n data = json.dumps(request)\n response_j = json.loads('{}')\n status_label = False\n retry_no = 0\n while (not status_label) or (status_label == \"NOT_FOUND\"):\n print(\"Sending Query request for av for file {}\".format(self.file_name))\n response = requests.post(url=self.url + \"query\", data=data, verify=False)\n response_j = response.json()\n status_label = response_j['response'][0]['status']['label']\n if status_label != \"NOT_FOUND\":\n break\n print(\"av Query response status for file {} is still pending\".format(self.file_name))\n time.sleep(SECONDS_TO_WAIT)\n retry_no += 1\n if retry_no == MAX_RETRIES:\n print(\"Reached query max retries. Stop waiting for av results for file {}\".format(self.file_name))\n break\n return response_j",
"def fetchfile(self, query):\n cur = self.conn.cursor()\n exists = False\n i = 0\n limit = 1000\n datas = None\n while (not exists) and (i < limit):\n cur.execute(query)\n rows = cur.fetchall()\n if len(rows) == 0:\n break\n i = i + 1\n exists = path.isfile(rows[0][1])\n datas = rows[0]\n\n if not exists:\n self.markfile(datas[0])\n\n return datas",
"def scan(self):\n try:\n for dataset_folder in os.scandir(\n self.path_dict['DATASETS_FOLDER']): # phase one -> scan local datasets dir\n if not dataset_folder.name.startswith('.') and dataset_folder.is_dir():\n self.local_datasets.append(dataset_folder.name)\n print(\"Local dataset found : \", dataset_folder.name, 'Folder size',\n self.get_tree_size(\n os.path.join(self.path_dict['DATASETS_FOLDER'], dataset_folder.name)) / 10 ** 6,\n 'MB')\n for dataset in self.to_be_used_datasets:\n if dataset not in self.local_datasets:\n print(dataset, ' verisetinin bilgisayarınızda yüklü olmadığı görüldü. İndirilecek.')\n self.download_queue.append(dataset)\n print(\"Eğer bir verisetinin yanlış indirildiğini düşünüyorsanız, \"\n \"verisetini silip programı tekrar çalıştırın.\")\n return self.local_datasets\n except:\n print(\"Dataset Okuma sırasında bir hata oluşmuş olabilir.\")",
"def get_fastq(wildcards):\n return units.loc[(wildcards.sample, wildcards.unit), [\"fq1\", \"fq2\"]].dropna()",
"def find_fastq_files(directory):\n\n filepaths = []\n for dirpath, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if filename.endswith('.fastq'):\n filepaths.append(os.path.join(dirpath, filename))\n return filepaths",
"def readFastq(filename):\n sequences = []\n qualities = []\n \n with open(filename) as fh:\n while True:\n fh.readline() # skip name line\n seq = fh.readline().rstrip() #read base sequence\n fh.readline() # skip placeholder line\n qual = fh.readline().rstrip() # base quality line\n if len(seq) == 0:\n break\n sequences.append(seq)\n qualities.append(qual)\n \n return sequences, qualities",
"def readfq(fp): # this is a generator function\n last = None # this is a buffer keeping the last unprocessed line\n while True: # mimic closure; is it a bad idea?\n if not last: # the first record or a record following a fastq\n for l in fp: # search for the start of the next record\n if l[0] in '>@': # fasta/q header line\n last = l[:-1] # save this line\n break\n if not last: break\n name, seqs, last = last[1:].partition(\" \")[0], [], None\n for l in fp: # read the sequence\n if l[0] in '@+>':\n last = l[:-1]\n break\n seqs.append(l[:-1])\n if not last or last[0] != '+': # this is a fasta record\n yield name, ''.join(seqs), None # yield a fasta record\n if not last: break\n else: # this is a fastq record\n seq, leng, seqs = ''.join(seqs), 0, []\n for l in fp: # read the quality\n seqs.append(l[:-1])\n leng += len(l) - 1\n if leng >= len(seq): # have read enough quality\n last = None\n yield name, seq, ''.join(seqs); # yield a fastq record\n break\n if last: # reach EOF before reading enough quality\n yield name, seq, None # yield a fasta record instead\n break"
] | [
"0.74992424",
"0.5997929",
"0.5949587",
"0.59173447",
"0.5860292",
"0.5804238",
"0.55708313",
"0.55603546",
"0.54513943",
"0.5397355",
"0.53798234",
"0.53523076",
"0.5351622",
"0.534139",
"0.5237773",
"0.52329355",
"0.5212102",
"0.5193668",
"0.5143916",
"0.5136838",
"0.5118127",
"0.50939554",
"0.50505567",
"0.5045431",
"0.50289124",
"0.5022956",
"0.5013551",
"0.50097",
"0.49994037",
"0.49979636"
] | 0.61918277 | 1 |
Convert barcode id to sample description, changing extension from _fastq.txt to .fastq in the process | def convert_barcode_id_to_name(multiplex, fc_name, fq):
fqout = list([None, None])
if multiplex is None:
fqout[0] = fq[0]
if not fq[1] == None:
fqout[1] = fq[1]
else:
bcid2name = dict([(mp['barcode_id'], mp['name']) for mp in multiplex])
for bcid in bcid2name.keys():
mstr = "%s_%s_" % (fc_name, bcid)
if fq[0].find(mstr) != -1:
from_str = "%s_%s_" %(fc_name, bcid)
to_str = "%s_%s_" %(fc_name, bcid2name[bcid])
fqout[0] = fq[0].replace(from_str, to_str)
if not fq[1] == None:
fqout[1] = fq[1].replace(from_str, to_str)
fqout[0] = fqout[0].replace("_fastq.txt", ".fastq")
if not fqout[1] == None:
fqout[1] = fqout[1].replace("_fastq.txt", ".fastq")
return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_name_to_barcode_id(multiplex, fc_name, fq):\n fqout = list([None, None])\n name2bcid = dict([(mp['name'], mp['barcode_id']) for mp in multiplex])\n for name in name2bcid.keys():\n mstr = \"%s_%s_\" % (fc_name, name) \n if fq[0].find(mstr) != -1:\n from_str = \"%s_%s_\" %(fc_name, name)\n to_str = \"%s_%s_\" %(fc_name, name2bcid[name])\n fqout[0] = fq[0].replace(from_str, to_str)\n if not fq[1] == None:\n fqout[1] = fq[1].replace(from_str, to_str)\n fqout[0] = fqout[0].replace(\".fastq\", \"_fastq.txt\")\n if not fqout[1] == None:\n fqout[1] = fqout[1].replace(\".fastq\", \"_fastq.txt\")\n return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None)",
"def formatRead(raw_barcode, corrected_barcode, title, sequence, quality):\n \n # for bowtie, anything after space in name will go to SAM\n # remove existing comments as they may not be properly formatted\n mod_title = title.split(\" \")[0]\n \n mod_title += \" CB:Z:\" + corrected_barcode + \"\\tCR:Z:\" + raw_barcode\n\n return(\"@%s\\n%s\\n+\\n%s\\n\" % (mod_title, sequence, quality))",
"def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),",
"def genSampleID(path):\n head, tail = ntpath.split(path)\n result = tail or ntpath.basename(head)\n return genBaseName(result.split(\".\")[0]) # Gets just the sample name, cleans out the \".cleaned.[EXT]\"",
"def test_illumina_data_to_fastq(self):\r\n in1 = (\r\n \"M10\",\r\n \"68\",\r\n \"1\",\r\n \"1\",\r\n \"28680\",\r\n \"29475\",\r\n \"0\",\r\n \"1\",\r\n \"AACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACG.\",\r\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\r\n \"0\")\r\n expected = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACGN\\n+\\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"\"\", 0\r\n\r\n self.assertEqual(illumina_data_to_fastq(in1), expected)\r\n\r\n expected12 = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAG\\n+\\nBBBBBBBBBBBB\"\"\", 0\r\n self.assertEqual(\r\n illumina_data_to_fastq(\r\n in1,\r\n number_of_bases=12),\r\n expected12)\r\n\r\n # different value in the pass filter field\r\n in2 = (\r\n \"M10\",\r\n \"68\",\r\n \"1\",\r\n \"1\",\r\n \"28680\",\r\n \"29475\",\r\n \"0\",\r\n \"1\",\r\n \"AACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACG.\",\r\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\r\n \"1\")\r\n expected = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACGN\\n+\\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"\"\", 1\r\n\r\n self.assertEqual(illumina_data_to_fastq(in2), expected)",
"def illumina_data_to_fastq(record_data, number_of_bases=None):\r\n seq_index = 8\r\n qual_index = 9\r\n pass_filter_index = 10\r\n\r\n try:\r\n pass_filter = int(record_data[pass_filter_index])\r\n except IndexError:\r\n pass_filter = 2\r\n\r\n if number_of_bases is None:\r\n seq = record_data[seq_index].replace('.', 'N')\r\n qual = record_data[qual_index]\r\n else:\r\n seq = record_data[seq_index][:number_of_bases].replace('.', 'N')\r\n qual = record_data[qual_index][:number_of_bases]\r\n\r\n header = '%s_%s:%s:%s:%s:%s#%s/%s' % (\r\n record_data[0],\r\n record_data[1],\r\n record_data[2],\r\n record_data[3],\r\n record_data[4],\r\n record_data[5],\r\n record_data[6],\r\n record_data[7])\r\n\r\n return '@%s\\n%s\\n+\\n%s' % (header, seq, qual), pass_filter",
"def fast_Q2A(fastq_filepath):\n filein = open(fastq_filepath, \"r\")\n fileout = open(fastq_filepath[:-5] + \"fasta\", \"w\")\n found_id = 0\n num_of_seqs = 0\n for i in filein:\n if i[0] == \"@\":\n seq_id = \">\" + i[1:]\n found_id = 1\n num_of_seqs += 1\n continue\n if found_id == 1:\n seq = i\n found_id = 0\n fileout.write(seq_id + seq)\n filein.close()\n fileout.close()\n print num_of_seqs\n return os.path.abspath(fileout.name)",
"def genSamName(fastq):\n return os.path.join(samFolder, os.path.splitext(fastq)[0] + \".sam\")\n # return os.path.join(samFolder, ntpath.split(fastq)[1].replace(\".fastq\", \".sam\"))",
"def get_fastq_id(fastq_name):\n return fastq_name.split(' ')[0]",
"def get_fastq(wildcards):\n if sample_is_single_end(wildcards.sample):\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\"]].dropna()\n else:\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\", \"fq2\"]].dropna()",
"def process_barcode_in_label(read1_data,\r\n read2_data,\r\n output_bc_fastq,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n char_delineator=\":\"):\r\n header_index = 0\r\n\r\n # Check for char_delineator in sequence\r\n try:\r\n bc1_read = read1_data[header_index].split(\r\n char_delineator)[-1][0:bc1_len]\r\n # If there is an index error, it means the char_delineator wasn't found\r\n except IndexError:\r\n raise IndexError(\"Found sequence lacking character delineator. \"\r\n \"Sequence header %s, character delineator %s\" %\r\n (read1_data[header_index], char_delineator))\r\n\r\n # Create fake quality scores, using 6 here to match the existing qual fake\r\n # qual scores that were all F.\r\n bc1_qual = np.ones(len(bc1_read), dtype=np.int8) * 6\r\n if rev_comp_bc1:\r\n bc1_read = str(DNA(bc1_read).rc())\r\n\r\n if read2_data:\r\n bc2_read =\\\r\n read2_data[header_index].strip().split(\r\n char_delineator)[-1][0:bc2_len]\r\n bc2_qual = np.ones(len(bc2_read), dtype=np.int8) * 6\r\n if rev_comp_bc2:\r\n bc2_read = str(DNA(bc2_read).rc())\r\n else:\r\n bc2_read = \"\"\r\n bc2_qual = np.array([], dtype=np.int8)\r\n\r\n if not bc1_read and not bc2_read:\r\n raise ValueError(\"Came up with empty barcode sequence, please check \"\r\n \"character delineator with -s, and fastq label \"\r\n \"%s\" % read1_data[header_index])\r\n\r\n bc_lines = format_fastq_record(read1_data[header_index],\r\n bc1_read + bc2_read,\r\n np.hstack([bc1_qual, bc2_qual]))\r\n\r\n output_bc_fastq.write(bc_lines)\r\n\r\n return",
"def test_process_fastq_single_end_read_file_toggle_thirteen_base_barcodes(\r\n self):\r\n fastq_f = [\r\n \"@990:2:4:11272:5533#1/1\",\r\n \"GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC\",\r\n \"+\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"]\r\n barcode_fastq_f = [\r\n \"@990:2:4:11272:5533#1/2\",\r\n \"AAAAAAAAAAAAT\",\r\n \"+\",\r\n \"bbbbbbbbbbbbb\"]\r\n barcode_to_sample_id = {'AAAAAAAAAAAA': 's1', 'TAAAAAAAAAAA': 's2'}\r\n\r\n # rev_comp = False\r\n actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f,\r\n barcode_to_sample_id,\r\n store_unassigned=False,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length_fraction=0.75,\r\n rev_comp=False,\r\n rev_comp_barcode=False,\r\n seq_max_N=0,\r\n start_seq_id=0)\r\n actual = list(actual)\r\n expected = [(\r\n 's1_0 990:2:4:11272:5533#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0',\r\n \"GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC\",\r\n np.array([34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,\r\n 34, 34, 34, 34, 34, 34, 34, 34, 25, 32, 32, 28, 32, 34, 34, 34, 34,\r\n 34, 34, 34, 34, 34, 34, 34, 34, 34, 32, 34, 34, 34, 34, 33, 34, 32,\r\n 33, 32, 31, 27, 34, 33, 31, 33, 33, 29, 34, 30, 31, 34, 9, 23, 20,\r\n 20, 17, 30, 25, 18, 30, 21, 32], dtype=np.int8),\r\n 0)]\r\n np.testing.assert_equal(actual, expected)",
"def attach_barcode(sam, output):\n \n if output is None:\n output = sam.replace('.sam', '_bcqt.sam')\n infile = pysam.AlignmentFile(sam, \"r\")\n outfile = pysam.AlignmentFile(output, \"wh\", template=infile)\n for read in infile.fetch():\n id_sam = read.query_name\n sep_si = id_sam.index(':')\n# TODO Abort and raise exception if randomer info is not kept properly in the \n# read's name.\n bc_seq = id_sam[0:sep_si]\n sep_qi = sep_si + 1 + len(bc_seq)\n bc_pqs = id_sam[sep_si + 1: sep_qi]\n read.set_tag('BC', bc_seq)\n read.set_tag('QT', bc_pqs)\n read.query_name = id_sam[sep_qi+1:]\n outfile.write(read)\n outfile.close()\n infile.close()",
"def make_fastq(pair, filename, id):\n \n fname = filename + \"-R1.fastq\"\n with open(fname, \"w\") as r1:\n r1.write(\"@\" + id + \"\\n\")\n r1.write(pair[0])\n r1.write(\"\\n+\\n\")\n r1.write(\"E\" * len(pair[0]))\n\n fname = filename + \"-R2.fastq\"\n with open(fname, \"w\") as r2:\n r2.write(\"@\" + id + \"\\n\")\n r2.write(pair[1])\n r2.write(\"\\n+\\n\")\n r2.write(\"E\" * len(pair[1]))",
"def process_fastq_single_end_read_file(fastq_read_f,\r\n fastq_barcode_f,\r\n barcode_to_sample_id,\r\n store_unassigned=False,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length_fraction=0.75,\r\n rev_comp=False,\r\n rev_comp_barcode=False,\r\n seq_max_N=0,\r\n start_seq_id=0,\r\n filter_bad_illumina_qual_digit=False,\r\n log_f=None,\r\n histogram_f=None,\r\n barcode_correction_fn=None,\r\n max_barcode_errors=1.5,\r\n strict_header_match=True,\r\n phred_to_ascii_f=None):\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n seq_id = start_seq_id\r\n # grab the first lines and then seek back to the beginning of the file\r\n try:\r\n fastq_read_f_line1 = fastq_read_f.readline()\r\n fastq_read_f_line2 = fastq_read_f.readline()\r\n fastq_read_f.seek(0)\r\n except AttributeError:\r\n fastq_read_f_line1 = fastq_read_f[0]\r\n fastq_read_f_line2 = fastq_read_f[1]\r\n\r\n post_casava_v180 = is_casava_v180_or_later(fastq_read_f_line1)\r\n if post_casava_v180:\r\n offset = 33\r\n check_header_match_f = check_header_match_180_or_later\r\n else:\r\n offset = 64\r\n check_header_match_f = check_header_match_pre180\r\n\r\n # compute the barcode length, if they are all the same.\r\n # this is useful for selecting a subset of the barcode read\r\n # if it's too long (e.g., for technical reasons on the sequencer)\r\n barcode_lengths = set([len(bc)\r\n for bc, sid in barcode_to_sample_id.items()])\r\n if len(barcode_lengths) == 1:\r\n barcode_length = barcode_lengths.pop()\r\n else:\r\n barcode_length = None\r\n\r\n # compute the minimum read length as a fraction of the length of the input\r\n # read\r\n min_per_read_length = min_per_read_length_fraction * \\\r\n len(fastq_read_f_line2)\r\n\r\n # prep data for logging\r\n input_sequence_count = 0\r\n count_barcode_not_in_map = 0\r\n count_too_short = 0\r\n count_too_many_N = 0\r\n count_bad_illumina_qual_digit = 0\r\n count_barcode_errors_exceed_max = 0\r\n sequence_lengths = []\r\n seqs_per_sample_counts = {}\r\n for bc_data, read_data in izip(\r\n parse_fastq(fastq_barcode_f, strict=False, phred_offset=offset),\r\n parse_fastq(fastq_read_f, strict=False, phred_offset=offset)):\r\n input_sequence_count += 1\r\n # Confirm match between barcode and read headers\r\n if strict_header_match and \\\r\n (not check_header_match_f(bc_data[header_index], read_data[header_index])):\r\n raise FastqParseError(\"Headers of barcode and read do not match. Can't continue. \"\r\n \"Confirm that the barcode fastq and read fastq that you are \"\r\n \"passing match one another.\")\r\n else:\r\n header = read_data[header_index]\r\n\r\n # Grab the barcode sequence\r\n if barcode_length:\r\n # because thirteen cycles are sometimes used for\r\n # techical reasons, this step looks only at the\r\n # first tweleve bases. 
note that the barcode is\r\n # rev-comp'ed after this step if requested since\r\n # the thirteen base is a technical artefact, not\r\n # barcode sequence.\r\n barcode = bc_data[sequence_index][:barcode_length]\r\n else:\r\n barcode = bc_data[sequence_index]\r\n if rev_comp_barcode:\r\n barcode = str(DNA(barcode).rc())\r\n # Grab the read sequence\r\n sequence = read_data[1]\r\n # Grab the read quality\r\n quality = read_data[2]\r\n\r\n # correct the barcode (if applicable) and map to sample id\r\n num_barcode_errors, corrected_barcode, correction_attempted, sample_id = \\\r\n correct_barcode(\r\n barcode,\r\n barcode_to_sample_id,\r\n barcode_correction_fn)\r\n # skip samples with too many errors\r\n if (num_barcode_errors > max_barcode_errors):\r\n count_barcode_errors_exceed_max += 1\r\n continue\r\n\r\n # skip unassignable samples unless otherwise requested\r\n if sample_id is None:\r\n if not store_unassigned:\r\n count_barcode_not_in_map += 1\r\n continue\r\n else:\r\n sample_id = 'Unassigned'\r\n\r\n quality_filter_result, sequence, quality =\\\r\n quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length,\r\n phred_quality_threshold,\r\n min_per_read_length,\r\n seq_max_N,\r\n filter_bad_illumina_qual_digit)\r\n\r\n # process quality result\r\n if quality_filter_result != 0:\r\n # if the quality filter didn't pass record why and\r\n # move on to the next record\r\n if quality_filter_result == 1:\r\n count_too_short += 1\r\n elif quality_filter_result == 2:\r\n count_too_many_N += 1\r\n elif quality_filter_result == 3:\r\n count_bad_illumina_qual_digit += 1\r\n else:\r\n raise ValueError(\r\n \"Unknown quality filter result: %d\" %\r\n quality_filter_result)\r\n continue\r\n\r\n sequence_lengths.append(len(sequence))\r\n\r\n try:\r\n seqs_per_sample_counts[sample_id] += 1\r\n except KeyError:\r\n seqs_per_sample_counts[sample_id] = 1\r\n\r\n if rev_comp:\r\n sequence = str(DNA(sequence).rc())\r\n quality = quality[::-1]\r\n\r\n fasta_header = '%s_%s %s orig_bc=%s new_bc=%s bc_diffs=%d' %\\\r\n (sample_id, seq_id, header, barcode,\r\n corrected_barcode, num_barcode_errors)\r\n yield fasta_header, sequence, quality, seq_id\r\n seq_id += 1\r\n\r\n # Add sample IDs with zero counts to dictionary for logging\r\n for curr_sample_id in barcode_to_sample_id.values():\r\n if curr_sample_id not in seqs_per_sample_counts.keys():\r\n seqs_per_sample_counts[curr_sample_id] = 0\r\n\r\n if log_f is not None:\r\n log_str = format_split_libraries_fastq_log(count_barcode_not_in_map,\r\n count_too_short,\r\n count_too_many_N,\r\n count_bad_illumina_qual_digit,\r\n count_barcode_errors_exceed_max,\r\n input_sequence_count,\r\n sequence_lengths,\r\n seqs_per_sample_counts)\r\n log_f.write(log_str)\r\n\r\n if len(sequence_lengths) and histogram_f is not None:\r\n counts, bin_edges = make_histograms(sequence_lengths)\r\n histogram_str = format_histogram_one_count(counts, bin_edges)\r\n histogram_f.write(histogram_str)\r\n histogram_f.write('\\n--\\n\\n')",
"def _bio_sample(self, barcode):\n search = self._transaction.getSearchService()\n criteria = SearchCriteria()\n barcode_match = SearchCriteria.MatchClause.createAttributeMatch(\n SearchCriteria.MatchClauseAttribute.CODE, barcode\n )\n criteria.addMatchClause(barcode_match)\n samples = search.searchForSamples(criteria)\n if len(samples) > 1:\n raise RuntimeError(\n \"Found more than one sample for barcode %s.\" % barcode\n )\n if not samples:\n raise ValueError(\n \"Could not find a sample for barcode %s\" % barcode\n )\n sample = samples[0]\n return sample.getSpace(), self.barcode[:5], sample",
"def test_ordinary(self):\n date = datetime(2016, 11, 12)\n seq = 36\n name = star_barcode.barcode_filename(date, seq)\n self.assertEqual(\n name,\n 'Barcode_2016-W45-6_36.pdf'\n )",
"def fastq_filename(fastq_base):\n return fastq_base+\"_1.fastq\", fastq_base+\"_2.fastq\"",
"def prepare_fastq(Fastq_Root=\"2.Fastq/\", ):\n fastqs = glob.glob(Fastq_Root + \"*.fastq\")\n data = {}\n for fq in fastqs:\n s = os.path.split(fq)[1]\n s = s.replace(\".fastq\", \"\")\n if s.endswith(\"_1\"):\n sample = s.replace(\"_1\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][0] = fq\n if s.endswith(\"_2\"):\n sample = s.replace(\"_2\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][1] = fq\n if not s.endswith(\"_1\") and not s.endswith(\"_2\"):\n data[s] = [fq]\n return data",
"def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name",
"def getcodetofilename(index_file_parameters,bamfile_id):\n index_file=index_file_parameters['index']\n relative_flg=index_file_parameters['relative']\n \n index_dict=dict([(lntxt.rstrip().split(',')[0],lntxt.rstrip().split(',')[1]) for lntxt in open(index_file).readlines()])\n \n if bamfile_id not in index_dict:\n return ''\n \n if relative_flg==0:\n return index_dict[bamfile_id]\n else:\n relative_dir='/'.join(index_file.split('/')[0:-1])\n return '%s/%s'%(relative_dir,index_dict[bamfile_id])",
"def __return_new_file_name(self, file_name: str, file_path: str):\n\n fastq_runid = re.split('[_.]', file_name) # split on `_` or `.`\n barcode_number = file_path.split(\"/\")[-1] # get the barcode number\n fastq_or_fasta = fastq_runid[-1] # get the .fastq/.fasta file extension\n\n # create the new file name\n new_file_name = \"_\".join(fastq_runid[:3]) # join first three elements\n new_file_name += \"_%s.%s\" % (barcode_number, fastq_or_fasta) # append the barcode number and file extension\n\n return new_file_name",
"def strip_barcodes(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_adapters_removed.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fasta\"):\n match = re.search(r'\\S*:', record.id)\n if match:\n correct = match.group().rstrip(\":\")\n else:\n correct = str(record.id)\n SEQ = str(record.seq)\n if correct in wanted_set:\n out.write(\">\" + correct + \"\\n\" + SEQ + \"\\n\")",
"def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")",
"def test_get_barcode_kit(self):\n run_dir = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'\n sample_sheet_104 = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/SQK-LSK109_sample_sheet.csv'\n run_104 = MinIONqc(run_dir, sample_sheet_104, None)\n got_kit_104 = run_104._get_barcode_kit()\n \n sample_sheet_114 = 'data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2/SQK-LSK109_sample_sheet.csv'\n run_114 = MinIONqc(run_dir, sample_sheet_114, None)\n got_kit_114 = run_114._get_barcode_kit()\n self.assertEqual(got_kit_104, 'EXP-NBD104')\n self.assertEqual(got_kit_114, 'EXP-NBD114')",
"def generateUniqueId(context):\n\n fn_normalize = getUtility(IFileNameNormalizer).normalize\n id_normalize = getUtility(IIDNormalizer).normalize\n prefixes = context.bika_setup.getPrefixes()\n\n year = context.bika_setup.getYearInPrefix() and \\\n DateTime().strftime(\"%Y\")[2:] or ''\n separator = '-'\n for e in prefixes:\n if 'separator' not in e:\n e['separator'] = ''\n if e['portal_type'] == context.portal_type:\n separator = e['separator']\n # Analysis Request IDs\n if context.portal_type == \"AnalysisRequest\":\n sample = context.getSample()\n s_prefix = fn_normalize(sample.getSampleType().getPrefix())\n sample_padding = context.bika_setup.getSampleIDPadding()\n ar_padding = context.bika_setup.getARIDPadding()\n sample_id = sample.getId()\n sample_number = sample_id.split(s_prefix)[1]\n ar_number = sample.getLastARNumber()\n ar_number = ar_number and ar_number + 1 or 1\n\n return fn_normalize(\n (\"%s%s\" + separator + \"R%s\") % (s_prefix,\n str(sample_number).zfill(sample_padding),\n str(ar_number).zfill(ar_padding))\n )\n\n # Sample Partition IDs\n if context.portal_type == \"SamplePartition\":\n # We do not use prefixes. There are actually codes that require the 'P'.\n # matches = [p for p in prefixes if p['portal_type'] == 'SamplePartition']\n # prefix = matches and matches[0]['prefix'] or 'samplepartition'\n # padding = int(matches and matches[0]['padding'] or '0')\n\n # at this time the part exists, so +1 would be 1 too many\n partnr = str(len(context.aq_parent.objectValues('SamplePartition')))\n # parent id is normalized already\n return (\"%s\" + separator + \"P%s\") % (context.aq_parent.id, partnr)\n\n if context.bika_setup.getExternalIDServer():\n\n # if using external server\n\n for d in prefixes:\n # Sample ID comes from SampleType\n if context.portal_type == \"Sample\":\n prefix = context.getSampleType().getPrefix()\n padding = context.bika_setup.getSampleIDPadding()\n new_id = str(idserver_generate_id(context, \"%s%s-\" % (prefix, year)))\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n elif d['portal_type'] == context.portal_type:\n prefix = d['prefix']\n padding = d['padding']\n new_id = str(idserver_generate_id(context, \"%s%s-\" % (prefix, year)))\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n # no prefix; use portal_type\n # year is not inserted here\n # portal_type is be normalized to lowercase\n npt = id_normalize(context.portal_type)\n new_id = str(idserver_generate_id(context, npt + \"-\"))\n return ('%s' + separator + '%s') % (npt, new_id)\n\n else:\n\n # No external id-server.\n\n def next_id(prefix):\n # normalize before anything\n prefix = fn_normalize(prefix)\n plone = context.portal_url.getPortalObject()\n # grab the first catalog we are indexed in.\n at = getToolByName(plone, 'archetype_tool')\n if context.portal_type in at.catalog_map:\n catalog_name = at.catalog_map[context.portal_type][0]\n else:\n catalog_name = 'portal_catalog'\n catalog = getToolByName(plone, catalog_name)\n\n # get all IDS that start with prefix\n # this must specifically exclude AR IDs (two -'s)\n rr = re.compile(\"^\"+prefix+separator+\"[\\d+]+$\")\n ids = [int(i.split(prefix+separator)[1]) \\\n for i in catalog.Indexes['id'].uniqueValues() \\\n if rr.match(i)]\n\n #plone_tool = getToolByName(context, 'plone_utils')\n #if not plone_tool.isIDAutoGenerated(l.id):\n ids.sort()\n _id = ids and ids[-1] or 0\n new_id = _id + 1\n\n return 
str(new_id)\n\n for d in prefixes:\n if context.portal_type == \"Sample\":\n # Special case for Sample IDs\n prefix = fn_normalize(context.getSampleType().getPrefix())\n padding = context.bika_setup.getSampleIDPadding()\n sequence_start = context.bika_setup.getSampleIDSequenceStart()\n new_id = next_id(prefix+year)\n # If sequence_start is greater than new_id. Set\n # sequence_start as new_id. (Jira LIMS-280)\n if sequence_start > int(new_id):\n new_id = str(sequence_start)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n elif d['portal_type'] == context.portal_type:\n prefix = d['prefix']\n padding = d['padding']\n sequence_start = d.get(\"sequence_start\", None)\n new_id = next_id(prefix+year)\n # Jira-tracker LIMS-280\n if sequence_start and int(sequence_start) > int(new_id):\n new_id = str(sequence_start)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n\n if context.portal_type == \"StorageUnit\":\n if context.getStorageUnitID():\n return context.getStorageUnitID()\n\n if context.portal_type == \"StorageManagement\":\n prefix = ''\n if context.getType() == \"Freeze\":\n prefix = \"FZ\"\n elif context.getType() == \"Tank\":\n prefix = \"LN\"\n\n if context.aq_parent.portal_type == \"StorageUnit\":\n padding = 3\n year = DateTime().strftime(\"%Y\")[2:]\n new_id = next_id(prefix + year)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + '-' + '%s') % (prefix, year, new_id)\n else:\n l = context.Title().split(' ')\n if len(l) == 2:\n return l[1]\n elif len(l) == 1:\n return l[0]\n \n return context.Title().replace(' ', '')\n\n if context.portal_type == \"StorageInventory\":\n prefix = 'INV'\n parent = context.aq_parent\n new_id = next_id(prefix)\n\n if parent.portal_type == \"StorageUnit\":\n new_id = new_id.zfill(int(3))\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n elif parent.portal_type == \"StorageInventory\":\n room = context.aq_parent.aq_parent\n return room.id + '.' + parent.id + '.' + context.Title()\n\n else:\n raise AssertionError(\"Unknown Portal type\")\n\n if context.portal_type == \"Kit\":\n prefix = context.getPrefix() and context.getPrefix() or \"KIT\"\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n if context.portal_type == \"StorageLocation\":\n return context.Title()\n\n if context.portal_type == \"Aliquot\":\n # subject = context.getSubjectID()\n # prefix = subject + '-SP' if subject else 'SP'\n prefix = 'AL'\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n if context.portal_type == \"Biospecimen\":\n prefix = \"BS\"\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n # no prefix; use portal_type\n # no year inserted here\n # use \"IID\" normalizer, because we want portal_type to be lowercased.\n prefix = id_normalize(context.portal_type)\n new_id = next_id(prefix)\n return ('%s' + separator + '%s') % (prefix, new_id)",
"def extract_fastq_info(fastq):\n f = gzip.open(fastq, 'rb')\n header_lines = [x.replace(\"\\n\",\"\") for x in f.readlines(10000) if x.startswith(\"@\")]\n\n for heading in header_lines:\n l = re.split(r'(\\:|#| )',heading)\n line = {}\n index_set = []\n if len(l) == 11:\n line[\"instrument\"] = l[0]\n line[\"flowcell_lane\"] = l[2]\n line[\"flowcell_tile\"] = l[4]\n try:\n line[\"pair\"] = l[10].split(\"/\")[1]\n index_set.append(l[10].split(\"/\")[0])\n except:\n pass\n elif len(l) == 21:\n line[\"instrument\"] = l[0]\n line[\"run_id\"] = l[2]\n line[\"flowcell_id\"] = l[4]\n line[\"flowcell_lane\"] = l[6]\n line[\"flowcell_tile\"] = l[8]\n line[\"pair\"] = l[14]\n line[\"filtered\"] = l[16]\n line[\"control_bits\"] = l[16]\n line[\"index\"] = l[20]\n index_set.append(l[20])\n else:\n print \"error\", l\n line[\"index\"] = most_common(index_set)\n return line",
"def control_fastq_filename(demux_folder):\n pattern=re.compile(\"^(P[0-9]+)-([0-9]{3,4}).+fastq.*$\")\n for root, dirs, files in os.walk(demux_folder):\n for f in files:\n matches=pattern.search(f)\n if matches:\n new_name=f.replace(\"{}-{}\".format(matches.group(1), matches.group(2)), \"{}_{}\".format(matches.group(1), matches.group(2)))\n os.rename(os.path.join(root, f), os.path.join(root, new_name))",
"def _fetch_formatted_analysis_description(\n analysis_description,fastq_run_list,feature_column='feature_type',\n sample_column='sample_igf_id',run_column='run_igf_id',file_column='file_path'):\n try:\n formatted_analysis_description = dict()\n analysis_description_df = pd.DataFrame(analysis_description)\n fastq_run_list_df = pd.DataFrame(fastq_run_list)\n fastq_run_list_df['fastq_dir'] = \\\n fastq_run_list_df[file_column].\\\n map(lambda x: os.path.dirname(x))\n tmp_dir = get_temp_dir(use_ephemeral_space=True)\n for feature,f_data in analysis_description_df.groupby(feature_column):\n feature = \\\n feature.replace(' ','_').\\\n lower()\n sample_igf_id = \\\n list(f_data[sample_column].values)[0]\n sample_records = \\\n fastq_run_list_df[fastq_run_list_df[sample_column]==sample_igf_id]\n if len(sample_records.index)==0:\n raise ValueError(\n 'No records found for sample: {0}, feature: {1}'.\\\n format(sample_igf_id,feature))\n total_runs_for_sample = \\\n len(list(\n sample_records[run_column].\\\n drop_duplicates().\\\n values))\n fastq_file_name = \\\n list(sample_records[file_column].values)[0]\n file_name_pattern = \\\n re.compile(r'(\\S+)_S\\d+_L00\\d_(R|I)(\\d)_001\\.fastq\\.gz')\n sample_prefix_match = \\\n re.match(\n file_name_pattern,\n os.path.basename(fastq_file_name))\n if sample_prefix_match is None:\n raise ValueError(\n 'Failed to match fastq file for {0}'.\\\n format(fastq_file_name))\n sample_prefix = sample_prefix_match.groups()[0]\n sample_records = \\\n sample_records[[run_column,'fastq_dir']].\\\n drop_duplicates()\n sample_records = \\\n sample_records.\\\n to_dict(orient='records')\n formatted_run_records = dict()\n for i,run in enumerate(sample_records):\n run_igf_id = run.get(run_column)\n fastq_dir = run.get('fastq_dir')\n tmp_output_path = \\\n os.path.join(tmp_dir,feature,sample_igf_id,run_igf_id)\n if not os.path.exists(tmp_output_path):\n os.makedirs(tmp_output_path)\n formatted_run_records.\\\n update({\n str(i):{\n \"run_igf_id\":run_igf_id,\n \"fastq_dir\":fastq_dir,\n \"output_path\":tmp_output_path\n }})\n formatted_analysis_description.\\\n update({\n feature:{\n 'sample_igf_id':sample_igf_id,\n 'sample_name':sample_prefix,\n 'run_count':total_runs_for_sample,\n 'runs':formatted_run_records\n }})\n return formatted_analysis_description\n except Exception as e:\n raise ValueError(e)",
"def demo():\r\n print \"---------------------------------------\"\r\n print \"Processing %d sequences from %d samples\" % (\r\n len(DEMO_SEQUENCES), len(DEMO_SAMPLE_MAPPING))\r\n print \"---------------------------------------\"\r\n\r\n for ix, cur_seq in enumerate(DEMO_SEQUENCES):\r\n barcode = cur_seq[:8]\r\n seq_read = cur_seq[8:]\r\n print \"---> processing demo sequence\", ix\r\n print \"read barcode :\", barcode\r\n try:\r\n corrected_barcode = decode_barcode_8(barcode)\r\n orig_sample_id = DEMO_SAMPLE_MAPPING[corrected_barcode]\r\n\r\n if corrected_barcode != barcode:\r\n print \"*corrected barcode:\", corrected_barcode\r\n else:\r\n print \"-no error barcode:\", corrected_barcode\r\n\r\n print \"original sample id:\", orig_sample_id\r\n print \"sequence read :\", seq_read\r\n\r\n except ValueError as e:\r\n print \"!\", str(e), \"skipping...\"\r\n continue"
] | [
"0.6623599",
"0.63540995",
"0.59808934",
"0.5929442",
"0.58410364",
"0.57554716",
"0.5716192",
"0.5710112",
"0.5637982",
"0.5631498",
"0.5599545",
"0.5590032",
"0.5577469",
"0.55256295",
"0.5518841",
"0.5507794",
"0.5507233",
"0.5487758",
"0.5482285",
"0.54775095",
"0.54474163",
"0.5414724",
"0.54130566",
"0.537644",
"0.53710264",
"0.53702825",
"0.5364173",
"0.535707",
"0.5354941",
"0.5298185"
] | 0.6429612 | 1 |
Convert sample description to barcode id, changing extension from .fastq to _fastq.txt in the process | def convert_name_to_barcode_id(multiplex, fc_name, fq):
fqout = list([None, None])
name2bcid = dict([(mp['name'], mp['barcode_id']) for mp in multiplex])
for name in name2bcid.keys():
mstr = "%s_%s_" % (fc_name, name)
if fq[0].find(mstr) != -1:
from_str = "%s_%s_" %(fc_name, name)
to_str = "%s_%s_" %(fc_name, name2bcid[name])
fqout[0] = fq[0].replace(from_str, to_str)
if not fq[1] == None:
fqout[1] = fq[1].replace(from_str, to_str)
fqout[0] = fqout[0].replace(".fastq", "_fastq.txt")
if not fqout[1] == None:
fqout[1] = fqout[1].replace(".fastq", "_fastq.txt")
return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_barcode_id_to_name(multiplex, fc_name, fq):\n fqout = list([None, None])\n if multiplex is None:\n fqout[0] = fq[0]\n if not fq[1] == None:\n fqout[1] = fq[1]\n else:\n bcid2name = dict([(mp['barcode_id'], mp['name']) for mp in multiplex])\n for bcid in bcid2name.keys():\n mstr = \"%s_%s_\" % (fc_name, bcid) \n if fq[0].find(mstr) != -1:\n from_str = \"%s_%s_\" %(fc_name, bcid)\n to_str = \"%s_%s_\" %(fc_name, bcid2name[bcid])\n fqout[0] = fq[0].replace(from_str, to_str)\n if not fq[1] == None:\n fqout[1] = fq[1].replace(from_str, to_str)\n fqout[0] = fqout[0].replace(\"_fastq.txt\", \".fastq\")\n if not fqout[1] == None:\n fqout[1] = fqout[1].replace(\"_fastq.txt\", \".fastq\")\n return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None)",
"def formatRead(raw_barcode, corrected_barcode, title, sequence, quality):\n \n # for bowtie, anything after space in name will go to SAM\n # remove existing comments as they may not be properly formatted\n mod_title = title.split(\" \")[0]\n \n mod_title += \" CB:Z:\" + corrected_barcode + \"\\tCR:Z:\" + raw_barcode\n\n return(\"@%s\\n%s\\n+\\n%s\\n\" % (mod_title, sequence, quality))",
"def genSampleID(path):\n head, tail = ntpath.split(path)\n result = tail or ntpath.basename(head)\n return genBaseName(result.split(\".\")[0]) # Gets just the sample name, cleans out the \".cleaned.[EXT]\"",
"def get_fastq_id(fastq_name):\n return fastq_name.split(' ')[0]",
"def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),",
"def fast_Q2A(fastq_filepath):\n filein = open(fastq_filepath, \"r\")\n fileout = open(fastq_filepath[:-5] + \"fasta\", \"w\")\n found_id = 0\n num_of_seqs = 0\n for i in filein:\n if i[0] == \"@\":\n seq_id = \">\" + i[1:]\n found_id = 1\n num_of_seqs += 1\n continue\n if found_id == 1:\n seq = i\n found_id = 0\n fileout.write(seq_id + seq)\n filein.close()\n fileout.close()\n print num_of_seqs\n return os.path.abspath(fileout.name)",
"def genSamName(fastq):\n return os.path.join(samFolder, os.path.splitext(fastq)[0] + \".sam\")\n # return os.path.join(samFolder, ntpath.split(fastq)[1].replace(\".fastq\", \".sam\"))",
"def illumina_data_to_fastq(record_data, number_of_bases=None):\r\n seq_index = 8\r\n qual_index = 9\r\n pass_filter_index = 10\r\n\r\n try:\r\n pass_filter = int(record_data[pass_filter_index])\r\n except IndexError:\r\n pass_filter = 2\r\n\r\n if number_of_bases is None:\r\n seq = record_data[seq_index].replace('.', 'N')\r\n qual = record_data[qual_index]\r\n else:\r\n seq = record_data[seq_index][:number_of_bases].replace('.', 'N')\r\n qual = record_data[qual_index][:number_of_bases]\r\n\r\n header = '%s_%s:%s:%s:%s:%s#%s/%s' % (\r\n record_data[0],\r\n record_data[1],\r\n record_data[2],\r\n record_data[3],\r\n record_data[4],\r\n record_data[5],\r\n record_data[6],\r\n record_data[7])\r\n\r\n return '@%s\\n%s\\n+\\n%s' % (header, seq, qual), pass_filter",
"def test_illumina_data_to_fastq(self):\r\n in1 = (\r\n \"M10\",\r\n \"68\",\r\n \"1\",\r\n \"1\",\r\n \"28680\",\r\n \"29475\",\r\n \"0\",\r\n \"1\",\r\n \"AACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACG.\",\r\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\r\n \"0\")\r\n expected = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACGN\\n+\\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"\"\", 0\r\n\r\n self.assertEqual(illumina_data_to_fastq(in1), expected)\r\n\r\n expected12 = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAG\\n+\\nBBBBBBBBBBBB\"\"\", 0\r\n self.assertEqual(\r\n illumina_data_to_fastq(\r\n in1,\r\n number_of_bases=12),\r\n expected12)\r\n\r\n # different value in the pass filter field\r\n in2 = (\r\n \"M10\",\r\n \"68\",\r\n \"1\",\r\n \"1\",\r\n \"28680\",\r\n \"29475\",\r\n \"0\",\r\n \"1\",\r\n \"AACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACG.\",\r\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\r\n \"1\")\r\n expected = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACGN\\n+\\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"\"\", 1\r\n\r\n self.assertEqual(illumina_data_to_fastq(in2), expected)",
"def make_fastq(pair, filename, id):\n \n fname = filename + \"-R1.fastq\"\n with open(fname, \"w\") as r1:\n r1.write(\"@\" + id + \"\\n\")\n r1.write(pair[0])\n r1.write(\"\\n+\\n\")\n r1.write(\"E\" * len(pair[0]))\n\n fname = filename + \"-R2.fastq\"\n with open(fname, \"w\") as r2:\n r2.write(\"@\" + id + \"\\n\")\n r2.write(pair[1])\n r2.write(\"\\n+\\n\")\n r2.write(\"E\" * len(pair[1]))",
"def fastq_filename(fastq_base):\n return fastq_base+\"_1.fastq\", fastq_base+\"_2.fastq\"",
"def generateUniqueId(context):\n\n fn_normalize = getUtility(IFileNameNormalizer).normalize\n id_normalize = getUtility(IIDNormalizer).normalize\n prefixes = context.bika_setup.getPrefixes()\n\n year = context.bika_setup.getYearInPrefix() and \\\n DateTime().strftime(\"%Y\")[2:] or ''\n separator = '-'\n for e in prefixes:\n if 'separator' not in e:\n e['separator'] = ''\n if e['portal_type'] == context.portal_type:\n separator = e['separator']\n # Analysis Request IDs\n if context.portal_type == \"AnalysisRequest\":\n sample = context.getSample()\n s_prefix = fn_normalize(sample.getSampleType().getPrefix())\n sample_padding = context.bika_setup.getSampleIDPadding()\n ar_padding = context.bika_setup.getARIDPadding()\n sample_id = sample.getId()\n sample_number = sample_id.split(s_prefix)[1]\n ar_number = sample.getLastARNumber()\n ar_number = ar_number and ar_number + 1 or 1\n\n return fn_normalize(\n (\"%s%s\" + separator + \"R%s\") % (s_prefix,\n str(sample_number).zfill(sample_padding),\n str(ar_number).zfill(ar_padding))\n )\n\n # Sample Partition IDs\n if context.portal_type == \"SamplePartition\":\n # We do not use prefixes. There are actually codes that require the 'P'.\n # matches = [p for p in prefixes if p['portal_type'] == 'SamplePartition']\n # prefix = matches and matches[0]['prefix'] or 'samplepartition'\n # padding = int(matches and matches[0]['padding'] or '0')\n\n # at this time the part exists, so +1 would be 1 too many\n partnr = str(len(context.aq_parent.objectValues('SamplePartition')))\n # parent id is normalized already\n return (\"%s\" + separator + \"P%s\") % (context.aq_parent.id, partnr)\n\n if context.bika_setup.getExternalIDServer():\n\n # if using external server\n\n for d in prefixes:\n # Sample ID comes from SampleType\n if context.portal_type == \"Sample\":\n prefix = context.getSampleType().getPrefix()\n padding = context.bika_setup.getSampleIDPadding()\n new_id = str(idserver_generate_id(context, \"%s%s-\" % (prefix, year)))\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n elif d['portal_type'] == context.portal_type:\n prefix = d['prefix']\n padding = d['padding']\n new_id = str(idserver_generate_id(context, \"%s%s-\" % (prefix, year)))\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n # no prefix; use portal_type\n # year is not inserted here\n # portal_type is be normalized to lowercase\n npt = id_normalize(context.portal_type)\n new_id = str(idserver_generate_id(context, npt + \"-\"))\n return ('%s' + separator + '%s') % (npt, new_id)\n\n else:\n\n # No external id-server.\n\n def next_id(prefix):\n # normalize before anything\n prefix = fn_normalize(prefix)\n plone = context.portal_url.getPortalObject()\n # grab the first catalog we are indexed in.\n at = getToolByName(plone, 'archetype_tool')\n if context.portal_type in at.catalog_map:\n catalog_name = at.catalog_map[context.portal_type][0]\n else:\n catalog_name = 'portal_catalog'\n catalog = getToolByName(plone, catalog_name)\n\n # get all IDS that start with prefix\n # this must specifically exclude AR IDs (two -'s)\n rr = re.compile(\"^\"+prefix+separator+\"[\\d+]+$\")\n ids = [int(i.split(prefix+separator)[1]) \\\n for i in catalog.Indexes['id'].uniqueValues() \\\n if rr.match(i)]\n\n #plone_tool = getToolByName(context, 'plone_utils')\n #if not plone_tool.isIDAutoGenerated(l.id):\n ids.sort()\n _id = ids and ids[-1] or 0\n new_id = _id + 1\n\n return 
str(new_id)\n\n for d in prefixes:\n if context.portal_type == \"Sample\":\n # Special case for Sample IDs\n prefix = fn_normalize(context.getSampleType().getPrefix())\n padding = context.bika_setup.getSampleIDPadding()\n sequence_start = context.bika_setup.getSampleIDSequenceStart()\n new_id = next_id(prefix+year)\n # If sequence_start is greater than new_id. Set\n # sequence_start as new_id. (Jira LIMS-280)\n if sequence_start > int(new_id):\n new_id = str(sequence_start)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n elif d['portal_type'] == context.portal_type:\n prefix = d['prefix']\n padding = d['padding']\n sequence_start = d.get(\"sequence_start\", None)\n new_id = next_id(prefix+year)\n # Jira-tracker LIMS-280\n if sequence_start and int(sequence_start) > int(new_id):\n new_id = str(sequence_start)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n\n if context.portal_type == \"StorageUnit\":\n if context.getStorageUnitID():\n return context.getStorageUnitID()\n\n if context.portal_type == \"StorageManagement\":\n prefix = ''\n if context.getType() == \"Freeze\":\n prefix = \"FZ\"\n elif context.getType() == \"Tank\":\n prefix = \"LN\"\n\n if context.aq_parent.portal_type == \"StorageUnit\":\n padding = 3\n year = DateTime().strftime(\"%Y\")[2:]\n new_id = next_id(prefix + year)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + '-' + '%s') % (prefix, year, new_id)\n else:\n l = context.Title().split(' ')\n if len(l) == 2:\n return l[1]\n elif len(l) == 1:\n return l[0]\n \n return context.Title().replace(' ', '')\n\n if context.portal_type == \"StorageInventory\":\n prefix = 'INV'\n parent = context.aq_parent\n new_id = next_id(prefix)\n\n if parent.portal_type == \"StorageUnit\":\n new_id = new_id.zfill(int(3))\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n elif parent.portal_type == \"StorageInventory\":\n room = context.aq_parent.aq_parent\n return room.id + '.' + parent.id + '.' + context.Title()\n\n else:\n raise AssertionError(\"Unknown Portal type\")\n\n if context.portal_type == \"Kit\":\n prefix = context.getPrefix() and context.getPrefix() or \"KIT\"\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n if context.portal_type == \"StorageLocation\":\n return context.Title()\n\n if context.portal_type == \"Aliquot\":\n # subject = context.getSubjectID()\n # prefix = subject + '-SP' if subject else 'SP'\n prefix = 'AL'\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n if context.portal_type == \"Biospecimen\":\n prefix = \"BS\"\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n # no prefix; use portal_type\n # no year inserted here\n # use \"IID\" normalizer, because we want portal_type to be lowercased.\n prefix = id_normalize(context.portal_type)\n new_id = next_id(prefix)\n return ('%s' + separator + '%s') % (prefix, new_id)",
"def get_fastq(wildcards):\n if sample_is_single_end(wildcards.sample):\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\"]].dropna()\n else:\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\", \"fq2\"]].dropna()",
"def test_process_fastq_single_end_read_file_toggle_thirteen_base_barcodes(\r\n self):\r\n fastq_f = [\r\n \"@990:2:4:11272:5533#1/1\",\r\n \"GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC\",\r\n \"+\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"]\r\n barcode_fastq_f = [\r\n \"@990:2:4:11272:5533#1/2\",\r\n \"AAAAAAAAAAAAT\",\r\n \"+\",\r\n \"bbbbbbbbbbbbb\"]\r\n barcode_to_sample_id = {'AAAAAAAAAAAA': 's1', 'TAAAAAAAAAAA': 's2'}\r\n\r\n # rev_comp = False\r\n actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f,\r\n barcode_to_sample_id,\r\n store_unassigned=False,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length_fraction=0.75,\r\n rev_comp=False,\r\n rev_comp_barcode=False,\r\n seq_max_N=0,\r\n start_seq_id=0)\r\n actual = list(actual)\r\n expected = [(\r\n 's1_0 990:2:4:11272:5533#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0',\r\n \"GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC\",\r\n np.array([34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,\r\n 34, 34, 34, 34, 34, 34, 34, 34, 25, 32, 32, 28, 32, 34, 34, 34, 34,\r\n 34, 34, 34, 34, 34, 34, 34, 34, 34, 32, 34, 34, 34, 34, 33, 34, 32,\r\n 33, 32, 31, 27, 34, 33, 31, 33, 33, 29, 34, 30, 31, 34, 9, 23, 20,\r\n 20, 17, 30, 25, 18, 30, 21, 32], dtype=np.int8),\r\n 0)]\r\n np.testing.assert_equal(actual, expected)",
"def attach_barcode(sam, output):\n \n if output is None:\n output = sam.replace('.sam', '_bcqt.sam')\n infile = pysam.AlignmentFile(sam, \"r\")\n outfile = pysam.AlignmentFile(output, \"wh\", template=infile)\n for read in infile.fetch():\n id_sam = read.query_name\n sep_si = id_sam.index(':')\n# TODO Abort and raise exception if randomer info is not kept properly in the \n# read's name.\n bc_seq = id_sam[0:sep_si]\n sep_qi = sep_si + 1 + len(bc_seq)\n bc_pqs = id_sam[sep_si + 1: sep_qi]\n read.set_tag('BC', bc_seq)\n read.set_tag('QT', bc_pqs)\n read.query_name = id_sam[sep_qi+1:]\n outfile.write(read)\n outfile.close()\n infile.close()",
"def process_fastq_single_end_read_file(fastq_read_f,\r\n fastq_barcode_f,\r\n barcode_to_sample_id,\r\n store_unassigned=False,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length_fraction=0.75,\r\n rev_comp=False,\r\n rev_comp_barcode=False,\r\n seq_max_N=0,\r\n start_seq_id=0,\r\n filter_bad_illumina_qual_digit=False,\r\n log_f=None,\r\n histogram_f=None,\r\n barcode_correction_fn=None,\r\n max_barcode_errors=1.5,\r\n strict_header_match=True,\r\n phred_to_ascii_f=None):\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n seq_id = start_seq_id\r\n # grab the first lines and then seek back to the beginning of the file\r\n try:\r\n fastq_read_f_line1 = fastq_read_f.readline()\r\n fastq_read_f_line2 = fastq_read_f.readline()\r\n fastq_read_f.seek(0)\r\n except AttributeError:\r\n fastq_read_f_line1 = fastq_read_f[0]\r\n fastq_read_f_line2 = fastq_read_f[1]\r\n\r\n post_casava_v180 = is_casava_v180_or_later(fastq_read_f_line1)\r\n if post_casava_v180:\r\n offset = 33\r\n check_header_match_f = check_header_match_180_or_later\r\n else:\r\n offset = 64\r\n check_header_match_f = check_header_match_pre180\r\n\r\n # compute the barcode length, if they are all the same.\r\n # this is useful for selecting a subset of the barcode read\r\n # if it's too long (e.g., for technical reasons on the sequencer)\r\n barcode_lengths = set([len(bc)\r\n for bc, sid in barcode_to_sample_id.items()])\r\n if len(barcode_lengths) == 1:\r\n barcode_length = barcode_lengths.pop()\r\n else:\r\n barcode_length = None\r\n\r\n # compute the minimum read length as a fraction of the length of the input\r\n # read\r\n min_per_read_length = min_per_read_length_fraction * \\\r\n len(fastq_read_f_line2)\r\n\r\n # prep data for logging\r\n input_sequence_count = 0\r\n count_barcode_not_in_map = 0\r\n count_too_short = 0\r\n count_too_many_N = 0\r\n count_bad_illumina_qual_digit = 0\r\n count_barcode_errors_exceed_max = 0\r\n sequence_lengths = []\r\n seqs_per_sample_counts = {}\r\n for bc_data, read_data in izip(\r\n parse_fastq(fastq_barcode_f, strict=False, phred_offset=offset),\r\n parse_fastq(fastq_read_f, strict=False, phred_offset=offset)):\r\n input_sequence_count += 1\r\n # Confirm match between barcode and read headers\r\n if strict_header_match and \\\r\n (not check_header_match_f(bc_data[header_index], read_data[header_index])):\r\n raise FastqParseError(\"Headers of barcode and read do not match. Can't continue. \"\r\n \"Confirm that the barcode fastq and read fastq that you are \"\r\n \"passing match one another.\")\r\n else:\r\n header = read_data[header_index]\r\n\r\n # Grab the barcode sequence\r\n if barcode_length:\r\n # because thirteen cycles are sometimes used for\r\n # techical reasons, this step looks only at the\r\n # first tweleve bases. 
note that the barcode is\r\n # rev-comp'ed after this step if requested since\r\n # the thirteen base is a technical artefact, not\r\n # barcode sequence.\r\n barcode = bc_data[sequence_index][:barcode_length]\r\n else:\r\n barcode = bc_data[sequence_index]\r\n if rev_comp_barcode:\r\n barcode = str(DNA(barcode).rc())\r\n # Grab the read sequence\r\n sequence = read_data[1]\r\n # Grab the read quality\r\n quality = read_data[2]\r\n\r\n # correct the barcode (if applicable) and map to sample id\r\n num_barcode_errors, corrected_barcode, correction_attempted, sample_id = \\\r\n correct_barcode(\r\n barcode,\r\n barcode_to_sample_id,\r\n barcode_correction_fn)\r\n # skip samples with too many errors\r\n if (num_barcode_errors > max_barcode_errors):\r\n count_barcode_errors_exceed_max += 1\r\n continue\r\n\r\n # skip unassignable samples unless otherwise requested\r\n if sample_id is None:\r\n if not store_unassigned:\r\n count_barcode_not_in_map += 1\r\n continue\r\n else:\r\n sample_id = 'Unassigned'\r\n\r\n quality_filter_result, sequence, quality =\\\r\n quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length,\r\n phred_quality_threshold,\r\n min_per_read_length,\r\n seq_max_N,\r\n filter_bad_illumina_qual_digit)\r\n\r\n # process quality result\r\n if quality_filter_result != 0:\r\n # if the quality filter didn't pass record why and\r\n # move on to the next record\r\n if quality_filter_result == 1:\r\n count_too_short += 1\r\n elif quality_filter_result == 2:\r\n count_too_many_N += 1\r\n elif quality_filter_result == 3:\r\n count_bad_illumina_qual_digit += 1\r\n else:\r\n raise ValueError(\r\n \"Unknown quality filter result: %d\" %\r\n quality_filter_result)\r\n continue\r\n\r\n sequence_lengths.append(len(sequence))\r\n\r\n try:\r\n seqs_per_sample_counts[sample_id] += 1\r\n except KeyError:\r\n seqs_per_sample_counts[sample_id] = 1\r\n\r\n if rev_comp:\r\n sequence = str(DNA(sequence).rc())\r\n quality = quality[::-1]\r\n\r\n fasta_header = '%s_%s %s orig_bc=%s new_bc=%s bc_diffs=%d' %\\\r\n (sample_id, seq_id, header, barcode,\r\n corrected_barcode, num_barcode_errors)\r\n yield fasta_header, sequence, quality, seq_id\r\n seq_id += 1\r\n\r\n # Add sample IDs with zero counts to dictionary for logging\r\n for curr_sample_id in barcode_to_sample_id.values():\r\n if curr_sample_id not in seqs_per_sample_counts.keys():\r\n seqs_per_sample_counts[curr_sample_id] = 0\r\n\r\n if log_f is not None:\r\n log_str = format_split_libraries_fastq_log(count_barcode_not_in_map,\r\n count_too_short,\r\n count_too_many_N,\r\n count_bad_illumina_qual_digit,\r\n count_barcode_errors_exceed_max,\r\n input_sequence_count,\r\n sequence_lengths,\r\n seqs_per_sample_counts)\r\n log_f.write(log_str)\r\n\r\n if len(sequence_lengths) and histogram_f is not None:\r\n counts, bin_edges = make_histograms(sequence_lengths)\r\n histogram_str = format_histogram_one_count(counts, bin_edges)\r\n histogram_f.write(histogram_str)\r\n histogram_f.write('\\n--\\n\\n')",
"def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name",
"def control_fastq_filename(demux_folder):\n pattern=re.compile(\"^(P[0-9]+)-([0-9]{3,4}).+fastq.*$\")\n for root, dirs, files in os.walk(demux_folder):\n for f in files:\n matches=pattern.search(f)\n if matches:\n new_name=f.replace(\"{}-{}\".format(matches.group(1), matches.group(2)), \"{}_{}\".format(matches.group(1), matches.group(2)))\n os.rename(os.path.join(root, f), os.path.join(root, new_name))",
"def __return_new_file_name(self, file_name: str, file_path: str):\n\n fastq_runid = re.split('[_.]', file_name) # split on `_` or `.`\n barcode_number = file_path.split(\"/\")[-1] # get the barcode number\n fastq_or_fasta = fastq_runid[-1] # get the .fastq/.fasta file extension\n\n # create the new file name\n new_file_name = \"_\".join(fastq_runid[:3]) # join first three elements\n new_file_name += \"_%s.%s\" % (barcode_number, fastq_or_fasta) # append the barcode number and file extension\n\n return new_file_name",
"def process_barcode_in_label(read1_data,\r\n read2_data,\r\n output_bc_fastq,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n char_delineator=\":\"):\r\n header_index = 0\r\n\r\n # Check for char_delineator in sequence\r\n try:\r\n bc1_read = read1_data[header_index].split(\r\n char_delineator)[-1][0:bc1_len]\r\n # If there is an index error, it means the char_delineator wasn't found\r\n except IndexError:\r\n raise IndexError(\"Found sequence lacking character delineator. \"\r\n \"Sequence header %s, character delineator %s\" %\r\n (read1_data[header_index], char_delineator))\r\n\r\n # Create fake quality scores, using 6 here to match the existing qual fake\r\n # qual scores that were all F.\r\n bc1_qual = np.ones(len(bc1_read), dtype=np.int8) * 6\r\n if rev_comp_bc1:\r\n bc1_read = str(DNA(bc1_read).rc())\r\n\r\n if read2_data:\r\n bc2_read =\\\r\n read2_data[header_index].strip().split(\r\n char_delineator)[-1][0:bc2_len]\r\n bc2_qual = np.ones(len(bc2_read), dtype=np.int8) * 6\r\n if rev_comp_bc2:\r\n bc2_read = str(DNA(bc2_read).rc())\r\n else:\r\n bc2_read = \"\"\r\n bc2_qual = np.array([], dtype=np.int8)\r\n\r\n if not bc1_read and not bc2_read:\r\n raise ValueError(\"Came up with empty barcode sequence, please check \"\r\n \"character delineator with -s, and fastq label \"\r\n \"%s\" % read1_data[header_index])\r\n\r\n bc_lines = format_fastq_record(read1_data[header_index],\r\n bc1_read + bc2_read,\r\n np.hstack([bc1_qual, bc2_qual]))\r\n\r\n output_bc_fastq.write(bc_lines)\r\n\r\n return",
"def test_ordinary(self):\n date = datetime(2016, 11, 12)\n seq = 36\n name = star_barcode.barcode_filename(date, seq)\n self.assertEqual(\n name,\n 'Barcode_2016-W45-6_36.pdf'\n )",
"def prepare_fastq(Fastq_Root=\"2.Fastq/\", ):\n fastqs = glob.glob(Fastq_Root + \"*.fastq\")\n data = {}\n for fq in fastqs:\n s = os.path.split(fq)[1]\n s = s.replace(\".fastq\", \"\")\n if s.endswith(\"_1\"):\n sample = s.replace(\"_1\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][0] = fq\n if s.endswith(\"_2\"):\n sample = s.replace(\"_2\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][1] = fq\n if not s.endswith(\"_1\") and not s.endswith(\"_2\"):\n data[s] = [fq]\n return data",
"def getcodetofilename(index_file_parameters,bamfile_id):\n index_file=index_file_parameters['index']\n relative_flg=index_file_parameters['relative']\n \n index_dict=dict([(lntxt.rstrip().split(',')[0],lntxt.rstrip().split(',')[1]) for lntxt in open(index_file).readlines()])\n \n if bamfile_id not in index_dict:\n return ''\n \n if relative_flg==0:\n return index_dict[bamfile_id]\n else:\n relative_dir='/'.join(index_file.split('/')[0:-1])\n return '%s/%s'%(relative_dir,index_dict[bamfile_id])",
"def extract_fastq_info(fastq):\n f = gzip.open(fastq, 'rb')\n header_lines = [x.replace(\"\\n\",\"\") for x in f.readlines(10000) if x.startswith(\"@\")]\n\n for heading in header_lines:\n l = re.split(r'(\\:|#| )',heading)\n line = {}\n index_set = []\n if len(l) == 11:\n line[\"instrument\"] = l[0]\n line[\"flowcell_lane\"] = l[2]\n line[\"flowcell_tile\"] = l[4]\n try:\n line[\"pair\"] = l[10].split(\"/\")[1]\n index_set.append(l[10].split(\"/\")[0])\n except:\n pass\n elif len(l) == 21:\n line[\"instrument\"] = l[0]\n line[\"run_id\"] = l[2]\n line[\"flowcell_id\"] = l[4]\n line[\"flowcell_lane\"] = l[6]\n line[\"flowcell_tile\"] = l[8]\n line[\"pair\"] = l[14]\n line[\"filtered\"] = l[16]\n line[\"control_bits\"] = l[16]\n line[\"index\"] = l[20]\n index_set.append(l[20])\n else:\n print \"error\", l\n line[\"index\"] = most_common(index_set)\n return line",
"def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")",
"def rename_records(f, fh, i):\n from Bio import SeqIO\n import gzip as gz\n for record in SeqIO.parse(gz.open(f, 'rt'), 'fastq'):\n record.id = \"{}_{}\".format(i, record.id)\n SeqIO.write(record, fh, \"fastq\")\n return fh",
"def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"",
"def extract_id(file_path):\n # An example of file path is AlkEthOH_tripos/AlkEthOH_chain_filt1/AlkEthOH_c555.crd\n return os.path.splitext(os.path.basename(file_path))[0][9:]",
"def process_fastq(fastq_file):\n current_record = {}\n\n for name, seq, blank, quality in zip(*[iter(fastq_file)]*4):\n current_record['name'] = name.strip('\\n')\n current_record['seq'] = seq.strip('\\n')\n current_record['quality'] = quality.strip('\\n')\n\n yield current_record",
"def strip_barcodes(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_adapters_removed.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fasta\"):\n match = re.search(r'\\S*:', record.id)\n if match:\n correct = match.group().rstrip(\":\")\n else:\n correct = str(record.id)\n SEQ = str(record.seq)\n if correct in wanted_set:\n out.write(\">\" + correct + \"\\n\" + SEQ + \"\\n\")"
] | [
"0.635295",
"0.6289378",
"0.6153358",
"0.59830755",
"0.5973969",
"0.59590584",
"0.59040374",
"0.58626246",
"0.5838718",
"0.57817864",
"0.5680377",
"0.5662274",
"0.5659372",
"0.56566316",
"0.5644152",
"0.5641196",
"0.5615736",
"0.5564444",
"0.5530782",
"0.5522661",
"0.54676783",
"0.5462157",
"0.5459018",
"0.5453389",
"0.5437876",
"0.540737",
"0.5377344",
"0.5336644",
"0.5330946",
"0.5330608"
] | 0.6725366 | 0 |
Restore the alembic table for gdrive migrations with the latest correct content. Since the migration chain is disabled, this table won't be used. If the migration chain gets enabled, this table will contain the correct tag for downgrades. | def downgrade():
op.execute("""
CREATE TABLE ggrc_gdrive_integration_alembic_version (
version_num varchar(32) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8
""")
op.execute("""
INSERT INTO ggrc_gdrive_integration_alembic_version (version_num)
VALUES ('3f64d03c6c01')
""") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def downgrade():\n # # commands auto generated by Alembic - please adjust! ###\n op.drop_table('downstream_map')\n # # end Alembic commands ###",
"def upgrade():\n try:\n op.drop_table(\"ggrc_gdrive_integration_alembic_version\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in a new DB with no trace of the removed chain\n pass\n else:\n raise\n\n # The following duplicates a part of a gdrive-related migration,\n # since a bunch of old migrations in ggrc refer to meetings table.\n # This part is relevant only for db_reset (new databases), so we\n # shouldn't recreate this table in downgrade.\n try:\n op.drop_table(\"meetings\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in an old DB where meetings has been dropped in the removed chain\n pass\n else:\n raise",
"def upgrade():\n # commands auto generated by Alembic - please adjust! ###\n op.drop_table('review')\n # end Alembic commands ###",
"def downgrade():\n # commands auto generated by Alembic - please adjust! ###\n op.create_table('review',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('approved', sa.BOOLEAN(), autoincrement=False, nullable=True),\n sa.Column('user', sa.INTEGER(), autoincrement=False, nullable=True),\n sa.Column('timestamp', postgresql.TIMESTAMP(), autoincrement=False,\n nullable=True),\n sa.Column('comment', sa.TEXT(), autoincrement=False, nullable=True),\n sa.Column('epv', sa.VARCHAR(length=255), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['user'], ['user.id'], name='review_user_fkey'),\n sa.PrimaryKeyConstraint('id', name='review_pkey'))\n # end Alembic commands ###",
"def downgrade():\n op.drop_table(\"dag_owner_attributes\")",
"def downgrade():\n\n op.drop_column('shares', 'revert_to_snapshot_support')",
"def downgrade():\n op.drop_table(\"task_instance_note\")\n op.drop_table(\"dag_run_note\")",
"def downgrade():\n op.execute(\n f\"\"\"\n ALTER TABLE\n {config.CLEAN_SCHEMA}.forecasts\n RENAME COLUMN\n train_horizon\n TO\n training_horizon;\n \"\"\",\n ) # noqa:WPS355",
"def downgrade(self, revision: str = \"head\") -> \"Alembic\":\n\n self.configure()\n\n if not self.migrator_base.does_table_exists(\n \"alembic_version\"\n ) or self.is_revision_different(revision):\n PyFunceble.facility.Logger.info(\n \"Started downgrade (%r) of the database schema(s).\", revision\n )\n\n alembic_command.downgrade(self.alembic_config, revision)\n\n PyFunceble.facility.Logger.info(\n \"Finished downgrade (%r) of the database schema(s).\", revision\n )",
"def downgrade():\n with op.batch_alter_table(\"dag_run\") as batch_op:\n batch_op.drop_index(\"idx_dag_run_queued_dags\")",
"def downgrade():\n op.drop_table(\"dota_hero_win_rate\")\n op.rename_table('dota_fetch_summary', 'fetch_summary')\n op.create_table(\"fetch_history\",\n sa.Column('match_id', sa.BigInteger, primary_key=True),\n sa.Column('start_time', sa.BigInteger))\n\n op.create_table(\"fetch_win_rate\",\n sa.Column('hero_skill', sa.String(128), primary_key=True),\n sa.Column('skill', sa.Integer),\n sa.Column('hero', sa.String(128)),\n sa.Column('time_range', sa.String(128)),\n sa.Column('radiant_win', sa.Integer),\n sa.Column('radiant_total', sa.Integer),\n sa.Column('radiant_win_pct', sa.Float),\n sa.Column('dire_win', sa.Integer),\n sa.Column('dire_total', sa.Integer),\n sa.Column('dire_win_pct', sa.Float),\n sa.Column('win', sa.Integer),\n sa.Column('total', sa.Integer),\n sa.Column('win_pct', sa.Float))",
"def schema_downgrades():\n op.drop_table('vpp_licenses')",
"def downgrade(revision, sql):\n alembic_command.downgrade(alembic_config, revision, sql=sql)",
"def downgrade(self, revision):\n alembic.command.downgrade(self.alembic_config(), revision)",
"def db_downgrade(step):\n to_use = [_.strip('.sql') for _ in migration_files()]\n\n # since it's a downgrade, a reverse of the migration is essential\n to_use.reverse()\n\n generate_migration_file()\n dbd_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n\n try:\n count = 0\n for _ in to_use:\n count += 1\n if MySQLScheme.fetch_one(REVISION_EXISTS,\n **{\"args\": {'revision': _}}):\n MySQLScheme.commit(getattr(dbd_query, f\"downgrade_{_}\").sql)\n LOGGER.info(f\"successful downgrade: {_}\")\n if count == step:\n break\n except errors.ProgrammingError:\n print(\"no more downgrade left\")",
"def upgrade():\n # # commands auto generated by Alembic - please adjust! ###\n op.create_table('downstream_map',\n sa.Column('key', sa.String(length=255), nullable=False),\n sa.Column('value', sa.String(length=512), nullable=True),\n sa.PrimaryKeyConstraint('key'))\n # # end Alembic commands ###",
"def drop_restored_table(opts, stats):\n # TODO: This may no longer be needed if and when we integrate\n # restoring HMS metadata and the table is restored as \"Impala-managed\".\n print(\"--------------------------------------\")\n print(\"Dropping restored table %s\" % (get_restored_table_name(opts), ))\n print(\"--------------------------------------\")\n print(timestamp())\n cmd = 'kudu table delete %s %s' % (opts.master_addresses, opts.table_prefix +\n get_restored_table_name(opts))\n run_command(opts, cmd)",
"def downgrade():\n op.drop_column(\"revisions\", \"resource_slug\")",
"def upgrade():\n session = sa.orm.Session(bind=op.get_bind().connect())\n\n # Add create_share_from_snapshot_support attribute to shares table\n op.add_column(\n 'shares',\n sa.Column('revert_to_snapshot_support', sa.Boolean, default=False))\n\n # Set revert_to_snapshot_support on each share\n shares_table = sa.Table(\n 'shares',\n sa.MetaData(),\n sa.Column('id', sa.String(length=36)),\n sa.Column('deleted', sa.String(length=36)),\n sa.Column('revert_to_snapshot_support', sa.Boolean),\n )\n # pylint: disable=no-value-for-parameter\n update = shares_table.update().where(\n shares_table.c.deleted == 'False').values(\n revert_to_snapshot_support=False)\n session.execute(update)\n session.commit()\n\n session.close_all()",
"def downgrade():\n with op.batch_alter_table(\"task_map\") as batch_op:\n batch_op.drop_constraint(\"task_map_task_instance_fkey\", type_=\"foreignkey\")\n batch_op.create_foreign_key(\n \"task_map_task_instance_fkey\",\n \"task_instance\",\n [\"dag_id\", \"task_id\", \"run_id\", \"map_index\"],\n [\"dag_id\", \"task_id\", \"run_id\", \"map_index\"],\n ondelete=\"CASCADE\",\n )",
"def migration():",
"def rollback(migrator, database, fake=False, **kwargs):\n pass",
"def rollback(migrator, database, fake=False, **kwargs):\n\n migrator.remove_model('tea_teas_types')\n migrator.remove_model('tea_types')\n migrator.remove_model('tea_lists_items')\n migrator.remove_model('tea_lists')\n migrator.remove_model('tea_teas')\n migrator.remove_model('tea_vendors')",
"def rename_back(self):\n if (\n self.table_swapped\n and self.table_exists(self.renamed_table_name)\n and not self.table_exists(self.table_name)\n ):\n self.unlock_tables()\n self.execute_sql(sql.rename_table(self.renamed_table_name, self.table_name))",
"def downgrade():\n op.drop_table('sub_project')\n op.drop_index(op.f('ix_projecttags_tag_id'), table_name='projecttags')\n op.drop_table('projecttags')\n op.drop_table('project')\n op.drop_table('tag')",
"def upgrade_if_clean(dburl):\n alembic_cfg = alembic_config(dburl)\n engine = create_engine(dburl)\n script_ = script.ScriptDirectory.from_config(alembic_cfg)\n if not table_exists('results_schema_versions', engine):\n logger.info(\"No results_schema_versions table exists, which means that this installation \"\n \"is fresh. Upgrading db.\")\n upgrade_db(dburl=dburl)\n return\n with engine.begin() as conn:\n current_revision = conn.execute(\n 'select version_num from results_schema_versions limit 1'\n ).scalar()\n logger.debug(\"Database's triage_metadata schema version is %s\", current_revision)\n triage_head = script_.get_current_head()\n logger.debug(\"Code's triage_metadata schema version is %s\", triage_head)\n database_is_ahead = not any(\n migration.revision == current_revision\n for migration in script_.walk_revisions()\n )\n if database_is_ahead:\n raise ValueError(\n f\"Your database's results schema version, {current_revision}, is not a known \"\n \"revision to this version of Triage. Usually, this happens if you use a branch \"\n \"with a new results schema version and upgrade the database to that version. \"\n \"To use this version of Triage, you will likely need to check out that branch \"\n f\"and downgrade to {triage_head}\",\n )\n elif current_revision != triage_head:\n raise ValueError(\n f\"Your database's results schema revision, {current_revision}, is out of date \"\n \"for this version of Triage. However, your database can be upgraded to this \"\n \"revision. If you would like to upgrade your database from the console, and \"\n \"you've installed Triage, you may execute `triage db upgrade`. \"\n \"If the `triage` command is unavailable, (because you are running Triage directly \"\n \" from a repository checkout), then `manage alembic upgrade head`. \"\n \"The database changes may take a long time on a heavily populated database. \"\n \"Otherwise, you can also downgrade your Triage version to match your database.\"\n )",
"def to_rollback(self, migrations):\n applied = self.get_applied_migration_hashes()\n ms = (m for m in migrations if m.hash in applied)\n return migrations.__class__(\n reversed(topological_sort(ms)), migrations.post_apply\n )",
"def upgrade():\n op.execute(\n f\"\"\"\n ALTER TABLE\n {config.CLEAN_SCHEMA}.forecasts\n RENAME COLUMN\n training_horizon\n TO\n train_horizon;\n \"\"\",\n ) # noqa:WPS355",
"def downgrade():\n with op.batch_alter_table(\"slot_pool\") as batch_op:\n batch_op.drop_column(\"include_deferred\")",
"def _create_intermediate_old_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n amnesty_granted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_old_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_old_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration"
] | [
"0.6851007",
"0.68070155",
"0.6506316",
"0.64786386",
"0.64288044",
"0.6319381",
"0.61630434",
"0.59586895",
"0.5892953",
"0.5826305",
"0.58031607",
"0.57475394",
"0.57333404",
"0.5704555",
"0.566395",
"0.5661779",
"0.56194675",
"0.5548934",
"0.5502293",
"0.549745",
"0.5483659",
"0.5481394",
"0.5462713",
"0.5450196",
"0.5439383",
"0.5409868",
"0.54062116",
"0.53919876",
"0.53743905",
"0.53069735"
] | 0.70814365 | 0 |
Browse the folder to locate the json file | def browse_folder(self):
# Get the file name from the user selection
filename = tkinter.filedialog.askopenfilename(initialdir=".", title="Select JSON file",
filetypes=(("json files", "*.json"), ("txt files", "*.txt"),
("All files", "*.*")))
# Update the file name to the file name text entry
self.filename_entry.delete(0, tkinter.END)
self.filename_entry.insert(0, filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def test_files(self, location):\n for filename in os.listdir(location):\n with open(location + '/' + filename) as json_file:\n data = json.load(json_file)\n self.test_data(data)",
"def openFolder(self, path=None):\n if not path:\n dialog = OpenDialog()\n dialog.set_folders_only(True)\n path = dialog.getExistingDirectory(self, \"Open Folder\", '')\n\n if path:\n self.handleFileChanged(path)#, filename='index.rst')\n with open('./config.json', 'r') as f:\n c = json.load(f)\n c['last_folder'] = path\n with open('./config.json', 'w') as f:\n json.dump(c, f)",
"def _browse_folder(self):\n folder = QtWidgets.QFileDialog.getExistingDirectory(\n parent=self,\n caption=\"Select folder\",\n dir=self.folder_line_edit.text(),\n options=QtWidgets.QFileDialog.ShowDirsOnly |\n QtWidgets.QFileDialog.DontResolveSymlinks)\n self.folder_line_edit.setText(folder)",
"def select_json_file(self):\n new_file_path_json = QFileDialog.getOpenFileName()[0]\n if not new_file_path_json == '' and new_file_path_json[-len('.json'):] == '.json':\n self.clear_all_plots()\n self.file_path_json = new_file_path_json\n self.resistance_json_path = None",
"def Browse(self):\n \n #run the folder manipulation routines...\n self.dir_opt = options = {}\n options['mustexist'] = False\n options['title'] = 'Select a directory...'\n \n #launch the directory selector\n self.FolderPath = tkFileDialog.askdirectory(**self.dir_opt)\n \n self.FolderPath = os.path.abspath(self.FolderPath)\n \n #set the folder path to the entry window\n self.FolderEntry.delete(0, tk.END)\n self.FolderEntry.insert(0, self.FolderPath)",
"def load_json(filename, folder):\n with open(os.path.join(folder, filename), 'r') as f:\n return json.load(f)",
"def download_json(self):\n # make the path dir if it doesn't exist\n if not self.path.is_dir():\n self.path.mkdir(parents=True)\n\n # open a file, send a request for the json and write to the file\n with self.file.open('w') as json_file:\n try:\n json_data = json.dumps(requests.get(self.endpoint).json())\n json_file.write(json_data)\n except json.JSONDecodeError as error:\n print(\"Error fetching json: \", error)",
"def explore(self):\n path = self.req_get.get('path') or ''\n root_path = self.root_path\n abspath = browser.absolute_path(path, root_path)\n try:\n folders, filenames = browser.get_files(self.extensions, abspath,\n root_path, relative=True)\n except IOError, e:\n if os.path.isfile(abspath):\n raise exc.HTTPFound()\n # TODO: make sure we don't have absolute url in the error message.\n raise exc.HTTPNotFound(str(e))\n\n lis = []\n\n for folder in folders:\n lis += [{\n 'name': os.path.basename(folder),\n 'type': 'folder',\n 'path': folder,\n # status will be updated in js\n 'status': None,\n }]\n for filename in filenames:\n lis += [{\n 'name': os.path.basename(filename),\n 'type': 'file',\n 'path': filename,\n # status will be updated in js\n 'status': None,\n }]\n # We want to order alphabetically by columns\n n = int(math.ceil(len(lis) / 2.0))\n return filter(bool, list(sum(izip_longest(lis[:n], lis[n:]), ())))",
"def openFile(self, path):\n with open(path) as f:\n return json.load(f)",
"def add_json_file_path(self):\n found = False\n for folder in json_folders:\n try_path = os.path.join(folder, 'part{}.json'.format(self.cbg))\n self.logger.debug(f'Considering path {try_path}.')\n if os.path.exists(try_path):\n found = True\n self.json_file_path = try_path\n break\n # Return True if the file is found.\n if found:\n return True\n else:\n self.logger.warning('cbg {} does not have a corresponding json file.'.format(self.cbg))\n return False",
"def browse_input(self):\n path = getAFolder()\n if len(path) > 0:\n self.in_directory.setText(path)\n self.out_directory.setText(join(path, 'merged_results'))\n self.preprocessfolder()",
"def browse_folder(self, subdir=\".\"):\n if self.show_save_action:\n self.ui_Action.setEnabled(True)\n if self.show_dirs_only:\n self.ui_Action.setEnabled(True)\n self.ui_DirList.clear()\n if subdir == \".\":\n _sub_dir = self.active_url\n else:\n _sub_dir = subdir\n if len(self.directory_history) == 0:\n self.directory_history.append(_sub_dir)\n for item in reversed(self.directory_history):\n self.ui_DirList.addItem(item)\n self.ui_DirList.setCurrentIndex(self.last_dir_index)",
"def open_file_browser(path: str):\n call(file_browser + [path])",
"def open_json(path):\n with open(path, \"r\") as json_data_file:\n data = json.load(json_data_file)\n return data",
"def check_for_json_folder(check_path):\n check_abspath = os.path.abspath(check_path)\n json_folders = [\"_JSON\", \"JSON\"]\n for jf in json_folders:\n if jf in check_abspath:\n print(\"{} folder exists : {}\".format(jf, check_abspath))\n top_path, base_path = check_abspath.split(\"{}/\".format(jf))\n out_path = os.path.dirname(os.path.join(top_path, base_path))\n if os.path.exists(out_path):\n print(\"Path exists : {}\".format(out_path))\n return out_path\n else:\n print(\"Path does not exist : {}\".format(out_path))\n print(\"Please create this folder and try again\")\n exit(1)",
"def load_json(directory=None):\n \n if directory:\n chdir(directory)\n with open('memedPost_json_data.txt') as json_data:\n data = load(json_data)\n return data\n else:\n chdir(curdir)\n with open('memedPost_json_data.txt') as json_data:\n data = load(json_data)\n return data",
"def read(self, filepath, dirpath=None):\n try:\n #filepath = os.path.normpath(filepath)\n with open(filepath) as f_p:\n try:\n self.json_dict = json.load(f_p)\n self.filepath = filepath\n return self.json_dict\n except ValueError as err:\n print('JSON content error in \"%s\"' % filepath)\n print(err)\n except (IOError, FileNotFoundError):\n print(\n 'Failed to open JSON file \"%s\" \"%s\"' %\n (os.path.abspath(''), filepath))\n raise NoSuchFileError(filepath)\n raise JsonContentError",
"def onBrowse(self, event):\n data_folder = \"\"\n\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n )\n default_path = self.m_textCtrl_searchfolder.GetValue()\n if default_path != '':\n default_path = os.path.dirname(default_path)\n dlg.SetPath(default_path)\n\n if dlg.ShowModal() == wx.ID_OK:\n self.m_textCtrl_searchfolder.Clear()\n\n self.m_textCtrl_searchfolder.write(dlg.GetPath())\n self.m_search_folder = dlg.GetPath()\n dlg.Destroy()",
"def test_find_raw_file_json(self):\n\n this_file_name = probsevere_io.find_raw_file(\n top_directory_name=TOP_DIRECTORY_NAME,\n unix_time_sec=VALID_TIME_UNIX_SEC,\n file_extension=probsevere_io.JSON_FILE_EXTENSION,\n raise_error_if_missing=False)\n\n self.assertTrue(this_file_name == JSON_FILE_NAME)",
"def select_resistance_json_file(self):\n new_file_path_json = QFileDialog.getOpenFileName()[0]\n if not new_file_path_json == '' and new_file_path_json[-len('.json'):] == '.json':\n self.resistance_line.clear()\n self.resistance_json_path = new_file_path_json",
"def json_files_from_folder(folder: str) -> list:\n\n files = []\n for file_name in os.listdir(folder):\n splitted_filename = file_name.split(\".\")\n if splitted_filename[-1] == \"json\":\n files.append(file_name)\n return files",
"def go(self):\n self.analyse_folder(BASE)\n self.analyse_folder(JS_FOLDER)",
"def read_json():\n json_path = Path.home() / Path(\"pdf2notion.json\")\n if json_path.exists():\n try:\n with open(json_path) as f:\n json_data = json.load(f)\n return json_data\n except json.decoder.JSONDecodeError as e:\n print(e)\n print(type(e))",
"def select_file():\n filename = filedialog.askopenfilename(\n initialdir=os.getcwd(), title=\"Select Backup file...\",\n filetypes=((\"JSON Files\", \"*.json\"),\n (\"Text Files\", \"*.txt\"),\n (\"All Files\", \"*.*\")))\n self.init_data(filename)",
"def read_json_file_to_be_edited(self):\n _json_file = self.job[\"JSONfileToBeEdited\"]\n if _json_file in self.config:\n # Substitute the path defined in the macro\n _json_file = self.config[_json_file]\n self.json_o.read(_json_file)",
"def import_json(path):\n click.echo(\"WARNING: Continue will delete all data in the databse\")\n if not click.confirm('Do you want to continue?'):\n raise click.Abort()\n\n init_db(False)\n import_clean_json(path)\n click.echo('JSON data has been imported')",
"def menu_browse_folder(self, event=None):\n if self.app.children:\n self.parentPanel.browse_folder()",
"def fullpath(data_folder, name):\n return os.path.join(data_folder, f\"{alias(name)}.json\")"
] | [
"0.62802076",
"0.62802076",
"0.6238737",
"0.61273605",
"0.60416484",
"0.5966435",
"0.59093004",
"0.59044904",
"0.5819813",
"0.5813722",
"0.57989556",
"0.5781541",
"0.56752443",
"0.5675066",
"0.5660146",
"0.5640253",
"0.56322503",
"0.5631106",
"0.5621598",
"0.56130177",
"0.5605892",
"0.5592427",
"0.55512613",
"0.55280083",
"0.5495325",
"0.54776347",
"0.5437914",
"0.5430294",
"0.54289913",
"0.54192424"
] | 0.7507324 | 0 |
Build/load the barcode using the json file | def build_barcode(self):
# Get the file name/path to the json file
filename = self.filename_entry.get()
# Check if the filename is given
if not os.path.exists(filename):
showerror("JSON File Not Exists", "JSON file not exists.\n"
"Please check the JSON file path.")
return
try:
# Generate the barcode from json file use the barcode generator
barcode_type = self.type_variable.get()
self.barcode_generator.generate_barcode_from_json(filename, barcode_type)
except:
showerror("Error Occurred in Loading JSON Barcode", "An error occurred in loading the JSON barcode.\n\n"
"Please make sure the type of Barcode saved\n"
"in the JSON file is correctly specified.\n"
"Color or Brightness")
return
# Get the name of the json file
start_pos = filename.rfind("/") + 1
if start_pos < 0:
start_pos = 0
# Use that as the key to the newly built/loaded barcode
barcode_name = filename[start_pos: filename.rfind(".json")]
self.barcode_stack[barcode_name] = copy.deepcopy(self.barcode_generator.get_barcode())
# Get which barcode in the main window to replace with
which_barcode = self.barcode_option.get()
if which_barcode == "Barcode 1":
self.barcode_1.__dict__ = self.barcode_generator.get_barcode().__dict__.copy()
self.barcode_1.__class__ = self.barcode_generator.get_barcode().__class__
elif which_barcode == "Barcode 2":
self.barcode_2.__dict__ = self.barcode_generator.get_barcode().__dict__.copy()
self.barcode_2.__class__ = self.barcode_generator.get_barcode().__class__
# Clear the plotted axes in the main window
self.axes[0][0].cla()
self.axes[1][0].cla()
self.axes[0][1].cla()
self.axes[1][1].cla()
# Always plotted the barcode with longer width below
if self.barcode_1.get_barcode().shape[1] > self.barcode_2.get_barcode().shape[1]:
temp = copy.deepcopy(self.barcode_1)
self.barcode_1.__dict__ = self.barcode_2.__dict__.copy()
self.barcode_2.__dict__ = temp.__dict__.copy()
# Update the graph/plotted figure in the main window
update_graph(barcode_1=self.barcode_1, barcode_2=self.barcode_2, axes=self.axes)
# Redraw the main window
self.canvas.draw()
# Quit the main window
self.window.destroy()
showinfo("Barcode Loaded Successfully", "{:s} Barcode has been successfully loaded into the memory.\n\n"
"Name key in memory: {:20s}".format(barcode_type, barcode_name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')",
"def __init__(self, designfile):\r\n with open(designfile, 'r') as fp:\r\n self.design = json.load(fp)",
"def json2register(self):\n try:\n with open('registered.json', 'r') as file:\n self.final_dicc = json.load(file)\n except (FileNotFoundError, ValueError, json.decoder.JSONDecodeError):\n pass",
"def __init__(self):\n self.data = json.loads(resource_string(__name__, 'data/oz_postcodes.json'))",
"def read(self):\n self.data = {}\n if path.isfile(self.json_file):\n with open(self.json_file) as data_file:\n self.data = json.load(data_file)\n data_file.close()\n if (self.custom_path and self.is_only\n and path.exists(self.custom_path)):\n self.data[\"icons_path\"].append(self.custom_path)\n self.check_paths()\n be_added = (len(self.data[\"icons_path\"]) > 0\n and len(self.data[\"app_path\"]) > 0)\n if be_added:\n self.dont_install = False\n if isinstance(self.data[\"icons\"], list):\n self.data[\"icons\"] = get_iterated_icons(self.data[\"icons\"])\n self.get_app_icons()",
"def load_barcodes(self, path):\n self._barcodes = pickle.load(open(path, 'rb'))",
"def from_dict(self, json_data: Dict) -> None:\n self.package_name = json_data[\"name\"]\n # self.package_path = Path(json_data[\"path\"])\n self.description = json_data[\"description\"]\n self.mpy_version = json_data[\"mpy_version\"]\n self._publish = json_data[\"publish\"]\n self.hash = json_data[\"hash\"]\n self.stub_hash = json_data[\"stub_hash\"]\n # create folder\n if not self.package_path.exists():\n self.package_path.mkdir(parents=True, exist_ok=True)\n # create the pyproject.toml file\n self.create_update_pyproject_toml()\n # set pkg version after creating the toml file\n self.pkg_version = json_data[\"pkg_version\"]\n self.stub_sources = []\n for name, path in json_data[\"stub_sources\"]:\n if path.startswith(\"stubs/\"):\n path = path.replace(\"stubs/\", \"\")\n self.stub_sources.append((name, Path(path)))",
"def load_file(self):\n self._check_setup()\n json_str = self.get_json_file()\n if json_str is None:\n return\n\n if not self._is_json_str():\n with open(json_str, 'r') as f:\n jf = json.load(f)\n else:\n jf = json.loads(json_str)\n\n\n self.jf = jf\n\n target = jf['target']\n if isinstance(target, str):\n target = eval(target)\n\n goal = jf['goal']\n if isinstance(goal, str):\n goal = eval(goal)\n\n self.gen_target_pos = np.array(target)\n self.gen_goal_pos = np.array(goal)\n\n if 'place_walls' in jf:\n self.place_walls = jf['place_walls']\n\n if self.get_is_rnd():\n self.rnd_map = jf['rnd']\n self.env_jf = jf['env']",
"def __init__(self):\n with open('info.json') as file:\n self.info = json.load(file)\n file.close()\n self.count = 0",
"def main(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n\n course_dict = {}\n course_dict['course_id'] = str(os.path.split(filename.strip('/'))[-1])\n course_dict['blocks'] = build_course_map(data)\n\n filename = '%s' % course_dict['course_id']\n filepath = os.path.join('../input/', filename)\n\n with open(filepath, 'w') as outfile:\n json.dump(course_dict, outfile, indent=4)",
"def __init__(self, filename):\n #Opening the file and storing its contents in a list\n with open(filename) as fp:\n self.data = json.load(fp)",
"def build(self, file_number, data):\n pass",
"def __init__(self):\n self.signs = None\n self.command = None\n with open('signs_path.json', 'r') as f:\n self.signs = json.load(f)\n\n for sign, path in self.signs.items():\n self.signs[sign] = cv2.imread(path)",
"def test_generate_barcode_upce(self):\n pass",
"def restore(self, filename=\".azimint.json\"):\n logger.debug(\"Restore\")\n if not op.isfile(filename):\n logger.error(\"No such file: %s\" % filename)\n return\n data = json.load(open(filename))\n setup_data = { \"poni\": self.poni.setText,\n# \"detector\": self.all_detectors[self.detector.getCurrentIndex()],\n \"wavelength\":lambda a:self.wavelength.setText(str_(a)),\n \"splineFile\":lambda a:self.splineFile.setText(str_(a)),\n \"pixel1\":lambda a: self.pixel1.setText(str_(a)),\n \"pixel2\":lambda a:self.pixel2.setText(str_(a)),\n \"dist\":lambda a:self.dist.setText(str_(a)),\n \"poni1\":lambda a:self.poni1.setText(str_(a)),\n \"poni2\":lambda a:self.poni2.setText(str_(a)),\n \"rot1\":lambda a:self.rot1.setText(str_(a)),\n \"rot2\":lambda a:self.rot2.setText(str_(a)),\n \"rot3\":lambda a:self.rot3.setText(str_(a)),\n \"do_dummy\": self.do_dummy.setChecked,\n \"do_dark\": self.do_dark.setChecked,\n \"do_flat\": self.do_flat.setChecked,\n \"do_polarization\": self.do_polarization.setChecked,\n \"val_dummy\":lambda a: self.val_dummy.setText(str_(a)),\n \"delta_dummy\":lambda a: self.delta_dummy.setText(str_(a)),\n \"do_mask\": self.do_mask.setChecked,\n \"mask_file\":lambda a:self.mask_file.setText(str_(a)),\n \"dark_current\":lambda a:self.dark_current.setText(str_(a)),\n \"flat_field\":lambda a:self.flat_field.setText(str_(a)),\n \"polarization_factor\":self.polarization_factor.setValue,\n \"nbpt_rad\":lambda a:self.nbpt_rad.setText(str_(a)),\n \"do_2D\":self.do_2D.setChecked,\n \"nbpt_azim\":lambda a:self.nbpt_azim.setText(str_(a)),\n \"chi_discontinuity_at_0\": self.chi_discontinuity_at_0.setChecked,\n \"do_radial_range\": self.do_radial_range.setChecked,\n \"do_azimuthal_range\": self.do_azimuthal_range.setChecked,\n \"do_poisson\": self.do_poisson.setChecked,\n \"radial_range_min\":lambda a:self.radial_range_min.setText(str_(a)),\n \"radial_range_max\":lambda a:self.radial_range_max.setText(str_(a)),\n \"azimuth_range_min\":lambda a:self.azimuth_range_min.setText(str_(a)),\n \"azimuth_range_max\":lambda a:self.azimuth_range_max.setText(str_(a)),\n \"do_solid_angle\": self.do_solid_angle.setChecked,\n }\n for key, value in setup_data.items():\n if key in data:\n value(data[key])\n if \"unit\" in data:\n for unit, widget in self.units.items():\n if unit.REPR == data[\"unit\"] and widget is not None:\n widget.setChecked(True)\n break\n if \"detector\" in data:\n detector = data[\"detector\"].lower()\n if detector in self.all_detectors:\n self.detector.setCurrentIndex(self.all_detectors.index(detector))",
"def build(self, data: dict):",
"def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)",
"def dump(self, filename=\".azimint.json\"):\n logger.info(\"Dump!\")\n to_save = { \"poni\": str_(self.poni.text()).strip(),\n \"detector\": str_(self.detector.currentText()).lower(),\n \"wavelength\":float_(self.wavelength.text()),\n \"splineFile\":str_(self.splineFile.text()).strip(),\n \"pixel1\": float_(self.pixel1.text()),\n \"pixel2\":float_(self.pixel2.text()),\n \"dist\":float_(self.dist.text()),\n \"poni1\":float_(self.poni1.text()),\n \"poni2\":float_(self.poni2.text()),\n \"rot1\":float_(self.rot1.text()),\n \"rot2\":float_(self.rot2.text()),\n \"rot3\":float_(self.rot3.text()),\n \"do_dummy\": bool(self.do_dummy.isChecked()),\n \"do_mask\": bool(self.do_mask.isChecked()),\n \"do_dark\": bool(self.do_dark.isChecked()),\n \"do_flat\": bool(self.do_flat.isChecked()),\n \"do_polarization\":bool(self.do_polarization.isChecked()),\n \"val_dummy\":float_(self.val_dummy.text()),\n \"delta_dummy\":float_(self.delta_dummy.text()),\n \"mask_file\":str_(self.mask_file.text()).strip(),\n \"dark_current\":str_(self.dark_current.text()).strip(),\n \"flat_field\":str_(self.flat_field.text()).strip(),\n \"polarization_factor\":float_(self.polarization_factor.value()),\n \"nbpt_rad\":int_(self.nbpt_rad.text()),\n \"do_2D\":bool(self.do_2D.isChecked()),\n \"nbpt_azim\":int_(self.nbpt_azim.text()),\n \"chi_discontinuity_at_0\": bool(self.chi_discontinuity_at_0.isChecked()),\n \"do_solid_angle\": bool(self.do_solid_angle.isChecked()),\n \"do_radial_range\": bool(self.do_radial_range.isChecked()),\n \"do_azimuthal_range\": bool(self.do_azimuthal_range.isChecked()),\n \"do_poisson\": bool(self.do_poisson.isChecked()),\n \"radial_range_min\":float_(self.radial_range_min.text()),\n \"radial_range_max\":float_(self.radial_range_max.text()),\n \"azimuth_range_min\":float_(self.azimuth_range_min.text()),\n \"azimuth_range_max\":float_(self.azimuth_range_max.text()),\n }\n for unit, widget in self.units.items():\n if widget is not None and widget.isChecked():\n to_save[\"unit\"] = unit.REPR\n break\n else:\n logger.warning(\"Undefined unit !!!\")\n try:\n with open(filename, \"w\") as myFile:\n json.dump(to_save, myFile, indent=4)\n except IOError as error:\n logger.error(\"Error while saving config: %s\" % error)\n else:\n logger.debug(\"Saved\")\n return to_save",
"def dump(self, filename=\".azimint.json\"):\n print \"Dump!\"\n to_save = { \"poni\": str(self.poni.text()).strip(),\n \"detector\": str(self.detector.currentText()).lower(),\n \"wavelength\":float_(self.wavelength.text()),\n \"splineFile\":str(self.splineFile.text()).strip(),\n \"pixel1\": float_(self.pixel1.text()),\n \"pixel2\":float_(self.pixel2.text()),\n \"dist\":float_(self.dist.text()),\n \"poni1\":float_(self.poni1.text()).strip(),\n \"poni2\":float_(self.poni2.text()).strip(),\n \"rot1\":float_(self.rot1.text()).strip(),\n \"rot2\":float_(self.rot2.text()).strip(),\n \"rot3\":float_(self.rot3.text()).strip(),\n \"do_dummy\": bool(self.do_dummy.isChecked()),\n \"do_mask\": bool(self.do_mask.isChecked()),\n \"do_dark\": bool(self.do_dark.isChecked()),\n \"do_flat\": bool(self.do_flat.isChecked()),\n \"do_polarization\":bool(self.do_polarization.isChecked()),\n \"val_dummy\":float_(self.val_dummy.text()).strip(),\n \"delta_dummy\":float_(self.delta_dummy.text()).strip(),\n \"mask_file\":str(self.mask_file.text()).strip(),\n \"dark_current\":str(self.dark_current.text()).strip(),\n \"flat_field\":str(self.flat_field.text()).strip(),\n \"polarization_factor\":float_(self.polarization_factor.value()),\n \"nbpt_rad\":int_(self.rad_pt.text()),\n \"do_2D\":bool(self.do_2D.isChecked()),\n \"nbpt_azim\":int_(self.nbpt_rad.text()),\n \"chi_discontinuity_at_0\": bool(self.chi_discontinuity_at_0.isChecked()),\n \"do_radial_range\": bool(self.do_radial_range.isChecked()),\n \"do_azimuthal_range\": bool(self.do_azimuthal_range.isChecked()),\n \"radial_range_min\":float_(self.radial_range_min.text()),\n \"radial_range_max\":float_(self.radial_range_max.text()),\n \"azimuth_range_min\":float_(self.azimuth_range_min.text()),\n \"azimuth_range_max\":float_(self.azimuth_range_max.text()),\n }\n if self.q_nm.isChecked():\n to_save[\"unit\"] = \"q_nm^-1\"\n elif self.tth_deg.isChecked():\n to_save[\"unit\"] = \"2th_deg\"\n elif self.r_mm.isChecked():\n to_save[\"unit\"] = \"r_mm\"\n with open(filename, \"w\") as myFile:\n json.dump(to_save, myFile, indent=4)\n logger.debug(\"Saved\")",
"def restore(self, filename=\".azimint.json\"):\n logger.debug(\"Restore\")\n if not os.path.isfile(filename):\n logger.error(\"No such file: %s\" % filename)\n return\n data = json.load(open(filename))\n setup_data = { \"poni\": self.poni.setText,\n# \"detector\": self.all_detectors[self.detector.getCurrentIndex()],\n \"wavelength\":self.wavelength.setText,\n \"splineFile\":self.splineFile.setText,\n \"pixel1\": self.pixel1.setText,\n \"pixel2\":self.pixel2.setText,\n \"dist\":self.dist.setText,\n \"poni1\":self.poni1.setText,\n \"poni2\":self.poni2.setText,\n \"rot1\":self.rot1.setText,\n \"rot2\":self.rot2.setText,\n \"rot3\":self.rot3.setText,\n \"do_dummy\": self.do_dummy.setChecked,\n \"do_dark\": self.do_dark.setChecked,\n \"do_flat\": self.do_flat.setChecked,\n \"do_polarization\": self.do_polarization.setChecked,\n \"val_dummy\": self.val_dummy.setText,\n \"delta_dummy\": self.delta_dummy.setText,\n \"do_mask\": self.do_mask.setChecked,\n \"mask_file\":self.mask_file.setText,\n \"dark_current\":self.dark_current.setText,\n \"flat_field\":self.flat_field.setText,\n \"polarization_factor\":self.polarization_factor.setValue,\n \"nbpt_rad\":self.rad_pt.setText,\n \"do_2D\":self.do_2D.setChecked,\n \"nbpt_azim\":self.azim_pt.setText,\n \"chi_discontinuity_at_0\": self.chi_discontinuity_at_0.setChecked,\n \"do_radial_range\": self.do_radial_range.setChecked,\n \"do_azimuthal_range\": self.do_azimuthal_range.setChecked,\n \"radial_range_min\":self.radial_range_min.setText,\n \"radial_range_max\":self.radial_range_max.setText,\n \"azimuth_range_min\":self.azimuth_range_min.setText,\n \"azimuth_range_max\":self.azimuth_range_max.setText,\n }\n for key, value in setup_data.items():\n if key in data:\n value(data[key])\n if \"unit\" in data:\n unit = data[\"unit\"].lower()\n if unit == \"q_nm^-1\":\n self.q_nm.setChecked(1)\n elif unit == \"2th_deg\":\n self.tth_deg.setChecked(1)\n elif unit == \"r_mm\":\n self.r_mm.setChecked(1)\n if \"detector\" in data:\n detector = data[\"detector\"].lower()\n if detector in self.all_detectors:\n self.detector.setCurrentIndex(self.all_detectors.index(detector))",
"def load(self):\n logger.info(\"Loading File!!!!!!!!!!!\")\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file',\n BASE_DIR,\"Template Files(*.json)\") #creates file dialog\n with open(file) as template_json:\n data = json.load(template_json) #json template data\n logger.debug(data)\n for tab in data[\"tabs\"]:\n #create new tab for each specified in data\n tabInfo = data[\"tabs\"][tab]\n newTab =self.tabwidget.newTab(tabInfo[\"name\"], image = tabInfo[\"image\"]) #make tab\n for btn in tabInfo[\"buttons\"]: #make buttons in each tab\n btnInfo = tabInfo[\"buttons\"][btn]\n newbtn = self.newDragBtn(btnInfo[\"color\"], btnInfo[\"connections\"],btnInfo[\"name\"], newTab, btnInfo[\"width\"], btnInfo[\"height\"],newTab)\n newbtn.move(btnInfo[\"x\"],btnInfo[\"y\"]) #move button to location on screen",
"def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)",
"def __init__(self, file):\n self.__config = file\n with open(self.__config) as json_file:\n data = json.load(json_file)\n self.__data = data",
"def json2mask(txt, mattr, filepath):\n img = np.zeros((2048, 2448, 3),\n dtype=np.uint8)\n info = json.loads(txt)['codes']\n for code in info:\n barcode_area = (slice(code['y0'], code['y1']),\n slice(code['x0'], code['x1']), slice(0, 3))\n leny = barcode_area[0].stop - barcode_area[0].start\n lenx = barcode_area[1].stop - barcode_area[1].start\n img[barcode_area] = 1\n if leny * lenx > (2048 * 2448) / 16: # if barcodearea larger than a\n # 16th of the original image\n return None\n return img",
"def import_json(self):\n with open(self.json_file_path, 'r') as json_file:\n self.json = json.load(json_file)\n self.logger.debug('Json loaded for cbg {}.'.format(self.cbg))\n self.non_empty = 'businesses' in self.json\n return None",
"def __init__(self, bc_file):\r\n self.bc_file = bc_file\r\n self.beta = []\r\n self.code = []\r\n self.load_bc()",
"def __init__(self, json):\n\n self.height = json[\"height\"]\n self.width = json[\"width\"]\n self.src = json[\"src\"]",
"def test_generate_barcode_upca(self):\n pass",
"def parse_processing_file(file, mismatches, suffix, lane, outdir, ignore_failed_lanes=False):\n barcodes = {}\n labels = {}\n with open(file) as data_file:\n data = json.load(data_file)\n\n run_type = data['flowcell']['run_type']\n index_len = data['flowcell']['index_length']\n # Only some flowcell types need to treat different lanes differently\n if run_type == \"NextSeq 500\":\n lane_libraries = data['libraries']\n elif run_type == \"HISEQ V4\":\n lane_libraries = [lib for lib in data['libraries'] if lib['lane'] == lane]\n elif run_type == \"HiSeq 4000\":\n lane_libraries = [lib for lib in data['libraries'] if lib['lane'] == lane]\n # TODO: Is this always correct?\n elif run_type.startswith(\"Novaseq 6000\"):\n lane_libraries = [lib for lib in data['libraries'] if lib['lane'] == lane]\n else:\n logging.warn(\n \"Run type %s not supported; using all libraries\" % run_type)\n lane_libraries = data['libraries']\n\n for library in lane_libraries:\n\n if library.get('alignments', []):\n label = library['alignments'][0]['sample_name']\n else:\n label = \"%s_%s_L%03d\" % (\n library['samplesheet_name'], library['barcode_index'], library['lane'])\n\n if ignore_failed_lanes and library[\"failed\"]:\n logging.info(\"Ignoring failed library %s\" % label)\n continue\n\n project_dir = \"Project_%s\" % library['project']\n sample_dir = \"Sample_%s\" % library['samplesheet_name']\n library_dir = os.path.join(outdir, project_dir, sample_dir)\n outfile_name = os.path.join(\n library_dir, \"%s%s.fastq.gz\" % (label, suffix))\n\n try:\n os.makedirs(library_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n barcode_indices = library['barcode_index'].split(\"-\")\n barcode1 = barcode_indices[0]\n barcode2 = barcode_indices[1] if len(barcode_indices) > 1 else \"\"\n\n # If any barcodes are longer than the index length\n # Trim those barcodes down to match.\n if len(barcode1) > index_len:\n barcode1 = barcode1[:index_len]\n if len(barcode2) > index_len:\n barcode2 = barcode2[:index_len]\n\n lengths.add((len(barcode1), len(barcode2)))\n\n for b1 in mismatch(barcode1, mismatches):\n for b2 in mismatch(barcode2, mismatches):\n barcode = (b1, b2)\n # TODO: This can be smarter\n if barcode in barcodes:\n logging.error(\n \"Barcode %s already taken, lower --mismatches! (taken by %s+%s)\" % (barcode, barcode1, barcode2))\n sys.exit(1)\n barcodes[barcode] = label\n\n labels[label] = {\"filtered\": 0, \"unfiltered\": 0, \"total\": 0}\n # TODO: Warning! this will overwrite files!\n outfile = open(outfile_name, 'wb')\n labels[label][\"fh\"] = outfile\n labels[label][\"out\"] = subprocess.Popen(\n ['gzip', '-7'], stdout=outfile, stdin=subprocess.PIPE)\n\n logging.info(\"Mapping %d barcodes to %s libraries\" %\n (len(barcodes), len(lane_libraries)))\n logging.debug(barcodes)\n\n return barcodes, labels",
"def build():"
] | [
"0.58827484",
"0.578684",
"0.5714043",
"0.56993884",
"0.5639737",
"0.5583572",
"0.54636484",
"0.5450821",
"0.54405653",
"0.54265416",
"0.5424942",
"0.5409185",
"0.53689647",
"0.53638726",
"0.5357695",
"0.5356618",
"0.53302157",
"0.5321161",
"0.53171873",
"0.53061134",
"0.52957726",
"0.5283275",
"0.52523965",
"0.52486795",
"0.52340543",
"0.52239335",
"0.5222903",
"0.5211691",
"0.52004635",
"0.5190417"
] | 0.78892285 | 0 |
this function resizes the received video to size (480,480) and captures resized frames during the process | def resizeVideo(n, format, vpath, cpath):
start_time = time.time()
t = time.process_time()
vidcap = cv2.VideoCapture(vpath)
success, image = vidcap.read()
cv2.namedWindow('image')
cv2.imshow('image', image)
cv2.waitKey(1)
count = 0
CODE = 'XVID'
# default save to avi
CODE1 = 'XVID'
format1 = '.avi'
CODE2 = 'WMV1' # OR WMV2
format2 = '.wmv'
CODE3 = 'FLV1'
format3 = '.flv'
CODE4 = 'MPEG'
format4 = '.mp4'
if (format == format1):
CODE = CODE1
if (format == format2):
CODE = CODE2
if (format == format3):
CODE = CODE3
if (format == format4):
CODE = CODE4
if format == '':
CODE = CODE1
format = '.avi'
print("default save the resized video to .avi")
# fourcc used for saving videos
fourcc = cv2.VideoWriter_fourcc(*CODE)
# video saved to the same path as the capatured frame
out = cv2.VideoWriter((str(cpath) + 'ResizedVideo%d' % n + format), fourcc, vidcap.get(5), (480, 480))
infotxt = open(cpath + 'Resize Info' + '.txt', 'w')
infotxt.write(vpath + '\n')
print("Resizing...")
while success:
if success:
resize = cv2.resize(image, (480, 480), interpolation=cv2.INTER_LINEAR)
# frame name save as Frame%5d.jpg
cv2.imwrite((str(cpath) + "Frame%05d.jpg" % count), resize)
# write resized frame to saved video
out.write(resize)
cv2.imshow('image', resize)
# print converage rate of the frame
end_time = time.time()
executeTime = end_time - start_time
converageRate = executeTime / (count + 1)
infotxt.write('converage rate is: %f' % converageRate + 'f/s' + '\n')
cv2.waitKey(1)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# check whether capture finished
success, image = vidcap.read()
count += 1
infotxt.write('Resizing Completed')
print("Resizing Completed")
end_time = time.time()
executeTime = end_time - start_time
infotxt.close()
printInfo.printInfo(executeTime, vidcap, cpath)
cv2.destroyAllWindows()
return executeTime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resize(frame: Frame):\n\n frame.img = cv2.resize(frame.img, (720, 480))\n frame.orginal_img = cv2.resize(frame.orginal_img, (720, 480))\n\n return frame",
"def resize_video(frames, width: int = UNIVERSAL_RESIZE):\n if frames.size < 1:\n raise ValueError('Must provide at least one frame')\n\n edited_frames = []\n\n for frame in frames:\n edited_frames.append(resize(frame, width))\n\n logger.debug('Video resized successfully')\n return np.array(edited_frames)",
"def video_to_frames(video_filename,output_dir):\n cap = cv2.VideoCapture(video_filename)\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1\n vid_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n vid_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_fps = int(cap.get(cv2.CAP_PROP_FPS))\n print(\"vid_res=%d x %d, fps=%d\\n\" % (vid_width, vid_height,vid_fps))\n crop_width=int(vid_width/128)*128\n crop_height=int(vid_height/128)*128\n grab_step=int(vid_fps/2)\n if cap.isOpened() and video_length > 0:\n count = 0\n frame_id=0\n success, image = cap.read()\n while success and frame_id <= 9999:\n if count%grab_step==0:\n crop_img = image[0:crop_width, 0:crop_height]\n resized_img = cv2.resize(crop_img, (128, 128)) \n cv2.imwrite(output_dir+\"/frame%05d.jpg\" % frame_id, resized_img)\n frame_id+=1\n success, image = cap.read()\n count += 1\n return 0",
"def run(input_video_file, output_video_file):\n print(\"Debut de la transformation du format de la video\")\n #récupération de la vidéo\n video = cv2.VideoCapture(input_video_file)\n #fps de la vidéo\n fps = video.get(cv2.CAP_PROP_FPS)\n #largeur des images de la vidéo\n width_video = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n #hauteur des images de la vidéo\n height_video = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n #nombre d'images dans la vidéo\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n #durée de la vidéo\n duration = frame_count/fps\n #nouvelle durée de la vidéo (on arrondi)\n new_duration = math.floor(duration)\n #nouveau fps de la vidéo\n new_fps = float(round(fps))\n #appliquer le nouveau fps\n video.set(cv2.CAP_PROP_FPS,new_fps)\n #appliquer la nouvelle durée\n print(new_duration)\n print(new_fps)\n print(new_duration*new_fps)\n new_frame_count = new_duration*new_fps\n video.set(cv2.CAP_PROP_FRAME_COUNT,new_duration*new_fps)\n #déffinition du format de la vidéo en sortie\n video_out = cv2.VideoWriter(output_video_file,0x7634706d,new_fps,(width_video,height_video),True)\n \n count = 0\n #ouverture de la vidéo\n while(video.isOpened()):\n #lecture image par image\n ret, frame = video.read()\n if ret==True:\n\n #ecriture de l'image dans la vidéo en sortie\n video_out.write(frame)\n count = count + 1\n \n if (count > (new_frame_count-1)):\n # Libérer la vidéo\n video.release()\n break\n else:\n break\n\n print(\"fin de la transformation\")\n #fermer les vidéos\n video.release()\n video_out.release()",
"def crop_and_resize(vid, width, height, x_min, y_min, directory,\n resize_factor):\n crop_vid = os.path.join(directory, 'cropped_out.avi')\n subprocess.Popen(\n 'ffmpeg -y -loglevel quiet -i {0} -filter:v \\\"crop={1}:{2}:{3}:{4}\\\" {5}'\n .format(vid, str(width), str(height), str(x_min), str(y_min),\n crop_vid),\n shell=True).wait()\n subprocess.Popen(\n 'ffmpeg -y -loglevel quiet -i {0} -vf scale={2}*iw:{2}*ih {1}'.format(\n crop_vid, os.path.join(directory, 'inter_out.avi'),\n str(resize_factor)),\n shell=True).wait()\n os.remove(os.path.join(directory, 'cropped_out.avi'))",
"def _resize_video(self, images, dim=64):\n ret = np.zeros((images.shape[0], dim, dim, 3))\n\n for i in range(images.shape[0]):\n ret[i] = cv2.resize(images[i], dsize=(dim, dim),\n interpolation=cv2.INTER_CUBIC)\n\n return ret.astype(np.uint8)",
"def process_frame(frame, iou):\n img = frame[:iou[0],iou[1]:iou[2]]\n img_res = cv2.resize(img, (1280, 720), interpolation=cv2.INTER_NEAREST)\n return img_res",
"def video2frame(self, skip=1, resize_dims=None, mirror=False, keep_aspect=True, max_frames=10, rotate=0):\n if len(os.listdir(self.frame_dir)) > 0:\n print('Picture from this movie already extracted in that directory.')\n else:\n video_object = cv2.VideoCapture(self.video_file) # make video object\n\n index = 0\n last_mirrored = True\n\n frame_count = video_object.get(cv2.CAP_PROP_FRAME_COUNT)\n\n skip_delta = 0\n if max_frames and frame_count > max_frames:\n skip_delta = frame_count / max_frames\n\n while True:\n success, frame = video_object.read() # extract frames\n if success:\n if index % skip == 0:\n\n # resize frames\n if resize_dims is not None:\n if keep_aspect is True:\n frame = resize_keep_aspect(frame, resize_dims)\n else:\n frame = cv2.resize(frame, resize_dims, interpolation=cv2.INTER_CUBIC)\n\n # mirror frames\n if mirror and last_mirrored:\n frame = np.fliplr(frame)\n last_mirrored = not last_mirrored\n\n # Rotate if needed:\n if rotate > 0:\n if rotate == 90:\n frame = cv2.transpose(frame)\n frame = cv2.flip(frame, 1)\n elif rotate == 180:\n frame = cv2.flip(frame, -1)\n elif rotate == 270:\n frame = cv2.transpose(frame)\n frame = cv2.flip(frame, 0)\n\n # write images to output file\n frame_fp = os.path.join(self.frame_dir, 'frame_' + str(index) + '.png')\n cv2.imwrite(frame_fp, frame)\n else:\n break\n\n index += int(1 + skip_delta)\n video_object.set(cv2.CAP_PROP_POS_FRAMES, index)\n\n print('frame extracted from video')",
"def resizeButton(format,vpath,cpath):\r\n if os.path.exists(cpath):\r\n cPath=cpath+'/vid-instance'\r\n if os.path.exists(vpath):\r\n vPath=vpath\r\n N, cPath = dirCapture(1, cPath)\r\n resizeVideo(N, format, vPath, cPath)",
"def process_video(self):\n if os.path.isfile(self.source):\n self.cap = cv2.VideoCapture(self.source)\n else:\n try:\n file_name = \"input.mp4\"\n self.source = self.source.replace('open', 'uc')\n print( \"\\nDownloading video file from drive link to %s\\n\"%file_name)\n gdown.download(self.source, file_name, quiet=False)\n print( \"%s downloaded!\\n\"%file_name )\n self.cap = cv2.VideoCapture(file_name)\n except Exception:\n raise RuntimeError(\"Invalid source input, please specify a Google drive link or a downloaded local file as input \\n\")\n\n\n assert self.cap.isOpened(), \"Failed to open %s\" % self.source\n\n self.w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps = self.cap.get(cv2.CAP_PROP_FPS) \n self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n return",
"def resize_frame(\n frame: numpy.ndarray, width: int, height: int, mode: str = \"RGB\"\n) -> numpy.ndarray:\n from PIL import Image\n\n frame = Image.fromarray(frame)\n frame = frame.convert(mode).resize(size=(width, height))\n return numpy.array(frame)",
"def resize_frame(event,x,y,flags,param):\t\t\n global upperPt, lowerPt, frame\n if event == cv2.EVENT_LBUTTONDOWN:\n upperPt = [(x,y)]\n print upperPt\n if event == cv2.EVENT_LBUTTONUP:\n lowerPt = [(x,y)]\n print lowerPt\n cv2.rectangle(frame, upperPt[0], lowerPt[0],(0,0,0),1)\n cv2.destroyWindow('Select region of interest')\t\t\t\n #crop frame\n frame = frame[upperPt[0][1]:lowerPt[0][1],upperPt[0][0]:lowerPt[0][0]]\n cv2.imwrite('resized.jpg',frame)\n frame = histNorm(frame)\n print('Resize successful')\n cv2.imshow('Select region of interest', frame)\t\n\n color_data[\"upperPt\"] = upperPt\n color_data[\"lowerPt\"] = lowerPt",
"def run_video(self, video_path):\n file, ext = os.path.splitext(video_path)\n video_name = file.split('/')[-1]\n out_filename = video_name + '_out' + '.avi'\n\n cap = cv2.VideoCapture(video_path)\n wi = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n he = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(wi, he)\n\n vwriter = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(*'MJPG'), 10, (wi, he))\n counter = 0\n fac = 2\n start = time.time()\n while True:\n ret, image = cap.read()\n\n if ret:\n counter += 1\n\n ## resize image\n\n height, width, channels = image.shape\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)\n output = resized_image.copy()\n\n ## get segmentation map\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n ## visualize\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n ## overlay on image\n alpha = 0.7\n cv2.addWeighted(seg_image, alpha, output, 1 - alpha, 0, output)\n\n output = cv2.resize(output, (wi, he), interpolation=cv2.INTER_AREA)\n # outimg = 'image_' + str(counter) + '.jpg'\n # cv2.imwrite(os.path.join(os.getcwd(), 'test_out', outimg),output)\n vwriter.write(output)\n else:\n break\n\n end = time.time()\n print(\"Frames and Time Taken: \", counter, end - start)\n cap.release()\n vwriter.release()",
"def changeRes(width, height):\n\n\tlive_capture.set(3, width)\n\tlive_capture.set(4, height)",
"def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while (cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n cap.release()",
"def capture():\n stream = BytesIO()\n cam.capture(stream, 'jpeg')\n data = np.fromstring(stream.getvalue(), dtype=np.uint8)\n # \"Decode\" the image preserving color\n img = cv2.imdecode(data, 1)\n # switch BGR order to RGB order\n img = img[:, :, ::-1]\n\n # resize image to match training size\n img = cv2.resize(img, (args.resize, args.resize), interpolation=cv2.INTER_AREA)\n print(\"done resizing\")\n\n# cv2.imshow('image',img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n return img.flatten()",
"def grab_next_frame(self):\n if Rescue_PI.input_video_file_path is None:\n self.orig_frame = self.vs.read()\n self.frame = self.orig_frame.copy()\n else:\n _, self.frame = self.vs.read()\n # self.frame = cv2.rotate(self.frame, cv2.ROTATE_180)\n if self.frame is None:\n pass\n else:\n self.frame = imutils.resize(self.frame, width=frame_width_in_pixels)",
"def process_video(video_dir, save_dir):\n for sig_vid in tqdm(find_files(video_dir, '*.{}'.format(VID_FORMAT))):\n \n vc = cv2.VideoCapture(sig_vid) \n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n rig_bot_height, rig_bot_width = height // 2, width // 2\n\n if rig_bot_height == 540 and rig_bot_width == 960:\n # right bottom, r_h, l_w, r_w\n iou = [390, 90, 890]\n\n elif rig_bot_height == 720 and rig_bot_width == 1280:\n log.info('high resolution video, please confirm iou param')\n\n else:\n assert 'please confirm video resolution'\n\n count = 0\n cout_save = 0\n\n while vc: \n rval, frame = vc.read() \n\n if rval == True:\n count += 1\n # fisheye extract front preview\n ext_region = frame[rig_bot_height:, rig_bot_width:]\n cv2.imshow('ori frame', ext_region)\n\n key = cv2.waitKey(0) & 0xFF\n if key == ord('q'):\n break\n\n elif key == ord('s'): \n # Interval 20 frame save \n if cout_save % 20 == 0 or cout_save > 20: \n file_name = create_files(save_dir, sig_vid)\n img_res = process_frame(ext_region, iou)\n cv2.imwrite(os.path.join(save_dir, file_name)+\"/\"+ file_name+\"_{}.jpg\".format(count),img_res)\n cout_save = 0\n log.info('successful save current frame {}'.format(count))\n\n else:\n cout_save += 1\n continue\n cout_save += 1\n\n else:\n # skip current frame and cout pre save frame interval\n if cout_save > 0:\n cout_save += 1\n continue\n\n else:\n break\n \n vc.release()\n cv2.destroyAllWindows()",
"def show_frame(self, seconds, in_grayscale=False, size=(480, 360)):\n _, frame = self.video.read()\n if in_grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if frame.shape < size: # tuples are compared position by position\n frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)\n else:\n frame = cv2.resize(frame, size, interpolation=cv2.INTER_CUBIC)\n cv2.imshow('SnapShot', frame)\n key_pressed = cv2.waitKey(int(seconds * 1000)) # unit in milliseconds\n\n return key_pressed & 0xFF",
"def resize_image(self, name, frame_dims):\n logger.debug(\"Resizing image: (name: '%s', frame_dims: %s\", name, frame_dims)\n displayimg = self._previewtrain[name][0]\n if frame_dims:\n frameratio = float(frame_dims[0]) / float(frame_dims[1])\n imgratio = float(displayimg.size[0]) / float(displayimg.size[1])\n\n if frameratio <= imgratio:\n scale = frame_dims[0] / float(displayimg.size[0])\n size = (frame_dims[0], int(displayimg.size[1] * scale))\n else:\n scale = frame_dims[1] / float(displayimg.size[1])\n size = (int(displayimg.size[0] * scale), frame_dims[1])\n logger.debug(\"Scaling: (scale: %s, size: %s\", scale, size)\n\n # Hacky fix to force a reload if it happens to find corrupted\n # data, probably due to reading the image whilst it is partially\n # saved. If it continues to fail, then eventually raise.\n for i in range(0, 1000):\n try:\n displayimg = displayimg.resize(size, Image.ANTIALIAS)\n except OSError:\n if i == 999:\n raise\n continue\n break\n self._previewtrain[name][1] = ImageTk.PhotoImage(displayimg)",
"def show_video_image(self, frame):\n frame = cv2.resize(frame, (1280, 800))\n cv2.imshow('Video Preview', frame)\n cv2.waitKey(10)",
"def parse_video(cap, base_path, step, size):\n # Get nb of fps\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n # Compute how many images to skip to match the step\n nb_skip = int(step / 1000 * fps)\n index = 1\n count = -1\n while True:\n count += 1\n # Get next image\n success, frame = cap.read()\n\n # If no image to read anymore\n if not success:\n break\n\n # Skip images\n if count % nb_skip != 0:\n continue\n\n # Save current image\n suffix = '-{}.jpg'.format('0' + str(index) if index <= 9 else str(index))\n # Compute new height to keep aspect ratio\n if size is None:\n pass\n elif len(size) == 1:\n if frame.shape[0] >= frame.shape[1]:\n aspect_ratio = frame.shape[0] / frame.shape[1]\n new_h = int(size[0])\n new_w = int(size[0] / aspect_ratio)\n else:\n aspect_ratio = frame.shape[1] / frame.shape[0]\n new_w = int(size[0])\n new_h = int(size[0] / aspect_ratio)\n frame = cv2.resize(frame, (new_w, new_h))\n # Resize with width and height given\n elif len(size) == 2:\n frame = cv2.resize(frame, tuple(size))\n dump_image(frame, base_path + suffix)\n\n # Append index\n index += 1\n\n return",
"def stream_frames(video_capture):",
"def video_process(threshold=THRESHOLD, inputpath=INPUTPATH, file=FILE):\n #create video capture object\n cap = cv2.VideoCapture(f'{inputpath}{file}')\n name = file.split('/')[-1].split('.')[0]\n frame_sqrs_list = []\n if (cap.isOpened()==False):\n logging.error('Error opening video stream or file')\n model = load_model()\n frame_n = 1\n print('model loaded')\n while(cap.isOpened()):\n #capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n squares_list = img_preprocess(frame)\n frame_n = frame_n+1\n print(f'enter video file, frame{frame_n}')\n x_list = []\n y_list = []\n for sq in squares_list:\n predict = predict_hot_pxl(sq.sq, model)\n if predict > threshold:\n pred = 1\n print('ERROR')\n x_list.append(sq.y)\n y_list.append(sq.x)\n # draw square around error in frame:\n # FIXME: save a square to a list of squares\n continue\n else:\n pred = 0\n print('no error')\n # FIXME: draw_sqr(name, frame, frame_n, !!! PASS LIST INSTEAD !!! and rewrite the draw func to draw several squares sq.y, sq.x) \n sq = sq._replace(pred_float = predict)\n sq = sq._replace(pred_int = pred)\n # dict element sq is now obsolete, remove it\n sq = sq._replace(sq = None)\n # save single frame with squares marking errors as png to disc:\n draw_sqr(name, frame, frame_n, x_list, y_list)\n frame_sqrs_list.append(sq)\n # Break the loop\n else:\n break\n return name, frame_sqrs_list",
"def rescaleFrame(frame, scale=.75):\n\n\twidth = int(frame.shape[1] * scale)\n\theight = int(frame.shape[0] * scale)\n\n\tdimensions = (width, height)\n\t\n\treturn cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)",
"def _crop_video(numpy_video, size, desired_size):\r\n\r\n w, h = size\r\n h1, h2 = int(h/2) - int(desired_size/2), int(h/2) + int(desired_size/2)\r\n w1, w2 = int(w/2) - int(desired_size/2), int(w/2) + int(desired_size/2)\r\n return numpy_video[:, :, h1:h2, w1:w2, :]",
"def analyze_movie(\n video_path, aspect_ratio=0, palette_size=32, frames=-1, step=1, show_frames=False, show_last_frame=False, color_format='hex'\n):\n\n # Parse video frame-by-frame\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n pil_img = None\n count = 0\n while success and frames == -1 or count < frames:\n if count % step == 0:\n # Convert to PIL image\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(img)\n\n # Crop frame to remove border\n if aspect_ratio != 0:\n width, height = pil_img.size\n left = 0\n right = width\n content_height = 1/aspect_ratio * width\n border = (height - content_height) * 0.5\n top = border\n bottom = border + content_height\n pil_img = pil_img.crop((left, top, right, bottom))\n\n # Get primary color\n main_color = get_primary_color(\n pil_img, palette_size, show_img=show_frames)\n\n if color_format == 'hex':\n main_color = rgbToHex(main_color)\n \n print(main_color)\n\n # Attempt to read next frame\n success, image = vidcap.read()\n count += 1\n\n if show_last_frame:\n pil_img.show()",
"def _prepare_frame(self, frame):\n\n initial_h, initial_w = frame.shape[:2]\n scale_h, scale_w = initial_h / float(self.input_height), initial_w / float(self.input_width)\n\n in_frame = cv2.resize(frame, (self.input_width, self.input_height))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape(self.input_size)\n\n return in_frame, scale_h, scale_w",
"def test_read_video_from_file_rescale_width_and_height(self, test_video):\n # video related\n width, height, min_dimension, max_dimension = 320, 240, 0, 0\n video_start_pts, video_end_pts = 0, -1\n video_timebase_num, video_timebase_den = 0, 1\n # audio related\n samples, channels = 0, 0\n audio_start_pts, audio_end_pts = 0, -1\n audio_timebase_num, audio_timebase_den = 0, 1\n\n full_path = os.path.join(VIDEO_DIR, test_video)\n\n tv_result = torch.ops.video_reader.read_video_from_file(\n full_path,\n SEEK_FRAME_MARGIN,\n 0, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n assert tv_result[0].size(1) == height\n assert tv_result[0].size(2) == width",
"def capture_video(self):\n while self.capturing:\n nparray = self.source.get_frame()\n self.frame_buffer.put(Frame(nparray, self.frame))\n self.frame += 1\n print \"Stopping Capture\""
] | [
"0.6975515",
"0.6965948",
"0.6840254",
"0.66904473",
"0.6561962",
"0.65187913",
"0.64801097",
"0.6479887",
"0.6456022",
"0.64197224",
"0.64138234",
"0.63360685",
"0.6334037",
"0.6292876",
"0.62918407",
"0.62625307",
"0.62448573",
"0.61914134",
"0.61655945",
"0.6139185",
"0.6137619",
"0.6134392",
"0.61333144",
"0.6124291",
"0.612357",
"0.610023",
"0.6076311",
"0.60641325",
"0.6033924",
"0.60306364"
] | 0.74632716 | 0 |
this function is the action handler for the resize buttons; it mainly calls the resizeVideo method | def resizeButton(format,vpath,cpath):
if os.path.exists(cpath):
cPath=cpath+'/vid-instance'
if os.path.exists(vpath):
vPath=vpath
N, cPath = dirCapture(1, cPath)
resizeVideo(N, format, vPath, cPath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handleResize(self):\n pass",
"def resize(self):\n pass",
"def resize(self, event=None):\n #self.render()\n self.__resize_background(event)\n #self.__delete_background()\n #self.__create_background(self._imfname)\n for sym in self.itersymbols():\n sym.sym.resize(event)",
"def on_resize(self, width, height):\n self.gamestatemanager.peek().on_resize(width, height)",
"def init_gui(self):\r\n self.root.title('Video Resizer')\r\n self.root.option_add('*tearOff', 'FALSE')\r\n self.grid(column=0, row=0, sticky='nsew')\r\n\r\n # Buttons getvideos, save videos, start resize\r\n self.openButton = ttk.Button(self, width=8, text=\"Browse\", command=self.getVideosPath)\r\n self.openButton.grid(column=1, row=2)\r\n self.saveButton = ttk.Button(self, width=8, text=\"Browse\", command=self.getCapturePath)\r\n self.saveButton.grid(column=3, row=2)\r\n self.startButton = ttk.Button(self, text='Start to Resize', command=self.startResizing)\r\n self.startButton.grid(column=0, row=5)\r\n self.multiStartButton = ttk.Button(self, text='Start to multi Resize', command=self.startMultiResizing)\r\n self.multiStartButton.grid(column=2, row=5)\r\n\r\n # listbox to choose what video type to save\r\n # add a label for the combobox\r\n ttk.Label(self, text=\"Select Video Type to Save\").grid(column=0, row=4)\r\n\r\n def clickMe():\r\n \"\"\"\r\n button clicked to select video type\r\n called when action is clicked\r\n :return:\r\n \"\"\"\r\n global Format\r\n Format = typeToChoose.get()\r\n print(Format)\r\n action.configure(text='selected ' + Format) # show the selected item after clicked\r\n action.configure(state='disabled') # button disabled after clicked\r\n\r\n # Button\r\n action = ttk.Button(self, text=\"Select \", command=clickMe)\r\n action.grid(column=2, row=4)\r\n\r\n # Combobox\r\n typeToChoose = StringVar()\r\n # value in combobox is formatType\r\n numberChosen = ttk.Combobox(self, width=12, textvariable=typeToChoose, values=self.formatTypes)\r\n numberChosen.grid(column=1, row=4)\r\n numberChosen.current(0)\r\n\r\n # Frame show info related to the resizing process\r\n self.resultFrame = ttk.LabelFrame(self, text='Result', height=100)\r\n self.resultFrame.grid(column=0, row=6, columnspan=4, sticky='nesw')\r\n self.resultLabel = ttk.Label(self.resultFrame, text='')\r\n self.resultLabel.grid(column=0, row=0)\r\n\r\n # Labels that remain constant throughout execution.\r\n ttk.Label(self, text='Video Resizer').grid(column=0, row=0, columnspan=4)\r\n ttk.Label(self, text='Select videos').grid(column=0, row=2, sticky='w')\r\n ttk.Label(self, text='Saving folder').grid(column=2, row=2, sticky='w')\r\n ttk.Separator(self, orient='horizontal').grid(column=0, row=1, columnspan=4, sticky='ew')\r\n\r\n # configure for the window grid\r\n for child in self.winfo_children():\r\n child.grid_configure(padx=5, pady=5)",
"def __onResize(self, ev):\n ev.Skip()\n self.__calcCanvasSizes()",
"def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()",
"def __window_resizeTo(self, iWidth, iHeight):\n pass",
"def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()",
"def on_parent_resize(self, event):\n #self.resize()\n #self.resize_scaled(drag_rootx=self.resize_frame.winfo_rootx())\n self.resize_scaled(current=MathStat.lerp(0,\n self.prop_frame.winfo_width(), self.last_right_bias))",
"def resize(self, old, new):",
"def OnResize(self, event):\n self._resizing = True\n self._resize_timer.Start(60, True)",
"def OnMotion_Resize(self, event):\r\n\r\n if AuiManager_HasLiveResize(self):\r\n if self._currentDragItem != -1:\r\n self._action_part = self._uiparts[self._currentDragItem]\r\n else:\r\n self._currentDragItem = self._uiparts.index(self._action_part)\r\n\r\n if self._frame.HasCapture():\r\n self._frame.ReleaseMouse()\r\n \r\n self.DoEndResizeAction(event)\r\n self._frame.CaptureMouse()\r\n return\r\n\r\n if not self._action_part or not self._action_part.dock or not self._action_part.orientation:\r\n return\r\n\r\n clientPt = event.GetPosition()\r\n screenPt = self._frame.ClientToScreen(clientPt)\r\n \r\n dock = self._action_part.dock\r\n pos = self._action_part.rect.GetPosition()\r\n\r\n if self._action_part.type == AuiDockUIPart.typeDockSizer:\r\n minPix, maxPix = self.CalculateDockSizerLimits(dock)\r\n else:\r\n if not self._action_part.pane:\r\n return\r\n \r\n pane = self._action_part.pane\r\n minPix, maxPix = self.CalculatePaneSizerLimits(dock, pane)\r\n\r\n if self._action_part.orientation == wx.HORIZONTAL:\r\n pos.y = Clip(clientPt.y - self._action_offset.y, minPix, maxPix)\r\n else:\r\n pos.x = Clip(clientPt.x - self._action_offset.x, minPix, maxPix)\r\n\r\n hintrect = wx.RectPS(self._frame.ClientToScreen(pos), self._action_part.rect.GetSize())\r\n\r\n if hintrect != self._action_rect:\r\n \r\n if wx.Platform == \"__WXMAC__\":\r\n dc = wx.ClientDC(self._frame)\r\n else:\r\n dc = wx.ScreenDC()\r\n\r\n DrawResizeHint(dc, self._action_rect)\r\n DrawResizeHint(dc, hintrect)\r\n self._action_rect = wx.Rect(*hintrect)",
"def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")",
"def ev_windowresized(self, event: WindowResized) -> None:",
"def resizeEvent(self, event):\n self.updateViewer()",
"def resizeEvent(self, event):\n self.resized.emit()\n return super(PiWndow, self).resizeEvent(event)",
"def ev_windowsizechanged(self, event: WindowResized) -> None:",
"def startResizing(self):\r\n # total running times\r\n global totaltime\r\n start = time.time()\r\n try:\r\n str(cpath)\r\n try:\r\n file = str(filenames).split(',')[0].strip(\"('\")\r\n if os.path.exists(file):\r\n print(file)\r\n try:\r\n resizeButton(Format, file, cpath)\r\n end = time.time()\r\n totaltime = end - start\r\n self.resultLabel['text'] = self.Results()\r\n except NameError:\r\n messagebox.showerror('ERROR', 'No Format selected')\r\n except NameError:\r\n messagebox.showerror('ERROR', 'No video selected')\r\n except NameError:\r\n messagebox.showerror('ERROR', 'No saving folder selected')",
"def on_resize(self, _: int = 0) -> None:\n assert CursesMenu.stdscr is not None\n screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()\n curses.resizeterm(screen_rows, screen_cols)\n self.draw()",
"def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - (geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()",
"def OnResizeEnd(self, event):\n self._resizing = False\n self.Refresh()",
"def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)",
"def resizeEvent(self, event):\n self.refresh_images(resize=True)\n QMainWindow.resizeEvent(self, event)",
"def OnSize(self,event):\r\n\t\t\r\n self.SetupView()\r\n event.Skip()",
"def update_resize(self, viewer, dims):\n self.recalc(viewer)",
"def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:",
"def _resize_image(self, event):\n self.window_width = event.width\n self.window_height = event.height",
"def resize(self):\r\n del self.win\r\n self.__create_win()",
"def resize(self, width: int, height: int):\n pass"
] | [
"0.7466857",
"0.67581195",
"0.6693336",
"0.6686181",
"0.6664426",
"0.66365945",
"0.6623954",
"0.6581972",
"0.65713274",
"0.6529807",
"0.6507639",
"0.6472563",
"0.6404809",
"0.6373768",
"0.63407046",
"0.632196",
"0.63161606",
"0.62875193",
"0.62567973",
"0.624347",
"0.62312657",
"0.622206",
"0.6190905",
"0.6147348",
"0.61384463",
"0.613599",
"0.6130015",
"0.6090538",
"0.6081597",
"0.6079279"
] | 0.74494356 | 1 |
this function is called when multiStartButton is clicked and uses the multicore() function to resize videos in parallel | def startMultiResizing(self):
global totaltime
try:
str(cpath)
try:
str(filenames)
try:
print(filenames)
totaltime = multicore(Format, filenames, cpath)
self.resultLabel['text'] = self.Results()
except NameError:
messagebox.showerror('ERROR', 'no format selected')
except NameError:
messagebox.showerror('ERROR', 'No saving folder selected')
except NameError:
messagebox.showerror('ERROR', 'No video selected') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def multicore(format, filenames, cpath):\r\n start = time.time()\r\n po = multiprocessing.Pool(P)\r\n file = str(filenames).split(',')\r\n for file in filenames:\r\n print(file)\r\n po.apply_async(func=resizeButton, args=(format, file, cpath))\r\n time.sleep(M)\r\n print(\"Done\")\r\n po.close()\r\n po.join()\r\n end = time.time()\r\n total = end - start\r\n return total",
"def startResizing(self):\r\n # total running times\r\n global totaltime\r\n start = time.time()\r\n try:\r\n str(cpath)\r\n try:\r\n file = str(filenames).split(',')[0].strip(\"('\")\r\n if os.path.exists(file):\r\n print(file)\r\n try:\r\n resizeButton(Format, file, cpath)\r\n end = time.time()\r\n totaltime = end - start\r\n self.resultLabel['text'] = self.Results()\r\n except NameError:\r\n messagebox.showerror('ERROR', 'No Format selected')\r\n except NameError:\r\n messagebox.showerror('ERROR', 'No video selected')\r\n except NameError:\r\n messagebox.showerror('ERROR', 'No saving folder selected')",
"def init_gui(self):\r\n self.root.title('Video Resizer')\r\n self.root.option_add('*tearOff', 'FALSE')\r\n self.grid(column=0, row=0, sticky='nsew')\r\n\r\n # Buttons getvideos, save videos, start resize\r\n self.openButton = ttk.Button(self, width=8, text=\"Browse\", command=self.getVideosPath)\r\n self.openButton.grid(column=1, row=2)\r\n self.saveButton = ttk.Button(self, width=8, text=\"Browse\", command=self.getCapturePath)\r\n self.saveButton.grid(column=3, row=2)\r\n self.startButton = ttk.Button(self, text='Start to Resize', command=self.startResizing)\r\n self.startButton.grid(column=0, row=5)\r\n self.multiStartButton = ttk.Button(self, text='Start to multi Resize', command=self.startMultiResizing)\r\n self.multiStartButton.grid(column=2, row=5)\r\n\r\n # listbox to choose what video type to save\r\n # add a label for the combobox\r\n ttk.Label(self, text=\"Select Video Type to Save\").grid(column=0, row=4)\r\n\r\n def clickMe():\r\n \"\"\"\r\n button clicked to select video type\r\n called when action is clicked\r\n :return:\r\n \"\"\"\r\n global Format\r\n Format = typeToChoose.get()\r\n print(Format)\r\n action.configure(text='selected ' + Format) # show the selected item after clicked\r\n action.configure(state='disabled') # button disabled after clicked\r\n\r\n # Button\r\n action = ttk.Button(self, text=\"Select \", command=clickMe)\r\n action.grid(column=2, row=4)\r\n\r\n # Combobox\r\n typeToChoose = StringVar()\r\n # value in combobox is formatType\r\n numberChosen = ttk.Combobox(self, width=12, textvariable=typeToChoose, values=self.formatTypes)\r\n numberChosen.grid(column=1, row=4)\r\n numberChosen.current(0)\r\n\r\n # Frame show info related to the resizing process\r\n self.resultFrame = ttk.LabelFrame(self, text='Result', height=100)\r\n self.resultFrame.grid(column=0, row=6, columnspan=4, sticky='nesw')\r\n self.resultLabel = ttk.Label(self.resultFrame, text='')\r\n self.resultLabel.grid(column=0, row=0)\r\n\r\n # Labels that remain constant throughout execution.\r\n ttk.Label(self, text='Video Resizer').grid(column=0, row=0, columnspan=4)\r\n ttk.Label(self, text='Select videos').grid(column=0, row=2, sticky='w')\r\n ttk.Label(self, text='Saving folder').grid(column=2, row=2, sticky='w')\r\n ttk.Separator(self, orient='horizontal').grid(column=0, row=1, columnspan=4, sticky='ew')\r\n\r\n # configure for the window grid\r\n for child in self.winfo_children():\r\n child.grid_configure(padx=5, pady=5)",
"def resizeButton(format,vpath,cpath):\r\n if os.path.exists(cpath):\r\n cPath=cpath+'/vid-instance'\r\n if os.path.exists(vpath):\r\n vPath=vpath\r\n N, cPath = dirCapture(1, cPath)\r\n resizeVideo(N, format, vPath, cPath)",
"def run(self):\n\n for file_cnt, file_path in enumerate(self.files_found):\n video_timer = SimbaTimer()\n video_timer.start_timer()\n _, self.video_name, _ = get_fn_ext(file_path)\n self.video_info, self.px_per_mm, self.fps = self.read_video_info(\n video_name=self.video_name\n )\n self.width, self.height = int(\n self.video_info[\"Resolution_width\"].values[0]\n ), int(self.video_info[\"Resolution_height\"].values[0])\n if self.video_setting:\n self.fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)\n self.video_save_path = os.path.join(\n self.heatmap_clf_location_dir, self.video_name + \".mp4\"\n )\n self.writer = cv2.VideoWriter(\n self.video_save_path,\n self.fourcc,\n self.fps,\n (self.width, self.height),\n )\n if self.frame_setting:\n self.save_video_folder = os.path.join(\n self.heatmap_clf_location_dir, self.video_name\n )\n if not os.path.exists(self.save_video_folder):\n os.makedirs(self.save_video_folder)\n self.data_df = read_df(file_path=file_path, file_type=self.file_type)\n clf_array, aspect_ratio = self.__calculate_bin_attr(\n data_df=self.data_df,\n clf_name=self.clf_name,\n bp_lst=self.bp_lst,\n px_per_mm=self.px_per_mm,\n img_width=self.width,\n img_height=self.height,\n bin_size=self.bin_size,\n fps=self.fps,\n )\n\n if self.max_scale == \"auto\":\n self.max_scale = self.__calculate_max_scale(clf_array=clf_array)\n if self.max_scale == 0:\n self.max_scale = 1\n\n if self.final_img_setting:\n self.make_clf_heatmap_plot(\n frm_data=clf_array[-1, :, :],\n max_scale=self.max_scale,\n palette=self.palette,\n aspect_ratio=aspect_ratio,\n file_name=os.path.join(\n self.heatmap_clf_location_dir,\n self.video_name + \"_final_frm.png\",\n ),\n shading=self.shading,\n clf_name=self.clf_name,\n img_size=(self.width, self.height),\n final_img=True,\n )\n\n if self.video_setting or self.frame_setting:\n for frm_cnt, cumulative_frm_idx in enumerate(range(clf_array.shape[0])):\n frm_data = clf_array[cumulative_frm_idx, :, :]\n cum_df = pd.DataFrame(frm_data).reset_index()\n cum_df = cum_df.melt(\n id_vars=\"index\",\n value_vars=None,\n var_name=None,\n value_name=\"seconds\",\n col_level=None,\n ).rename(\n columns={\"index\": \"vertical_idx\", \"variable\": \"horizontal_idx\"}\n )\n cum_df[\"color\"] = (\n (cum_df[\"seconds\"].astype(float) / float(self.max_scale))\n .round(2)\n .clip(upper=100)\n )\n color_array = np.zeros(\n (\n len(cum_df[\"vertical_idx\"].unique()),\n len(cum_df[\"horizontal_idx\"].unique()),\n )\n )\n for i in range(color_array.shape[0]):\n for j in range(color_array.shape[1]):\n value = cum_df[\"color\"][\n (cum_df[\"horizontal_idx\"] == j)\n & (cum_df[\"vertical_idx\"] == i)\n ].values[0]\n color_array[i, j] = value\n\n fig = plt.figure()\n im_ratio = color_array.shape[0] / color_array.shape[1]\n plt.pcolormesh(\n color_array,\n shading=self.shading,\n cmap=self.palette,\n rasterized=True,\n alpha=1,\n vmin=0.0,\n vmax=float(self.max_scale),\n )\n plt.gca().invert_yaxis()\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n plt.tick_params(axis=\"both\", which=\"both\", length=0)\n cb = plt.colorbar(pad=0.0, fraction=0.023 * im_ratio)\n cb.ax.tick_params(size=0)\n cb.outline.set_visible(False)\n cb.set_label(\n \"{} (seconds)\".format(self.clf_name), rotation=270, labelpad=10\n )\n plt.tight_layout()\n plt.gca().set_aspect(aspect_ratio)\n canvas = FigureCanvas(fig)\n canvas.draw()\n mat = np.array(canvas.renderer._renderer)\n image = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n image = cv2.resize(image, (self.width, self.height))\n image = 
np.uint8(image)\n plt.close()\n\n if self.video_setting:\n self.writer.write(image)\n if self.frame_setting:\n frame_save_path = os.path.join(\n self.save_video_folder, str(frm_cnt) + \".png\"\n )\n cv2.imwrite(frame_save_path, image)\n print(\n \"Created heatmap frame: {} / {}. Video: {} ({}/{})\".format(\n str(frm_cnt + 1),\n str(len(self.data_df)),\n self.video_name,\n str(file_cnt + 1),\n len(self.files_found),\n )\n )\n\n if self.video_setting:\n self.writer.release()\n\n video_timer.stop_timer()\n print(\n \"Heatmap plot for video {} saved (elapsed time: {}s) ... \".format(\n self.video_name, video_timer.elapsed_time_str\n )\n )\n\n self.timer.stop_timer()\n stdout_success(\n msg=\"All heatmap visualizations created in project_folder/frames/output/heatmaps_classifier_locations directory\",\n elapsed_time=\"self.timer.elapsed_time_str\",\n )",
"def on_worker_started(self):\n self.playing = True\n self.enable_video_buttons(False, True, True)",
"def run():\n while True:\n try:\n active = pacvert.thequeue.getActive()\n current = pacvert.thequeue.getPending()\n if (active == None) and (current != None):\n pacvert.thequeue.addActive(current)\n active = current\n\n try:\n # setting up codec specific settings\n video = {'codec': pacvert.CONFIG.DEFAULT_CODEC_VIDEO} # set the targets codec\n if pacvert.CONFIG.DEFAULT_CODEC_VIDEO_CROP: # check if cropping is enabled\n video['width'] = active.crop[0] # set width\n video['height'] = active.crop[1] # set height\n video['mode'] = 'crop' # set crop mode\n\n if pacvert.CONFIG.DEFAULT_CODEC_VIDEO == \"h264\": # if target codec is h264\n video['preset'] = pacvert.CONFIG.CODEC_AVC_PRESET # set preset\n video['profile'] = pacvert.CONFIG.CODEC_AVC_PROFILE # set profile\n video['quality'] = pacvert.CONFIG.CODEC_AVC_QUALITY # set quality\n video['tune'] = pacvert.CONFIG.CODEC_AVC_TUNE # set tune\n if pacvert.CONFIG.CODEC_AVC_AUTOMAXRATE: # if automatic maxrate is enabled\n if pacvert.CONFIG.CODEC_AVC_BUFSIZE < 0 or pacvert.CONFIG.CODEC_H264_MAXRATE < 0:\n if 'bit_rate' in active.mediainfo['Video']:\n video['maxrate'] = cast_to_int(active.mediainfo['Video']['bit_rate']) # set maxrate to video track bitrate\n video['bufsize'] = cast_to_int(active.mediainfo['Video']['bit_rate']*3) # set bufsize to three times the video bitrate\n else:\n video['maxrate'] = pacvert.CONFIG.CODEC_AVC_MAXRATE # set maxrate to given value\n video['bufsize'] = pacvert.CONFIG.CODEC_AVC_BUFSIZE # set bufsize to given value\n for anotheropt in pacvert.CONFIG.CODEC_AVC_ADDITIONALOPT: # if additional options are specified\n video[anotheropt] = pacvert.CONFIG.CODEC_AVC_ADDITIONALOPT[anotheropt] # add options to out encoding list\n elif pacvert.CONFIG.DEFAULT_CODEC_VIDEO == \"hevc\": # if target codec is hevc\n video['preset'] = pacvert.CONFIG.CODEC_HEVC_PRESET # set preset\n video['quality'] = pacvert.CONFIG.CODEC_HEVC_QUALITY # set quality\n video['tune'] = pacvert.CONFIG.CODEC_HEVC_TUNE # set tune\n if pacvert.CONFIG.CODEC_HEVC_AUTOMAXRATE: # set max rate\n if pacvert.CONFIG.CODEC_HEVC_BUFSIZE < 0 or pacvert.CONFIG.CODEC_HEVC_MAXRATE < 0:\n if 'bit_rate' in active.mediainfo['Video']:\n video['maxrate'] = cast_to_int(active.mediainfo['Video']['bit_rate']) # set maxrate to video track bitrate\n video['bufsize'] = cast_to_int(active.mediainfo['Video']['bit_rate']*3) # set bufsize to three times the video bitrate\n else:\n video['maxrate'] = pacvert.CONFIG.CODEC_HEVC_MAXRATE # set maxrate to given value\n video['bufsize'] = pacvert.CONFIG.CODEC_HEVC_BUFSIZE # set bufsize to given value\n for anotheropt in pacvert.CONFIG.CODEC_HEVC_ADDITIONALOPT: # if additional options are specified\n video[anotheropt] = pacvert.CONFIG.CODEC_HEVC_ADDITIONALOPT[anotheropt] # add options to out encoding list\n elif pacvert.CONFIG.DEFAULT_CODEC_VIDEO == \"vp8\": # if target codec is vp8\n video['quality'] = pacvert.CONFIG.CODEC_VP8_QUALITY # set video quality\n video['threads'] = pacvert.CONFIG.CODEC_VP8_THREADS # set no of real cores\n else:\n logger.error(\"Codec not yet implemented\")\n\n conv = c.convert(active.fullpath, active.outputfilename,\n {\n 'format': 'mkv',\n 'video': video,\n 'audio': {\n 'codec': pacvert.CONFIG.DEFAULT_CODEC_AUDIO,\n },\n 'subtitle': {\n 'codec': pacvert.CONFIG.DEFAULT_CODEC_SUBTITLE,\n },\n 'map': 0,\n })\n for timecode in conv:\n logger.debug(\"Converting (\"+str(timecode)+\")...\")\n active.progress = timecode\n logger.info(\"Finished File: '\"+active.fullpath+\"'\")\n active.finished = now()\n 
pacvert.thequeue.addFinished(pacvert.thequeue.getActive()) # set status to finished\n except FFMpegConvertError as e:\n logger.error(\"ffmpeg: \" +e.message + \" with command: \"+ e.cmd)\n\n pacvert.thequeue.addFailed(pacvert.thequeue.getActive()) # set status to failed\n time.sleep(1)\n except Exception as e:\n logger.error(e)",
"def run(self):\n\n im = None\n while im == None:\n im = self.vid_mem_reader.get_latest_image()\n if im == None:\n print \"not receiving images yet...\"\n time.sleep(0.2)\n\n #Wait for video source to be ready:\n #TODO: Shoud use vidmemreader, but this one never seem to return a resolution (at time of writing):\n #res = self.vid_mem_reader.get_resolution()\n \n #TODO: This should work, but it doesn't because OpenCV keeps on complaining about that im is not a IPL image \n #(while if you print it, it seems to be a IPL image).\n #print im\n size = cv.GetSize(im[0])\n #print size\n self.res = ({'width':size[0], 'height':size[1]})\n res = self.res\n\n self.transformer = util.speed_angle.SpeedAngle(None, res['width'], res['height'])\n \n while True:\n self.__ticker.tick()\n start_time = time.time()\n img = self.get_new_image()\n ''' Parallel Process Inside this module\n \n im = np.asarray(img[:,:])\n time_spent = time.time() - start_time\n \n #Parallel process\n \n self.parallel_rotate_image(im)\n self.logger.debug(\"Set one finished\")\n \n print \"Image Length: \", self.rotatedImages\n for img in self.rotatedImages:\n self.get_faces(img[0])\n self.update()\n \n self.rotatedImages = []\n '''\n im = np.asarray(img[:,:])\n \n image = self.rotate_image( im, [self.rotation])\n self.get_faces(image)\n self.update()\n\n #TODO: To be removed and or configurable:\n directory = \"/tmp/emergency/\"\n if not os.path.exists(directory):\n os.makedirs(directory) \n try:\n cv.SaveImage(directory + \"image.png\", image)\n except:\n print \"ERROR: Could not write image to /tmp/emergency/\"",
"def start_processing(self):",
"def start_videos(self):\n\n # Disable play/pause/stop buttons until it is safe\n self.enable_video_buttons(False, False, False)\n\n # If any button click is still being processed\n if (self.unpausing) or (self.pausing) or (self.shutdown):\n return\n\n if self.playing:\n self.enable_video_buttons(False, True, True)\n return\n\n if self.worker is not None:\n self.worker.force_unpause()\n return\n\n #\n # Check for valid inputs\n #\n def throw_error_message(self, message):\n # Re-enable video buttons\n self.enable_video_buttons(True, False, False)\n\n # Display warning\n self.warning = QErrorMessage()\n self.warning.showMessage(message)\n self.warning.show()\n return None\n\n def acquire_var(self, text, widget_name, func):\n try:\n temp = func(text)\n except:\n # Re-enable video buttons\n self.enable_video_buttons(True, False, False)\n\n # Display warning\n if func == float:\n return throw_error_message(self, \"Please set a valid float for \\\"{}\\\".\".format(widget_name))\n else:\n return throw_error_message(self, \"Please set a valid integer for \\\"{}\\\".\".format(widget_name))\n return temp\n\n if ((acquire_var(self, self.collect_entry.text(), \"Collect Duration\", float) is None) or\n (acquire_var(self, self.collect_entry.text(), \"Rest Duration\", float) is None) or\n (acquire_var(self, self.num_reps.text(), \"Number of Repetitions\", int) is None)):\n return\n\n self.collect_duration = acquire_var(self, self.collect_entry.text(), \"Collect Duration\", float)\n self.rest_duration = acquire_var(self, self.rest_entry.text(), \"Rest Duration\", float)\n self.repetitions = acquire_var(self, self.num_reps.text(), \"Rest Duration\", int)\n\n if (not self.ex_a_check.isChecked()) and (not self.ex_b_check.isChecked()) and (\n not self.ex_c_check.isChecked()):\n return throw_error_message(self, \"Please select at least one exercise.\")\n\n if self.collect_duration < 1.0:\n return throw_error_message(self, \"Please select a collect duration >= 1.0s.\")\n if self.rest_duration < 1.0:\n return throw_error_message(self, \"Please select a rest duration >= 1.0s.\")\n if self.repetitions < 1:\n return throw_error_message(self, \"Please select a number of repetitions >= 1.\")\n\n #\n # Attempt to find all videos\n #\n exercises_found = self.check_video_paths()\n\n def missing_exer(self, ex_found, ex_label):\n if not ex_found:\n # Re-enable video buttons\n self.enable_video_buttons(True, False, False)\n\n # Display warning\n self.warning = QErrorMessage()\n self.warning.showMessage(\"Unable to find videos for Exercise {}.\".format(ex_label))\n self.warning.show()\n return ex_found\n\n if ((not missing_exer(self, exercises_found[0], \"A\")) or (not missing_exer(self, exercises_found[1], \"B\")) or\n (not missing_exer(self, exercises_found[2], \"C\"))):\n return\n\n #\n # Start playing videos, and updating text fields, via background thread\n #\n self.worker = GroundTruthWorker(self.status_label, self.progress_label, self.desc_title, self.desc_explain,\n self.current_movement, self.video_player, self.all_video_paths,\n self.collect_duration, self.rest_duration, self.repetitions,\n self.on_worker_started, self.on_worker_unpaused, self.on_worker_paused,\n self.on_worker_stopped)\n QThreadPool.globalInstance().start(self.worker)",
"def video_loop(self):\n if not self.isReplay:\n if self.initStream:\n print('[SB Live] Starting live video stream...')\n self.replayStream.release()\n self.vs.open(0)\n self.initStream = False\n print('[SB Live] Live video stream started')\n if self.cClear:\n self.cache.release()\n os.remove('sblive/cache/replay.mov')\n self.cache.open('sblive/cache/replay.mov', self.fourcc, 10.0, (1280, 720))\n self.cClear = False\n ok, frame = self.vs.read() # read frame from video stream\n if ok: # frame captured without any errors\n key = cv2.waitKey(1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA\n self.cache.write(frame)\n self.current_image = Image.fromarray(cv2image) # convert image for PIL\n imgtk = ImageTk.PhotoImage(image=self.current_image) # convert image for tkinter\n \n self.panel.imgtk = imgtk # anchor imgtk so it does not be deleted by garbage-collector\n self.panel.config(image=imgtk) # show the image\n else:\n if self.initStream:\n print('[SB Live] Starting replay video stream...')\n self.cache.release()\n self.vs.release()\n self.replayStream.open('sblive/cache/replay.mov')\n self.initStream = False\n print('[SB Live] Replay video stream started')\n ok, frame = self.replayStream.read()\n if ok:\n key = cv2.waitKey(1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA\n self.current_image = Image.fromarray(cv2image) # convert image for PIL\n imgtk = ImageTk.PhotoImage(image=self.current_image) # convert image for tkinter\n \n self.panel.imgtk = imgtk # anchor imgtk so it does not be deleted by garbage-collector\n self.panel.config(image=imgtk) # show the image\n else:\n self.replayStream.release()\n self.replayStream.open('sblive/cache/replay.mov')\n if not self.killThread:\n self.root.after(30, self.video_loop) # call the same function after 30 milliseconds",
"def run(self):\n for asset in self.get_assets():\n if asset:\n canvas = self.get_ticker_canvas(asset)\n else:\n canvas = self.get_error_canvas()\n self.matrix.SwapOnVSync(canvas)\n time.sleep(self.sleep)",
"def continue_video(self):\n\n print(\"continue_video needs implementation\")",
"def runVideo(vidSeq):\r\n\r\n app = wx.PySimpleApp()\r\n frame = ImageFrame(None)\r\n frame.SetSize((800, 600))\r\n frame.Show(True)\r\n\r\n myImageIn = ImageIn(frame.window)\r\n t = threading.Thread(target=vidSeq, args=(myImageIn.SetData,))\r\n t.setDaemon(1)\r\n t.start()\r\n\r\n app.MainLoop()",
"def initialize_2nd_video_tab(self):\n self.mediaplayer2 = self.instance.media_player_new()\n\n self.media_list2 = self.instance.media_list_new()\n\n self.mediaListPlayer2 = self.instance.media_list_player_new()\n self.mediaListPlayer2.set_media_player(self.mediaplayer2)\n\n app.processEvents()\n\n '''\n if sys.platform == \"darwin\": # for MacOS\n self.videoframe2 = QMacCocoaViewContainer(0)\n else:\n self.videoframe2 = QFrame()\n '''\n\n self.videoframe2 = QFrame()\n\n self.palette2 = self.videoframe2.palette()\n self.palette2.setColor(QPalette.Window, QColor(0, 0, 0))\n self.videoframe2.setPalette(self.palette2)\n self.videoframe2.setAutoFillBackground(True)\n\n self.volumeslider2 = QSlider(QtCore.Qt.Vertical, self)\n self.volumeslider2.setMaximum(100)\n self.volumeslider2.setValue(self.mediaplayer2.audio_get_volume())\n self.volumeslider2.setToolTip(\"Volume\")\n\n self.volumeslider2.sliderMoved.connect(self.setVolume2)\n\n self.video2layout = QHBoxLayout()\n self.video2layout.addWidget(self.videoframe2)\n self.video2layout.addWidget(self.volumeslider2)\n\n self.vboxlayout.insertLayout(1, self.video2layout)",
"def handleResize(self):\n pass",
"def video_loop(self):\n\n _, img = self.vs.read()\n img = imutils.resize(img, width=self.width)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n self.frame.configure(image=image)\n self.frame.photo = image\n\n self.top.after(self.fps, self.video_loop)",
"def resizeVideo(n, format, vpath, cpath):\r\n start_time = time.time()\r\n t = time.process_time()\r\n vidcap = cv2.VideoCapture(vpath)\r\n success, image = vidcap.read()\r\n cv2.namedWindow('image')\r\n cv2.imshow('image', image)\r\n cv2.waitKey(1)\r\n count = 0\r\n\r\n CODE = 'XVID'\r\n # default save to avi\r\n\r\n CODE1 = 'XVID'\r\n format1 = '.avi'\r\n CODE2 = 'WMV1' # OR WMV2\r\n format2 = '.wmv'\r\n CODE3 = 'FLV1'\r\n format3 = '.flv'\r\n CODE4 = 'MPEG'\r\n format4 = '.mp4'\r\n\r\n if (format == format1):\r\n CODE = CODE1\r\n if (format == format2):\r\n CODE = CODE2\r\n if (format == format3):\r\n CODE = CODE3\r\n if (format == format4):\r\n CODE = CODE4\r\n if format == '':\r\n CODE = CODE1\r\n format = '.avi'\r\n print(\"default save the resized video to .avi\")\r\n\r\n # fourcc used for saving videos\r\n fourcc = cv2.VideoWriter_fourcc(*CODE)\r\n # video saved to the same path as the capatured frame\r\n out = cv2.VideoWriter((str(cpath) + 'ResizedVideo%d' % n + format), fourcc, vidcap.get(5), (480, 480))\r\n infotxt = open(cpath + 'Resize Info' + '.txt', 'w')\r\n infotxt.write(vpath + '\\n')\r\n print(\"Resizing...\")\r\n\r\n while success:\r\n if success:\r\n resize = cv2.resize(image, (480, 480), interpolation=cv2.INTER_LINEAR)\r\n # frame name save as Frame%5d.jpg\r\n cv2.imwrite((str(cpath) + \"Frame%05d.jpg\" % count), resize)\r\n\r\n # write resized frame to saved video\r\n out.write(resize)\r\n\r\n cv2.imshow('image', resize)\r\n\r\n # print converage rate of the frame\r\n end_time = time.time()\r\n executeTime = end_time - start_time\r\n converageRate = executeTime / (count + 1)\r\n infotxt.write('converage rate is: %f' % converageRate + 'f/s' + '\\n')\r\n\r\n cv2.waitKey(1)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n # check whether capture finished\r\n\r\n success, image = vidcap.read()\r\n count += 1\r\n infotxt.write('Resizing Completed')\r\n print(\"Resizing Completed\")\r\n\r\n end_time = time.time()\r\n executeTime = end_time - start_time\r\n infotxt.close()\r\n printInfo.printInfo(executeTime, vidcap, cpath)\r\n\r\n cv2.destroyAllWindows()\r\n return executeTime",
"def resizeAndRepubThread():\n\n # reference globals\n global primaryCamString\n global secondaryCamString\n global armCamImage\n global headCamImage\n\n # initialize image publishers\n primaryPub = rospy.Publisher(primaryCamRepub, Image, queue_size=1)\n secondaryPub = rospy.Publisher(secondaryCamRepub, Image, queue_size=1)\n\n # create CvBridge object for converting CV2 images to sensor_msgs/Image messages\n backBridge = CvBridge()\n\n while(True):\n primaryImage = np.zeros(shape=[512, 512, 3])\n secondaryImage = np.zeros(shape=[512, 512, 3])\n\n # just keep looping until we get images\n\n if(np.sum(headCamImage) == 0 or np.sum(armCamImage) == 0):\n rospy.loginfo(\"still waiting on camera images...\")\n continue\n\n # get primary image\n if(primaryCamString == \"head\"):\n primaryImage = resizeImage(headCamImage, primarySize)\n elif(primaryCamString == \"leftArm\"):\n primaryImage = resizeImage(armCamImage, primarySize)\n elif(primaryCamString == \"\"):\n pass\n else:\n rospy.logerr(\"Invalid Option for primaryCamString recieved!\")\n\n # get secondary image\n if(secondaryCamString == \"head\"):\n secondaryImage = resizeImage(headCamImage, secondarySize)\n elif(secondaryCamString == \"leftArm\"):\n secondaryImage = resizeImage(armCamImage, secondarySize)\n elif(secondaryCamString == \"\"):\n pass\n else:\n rospy.logerr(\"Invalid Option for secondaryCamString recieved!\")\n\n # publish both new images\n if(np.sum(primaryImage) != 0 and np.sum(secondaryImage) != 0):\n primaryImageMessage = backBridge.cv2_to_imgmsg(primaryImage, \"bgr8\")\n primaryPub.publish(primaryImageMessage)\n\n secondaryImageMessage = backBridge.cv2_to_imgmsg(secondaryImage, \"bgr8\")\n secondaryPub.publish(secondaryImageMessage)",
"def RunMultiFrames(self):\r\n\r\n\r\n frameNumber = self.spinBox_FrameNum.value()\r\n\r\n segmentNumber = self.spinBox_SegmentNum.value()\r\n\r\n\r\n self.textBrowser_SetMeasureInf.setTextColor(QtCore.Qt.gray)\r\n self.textBrowser_SetMeasureInf.append(\"frameNumber, segmentNumber is: \"+ str(frameNumber)+ \", \" + str(segmentNumber))\r\n self.textBrowser_SetMeasureInf.setTextColor(QtCore.Qt.green)\r\n self.textBrowser_SetMeasureInf.append(\"Running to get MultiImageData data ... \")\r\n print(\"frameNumber, segmentNumber is: \", frameNumber, segmentNumber)\r\n print(\"begin to get MultiImageData data ... \")\r\n self.Cam.MultiImageData(infoObj = self.textBrowser_SetMeasureInf, frame_number_expected = frameNumber, segment_frame = segmentNumber)",
"def __videoThread(self):\n\n self.frameList = []\n\n fpsTimer = FpsTimer(self.fps)\n printf(\"Starting videoStream thread.\")\n while self.running:\n fpsTimer.wait()\n if not fpsTimer.ready(): continue\n if self.setCamera is not None: self.__setNewCamera(self.setCamera)\n if self.paused: continue\n if self.cap is None: continue\n\n\n # Get a new frame\n ret, newFrame = self.cap.read()\n\n if not ret: # If a frame was not successfully returned\n printf(\"ERROR: while reading frame from Cam. Setting camera again...\")\n self.__setNewCamera(self.cameraID)\n cv2.waitKey(1000)\n continue\n\n\n # Do frame related work\n with self.frameLock:\n self.frame = newFrame\n\n # Add a frame to the frameList that records the 5 latest frames for Vision uses\n self.frameList.insert(0, self.frame.copy())\n # print(\"len\", len(self.frameList), \"Curr frames: \", [id(frame) for frame in self.frameList])\n while len(self.frameList) > 10:\n del self.frameList[-1]\n\n # Keep track of new frames by counting them. (100 is an arbitrary number)\n if self.frameCount >= 100:\n self.frameCount = 0\n else:\n self.frameCount += 1\n\n\n # Run any work functions that must be run. Expect no results. Work should be run before filters.\n if len(self.workList) > 0:\n # print(\"Work: \", self.workList)\n with self.workLock:\n for workFunc in self.workList:\n workFunc(self.frame)\n\n\n\n # Run any filters that must be run, save the results in self.filterFrame\n if len(self.filterList) > 0:\n # print(\"Filters: \", self.filterList)\n with self.filterLock:\n filterFrame = self.getFrame()\n for filterFunc in self.filterList:\n filterFrame = filterFunc(filterFrame)\n\n # Draw FPS on the screen\n fps = str(int(round(fpsTimer.currentFPS, 0)))\n cv2.putText(filterFrame, fps, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.25, (255, 255, 255), 2)\n\n self.filterFrame = filterFrame\n\n\n else:\n self.filterFrame = self.frame\n\n printf(\"VideoStream Thread has ended\")",
"def videos():\n main_window.withdraw()\n window = Toplevel()\n video_gui = VideoGUI(window, main_window)",
"def sub_processor(lock, pid, video_list):\r\n text = 'processor %d' % pid\r\n with lock:\r\n progress = tqdm.tqdm(\r\n total=len(video_list),\r\n position=pid,\r\n desc=text\r\n )\r\n for i in range(len(video_list)):\r\n video_name = video_list[i]\r\n \"\"\" Read result csv file \"\"\"\r\n df = pd.read_csv(os.path.join(config.post_csv_load_dir, video_name + \".csv\"))\r\n \"\"\" Calculate final score of proposals \"\"\"\r\n df['score'] = df.iou.values[:] * df.start.values[:] * df.end.values[:]\r\n if len(df) > 1:\r\n df = softNMS(df)\r\n df = df.sort_values(by=\"score\", ascending=False)\r\n video_info = video_dict[video_name]\r\n video_duration = video_info[\"duration_second\"]\r\n proposal_list = []\r\n\r\n for j in range(min(top_number, len(df))):\r\n tmp_proposal = {}\r\n tmp_proposal[\"score\"] = df.score.values[j]\r\n tmp_proposal[\"segment\"] = [max(0, df.xmin.values[j]) * video_duration,\r\n min(1, df.xmax.values[j]) * video_duration]\r\n tmp_proposal[\"label\"] = \"行走\"\r\n # tmp_proposal[\"label\"] = \"Fun sliding down\"\r\n proposal_list.append(tmp_proposal)\r\n result_dict[video_name] = proposal_list\r\n with lock:\r\n progress.update(1)\r\n\r\n with lock:\r\n progress.close()",
"def OnResize(self, event):\n self._resizing = True\n self._resize_timer.Start(60, True)",
"def run(self):\n global vminstance\n\n iomanager.clear_IOM()\n vminstance = self\n if self.__profile:\n import cProfile, pstats\n cProfile.runctx('vminstance._run()', globals(), locals(), 'videomanager.prof')\n else:\n self._run()",
"def __init__(self, video_folder, output_folder, output_file=None, height=320, width=480,\n sample_every=10, max_workers=32):\n self.video_folder = video_folder\n self.output_folder = output_folder\n self.output_file = output_file\n print(\n f\"Video Preprocessor created with video_folder = {video_folder} , output_folder = {output_folder}, output_file = {output_file}\")\n\n self.height = height\n self.width = width\n self.sample_every = sample_every\n self.max_workers = max_workers\n print(f\"Frames will be created with height = {height} , width = {width} , sample_every = {sample_every}\")",
"def run(self):\n list_count = self.queue_list.count()\n for i in range(list_count):\n if self._isRunning:\n currentItem = self.queue_list.item(0)\n self.statusChange.emit(currentItem.fName, currentItem.video, currentItem.audio)\n self.func(self.queue_list, 0)\n self.notifyProgress.emit((i+1)/list_count * 100) # current progress = completed / total jobs\n self.revertButton.emit(\"Convert\")\n # self.notifyProgress.emit(0)",
"def _pool_and_resize(self):\n # Pool if there are enough screens to do so.\n if self.frame_skip > 1:\n np.maximum(\n self.screen_buffer[0],\n self.screen_buffer[1],\n out=self.screen_buffer[0])\n\n transformed_image = cv2.resize(\n self.screen_buffer[0], (self.screen_size, self.screen_size),\n interpolation=cv2.INTER_AREA)\n int_image = np.asarray(transformed_image, dtype=np.uint8)\n return np.expand_dims(int_image, axis=2)",
"def run(self):\n for index, playlist_line in enumerate(self.playlist):\n gv.logger.info(\"New video\")\n self.index = index\n self.video_command = playlist_line.strip(\" \")\n gv.logger.info(self.video_command)\n if gv.api_dm.probe_status == \"killed\":\n gv.logger.info(\"Exiting playlist\")\n break\n self.launch_video_process()\n gv.logger.info(\"Finished video\")\n self.finish_playlist()",
"def run(self):\n\n # Start the video stream process\n self._process.start()"
] | [
"0.6563744",
"0.64110225",
"0.5975478",
"0.59674823",
"0.59062296",
"0.5873052",
"0.58316374",
"0.58308446",
"0.578684",
"0.5743147",
"0.5654799",
"0.56200457",
"0.5559133",
"0.55516684",
"0.5550606",
"0.5521735",
"0.546656",
"0.54456455",
"0.5401045",
"0.5391771",
"0.5387132",
"0.53807807",
"0.5373912",
"0.53450465",
"0.5318746",
"0.5294342",
"0.5288463",
"0.52673143",
"0.52672523",
"0.52671325"
] | 0.69108474 | 0 |
All entries in the header must be specified in the metadata lines.
if entry not in self.metadata:
raise SyntaxError("Header entry must be described in the metadata lines. Entry: %s is not in metadata." % entry) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_header_missing_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['AAA', 'XXX', 'YYY',\r\n 'ZZZ']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field SampleID\\t0,0',\r\n 'Found header field XXX, expected field BarcodeSequence\\t0,1',\r\n 'Found header field YYY, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field ZZZ, last field should be Description\\t0,3']\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)",
"def test_check_header_required_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find all as errors if not named correctly\r\n header = ['AAA', 'BBB', 'CCC', 'DDD',\r\n 'EEE']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field SampleID\\t0,0',\r\n 'Found header field BBB, expected field BarcodeSequence\\t0,1',\r\n 'Found header field CCC, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field EEE, last field should be Description\\t0,4',\r\n 'Missing added demultiplex field run_prefix\\t-1,-1']\r\n\r\n self.assertEqual(errors, expected_errors)",
"def test_missing_header(barred_tac_list_importer, logger, db_conn):\n expect_failure(barred_tac_list_importer, exc_message='Metadata header, cannot find the column headers - tac, '\n '10000110')",
"def test_check_metadata_fields(self):\n contents = self.read_metadata_contents()\n family = Metadata.get_family_metadata(contents)\n\n keys = [(\"name\", str), (\"postScriptName\", str),\n (\"fullName\", str), (\"style\", str),\n (\"weight\", int), (\"filename\", str),\n (\"copyright\", str)]\n\n missing = set([])\n unknown = set([])\n\n for j, itemtype in keys:\n\n for font_metadata in family.fonts:\n if j not in font_metadata:\n missing.add(j)\n\n for k in font_metadata:\n if k not in map(lambda x: x[0], keys):\n unknown.add(k)\n\n if unknown:\n msg = 'METADATA.json \"fonts\" property has unknown items [%s]'\n self.fail(msg % ', '.join(unknown))\n\n if missing:\n msg = 'METADATA.json \"fonts\" property items missed [%s] items'\n self.fail(msg % ', '.join(missing))",
"def check_headerEntries(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = ('original master', 'object', 'barcode')\n header2 = ('original master', 'object',\n 'object identifier\\n(edit heading to specify type ' +\n '- e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n bad_entries = []\n\n for header in expected:\n if header not in found:\n bad_entries.append(header)\n\n if bad_entries:\n self.raise_excelerror(\"Incorrect header entry for {0}.\"\n .format(bad_entries))\n return True",
"def _header(self, hdr1, hdr2, ignore=None):\n errorlist = []\n s1 = set(hdr1.keys()) - {'HISTORY', 'COMMENT'}\n s2 = set(hdr2.keys()) - {'HISTORY', 'COMMENT'}\n if ignore:\n s1 -= set(ignore)\n s2 -= set(ignore)\n if s1 != s2:\n if s1 - s2:\n errorlist.append(f'Header 1 contains keywords {s1 - s2}')\n if s2 - s1:\n errorlist.append(f'Header 2 contains keywords {s2 - s1}')\n\n ignore_list = ['GEM-TLM', 'HISTORY', 'COMMENT', '']\n # Include keywords from `ignore` parameter.\n if ignore:\n ignore_list.extend(ignore)\n\n for kw in hdr1:\n # GEM-TLM is \"time last modified\"\n if kw not in timestamp_keys.values() and kw not in ignore_list:\n try:\n v1, v2 = hdr1[kw], hdr2[kw]\n except KeyError: # Missing keyword in AD2\n continue\n try:\n if abs(v1 - v2) >= 0.01:\n errorlist.append(f'{kw} value mismatch: {v1} v {v2}')\n except TypeError:\n if v1 != v2:\n errorlist.append(f'{kw} value inequality: {v1} v {v2}')\n return errorlist",
"def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))",
"def _entry_has_required_fields(entry: _LexiconEntry) -> None:\n missing_fields = [f for f in _REQUIRED_FIELDS if f not in entry]\n\n if missing_fields:\n field_str = \", \".join(sorted(missing_fields))\n raise InvalidLexiconEntryError(f\"Entry is missing fields: '{field_str}'\")",
"def _extract_metadata(self, header, cleaner):\n metadata = []\n for k, v in header.items():\n key = str(cleaner(k)) # clean key and ensure it is a string\n val = str(cleaner(v)) # clean value and ensure it is a string\n if (key and val):\n metadata.append(Metadatum(key, val))\n return metadata",
"def test_metadata_no_unknown_top_keys(self):\n top_keys = [\"name\", \"designer\", \"license\", \"visibility\", \"category\",\n \"size\", \"dateAdded\", \"fonts\", \"subsets\"]\n for x in self.metadata.keys():\n self.assertIn(x, top_keys, msg=\"%s found unknown top key\" % x)",
"def _add_metadata(bt, md_key, lines):\r\n taxonomy_md = biom_taxonomy_formatter(bt, md_key)\r\n if taxonomy_md is not None:\r\n for i in range(len(lines) - 1): # one more line than OTU\r\n # skip header line in lines\r\n lines[i + 1] = lines[i + 1] + '\\t' + taxonomy_md[i]\r\n return lines\r\n else: # remove md_header from the first line\r\n nls = ['\\t'.join(lines[0].split('\\t')[:-1])] + lines[1:]\r\n return nls",
"def extract_flat_metadata(self):\n metadata = []\n missing_metadata = []\n if self.records is not None:\n for r in self.records:\n try:\n item = r[\"metadata\"][\"arXiv\"]\n item[\"authors\"] = item[\"authors\"][\"author\"]\n item.update(r[\"header\"])\n metadata.append(item)\n except KeyError:\n missing_metadata.append(r[\"header\"])\n self.metadata = metadata\n self.missing_metadata = missing_metadata",
"def process_headers(fin, fout, fixed_headers):\n filtered_headers = set(item[0] for item in fixed_headers)\n filtered_headers.add(\"SAMPLE\")\n expected_values = {\n name: value for name, asserted, value in fixed_headers if asserted\n }\n errors = False\n for raw_line in fin:\n if raw_line.startswith('##'):\n # TODO: This will break if the metadata header is bad.\n name, value = raw_line[2:].rstrip().split('=', 1)\n if name in filtered_headers:\n if name in expected_values:\n if value != expected_values[name]:\n errors = True\n # TODO: propper logging\n sys.stderr.write(\n 'tcga-vcf-reheader: mismatch {}={}\\n'.format(\n name, value\n )\n )\n else: # Just some other header...\n fout.write(raw_line)\n else:\n break\n fout.write(raw_line) # raw_line should now be the data header line.\n return errors",
"def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):\n return (cache_file_metadata_header.key_size > 0 and\n cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and\n cache_file_metadata_header.format_version in (1, 2, 3) and\n cache_file_metadata_header.last_fetched_time > 0 and\n cache_file_metadata_header.fetch_count > 0)",
"def _check_duplicate_headers(self, docstring: PetscDocStringImpl) -> None:\n for heading, where in self.seen_headers.items():\n if len(where) <= 1:\n continue\n\n lasti = len(where) - 1\n src_list = []\n nbefore = 2\n nafter = 0\n prev_line_begin = 0\n for i, loc in enumerate(where):\n startline = loc.start.line\n if i:\n nbefore = startline - prev_line_begin - 1\n if i == lasti:\n nafter = 2\n src_list.append(loc.formatted(num_before_context=nbefore, num_after_context=nafter, trim=False))\n prev_line_begin = startline\n mess = \"Multiple '{}' subheadings. Much like Highlanders, there can only be one:\\n{}\".format(\n self.transform(self.name), '\\n'.join(src_list)\n )\n docstring.add_diagnostic(\n Diagnostic(Diagnostic.Kind.ERROR, self.diags.section_header_unique, mess, self.extent.start)\n )\n return",
"def test_check_header(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'Description']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)",
"def _metadata_is_consistent(metadata):\n checks = []\n required = ('version', 'fields', 'size', 'width', 'height', 'points',\n 'viewpoint', 'data')\n for f in required:\n if f not in metadata:\n print('%s required' % f)\n checks.append((lambda m: all([k in m for k in required]),\n 'missing field'))\n checks.append((lambda m: len(m['type']) == len(m['count']) ==\n len(m['fields']),\n 'length of type, count and fields must be equal'))\n checks.append((lambda m: m['height'] > 0,\n 'height must be greater than 0'))\n checks.append((lambda m: m['width'] > 0,\n 'width must be greater than 0'))\n checks.append((lambda m: m['points'] > 0,\n 'points must be greater than 0'))\n checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',\n 'binary_compressed'),\n 'unknown data type:'\n 'should be ascii/binary/binary_compressed'))\n ok = True\n for check, msg in checks:\n if not check(metadata):\n print('error:', msg)\n ok = False\n return ok",
"def check_header_required_fields(header,\r\n errors,\r\n sample_id_ix,\r\n desc_ix,\r\n bc_ix,\r\n linker_primer_ix,\r\n added_demultiplex_field=None):\r\n\r\n header_checks = {\r\n sample_id_ix: \"SampleID\",\r\n desc_ix: \"Description\",\r\n bc_ix: \"BarcodeSequence\",\r\n linker_primer_ix: \"LinkerPrimerSequence\"\r\n }\r\n\r\n for curr_check in header_checks:\r\n if (header[curr_check] != header_checks[curr_check] and\r\n header_checks[curr_check] == \"Description\"):\r\n errors.append('Found header field %s, last field should be %s' %\r\n (header[curr_check], header_checks[curr_check]) +\r\n '\\t%d,%d' % (0, curr_check))\r\n elif (header[curr_check] != header_checks[curr_check] and\r\n header_checks[curr_check] != \"Description\"):\r\n errors.append('Found header field %s, expected field %s' %\r\n (header[curr_check], header_checks[curr_check]) +\r\n '\\t%d,%d' % (0, curr_check))\r\n\r\n if added_demultiplex_field:\r\n if added_demultiplex_field not in header:\r\n errors.append('Missing added demultiplex field %s\\t%d,%d' %\r\n (added_demultiplex_field, -1, -1))\r\n\r\n return errors",
"def safely_parse_metadata(self):\n # type: () -> List[Tuple[LineNo, int]]\n return [\n (int(start), int(length or \"1\"))\n for start, length in SAFE_PARSE_HUNK_HEADER.findall(\n self.text.lstrip(\"@\").split(\"@\", 1)[0]\n )\n ]",
"def _validate_heading(self, heading_parts: List[str]):\n # Validate heading row.\n assert len(heading_parts) >= 4\n assert \"oxygen\" in heading_parts\n assert \"pulse\" in heading_parts\n assert \"blood_pressure_systolic\" in heading_parts\n assert \"blood_pressure_diastolic\" in heading_parts",
"def test_metadata(self):\n cr = CaseReader(self.filename)\n self.assertEqual(cr.format_version, format_version,\n msg='incorrect format version')\n self.assertIsNone(cr.parameters,\n msg='parameter metadata should be None')\n self.assertIsNone(cr.unknowns, msg='unknown metadata should be None')",
"def spamHeaders(self) -> Tuple[List[str], Dict[str, str]]:\n sections = [\"STATUS\", \"TITLE\", \"PROJECT\", \"FILE\", \"SITE\", \"CHANNAME\", \"DATA\"]\n sectionHeaders = {}\n sectionHeaders[\"STATUS\"] = [\"STATUS\"]\n sectionHeaders[\"TITLE\"] = [\"AUTHOR\", \"VERSION\", \"DATE\", \"COMMENT\"]\n sectionHeaders[\"FILE\"] = [\"NAME\", \"FREQBAND\", \"DATE\"]\n sectionHeaders[\"CHANNAME\"] = [\"ITEMS\", \"NAME\"]\n sectionHeaders[\"DATA\"] = [\"ITEMS\", \"CHAN\"]\n return sections, sectionHeaders",
"def create_meta_dict_L1(adcp_meta):\n meta_dict = {}\n with open(adcp_meta) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n next(csv_reader, None) # Skip header row\n for row in csv_reader:\n # extract all metadata from csv file into dictionary -- some items not passed to netCDF file but are extracted anyway\n if row[0] == '' and row[1] == '':\n print('Metadata file contains a blank row; skipping this row !')\n elif row[0] != '' and row[1] == '':\n print('Metadata item in csv file has blank value; skipping this row '\n 'in metadata file !')\n else:\n meta_dict[row[0]] = row[1]\n\n # Add conventions metadata to meta_dict\n meta_dict['deployment_type'] = 'Sub Surface'\n meta_dict['flag_meaning'] = 'no_quality_control, good_value, probably_good_value, probably_bad_value, ' \\\n 'bad_value, changed_value, value_below_detection, value_in_excess, ' \\\n 'interpolated_value, missing_value'\n meta_dict['flag_references'] = 'BODC SeaDataNet'\n meta_dict['flag_values'] = '0, 1, 2, 3, 4, 5, 6, 7, 8, 9'\n meta_dict['keywords'] = 'Oceans > Ocean Circulation > Ocean Currents'\n meta_dict['keywords_vocabulary'] = 'GCMD Science Keywords'\n meta_dict['naming_authority'] = 'BODC, MEDS, CF v72'\n meta_dict['variable_code_reference'] = 'BODC P01'\n meta_dict['Conventions'] = \"CF-1.8\"\n\n return meta_dict",
"def test_headlines_required(self) -> None:\n for rule in self.rules.headlines:\n if not rule.required:\n continue\n is_match: bool = False\n for headline in self.report.headlines:\n if self.rules.get_headline_rules(headline.name) == rule:\n is_match = True\n break\n if not is_match:\n self.add_error(f\"Rubriken {rule.name} som måste vara med saknas.\")",
"def _check_headers(cursor, headers):\n all_columns = set(chain.from_iterable(_columns(cursor, table) for table in DATA_TABLES))\n for header in headers:\n if header not in all_columns:\n raise ValueError('column {} not recognized'.format(header))",
"def check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix,\r\n desc_ix,\r\n bc_ix,\r\n linker_primer_ix,\r\n added_demultiplex_field=None):\r\n\r\n # Check for duplicates, append to errors if found\r\n errors = check_header_dups(header, errors)\r\n\r\n # Check for valid characters\r\n warnings = check_header_chars(header, warnings)\r\n\r\n # Check for required header fields\r\n errors = check_header_required_fields(header, errors, sample_id_ix,\r\n desc_ix, bc_ix, linker_primer_ix, added_demultiplex_field)\r\n\r\n return errors, warnings",
"def check_headers(self, headers):\n h = headers.values()[0]\n\n if 'DT' in PAR:\n if h.dt != PAR.DT:\n h.dt = PAR.DT\n\n if 'NT' in PAR:\n if h.nt != PAR.NT:\n print 'Warning: h.nt != PAR.NT'\n\n if 'NREC' in PAR:\n if h.nr != PAR.NREC:\n print 'Warning: h.nr != PAR.NREC'\n\n return h",
"def validate_header(self, reply):\n # check message is from my agg to me\n check_equal(reply.header.sender, self.aggregator_uuid, self.logger)\n check_equal(reply.header.recipient, self.common_name, self.logger)\n\n # check that the federation id matches\n check_equal(reply.header.federation_id, self.federation_uuid, self.logger)\n\n # check that we agree on single_col_cert_common_name\n check_equal(reply.header.single_col_cert_common_name, self.single_col_cert_common_name, self.logger)",
"def header(self):\n ...",
"def parse_header(self): # -> list[Unknown]:\n ..."
] | [
"0.6268195",
"0.6264674",
"0.62105215",
"0.6196427",
"0.6144928",
"0.6113776",
"0.60642403",
"0.60047483",
"0.5985811",
"0.5964642",
"0.59487873",
"0.5914528",
"0.5880523",
"0.58377224",
"0.58223",
"0.58066964",
"0.5804777",
"0.5768354",
"0.5765713",
"0.57347786",
"0.5703136",
"0.5700132",
"0.5680853",
"0.56777954",
"0.5657165",
"0.56503385",
"0.5648478",
"0.5641499",
"0.56075734",
"0.5600818"
] | 0.7916447 | 0 |
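A minimal usage sketch for the header check in the record above; the HeaderValidator wrapper class and the metadata values are assumptions for illustration only, not part of the original source.

class HeaderValidator:
    def __init__(self, metadata):
        # metadata holds every entry declared in the metadata lines
        self.metadata = metadata

    def check_header(self, entry):
        # Same check as in the record above: undeclared header entries are rejected
        if entry not in self.metadata:
            raise SyntaxError(
                "Header entry must be described in the metadata lines. "
                "Entry: %s is not in metadata." % entry)

validator = HeaderValidator(metadata={"CHROM", "POS", "ID"})
validator.check_header("CHROM")       # declared, passes silently
try:
    validator.check_header("FILTER")  # not declared, raises SyntaxError
except SyntaxError as err:
    print(err)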
Test selection of a leaf node | def testSelectLeaf(self):
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
menu.select(tag="a11")
assertTrue(menu.selected)
assertTrue(items["a1"].selected)
assertTrue(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def IsLeaf(self):\n return not self.subtests",
"def is_leaf(node):\n return node.children == {}",
"def check_leaf_node(df):\r\n \r\n return len(set(df['class'])) == 1",
"def is_leaf(self, p):\n return self.num_children(p) == 0",
"def test_is_leaf(self):\n self.assertEqual(True, comparator.is_leaf(None))\n self.assertEqual(True, comparator.is_leaf(True))\n self.assertEqual(True, comparator.is_leaf(False))\n self.assertEqual(True, comparator.is_leaf(int()))\n self.assertEqual(True, comparator.is_leaf(str()))\n self.assertEqual(False, comparator.is_leaf(list()))\n self.assertEqual(False, comparator.is_leaf(dict()))",
"def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...",
"def select(self, board, c_puct):\n # the game rule has a random cases in the select procedure\n board.get_point()\n batch = self._children.get(board.point, None) # get this point's edge\n if not batch: return True, None # this node is the leaf\n return False, max(batch.items(),\n key=lambda act_node: act_node[1].get_value(c_puct))",
"def node_leaf(self):\r\n return self.zero_son is None and self.one_son is None",
"def is_leaf(self):\r\n return self.num_children() == 0",
"def testSelectSpecificNode(self):\n\n menu = self.menu\n items = self.items\n\n assertTrue = self.assertTrue\n assertIsNone = self.assertIsNone\n\n items[\"a2\"].select()\n\n assertTrue(menu.selected)\n assertIsNone(items[\"a1\"].selected)\n assertIsNone(items[\"a11\"].selected)\n assertIsNone(items[\"a12\"].selected)\n assertTrue(items[\"a2\"].selected)\n assertIsNone(items[\"a21\"].selected)\n assertIsNone(items[\"a22\"].selected)\n\n items[\"a12\"].select()\n\n assertTrue(menu.selected)\n assertTrue(items[\"a1\"].selected)\n assertIsNone(items[\"a11\"].selected)\n assertTrue(items[\"a12\"].selected)\n assertIsNone(items[\"a2\"].selected)\n assertIsNone(items[\"a21\"].selected)\n assertIsNone(items[\"a22\"].selected)",
"def select_leaf(self):\n current = self\n best_child = None\n selected_nodes_R = 0\n while current.isExpanded:\n maxUCT = - float('inf')\n for child in current.children.values():\n UCT = child.compute_uct()\n if UCT > maxUCT:\n maxUCT = UCT\n best_child = child\n\n current = best_child\n selected_nodes_R += current.score\n return current, selected_nodes_R",
"def _is_leaf(self, index):\r\n return 2*index+1 > self._size - 1",
"def leaf(self, node: object) -> bool:\n if node.left is None and node.right is None:\n return True\n\n else:\n return False",
"def is_leaf(self):\n return self.__left == None and self.__right==None",
"def is_leaf(tree):\n return not branches(tree)",
"def testSelectBranch(self):\n\n menu = self.menu\n items = self.items\n\n assertTrue = self.assertTrue\n assertIsNone = self.assertIsNone\n\n menu.select(tag=\"a2\")\n\n assertTrue(menu.selected)\n assertIsNone(items[\"a1\"].selected)\n assertIsNone(items[\"a11\"].selected)\n assertIsNone(items[\"a12\"].selected)\n assertTrue(items[\"a2\"].selected)\n assertTrue(items[\"a21\"].selected)\n assertIsNone(items[\"a22\"].selected)",
"def is_leaf(tree):\n\n return not branches(tree)",
"def next_leaf(node):\n return len(node[1][0][1]) == 0",
"def is leaf(self, p):\n return self.num children(p) == 0",
"def isNodeLeaf ( self ):\n return self.nodes is None or len ( self.nodes ) == 0\n # End isNodeLeaf",
"def is_leaf(self):\n return self._children == {}",
"def is_leaf(self):\n return self._children == {}",
"def is_leaf(self):\n return self._children == {}",
"def is_leaf(self):\n return self._children == {}",
"def isLeaf(node):\n\n return node.left is None and node.right is None",
"def _select(node):\n return max(node.children.items(), key=lambda child: Tree._cal_uct(child[1]))",
"def test_node_selection(self, node: dict, selection_type: SelectionType):\n assert ListSelectedExecutor.node_selection(node) == selection_type",
"def get_leaf_nodes(self):\n pass",
"def leaf(self):\n if not self.left and not self.right:\n return True\n return False",
"def isLeaf(self):\n return self.left is None and self.right is None"
] | [
"0.7235153",
"0.6678625",
"0.66372824",
"0.66100496",
"0.6575179",
"0.65703756",
"0.6556593",
"0.6548734",
"0.6526366",
"0.65042156",
"0.6485087",
"0.6446794",
"0.6434655",
"0.6423185",
"0.63771605",
"0.6371146",
"0.63690525",
"0.6355469",
"0.63342595",
"0.63308424",
"0.63199174",
"0.63199174",
"0.63199174",
"0.63199174",
"0.6303664",
"0.62891406",
"0.6269056",
"0.62537766",
"0.6252634",
"0.6240367"
] | 0.7547813 | 0 |
Test selection of a branch | def testSelectBranch(self):
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
menu.select(tag="a2")
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertTrue(items["a21"].selected)
assertIsNone(items["a22"].selected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)",
"def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))",
"def branch(self):\n return None",
"def test_branch_if_equal(self, test_cpu, branch_, zero_flag):\n test_cpu.status.zero = zero_flag\n\n branch.branch_if_equal(test_cpu, 10)\n\n branch_.assert_called_with(test_cpu, zero_flag, 10)",
"def test_branch_true_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = true_value\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")",
"def testSelectLeaf(self):\n\n menu = self.menu\n items = self.items\n\n assertTrue = self.assertTrue\n assertIsNone = self.assertIsNone\n\n menu.select(tag=\"a11\")\n\n assertTrue(menu.selected)\n assertTrue(items[\"a1\"].selected)\n assertTrue(items[\"a11\"].selected)\n assertIsNone(items[\"a12\"].selected)\n assertIsNone(items[\"a2\"].selected)\n assertIsNone(items[\"a21\"].selected)\n assertIsNone(items[\"a22\"].selected)",
"def switchToBranch(self):\n branches = self._listBranches()\n if not branches:\n raise error.ExpectationFailed(\n 'No branches available. Please import one.')\n\n choice = io.getChoice('Available release branches:',\n 'Your choice?',\n branches,\n suggest=len(branches)-1)\n self._switchBranch(branches[choice])",
"def test_branch_single_value_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = 1\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")",
"def test_sql_branch_operator_postgres(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"postgres_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)",
"def test_branch_false_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for false_value in SUPPORTED_FALSE_VALUES:\n mock_get_records.return_value = false_value\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.SKIPPED\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")",
"def test_branch_name_get(repository: Repository) -> None:\n branch = repository.branch(repository.head.name)\n assert repository.head.name == branch.name",
"def requires_branch(f):\n @functools.wraps(f)\n def check_branch(self, *args, **kwargs):\n if self.branch is None:\n raise error.ExpectationFailed(\n 'This operation requires an active release branch')\n return f(self, *args, **kwargs)\n return check_branch",
"def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to release, you are on %s\" % (expected_branch, branch))",
"def branch(self, *arguments, **kwargs):\n return self.get_output('branch', *arguments, **kwargs)",
"def test_branch_commit_get(repository: Repository) -> None:\n branch = repository.head\n assert repository.heads[branch.name] == branch.commit",
"def select_branches(mode, ose_version, build_version):\n\n branch_spec = source_branches[mode]\n\n # check for valid mode and source version\n if branch_spec['on-master'] != eq_version(ose_version, build_version):\n # release builds can't be from the version on master HEAD\n # dev builds must be on master HEAD \n raise ValueError(\n \"Invalid build mode {}: ose_version: {}, build_version {}\".\n format(mode, ose_version, build_version))\n\n origin_branch = string.replace(\n branch_spec['origin'], \"VERSION\", build_version)\n\n if branch_spec['upstream'] is None:\n upstream_branch = None\n else:\n upstream_branch = string.replace(\n branch_spec['upstream'], \"VERSION\", build_version)\n\n return (origin_branch, upstream_branch)",
"def isSelected(*args):",
"def isSelected(*args):",
"def __gitBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBranch(self.project.getProjectPath())[1] or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()",
"def branch(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"branch\")",
"def branch(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"branch\")",
"def test_branch_can_be_copied():\n\n setup_org()\n setup_repo()\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/master\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n responses.add(responses.POST, \"https://api.github.com/repos/my-org/my-repo/git/refs\",\n body=my_new_ref,\n content_type='text/json',\n status=201)\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/main\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n token = '__dummy__'\n org = \"my-org\"\n client = GithubRestClient(token)\n new_branch_name = \"main\"\n\n repo = get_repository(client, org, \"my-repo\")\n new_branch = copy_branch(repo, repo.default_branch, new_branch_name)\n assert None is not new_branch",
"def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)",
"def check_for_branch_op(op_info: ModuleIdentifierOpInfo):\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool",
"def _branch(self):\n printer = Printer(None)\n ci_manager = CIManager(printer)\n return ci_manager.get_branch()",
"def branch(branch_name):\n env.branch = branch_name",
"def branch(branch_name):\n env.branch = branch_name",
"def _switchBranch(self, release):\n if release is None:\n self.branch = None\n self.branch_dir = None\n log.info('No release branch available')\n else:\n self.wc.update()\n assert self.wc.exists('branches/' + release)\n io.linesToFile(self.path(self.BRANCH_FILE), [release])\n self.branch = release\n self.branch_dir = 'branches/' + release\n self.wc.update(self.branch_dir, depth='infinity')\n log.info('Working on branch ' + self.branch)",
"def test_with_skip_in_branch_downstream_dependencies(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n branch_op >> self.branch_1 >> self.branch_2\n branch_op >> self.branch_2\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = [true_value]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")",
"def select_branch(self, node):\n total_n = node.total_visit_count\n \n def branch_score(move):\n q = node.expected_value(move)\n p = node.prior(move)\n n = node.visit_count(move)\n return q + self.c * p * np.sqrt(total_n)/(1+n)\n \n moves = node.moves()\n if moves:\n return max(moves, key=branch_score)\n else:\n # If moves is empty then no legal moves can be made from the game\n # state corresponding to the given node.\n return None"
] | [
"0.648244",
"0.6446408",
"0.64247346",
"0.63390535",
"0.62524986",
"0.6216893",
"0.6167209",
"0.61396754",
"0.6128586",
"0.60267806",
"0.60020113",
"0.59735584",
"0.5965118",
"0.5926897",
"0.58735794",
"0.58632785",
"0.5858319",
"0.5858319",
"0.58455837",
"0.58444524",
"0.58444524",
"0.58417094",
"0.58352786",
"0.5777858",
"0.57658684",
"0.57554805",
"0.57554805",
"0.5747932",
"0.5747719",
"0.572995"
] | 0.7612395 | 0 |
Test selection of specific nodes | def testSelectSpecificNode(self):
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
items["a2"].select()
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected)
items["a12"].select()
assertTrue(menu.selected)
assertTrue(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertTrue(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_node_selection(self, node: dict, selection_type: SelectionType):\n assert ListSelectedExecutor.node_selection(node) == selection_type",
"def test_selecting_nodes_clicking_them_discovered(self):\n with Nodes()as n:\n for node in n.nodes_discovered:\n node.parent.click()\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Discovered node is selected')",
"def test_select_all(self):\n with Nodes()as n:\n n.select_all.click()\n for selects in n.select_all_in_group:\n self.assertTrue(selects.is_selected(),\n 'Select all in group is selected')\n for node in n.nodes_discovered:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Discovered node is selected')\n for node in n.nodes_offline:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Offline node is not selected')\n for node in n.nodes_error:\n self.assertFalse(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Error node is not selected')",
"def test_select_all_selecting_nodes_one_by_one(self):\n with Nodes()as n:\n for i, group in enumerate(n.node_groups):\n for node in group.nodes_discovered:\n node.checkbox.click()\n for node in group.nodes_offline:\n node.checkbox.click()\n self.assertTrue(\n group.select_all_in_group[0].is_selected(),\n '\"Select all in group\" is checked')\n self.assertTrue(\n n.select_all.is_selected(), '\"Select all\" is checked')",
"def test_selecting_nodes_clicking_them_error(self):\n with Nodes()as n:\n for node in n.nodes_error:\n node.parent.click()\n self.assertFalse(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Error node is not selected')",
"def event_node_selected(self, node):\n # TODO\n print(\"selected node:\", node)",
"def test_randomly_select_node_1(self):\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n no_grow = set()\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 6), node_counter[b] / self.trials, places=2)\n\n self.assertIn(c, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[c] / self.trials, places=2)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[d] / self.trials, places=2)",
"def test_randomly_select_node_2(self):\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n no_grow = {c}\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 5), node_counter[b] / self.trials, places=2)\n\n self.assertNotIn(c, node_counter)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[d] / self.trials, places=2)",
"def test_selecting_nodes_clicking_them_offline(self):\n with Nodes()as n:\n for node in n.nodes_offline:\n node.parent.click()\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Offline node is not selected')",
"def test_select_all_in_group(self):\n with Nodes()as n:\n for i, group in enumerate(n.node_groups):\n group.select_all_in_group[0].click()\n for node in group.nodes_discovered:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Discovered node is selected')\n self.assertTrue(\n n.select_all.is_selected(), '\"Select all\" is checked')",
"def run():\n \n # GET SELECTED NODES\n sel = hou.selectedNodes()\n \n # DISPLAY WARNINGS IF TWO NODES ARE NOT SELECTED\n if len(sel) != 2:\n hou.ui.displayMessage(\"Please select exactly two nodes.\")\n\n\n # INITIALIZE VARIABLES\n node1 = sel[0]\n node2 = sel[1]\n\n # COPY PARAMETERS\n copy(node1, node2)",
"def select(self, target):",
"def testSelectLeaf(self):\n\n menu = self.menu\n items = self.items\n\n assertTrue = self.assertTrue\n assertIsNone = self.assertIsNone\n\n menu.select(tag=\"a11\")\n\n assertTrue(menu.selected)\n assertTrue(items[\"a1\"].selected)\n assertTrue(items[\"a11\"].selected)\n assertIsNone(items[\"a12\"].selected)\n assertIsNone(items[\"a2\"].selected)\n assertIsNone(items[\"a21\"].selected)\n assertIsNone(items[\"a22\"].selected)",
"def test_boolean_and_selection(self):\n\n # The selection loop:\n sel = list(mol_res_spin.residue_loop(\"#Ap4Aase:4 & :Pro\"))\n\n # Test:\n self.assertEqual(len(sel), 1)\n for res in sel:\n self.assert_(res.name == \"Pro\" and res.num == 4)",
"def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )",
"def OnNodeSelected( self, event ):\n try:\n node = self.sorted[ event.GetIndex() ]\n except IndexError, err: \n log.warn( _('Invalid index in node selected: %(index)s'), index=event.GetIndex())\n else:\n if node is not self.selected_node:\n wx.PostEvent( \n self, \n squaremap.SquareSelectionEvent( node=node, point=None, map=None ) \n )",
"def _random_subset(self, pa_nodes, seq, m, rng):\n targets = set()\n while len(targets) < m:\n x = rng.choice(seq)\n # if x in pa_nodes:\n if pa_nodes.get(x, False):\n targets.add(x)\n else:\n pass\n return targets",
"def filter_selected_nodes(tree) -> list:\n return [n for n in tree.nodes if n.select and n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}]",
"def test_discovered_nodes_enabled(self):\n with Nodes()as n:\n for node in n.nodes_discovered:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_enabled(),\n 'Node enabled')",
"def isSelected(*args):",
"def isSelected(*args):",
"def iter_nodes(self):",
"def visit(self, node):",
"def visit(self, node):",
"def testSelectBranch(self):\n\n menu = self.menu\n items = self.items\n\n assertTrue = self.assertTrue\n assertIsNone = self.assertIsNone\n\n menu.select(tag=\"a2\")\n\n assertTrue(menu.selected)\n assertIsNone(items[\"a1\"].selected)\n assertIsNone(items[\"a11\"].selected)\n assertIsNone(items[\"a12\"].selected)\n assertTrue(items[\"a2\"].selected)\n assertTrue(items[\"a21\"].selected)\n assertIsNone(items[\"a22\"].selected)",
"def list_nodes_select(call=None):\n return salt.utils.cloud.list_nodes_select(\n list_nodes_full(),\n __opts__[\"query.selection\"],\n call,\n )",
"def test_check_tree_subset(self):\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_subset(fasta_labels,\r\n self.sample_tree_3tips_fp)\r\n\r\n # Should find all and give True result\r\n\r\n self.assertEqual(actual_subset_results, True)\r\n\r\n # Should also get same results with 5 tip tree\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_subset(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find all and give True result\r\n\r\n self.assertEqual(actual_subset_results, True)\r\n\r\n # Change two of the fasta labels to not match tree tips\r\n\r\n fasta_labels = ['seq1_1', 'seqX_2', 'seq2_3', 'seqY_4']\r\n\r\n actual_subset_results = check_tree_subset(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find seqX and seqY as not being a subset\r\n\r\n self.assertEqual(actual_subset_results, ['seqX', 'seqY'])",
"def selectAdd(node):\n node['selected'].setValue(True)",
"def selection(self):\n selectednode = self.children.values()[0]\n selectedaction = self.children.keys()[0]\n maxValue = selectednode.toValue()\n \n for child in self.children.items():\n if(child[1].toValue() > maxValue):\n selectednode = child[1]\n maxValue = child[1].toValue()\n selectedaction = child[0]\n return selectednode, selectedaction",
"def select(self, board, c_puct):\n # the game rule has a random cases in the select procedure\n board.get_point()\n batch = self._children.get(board.point, None) # get this point's edge\n if not batch: return True, None # this node is the leaf\n return False, max(batch.items(),\n key=lambda act_node: act_node[1].get_value(c_puct))"
] | [
"0.70753044",
"0.69574356",
"0.66272986",
"0.65160197",
"0.63747287",
"0.62622404",
"0.6255423",
"0.6250882",
"0.61456066",
"0.6128836",
"0.61121875",
"0.60613096",
"0.6057898",
"0.5994398",
"0.5972796",
"0.5919818",
"0.5912785",
"0.5878819",
"0.58584577",
"0.57744914",
"0.57744914",
"0.572908",
"0.5697714",
"0.5697714",
"0.56946677",
"0.56509125",
"0.56003076",
"0.5593773",
"0.55555683",
"0.5539852"
] | 0.72523385 | 0 |
Test selection with nonexistent tag | def testSelectNonexistentTag(self):
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
# Make a selection
menu.select(tag="a21")
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertTrue(items["a21"].selected)
assertIsNone(items["a22"].selected)
# Use a non-existent tag
menu.select(tag="nonexistent")
# Nothing should be selected
assertIsNone(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tags_tag_search_invalid_tag(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n global NON_EXISTENT_TAG\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags(NON_EXISTENT_TAG)\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an the tag\" \\\n + \"'%s' returned an error response code\" % (NON_EXISTENT_TAG) \\\n + \"on the page %s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))",
"def test_tags_content_search_invalid_tag(self):\n\n global NON_EXISTENT_TAG\n\n po = self.catalog.load_pageobject('TagsPage')\n\n self.browser.proxy_client.new_har(\"page\")\n po.goto_page()\n har_entry = self.browser.page_load_details()\n\n start_url = po.current_url()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_content([NON_EXISTENT_TAG])\n har_entry = self.browser.page_load_details()\n\n end_url = po.current_url()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is True, \\\n \"while on the tags page %s,\" % (start_url) \\\n + \" searching for content with the tag '%s'\" % (NON_EXISTENT_TAG) \\\n + \" did not return an error\" \\\n + \" response code on page %s.\" % (end_url) \\\n + \" http archive follows:\\n%s\" % (pprint.pformat(har_entry))",
"def test_tags_tag_search_no_tag(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags('')\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an empty string as the tag\" \\\n + \"returned an error response code on the page\" \\\n + \"%s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))",
"def _is_text_tag(tag):\n return tag.name not in ['script', 'style']",
"def unknown_starttag(self, tag, attrs):\n if tag in self.valid_tags:\n self.result.append('<' + tag)\n for k, v in attrs:\n if string.lower(k[0:2]) != 'on' and",
"def test_does_not_return_study_with_no_tagged_traits_for_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\",' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(study.pk, pks)",
"def test_textNotOperator(self):\n xp = XPathQuery(\"/foo[not(@nosuchattrib)]\")\n self.assertEqual(xp.matches(self.e), True)",
"def test_invalid_tag(self):\r\n with self.assertRaises(Exception):\r\n self.check_group('invalid', 'choice', 'checkbox')",
"def test_get_tag_fail(self):\n self.assertRaises(AttributeError, get_tag, None, \"h1\")\n self.assertRaises(\n AttributeError, get_tag, \"<h1>This is not a XML tag object</h1>\", \"h1\"\n )",
"def exists(self, selector):\n return not self.main_frame.findFirstElement(selector).isNull()\n\n\n #TODO: Still not work.",
"def getOptionalTag(node, tag, option=\"\"):\n try:\n return getTag(node, tag)\n except TagError:\n return option",
"def test_returns_study_with_unreviewed_tagged_trait_for_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\",' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertIn(study.pk, pks)",
"def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])",
"def test_select_unexisting_field(self, document):\n assert document.select({\"idontexist\": 1}) == {\"_id\": 1, \"idontexist\": None}",
"def is_tag(t):\n return len(t) > 1 and t.startswith('#') and not t.startswith('##') and t",
"def find_selected(self):\r\n return None",
"def _text_or_none(root, tag):\n elem = root.find(tag)\n return None if elem is None else elem.text",
"def test_returns_study_with_unreviewed_tagged_trait_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertIn(study.pk, pks)",
"def test_no_tagging_button(self):\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertNotContains(response, reverse('trait_browser:source:traits:tagging', kwargs={'pk': self.trait.pk}))\n self.assertFalse(context['show_tag_button'])",
"def test_invalid_tag(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div)p')",
"def test_no_duplicate_tag(driver):\n for i,rs in enumerate(driver.find_elements_by_class_name('receipt')):\n l = list(get_tags(rs))\n if len(l) != len(set(l)):\n print(\"There are duplicate tags in the {}-th receipt line\"\\\n .format(i))\n print(\"Found tag: {!r}\".format(l))\n return -1\n return 0",
"def validate_tag(tag=None):\n if not tag:\n raise AttributeError('Tag cannot be empty')\n\n if tag not in TAGS:\n raise ValueError('{0} tag is not supported')",
"def intf_TAGNOTQUERY(E):\n if not inc.TXT_or_LST_of_TXTs(E.The,1):\n print(\"Input Error: nottag?\")\n print(intf_TAGNOTQUERY.__doc__)\n return # Without doing much of anything.\n mytags= E.The.StackPop().val\n if type(mytags)==type(list()):\n #mytags= map(lambda x:x.val, mytags) # Should now be a list of TXTs.\n mytags= [x.val for x in mytags] # Should now be a list of TXTs.\n else:\n mytags= [ mytags ] # Also a (1 item) list of ints.\n disqualifying_ents= list()\n for myeid in MMEL.El.keys():\n atagishere= False # Assume they're here until one is not found.\n for mytag in mytags:\n #print(\"Searching entity #%d for tag ''%s''\" % (myeid,mytag))\n if MMEL.El[myeid].has_tag(mytag):\n atagishere= True\n break\n if atagishere:\n disqualifying_ents.append( myeid )\n qualifying_ents= list() # For inverting.\n for myeid in MMEL.El.keys(): # Go through all ents again.\n if myeid not in disqualifying_ents: # Add ones not found before.\n qualifying_ents.append(myeid)\n # Objectify remaining.\n qualifying_ents= [objectifier.StackOB_VAL(m) for m in qualifying_ents] \n E.The.StackPush( objectifier.StackOB_LST(qualifying_ents) )",
"def test_does_not_return_studies_without_tagged_traits_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n other_study = self.studies[1]\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(other_study.pk, pks)",
"def _issingleton(self, tagname):\n return self.shortempty",
"def test_does_not_return_study_with_deprecated_tagged_trait_for_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(\n tag=tag, trait__source_dataset__source_study_version__study=study,\n trait__source_dataset__source_study_version__i_is_deprecated=True)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\",' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(study.pk, pks)",
"def test_tags_tag_search_valid_tag(self,tag_with_items):\n\n tag = tag_with_items\n\n assert tag is not None, 'Could not find a tag with items'\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags(tag)\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an the tag\" \\\n + \"'%s' returned an error response code on\" % (tag) \\\n + \"the page %s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))\n\n # check for valid pagination total on tags view page\n po = self.catalog.load_pageobject('TagsViewPage')\n (start,end,total) = po.get_pagination_counts()\n\n assert total >= 0, \\\n \"performing a tag search using the tag\" \\\n + \"'%s' took user to page (%s) with invalid pagination\"\\\n % (tag,po.current_url())",
"def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})",
"def test_does_not_return_studies_with_unreviewed_tagged_traits_with_other_tag_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n other_tag = TagFactory.create()\n other_study = self.studies[1]\n other_tagged_trait = TaggedTraitFactory.create(\n tag=other_tag, trait__source_dataset__source_study_version__study=other_study)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(other_study.pk, pks)",
"def check_and_get_ele_by_tag_name(element, tag_name):\r\n if element is None or not tag_name:\r\n return None \r\n try:\r\n return element.find_element_by_tag_name(tag_name) \r\n except NoSuchElementException:\r\n return None"
] | [
"0.6625541",
"0.6273861",
"0.62554324",
"0.60222036",
"0.58781713",
"0.584287",
"0.5800551",
"0.5729907",
"0.5698181",
"0.5698148",
"0.5659998",
"0.56520003",
"0.5631423",
"0.5631034",
"0.55683744",
"0.55287963",
"0.550021",
"0.5499857",
"0.5495857",
"0.5495131",
"0.5466072",
"0.54402643",
"0.5427457",
"0.5399744",
"0.5399183",
"0.5387898",
"0.5387192",
"0.5385978",
"0.53828883",
"0.537662"
] | 0.7399087 | 0 |
Read and parse NEXUS input (a filename, filehandle, or string). | def read(self, input):
# 1. Assume we have the name of a file in the execution dir or a
# file-like object.
# Note we need to add parsing of the path to dir/filename
try:
with File.as_handle(input, 'rU') as fp:
file_contents = fp.read()
self.filename = getattr(fp, 'name', 'Unknown_nexus_file')
except (TypeError, IOError, AttributeError):
            # 2. Assume we have a string from a fh.read()
if isinstance(input, basestring):
file_contents = input
self.filename = 'input_string'
else:
print(input.strip()[:50])
raise NexusError('Unrecognized input: %s ...' % input[:100])
file_contents = file_contents.strip()
if file_contents.startswith('#NEXUS'):
file_contents = file_contents[6:]
commandlines = _get_command_lines(file_contents)
        # get rid of any stray '#NEXUS' token - in merged treefiles, this might appear multiple times
for i, cl in enumerate(commandlines):
try:
if cl[:6].upper() == '#NEXUS':
commandlines[i] = cl[6:].strip()
            except Exception:
                pass
        # now loop through blocks (we parse only data in known blocks, thus ignoring non-block commands)
nexus_block_gen = self._get_nexus_block(commandlines)
while True:
try:
title, contents = next(nexus_block_gen)
except StopIteration:
break
if title in KNOWN_NEXUS_BLOCKS:
self._parse_nexus_block(title, contents)
else:
self._unknown_nexus_block(title, contents) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_nexus(filename):\n f = open(filename)\n return parse_nexus(f)",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def _parse(self, infile):\n raise NotImplementedError()",
"def read_input(infile):\n #Some utility functions to read in particular types of input\n def read_int():\n return int(infile.readline().strip())\n def read_ints():\n return np.array(infile.readline().split(), dtype=int)\n def read_bigints(): #For ints that won't fit directly in an int32 array\n line = infile.readline().split()\n return np.array(map(lambda x: int(x), line))\n def read_float():\n return float(infile.readline().strip())\n def read_floats():\n return np.array(infile.readline().split(), dtype=float)\n def read_string():\n return infile.readline().strip()\n def read_strings():\n return np.array(infile.readline().split(), dtype=object) #N.B. general dtype\n \n N = read_int()\n cars = read_strings()\n assert N == len(cars)\n \n return cars",
"def parse_user_data(infile):\n if os.path.isfile(infile):\n with open(infile, 'r') as inf:\n out = inf.read()\n else:\n sys.exit(\"File not found: {}\".format(infile))\n return out",
"def _read(self, string=\"\", fname=\"\"):\n if string:\n self.handle = gv.readstring(string)\n elif fname == \"stdin\":\n data = sys.stdin.read()\n self.handle = gv.readstring(data)\n else:\n self.handle = gv.read(fname)\n # gv returns None if eg. the input does not exist\n if not self.handle:\n raise ValueError(\"Error with file \" + fname)",
"def read_input(infile):\n #Some utility functions to read in particular types of input\n def read_int():\n return int(infile.readline().strip())\n def read_ints():\n return np.array(infile.readline().split(), dtype=int)\n def read_bigints(): #For ints that won't fit directly in an int32 array\n line = infile.readline().split()\n return np.array(map(lambda x: int(x), line))\n def read_float():\n return float(infile.readline().strip())\n def read_floats():\n return np.array(infile.readline().split(), dtype=float)\n def read_string():\n return infile.readline().strip()\n def read_strings():\n return np.array(infile.readline().split(), dtype=object) #N.B. general dtype\n \n N, J = read_ints()\n \n return (N, J)",
"def read_inputs(argn=1):\n if len(sys.argv) < argn+1:\n raise IOError(\"Hey, no input file was passed as argument to\"\n \" the program!!\")\n if not os.path.exists(sys.argv[argn]):\n raise FileNotFoundError(\"Input file '{}' not found.\".\n format(sys.argv[argn]))\n return read_config_file(sys.argv[argn], attribution_char='=')",
"def _read_input_file(self):\n pass",
"def ParseInput(infile):\n lines = [line for line in open(infile).readlines() if line[0] != '#' and not line.isspace()]\n line1match = re.compile('^\\s*VOLUME\\s+(?P<volume>\\d+\\.{0,1}\\d*)\\s*\\n$')\n m = line1match.search(lines[0])\n if not m:\n raise IOError(\"Failed to parse VOLUME from the first line.\")\n volume = float(m.group('volume'))\n line2match = re.compile('^\\s*DILUTION\\s+(?P<dilution>\\d+\\.{0,1}\\d*)\\s*\\n$')\n m = line2match.search(lines[1])\n if not m:\n raise IOError(\"Failed to parse DILUTION from the second line.\")\n dilution = float(m.group('dilution'))\n if dilution <= 1:\n raise IOError(\"The dilution factor must be > 1, but read a value of %f\" % dilution)\n line3match = re.compile('^\\s*NREPLICATES\\s+(?P<nreplicates>\\d+)\\s*\\n$')\n m = line3match.search(lines[2])\n if not m:\n raise IOError(\"Failed to parse an integer value for NREPLICATES from the third line.\")\n nreplicates = int(m.group('nreplicates'))\n if nreplicates < 2:\n raise IOError(\"There must be at least two replicates, but read a value of %d.\" % nreplicates)\n lines = lines[3 : ] # the remaining lines\n # there should be nreplicates + 1 line for each sample\n linespersample = nreplicates + 1\n if len(lines) % linespersample != 0:\n raise IOError(\"The sample data is not specified correctly. There should be a total of %d lines for each sample (the sample name plus a line for each of the %d replicates), but the number additional lines is not divisible by %d.\" % (linespersample, nreplicates, linespersample))\n nsamples = len(lines) / linespersample\n sampledata = {}\n namematch = re.compile('^\\s*SAMPLE\\s+(?P<name>.+)\\n$')\n validrows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n samplenames = []\n for isample in range(nsamples):\n nameline = lines[isample * linespersample]\n samplelines = lines[isample * linespersample + 1 : (isample + 1) * linespersample]\n assert len(samplelines) == nreplicates\n m = namematch.search(nameline)\n if not m:\n raise IOError(\"Failed to match sample name from line: %s\" % nameline)\n sample = m.group('name').strip()\n if sample in sampledata:\n raise IOError(\"Duplicate sample name of %s\" % sample)\n sampledata[sample] = []\n samplenames.append(sample)\n for line in samplelines:\n if line.strip() == 'na':\n sampledata[sample].append([]) # no rows with effect\n else:\n rows = [x.strip() for x in line.split(',')]\n for x in rows:\n if x not in validrows:\n raise IOError(\"Invalid row specification of %s in the following line: %s\\nValid row labels are A to H.\" % (x, line))\n if rows.count(x) != 1:\n raise IOError(\"Row identifier of %s appears more than once in the following line: %s\" % (x, line))\n sampledata[sample].append(rows)\n return (samplenames, sampledata, volume, dilution)",
"def read(self, inputfile):\n infile = open(inputfile, 'r')\n if (inputfile.lower().endswith('.po')):\n self.read_po(infile)\n elif (inputfile.lower().endswith('.json')):\n self.read_json(infile)\n elif (inputfile.lower().endswith('.xml')):\n self.read_properties(infile)\n infile.close()",
"def read(infile):\n _, ext = os.path.splitext(infile)\n ext = ext.strip('.')\n return read_funcs[ext](infile)",
"def readInput(fileName):\n\n with open(fileName, 'r') as file:\n return file.read().splitlines()",
"def read_cinder_input(input_file_name):\n if input_file_name:\n # reading from a file, not stdin\n with open(input_file_name, \"r\") as input_file:\n info = json.loads(input_file.read())\n return info\n else:\n # read json from stdin\n return json.loads(sys.stdin.read())",
"def parse_products(self, infile):\r\n raise NotImplementedError()",
"def main():\n parse_file(sys.argv[1])",
"def read_input():\n return Path(__file__).with_name('input.txt').read_text().splitlines()",
"def parse_data(fp):\n pass",
"def _parse_input(self):\n #temperature\n regex = re.compile(\"TEMP=(\\d+\\.\\d*|\\d+)\")\n r = regex.search(self.file_dic['input'])\n if r:\n self.temperature = r.groups()[0]\n else:\n self.temperature = 298.15\n #theory\n regex = re.compile('(\\$contrl.+\\$end|\\$basis.+ \\$end)')\n temp_theory = regex.findall(self.file_dic['input'])\n contrl = temp_theory[0][:-4][7:].strip()\n basis = temp_theory[1][:-4][6:].strip()\n self.theory = contrl + ' ' + basis",
"def parse_infile(self, infile):\n\n if type(infile)==str:\n print('Im a string')\n folder, file = os.path.split(infile)\n elif type(infile) in [list, tuple]:\n if not len(infile) == 2:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n else:\n folder, file = infile\n else:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n \n self.folder = folder\n self.file_ = file",
"def inputfile(filename):\n infile = open(filename, 'r')\n lines = infile.readlines()\n\n # --------------------------------------------------------------------------\n # Domain specifications\n\n Nx = eval(lines[15][lines[15].find('=')+1:].strip())\n ax = eval(lines[16][lines[16].find('=')+1:].strip())\n bx = eval(lines[17][lines[17].find('=')+1:].strip())\n\n Ny = eval(lines[21][lines[21].find('=')+1:].strip())\n ay = eval(lines[22][lines[22].find('=')+1:].strip())\n by = eval(lines[23][lines[23].find('=')+1:].strip())\n\n Nz = eval(lines[27][lines[27].find('=')+1:].strip())\n az = eval(lines[28][lines[28].find('=')+1:].strip())\n bz = eval(lines[29][lines[29].find('=')+1:].strip())\n\n Nvx = eval(lines[33][lines[33].find('=')+1:].strip())\n avx = eval(lines[34][lines[34].find('=')+1:].strip())\n bvx = eval(lines[35][lines[35].find('=')+1:].strip())\n\n Nvy = eval(lines[39][lines[39].find('=')+1:].strip())\n avy = eval(lines[40][lines[40].find('=')+1:].strip())\n bvy = eval(lines[41][lines[41].find('=')+1:].strip())\n\n Nvz = eval(lines[45][lines[45].find('=')+1:].strip())\n avz = eval(lines[46][lines[46].find('=')+1:].strip())\n bvz = eval(lines[47][lines[47].find('=')+1:].strip())\n\n Nt = eval(lines[51][lines[51].find('=')+1:].strip())\n T = eval(lines[52][lines[52].find('=')+1:].strip())\n\n N = eval(lines[58][lines[58].find('=')+1:].strip())\n\n # --------------------------------------------------------------------------\n # Broadcast notification regarding start of simulation and order of solver\n\n print \"\\nStarting 1D1V Vlasov-Poisson simulation\"\n print \"\\nadvection solver: LTE order %d\" % (N+1)\n\n # --------------------------------------------------------------------------\n # Boundary conditions\n\n # stored as a dictionary of dictionaries, access as\n # BC['z']['upper'] and BC['z']['lower'] for z = {x, y, ...}\n\n BC = {}\n # main dictionary with key/values {'x' : {'lower' : value, 'upper : value},\n # {'y' : {'lower' : value, 'upper : value},\n # {'z' : {'lower' : value, 'upper : value},\n # {'vx' : {'lower' : value, 'upper : value},\n # {'vy' : {'lower' : value, 'upper : value},\n # {'vz' : {'lower' : value, 'upper : value},\n\n\n # subdictionaries with key/values {'lower' : BC_value, and 'upper' : BC_value}\n BC['x'] = {}\n BC['x']['lower'] = lines[18][lines[18].find('=')+1:].strip()\n BC['x']['upper'] = lines[19][lines[19].find('=')+1:].strip()\n\n BC['y'] = {}\n BC['y']['lower'] = lines[24][lines[24].find('=')+1:].strip()\n BC['y']['upper'] = lines[25][lines[25].find('=')+1:].strip()\n\n BC['z'] = {}\n BC['z']['lower'] = lines[30][lines[30].find('=')+1:].strip()\n BC['z']['upper'] = lines[31][lines[31].find('=')+1:].strip()\n\n BC['vx'] = {}\n BC['vx']['lower'] = lines[36][lines[36].find('=')+1:].strip()\n BC['vx']['upper'] = lines[37][lines[37].find('=')+1:].strip()\n\n BC['vy'] = {}\n BC['vy']['lower'] = lines[42][lines[42].find('=')+1:].strip()\n BC['vy']['upper'] = lines[43][lines[43].find('=')+1:].strip()\n\n BC['vz'] = {}\n BC['vz']['lower'] = lines[48][lines[48].find('=')+1:].strip()\n BC['vz']['upper'] = lines[49][lines[49].find('=')+1:].strip()\n\n # --------------------------------------------------------------------------\n # Store number of active gridpoints for every phase space variable\n #\n # Note: for periodic BCs: Nz_active = Nz - 1\n # for all other BCs: Nz_active = Nz\n\n # TODO this is acknowledged as being redundant, but more specific than the lists\n # active_dims vs. 
total_dims\n if BC['x']['lower'] == 'periodic' and BC['x']['upper'] == 'periodic' and Nx is not None:\n Nx_active = Nx - 1\n else:\n Nx_active = Nx\n\n if BC['y']['lower'] == 'periodic' and BC['y']['upper'] == 'periodic' and Ny is not None:\n Ny_active = Ny - 1\n else:\n Ny_active = Ny\n\n if BC['z']['lower'] == 'periodic' and BC['z']['upper'] == 'periodic' and Nz is not None:\n Nz_active = Nz - 1\n else:\n Nz_active = Nz\n\n if BC['vx']['lower'] == 'periodic' and BC['vx']['upper'] == 'periodic' and Nvx is not None:\n Nvx_active = Nvx - 1\n else:\n Nvx_active = Nvx\n\n if BC['vy']['lower'] == 'periodic' and BC['vy']['upper'] == 'periodic' and Nvy is not None:\n Nvy_active = Nvy - 1\n else:\n Nvy_active = Nvy\n\n if BC['vz']['lower'] == 'periodic' and BC['vz']['upper'] == 'periodic' and Nvz is not None:\n Nvz_active = Nvz - 1\n else:\n Nvz_active = Nvz\n\n # --------------------------------------------------------------------------\n # High order correction (HOC) method applied to each phase space variable\n\n HOC = {}\n HOC['x'] = lines[68][lines[68].find(':')+1:].strip().upper()\n HOC['y'] = lines[69][lines[69].find(':')+1:].strip().upper()\n HOC['z'] = lines[70][lines[70].find(':')+1:].strip().upper()\n\n HOC['vx'] = lines[72][lines[72].find(':')+1:].strip().upper()\n HOC['vy'] = lines[73][lines[73].find(':')+1:].strip().upper()\n HOC['vz'] = lines[74][lines[74].find(':')+1:].strip().upper()\n\n\n # list of phase space variables used, in etc/params.dat must set unused\n # vars to have Nz as None, z = x, vx, y, ...\n # e.g. in 1D1V, phasespace_vars = ['x', 'vx']\n phasespace_vars = []\n if Nx is not None:\n phasespace_vars.append('x')\n if Ny is not None:\n phasespace_vars.append('y')\n if Nz is not None:\n phasespace_vars.append('z')\n if Nvx is not None:\n phasespace_vars.append('vx')\n if Nvy is not None:\n phasespace_vars.append('vy')\n if Nvz is not None:\n phasespace_vars.append('vz')\n\n print \"will step through %d-dimensional solution in variables: %s\" % (len(phasespace_vars), phasespace_vars)\n for var in phasespace_vars:\n print \"high order correction method on %s: %s\" % (var, HOC[var])\n\n # for periodic BCs, the number of active dims is not equal to the\n # total number of dims, we evolve \"Nz-1\" gridpoints, then assign\n # the Nth point by periodicity as equal to the 0th point. Hence,\n # a distinction is needed between active dims and total dims\n # where we note they are identical in all cases but periodic BCs.\n\n # TODO as mentioned above, this is now a redundant set of total grid points\n # as compared to active grid points. At some point, need to trace where\n # this is actually used in the code and replace or remove it\n\n # initialize lists\n total_dims = []\n active_dims = []\n\n # strip all whitespace in each entry\n for var in phasespace_vars:\n total_dims.append(eval('N' + var))\n\n if ( (BC[var]['lower'] == 'periodic') and (BC[var]['upper'] == 'periodic') ):\n active_dims.append(eval('N' + var) - 1)\n else:\n active_dims.append(eval('N' + var))\n\n # TODO this is a misleading name, should be numvars\n numdims = len(phasespace_vars)\n\n # --------------------------------------------------------------------------\n # Initial density specification\n #\n # the following establishes a difference between the number of densities\n # specified in etc/params.dat. Should there be two, the solver is a two\n # species Vlasov solver. 
If only one, then a cold background will be\n # automatically computed (TODO)\n\n\n densities_list = lines[79][lines[79].find(':')+1:].strip().split(', ')\n for i in range(len(densities_list)):\n densities_list[i] = densities_list[i].lower()\n\n if len(densities_list) == 2: # if two species return dictionary of strings\n density = {}\n density['electrons'] = densities_list[0]\n density['electrons'] = density['electrons'].lower()\n density['ions'] = densities_list[1]\n density['ions'] = density['ions'].lower()\n print \"\\ntwo species simulation with initial densities:\\n\"\n print \"electrons: %s\" % density['electrons']\n print \"ions: %s\\n\" % density['ions']\n\n elif len(densities_list) == 1: # if one species return a string\n density = densities_list[0]\n print \"one species (electron) simulation with initial density: %s\" % density\n # TODO compute cold background, store both this and the above\n # in a common dictionary as above for two species.\n\n # --------------------------------------------------------------------------\n # Split scheme specification\n\n split_scheme = lines[98][lines[98].find('=')+1:].strip()\n split_scheme = split_scheme.upper()\n print \"split scheme: %s\\n\\n\" % split_scheme\n\n # filepath to splitting coefficient tables\n filename = lines[99][lines[99].find(':')+1:].strip()\n filepath = './etc/' + filename\n\n # get splitting coefficients for chosen scheme\n if split_scheme is not None:\n splitting = splitting_coefficients(filepath, split_scheme)\n else:\n splitting = None\n\n # --------------------------------------------------------------------------\n # Plot window specification (used in lib.plots.Setup)\n\n xmin = eval(lines[113][lines[113].find('=')+1:].strip())\n xmax = eval(lines[114][lines[114].find('=')+1:].strip())\n ymin = eval(lines[116][lines[116].find('=')+1:].strip())\n ymax = eval(lines[117][lines[117].find('=')+1:].strip())\n\n plot_params = dict(xmin = xmin, xmax = xmax,\n ymin = ymin, ymax = ymax)\n\n record_outputs = lines[120][lines[120].find(':')+1:].strip()\n record_outputs = record_outputs.lower()\n\n if record_outputs == 'yes':\n # output filepath setup\n filename = lines[121][lines[121].find(':')+1:].strip()\n filepath = './etc/' + filename\n outfiles = output_files(filepath) # dictionary of opened files\n else:\n outfiles = None\n\n # --------------------------------------------------------------------------\n # MISC STORAGE (e.g. stored matrices that are used routinely)\n #\n # dictionaries and matrices relevant for high order correction applications\n #\n # Constructing the finite different weight matricies, W.\n #-------------------------------------------------------\n # requires: (dict) FD_schemes\n #\n # Note: FD_schemes is only needed to construct W. W is what is used in\n # the simulation. Hence, the building routine for FD_schemes\n # is not optimized, since it happens before the simulation starts\n # and hence is not a source of repeated computational cost.\n #\n # FD_schemes is a dictionary containing the families of every order derivative\n # needed for the indicated global error N in etc/params.dat, i.e. all schemes\n # of various degrees of asymmetry and handedness. For large N, this can be a\n # very large dictionary, see the function routine read_FD_schemes to see all\n # that gets stored inside. It is used to construct the difference coefficient\n # matrices W (for applying high order corrections). 
The other scheme\n # FD_scheme_dn1 is used to construct the matrix W_dn1 which is a difference\n # coefficient matrix for the first derivative (dn = 1) at LTE = 6, and used\n # in the finite difference 6th order Poisson solver (PBCs currently only).\n #---------------------------------------------------------------------------\n #\n # initialize all dictionaries whose keys correspond to phase space vars\n # and whose values contain the relevant ndarrays\n\n Xi = {}\n xi = {}\n W = {}\n\n # top level check: if any var has FD corrections, store FD_schemes and init W\n if 'FD' in HOC.values():\n # store finite difference schemes\n FD_schemes = read_FD_schemes(N)\n\n if HOC['x'] == 'FD':\n # first derivative with LTE = 6, used to find dphi = -E after phi is\n # found from a 6th order Poisson solve\n FD_scheme_dn1 = read_FD_scheme(1,6)\n W_dn1_LTE6 = assemble_finite_difference_weight_matrix_const_dn_const_LTE(Nx_active,\n FD_scheme_dn1,\n dn = 1,\n LTE = 6\n )\n\n # TODO if more than one or different spatial dimension\n # TODO than 'x' with FD corrections need to permit access to this\n # TODO dictionary W_dn1_LTE6 and have it be assembled.\n\n else:\n # else, Fourier Gauss solver is used, no need for this matrix\n W_dn1_LTE6 = None\n\n # variable-by-variable checks: assemble consistent objects needed\n # for the specified means of HOC from etc/params.dat\n\n # Note: the following is organized with the expectation that\n # higher dimensional implementations would be stepped through\n # as sets of 2D advection problems, always paired as z and vz\n # i.e. not as mixed stepthroughs with x paired with vy for example\n\n for var in phasespace_vars:\n if HOC[var] == 'FD':\n W[var] = assemble_finite_difference_weight_matrix(\n eval('N' + var + '_active'),\n N,\n FD_schemes\n )\n elif HOC[var] == 'FOURIER':\n # ensure the correct number of grid points\n # is passed for the generalized velocity Nvz_active\n # for x,y,z, 'vz' = vx, vy, vz\n # for vx, vy, vz, 'vz' = ax, ay, az, which have\n # the same number of dims as x, y, z, respectively\n\n if var[0] == 'v':\n Nvz_active = eval('N' + var[1] + '_active')\n else:\n Nvz_active = eval('Nv' + var + '_active')\n\n Xi, xi = assemble_spectral_derivative_operator(Xi, xi,\n var,\n eval('a' + var),\n eval('b' + var),\n eval('N' + var),\n eval('N' + var + '_active'),\n Nvz_active,\n N)\n\n # ---------------------------------------------------------------------\n # \"Alternating\" identity matrix\n\n\n # in lib.HOC.correctors, require an N x N diagonal matrix with entries\n # (-1)^i, where i is the row number, for details see on github\n #\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n #\n # section \"2D casting of correction coefficients c (vector) -> c (tensor)\"\n\n I_alternating = np.diag( (-np.ones(N)) ** np.arange(N) )\n\n # obtain Bernoulli numbers (note: list only 23 numbers are listed)\n # for a correction up to global error order N, N-1 Bernoulli numbers\n # are needed. 
If higher than global error order 22 is desired, additional\n # Bernoulli numbes need to be entered in\n #\n # etc/Table_of_Bernoulli_numbers.dat\n #\n\n # Store Bernoulli numbers from dat file etc/Table_of_Bernoulli_numbers.dat\n filename = 'Table_of_Bernoulli_numbers.dat'\n filepath = './etc/' + filename\n Bernoulli_numbers = Bernoulli(filepath)\n\n # \"A\" matrices for Bernoulli number storage and matrix HOC application\n # in lib.HOC.Beta_matrix, see notebook on github at\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n A_pos, A_neg = np.zeros([N,N]), np.zeros([N,N])\n for i in range(N):\n for j in range(i+1):\n A_pos[i,j] = Bernoulli_numbers[i-j] / scipy.misc.factorial(i-j)\n if (i - j) == 1:\n A_neg[i,j] = -A_pos[i,j]\n else:\n A_neg[i,j] = A_pos[i,j]\n\n A_matrix = {}\n # dictionary container\n # allow dictionary access to relevant matrix of Bernoulli numbers\n # by operating with str(int(np.sign(CFL.frac)))\n\n A_matrix['1'] = A_pos\n A_matrix['0'] = A_pos\n A_matrix['-1'] = A_neg\n\n\n # ---------------------------------------------------------------------\n # 6th order finite difference Poisson solver for periodic BCs\n # (stored as keys 'D' [difference matrix] and 'B' [inhomogeneity])\n\n Poisson_6th_order_PBC_FD_solver_matrices = assemble_Poisson_6th_order_PBC_FD_solver_matrices(Nx, BC)\n\n # TODO specialize right now to just be x, vx. Figure out how to generalize later with higher dimensions\n compute_electric_field_function_handle_prefix = \"DECSKS.lib.fieldsolvers.compute_electric_field_\"\n compute_electric_field_function_handle = \"\".join((compute_electric_field_function_handle_prefix, HOC['x'].lower()))\n\n derivative_method = {}\n derivative_method_prefix = 'DECSKS.lib.derivatives'\n for var in phasespace_vars:\n derivative_method[var] = \".\".join((derivative_method_prefix, HOC[var].lower()))\n\n sim_params = dict(\n N = N, HOC = HOC,\n derivative_method = derivative_method,\n Nx = Nx, ax = ax, bx = bx,\n Ny = Ny, ay = ay, by = by,\n Nz = Nz, az = az, bz = bz,\n Nvx = Nvx, avx = avx, bvx = bvx,\n Nvy = Nvy, avy = avy, bvy = bvy,\n Nvz = Nvz, avz = avz, bvz = bvz,\n Nt = Nt, T = T,\n phasespace_vars = phasespace_vars,\n numdims = numdims,\n active_dims = active_dims,\n total_dims = total_dims,\n density = density,\n split_scheme = split_scheme,\n splitting = splitting,\n plot_params = plot_params,\n record_outputs = record_outputs,\n outfiles = outfiles,\n BC = BC, # boundary conditions on all phase space variables\n I_alternating = I_alternating, # identity matrix with alternating signs according to row, used in computing correctors c\n A_matrix = A_matrix, # Matrices of Bernoulli numbers for HOC\n W = W,\n W_dn1_LTE6 = W_dn1_LTE6,\n Xi = Xi, # spectral differentiation operator matrix (1j*xi[i,j]) ** q\n xi = xi, # wave number vector\n Poisson_6th_order_PBC_FD_solver_matrices = Poisson_6th_order_PBC_FD_solver_matrices,\n compute_electric_field_function_handle = compute_electric_field_function_handle # determines if solver is FD or fourier based\n )\n\n infile.close()\n\n return sim_params",
"def read(filename):\r\n # File object should use settings from source file by default.\r\n with open(filename, 'rU') as f:\r\n data = f.read()\r\n settings = FileSettings(**detect_excellon_format(data))\r\n return ExcellonParser(settings).parse(filename)",
"def read():\n try:\n #Open and parse input files.\n nodeFile = open(sys.argv[1], 'r')\n edgeFile = open(sys.argv[2], 'r')\n\t\n parse_nodes(nodeFile)\n parse_edges(edgeFile)\n nodeFile.close()\n\tedgeFile.close()\n\treturn \n except:\n print 'problem parsing input'\n #Put here some more information - usage...",
"def parse_input(data: Iterator[str]) -> Iterator[SnailfishNumber]:\n yield from (SnailfishNumber.from_str(line.strip())\n for line in data)",
"def read_infile(infile):\n # There are a variable header lengths possible.\n # Loop through and look for when the line starts\n # with '1', the first index.\n nheader = 0\n try:\n with open(infile, 'r') as f:\n for line in f:\n if line.strip().startswith('1'):\n break\n nheader += 1\n except IOError:\n message = f'Unable to open {infile} in modconvert.'\n raise PipeCalError(message)\n index, freq, tbr, flux, trj = np.genfromtxt(infile, unpack=True,\n skip_header=nheader)\n return index, freq, tbr, flux, trj",
"def parse(self, input):\n pass",
"def read_meth(filename, name, window, smoothen=5):\n file_type = file_sniffer(filename)\n logging.info(\"File is of type {}\".format(file_type))\n try:\n if file_type.startswith(\"nanopolish\"):\n return parse_nanopolish(filename, file_type, name, window, smoothen=smoothen)\n elif file_type == \"nanocompore\":\n return parse_nanocompore(filename, name, window)\n elif file_type == \"ont-cram\":\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write(\"\\n\\n\\nInput file {} not recognized!\\n\".format(filename))\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise",
"def inputfile(filename):\n infile = open(filename, 'r')\n lines = infile.readlines()\n\n # --------------------------------------------------------------------------\n # Domain specifications\n\n Nx = eval(lines[15][lines[15].find('=')+1:].strip())\n ax = eval(lines[16][lines[16].find('=')+1:].strip())\n bx = eval(lines[17][lines[17].find('=')+1:].strip())\n\n Ny = eval(lines[19][lines[19].find('=')+1:].strip())\n ay = eval(lines[20][lines[20].find('=')+1:].strip())\n by = eval(lines[21][lines[21].find('=')+1:].strip())\n\n Nz = eval(lines[23][lines[23].find('=')+1:].strip())\n az = eval(lines[24][lines[24].find('=')+1:].strip())\n bz = eval(lines[25][lines[25].find('=')+1:].strip())\n\n Nvx = eval(lines[27][lines[27].find('=')+1:].strip())\n avx = eval(lines[28][lines[28].find('=')+1:].strip())\n bvx = eval(lines[29][lines[29].find('=')+1:].strip())\n\n Nvy = eval(lines[31][lines[31].find('=')+1:].strip())\n avy = eval(lines[32][lines[32].find('=')+1:].strip())\n bvy = eval(lines[33][lines[33].find('=')+1:].strip())\n\n Nvz = eval(lines[35][lines[35].find('=')+1:].strip())\n avz = eval(lines[36][lines[36].find('=')+1:].strip())\n bvz = eval(lines[37][lines[37].find('=')+1:].strip())\n\n Nt = eval(lines[39][lines[39].find('=')+1:].strip())\n T = eval(lines[40][lines[40].find('=')+1:].strip())\n\n N = eval(lines[46][lines[46].find('=')+1:].strip())\n\n # --------------------------------------------------------------------------\n # list of phase space variables used, in etc/params.dat must set unused\n # vars to have Nz as None, z = x, vx, y, ...\n # e.g. in 1D1V, phasespace_vars = ['x', 'vx']\n phasespace_vars = []\n if Nx is not None:\n phasespace_vars.append('x')\n if Ny is not None:\n phasespace_vars.append('y')\n if Nz is not None:\n phasespace_vars.append('z')\n if Nvx is not None:\n phasespace_vars.append('vx')\n if Nvy is not None:\n phasespace_vars.append('vy')\n if Nvz is not None:\n phasespace_vars.append('vz')\n\n # ==========================================================================\n # Boundary conditions dictionary -- contains dist. 
function BCs as well as phi\n\n BC = {}\n BC['f'] = {}\n BC['phi'] = {}\n\n # BC['f'] = BC dict on distribution function f\n\n # BC['f']['x'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['y'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['z'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['vx'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['vy'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['vz'] = {'lower' : lower_value, 'upper' : upper_value}\n\n # BC['phi'] = BC dict on electric potential phi\n\n # BC['phi']['x'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['y'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['z'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['vx'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['vy'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['vz'] = {'lower' : lower_value, 'upper' : upper_value}\n #\n # subdict objects that give keyword descriptions that match method names in lib.boundaryconditions and lib.fieldsolvers\n # include, for var in phasespace_vars:\n #\n # BC['f'][var]['type'] and BC['phi'][var]['type']\n #\n # these are used to assemble function handle strings that select the corresponding routine needed for the specified BCs\n\n\n BC_infilename = './etc/' + lines[106][lines[106].find(':')+1:].strip()\n BC_infile = open(BC_infilename, 'r')\n BC_infile_lines = BC_infile.readlines()\n\n # DECSKS will throw an error if numbers are inputted as BCs in etc/params.dat\n\n # strings are stored as lowercase as they are used in an eval statement to access\n # the relevant method in lib.boundaryconditions. e.g. 'absorbing' is accessed as\n # either eval('lib.boundaryconditions.absorbing_lower_boundary') or\n # eval('lib.boundaryconditions.absorbing_upper_boundary') in lib.convect.remap_step\n\n BC['f']['x'] = {}\n BC['f']['x']['lower'] = safe_eval(BC_infile_lines[40][BC_infile_lines[40].find('=')+1:].strip())\n BC['f']['x']['upper'] = safe_eval(BC_infile_lines[41][BC_infile_lines[41].find('=')+1:].strip())\n\n BC['f']['y'] = {}\n BC['f']['y']['lower'] = safe_eval(BC_infile_lines[43][BC_infile_lines[43].find('=')+1:].strip())\n BC['f']['y']['upper'] = safe_eval(BC_infile_lines[44][BC_infile_lines[44].find('=')+1:].strip())\n\n BC['f']['z'] = {}\n BC['f']['z']['lower'] = safe_eval(BC_infile_lines[46][BC_infile_lines[46].find('=')+1:].strip())\n BC['f']['z']['upper'] = safe_eval(BC_infile_lines[47][BC_infile_lines[47].find('=')+1:].strip())\n\n BC['f']['vx'] = {}\n BC['f']['vx']['lower'] = safe_eval(BC_infile_lines[55][BC_infile_lines[55].find('=')+1:].strip())\n BC['f']['vx']['upper'] = safe_eval(BC_infile_lines[56][BC_infile_lines[56].find('=')+1:].strip())\n\n BC['f']['vy'] = {}\n BC['f']['vy']['lower'] = safe_eval(BC_infile_lines[58][BC_infile_lines[58].find('=')+1:].strip())\n BC['f']['vy']['upper'] = safe_eval(BC_infile_lines[59][BC_infile_lines[59].find('=')+1:].strip())\n\n BC['f']['vz'] = {}\n BC['f']['vz']['lower'] = safe_eval(BC_infile_lines[61][BC_infile_lines[61].find('=')+1:].strip())\n BC['f']['vz']['upper'] = safe_eval(BC_infile_lines[62][BC_infile_lines[62].find('=')+1:].strip())\n\n # make all BCs lowercase strings so they can be used to construct the function strings in lib.boundaryconditions module\n # whose names are all lowercase\n\n # if an accepted boundary condition synonym as been used, change value to the name it goes by in lib.boundaryconditions\n # check that all inputs for evolved 
phase space variables are recognized keywords and are compatible with the\n # boundary at which they are indicated\n for var in phasespace_vars:\n for boundary in ['lower', 'upper']:\n BC['f'][var][boundary] = BC['f'][var][boundary].lower()\n if BC['f'][var][boundary] == 'open' or BC['f'][var][boundary] == 'cutoff':\n print \"\\nCourtesy notice to user: the boundary condition %s was selected for the distribution function on %s at the %s boundary in params_boundaryconditions.dat; \" % (BC['f'][var][boundary].upper(), var, boundary)\n print \"this is a recognized input synonym for a '%s' condition. Changing value stored to BC['f']['%s']['%s'] = '%s'\\n\" % ('ABSORBING', var, boundary, 'ABSORBING')\n print \"Please regard any warnings/error messages that cite the keyword '%s' with this change in mind\\n\" % ('ABSORBING')\n BC['f'][var][boundary] = 'absorbing'\n\n elif BC['f'][var][boundary] == 'collector':\n pass\n\n elif BC['f'][var][boundary] == 'absorbing':\n pass\n\n elif BC['f'][var][boundary] == 'symmetry':\n if boundary == 'upper':\n raise NotImplementedError('a symmetric UPPER boundary condition on the distribution function was specified in params_boundaryconditions.dat; however, DECSKS only has functionality to permit lower boundary symmetry.')\n elif boundary == 'lower':\n print \"\\nCourtesy notice to user: the boundary condition %s was selected for the distribution function on %s at the %s boundary in params_boundaryconditions.dat; \" % (BC['f'][var][boundary].upper(), var, boundary)\n print \"this is a recognized input synonym for a '%s' condition. Changing value stored to BC['f']['%s']['%s'] = '%s'\\n\" % ('SYMMETRIC', var, boundary, 'SYMMETRIC')\n print \"Please regard any warnings/error messages that cite the keyword '%s' with this change in mind\\n\" % ('SYMMETRIC')\n BC['f'][var][boundary] = 'symmetric'\n\n elif BC['f'][var][boundary] == 'symmetric':\n if boundary == 'lower':\n pass\n elif boundary == 'upper':\n raise NotImplementedError('a symmetric UPPER boundary condition on the distribution function was specified in params_boundaryconditions.dat; however, DECSKS only has functionality to permit lower boundary symmetry.')\n\n elif BC['f'][var][boundary] == 'periodic':\n pass\n\n else: # inputs do not match any options\n print '\\nThe invalid keyword %s was specified in params_boundaryconditions.dat on the variable %s at the %s boundary\\n' % (BC['f'][var][boundary].upper(), var, boundary)\n raise InputError('inputs are restricted to those listed as options in params_boundaryconditions.dat')\n\n # above we have checked for valid input. Next, check for compatible inputs (if 'periodic' is selected, it must be selected for both\n # upper and lower bounds) and store a descriptor that toggles the correct orchestrator\n # function in lib.boundaryconditions module ('periodic' vs. 
'nonperiodic')\n for var in phasespace_vars:\n if BC['f'][var]['lower'] == 'periodic' and BC['f'][var]['upper'] == 'periodic':\n BC['f'][var]['type'] = 'periodic'\n\n elif BC['f'][var]['lower'] == 'symmetric' and BC['f'][var]['upper'] != 'periodic':\n BC['f'][var]['type'] = 'nonperiodic'\n \n # check for invalid inputs\n elif BC['f'][var]['lower'] == 'symmetric' and BC['f'][var]['upper'] == 'periodic':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['upper'].upper())\n\n print \"\\nare inconsistent. Cannot combine a symmetric lower boundary with a periodic upper boundary condition. Periodic boundary conditions involve both boundaries (both boundaries would have to be set to PERIODIC)\\n\"\n\n raise InputError('cannot combine a symmetric lower boundary condition with a periodic upper boundary condition for the distribution function. Check inputs in boundaryconditions.dat and change the upper bound to be of non-periodic type')\n\n elif BC['f'][var]['lower'] == 'periodic' and BC['f'][var]['upper'] != 'periodic':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['upper'].upper())\n\n print \"\\nare inconsistent. Cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\"\n\n raise InputError('cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\n elif BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] == 'periodic':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['upper'].upper())\n\n print \"\\nare inconsistent. Cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\"\n\n raise InputError('cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\n else: # boundary conditions are combination of only: symmetric (lower), collector (lower or upper), absorbing (lower or upper)\n BC['f'][var]['type'] = 'nonperiodic'\n\n distribution_function_boundarycondition_orchestrator_prefix = 'DECSKS.lib.boundaryconditions'\n\n # create a dictionary of function handles that call either\n # the 'periodic', 'nonperiodic', or 'symmetric' orchestrator in lib.boundaryconditions\n #\n # i.e. 
we form the string handle for each active variable var:\n #\n # distribution_function_boundarycondition_orchestrator_handle[var] =\n #\n # DECSKS.lib.boundaryconditions.periodic\n # DECSKS.lib.boundaryconditions.nonperiodic\n # DECSKS.lib.boundaryconditions.symmetric\n\n distribution_function_boundarycondition_orchestrator_handle = {}\n\n for var in phasespace_vars:\n distribution_function_boundarycondition_orchestrator_handle[var] = \".\".join(\n (distribution_function_boundarycondition_orchestrator_prefix, BC['f'][var]['type']))\n\n # --------------------------------------------------------------------------\n # Store number of active gridpoints for every phase space variable\n #\n # Note: for periodic BCs: Nz_active = Nz - 1, we evolve Nz_active nodes and assign by periodicity the f[Nz-1] = f[0]\n # for all other BCs: Nz_active = Nz\n\n # active_dims vs. total_dims\n # note a generalized loop cannot be used as assignments cannot be made under an assembled string with eval\n if BC['f']['x']['lower'] == 'periodic' and BC['f']['x']['upper'] == 'periodic' and Nx is not None:\n Nx_active = Nx - 1\n else:\n Nx_active = Nx\n\n if BC['f']['y']['lower'] == 'periodic' and BC['f']['y']['upper'] == 'periodic' and Ny is not None:\n Ny_active = Ny - 1\n else:\n Ny_active = Ny\n\n if BC['f']['z']['lower'] == 'periodic' and BC['f']['z']['upper'] == 'periodic' and Nz is not None:\n Nz_active = Nz - 1\n else:\n Nz_active = Nz\n\n if BC['f']['vx']['lower'] == 'periodic' and BC['f']['vx']['upper'] == 'periodic' and Nvx is not None:\n Nvx_active = Nvx - 1\n else:\n Nvx_active = Nvx\n\n if BC['f']['vy']['lower'] == 'periodic' and BC['f']['vy']['upper'] == 'periodic' and Nvy is not None:\n Nvy_active = Nvy - 1\n else:\n Nvy_active = Nvy\n\n if BC['f']['vz']['lower'] == 'periodic' and BC['f']['vz']['upper'] == 'periodic' and Nvz is not None:\n Nvz_active = Nvz - 1\n else:\n Nvz_active = Nvz\n\n # --------------------------------------------------------------------------\n # High order correction (HOC) method applied to each phase space variable\n\n # store as uppercase\n\n HOC = {}\n HOC['x'] = safe_eval(lines[56][lines[56].find(':')+1:].strip())\n HOC['y'] = safe_eval(lines[57][lines[57].find(':')+1:].strip())\n HOC['z'] = safe_eval(lines[58][lines[58].find(':')+1:].strip())\n\n HOC['vx'] = safe_eval(lines[60][lines[60].find(':')+1:].strip())\n HOC['vy'] = safe_eval(lines[61][lines[61].find(':')+1:].strip())\n HOC['vz'] = safe_eval(lines[62][lines[62].find(':')+1:].strip())\n\n # make all non-None inputs capitalized\n for key in HOC.keys():\n if HOC[key] is not None:\n HOC[key] = HOC[key].upper()\n else:\n pass\n\n # check for valid inputs\n for key in HOC.keys():\n if HOC[key] is not None:\n if type(HOC[key]) != str:\n raise InputError('A non-string entry was found as a high order correction specification. Only FD or FOURIER are accepted')\n elif HOC[key] != 'FD' and HOC[key] != 'FOURIER':\n print \"\\nThe following high order correction was specified in params.dat, but is not recognized:\"\n print \"\\nHigh order correction on %s: %s\\n\" % (key, HOC[key].upper())\n print \"only FD and FOURIER are accepted keywords\\n\"\n raise InputError('An unrecognized high order correction was specified. 
Only FD or FOURIER are accepted')\n\n elif HOC[key] == 'FOURIER' and BC['f'][key]['type'] != 'periodic': # Fourier corrections use trigonometric derivatives, which rely on periodicity of the underlying functions\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (key, BC['f'][key]['lower'].upper())\n print \"upper boundary condition on f fore the variable %s: %s\\n\\n\" % (key, BC['f'][key]['upper'].upper())\n\n print \"are inconsistent with the high order correction specified in params.dat:\"\n print \"\\nhigh order correction on %s: %s\\n\\n\" % (key, HOC[var].upper())\n\n print \"FOURIER high order corrections only make sense for periodic systems (if this is the intention, the BCs on f and phi must be set to PERIODIC in params_boundaryconditions.dat)\\n\"\n\n raise InputError('Fourier corrections on a variable only make sense for periodic systems. The boundary conditions on the distribution function were read-in as not periodic for this variable.')\n elif eval('N' + key) is None:\n raise InputError('a variable not involved in the simulation (its number of grid points was specified as None) must also have its high order correction method specified as None. While reading in the input deck, the aforementioned expectation was not met. Please revisit the entries (number of grid points) and high order correction specification.')\n\n # store lists containing number of total and active gridpoints\n # this is acknowledged as redundant given the above storing as Nx_active, Ny_active,\n # etc., but these objects are used in legacy methods inside DECSKS\n\n # initialize lists\n total_dims = [] # e.g. in 1D1V this could contain [Nx, Nvx]\n active_dims = [] # e.g. in 1D1V this could contain [Nx_active, Nvx_active]\n\n for var in phasespace_vars:\n total_dims.append(eval('N' + var))\n active_dims.append(eval('N' + var + '_active'))\n\n numdims = len(phasespace_vars)\n # --------------------------------------------------------------------------\n # Initial density specification (2 species)\n\n mu = safe_eval(lines[68][lines[68].find(':')+1:].strip())\n\n densities_list = lines[69][lines[69].find(':')+1:].strip().split(', ')\n for i in range(len(densities_list)):\n densities_list[i] = densities_list[i].lower()\n\n if len(densities_list) == 2: # if two species return dictionary of strings\n density = {}\n density['electrons'] = densities_list[0]\n density['electrons'] = density['electrons'].lower()\n density['ions'] = densities_list[1]\n density['ions'] = density['ions'].lower()\n print \"\\ntwo species simulation with initial densities:\\n\"\n print \"electrons: %s\" % density['electrons']\n print \"ions: %s\\n\" % density['ions']\n\n # --------------------------------------------------------------------------\n # split scheme specification\n\n split_scheme = lines[81][lines[81].find('=')+1:].strip()\n split_scheme = split_scheme.upper()\n print \"split scheme: %s\\n\" % split_scheme\n\n # filepath to splitting coefficient tables\n filename = lines[82][lines[82].find(':')+1:].strip()\n filepath = './etc/' + filename\n\n # get splitting coefficients for chosen scheme\n if split_scheme is not None:\n splitting = splitting_coefficients(filepath, split_scheme)\n else:\n splitting = None\n\n\n\n # --------------------------------------------------------------------------\n # check for validity on split scheme vs. boundary conditions\n #\n # i.e. 
check that if the problem is bounded, the user cannot use a split scheme that has negative time substeps\n #\n # Schemes with only positive time substeps: LF2\n # Schemes that contain negative time substeps: Y4, O6-4, O11-6, O14-6\n #\n\n for var in phasespace_vars:\n if BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] != 'periodic':\n if split_scheme in ['LF2']:\n pass\n else: # a split scheme that involves negative time substeps has been selected\n print \"\\nThe following set of user specified information is not accepted by DECSKS:\\n\"\n print \"\\nin params.dat, the following was specified:\"\n print \"split scheme = %s:\" % split_scheme\n print \"\\nand the boundary data was specified in params_boundaryconditions.dat:\\n\"\n print \"distribution function lower boundary condition on %s: %s\" % (BC['f'][var]['lower'],var)\n print \"distribution function upper boundary condition on %s: %s\" % (BC['f'][var]['upper'], var)\n print \"\\nThe split scheme involves negative time substeps, while the boundary conditions are non-periodic. The BOUNDED Vlasov-Poisson problem is irreversible. A split scheme with negative time substeps can only be used in periodic systems, which correspond to systems of infinite extent\\n\"\n raise InputError('The split scheme involves negative time substeps, while the boundary conditions are non-periodic. The BOUNDED Vlasov-Poisson problem is irreversible. A split scheme with negative time substeps can only be used in periodic systems, which correspond to systems of infinite extent. To rectify this, the user may wish to select periodic boundary conditions on the distribution function (hence phi).')\n\n # --------------------------------------------------------------------------\n # Plot window specification (used in lib.plots.Setup)\n\n xmin = eval(lines[96][lines[96].find('=')+1:].strip())\n xmax = eval(lines[97][lines[97].find('=')+1:].strip())\n ymin = eval(lines[99][lines[99].find('=')+1:].strip())\n ymax = eval(lines[100][lines[100].find('=')+1:].strip())\n\n plot_params = dict(xmin = xmin, xmax = xmax,\n ymin = ymin, ymax = ymax)\n\n record_outputs = lines[103][lines[103].find(':')+1:].strip()\n record_outputs = record_outputs.lower()\n\n if record_outputs == 'yes':\n # output filepath setup\n filename = lines[104][lines[104].find(':')+1:].strip()\n filepath = './etc/' + filename\n outfiles = output_files(filepath) # dictionary of opened files\n else:\n outfiles = None\n\n # --------------------------------------------------------------------------\n # DICTIONARIES AND MATRICES RELEVANT FOR HIGH ORDER CORRECTION APPLICATIONS\n #\n\n # Constructing the finite different weight matrices, W.\n #-------------------------------------------------------\n # requires: (dict) FD_schemes\n #\n # Note: FD_schemes is only needed to construct W. W is what is used in\n # the simulation. Hence, the building routine for FD_schemes\n # is not optimized, since it happens before the simulation starts\n # and is not a source of repeated computational cost.\n #\n # FD_schemes is a dictionary containing the families of every order derivative\n # needed for the indicated global error N in etc/params.dat, i.e. all schemes\n # of various degrees of asymmetry and handedness. For large N, this can be a\n # large dictionary, cf. the function routine read_FD_schemes to see all\n # that gets stored inside. It is used to construct the difference coefficient\n # matrices W (for applying high order corrections). 
The other scheme\n # FD_scheme_dn1 is used to construct the matrix W_dn1 which is a difference\n # coefficient matrix for the first derivative (dn = 1) at LTE = 6, and used\n # to compute the electric field E = \"-dphi\" = W_dn1.dot(phi),\n # where dphi is the first derivative# of the electric potential, as calculated by\n # the methods in lib.fieldsolvers package\n #---------------------------------------------------------------------------\n #\n # initialize all dictionaries whose keys correspond to phase space vars\n # and whose values contain the relevant ndarrays\n\n Xi = {}\n xi = {}\n W = {}\n\n # top level check: if any var has FD corrections, store FD_schemes and init FD weight matrix W\n # for 6th order first derivative\n if 'FD' in HOC.values():\n # store finite difference schemes\n FD_schemes = read_FD_schemes(N)\n\n # if FD on a configuration variable, need to differentiate phi to obtain the acceleration a ~ E = -dphi\n if HOC['x'] == 'FD' or HOC['y'] == 'FD' or HOC['z'] == 'FD':\n # first derivative with LTE = 6, used to find dphi = -E after phi is\n # found from a 6th order Poisson solve\n FD_scheme_dn1 = read_FD_scheme(1,6)\n W_dn1_LTE6 = assemble_finite_difference_weight_matrix_const_dn_const_LTE(Nx_active,\n FD_scheme_dn1,\n dn = 1,\n LTE = 6\n )\n\n else:\n # else, Fourier Gauss solver is used, no need for this matrix\n W_dn1_LTE6 = None\n\n # variable-by-variable checks: assemble consistent objects needed\n # for the specified means of HOC from etc/params.dat\n\n # Note: the following is organized with the expectation that\n # higher dimensional implementations would be stepped through\n # as sets of 2D advection problems, always paired as z and vz\n # i.e. not as mixed stepthroughs with x paired with vy for example\n\n for var in phasespace_vars:\n if HOC[var] == 'FD':\n W[var] = assemble_finite_difference_weight_matrix(\n eval('N' + var + '_active'),\n N,\n FD_schemes\n )\n elif HOC[var] == 'FOURIER':\n # ensure the correct number of grid points\n # is passed for the generalized velocity Nvz_active\n # for x,y,z, 'vz' = vx, vy, vz\n # for vx, vy, vz, 'vz' = ax, ay, az, which have\n # the same number of dims as x, y, z, respectively\n # this is needed in the routine assemble_spectral_derivative_operator\n # so that the correctly dimensioned 2D arrays are returned\n\n if var[0] == 'v':\n # if a velocity variable, the velocity of this velocity is an acceleration\n # which has the same dimensions as the corresponding configuration variable\n # e.g. vx has velocity(vx) = ax which has the same dimensions as x\n Nvz_active = eval('N' + var[1] + '_active')\n else:\n # if a configuration variable, the velocity is the physical velocity, which\n # must be a coresponding active variable\n # e.g. 
x has a velocity vx\n Nvz_active = eval('Nv' + var + '_active')\n\n\n # The 3D tensor Xi is used to compute trigonometric derivatives\n # by operating on a 2D array of Fourier wave components (transformed\n # row-wise for each column, where as usual the objects have been\n # transpoed if needed so that the variation (x or vx) is along\n # rows, not columns)\n #\n # Fourier transform (derivatives) = Xi * Fourier transform (f)\n # derivatives = inverse transform (Xi * Fourier(f))\n #\n #\n # the object xi is used in legacy methods in DECSKS (pre-DECSKSv2.0)\n\n Xi, xi = assemble_spectral_derivative_operator(Xi, xi,\n var,\n eval('a' + var),\n eval('b' + var),\n eval('N' + var),\n eval('N' + var + '_active'),\n Nvz_active,\n N)\n\n # ---------------------------------------------------------------------\n # \"Alternating\" identity matrix\n\n # in lib.HOC.correctors, require an diagonal matrix with shape = (Nz_active, Nz_active)\n # with entries as (-1)^i, where i is the row number, for details see on github\n #\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n #\n # section \"2D casting of correction coefficients c (vector) -> c (tensor)\"\n\n I_alternating = np.diag( (-np.ones(N)) ** np.arange(N) )\n\n # ---------------------------------------------------------------------\n # Bernoulli number storage, and forming the matrices A_pos, A_neg\n\n # obtain Bernoulli numbers (note: only 23 numbers are entered into the dat file ->\n # max global error is 23 - 1 = 22) for a correction up to global error order\n # N, N-1 Bernoulli numbers are needed. If higher than global error order 22 is\n # desired, additional Bernoulli numbes need to be entered in\n #\n # etc/Table_of_Bernoulli_numbers.dat\n #\n\n # Store Bernoulli numbers from dat file etc/Table_of_Bernoulli_numbers.dat\n filename = 'Table_of_Bernoulli_numbers.dat'\n filepath = './etc/' + filename\n Bernoulli_numbers = Bernoulli(filepath)\n\n # \"A\" matrices for Bernoulli number storage and matrix HOC application\n # in lib.HOC.Beta_matrix, see notebook on github at\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n #\n # the A matrices are matrices containing scaled Bernoulli numbers (normalized by factorials)\n # that also factor in the sign (direction) information of the advecting density packets\n # (the different amounts to all odd coefficients having opposite sign)\n\n # The A matrices are used in the method lib.HOC.Beta_matrix (used to construct the array of the *magnitudes*\n # of the Nvz sets of N beta coefficients; note that the high order flux is further computed as a sum of\n # products that alternating with sign according to the parity of the derivative number, i.e. alternates signs\n # among odds and evens. 
These prefactors are applied at the end of the method lib.HOC.correctors by matrix\n # pre-multiplication of the matrix B with the alternating (in sight) identity matrix I formed above)\n\n # the method lib.HOC.Beta_matrix is called from inside lib.HOC.correctors (used to assemble the 2D array c of correctors)\n\n A_pos, A_neg = np.zeros([N,N]), np.zeros([N,N])\n for i in range(N):\n for j in range(i+1):\n A_pos[i,j] = Bernoulli_numbers[i-j] / scipy.misc.factorial(i-j)\n if (i - j) == 1:\n A_neg[i,j] = -A_pos[i,j]\n else:\n A_neg[i,j] = A_pos[i,j]\n\n A_matrix = {}\n # dictionary container\n # allow dictionary access to relevant matrix of Bernoulli numbers\n # by operating with str(int(np.sign(CFL.frac)))\n\n A_matrix['1'] = A_pos\n A_matrix['0'] = A_pos\n A_matrix['-1'] = A_neg\n\n #--------------------------------------------------------------------------------------------#\n # ELECTRIC POTENTIAL PHI\n #--------------------------------------------------------------------------------------------#\n\n #--------------------------------------------------------------------------------------------#\n # Boundary conditions BC['phi'] dictionary and dictionary of boundary values, phi_BC\n #\n # BC['phi']['x', 'y', or 'z']['lower' or 'upper'] = string keyword that describes the BC\n # phi_BC['x', 'y', or 'z'] = boundary value vector phi_BC that appears in a Poisson solver\n #--------------------------------------------------------------------------------------------#\n\n phi_BC = {}\n # keys: 'x', 'y', 'z'\n # values: ndarrays of size eval('N' + var + '_active)\n\n BC['phi'] = {}\n # keys: 'x', 'y', 'z'\n # values / keys for subdict: 'lower', 'upper'\n # values for subdict: string keyword that describes the BC at the key specification\n\n # --------------------------------------------------------------------------\n # PHI BOUNDARY CONDITIONS AND PHI BOUNDARY VALUES VECTORS FOR SOLVER Phi_BC['x', 'y', or 'z']\n\n # lines read in from boundaryconditions dat file were stored above in BC_infile_lines\n if HOC['x'] == 'FD':\n BC['phi']['x'] = {}\n BC['phi']['x']['lower'] = safe_eval(BC_infile_lines[196][BC_infile_lines[196].find('=')+1:].strip())\n BC['phi']['x']['upper'] = safe_eval(BC_infile_lines[197][BC_infile_lines[197].find('=')+1:].strip())\n phi_BC['x'] = np.zeros(Nx_active)\n elif HOC['x'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed\n phi_BC['x'] = None\n\n if HOC['y'] == 'FD':\n BC['phi']['y'] = {}\n BC['phi']['y']['lower'] = safe_eval(BC_infile_lines[199][BC_infile_lines[199].find('=')+1:].strip())\n BC['phi']['y']['upper'] = safe_eval(BC_infile_lines[200][BC_infile_lines[200].find('=')+1:].strip())\n phi_BC['y'] = np.zeros(Ny_active)\n elif HOC['y'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed\n phi_BC['y'] = None\n\n if HOC['z'] == 'FD':\n BC['phi']['z'] = {}\n BC['phi']['z']['lower'] = safe_eval(BC_infile_lines[202][BC_infile_lines[202].find('=')+1:].strip())\n BC['phi']['z']['upper'] = safe_eval(BC_infile_lines[203][BC_infile_lines[203].find('=')+1:].strip())\n phi_BC['z'] = np.zeros(Nz_active)\n elif HOC['z'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed\n phi_BC['z'] = None\n\n # ensure all inputs stored above in BC['phi'] dict objects are uppercase and recognized\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n if HOC[var] == 'FOURIER':\n pass\n else: # HOC is FD which computes the Lorentz term through a potential phi (Fourier uses the electric field E)\n\n # LOWER BOUNDARY CHECKS\n 
if BC['phi'][var]['lower'] is None:\n raise InputError('a NoneType was specified as a LOWER boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not meant to be evolved, set its number of grid points to None')\n\n elif type(BC['phi'][var]['lower']) != str:\n raise InputError('a non-string type as a LOWER boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not intended to be active, set its number of grid points to None. Otherwise, a recognized string keyword must be specified on the boundary condition on phi for this variable.')\n\n else:\n BC['phi'][var]['lower'] = BC['phi'][var]['lower'].upper()\n\n if BC['phi'][var]['lower'] not in ['PERIODIC', 'SELF-CONSISTENT', 'SYMMETRIC', 'SYMMETRY', 'BIAS']:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat is not a recognized keyword:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n\n raise InputError('boundary condition indicated on phi is not an accepted keyword option')\n\n elif (BC['phi'][var]['lower'] == 'SYMMETRIC' or BC['phi'][var]['lower'] == 'SYMMETRY') and BC['f'][var]['lower'] != 'symmetric':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat is:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['lower'].upper())\n print \"lower boundary condition on f for variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for variable %s: %s\\n\" % (var, BC['f'][var]['upper'].upper())\n\n print \"a SYMMETRIC boundary condition must be specified on both phi and f\"\n # by this point all synonyms have been normalized on BC['f'][var], 'symmetric' corresponds to the symmetry condition\n raise InputError('a SYMMETRY boundary condition on phi was specified, but a symmetry boundary was not specified on the distribution function f at this same (lower) boundary. A symmetric domain requires a lower boundary condition to be SYMMETRIC on both phi and f.')\n\n else:\n pass\n\n # UPPER BOUNDARY CHECKS\n if BC['phi'][var]['upper'] is None:\n raise InputError('a NoneType was specified as an upper boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not meant to be evolved, set its number of grid points to None')\n\n elif type(BC['phi'][var]['upper']) != str:\n raise InputError('a non-string type as an upper boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not intended to be active, set its number of grid points to None. 
Otherwise, a recognized string keyword must be specified on the boundary condition on phi for this variable.')\n\n else:\n BC['phi'][var]['upper'] = BC['phi'][var]['upper'].upper()\n\n if BC['phi'][var]['upper'] not in ['PERIODIC', 'SELF-CONSISTENT', 'SYMMETRIC', 'SYMMETRY', 'BIAS']:\n print \"\\nThe following boundary condition specified in params_boundaryconditions.dat is not a recognized boundary condition keyword:\\n\\n\"\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n\n raise InputError('boundary condition indicated on phi is not an accepted keyword option')\n\n elif BC['phi'][var]['upper'] == 'SYMMETRIC' or BC['phi'][var]['upper'] == 'SYMMETRY':\n print \"\\nThe following boundary condition specified in params_boundaryconditions.dat is not available:\\n\\n\"\n print \"upper boundary condition on phi: %s\\n\" % BC['phi'][var]['upper'].upper()\n\n raise NotImplementedError('a SYMMETRY boundary condition on phi as an UPPER boundary is specified in params_boundaryconditions.dat; only lower boundaries can support a symmetry boundary condition.')\n\n\n # CHECK FOR CONSISTENCY IN BOUNDARY CONDITIONS BETWEEN BOTH LOWER AND UPPER SPECIFICATIONS\n if BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] != 'PERIODIC':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, BC['phi'][var]['upper'].upper())\n\n raise InputError('PERIODIC boundary conditions on phi involve both lower and upper boundaries. The read-in of params_boundaryconditions.dat has the lower boundary condition as PERIODIC but the upper boundary condition is NOT. Both boundary conditions on phi must be set to PERIODIC if a periodic plasma is to be simulated.')\n\n elif BC['phi'][var]['lower'] != 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, BC['phi'][var]['upper'].upper())\n\n raise InputError('PERIODIC boundary conditions on phi involve both lower and upper boundaries. The read-in of params_boundaryconditions.dat has the upper boundary condition as PERIODIC but the lower boundary condition is NOT. Both boundary conditions on phi must be set to PERIODIC if a periodic plasma is to be simulated.')\n\n elif BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC':\n\n if BC['f'][var]['type'] != 'periodic': # note that validity and consistency checks on inputs for the distribution function have already been done above\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['f'][var]['upper'].upper())\n print \"e.g. 
periodic boundaries on phi require periodic boundaries on f for the same variable\\n\"\n raise InputError('PERIODIC boundary conditions on were specifed consistently for phi in params_boundaryconditions.dat; however, periodic boundary conditions must also be consistently specified on the distribution function. Revisit params_boundaryconditions.dat and ensure that both lower and upper boundaries on the distribution function f and the potential phi are set to PERIODIC if a periodic plasma is intended to be simulated.')\n elif BC['f'][var]['type'] == 'periodic': # note that validity and consistency checks on inputs for the distribution function have already been done above\n pass\n\n\n # CHECK FOR CONSISTENCY ON PHI BCS WITH HIGH ORDER CORRECTION METHOD SPECIFIED (note we have already checked this against the distribution function BCs)\n # here, we are only checking to see if that BCs on phi aren't periodic, to ensure that HOC is NOT set to fourier (relies on periodicity))\n # the following conditional check asks: \"if (BCs on phi are not periodic) AND (HOC is FOURIER)\"\n if ((BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] != 'PERIODIC') or (BC['phi'][var]['lower'] != 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC')) and HOC[var] == 'fourier':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent with the specified high order correction method in params.dat: \\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, HOC[var].upper())\n print \"\\n\\nFourier high order corrections require periodic boundary conditions on both phi and the distribution function f\\n\"\n\n raise InputError('the high order correction is specified as FOURIER; however, the BCs on the electric potential phi are not periodic. FOURIER corrections require PERIODIC BCs on phi and the distribution function as the methods rely on periodicity')\n\n #--------------------------------------------------------------------------------------------#\n # BIAS values\n #--------------------------------------------------------------------------------------------#\n\n Bias = {} # this dictionary is created for reading in the bias values, it is not returned\n # in sim_params dict. 
If a bias condition is set on any boundary, this dictionary\n # assigns its value at that boundary in the vector phi_BC[var], phi_BC[var] is\n # returned (as usual, var = ['x', 'y', 'z'])\n\n Bias['x'] = {}\n Bias['y'] = {}\n Bias['z'] = {}\n\n Bias['x']['lower'] = safe_eval(BC_infile_lines[214][BC_infile_lines[214].find('=')+1:].strip())\n Bias['x']['upper'] = safe_eval(BC_infile_lines[215][BC_infile_lines[215].find('=')+1:].strip())\n Bias['y']['lower'] = safe_eval(BC_infile_lines[217][BC_infile_lines[217].find('=')+1:].strip())\n Bias['y']['upper'] = safe_eval(BC_infile_lines[218][BC_infile_lines[218].find('=')+1:].strip())\n Bias['z']['lower'] = safe_eval(BC_infile_lines[220][BC_infile_lines[220].find('=')+1:].strip())\n Bias['z']['upper'] = safe_eval(BC_infile_lines[221][BC_infile_lines[221].find('=')+1:].strip())\n\n # check for valid inputs on active variables for any boundary that is specified as BIAS\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n if HOC[var] == 'FOURIER':\n pass\n else:\n for boundary in ['lower', 'upper']:\n if var in phasespace_vars:\n if BC['phi'][var][boundary] == 'BIAS':\n if Bias[var][boundary] is None: # if the BC is BIAS but the value input for the BIAS value is None\n print \"\\nThe following specifications in params_boundaryconditions.dat are inconsistent:\\n\"\n print \"%s boundary condition on phi for variable %s: %s\" % (boundary, var, BC['phi'][var][boundary].upper())\n print \"%s BIAS value on phi for variable %s: %s\\n\" % (boundary, var, Bias[var][boundary])\n print \"e.g. if a boundary condition on phi is set to BIAS for a variable, a number must be specifed under BIAS value\\n\"\n raise InputError('A phi boundary condition on an active variable (number of grid points on this variable has been set as non-None) has been specified as BIAS; however, the corresponding BIAS value is NoneType. Must be a number.')\n elif type(Bias[var][boundary]) == str:\n print \"\\nThe following specifications in params_boundaryconditions.dat are inconsistent:\\n\"\n print \"%s boundary condition on phi for variable %s: %s\" % (boundary, var, BC['phi'][var][boundary].upper())\n print \"%s BIAS value on phi for variable %s: %s\\n\" % (boundary, var, Bias[var][boundary])\n print \"e.g. if a boundary condition on phi is set to BIAS for a variable, a number must be specifed under BIAS value\\n\"\n\n raise InputError('A phi boundary condition on an active variable (number of grid points on this variable has been set as non-None) has been specified as BIAS; however, the corresponding BIAS value is str type. 
Must be a number.')\n else:\n pass\n\n # E is calculated by the following call flow, first an ORCHESTRATOR is called:\n #\n # E = lib.fieldsolvers.compute_electric_field_fourier <--- solves with a Gauss' law solver directly\n #\n # or\n #\n # E = lib.fieldsolvers.compute_electric_field_fd <--- solves a Poisson solver for phi, then differentiate to get E\n #\n # which can generally be called by eval operating on string handles that are themselves constructed\n # per 'lib.fieldsolvers.compute_electric_field_' + HOC[var].lower()\n #\n # If a finite difference routine is specified, a Poisson solve must be performed to obtain phi.\n # We call the relevant Poisson solver among the following options (L = lower boundary, U = upper boundary, DBC = Dirichlet BC, NBC = Neumann BC):\n #\n # Poisson_6th_PBC\n # Poisson_6th_LDBC_UDBC\n # Poisson_6th_LDBC_UNBC\n # Poisson_6th_LNBC_UDBC\n # Poisson_6th_LDBC_LDBC\n # Poisson_6th_UDBC_UNBC\n #\n\n # which are selected based on the boundary conditions the user has supplied in params_boundaryconditions.dat.\n #\n # finally, we compute and return:\n #\n # E = - 1 / config_var.width * W_dn1_LTE6.dot(phi)\n #\n\n # --------------------------------------------------------------------------\n # fieldsolver orchestator handle string for electric field (periodic or non-periodic)\n #\n # currently only 1D1V, only one handle needed. When this will be generalized, can make a dict object with keys corresponding\n # to each active configuration variable\n\n compute_electric_field_orchestrator_handle = {}\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n # dictionary key labels the component of the electric field: 'x', 'y', 'z'\n compute_electric_field_orchestrator_handle[var] = \"DECSKS.lib.fieldsolvers.compute_electric_field_\" + HOC[var].lower()\n\n\n # ---------------------------------------------------------------------\n # initialize dictionaries for wall charge objects\n\n sigma = {}\n sigma_n = {}\n\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n sigma_n[var] = {}\n sigma[var] = {}\n\n # --------------------------------------------------------------------------\n # Dictionary for the specific electric potential phi function solver needed\n # according to the specified boundary conditions on phi\n\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n\n if HOC[var] == 'FOURIER':\n pass # uses electric field E, periodic boundary conditions only\n\n else: # is FD corrections, and electric potential phi in a Poisson solver, can be periodic or other BCs\n BC['phi'][var]['type'] = BC['phi'][var]['lower'] + '_' + BC['phi'][var]['upper']\n if BC['phi'][var]['type'] == 'PERIODIC_PERIODIC':\n BC['phi'][var]['type'] = 'PBC'\n\n if BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] != 'periodic':\n raise InputError('A boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n\n if BC['phi'][var]['type'] == 'BIAS_BIAS':\n BC['phi'][var]['type'] = 'LDBC_UDBC'\n\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][0] = float(Bias[var]['lower'])\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][-1] = float(Bias[var]['upper'])\n\n if BC['f'][var]['lower'] != 'absorbing' or BC['f'][var]['upper'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n raise InputError('A boundary condition on phi was specified as BIAS; however, the 
corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n elif BC['phi'][var]['type'] == 'BIAS_SELF-CONSISTENT':\n BC['phi'][var]['type'] = 'LDBC_UNBC'\n\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][0] = float(Bias[var]['lower'])\n # Neumann condition, dphi = sigma_upper, translates to phi_BC[-1] = -6 var.width * sigma_upper (see https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb for details)\n # phi_BC[-1] = - 6 * var.width * sim_params['sigma'][var]['upper'], changes with time step\n\n if BC['f'][var]['lower'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n raise InputError('A lower boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n sigma[var]['upper'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"upper boundary condition on f for variable %s: %s\\n\" % (var, BC['f'][var]['upper'].upper())\n print \"\\ne.g. an upper boundary condition on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\"\n print \"\\ne.g. an upper boundary condition on f as ASBORBING must have the upper boundary condition on phi as BIAS\\n\"\n\n raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n elif BC['phi'][var]['type'] == 'SELF-CONSISTENT_BIAS':\n BC['phi'][var]['type'] = 'LNBC_UDBC'\n\n # Neumann condition, dphi = -sigma_lower, translates to phi_BC[0] = -6 var.width * sigma_lower (see https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb for details)\n #phi_BC[var][0] = - 6 * var.width * sim_params['sigma'][var]['lower'], changes with time step\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][-1] = float(Bias[var]['upper'])\n\n # check upper boundary\n if BC['f'][var]['upper'] == 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n pass\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"upper boundary condition on f for variable %s: %s\\n\\n\" % (var, BC['f'][var]['upper'].upper())\n print \"\\ne.g. 
an upper boundary condition set on phi as BIAS must have the upper boundary condition on f as ABSORBING\\n\"\n\n raise InputError('An upper boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n # check lower boundary\n if BC['f'][var]['lower'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n sigma[var]['lower'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['lower'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi: %s\" % BC['phi'][var]['lower'].upper()\n print \"lower boundary condition on f: %s\\n\" % BC['f'][var]['lower'].upper()\n print \"\\ne.g. an lower boundary condition set on phi as SELF-CONSISTENT must have the lower boundary condition on f as COLLECTOR\"\n print \"e.g. an lower boundary condition set on f as ABSORBING must have the lower boundary condition on phi as BIAS\"\n print \"e.g. an lower boundary condition set on f as PERIODIC requires the upper boundary on f to be PERIODIC as well as both lower and upper boundary conditions on phi to be set to PERIODIC\\n\"\n raise InputError('A lower boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector if self-consistent boundary potentials are desired). Equivalently, phi is not compatible with f (e.g. if periodic boundaries on f were desired, the potential must also be periodic)')\n\n elif BC['phi'][var]['type'] == 'SYMMETRIC_BIAS' or BC['phi'][var]['type'] == 'SYMMETRY_BIAS':\n BC['phi'][var]['type'] = 'LNBC_UDBC'\n\n # Neumann condition, dphi = 0 for symmetry\n phi_BC[var][0] = 0.\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][-1] = float(Bias[var]['upper'])\n\n if BC['f'][var]['upper'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi: %s\" % BC['phi'][var]['upper'].upper()\n print \"upper boundary condition on f: %s\\n\\n\" % BC['f'][var]['upper'].upper()\n print \"\\ne.g. an upper boundary condition set on phi as BIAS must have the upper boundary condition on f as ABSORBING\\n \"\n raise InputError('An upper boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n\n elif BC['phi'][var]['type'] == 'SYMMETRIC_SELF-CONSISTENT' or BC['phi'][var]['type'] == 'SYMMETRY_SELF-CONSISTENT':\n BC['phi'][var]['type'] = 'LDBC_LNBC'\n\n # We default to a LDBC_LNBC solver, both boundary conditions on left edge, entries 0 (Dirichlet) and 1 (Neumann)\n # cf. DECSKS-04 notebook for more details:\n #\n # https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb\n #\n # Dirichlet condition, set reference potential phi = 0\n phi_BC[var][0] = 0. 
# reference potential set to zero\n # Neumann condition, dphi = 0 for symmetry\n phi_BC[var][1] = 0.\n\n\n if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n # By virtue of the setup, the above enforcements on the lower boundary ensures this unenforced upper Neumann BC is\n # satisfied automatically given the relationship that Neumann BCs are fixed by due to the Poisson equation\n #\n # see github.com/dsirajud/IPython-Notebooks/DECSKS-04 for more information (final few sections of the notebook)\n #\n # Thus, we do not need to actually enforce the wall potential directly in terms of the charge accumulated for this boundary; however,\n # we initialize and track the objects here so that the data can be accessed, analyzed or otherwise plotted, should the user wish\n sigma[var]['upper'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi: %s\" % BC['phi'][var]['upper'].upper()\n print \"upper boundary condition on f: %s\\n\\n\" % BC['f'][var]['upper'].upper()\n print \"\\ne.g. an upper boundary condition set on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\\n \"\n\n raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n elif BC['phi'][var]['type'] == 'SELF-CONSISTENT_SELF-CONSISTENT':\n BC['phi'][var]['type'] = 'LDBC_LNBC'\n\n # We default to a LDBC_LNBC solver, both boundary conditions on left edge, entries 0 (Dirichlet) and 1 (Neumann)\n # cf. DECSKS-04 notebook for more details:\n #\n # https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb\n #\n # Dirichlet condition, set reference potential phi = 0\n phi_BC[var][0] = 0. # reference potential set to zero\n # Neumann condition, dphi = 0 for symmetry\n #phi_BC[var][1] = - 6 * var.width * sim_params['sigma'][var]['lower'], changes with time step\n\n\n if BC['f'][var]['lower'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities\n sigma[var]['lower'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['lower'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi on variable %s: SELF-CONSISTENT\" % var\n print \"lower boundary condition on f on variable %s: %s\\n\\n\" % (var, BC['f'][var]['lower'].upper())\n print \"\\ne.g. 
a lower boundary condition set on phi as SELF-CONSISTENT must have the lower boundary condition on f as COLLECTOR\\n \"\n\n raise InputError('A lower boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n # By virtue of the setup, the above enforcements on the lower boundary ensures this unenforced upper Neumann BC is\n # satisfied automatically given the relationship that Neumann BCs are fixed by due to the Poisson equation\n #\n # see github.com/dsirajud/IPython-Notebooks/DECSKS-04 for more information (final few sections of the notebook)\n #\n # Thus, we do not need to actually enforce the wall potential directly in terms of the charge accumulated for this boundary; however,\n # we initialize and track the objects here so that the data can be accessed, analyzed or otherwise plotted, should the user wish\n sigma[var]['upper'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi: SELF-CONSISTENT\"\n print \"upper boundary condition on f: %s\\n\\n\" % BC['f'][var]['upper'].upper()\n print \"\\ne.g an upper boundary condition set on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\\n \"\n\n raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n # else: boundary conditions have already been checked for valid inputs, no invalid input will be encountered\n\n # --------------------------------------------------------------------------\n # ELECTRIC POTENTIAL PHI FUNCTION HANDLE STRING and BOUNDARY CONDITION TYPE FUNCTION HANDLE STRING\n #\n # currently only 1D1V, only one handle needed. 
When this will be generalized, can make a dict objects with keys corresponding\n # to each active configuration variable\n #\n # The forms of each string call their associated method per the boundary conditions specified by the user in params_boundaryconditions.dat,\n # based on the boundary conditions specified by the user, one of the following will be created:\n #\n # compute_electric_potential_phi_handle[var] =\n #\n # DECSKS.lib.fieldsolvers.Poisson_6th_PBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_UDBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_UNBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LNBC_UDBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_LNBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_UDBC_UNBC (<-- available, but not used in any current combination of BCs)\n #\n #\n # and, one of the following\n #\n # distribution_function_boundarycondition_handle[var]['lower'] =\n #\n # DECSKS.lib.boundaryconditions.absorbing_lower_boundary\n # DECSKS.lib.boundaryconditions.collector_lower_boundary\n # DECSKS.lib.boundaryconditions.symmetric_lower_boundary\n #\n # NOTE: if 'periodic' has been specified, everything is\n # handled in the orchestrator, distribution_function_boundarycondition_orchestrator\n # which would take on the string value = 'DECSKS.lib.boundaryconditions.periodic\n\n\n distribution_function_boundarycondition_prefix = 'DECSKS.lib.boundaryconditions'\n distribution_function_boundarycondition_handle = {}\n for var in phasespace_vars:\n if BC['f'][var]['type'] == 'periodic':\n pass\n else:\n distribution_function_boundarycondition_handle[var] = {}\n\n distribution_function_boundarycondition_handle[var]['lower'] = \".\".join((distribution_function_boundarycondition_prefix, BC['f'][var]['lower']))\n distribution_function_boundarycondition_handle[var]['lower'] = \"_\".join((distribution_function_boundarycondition_handle[var]['lower'], 'lower_boundary'))\n\n distribution_function_boundarycondition_handle[var]['upper'] = \".\".join((distribution_function_boundarycondition_prefix, BC['f'][var]['upper']))\n distribution_function_boundarycondition_handle[var]['upper'] = \"_\".join((distribution_function_boundarycondition_handle[var]['upper'], 'upper_boundary'))\n\n\n compute_electric_potential_phi_handle = {}\n compute_electric_potential_phi_prefix = \"DECSKS.lib.fieldsolvers.Poisson_6th_\"\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n if HOC[var] == 'FOURIER': # uses a Gauss law solver to find E directly, which is called by the orchestrator on the fieldsolver\n pass\n else: # computes the electric field E by differentiating phi in an orchestrator fieldsolver function (string handle constructed above)\n # inside the orchestrator, a particular Poisson solver is called according with the boundary conditions indicated in params_boundaryconditions.dat\n compute_electric_potential_phi_handle[var] = compute_electric_potential_phi_prefix + BC['phi'][var]['type']\n else:\n pass\n\n # in the future, can generalize this to multiple dimensions by making this a dict with keys ['x', 'y', 'z']\n # currently just on 1D1V and expecting an 'x' variable to be evolved in configuration\n\n if 'x' not in phasespace_vars:\n raise NotImplementedError('Current 1D1V version of DECSKS is expecting x to be the active configuration variable. 
Please revise the intended simulation so that x is the symbol chosen in params.dat.')\n else:\n if HOC['x'] == 'FOURIER': # uses a Gauss solver to find E directly\n Poisson_6th_order_FD_solver_matrices = None\n\n else: # uses a Poisson solver to find phi, then differentiates to obtain E\n Poisson_6th_order_FD_solver_matrices = assemble_Poisson_6th_order_FD_solver_matrices(Nx_active, BC)\n\n derivative_method = {}\n derivative_method_prefix = 'DECSKS.lib.derivatives'\n for var in phasespace_vars:\n derivative_method[var] = \".\".join((derivative_method_prefix, HOC[var].lower()))\n\n sim_params = dict(\n N = N, HOC = HOC,\n derivative_method = derivative_method,\n Nx = Nx, ax = ax, bx = bx,\n Ny = Ny, ay = ay, by = by,\n Nz = Nz, az = az, bz = bz,\n Nvx = Nvx, avx = avx, bvx = bvx,\n Nvy = Nvy, avy = avy, bvy = bvy,\n Nvz = Nvz, avz = avz, bvz = bvz,\n Nt = Nt, T = T,\n phasespace_vars = phasespace_vars,\n numdims = numdims,\n active_dims = active_dims,\n total_dims = total_dims,\n density = density,\n mu = mu,\n split_scheme = split_scheme,\n splitting = splitting,\n plot_params = plot_params,\n record_outputs = record_outputs,\n outfiles = outfiles,\n BC = BC, # boundary condition types on all phase space variables on distribution function f and phi\n phi_BC = phi_BC, # dictionary containing boundary value vector for electric potential used in Poisson solve, e.g. phi_BC['x']\n sigma = sigma,\n sigma_n = sigma_n, # this was put in for charge history plots\n distribution_function_boundarycondition_handle = distribution_function_boundarycondition_handle, # dictionary with keys (var in phasespace_vars), which are keys to a subdict with keys 'lower', 'upper'\n distribution_function_boundarycondition_orchestrator_handle = distribution_function_boundarycondition_orchestrator_handle, # dictionary with keys (var in phasespace_vars)\n compute_electric_potential_phi_handle = compute_electric_potential_phi_handle,\n compute_electric_field_orchestrator_handle = compute_electric_field_orchestrator_handle,\n I_alternating = I_alternating, # identity matrix with alternating signs according to row, used in computing correctors c\n A_matrix = A_matrix, # Matrices of Bernoulli numbers for HOC\n W = W,\n W_dn1_LTE6 = W_dn1_LTE6,\n Xi = Xi, # spectral differentiation operator matrix (1j*xi[i,j]) ** q\n xi = xi, # wave number vector\n Poisson_6th_order_FD_solver_matrices = Poisson_6th_order_FD_solver_matrices\n )\n\n infile.close()\n\n # --------------------------------------------------------------------------\n # Before return, broadcast notification\n # regarding start of simulation and order of solver\n\n print \"\\nStarting 1D1V Vlasov-Poisson simulation\"\n print \"\\nadvection solver: LTE order %d\" % (N+1)\n print \"\\nwill step through %d-dimensional solution in variables: %s\\n\" % (len(phasespace_vars), phasespace_vars)\n for var in phasespace_vars:\n print \"high order correction method on %s: %s\" % (var, HOC[var])\n\n print \"\\n\"\n return sim_params",
"def _read_lexicon(a_dname):\n if not a_dname:\n return\n elif a_dname[-1] == '/':\n a_dname = os.path.dirname(a_dname)\n basename = os.path.basename(a_dname)\n if basename == HSAN:\n return _read_hsan(a_dname)\n elif basename == S140:\n return _read_s140(a_dname)\n elif basename == SUBJCL:\n return _read_subjcl(a_dname)\n elif basename == NRC_HSHTAG:\n return _read_nrc_hshtag(a_dname)\n else:\n raise Exception(\"Unknown dictionary format: '{:s}'\".format(basename))",
"def read_input_file(self):\n\n # Check if input file exists in current directory, if not kill process\n if not os.path.isfile('./visualise.inpt'):\n print('Cannot find input file \"visualise.inpt\" in current directory')\n sys.exit()\n\n # Read input file and analysis options and parameters\n print('Reading input file')\n with open('visualise.inpt','r') as f:\n f.readline()\n self.prefix = f.readline().split()[0]\n f.readline()\n f.readline()\n self.frame = int(f.readline().split()[0])\n f.readline()\n f.readline()\n self.vis_particles = int(f.readline().split()[0])\n self.vis_vortype = int(f.readline().split()[0])\n self.vis_cellcolour = int(f.readline().split()[0])\n self.vis_save = int(f.readline().split()[0])"
] | [
"0.5987714",
"0.59453547",
"0.58234787",
"0.58072245",
"0.57432646",
"0.5646696",
"0.5598178",
"0.557389",
"0.5563821",
"0.5482843",
"0.5413918",
"0.5413375",
"0.53326714",
"0.53081083",
"0.53040534",
"0.5293794",
"0.52868974",
"0.5284766",
"0.526946",
"0.52602595",
"0.5239944",
"0.5237519",
"0.5236472",
"0.5213196",
"0.521094",
"0.5189161",
"0.5181126",
"0.5171589",
"0.51564187",
"0.5120013"
] | 0.7064549 | 0 |
Perform singular value decomposition on a document-term matrix A: A = T S D^T, approximated as A ~ T' S' D'^T, where T', S', and D' have fewer columns than T, S, and D. | def computeTruncatedSVD(docTermMatrix, dim=500):
T, S, D = np.linalg.svd(np.transpose(docTermMatrix), full_matrices=False)
diagS = np.diag(S)
shape = np.shape(diagS)
if dim <= shape[0] and dim <= shape[1]:
subT = T[:,:dim]
subS = diagS[:dim,:dim]
subD = np.transpose(D)[:,:dim]
else:
subT = T
subS = diagS
subD = np.transpose(D)
return subT, subS, subD | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def singular_decomp(A):\n # Initialization\n n, m = A.shape\n U = np.zeros((n, m), dtype='float64')\n\n # Diagonalization of A^T * A\n rot, e, V = eigen.diag(np.dot(np.transpose(A), A))\n\n # Calculate U\n U = np.dot(A, V)\n for i in range(m):\n e[i] = np.sqrt(e[i])\n U[:, i] /= e[i]\n\n return U, e, V",
"def check_non_singular(self, Am):\r\n det = self.detf(Am)\r\n if det != 0:\r\n return det\r\n else:\r\n raise ArithmeticError(\"Singular Matrix!\")",
"def compact_svd(A, tol=1e-6):\r\n eigs, vecs = la.eig(A.conj().T@A)\r\n svs = np.sqrt(eigs)\r\n #sort eigenvalues and eigenvectors accordingly\r\n sorter = list(zip(svs,vecs.T))\r\n sorter.sort(reverse=True, key=lambda tup: tup[0])\r\n svs = [x[0] for x in sorter]\r\n vecs = [x[1] for x in sorter]\r\n #find number of nonzero eigenvalues\r\n r_not = svs.count(0)\r\n r = len(svs) - r_not\r\n svs_1 = np.array(svs[:r])\r\n vecs_1 = np.array(vecs[:r])\r\n u_1 = (A@vecs_1)/svs_1\r\n\r\n return u_1, svs_1, vecs_1.conj().T",
"def matrix_svd(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n\n # SVD each sector at a time.\n # While doing so, also keep track of a list of all singular values, as\n # well as a heap that gives the negative of the largest singular value\n # in each sector. These will be needed later when deciding how to\n # truncate the decomposition.\n svds = {}\n dims = {}\n minus_next_sings = []\n all_sings = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=self.dtype)\n s = np.empty((m,), dtype=np.float_)\n v = np.empty((m, shp[1]), dtype=self.dtype)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n u, s, v = spsla.svds(\n v, k=maxchi, return_singular_vectors=True\n )\n order = np.argsort(-s)\n u = u[:, order]\n s = s[order]\n v = v[order, :]\n else:\n u, s, v = np.linalg.svd(v, full_matrices=False)\n svd = (s, u, v)\n svds[k] = svd\n dims[k] = 0\n sings = svd[0]\n all_sings.append(sings)\n if 0 not in sings.shape:\n heapq.heappush(minus_next_sings, (-sings[0], k))\n try:\n all_sings = np.concatenate(all_sings)\n except ValueError:\n all_sings = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_sings,\n svds,\n minus_next_sings,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n svds = {k: v for k, v in svds.items() if dims[k] > 0}\n for k, v in svds.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n svds[k] = (v[0][:d], v[1][:, :d], v[2][:d, :])\n else:\n del svds[k]\n\n # Initialize U, S, V.\n d = self.dirs[0]\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=0,\n )\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=np.float_,\n invar=False,\n charge=0,\n )\n V = type(self)(\n [new_dim, self.shape[1]],\n qhape=[new_qim, self.qhape[1]],\n dirs=[d, self.dirs[1]],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=self.charge,\n )\n\n # Set the blocks of U, S and V.\n for k, v in svds.items():\n k_U = (k[0], k[0])\n S[(k[0],)] = v[0]\n U[k_U] = v[1]\n V[k] = v[2]\n\n return U, S, V, rel_err",
"def check_non_singular(A):\n det = determinant_fast(A)\n if det != 0:\n return det\n else:\n raise ArithmeticError(\"Singular Matrix!\")",
"def invert_L1_svd():",
"def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n (u,s,v)=np.linalg.svd(matrix)\n ### END YOUR CODE\n\n return u, s, v",
"def compact_svd(A, tol=1e-6):\n #Compute eigenvalues/vectors\n lam, V = la.eig((A.conj().T @ A))\n sig = np.sqrt(lam)\n \n #Sort results\n argB = np.argsort(sig)\n arg = []\n for i in range(0, len(argB)):\n arg.append(argB[len(argB)-1-i])\n sig = sig[arg]\n V = V[:,arg]\n #How many non-zero positive\n r = 0\n for j in range(0, len(sig)):\n if abs(sig[j]) >= tol:\n r += 1\n \n sig1 = sig[:r]\n V1 = np.array(V[:,:r])\n \n# print(np.shape(A))\n# print(np.shape(V1))\n U1 = A@V1\n U1 = U1/sig1\n \n #Return answers\n return U1, sig1, V1.conj().T\n\n raise NotImplementedError(\"Problem 1 Incomplete\")",
"def test_canonicalization_of_vectors_w_symm(free_alg):\n\n dr = free_alg\n p = dr.names\n x = IndexedBase('x')\n r = p.R\n i, j = p.i, p.j\n\n vs = Vec('vs')\n dr.set_symm(vs, Perm([1, 0]), valence=2)\n tensor = dr.sum((i, r), (j, r), x[i, j] * vs[j, i])\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == x[i, j]\n assert term.vecs == (vs[i, j],)\n\n va = Vec('va')\n dr.set_symm(va, Perm([1, 0], NEG), valence=2)\n tensor = dr.sum((i, r), (j, r), x[i, j] * va[j, i])\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == -x[i, j]\n assert term.vecs == (va[i, j],)",
"def test_tensors_w_functions_can_be_canonicalized(free_alg):\n dr = free_alg\n dr.set_symm(SymmFunc, Perm([1, 0], NEG), valence=2, set_base_name=False)\n\n p = dr.names\n i, j, k = p.R_dumms[:3]\n r = p.R\n v = p.v\n\n # General anti-symmetric real matrix.\n tensor = dr.sum(\n (i, r), (j, r), SymmFunc(k, i, j) * SymmFunc(i, j) * v[i] * v[j]\n ) + dr.sum(\n (i, r), (j, r), SymmFunc(k, i, j) * SymmFunc(j, i) * v[i] * v[j]\n )\n assert tensor.n_terms == 2\n assert tensor.simplify() == 0",
"def get_singular_values(matrix, n):\n singular_values = None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return singular_values",
"def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)",
"def truncated_svd(A,k=None):",
"def test_tensor_can_be_canonicalized(free_alg):\n\n dr = free_alg\n p = dr.names\n i, j = p.R_dumms[:2]\n r = p.R\n m = p.m\n h = p.h\n v = p.v\n\n # Anti-symmetric real matrix.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0\n\n # With wrapping under an even function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] ** 2 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 2 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == 2 * m[i, j] ** 2\n assert term.vecs == (v[i], v[j])\n\n # With wrapping under an odd function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] ** 3 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 3 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 0\n\n # Hermitian matrix.\n tensor = dr.einst(\n h[i, j] * v[i] * v[j] + conjugate(h[j, i]) * v[i] * v[j]\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0",
"def get_s_matrix(data, word_form, semvecs, n_events, n_vec_dims):\n\n s = np.zeros((n_events, n_vec_dims))\n ii = 0\n for index, row in data.iterrows():\n s[ii, ] = semvecs.loc[row[word_form], :]\n ii += 1\n return s",
"def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return u, s, v",
"def implement_svd(data):\n u, s, v = torch.svd(data) # implement svd\n # note: the u returned by this function only includes the top values.\n # u * s will be equivalent due to the zero terms, but will run more efficiently with this implementation.\n s = torch.diag(s) # turn s into a diagonal matrix\n transformed_matrix = torch.mm(u, s) # u * s\n return l21_reg(s), transformed_matrix # return the L2,1 regularization term and matrix",
"def test_norm():\n A = np.diag([1, 1, 1, 1])\n assert abs(norm(A) - 2.0) < 1e-12\n assert abs(norm(scipy.sparse.coo_matrix(A)) - 2.0) < 1e-12",
"def test_ccsd_singles_terms(parthole_drudge):\n\n dr = parthole_drudge\n p = dr.names\n\n a, b, c = p.V_dumms[:3]\n i, j, k = p.O_dumms[:3]\n u = dr.two_body\n f = dr.fock\n t = IndexedBase('t')\n dr.set_dbbar_base(t, 2)\n\n r = IndexedBase('r')\n tensor = dr.define_einst(\n r[a, i],\n t[a, b, i, j] * u[j, k, b, c] * t[c, k] + t[a, b, i, j] * f[j, b]\n - t[a, j] * t[b, i] * f[j, b]\n - t[a, j] * t[b, i] * t[c, k] * u[j, k, b, c]\n )\n targets = [tensor]\n\n eval_seq = optimize(targets, substs={p.nv: p.no * 10})\n\n assert verify_eval_seq(eval_seq, targets)\n assert len(eval_seq) == 4",
"def matrix_det(A):\n\tx = A[0,0]*A[1,1]*A[2,2] + A[0,1]*A[1,2]*A[2,0] + A[0,2]*A[1,0]*A[2,1]\n\ty = A[0,0]*A[1,2]*A[2,1] + A[0,1]*A[1,0]*A[2,2] + A[0,2]*A[1,1]*A[2,0]\n\treturn x - y",
"def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X",
"def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V",
"def get_singular_values(matrix, n):\n singular_values = None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n singular_values=s[0:n]\n ### END YOUR CODE\n return singular_values",
"def reduce_svd(embeddings, seed=0):\n svd = TruncatedSVD(n_components=2, n_iter=10, random_state=seed)\n return svd.fit_transform(embeddings)",
"def gram_schmidt(mat_a):\n # NOTE: We will use the same variable names as the one in the\n # pseudo code for clarity\n rows_count = mat_a.shape[0]\n\n u = mat_a.copy()\n r = np.zeros_like(u)\n q = np.zeros_like(u)\n for i in range(rows_count):\n u_i = u[:, i]\n r[i, i] = np.linalg.norm(u_i)\n q[:, i] = u_i / r[i, i] if r[i, i] != 0 else 0\n q_i = q[:, i]\n\n r[i, i + 1:] = q_i.T.dot(u[:, i + 1:])\n # np.outer will multiply q_i by each number in r[i, i + 1:], and create\n # a matrix that each column is a result of that multiplication\n u[:, i + 1:] -= np.outer(q_i, r[i, i + 1:])\n\n return q, r",
"def normalize_adj( adj : np.ndarray, \n sparse : bool = False\n ) -> Union[np.ndarray, sp.spmatrix]:\n if sparse:\n adj = sp.coo_matrix(adj) # [N,N]\n rowsum = np.array(adj.sum(1)) # [N,]\n \n d_inv_sqrt = np.power(rowsum, -0.5) # [N,], may issue runtime warnings (div by zero)\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. # []\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt) if sparse else np.diag(d_inv_sqrt) #[N,N]\n \n if sparse:\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n else:\n return ((adj @ d_mat_inv_sqrt).transpose() @ d_mat_inv_sqrt) # not quite sure why this order = D^T A^T D, D^T = D, A^T = A - the transpose is unncessary?!",
"def visualize_svd():",
"def nullOld(A, eps=1e-14):\n\t# Taken with gratitude from http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix\n\tu, s, vh = la.svd(A)\n\tnull_mask = (s <= eps)\n\tnull_space = scipy.compress(null_mask, vh, axis=0)\n\treturn scipy.transpose(null_space)",
"def tsvd(A, threshold=0.99999, avoid_pathological=True):\n M,N = A.shape\n full_matrices = False\n\n if is_int(threshold):\n # Assume specific number is requested\n r = threshold\n assert 1 <= r <= max(M,N)\n if r > min(M,N):\n full_matrices = True\n r = min(M,N)\n\n U,s,VT = sla.svd(A, full_matrices)\n\n if isinstance(threshold,float):\n # Assume proportion is requested\n r = truncate_rank(s,threshold,avoid_pathological)\n\n # Truncate\n U = U [:,:r]\n VT = VT[ :r]\n s = s [ :r]\n return U,s,VT",
"def spd_pinv(a, rcond=1e-10, square_root=False, check_stability=True):\n N, _N = a.shape\n assert N == _N, \"Matrix is not square!\"\n # get the eigen-decomposition\n # w, v = np.linalg.eigh(a)\n v, w, u = np.linalg.svd(a)\n sort_index = np.argsort(w)\n w = w[sort_index]\n v = v[:,sort_index]\n # check positive-definiteness\n ev_min = w.min()\n if ev_min <= 0:\n msg = \"Matrix is not positive-definite: min ev = {0}\"\n raise IndefiniteError(msg.format(ev_min))\n # check stability of eigen-decomposition\n if check_stability:\n # XXX use a preconditioner?\n if not np.allclose(a, np.dot(v, w[:, np.newaxis] * v.T)):\n raise NumericalError(\n \"Instability in eigh (condition number={:g})\".format(\n (w.max() / w.min())))\n\n # invert the \"large enough\" part of s\n cutoff = rcond * w.max()\n for i in range(N):\n if w[i] > cutoff:\n if square_root:\n # square root of the pseudo-inverse\n w[i] = np.sqrt(1. / w[i])\n else:\n w[i] = 1. / w[i]\n else:\n w[i] = 0.\n # compute the pseudo-inverse (using broadcasting)\n res = np.real(np.dot(v, w[:, np.newaxis] * v.T))\n # check stability of pseudo-inverse\n if check_stability:\n if square_root:\n pa = np.dot(res, res)\n approx_a = np.dot(a, np.dot(pa, a))\n msg = \"Instability in square-root of pseudo-inverse\"\n else:\n approx_a = np.dot(a, np.dot(res, a))\n msg = \"Instability in pseudo-inverse\"\n if not np.allclose(a, approx_a):\n # be a bit laxist by looking at the Mean Squared Error\n mse = np.mean((a - approx_a) ** 2)\n if mse > 1e-16:\n raise NumericalError(\"{} (MSE={:g})\".format(msg, mse))\n return res"
] | [
"0.6640744",
"0.58657926",
"0.58088976",
"0.57761854",
"0.57290536",
"0.5726822",
"0.5711986",
"0.5658731",
"0.5601522",
"0.55621266",
"0.5544685",
"0.5520135",
"0.54970425",
"0.5486736",
"0.5445198",
"0.54431707",
"0.5406663",
"0.53682363",
"0.53435516",
"0.529848",
"0.5295507",
"0.52939636",
"0.5265533",
"0.51799095",
"0.5166513",
"0.5159301",
"0.5145162",
"0.5133937",
"0.5124034",
"0.51177853"
] | 0.6557088 | 1 |
Sets the advanced_catalog_count of this IaasUcsdManagedInfraAllOf. | def advanced_catalog_count(self, advanced_catalog_count):
self._advanced_catalog_count = advanced_catalog_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bm_catalog_count(self, bm_catalog_count):\n\n self._bm_catalog_count = bm_catalog_count",
"def container_catalog_count(self, container_catalog_count):\n\n self._container_catalog_count = container_catalog_count",
"def standard_catalog_count(self, standard_catalog_count):\n\n self._standard_catalog_count = standard_catalog_count",
"def count_all_catalogs(self):\n return self.manager.count_entities(ModelCatalog)",
"def __init__(self,\n advanced_catalog_count=None,\n bm_catalog_count=None,\n container_catalog_count=None,\n esxi_host_count=None,\n external_group_count=None,\n hyperv_host_count=None,\n local_group_count=None,\n standard_catalog_count=None,\n user_count=None,\n vdc_count=None,\n vm_count=None,\n guid=None,\n local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._advanced_catalog_count = None\n self._bm_catalog_count = None\n self._container_catalog_count = None\n self._esxi_host_count = None\n self._external_group_count = None\n self._hyperv_host_count = None\n self._local_group_count = None\n self._standard_catalog_count = None\n self._user_count = None\n self._vdc_count = None\n self._vm_count = None\n self._guid = None\n self.discriminator = None\n\n if advanced_catalog_count is not None:\n self.advanced_catalog_count = advanced_catalog_count\n if bm_catalog_count is not None:\n self.bm_catalog_count = bm_catalog_count\n if container_catalog_count is not None:\n self.container_catalog_count = container_catalog_count\n if esxi_host_count is not None:\n self.esxi_host_count = esxi_host_count\n if external_group_count is not None:\n self.external_group_count = external_group_count\n if hyperv_host_count is not None:\n self.hyperv_host_count = hyperv_host_count\n if local_group_count is not None:\n self.local_group_count = local_group_count\n if standard_catalog_count is not None:\n self.standard_catalog_count = standard_catalog_count\n if user_count is not None:\n self.user_count = user_count\n if vdc_count is not None:\n self.vdc_count = vdc_count\n if vm_count is not None:\n self.vm_count = vm_count\n if guid is not None:\n self.guid = guid",
"def update_library_count(self, ebook_count):\n\t\tself.library_count = ebook_count",
"def advanced_features(self, advanced_features):\n\n self._advanced_features = advanced_features",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def count_all_catalog_services(self):\n return self.manager.count_entities(ModelEndpoint)",
"def associated_object_count(self, associated_object_count):\n self._associated_object_count = associated_object_count",
"def catalog_id(self, catalog_id):\n self._catalog_id = catalog_id",
"def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n \"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = {\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": 
c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)",
"def _set_catalog(self, catalog: cat.Catalog) -> None:\n self._catalog_interface = CatalogInterface(catalog)\n self._catalog = catalog",
"def advanced_properties(self, advanced_properties):\n\n self._advanced_properties = advanced_properties",
"def ucat(catalogName, subarray=DEFAULT) :\n stripCat = catalogName.strip() \n if stripCat == \"\": catalogName = 'NONE'\n else : catalogName = stripCat\n multiSubarray('ucat', subarray, catalogName)",
"def total_nucleus_clients_inactive(self, total_nucleus_clients_inactive):\n\n self._total_nucleus_clients_inactive = total_nucleus_clients_inactive",
"def _set_catalog_view(self, session):\n if self._catalog_view == COMPARATIVE:\n try:\n session.use_comparative_catalog_view()\n except AttributeError:\n pass\n else:\n try:\n session.use_plenary_catalog_view()\n except AttributeError:\n pass",
"def total_nucleus_clients(self, total_nucleus_clients):\n\n self._total_nucleus_clients = total_nucleus_clients",
"def can_update_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinAdminSession.can_update_bins\n return self._get_provider_session('catalog_admin_session').can_update_catalogs()",
"def modeScaler(self, latestCount):\n \n try:\n # Accumulate new sample data.\n self.__accumCts += latestCount\n \n # Increment runtime counter.\n self.__runtime += 1\n \n except:\n raise\n \n return",
"def get_catalog_size() -> int:\n return len(gift_catalog)",
"def advertise_osd_count(count):\n for relid in hookenv.relation_ids('mon'):\n hookenv.relation_set(\n relation_id=relid,\n relation_settings={'bootstrapped-osds': count}\n )",
"def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n newCount[value] = update\n self.setCounts(newCount)",
"def use_comparative_catalog_view(self):\n self._catalog_view = COMPARATIVE\n # self._get_provider_session('catalog_lookup_session') # To make sure the session is tracked\n for session in self._get_provider_sessions():\n try:\n session.use_comparative_catalog_view()\n except AttributeError:\n pass",
"def has_offer_catalog(self, has_offer_catalog: object):\n\n self._has_offer_catalog = has_offer_catalog",
"def set_indoor_air_quality_baseline(self,\n co2_equivalent,\n total_volatile_organic_compounds):\n if co2_equivalent == 0 and total_volatile_organic_compounds == 0:\n raise RuntimeError('Invalid baseline')\n buffer = []\n for value in [total_volatile_organic_compounds, co2_equivalent]:\n arr = [value >> 8, value & 0xFF]\n arr.append(generate_crc(arr))\n buffer += arr\n self._i2c_read_words_from_cmd(command=[0x20, 0x1e] + buffer, reply_size=0, delay=0.01)",
"def addCasualties(self, number):\n self.casualties += number\n self.ssp += (number * __SSP_CASUALTY__)",
"def setVerbose(self, verbose):\n self._verbose = verbose",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def total_nucleus_clients_active(self, total_nucleus_clients_active):\n\n self._total_nucleus_clients_active = total_nucleus_clients_active"
] | [
"0.57526886",
"0.5751853",
"0.56568986",
"0.46540743",
"0.46511573",
"0.45186806",
"0.4483399",
"0.43547434",
"0.43004668",
"0.4271701",
"0.4244447",
"0.42245948",
"0.42140615",
"0.41743332",
"0.4168656",
"0.41071948",
"0.41035506",
"0.40634376",
"0.4060891",
"0.4047798",
"0.4037455",
"0.40334085",
"0.40194634",
"0.40136296",
"0.40077776",
"0.40012708",
"0.4000589",
"0.3989238",
"0.39799872",
"0.3976133"
] | 0.8002607 | 0 |
Sets the bm_catalog_count of this IaasUcsdManagedInfraAllOf. | def bm_catalog_count(self, bm_catalog_count):
self._bm_catalog_count = bm_catalog_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def container_catalog_count(self, container_catalog_count):\n\n self._container_catalog_count = container_catalog_count",
"def standard_catalog_count(self, standard_catalog_count):\n\n self._standard_catalog_count = standard_catalog_count",
"def advanced_catalog_count(self, advanced_catalog_count):\n\n self._advanced_catalog_count = advanced_catalog_count",
"def count_all_catalogs(self):\n return self.manager.count_entities(ModelCatalog)",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def catalog_id(self, catalog_id):\n self._catalog_id = catalog_id",
"def __init__(self,\n advanced_catalog_count=None,\n bm_catalog_count=None,\n container_catalog_count=None,\n esxi_host_count=None,\n external_group_count=None,\n hyperv_host_count=None,\n local_group_count=None,\n standard_catalog_count=None,\n user_count=None,\n vdc_count=None,\n vm_count=None,\n guid=None,\n local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._advanced_catalog_count = None\n self._bm_catalog_count = None\n self._container_catalog_count = None\n self._esxi_host_count = None\n self._external_group_count = None\n self._hyperv_host_count = None\n self._local_group_count = None\n self._standard_catalog_count = None\n self._user_count = None\n self._vdc_count = None\n self._vm_count = None\n self._guid = None\n self.discriminator = None\n\n if advanced_catalog_count is not None:\n self.advanced_catalog_count = advanced_catalog_count\n if bm_catalog_count is not None:\n self.bm_catalog_count = bm_catalog_count\n if container_catalog_count is not None:\n self.container_catalog_count = container_catalog_count\n if esxi_host_count is not None:\n self.esxi_host_count = esxi_host_count\n if external_group_count is not None:\n self.external_group_count = external_group_count\n if hyperv_host_count is not None:\n self.hyperv_host_count = hyperv_host_count\n if local_group_count is not None:\n self.local_group_count = local_group_count\n if standard_catalog_count is not None:\n self.standard_catalog_count = standard_catalog_count\n if user_count is not None:\n self.user_count = user_count\n if vdc_count is not None:\n self.vdc_count = vdc_count\n if vm_count is not None:\n self.vm_count = vm_count\n if guid is not None:\n self.guid = guid",
"def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)",
"def _set_catalog(self, catalog: cat.Catalog) -> None:\n self._catalog_interface = CatalogInterface(catalog)\n self._catalog = catalog",
"def update_library_count(self, ebook_count):\n\t\tself.library_count = ebook_count",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def limit_size(self, catalog):\n if len(catalog)<=self.limit:\n return catalog\n mem = {}\n for instance in catalog:\n if (instance['vCpu'], math.ceil(instance['memory'])) not in mem:\n mem[(instance['vCpu'], math.ceil(instance['memory']))] = instance\n out = [val for val in mem.values()]\n if len(out)>self.limit:\n out = sorted(out, key=lambda x: x['onDemandUsdPrice'])\n return out[:self.limit]\n return out",
"def lun_count(self, lun_count):\n\n self._lun_count = lun_count",
"def set_circuit_fav_count(self, circuit_id, number):\n key = ':'.join(\n [CIRCUIT_NMBR_FAVS_1, \n str(circuit_id), \n CIRCUIT_NMBR_FAVS_2]\n ) \n self.RS.set(key, number)",
"def set_city_count(self, city_count):\n self.city_count = city_count",
"def count_all_catalog_services(self):\n return self.manager.count_entities(ModelEndpoint)",
"def reset_cbc(self):\n if not self.block_count:\n raise ValueError(\"cannot reset cbc until block_count is set\")\n cbc_len = np.prod(self.block_count)\n self.cbc = np.ones(cbc_len, dtype=np.bool)",
"def set_circuit_remix_count(self, circuit_id, number):\n key = ':'.join(\n [CIRCUIT_NMBR_RMX_1, \n str(circuit_id), \n CIRCUIT_NMBR_RMX_2]\n )\n self.RS.set(key, number)",
"def setCount(self, num):\n self.count=num",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def setCompoundCount(self, count):\n return self._set(compoundCount=count)",
"def processor_count(self, processor_count):\n\n self._processor_count = processor_count",
"def bid_count(self, bid_count):\n\n self._bid_count = bid_count",
"def set_count(self, count):\n self._count = count",
"def reset_cbc(self):\n if not self.parent_block_count:\n raise ValueError(\"cannot reset cbc until parent_block_count is set\")\n cbc_len = np.prod(self.parent_block_count)\n self.cbc = np.ones(cbc_len, dtype=np.uint32)",
"def reset_cbc(self):\n if not self.parent_block_count:\n raise ValueError(\"cannot reset cbc until parent_block_count is set\")\n cbc_len = np.prod(self.parent_block_count)\n self.cbc = np.ones(cbc_len, dtype=np.uint32)",
"def reset_cbc(self):\n if not self.parent_block_count:\n raise ValueError(\"cannot reset cbc until parent_block_count is set\")\n cbc_len = np.prod(self.parent_block_count)\n self.cbc = np.ones(cbc_len, dtype=np.uint32)",
"def get_paginate_by(self, queryset):\n return config.PUBLICATION_BACKBONE_CATALOG_PER_PAGE_ITEMS_COUNT",
"def advertise_osd_count(count):\n for relid in hookenv.relation_ids('mon'):\n hookenv.relation_set(\n relation_id=relid,\n relation_settings={'bootstrapped-osds': count}\n )"
] | [
"0.67917866",
"0.62580836",
"0.60013944",
"0.5060284",
"0.4933995",
"0.4909605",
"0.48784313",
"0.48781607",
"0.47947842",
"0.4782925",
"0.47474197",
"0.47474197",
"0.46623227",
"0.46587622",
"0.4638956",
"0.46326226",
"0.46316606",
"0.46220344",
"0.4605496",
"0.45846856",
"0.4557458",
"0.45374727",
"0.45251608",
"0.45205218",
"0.44527188",
"0.44263664",
"0.44263664",
"0.44263664",
"0.4421333",
"0.4393605"
] | 0.7878276 | 0 |
Sets the container_catalog_count of this IaasUcsdManagedInfraAllOf. | def container_catalog_count(self, container_catalog_count):
self._container_catalog_count = container_catalog_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bm_catalog_count(self, bm_catalog_count):\n\n self._bm_catalog_count = bm_catalog_count",
"def standard_catalog_count(self, standard_catalog_count):\n\n self._standard_catalog_count = standard_catalog_count",
"def advanced_catalog_count(self, advanced_catalog_count):\n\n self._advanced_catalog_count = advanced_catalog_count",
"def count_all_catalogs(self):\n return self.manager.count_entities(ModelCatalog)",
"def count_all_catalog_services(self):\n return self.manager.count_entities(ModelEndpoint)",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def catalog_id(self, catalog_id):\n self._catalog_id = catalog_id",
"def __init__(self,\n advanced_catalog_count=None,\n bm_catalog_count=None,\n container_catalog_count=None,\n esxi_host_count=None,\n external_group_count=None,\n hyperv_host_count=None,\n local_group_count=None,\n standard_catalog_count=None,\n user_count=None,\n vdc_count=None,\n vm_count=None,\n guid=None,\n local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._advanced_catalog_count = None\n self._bm_catalog_count = None\n self._container_catalog_count = None\n self._esxi_host_count = None\n self._external_group_count = None\n self._hyperv_host_count = None\n self._local_group_count = None\n self._standard_catalog_count = None\n self._user_count = None\n self._vdc_count = None\n self._vm_count = None\n self._guid = None\n self.discriminator = None\n\n if advanced_catalog_count is not None:\n self.advanced_catalog_count = advanced_catalog_count\n if bm_catalog_count is not None:\n self.bm_catalog_count = bm_catalog_count\n if container_catalog_count is not None:\n self.container_catalog_count = container_catalog_count\n if esxi_host_count is not None:\n self.esxi_host_count = esxi_host_count\n if external_group_count is not None:\n self.external_group_count = external_group_count\n if hyperv_host_count is not None:\n self.hyperv_host_count = hyperv_host_count\n if local_group_count is not None:\n self.local_group_count = local_group_count\n if standard_catalog_count is not None:\n self.standard_catalog_count = standard_catalog_count\n if user_count is not None:\n self.user_count = user_count\n if vdc_count is not None:\n self.vdc_count = vdc_count\n if vm_count is not None:\n self.vm_count = vm_count\n if guid is not None:\n self.guid = guid",
"def _set_catalog(self, catalog: cat.Catalog) -> None:\n self._catalog_interface = CatalogInterface(catalog)\n self._catalog = catalog",
"def set_city_count(self, city_count):\n self.city_count = city_count",
"def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)",
"def upload_catalog(self, catalog: Catalog) -> None:\n self._status.check_authority_for_draft()\n\n put_data: Dict[str, Any] = {\"catalog\": catalog.dumps()}\n if not put_data:\n raise TypeError(\"Empty catalog\")\n put_data.update(self._status.get_status_info())\n\n self._client.open_api_do(\"PUT\", \"labels/catalogs\", self.dataset_id, json=put_data)",
"def catalog_merge(self, catalog_cols=None):\n\n for cluster_info in self._catalog_dictionary.values():\n # Array element names\n catalog_idx = cluster_info['SPT_cat_idx']\n se_catalog = cluster_info['catalog']\n\n # Replace the existing SPT_ID in the SExtractor catalog with the official cluster ID.\n # se_catalog.columns[0].name = 'SPT_ID'\n # del se_catalog['SPT_ID']\n\n # Then replace the column values with the official ID.\n se_catalog['SPT_ID'] = self._spt_catalog['SPT_ID'][catalog_idx]\n\n # Add the SZ center coordinates to the catalog\n se_catalog['SZ_RA'] = self._spt_catalog['RA'][catalog_idx]\n se_catalog['SZ_DEC'] = self._spt_catalog['DEC'][catalog_idx]\n\n # For all requested columns from the master catalog add the value to all columns in the SExtractor catalog.\n if catalog_cols is not None:\n for col_name in catalog_cols:\n se_catalog[col_name] = self._spt_catalog[col_name][catalog_idx]\n\n cluster_info['catalog'] = se_catalog",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def get_num_of_containers(self):\n Container.num_of_cntnrs = len(Container.containers)\n return self.num_of_cntnrs",
"def update_necrosis_count(self, number):\n\n print(\"controller - update_necrosis_count!\")\n self.view.processing_gui.update_necrosis_count(number)",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def test_api_ucs_get_catalog(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n total_elements = 0\n for elementTypes in api_data[\"json\"]:\n for element in api_data[\"json\"][str(elementTypes)]:\n api_data_c = request(\"get\", \"/catalog\",\n query={\"identifier\": element[\"relative_path\"].strip(\"/\")})\n self.assertEqual(api_data_c['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' +\n str(api_data_c['status']))\n total_elements += 1\n self.assertGreater(total_elements, 0, \"Zero catalog elements found\")\n # TO DO: deeper check on the catalog data",
"def setCount(self, num):\n self.count=num",
"def setNumberOfTraces(self,numberOfTraces: int) -> None:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN \" + str(numberOfTraces))\n\n return",
"def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")",
"def ucat(catalogName, subarray=DEFAULT) :\n stripCat = catalogName.strip() \n if stripCat == \"\": catalogName = 'NONE'\n else : catalogName = stripCat\n multiSubarray('ucat', subarray, catalogName)",
"def setACSLabel(self, label):\n\n if self._n_csets:\n if label is None or isinstance(label, str):\n self._cslabels[self._acsi] = label\n else:\n raise TypeError('label must be a string')",
"def get_paginate_by(self, queryset):\n return config.PUBLICATION_BACKBONE_CATALOG_PER_PAGE_ITEMS_COUNT",
"def processor_count(self, processor_count):\n\n self._processor_count = processor_count",
"def update_library_count(self, ebook_count):\n\t\tself.library_count = ebook_count",
"def set_count(c):\n global count\n count = c",
"def set_count(self, count):\n self._count = count",
"def set_circuit_remix_count(self, circuit_id, number):\n key = ':'.join(\n [CIRCUIT_NMBR_RMX_1, \n str(circuit_id), \n CIRCUIT_NMBR_RMX_2]\n )\n self.RS.set(key, number)"
] | [
"0.6810308",
"0.6344454",
"0.59025335",
"0.5486347",
"0.51642656",
"0.5138865",
"0.50294644",
"0.5021692",
"0.50136554",
"0.49914697",
"0.48896855",
"0.4718432",
"0.46324363",
"0.45579195",
"0.45579195",
"0.44740677",
"0.4469012",
"0.44313428",
"0.44271842",
"0.44019943",
"0.4393225",
"0.43922958",
"0.43796965",
"0.43569282",
"0.43443045",
"0.43405145",
"0.43289566",
"0.4326403",
"0.43258992",
"0.432205"
] | 0.8005635 | 0 |
Sets the esxi_host_count of this IaasUcsdManagedInfraAllOf. | def esxi_host_count(self, esxi_host_count):
self._esxi_host_count = esxi_host_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hyperv_host_count(self, hyperv_host_count):\n\n self._hyperv_host_count = hyperv_host_count",
"def hosts_every(self, hosts_every):\n\n self._hosts_every = hosts_every",
"def vcpu_set(vm_hostname, count, offline=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n _check_defined(vm)\n\n if offline and not vm.is_running():\n log.info(\n '\"{}\" is already powered off, ignoring --offline.'.format(\n vm.fqdn)\n )\n offline = False\n\n if count == vm.dataset_obj['num_cpu']:\n raise Warning('CPU count is the same.')\n\n if offline:\n vm.shutdown()\n vm.set_num_cpu(count)\n if offline:\n vm.start()",
"def host_count(self) -> list:\n return self.__host_count",
"def set_entity_count(cls, count):\n return cls.db.set(\"entity_count\", count)",
"def host_num(self, host_num):\n\n self._host_num = host_num",
"def hosts(self, value):\n if not isinstance(value, NodeSet):\n raise TypeError(\"Invalid fio host NodeSet: {} ({})\".format(value, type(value)))\n self._hosts = value.copy()",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def host_num_in(self, host_num_in):\n\n self._host_num_in = host_num_in",
"def set_hosts(self, hypervisor_per_cluster=False):\n\n self.conf['hosts'] = set()\n\n host_patterns, host_others = self._sift_patterns(\n self.conf.get('hosts_list')\n )\n datacenter_patterns = self.conf.get('datacenter', [])\n cluster_patterns = self.conf.get('cluster', [])\n\n if host_patterns:\n self.conf['host_pattern'] = host_patterns\n\n self.conf['hosts'] = self._get_hypervisors_from_api()\n # Filter all host specified with -H\n host_filtered = set()\n if host_others:\n host_filtered = set([\n (dc, cl, h, is_spm, is_up)\n for dc, cl, h, is_spm, is_up in self.conf['hosts']\n if h in host_others\n ])\n not_found = host_others - set(host[2] for host in host_filtered)\n if not_found != set():\n # try to resolve to ip specified hosts\n for fqdn in set(not_found):\n try:\n ipaddr = socket.gethostbyname(fqdn)\n logging.debug('%s --> %s' % (fqdn, ipaddr))\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n if h == ipaddr:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(fqdn)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=fqdn,\n )\n )\n if not_found != set():\n # try to resolve to ip known hypervisors\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n try:\n ipaddr = socket.gethostbyname(h)\n logging.debug('%s --> %s' % (h, ipaddr))\n if ipaddr in host_others:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(ipaddr)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=h,\n )\n )\n if not_found != set():\n logging.error(\n _(\n 'The following host are not listed as hypervisors: '\n '{not_listed}. Known hypervisors can be listed using '\n 'the list command'\n ).format(\n not_listed=','.join(not_found)\n )\n )\n sys.exit(ExitCodes.CRITICAL)\n\n orig_hosts = self.conf['hosts'].copy()\n\n if host_patterns:\n for pattern in host_patterns:\n host_filtered |= self._filter_hosts('host', pattern)\n if host_patterns or host_others:\n self.conf['hosts'] &= host_filtered\n\n # Intersect with hosts belonging to the data centers specified with -d\n if datacenter_patterns:\n datacenter_filtered = set()\n for pattern in datacenter_patterns:\n datacenter_filtered |= self._filter_hosts(\n 'datacenter', pattern\n )\n self.conf['hosts'] &= datacenter_filtered\n\n # Intersect with hosts belonging to the clusters specified with -c\n if cluster_patterns:\n # remove all hosts that don't match the patterns\n cluster_filtered = set()\n for pattern in cluster_patterns:\n cluster_filtered |= self._filter_hosts('cluster', pattern)\n self.conf['hosts'] &= cluster_filtered\n\n # If hypervisor_per_cluster is set, collect data only from a single\n # hypervisor per cluster; if the Spm found, collect data from it.\n if hypervisor_per_cluster:\n selected_hosts = dict()\n for dc, cluster, host, is_spm, is_up in self.conf['hosts']:\n # Always add the SPM\n if is_spm:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # For the given cluster, if no host added yet, add it\n elif cluster.name not in selected_hosts:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # If a host is up and the SPM isn't added yet, add this host\n elif is_up and not selected_hosts[cluster.name][3]:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n self.conf['hosts'] &= set(selected_hosts.values())\n\n # warn users if they are going to collect logs from all hosts.\n if orig_hosts and self.conf['hosts'] == orig_hosts:\n logging.warning(\n _(\n 'This ovirt-log-collector 
call will collect logs from '\n 'all available hosts. This may take long time, '\n 'depending on the size of your deployment'\n )\n )\n\n return bool(self.conf.get('hosts'))",
"def update_cluster_hosts(self, hosts):\n self._hosts = hosts\n self._collect_hosts_d = True",
"def employee_count(self, employee_count):\n\n self._employee_count = employee_count",
"def external_group_count(self, external_group_count):\n\n self._external_group_count = external_group_count",
"def set_all(self, host_names, ip_address):\n for host_name in host_names:\n self.set_one(host_name, ip_address)",
"def setVoxelSize(self, vxs):\n\t\tself.voxelsize = vxs\n\t\ta, b, c = vxs\n\t\tself.spacing = [1, b / a, c / a]",
"def vcpus(self, vcpus):\n self._vcpus = vcpus",
"def index(self, req):\n LOG.info(\"List all the nova-compute hosts in the system\")\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n services = dbapi.service_get_all_compute_sorted(ctxt)\n # services looks like (Service(object), Decimal('0'))\n # must convert from Decimal('0') to int() because no JSON repr\n hosts = [{'name':srv[0].host,\n 'instanceCount':int(srv[1])}\n for srv in services]\n return {'hosts': hosts}",
"def set_autoscaled_instances(self, instance_count: int) -> None:\n set_instances_for_marathon_service(\n service=self.service,\n instance=self.instance,\n instance_count=instance_count,\n )",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def init_cpu_counts(host):\n host.cpu_functions = {}\n host.cpu_lists = {}\n for s in range(0, len(host.nodes)):\n host.cpu_functions[s] = {}\n for f in CORE_FUNCTIONS:\n host.cpu_functions[s][f] = []\n host.cpu_lists[s] = []",
"def vms_every(self, vms_every):\n\n self._vms_every = vms_every",
"def all_hosts(self):\n ...",
"def set_article_count(cls, count):\n return cls.db.set(\"article_count\", count)",
"def hcxes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateCloudHcxArgs']]]]:\n return pulumi.get(self, \"hcxes\")",
"def hosts(self, hosts):\n self._hosts = hosts",
"def hcxes(self) -> pulumi.Output[Sequence['outputs.PrivateCloudHcx']]:\n return pulumi.get(self, \"hcxes\")",
"def datacenters_every(self, datacenters_every):\n\n self._datacenters_every = datacenters_every",
"def hosts_some(self, hosts_some):\n\n self._hosts_some = hosts_some",
"def eps_count_estimate(self, eps_count_estimate):\n\n self._eps_count_estimate = eps_count_estimate",
"def set_indices(self, part_instance_counts):\n type_indices = {}\n for entry in self._entries:\n try:\n entry.set_indices(\n model_type_index=type_indices.setdefault(entry.ENTRY_SUBTYPE, 0),\n instance_count=part_instance_counts.get(entry.name, 0),\n )\n except KeyError as e:\n raise SoulstructError(\n f\"Invalid map component name for {entry.ENTRY_SUBTYPE.name} model {entry.name}: {e}\"\n )\n else:\n type_indices[entry.ENTRY_SUBTYPE] += 1"
] | [
"0.59950227",
"0.5260858",
"0.50562745",
"0.50244135",
"0.49948582",
"0.48857233",
"0.48497176",
"0.48385507",
"0.482034",
"0.4762812",
"0.47252673",
"0.4718971",
"0.47022638",
"0.46596396",
"0.46280846",
"0.45461932",
"0.45392084",
"0.45244592",
"0.45231992",
"0.45225677",
"0.45150542",
"0.45062536",
"0.45039192",
"0.44987655",
"0.4494041",
"0.44755217",
"0.44519544",
"0.44330424",
"0.44218957",
"0.44173533"
] | 0.8037844 | 0 |
Sets the external_group_count of this IaasUcsdManagedInfraAllOf. | def external_group_count(self, external_group_count):
self._external_group_count = external_group_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_group_count(self, local_group_count):\n\n self._local_group_count = local_group_count",
"def external_ids(self, external_ids):\n\n self._external_ids = external_ids",
"def esxi_host_count(self, esxi_host_count):\n\n self._esxi_host_count = esxi_host_count",
"def set_all_data_external(\n self, check_data=True, external_data_folder=None\n ):\n # set blocks\n for key, block in self.blocks.items():\n file_name = os.path.split(self.filename)[1]\n block.set_all_data_external(\n file_name, check_data, external_data_folder\n )\n # set sub-packages\n for package in self._packagelist:\n package.set_all_data_external(check_data, external_data_folder)",
"def external_id(self, external_id):\n if external_id is not None and len(external_id) > 255:\n raise ValueError(\"Invalid value for `external_id`, length must be less than or equal to `255`\") # noqa: E501\n\n self._external_id = external_id",
"def total_nucleus_clients(self, total_nucleus_clients):\n\n self._total_nucleus_clients = total_nucleus_clients",
"def _set_group_resource(self, _g):\n\n if isinstance(_g, Server):\n return\n\n for _, sg in _g.subgroups.items():\n self._set_group_resource(sg)\n _g.vCPUs += sg.vCPUs\n _g.mem += sg.mem\n _g.local_volume_size += sg.local_volume_size",
"def consistency_groups_every(self, consistency_groups_every):\n\n self._consistency_groups_every = consistency_groups_every",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def external_id(self, external_id):\n\n self._external_id = external_id",
"def external_id(self, external_id):\n\n self._external_id = external_id",
"def external_id(self, external_id):\n\n self._external_id = external_id",
"def update_num_rois_per_group(self,num_rois_per_group=None):\n if num_rois_per_group is not None:\n self.roi_groups[0].set_num_rois(num_rois_per_group)\n self.signal_status_message.emit('Updated number of ROIs/group to {}'.format(num_rois_per_group))\n num_rois_per_group = self.roi_groups[0].get_num_rois()\n for group in self.roi_groups[1:]:\n group.set_num_rois(num_rois_per_group)\n self.signal_num_rois_per_group.emit(num_rois_per_group)\n self.send_roi_coords()",
"def consistency_group_num_in(self, consistency_group_num_in):\n\n self._consistency_group_num_in = consistency_group_num_in",
"def hyperv_host_count(self, hyperv_host_count):\n\n self._hyperv_host_count = hyperv_host_count",
"def namespace_group_num_in(self, namespace_group_num_in):\n\n self._namespace_group_num_in = namespace_group_num_in",
"def namespace_groups_every(self, namespace_groups_every):\n\n self._namespace_groups_every = namespace_groups_every",
"def _init_ext_info(self):\n IterativeCooperation._init_ext_info(self)\n\n for node in self._global_scenario.network.nodes:\n system = self._cluster_systems[node.id][0]\n for time_step in range(self.control_sequence_length):\n ctrl_limit = self._cluster_ctrl_limits[node.id][time_step]\n env_input = self._cluster_env_inputs[node.id][time_step]\n\n for app in system.apps:\n for ext_node in self._global_scenario.network.nodes:\n if node == ext_node:\n continue\n\n max_dispatch_load = 0.0\n if ext_node.is_cloud():\n max_dispatch_load = math.inf\n ctrl_limit.max_dispatch_load[app.id][ext_node.id] = max_dispatch_load\n\n env_input.generated_load[app.id][ext_node.id] = 0.0\n env_input.additional_received_load[app.id][ext_node.id] = 0.0\n env_input.nb_instances[app.id][ext_node.id] = 0\n if self._global_control_input is not None:\n nb_instances = self._global_control_input.get_max_app_placement(app.id, ext_node.id)\n env_input.nb_instances[app.id][ext_node.id] = nb_instances",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)",
"def update_num_roi_groups(self,num_roi_groups):\n # print('MAIA: num roi groups {}'.format(num_roi_groups))\n if num_roi_groups is not None:\n for _ in range(num_roi_groups,len(self.roi_groups)): # delete unneeded ROIs\n self.roi_groups.pop()\n for _ in range(len(self.roi_groups), num_roi_groups): # make new ROIs\n self.roi_groups.append(ROIGroup(num_images=self.num_images))\n self.signal_status_message.emit('Updated number of ROI groups to {}'.format(num_roi_groups))\n self.update_num_rois_per_group() # ensures that newly created ROI groups have the right number of ROIs\n num_roi_groups = len(self.roi_groups)\n self.signal_num_roi_groups.emit(num_roi_groups)\n # self.send_roi_coords() # this will be send when updating the number of ROIs per group anyway",
"def resize_eip_group_count(self, id, eip_add_count,\n client_token=None, config=None):\n path = utils.append_uri(self._get_path(), id)\n if client_token is None:\n client_token = generate_client_token()\n params = {\n b'resize': None,\n b'clientToken': client_token\n }\n body = {\n 'eipAddCount': eip_add_count\n }\n return self._send_request(http_methods.PUT,\n path, body=json.dumps(body),\n params=params, config=config)",
"def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()",
"def external_id(self, external_id):\n if external_id is None:\n raise ValueError(\"Invalid value for `external_id`, must not be `None`\") # noqa: E501\n\n self._external_id = external_id",
"def set_entity_count(cls, count):\n return cls.db.set(\"entity_count\", count)",
"def clusters_every(self, clusters_every):\n\n self._clusters_every = clusters_every",
"def consistency_group_num_lte(self, consistency_group_num_lte):\n\n self._consistency_group_num_lte = consistency_group_num_lte",
"def set_externals_state(self, dp_name, externals_up):\n dp_conf = self._get_faucet_conf()['dps'][dp_name]\n for port_num, port_conf in dp_conf['interfaces'].items():\n if port_conf.get('loop_protect_external'):\n if externals_up:\n self.set_port_up(port_num, dp_conf.get('dp_id'))\n else:\n self.set_port_down(port_num, dp_conf.get('dp_id'))",
"def consistency_group_num(self, consistency_group_num):\n\n self._consistency_group_num = consistency_group_num",
"def lun_count(self, lun_count):\n\n self._lun_count = lun_count"
] | [
"0.53656894",
"0.4734468",
"0.4715213",
"0.4630498",
"0.46285418",
"0.4532851",
"0.45295992",
"0.44841346",
"0.44807005",
"0.44654495",
"0.44654495",
"0.44654495",
"0.44536784",
"0.4435368",
"0.43804",
"0.42757764",
"0.42720485",
"0.42300162",
"0.42260072",
"0.4207612",
"0.41835812",
"0.4180668",
"0.41730314",
"0.41576615",
"0.41405568",
"0.41390336",
"0.41054726",
"0.40963188",
"0.40958533",
"0.4085947"
] | 0.75556606 | 0 |
Sets the hyperv_host_count of this IaasUcsdManagedInfraAllOf. | def hyperv_host_count(self, hyperv_host_count):
self._hyperv_host_count = hyperv_host_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def esxi_host_count(self, esxi_host_count):\n\n self._esxi_host_count = esxi_host_count",
"def vcpu_set(vm_hostname, count, offline=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n _check_defined(vm)\n\n if offline and not vm.is_running():\n log.info(\n '\"{}\" is already powered off, ignoring --offline.'.format(\n vm.fqdn)\n )\n offline = False\n\n if count == vm.dataset_obj['num_cpu']:\n raise Warning('CPU count is the same.')\n\n if offline:\n vm.shutdown()\n vm.set_num_cpu(count)\n if offline:\n vm.start()",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def vcpus(self, vcpus):\n self._vcpus = vcpus",
"def host_num(self, host_num):\n\n self._host_num = host_num",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def host_count(self) -> list:\n return self.__host_count",
"def host_num_in(self, host_num_in):\n\n self._host_num_in = host_num_in",
"def set_hosts(self, hypervisor_per_cluster=False):\n\n self.conf['hosts'] = set()\n\n host_patterns, host_others = self._sift_patterns(\n self.conf.get('hosts_list')\n )\n datacenter_patterns = self.conf.get('datacenter', [])\n cluster_patterns = self.conf.get('cluster', [])\n\n if host_patterns:\n self.conf['host_pattern'] = host_patterns\n\n self.conf['hosts'] = self._get_hypervisors_from_api()\n # Filter all host specified with -H\n host_filtered = set()\n if host_others:\n host_filtered = set([\n (dc, cl, h, is_spm, is_up)\n for dc, cl, h, is_spm, is_up in self.conf['hosts']\n if h in host_others\n ])\n not_found = host_others - set(host[2] for host in host_filtered)\n if not_found != set():\n # try to resolve to ip specified hosts\n for fqdn in set(not_found):\n try:\n ipaddr = socket.gethostbyname(fqdn)\n logging.debug('%s --> %s' % (fqdn, ipaddr))\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n if h == ipaddr:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(fqdn)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=fqdn,\n )\n )\n if not_found != set():\n # try to resolve to ip known hypervisors\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n try:\n ipaddr = socket.gethostbyname(h)\n logging.debug('%s --> %s' % (h, ipaddr))\n if ipaddr in host_others:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(ipaddr)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=h,\n )\n )\n if not_found != set():\n logging.error(\n _(\n 'The following host are not listed as hypervisors: '\n '{not_listed}. Known hypervisors can be listed using '\n 'the list command'\n ).format(\n not_listed=','.join(not_found)\n )\n )\n sys.exit(ExitCodes.CRITICAL)\n\n orig_hosts = self.conf['hosts'].copy()\n\n if host_patterns:\n for pattern in host_patterns:\n host_filtered |= self._filter_hosts('host', pattern)\n if host_patterns or host_others:\n self.conf['hosts'] &= host_filtered\n\n # Intersect with hosts belonging to the data centers specified with -d\n if datacenter_patterns:\n datacenter_filtered = set()\n for pattern in datacenter_patterns:\n datacenter_filtered |= self._filter_hosts(\n 'datacenter', pattern\n )\n self.conf['hosts'] &= datacenter_filtered\n\n # Intersect with hosts belonging to the clusters specified with -c\n if cluster_patterns:\n # remove all hosts that don't match the patterns\n cluster_filtered = set()\n for pattern in cluster_patterns:\n cluster_filtered |= self._filter_hosts('cluster', pattern)\n self.conf['hosts'] &= cluster_filtered\n\n # If hypervisor_per_cluster is set, collect data only from a single\n # hypervisor per cluster; if the Spm found, collect data from it.\n if hypervisor_per_cluster:\n selected_hosts = dict()\n for dc, cluster, host, is_spm, is_up in self.conf['hosts']:\n # Always add the SPM\n if is_spm:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # For the given cluster, if no host added yet, add it\n elif cluster.name not in selected_hosts:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # If a host is up and the SPM isn't added yet, add this host\n elif is_up and not selected_hosts[cluster.name][3]:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n self.conf['hosts'] &= set(selected_hosts.values())\n\n # warn users if they are going to collect logs from all hosts.\n if orig_hosts and self.conf['hosts'] == orig_hosts:\n logging.warning(\n _(\n 'This ovirt-log-collector 
call will collect logs from '\n 'all available hosts. This may take long time, '\n 'depending on the size of your deployment'\n )\n )\n\n return bool(self.conf.get('hosts'))",
"def hosts_every(self, hosts_every):\n\n self._hosts_every = hosts_every",
"def lun_count(self, lun_count):\n\n self._lun_count = lun_count",
"def host_status_control(self, host_status_control):\n\n self._host_status_control = host_status_control",
"def init_cpu_counts(host):\n host.cpu_functions = {}\n host.cpu_lists = {}\n for s in range(0, len(host.nodes)):\n host.cpu_functions[s] = {}\n for f in CORE_FUNCTIONS:\n host.cpu_functions[s][f] = []\n host.cpu_lists[s] = []",
"def hosts(self, value):\n if not isinstance(value, NodeSet):\n raise TypeError(\"Invalid fio host NodeSet: {} ({})\".format(value, type(value)))\n self._hosts = value.copy()",
"def number_of_nodes(self, number_of_nodes):\n\n self._number_of_nodes = number_of_nodes",
"def restructure_host_cpu_data(host):\n init_cpu_counts(host)\n host.sockets = len(host.nodes or [])\n host.hyperthreading = False\n host.physical_cores = 0\n if not host.cpus:\n return\n host.cpu_model = host.cpus[0].cpu_model\n cpu_list = sorted(host.cpus, key=_sort_by_coreid)\n for cpu in cpu_list:\n inode = pecan.request.dbapi.inode_get(inode_id=cpu.forinodeid)\n cpu.numa_node = inode.numa_node\n if cpu.thread == 0:\n host.physical_cores += 1\n elif cpu.thread > 0:\n host.hyperthreading = True\n function = cpu.allocated_function or get_default_function(host)\n host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))\n host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))",
"def advertise_osd_count(count):\n for relid in hookenv.relation_ids('mon'):\n hookenv.relation_set(\n relation_id=relid,\n relation_settings={'bootstrapped-osds': count}\n )",
"def external_group_count(self, external_group_count):\n\n self._external_group_count = external_group_count",
"def vms_every(self, vms_every):\n\n self._vms_every = vms_every",
"def update_core_allocations(host, cpu_counts):\n # Remove any previous assignments\n for s in range(0, len(host.nodes)):\n for f in CORE_FUNCTIONS:\n host.cpu_functions[s][f] = []\n # Set new assignments\n for s in range(0, len(host.nodes)):\n cpu_list = host.cpu_lists[s] if s in host.cpu_lists else []\n # Reserve for the platform first\n for i in range(0, cpu_counts[s][constants.PLATFORM_FUNCTION]):\n host.cpu_functions[s][constants.PLATFORM_FUNCTION].append(\n cpu_list.pop(0))\n # Reserve for the vswitch next\n for i in range(0, cpu_counts[s][constants.VSWITCH_FUNCTION]):\n host.cpu_functions[s][constants.VSWITCH_FUNCTION].append(\n cpu_list.pop(0))\n # Reserve for the shared next\n for i in range(0, cpu_counts[s][constants.SHARED_FUNCTION]):\n host.cpu_functions[s][constants.SHARED_FUNCTION].append(\n cpu_list.pop(0))\n for i in range(0, cpu_counts[s][constants.ISOLATED_FUNCTION]):\n host.cpu_functions[s][constants.ISOLATED_FUNCTION].append(\n cpu_list.pop(0))\n # Assign the remaining cpus to the default function for this host\n host.cpu_functions[s][get_default_function(host)] += cpu_list\n return",
"def host_num_lte(self, host_num_lte):\n\n self._host_num_lte = host_num_lte",
"def host_num_gt(self, host_num_gt):\n\n self._host_num_gt = host_num_gt",
"def device_count(self, device_count):\n\n self._device_count = device_count",
"def set_entity_count(cls, count):\n return cls.db.set(\"entity_count\", count)",
"def add_nodes(self, count=1):\n self.log.info('Adding %d nodes' % count)\n new_nodes = []\n Node.flavor = env_vars['client_flavor']\n for i in range(count):\n #check if cluster did not previously exist\n if i == 0 and len(self.all_nodes) == 0:\n # give a floating IPv4 to the first node only\n new_guy = Node(self.cluster_name, '', len(self.all_nodes)+1, create=True, IPv4=True)\n else:\n new_guy = Node(self.cluster_name, node_type=\"\", number=len(self.all_nodes)+1, create=True)\n self.all_nodes.append(new_guy)\n new_nodes.append(new_guy)\n self.save_cluster()\n for n in new_nodes:\n n.wait_ready()\n #inject host files to everybody\n n.inject_hostnames(self.get_hosts(private=True), delete=self.cluster_name)\n n.bootstrap()\n self.log.info(\"Node %s is live \" % new_guy.name)\n #inform all\n self.inject_hosts_files()",
"def node_count(self, node_count):\n\n self._node_count = node_count",
"def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()",
"def config_ha(self, vlan_tag, vip_address):\n\n if len(self.hostnames) == 1:\n LOG.debug(\"Only one machine, doesn't need to configure HA\")\n return True\n\n interface_name = self.in_interface\n if vlan_tag:\n interface_name = \"vlan.\" + vlan_tag\n\n cmd_apv_config_virtual_iface = ADCDevice.cluster_config_virtual_interface(interface_name)\n cmd_apv_config_virtual_vip = ADCDevice.cluster_config_vip(interface_name, vip_address)\n cmd_apv_cluster_enable = ADCDevice.cluster_enable(interface_name)\n\n priority = 1\n for base_rest_url in self.base_rest_urls:\n self.run_cli_extend(base_rest_url, cmd_apv_config_virtual_iface)\n self.run_cli_extend(base_rest_url, cmd_apv_config_virtual_vip)\n\n priority += 10\n cmd_apv_config_virtual_prior = ADCDevice.cluster_config_priority(interface_name, priority)\n self.run_cli_extend(base_rest_url, cmd_apv_config_virtual_prior)\n\n self.run_cli_extend(base_rest_url, cmd_apv_cluster_enable)"
] | [
"0.66588247",
"0.5818077",
"0.5640094",
"0.5640094",
"0.5394924",
"0.5279623",
"0.5277185",
"0.52716357",
"0.5202823",
"0.50858754",
"0.5027976",
"0.4993971",
"0.4947955",
"0.4938431",
"0.48995036",
"0.4849338",
"0.4816406",
"0.4814211",
"0.48059392",
"0.47629017",
"0.47593567",
"0.4754853",
"0.47468898",
"0.4743043",
"0.46774286",
"0.46355253",
"0.46348038",
"0.46311286",
"0.4624137",
"0.4622257"
] | 0.7839512 | 0 |
Sets the local_group_count of this IaasUcsdManagedInfraAllOf. | def local_group_count(self, local_group_count):
self._local_group_count = local_group_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def external_group_count(self, external_group_count):\n\n self._external_group_count = external_group_count",
"def _set_group_resource(self, _g):\n\n if isinstance(_g, Server):\n return\n\n for _, sg in _g.subgroups.items():\n self._set_group_resource(sg)\n _g.vCPUs += sg.vCPUs\n _g.mem += sg.mem\n _g.local_volume_size += sg.local_volume_size",
"def __init__(__self__, *,\n local_ssd_count: Optional[pulumi.Input[int]] = None):\n if local_ssd_count is not None:\n pulumi.set(__self__, \"local_ssd_count\", local_ssd_count)",
"def __init__(__self__, *,\n local_ssd_count: Optional[pulumi.Input[int]] = None):\n if local_ssd_count is not None:\n pulumi.set(__self__, \"local_ssd_count\", local_ssd_count)",
"def __init__(__self__, *,\n local_ssd_count: Optional[pulumi.Input[int]] = None):\n if local_ssd_count is not None:\n pulumi.set(__self__, \"local_ssd_count\", local_ssd_count)",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def local_id(self, local_id):\n\n self._local_id = local_id",
"def local_id(self, local_id):\n\n self._local_id = local_id",
"def local_id(self, local_id):\n\n self._local_id = local_id",
"def local_id(self, local_id):\n\n self._local_id = local_id",
"def namespace_group_num_in(self, namespace_group_num_in):\n\n self._namespace_group_num_in = namespace_group_num_in",
"def local_category(self, local_category: str):\n\n self._local_category = local_category",
"def local_id_in(self, local_id_in):\n\n self._local_id_in = local_id_in",
"def local_id_in(self, local_id_in):\n\n self._local_id_in = local_id_in",
"def local_id_in(self, local_id_in):\n\n self._local_id_in = local_id_in",
"def local_id_in(self, local_id_in):\n\n self._local_id_in = local_id_in",
"def _ensure_local_neo4j_has_test_computergroup_data(neo4j_session):\n groups = tests.data.jamf.computers.GROUPS\n cartography.intel.jamf.computers.load_computer_groups(groups, neo4j_session, TEST_UPDATE_TAG)",
"def local_id_gte(self, local_id_gte):\n\n self._local_id_gte = local_id_gte",
"def local_id_gte(self, local_id_gte):\n\n self._local_id_gte = local_id_gte",
"def local_id_gte(self, local_id_gte):\n\n self._local_id_gte = local_id_gte",
"def local_id_gte(self, local_id_gte):\n\n self._local_id_gte = local_id_gte",
"def total_nucleus_clients(self, total_nucleus_clients):\n\n self._total_nucleus_clients = total_nucleus_clients",
"def _set_group_weight(self, _group):\n\n if self.resource.CPU_avail > 0:\n _group.vCPU_weight = float(_group.vCPUs) / float(self.resource.CPU_avail)\n else:\n if _group.vCPUs > 0:\n _group.vCPU_weight = 1.0\n else:\n _group.vCPU_weight = 0.0\n\n if self.resource.mem_avail > 0:\n _group.mem_weight = float(_group.mem) / float(self.resource.mem_avail)\n else:\n if _group.mem > 0:\n _group.mem_weight = 1.0\n else:\n _group.mem_weight = 0.0\n\n if self.resource.local_disk_avail > 0:\n _group.local_volume_weight = float(_group.local_volume_size) / float(self.resource.local_disk_avail)\n else:\n if _group.local_volume_size > 0:\n _group.local_volume_weight = 1.0\n else:\n _group.local_volume_weight = 0.0\n\n for _, sg in _group.subgroups.items():\n if isinstance(sg, Group):\n self._set_group_weight(sg)",
"def local_id_gt(self, local_id_gt):\n\n self._local_id_gt = local_id_gt",
"def local_id_gt(self, local_id_gt):\n\n self._local_id_gt = local_id_gt",
"def local_id_gt(self, local_id_gt):\n\n self._local_id_gt = local_id_gt",
"def local_id_gt(self, local_id_gt):\n\n self._local_id_gt = local_id_gt",
"def lun_count(self, lun_count):\n\n self._lun_count = lun_count",
"def local_id(self, local_id):\n if self.local_vars_configuration.client_side_validation and local_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `local_id`, must not be `None`\") # noqa: E501\n\n self._local_id = local_id",
"def update_num_rois_per_group(self,num_rois_per_group=None):\n if num_rois_per_group is not None:\n self.roi_groups[0].set_num_rois(num_rois_per_group)\n self.signal_status_message.emit('Updated number of ROIs/group to {}'.format(num_rois_per_group))\n num_rois_per_group = self.roi_groups[0].get_num_rois()\n for group in self.roi_groups[1:]:\n group.set_num_rois(num_rois_per_group)\n self.signal_num_rois_per_group.emit(num_rois_per_group)\n self.send_roi_coords()"
] | [
"0.5947293",
"0.509726",
"0.4911288",
"0.4911288",
"0.4911288",
"0.4881376",
"0.4801572",
"0.4801572",
"0.4801572",
"0.4801572",
"0.46374658",
"0.46280244",
"0.46024618",
"0.46024618",
"0.46024618",
"0.46024618",
"0.45948556",
"0.45816755",
"0.45816755",
"0.45816755",
"0.45816755",
"0.45652",
"0.45283303",
"0.4524158",
"0.4524158",
"0.4524158",
"0.4524158",
"0.44918805",
"0.44677573",
"0.44227064"
] | 0.76894385 | 0 |
Sets the standard_catalog_count of this IaasUcsdManagedInfraAllOf. | def standard_catalog_count(self, standard_catalog_count):
self._standard_catalog_count = standard_catalog_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def container_catalog_count(self, container_catalog_count):\n\n self._container_catalog_count = container_catalog_count",
"def bm_catalog_count(self, bm_catalog_count):\n\n self._bm_catalog_count = bm_catalog_count",
"def advanced_catalog_count(self, advanced_catalog_count):\n\n self._advanced_catalog_count = advanced_catalog_count",
"def setNumberOfTraces(self,numberOfTraces: int) -> None:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN \" + str(numberOfTraces))\n\n return",
"def count_all_catalogs(self):\n return self.manager.count_entities(ModelCatalog)",
"def custom_compliance_standard_id(self, custom_compliance_standard_id):\n\n self._custom_compliance_standard_id = custom_compliance_standard_id",
"def _set_readcount_related_data_to_zero(self):\n self.total_read_count = 0\n self.perfect_read_count = 0\n self.RISCC_genome_side_aligned_reads = {}\n self.RISCC_genome_side_unaligned_reads = {}\n self.sequences_counts_positions_errors = {}\n # TODO should all this really be readcount-related? Well, it IS, but when I have a multi-dataset mutant, do I really want to keep the seq/position/count details and the genome-side RISCC read data per dataset rather than total? Hard to tell, really. In a perfect world I wouldn't be doing multiple RISCC datasets anyway!",
"def solid_surface_density_CL2013_given_physical_catalog(sssp_per_sys, max_core_mass=10.):\n a_all = sssp_per_sys['a_all'][sssp_per_sys['a_all'] > 0]\n core_mass_all = np.copy(sssp_per_sys['mass_all'][sssp_per_sys['a_all'] > 0])\n core_mass_all[core_mass_all > max_core_mass] = max_core_mass\n sigma_all = solid_surface_density_CL2013(core_mass_all, a_all)\n return sigma_all, a_all",
"def count_all_catalog_services(self):\n return self.manager.count_entities(ModelEndpoint)",
"def setCompoundCount(self, count):\n return self._set(compoundCount=count)",
"def categoria_svs(self, categoria_svs):\n\n self._categoria_svs = categoria_svs",
"def catalog_id(self, catalog_id):\n self._catalog_id = catalog_id",
"def count_standard_residues(self):\n n = 0\n for na in self.iter_standard_residues():\n n += 1\n return n",
"def catalog_merge(self, catalog_cols=None):\n\n for cluster_info in self._catalog_dictionary.values():\n # Array element names\n catalog_idx = cluster_info['SPT_cat_idx']\n se_catalog = cluster_info['catalog']\n\n # Replace the existing SPT_ID in the SExtractor catalog with the official cluster ID.\n # se_catalog.columns[0].name = 'SPT_ID'\n # del se_catalog['SPT_ID']\n\n # Then replace the column values with the official ID.\n se_catalog['SPT_ID'] = self._spt_catalog['SPT_ID'][catalog_idx]\n\n # Add the SZ center coordinates to the catalog\n se_catalog['SZ_RA'] = self._spt_catalog['RA'][catalog_idx]\n se_catalog['SZ_DEC'] = self._spt_catalog['DEC'][catalog_idx]\n\n # For all requested columns from the master catalog add the value to all columns in the SExtractor catalog.\n if catalog_cols is not None:\n for col_name in catalog_cols:\n se_catalog[col_name] = self._spt_catalog[col_name][catalog_idx]\n\n cluster_info['catalog'] = se_catalog",
"def __init__(self,\n advanced_catalog_count=None,\n bm_catalog_count=None,\n container_catalog_count=None,\n esxi_host_count=None,\n external_group_count=None,\n hyperv_host_count=None,\n local_group_count=None,\n standard_catalog_count=None,\n user_count=None,\n vdc_count=None,\n vm_count=None,\n guid=None,\n local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._advanced_catalog_count = None\n self._bm_catalog_count = None\n self._container_catalog_count = None\n self._esxi_host_count = None\n self._external_group_count = None\n self._hyperv_host_count = None\n self._local_group_count = None\n self._standard_catalog_count = None\n self._user_count = None\n self._vdc_count = None\n self._vm_count = None\n self._guid = None\n self.discriminator = None\n\n if advanced_catalog_count is not None:\n self.advanced_catalog_count = advanced_catalog_count\n if bm_catalog_count is not None:\n self.bm_catalog_count = bm_catalog_count\n if container_catalog_count is not None:\n self.container_catalog_count = container_catalog_count\n if esxi_host_count is not None:\n self.esxi_host_count = esxi_host_count\n if external_group_count is not None:\n self.external_group_count = external_group_count\n if hyperv_host_count is not None:\n self.hyperv_host_count = hyperv_host_count\n if local_group_count is not None:\n self.local_group_count = local_group_count\n if standard_catalog_count is not None:\n self.standard_catalog_count = standard_catalog_count\n if user_count is not None:\n self.user_count = user_count\n if vdc_count is not None:\n self.vdc_count = vdc_count\n if vm_count is not None:\n self.vm_count = vm_count\n if guid is not None:\n self.guid = guid",
"def _set_catalog(self, catalog: cat.Catalog) -> None:\n self._catalog_interface = CatalogInterface(catalog)\n self._catalog = catalog",
"def limit_size(self, catalog):\n if len(catalog)<=self.limit:\n return catalog\n mem = {}\n for instance in catalog:\n if (instance['vCpu'], math.ceil(instance['memory'])) not in mem:\n mem[(instance['vCpu'], math.ceil(instance['memory']))] = instance\n out = [val for val in mem.values()]\n if len(out)>self.limit:\n out = sorted(out, key=lambda x: x['onDemandUsdPrice'])\n return out[:self.limit]\n return out",
"def set_nb_clusters(self):\n \n print(\"Finding the optimal number of clusters...\")\n \n sample = ro.r.matrix(self.df[self.df[\"filename\"].between(1, 4)][\"active_power\"].to_numpy())\n \n r=ro.r(\"\"\"\n check = function(matrix) {\n n_clust = fviz_nbclust(matrix, kmeans, k.max = 15)\n\n n_clust = n_clust$data\n\n max_cluster = as.numeric(n_clust$clusters[which.max(n_clust$y)])\n return(max_cluster)\n }\n \"\"\")\n\n result = r(sample)\n self.conf[\"nb_clust\"] = int(result[0])\n \n print(f\"Optimal number of clusters is {self.conf['nb_clust']}\\n\")",
"def solid_surface_density_nHill_given_physical_catalog(sssp_per_sys, sssp, max_core_mass=10., n=10.):\n a_all = sssp_per_sys['a_all'][sssp_per_sys['a_all'] > 0]\n core_mass_all = np.copy(sssp_per_sys['mass_all'])\n core_mass_all[core_mass_all > max_core_mass] = max_core_mass\n sigma_all = solid_surface_density_nHill(core_mass_all, sssp_per_sys['a_all'], Mstar=sssp['Mstar_all'][:,None], n=n)[sssp_per_sys['a_all'] > 0]\n return sigma_all, a_all",
"def solid_surface_density_CL2013_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_CL2013(core_mass_obs, a_obs)\n return sigma_obs, core_mass_obs, a_obs",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_scalar_oids(self, new_scalar_oids):\n # type: (List[OID]) -> None\n if not self._is_cache_enabled():\n return\n # Do not update if we are already using scalar oids cache.\n if self._use_scalar_oids_cache:\n return\n self._all_scalar_oids = new_scalar_oids\n self._use_scalar_oids_cache = True\n self._last_ts = time.time()",
"def update_necrosis_count(self, number):\n\n print(\"controller - update_necrosis_count!\")\n self.view.processing_gui.update_necrosis_count(number)",
"def set_rate_catalog(self, rate_catalog):\n self.single_selection_from_kendo_dropdown(self.rate_catalog_kendo_dropdown_locator, rate_catalog)",
"def setupCPUStandard(solver):\n #Adjust blocks for boundary conditions\n makeReadBlocksStandard(solver,solver.operating)\n solver.cpu.set_globals(*solver.globals)\n #Creating sets for cpu calculation\n standardSet = [(x+solver.operating,y+solver.operating) for x,y in numpy.ndindex(solver.blocksize[:-1])]\n #Initializing CPU on standard\n cshape = solver.sharedArray[solver.blocks[0][1]].shape if solver.blocks else (0,)\n solver.standard.initializeCPU(solver.cpu,standardSet,solver.intermediate-1,cshape)",
"def modeScaler(self, latestCount):\n \n try:\n # Accumulate new sample data.\n self.__accumCts += latestCount\n \n # Increment runtime counter.\n self.__runtime += 1\n \n except:\n raise\n \n return",
"def set_n_kinetics(self, n):\n self.lib.SetNumberKinetics(ct.c_int(n))",
"def solid_surface_density_S2014_given_physical_catalog(sssp_per_sys, sssp, max_core_mass=10.):\n a_all = sssp_per_sys['a_all'][sssp_per_sys['a_all'] > 0]\n core_mass_all = np.copy(sssp_per_sys['mass_all'])\n core_mass_all[core_mass_all > max_core_mass] = max_core_mass\n sigma_all = solid_surface_density_S2014(core_mass_all, sssp_per_sys['radii_all'], sssp_per_sys['a_all'], Mstar=sssp['Mstar_all'][:,None])[sssp_per_sys['a_all'] > 0]\n return sigma_all, a_all",
"def system_wide(self, system_wide):\n\n self._system_wide = system_wide"
] | [
"0.57423514",
"0.5616781",
"0.50827426",
"0.4542607",
"0.44935066",
"0.44344246",
"0.43874955",
"0.43789217",
"0.4358185",
"0.43167907",
"0.43059853",
"0.4270202",
"0.42586276",
"0.4214198",
"0.42093727",
"0.4138831",
"0.41344634",
"0.41310257",
"0.41012546",
"0.40627837",
"0.4059922",
"0.40574533",
"0.40550974",
"0.40492907",
"0.4037851",
"0.40317857",
"0.40211806",
"0.4020201",
"0.40056854",
"0.400553"
] | 0.81387305 | 0 |
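A minimal sketch of how a generated setter like the one above is usually exposed as a property on its model class; the InfraSummary class name and the count value are hypothetical and not part of this record. The same pattern applies to the user_count, vdc_count and vm_count setters in the records that follow.

class InfraSummary:
    def __init__(self):
        self._standard_catalog_count = None

    @property
    def standard_catalog_count(self):
        # Getter simply returns the backing field.
        return self._standard_catalog_count

    @standard_catalog_count.setter
    def standard_catalog_count(self, standard_catalog_count):
        # Setter mirrors the document above: assign straight to the backing field.
        self._standard_catalog_count = standard_catalog_count

summary = InfraSummary()
summary.standard_catalog_count = 12  # attribute assignment routes through the setter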
Sets the user_count of this IaasUcsdManagedInfraAllOf. | def user_count(self, user_count):
self._user_count = user_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()",
"def AddUser(self, usercount, user):\n for i in range(usercount):\n login = string.replace(user[i]['Login'], ' ', '')\n home = self.__homeprefix + login[0] + '/' + login\n action = 'userman -A ' + login + ' -p ' + user[i]['Passwd'] + ' -u ' + str(user[i]['UID']) + \\\n ' -g ' + str(user[i]['GID']) + ' -H ' + home + ' -s ' + user[i]['Shell'] \n output = commands.getstatusoutput(action)\n print output\n updatecount, update = self.__sqlData[\"UPDATE AccUser SET ToDo = 0 WHERE Login = '%s'\" % (login)]",
"def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users",
"def user_capacity(self, user_capacity: SmartSsdUserCapacity):\n\n self._user_capacity = user_capacity",
"def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def set_count(self, count):\n self._count = count",
"def setCount(self, num):\n self.count=num",
"def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def device_count(self, device_count):\n\n self._device_count = device_count",
"def users(self, users):\n if (self.local_vars_configuration.client_side_validation and\n users is not None and not isinstance(users, int)):\n raise ValueError(\"Parameter `users` must be an integer\") # noqa: E501\n\n self._users = users",
"def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)",
"async def connected_users_count(self, event):\n print(\"PublicChatConsumer\", \"connected_users_count\",\n event[\"connected_users_count\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_CONNECTED_USERS_COUNT,\n \"connected_users_count\": event[\"connected_users_count\"]\n })",
"def add_user(self, u: \"Node\") -> None:\n\n if u not in self.users_:\n self.users_[u] = 0\n self.users_[u] += 1",
"def set_count(self, count):\n\n\t\tif count is not None and not isinstance(count, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: count EXPECTED TYPE: int', None, None)\n\t\t\n\t\tself.__count = count\n\t\tself.__key_modified['count'] = 1",
"def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()",
"def setuservisits_and_allitems(self, uservisits=None, allitems=None):\n if uservisits is not None:\n self._uservisits = uservisits\n if allitems is not None:\n self._allitems = allitems",
"def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)",
"def users(self, users):\n\n self._users = users",
"def users(self, users):\n\n self._users = users",
"def users(self, users):\n\n self._users = users",
"def count(self, value):\n \n self._count = int(value)",
"def count(self, count: int) -> None:\n self._count = count",
"def lun_count(self, lun_count):\n\n self._lun_count = lun_count",
"def count(self, count: int):\n\n self._count = count",
"def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])",
"def user_id(self, user_id):\n\n self._user_id = user_id"
] | [
"0.61215466",
"0.5793492",
"0.54216063",
"0.54033273",
"0.532203",
"0.5165332",
"0.5099306",
"0.5092787",
"0.5068327",
"0.5068107",
"0.5022238",
"0.5022238",
"0.49769667",
"0.49539578",
"0.49485537",
"0.4900824",
"0.48729518",
"0.48636237",
"0.48355436",
"0.4805139",
"0.48023763",
"0.47965187",
"0.47965187",
"0.47965187",
"0.47733536",
"0.47679263",
"0.47635502",
"0.47627205",
"0.47581416",
"0.47561386"
] | 0.72169346 | 1 |
Sets the vdc_count of this IaasUcsdManagedInfraAllOf. | def vdc_count(self, vdc_count):
self._vdc_count = vdc_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def vm_count(self, vm_count):\n\n self._vm_count = vm_count",
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)",
"def device_count(self, device_count):\n\n self._device_count = device_count",
"def hyperv_host_count(self, hyperv_host_count):\n\n self._hyperv_host_count = hyperv_host_count",
"def vsvrcount(self) :\n\t\ttry :\n\t\t\treturn self._vsvrcount\n\t\texcept Exception as e:\n\t\t\traise e",
"def lun_count(self, lun_count):\n\n self._lun_count = lun_count",
"def vcpus(self, vcpus):\n self._vcpus = vcpus",
"def vcn_id(self, vcn_id):\n self._vcn_id = vcn_id",
"def count(self, count: int):\n\n self._count = count",
"def setCompoundCount(self, count):\n return self._set(compoundCount=count)",
"def container_catalog_count(self, container_catalog_count):\n\n self._container_catalog_count = container_catalog_count",
"def count(self, count: int) -> None:\n self._count = count",
"def get_vdcs(self):\n if self._check_for_7k():\n self.logger.debug('Getting VDC information from {}'.format(self.host))\n vdcxml = self._ncc.nxoscli('show vdc')\n vdcparsed = _begin_parse(vdcxml)\n vdcschema = parse_get_nsmap(vdcparsed)\n showvdc = parse_xml_heirarchy('ROW_vdc', ['vdc_id', 'vdc_name', 'state'], vdcschema,\n vdcparsed)\n vdcs = {}\n for v in showvdc:\n self.logger.debug(\n 'VDC {} {} {} on {}'.format(v['vdc_id'], v['vdc_name'], v['state'], self.host))\n vdcs[v['vdc_name']] = VDC(**v)\n if v['vdc_id'] == '1':\n self.default_vdc = v['vdc_name']\n self.vdcs = vdcs\n self.logger.debug(vdcs)",
"def set_count(self, count):\n self._count = count",
"def vcpu_set(vm_hostname, count, offline=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n _check_defined(vm)\n\n if offline and not vm.is_running():\n log.info(\n '\"{}\" is already powered off, ignoring --offline.'.format(\n vm.fqdn)\n )\n offline = False\n\n if count == vm.dataset_obj['num_cpu']:\n raise Warning('CPU count is the same.')\n\n if offline:\n vm.shutdown()\n vm.set_num_cpu(count)\n if offline:\n vm.start()",
"def advertise_osd_count(count):\n for relid in hookenv.relation_ids('mon'):\n hookenv.relation_set(\n relation_id=relid,\n relation_settings={'bootstrapped-osds': count}\n )",
"def bm_catalog_count(self, bm_catalog_count):\n\n self._bm_catalog_count = bm_catalog_count",
"def set_count(self, count):\n\n\t\tif count is not None and not isinstance(count, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: count EXPECTED TYPE: int', None, None)\n\t\t\n\t\tself.__count = count\n\t\tself.__key_modified['count'] = 1",
"def esxi_host_count(self, esxi_host_count):\n\n self._esxi_host_count = esxi_host_count",
"def set_count(c):\n global count\n count = c",
"def count(self, count):\n\n self._count = count",
"def count(self, count):\n\n self._count = count",
"def count(self, count):\n\n self._count = count",
"def count(self, count):\n\n self._count = count",
"def setCount(self, num):\n self.count=num",
"def set_entity_count(cls, count):\n return cls.db.set(\"entity_count\", count)",
"def user_count(self, user_count):\n\n self._user_count = user_count",
"def user_count(self, user_count):\n\n self._user_count = user_count"
] | [
"0.5437181",
"0.5437181",
"0.531096",
"0.5136232",
"0.50647324",
"0.49089286",
"0.47968695",
"0.4710327",
"0.46992522",
"0.46300593",
"0.46219018",
"0.46113285",
"0.46049786",
"0.4604565",
"0.45997623",
"0.45524865",
"0.45503348",
"0.45477548",
"0.45165822",
"0.45023248",
"0.44992244",
"0.44801655",
"0.44748098",
"0.44748098",
"0.44748098",
"0.44748098",
"0.44720647",
"0.44547394",
"0.4446141",
"0.4446141"
] | 0.7397312 | 0 |
Sets the vm_count of this IaasUcsdManagedInfraAllOf. | def vm_count(self, vm_count):
self._vm_count = vm_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_cpu_count(self, nVmCpuCount):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuCount', self.handle, nVmCpuCount)",
"def vdc_count(self, vdc_count):\n\n self._vdc_count = vdc_count",
"def lun_count(self, lun_count):\n\n self._lun_count = lun_count",
"def vcpus(self, vcpus):\n self._vcpus = vcpus",
"def hyperv_host_count(self, hyperv_host_count):\n\n self._hyperv_host_count = hyperv_host_count",
"def vcpu_set(vm_hostname, count, offline=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n _check_defined(vm)\n\n if offline and not vm.is_running():\n log.info(\n '\"{}\" is already powered off, ignoring --offline.'.format(\n vm.fqdn)\n )\n offline = False\n\n if count == vm.dataset_obj['num_cpu']:\n raise Warning('CPU count is the same.')\n\n if offline:\n vm.shutdown()\n vm.set_num_cpu(count)\n if offline:\n vm.start()",
"def vms_every(self, vms_every):\n\n self._vms_every = vms_every",
"def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)",
"def vm_num(self, vm_num):\n\n self._vm_num = vm_num",
"def vm_volumes_every(self, vm_volumes_every):\n\n self._vm_volumes_every = vm_volumes_every",
"def vm_num_in(self, vm_num_in):\n\n self._vm_num_in = vm_num_in",
"def setCount(self, num):\n self.count=num",
"def user_count(self, user_count):\n\n self._user_count = user_count",
"def user_count(self, user_count):\n\n self._user_count = user_count",
"def vm(self, vm):\n\n self._vm = vm",
"def processor_count(self, processor_count):\n\n self._processor_count = processor_count",
"def node_count(self, node_count):\n\n self._node_count = node_count",
"def set_num_virtual_stages(self, num_virtual_stages: int) -> None:\n self.num_virtual_stages = num_virtual_stages",
"def set_count(self, count):\n self._count = count",
"def test_vm_count():\n assert environments.vm_count() > 0, 'Total VM count should be over 1.'\n count = 0\n for l in list(environments.data):\n e = environments[l]\n count += e.vm_count\n msg = ('VM count mismatch. Environments says: ' +\n str(environments.vm_count()) +\n ', actual count: ' + str(count))\n assert count == environments.vm_count(), msg",
"def vlans_every(self, vlans_every):\n\n self._vlans_every = vlans_every",
"def virtual_machines(self, virtual_machines):\n\n self._virtual_machines = virtual_machines",
"def bm_catalog_count(self, bm_catalog_count):\n\n self._bm_catalog_count = bm_catalog_count",
"def device_count(self, device_count):\n\n self._device_count = device_count",
"def vsvrcount(self) :\n\t\ttry :\n\t\t\treturn self._vsvrcount\n\t\texcept Exception as e:\n\t\t\traise e",
"def count(self, count: int):\n\n self._count = count",
"def count(self, count: int) -> None:\n self._count = count",
"def update_cpu(self, vm):\n try:\n cpu_spec = self.client.get_cpu(vm.backend_id)\n if (\n cpu_spec['cores_per_socket'] != vm.cores_per_socket\n or cpu_spec['count'] != vm.cores\n ):\n self.client.update_cpu(\n vm.backend_id,\n {\n 'cores_per_socket': vm.cores_per_socket,\n 'count': vm.cores,\n },\n )\n except VMwareError as e:\n raise VMwareBackendError(e)",
"def AddCpuCountFlag(parser):\n help_text = \"\"\"\\\n Whole number value indicating how many vCPUs the machine should\n contain. Each vCPU count corresponds to a N2 high-mem machine:\n (https://cloud.google.com/compute/docs/general-purpose-machines#n2_machines).\n \"\"\"\n parser.add_argument(\n '--cpu-count',\n help=help_text,\n type=int,\n choices=[2, 4, 8, 16, 32, 64],\n required=True)",
"def vm_templates_every(self, vm_templates_every):\n\n self._vm_templates_every = vm_templates_every"
] | [
"0.60274035",
"0.5922383",
"0.5739843",
"0.5628235",
"0.56280166",
"0.5574185",
"0.55255973",
"0.53965175",
"0.5285271",
"0.5197171",
"0.5193007",
"0.5076765",
"0.5026433",
"0.5026433",
"0.50207126",
"0.49862435",
"0.4984859",
"0.49740753",
"0.4924906",
"0.49171144",
"0.49162132",
"0.49112612",
"0.4900573",
"0.4875653",
"0.48574725",
"0.48341277",
"0.48284873",
"0.48186758",
"0.47809905",
"0.47798416"
] | 0.7385598 | 1 |
Formats ``path`` with the rank zero values. | def _format_path_with_rank_zero(path: str) -> str:
return path.format(
rank=0,
local_rank=0,
node_rank=0,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_local_rank_zero_path(path: Optional[str]) -> str:\n local_rank_zero = dist.get_global_rank() - dist.get_local_rank()\n paths = dist.all_gather_object(path)\n local_rank_zero_path = paths[local_rank_zero]\n assert local_rank_zero_path is not None, 'local rank zero provides the path'\n return local_rank_zero_path",
"def _format_path_with_current_rank(path: str) -> str:\n return path.format(\n rank=dist.get_global_rank(),\n local_rank=dist.get_local_rank(),\n node_rank=dist.get_node_rank(),\n )",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, int):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def path_str(path):\n\toutput = \"PATH: \"\n\tif path:\n\t\tfor i in path:\n\t\t\toutput += str(i.data) + \" -> \"\n\telse:\n\t\toutput += \"Empty\"\n\treturn output",
"def path_to_string(path: Path) -> str:\n assert_continuous(path)\n\n pieces = [\"M {} {}\".format(path[0].p0[0], path[0].p0[1])]\n for curve in iter(path): # iter cast not strictly necessary\n piece = \"C {} {} {} {} {} {}\".format(\n int(round(curve.c0[0])), int(round(curve.c0[1])),\n int(round(curve.c1[0])), int(round(curve.c1[1])),\n int(round(curve.p1[0])), int(round(curve.p1[1]))\n )\n pieces.append(piece)\n\n return \" \".join(pieces)",
"def format_path(path):\n if len(path) > 1:\n result = [crayons.yellow(path[0].name)]\n\n previous = path[0]\n for item in path[1:]:\n result.append(' -> ')\n result.append(crayons.yellow(item.name))\n result.append(': Line ')\n result.append(crayons.cyan(str(item.is_imported_from[previous.full_path][0])))\n previous = item\n result.append(' =>> ')\n\n result.append(crayons.magenta(path[0].name))\n return ''.join(str(x) for x in result)\n else:\n return ''",
"def encodePath(self, path):\n codedPath = []\n for x, y, pickedRewards, holding in path:\n rewardsList = sorted(list(pickedRewards))\n codedPath.append((self.coordToState[(x, y, tuple(rewardsList), holding)], 0))\n return codedPath",
"def print_rank_zero(*args, **kwargs) -> None:\n print(*args, **kwargs)",
"def _GeneratePathStr(path):\n return ((len(path) - 1) * ' ') + path[-1] if path else ''",
"def empty_path() -> dict[str, int]:\n return {\"UP\": 0, \"DOWN\": 0, \"LEFT\": 0, \"RIGHT\": 0}",
"def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0",
"def zero(self, value):\n raise NotImplementedError",
"def count_paths_with_zero_intervals(self):\n zeros = []\n for path in self.paths:\n # print(\"Checking path {}\".format(path))\n has_zero = 0\n for arc in path:\n # lb = self.arc_info[arc][\"lower_bound\"]\n # ub = self.arc_info[arc][\"upper_bound\"]\n # print(\"{} {} interval\".format(lb,ub))\n if (self.arc_info[arc][\"upper_bound\"] -\n self.arc_info[arc][\"lower_bound\"]) == 0:\n has_zero = 1\n zeros.append(has_zero)\n print(zeros)\n return(sum(zeros))",
"def test_format_throughput_for_when_available_is_zero(self):\n self.assertEqual(format_throughput(0, 7), \"7/∞\")\n self.assertEqual(format_throughput(0), \"N/A\")",
"def format_path(path):\n if not path:\n return path\n\n path = re.sub(r'/+', '/', path)\n\n if path == '/':\n return (u\"\" if isinstance(path, unicode) else \"\")\n else:\n return '/' + path.strip('/')",
"def normalizePath(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n buff = '/' + path if path[0] != '/' else path\n return buff.replace('//', '/')",
"def format_path(path):\n return path if path.endswith('/') else path + '/'",
"def zero(klass):\n return RatTerm(RatNum(0, 1), 0)",
"def corrTIFPath(path, symbol, value): \n reg_expr_search = '(\\%s+)'%str(symbol)\n length_of_replace = len(re.search(reg_expr_search, path).group(0))\n str_format = '{0:%sd}'%'{0:02d}'.format(length_of_replace)\n str_replace = str_format.format(int(value))\n\n return re.sub(reg_expr_search, str_replace, path)",
"def path(filename, path):\n\n # If the line is not empty:\n if len(path) > 0:\n # Open the file for appending\n with open(filename, \"a\") as file:\n # Define format string\n write = \"{:.2f},{:.2f},{:d},{:d},{:d},\\n\"\n\n # Find the first point\n first = path[0]\n # Write the first point with \"no extruding\" option\n file.write(write.format(float(first[1][0]), float(first[1][1]), 0, 0, 0))\n\n # For each line in the path\n for i, line in enumerate(path):\n # If line isn't a repeated point\n if True or (line[1][0] != line[2][0]) and (line[1][1] != line[2][1]):\n\n # If the line is somewhere in the middle of the list write it with \"extruding\" option\n if i < len(path) - 1:\n file.write(write.format(float(line[2][0]), float(line[2][1]), 1, 0, 0))\n\n # If the line is the last of the path, write it with \"extruding\" and \"end of island\" options\n else:\n file.write(write.format(float(line[2][0]), float(line[2][1]), 1, 1, 0))",
"def reset_path(self):\n for i in self.grid:\n for y in i:\n y.g = 0\n y.h = 0\n y.f = 0\n y.parent = None\n y.visited = False",
"def clean_path(path):\n return resolved_path(path)",
"def _isolated_path_format(self, path):\n if self._root_dir.is_parent_of(path):\n return '%s:%s' % (\n self._root_dir,\n self._api.path.join(*path.pieces[len(self._root_dir.pieces):])\n )\n else:\n assert path == self._root_dir, \\\n \"isolated path must be equal to or within %s\" % self._root_dir\n return '%s:.' % self._root_dir",
"def sign_of_path(path):\n vectors = [(a[0] - b[0], a[1] - b[1]) for b, a in pairwise(path)]\n sign_exp = 0\n for idx, vector in enumerate(vectors):\n if vector == (0, 1):\n sign_exp += len([v for v in vectors[idx + 1:] if v == (1, 0)])\n return (-1) ** (sign_exp)",
"def printPath(path):\n result =''\n for i in range(len(path)):\n result = result + str(path[i])\n if i != len(path) -1:\n result = result + '->'\n return result",
"def printPath(path):\r\n result = ''\r\n for i in range(len(path)):\r\n result = result + str(path[i])\r\n if i != len(path) - 1:\r\n result = result + '->'\r\n return result",
"def calcPath(self):\n return None"
] | [
"0.6743314",
"0.6682493",
"0.5183608",
"0.51139396",
"0.51139396",
"0.51139396",
"0.50459886",
"0.49452367",
"0.49336353",
"0.49094725",
"0.4861403",
"0.48274943",
"0.48119223",
"0.48062816",
"0.47794765",
"0.47726187",
"0.4767327",
"0.47632933",
"0.47430956",
"0.4704121",
"0.46915564",
"0.46808624",
"0.4668043",
"0.46600246",
"0.4647841",
"0.4640059",
"0.46351165",
"0.46247953",
"0.46155828",
"0.46115085"
] | 0.890424 | 0 |
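A small illustration of the rank-zero substitution performed above; the template string is hypothetical and only contains placeholders the function fills in, and extra keyword arguments to str.format are simply ignored.

template = 'checkpoints/rank{rank}_node{node_rank}.pt'
formatted = template.format(rank=0, local_rank=0, node_rank=0)
print(formatted)  # checkpoints/rank0_node0.pt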
Formats ``path`` with the current rank values. | def _format_path_with_current_rank(path: str) -> str:
return path.format(
rank=dist.get_global_rank(),
local_rank=dist.get_local_rank(),
node_rank=dist.get_node_rank(),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _format_path_with_rank_zero(path: str) -> str:\n return path.format(\n rank=0,\n local_rank=0,\n node_rank=0,\n )",
"def format_path(path):\n if len(path) > 1:\n result = [crayons.yellow(path[0].name)]\n\n previous = path[0]\n for item in path[1:]:\n result.append(' -> ')\n result.append(crayons.yellow(item.name))\n result.append(': Line ')\n result.append(crayons.cyan(str(item.is_imported_from[previous.full_path][0])))\n previous = item\n result.append(' =>> ')\n\n result.append(crayons.magenta(path[0].name))\n return ''.join(str(x) for x in result)\n else:\n return ''",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, int):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def format(self, path=None):\n if not path:\n path = self.path\n print(path, end=\"\")\n FORMATTER.parse_file(path)\n FORMATTER.write_file(path)\n print(\" Done\")",
"def print_path(self, path, marks = []):\n\n result = ''\n\n for y in range(1, self.height + 1):\n for x in range(1, self.width + 1):\n # Draw top line\n if (x, y - 1) in self.get_reachables(x, y):\n result += '+ '\n else: result += '+--'\n\n result += '+\\n'\n\n for x in range(1, self.width + 1):\n # Draw horizontal passage\n if (x - 1, y) in self.get_reachables(x, y):\n result += ' '\n else: result += '|'\n\n\n if (x, y) in path:\n if (x, y) in path[-1:]:\n result += '(X'\n else: result += ' x'\n elif (x, y) in marks:\n result += ' #'\n else: result += ' '\n\n result += '|\\n'\n\n if y == self.height:\n for x in range(1, self.width + 1):\n # Draw bottom line\n result += '+--'\n\n return result + '+'",
"def _isolated_path_format(self, path):\n if self._root_dir.is_parent_of(path):\n return '%s:%s' % (\n self._root_dir,\n self._api.path.join(*path.pieces[len(self._root_dir.pieces):])\n )\n else:\n assert path == self._root_dir, \\\n \"isolated path must be equal to or within %s\" % self._root_dir\n return '%s:.' % self._root_dir",
"def format_path(path):\n return path if path.endswith('/') else path + '/'",
"def _pretty_path(path: Sequence[BaseField]) -> str:\n # pylint: disable=protected-access\n return \"< \" + \" -> \".join(f\"'{field._resolve_field_name()}' ({type(field).__name__})\" for field in path) + \" >\"",
"def printPath(path):\r\n result = ''\r\n for i in range(len(path)):\r\n result = result + str(path[i])\r\n if i != len(path) - 1:\r\n result = result + '->'\r\n return result",
"def printPath(path):\n result =''\n for i in range(len(path)):\n result = result + str(path[i])\n if i != len(path) -1:\n result = result + '->'\n return result",
"def format_path(path):\n if not path:\n return path\n\n path = re.sub(r'/+', '/', path)\n\n if path == '/':\n return (u\"\" if isinstance(path, unicode) else \"\")\n else:\n return '/' + path.strip('/')",
"def _formatPath(directoryPath, filePath):\n return directoryPath + \"\\\\\" + filePath",
"def printPath(path):\n result = ''\n for i in range(len(path)):\n result = result + str(path[i])\n if i != len(path) - 1:\n result = result + '->'\n return result",
"def _pretty_json_path(self, path):\r\n segments = path.split('.')\r\n\r\n def builder(prev, cur):\r\n if re.match(cur):\r\n return \"{0}[]\".format(prev)\r\n return \"{0}.{1}\".format(prev, cur)\r\n\r\n segments = reduce(builder, segments)\r\n return segments",
"def format_path(path_string, selection):\n return path_string.format(selection, selection.namespace())",
"def pretty_path(path):\n return path.replace(REPO_DIR + '/', '')",
"def _get_local_rank_zero_path(path: Optional[str]) -> str:\n local_rank_zero = dist.get_global_rank() - dist.get_local_rank()\n paths = dist.all_gather_object(path)\n local_rank_zero_path = paths[local_rank_zero]\n assert local_rank_zero_path is not None, 'local rank zero provides the path'\n return local_rank_zero_path",
"def path_to_string(path: Path) -> str:\n assert_continuous(path)\n\n pieces = [\"M {} {}\".format(path[0].p0[0], path[0].p0[1])]\n for curve in iter(path): # iter cast not strictly necessary\n piece = \"C {} {} {} {} {} {}\".format(\n int(round(curve.c0[0])), int(round(curve.c0[1])),\n int(round(curve.c1[0])), int(round(curve.c1[1])),\n int(round(curve.p1[0])), int(round(curve.p1[1]))\n )\n pieces.append(piece)\n\n return \" \".join(pieces)",
"def __str__(self):\n if self._rank is None:\n rank_str = \"\"\n else:\n rank_str = str(self._rank + 1)\n\n if self._file is None:\n file_str = \"\"\n else:\n file_str = chr(self._file + 97)\n\n return file_str + rank_str",
"def path_str(path):\n\toutput = \"PATH: \"\n\tif path:\n\t\tfor i in path:\n\t\t\toutput += str(i.data) + \" -> \"\n\telse:\n\t\toutput += \"Empty\"\n\treturn output",
"def corrTIFPath(path, symbol, value): \n reg_expr_search = '(\\%s+)'%str(symbol)\n length_of_replace = len(re.search(reg_expr_search, path).group(0))\n str_format = '{0:%sd}'%'{0:02d}'.format(length_of_replace)\n str_replace = str_format.format(int(value))\n\n return re.sub(reg_expr_search, str_replace, path)",
"def display_path(self, path):\n graph = path.graph\n if not graph:\n return\n for v in sorted(graph.vertices()):\n p = graph.get_vertex_attribute(v, 'xy')\n x, y = to_geometry(p[0]), to_geometry(p[1])\n print('define v{} ellipse 2 2 c_vertex {} {}'.format(v, x, y))\n #print('define v{0}t text {0} 14 white {1} {2}'.format(v, x, y))\n for u, v in graph.edges():\n print('define - link v{} v{} 1 c_edge'.format(u, v))\n # NOTE: this code assumes paths will not move indefinitely\n print('fix /./')",
"def __str__(self):\n return '{0}'.format(self.path.name[2:])",
"def path_name(self, path):\r\n ind = path.rfind(\"/\") + 1\r\n return (path[:ind], path[ind:])",
"def __rank_from_int_to_str(rank: int) -> str:\n return str(rank + 1)",
"def format_path(s,\n path=None,\n replace_long_filename=False):\n # TODO: could possibly simplify by moving representation logic to FileNode\n replaced_path_name = False\n if path is not None:\n if s.startswith(path):\n replaced_path_name = True\n s = s[len(path)+1:]\n if replace_long_filename:\n head, tail = os.path.split(s)\n name_prefix = head.replace('/','_')\n if '/' in head and len(name_prefix) > 0:\n s = s.replace(name_prefix, '. . . ')\n if replaced_path_name:\n s = \"[DIR]/\" + s\n return \"/\\\\n\".join(s.split('/'))",
"def addRankToLine(self,cols,rank):\n line = \"\"\n ann = \";RankScore=\" + str(self.family_id) + \":\" + str(rank)\n for c in cols:\n line = line + c\n if cols.index(c) == 7: # 0-based index\n line = line + ann\n if cols.index(c) < len(cols):\n line = line + \"\\t\"\n print(line)"
] | [
"0.7308434",
"0.6294355",
"0.6030438",
"0.58486265",
"0.58486265",
"0.58486265",
"0.5666408",
"0.5501491",
"0.54912573",
"0.5491005",
"0.5487094",
"0.5456914",
"0.5407116",
"0.53897357",
"0.5379467",
"0.5378536",
"0.53413504",
"0.5290244",
"0.5282369",
"0.5271615",
"0.52611613",
"0.52607363",
"0.52605337",
"0.522763",
"0.51928437",
"0.5153218",
"0.5116245",
"0.5106497",
"0.5103367",
"0.50885874"
] | 0.8522862 | 0 |
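For contrast with the rank-zero variant, a sketch of how the same hypothetical template expands differently per process when current-rank values are substituted; the (rank, local_rank, node_rank) triples stand in for what the distributed helpers would report.

template = 'checkpoints/rank{rank}_node{node_rank}.pt'
for rank, local_rank, node_rank in [(0, 0, 0), (1, 1, 0), (2, 0, 1), (3, 1, 1)]:
    print(template.format(rank=rank, local_rank=local_rank, node_rank=node_rank))
# checkpoints/rank0_node0.pt
# checkpoints/rank1_node0.pt
# checkpoints/rank2_node1.pt
# checkpoints/rank3_node1.pt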
Broadcasts the ``path`` from the LOCAL rank zero to all LOCAL ranks. | def _get_local_rank_zero_path(path: Optional[str]) -> str:
local_rank_zero = dist.get_global_rank() - dist.get_local_rank()
paths = dist.all_gather_object(path)
local_rank_zero_path = paths[local_rank_zero]
assert local_rank_zero_path is not None, 'local rank zero provides the path'
return local_rank_zero_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_path(self, path):\n self.clear_path()\n for coordinate in path:\n self.send_coordinate(coordinate)\n time.sleep(0.05)",
"def broadcast(value, root_rank, name=None):\n return _impl.broadcast(K, value, root_rank, name)",
"def extern_to_local_path(self, path: PurePath) -> Path:\n return self.path_supervisor / path.relative_to(self.path_extern_supervisor)",
"def local_to_extern_path(self, path: PurePath) -> PurePath:\n return self.path_extern_supervisor / path.relative_to(self.path_supervisor)",
"def Update(self, local_path_info):\n if not local_path_info:\n return\n for priority_group in local_path_info._path_priority_groups:\n group_list = []\n for path in priority_group:\n if not self.IsPathInLocalPaths(path):\n group_list.append(path)\n if group_list:\n self._path_priority_groups.append(group_list)",
"def local_rebroadcast_lift(node):\r\n op = node.op\r\n if not isinstance(op, T.Rebroadcast):\r\n return False\r\n\r\n input = node.inputs[0]\r\n inode = input.owner\r\n if inode and isinstance(inode.op, Elemwise) and len(inode.inputs) == 1:\r\n # It may happen that `input` has no client because this optimization\r\n # is called from `apply_rebroadcast_opt`, which in particular is used\r\n # by the `unbroadcast` function before we are in the actual function\r\n # compilation phase.\r\n if hasattr(input, 'clients') and len(input.clients) == 1:\r\n rval = inode.op.make_node(T.Rebroadcast(*op.axis.items())(\r\n inode.inputs[0])).outputs\r\n return rval\r\n if inode and isinstance(inode.op, T.Rebroadcast):\r\n # the \"axis\" specification in the outer Rebroadcast overrides\r\n # the axis of the inner one\r\n axis = inode.op.axis.copy()\r\n axis.update(op.axis)\r\n iinput = inode.inputs[0]\r\n rval = [T.Rebroadcast(*axis.items())(iinput)]\r\n return rval",
"def _format_path_with_rank_zero(path: str) -> str:\n return path.format(\n rank=0,\n local_rank=0,\n node_rank=0,\n )",
"def broadcast_global_variables(root_rank):\n return _impl.broadcast_global_variables(K, root_rank)",
"def broadcast(data: T, root: int) -> T:\n return collective.broadcast(data, root)",
"def _format_path_with_current_rank(path: str) -> str:\n return path.format(\n rank=dist.get_global_rank(),\n local_rank=dist.get_local_rank(),\n node_rank=dist.get_node_rank(),\n )",
"def broadcast_global_variables(root_rank):\n if _executing_eagerly():\n raise RuntimeError(\n \"bps.broadcast_global_variables() does not support eager execution. \"\n \"Please use `bps.broadcast_variables(<model/optimizer variables>)` instead.\"\n )\n\n return broadcast_variables(_global_variables(), root_rank)",
"def _exchange_ghosts_local(self):\n for d in xrange(self._dim):\n self._exchange_ghosts_local_d(d)",
"def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)",
"def broadcast(self):\n comm.Barrier()\n\n if rank == 0:\n dim = np.array([self.nspin, self.nkpt, self.nband], dtype=np.int)\n else:\n dim = np.empty(3, dtype=np.int)\n\n comm.Bcast([dim, MPI.INT])\n\n if rank != 0:\n self.EIG = np.empty(dim, dtype=np.float64)\n self.Kptns = np.empty((dim[1],3), dtype=np.float64)\n\n comm.Bcast([self.EIG, MPI.DOUBLE])\n comm.Bcast([self.Kptns, MPI.DOUBLE])",
"def traceback_all(self, path, input_data):\r\n current = path[-1]\r\n neighbours = self._get_neighbours(current, input_data)\r\n\r\n for vec in neighbours:\r\n if (vec.x == 0 and vec.y == 0 and vec.z == 0):\r\n path.append(vec)\r\n self.paths.append(copy.deepcopy(path))\r\n path.pop()\r\n else:\r\n path.append(vec)\r\n self.traceback_all(path, input_data)\r\n path.pop()",
"def send_broadcast_packet(self, broadcast_packet: Packet) -> None:\n for neighbor_address in [*self.children_addresses, self.parent_address]:\n if neighbor_address:\n self.stream.add_message_to_out_buff(neighbor_address, broadcast_packet)\n log(f'Message packet added to out buff of Node({neighbor_address}).')",
"def walk(self, priv_path:list):\n # End conditions for recursive loop\n current_node = priv_path[-1]\n if current_node.location in self.destination and len(priv_path)>1:\n self.addItinerary(priv_path)\n self.n_routes+=1\n return\n if self.n_routes >= self.max_n_routes:\n return\n\n if len(priv_path)>1:\n # Get metadata of last edge type\n last_edge = self.EdgeType(priv_path[-2], priv_path[-1])\n else: # If it's start of itinerary, next edge would be travel edge\n # So, make last edge as stay\n last_edge = 'stay'\n if last_edge == 'stay': # next edge will be travel i.e., ship not None\n next_nodes = [node for node in self.G.neighbors(current_node) \n if self.G.edges[current_node, node]['ship'] is not None]\n else: # Next edge will be stay, i.e., ship = None\n next_nodes = [node for node in self.G.neighbors(current_node)\n if self.G.edges[current_node, node]['ship'] is None]\n \n for node in next_nodes:\n self.walk(priv_path+[node])",
"def zero_base_local(*args):\n robots = get_robot_roots()\n if not robots:\n pm.warning('Nothing Selected; Select a valid robot')\n return\n\n try:\n for robot in robots:\n local_ctrl_path = get_local_ctrl_path(robot)\n pm.setAttr(local_ctrl_path + '.translate', 0, 0, 0)\n pm.setAttr(local_ctrl_path + '.rotate', 0, 0, 0)\n except:\n pm.warning('Cannot zero base (local)')",
"def update_path(self):\r\n if len(self.queue) == 0:\r\n return\r\n self.path[:] = []\r\n current = self.peek_queue()[0]\r\n while current in self.previous:\r\n self.path.append(current)\r\n current = self.previous[current]",
"def local_path(G, grid, lcl, p1, p2):\n\n # create a local graph\n lcl_graph = local_graph(G)\n\n # plan over the local graph from p1 to p2\n path, _ = astar.a_star_graph(lcl_graph, utils.norm_distance, p1, p2)\n print(path)\n for p in path:\n lcl.append(p)\n return lcl",
"def broadcast(self, tx):\n\n for neighbor_id in self.adjacencies:\n self.sendMsg(neighbor_id, Message(self.id, Type.BLOCK, tx))",
"def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)",
"def _get_local_dest(self, path: Path) -> Path:\n dest = \"\"\n\n if str(path).startswith(\"~\"):\n path = path.relative_to(\"~\")\n\n if self.category == \"global\":\n dest = f\"{self.local_base}/global/{path}\"\n elif self.category == \"local\":\n dest = f\"{self.local_base}/local/{path}\"\n else:\n dest = f\"{self.local_base}/custom/{path}\"\n\n return Path(dest)",
"def test_broadcast(self):\n if _debug: TestSimple._debug(\"test_broadcast\")\n\n # create a network\n tnet = TNetwork()\n\n # make a PDU from node 1 to node 2\n pdu_data = xtob('dead.beef')\n pdu = PDU(pdu_data, source=tnet.td.address, destination=LocalBroadcast())\n if _debug: TestSimple._debug(\" - pdu: %r\", pdu)\n\n # test device sends it, iut gets it\n tnet.td.start_state.send(pdu).success()\n tnet.iut.start_state.receive(PDU, pduSource=tnet.td.address).success()\n\n # sniffer sees message on the wire\n tnet.sniffer.start_state.receive(OriginalBroadcastNPDU,\n pduSource=tnet.td.address.addrTuple,\n# pduDestination=('192.168.4.255', 47808),\n pduData=pdu_data,\n ).timeout(1.0).success()\n\n # run the group\n tnet.run()",
"def localpath(self, *args):\n return os.path.join(os.path.expanduser(self.serverfiles_dir), *args)",
"def sync_model(model):\n size = float(dist.get_world_size())\n\n for param in model.parameters():\n dist.broadcast(param.data, 0)",
"def emit(self, *path):\n path = list(path)\n for module in self.modules.values():\n module.emit_local(*path)",
"def get_remote_path(self, local_path, mapped_paths):\n return self.get_local_path(local_path, mapped_paths, reverse=True)",
"def update_trip_path(trip_mpois, paths, graph):\n n_nodes = len(trip_mpois)\n # adjacency matrix\n new_paths = np.zeros(shape=(n_nodes, n_nodes))\n\n # iterate through all the nodes and create a list of nodes with sequential id\n for i, node1 in enumerate(trip_mpois):\n for j, node2 in enumerate(trip_mpois):\n new_paths[i, j] = paths[node1, node2]\n\n # new_paths = new_paths/np.max(new_paths[new_paths < _INF])\n # new_paths[np.isinf(new_paths)] = _INF\n\n # create a dummy edge between end and start node with weight 0\n new_paths[1,0] = -_INF\n # new_paths[0,1] = _INF\n\n shortest_path = None\n if n_nodes > 5:\n shortest_path, dist = tsp.solve(n_nodes, new_paths)\n # shortest_path = range(n_nodes)\n else:\n shortest_path = range(n_nodes)\n\n trip_path = np.array(trip_mpois)[shortest_path]\n\n if ___DEBUG:\n fname = 'dump/' + str(n_nodes) + '.dist'\n np.savetxt(fname, new_paths, fmt='%.6f')\n \n mpoi_pos = np.zeros(shape=(n_nodes,2))\n \n for i, node in enumerate(trip_mpois):\n pos_3d = graph.vs[node]['position']\n assert node == graph.vs[node].index\n mpoi_pos[i,:] = pos_3d[:2]\n\n fname = 'dump/' + str(n_nodes) + '.pos'\n np.savetxt(fname, mpoi_pos)\n \n # print trip_mpois, trip_path\n\n return trip_path",
"def appendleft(self, path):\n self.paths.appendleft(path)\n self.time += path.time"
] | [
"0.558918",
"0.5540478",
"0.5262298",
"0.5138672",
"0.51280296",
"0.5033147",
"0.4981683",
"0.4957514",
"0.4915474",
"0.4859186",
"0.4790848",
"0.4757363",
"0.47432727",
"0.4711225",
"0.47017384",
"0.4673872",
"0.46665725",
"0.46487427",
"0.4647647",
"0.46301493",
"0.46140268",
"0.4612716",
"0.45998842",
"0.4593032",
"0.45724565",
"0.4556833",
"0.45547992",
"0.4547696",
"0.4529297",
"0.4523274"
] | 0.639906 | 0 |
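A plain-Python sketch of the index arithmetic behind the broadcast above: subtracting the local rank from the global rank gives the global rank of that node's local rank zero, which indexes the gathered list. The ranks and paths below are hypothetical (two nodes with two processes each, where only local rank zero created a directory).

gathered_paths = ['/tmp/node0_dir', None, '/tmp/node1_dir', None]  # one entry per global rank
global_rank, local_rank = 3, 1               # second process on the second node
local_rank_zero = global_rank - local_rank   # -> 2
print(gathered_paths[local_rank_zero])       # /tmp/node1_dir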
Download the checkpoint stored at ``path``, potentially in ``object_store``, to ``node_checkpoint_folder``. Returns a tuple of (``composer_states_filepath``, ``extracted_checkpoint_folder``, ``extracted_rank_n``). The ``composer_states_filepath`` is the path to the composer states, which can be passed into | def download_checkpoint(path: str,
node_checkpoint_folder: str,
object_store: Optional[Union[ObjectStore, LoggerDestination]],
progress_bar: bool,
fsdp_sharded_state_dict_enabled: bool = False,
deepspeed_sharded_checkpoint: bool = False) -> Tuple[str, Optional[str], bool]:
log.debug('Downloading checkpoint to folder %s', node_checkpoint_folder)
rank_zero_checkpoint_filepath = os.path.join(node_checkpoint_folder, 'rank0_checkpoint')
rank_n_checkpoint_filepath = os.path.join(node_checkpoint_folder, f'rank{dist.get_global_rank()}_checkpoint')
extracted_checkpoint_folder = None
extracted_rank_n = False
if is_tar(path):
extracted_checkpoint_folder = os.path.join(node_checkpoint_folder, 'checkpoint')
composer_states_filepath = os.path.join(extracted_checkpoint_folder, _COMPOSER_STATES_FILENAME)
else:
# it's not an archive; it's just the composer state dict
# and only rank zero has this file unless fsdp_sharded_state_dict_enabled then
# every rank has it's own file.
extracted_checkpoint_folder = None
composer_states_filepath = (rank_n_checkpoint_filepath
if fsdp_sharded_state_dict_enabled else rank_zero_checkpoint_filepath)
checkpoint_is_sharded = fsdp_sharded_state_dict_enabled or deepspeed_sharded_checkpoint
try:
if not checkpoint_is_sharded and dist.get_local_rank() == 0:
# if the checkpoint is not sharded, then local rank 0 on each node needs to download the
# global rank 0 checkpoint
path = _format_path_with_rank_zero(path)
get_file(destination=rank_zero_checkpoint_filepath,
path=path,
object_store=object_store,
progress_bar=progress_bar)
if extracted_checkpoint_folder is not None:
try:
with tarfile.open(rank_zero_checkpoint_filepath) as tarball:
tarball.extractall(extracted_checkpoint_folder)
except FileNotFoundError:
# Not re-raising the file-not-found error as that is irrelevant;
# the underlying issue is that the checkpoint file does not exist on the disk
# or could not be downloaded
raise RuntimeError(f'Checkpoint {path} does not exist')
elif checkpoint_is_sharded:
# if the checkpoint is sharded, then every rank needs to download its own checkpoint
try:
get_file(destination=rank_n_checkpoint_filepath,
path=_format_path_with_current_rank(path),
object_store=object_store,
progress_bar=progress_bar)
except FileNotFoundError as e:
raise FileNotFoundError(
(f'Checkpoint {_format_path_with_current_rank(path)} does not exist, '
f'but is required for sharded checkpointing on rank {dist.get_global_rank()}. '
'Please ensure that the checkpoint exists and your load_path was specified as a format string'
'with the {rank} argument.')) from e
if extracted_checkpoint_folder is not None:
with contextlib.suppress(FileNotFoundError):
# it's an archive and needs to be extracted
with tarfile.open(rank_n_checkpoint_filepath) as tarball:
tarball.extractall(extracted_checkpoint_folder)
extracted_rank_n = True
finally:
# Use busy wait to avoid timeouts on large downloads for non-sharded checkpoints
if not checkpoint_is_sharded:
signal_file_path = os.path.join(node_checkpoint_folder, '.local_rank0_completed')
if dist.get_local_rank() == 0:
with open(signal_file_path, 'wb') as f:
f.write(b'local_rank0_completed')
# Avoid the collective call until the local rank zero has finished trying to download the
# checkpoint so that we don't timeout for large downloads. This syncs all processes on the
# node
with dist.local_rank_zero_download_and_wait(signal_file_path):
# Then, wait to ensure every node has finished downloading the checkpoint
dist.barrier()
if dist.get_local_rank() == 0:
os.remove(signal_file_path)
dist.barrier()
return composer_states_filepath, extracted_checkpoint_folder, extracted_rank_n | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_checkpoint(\n path: str,\n state: State,\n logger: Logger,\n object_store: Optional[Union[ObjectStore, LoggerDestination]] = None,\n load_weights_only: bool = False,\n strict_model_weights: bool = False,\n progress_bar: bool = True,\n ignore_keys: Optional[Union[List[str], Callable[[Dict], None]]] = None,\n exclude_algorithms: Optional[List[str]] = None,\n algorithm_passes: Optional[List[AlgorithmPass]] = None,\n):\n # Download the checkpoint to the node-local folder\n log.debug('Loading checkpoint at %s', path)\n # Each node gets one unique folder to store checkpoints that is shared amongst all local ranks in that node.\n # If fsdp sharded state_dicts is enabled then EVERY rank gets a unique checkpoint folder.\n needs_unique_checkpoint_folder = state.fsdp_sharded_state_dict_enabled or dist.get_local_rank() == 0\n tempdir_ctx = tempfile.TemporaryDirectory() if needs_unique_checkpoint_folder else contextlib.nullcontext(None)\n with tempdir_ctx as tempdir:\n try:\n # Get the path to the proper checkpoint folder corresponding to the current rank's node.\n # If fsdp_sharded_state_dict_enabled then just use that rank's unique tempdir.\n node_checkpoint_folder = (tempdir\n if state.fsdp_sharded_state_dict_enabled else _get_local_rank_zero_path(tempdir))\n assert node_checkpoint_folder is not None\n\n composer_states_filepath, extracted_checkpoint_folder, extracted_rank_n = download_checkpoint(\n path=path,\n node_checkpoint_folder=node_checkpoint_folder,\n object_store=object_store,\n progress_bar=progress_bar,\n fsdp_sharded_state_dict_enabled=state.fsdp_sharded_state_dict_enabled,\n deepspeed_sharded_checkpoint=is_model_deepspeed(state.model),\n )\n rng_state_dicts = _restore_checkpoint(\n state,\n logger,\n composer_states_filepath,\n extracted_rank_n,\n extracted_checkpoint_folder,\n load_weights_only=load_weights_only,\n strict_model_weights=strict_model_weights,\n ignore_keys=ignore_keys,\n exclude_algorithms=exclude_algorithms,\n algorithm_passes=algorithm_passes,\n )\n finally:\n # Wait for all ranks to finish restoring the checkpoint before releasing the tempdir, since tempdir can\n # be a shared resource between nodes.\n dist.barrier()\n\n log.info('%s loaded from %s', 'Model weights' if load_weights_only else 'Trainer checkpoint', path)\n return rng_state_dicts",
"def download_checkpoint_ngc(checkpoint_url: str, checkpoint_path: pathlib.Path) -> None:\n with tqdm(unit=\"B\") as t:\n reporthook = download_progress(t)\n result = urllib.request.urlretrieve(checkpoint_url, reporthook=reporthook)\n\n filename = result[0]\n\n file_path = pathlib.Path(filename)\n assert file_path.is_file() or file_path.is_dir(), \"Checkpoint was not downloaded\"\n\n shutil.move(file_path, checkpoint_path.parent / file_path.name)\n\n archive_path = checkpoint_path.parent / file_path.name\n unzip(checkpoint_path, archive_path)",
"def parse_checkpoint(checkpoint_path):\n with gfile.Open(checkpoint_path, 'rb') as fp:\n raw_contents = fp.read()\n if raw_contents.startswith(b'model_checkpoint_path'):\n raise ValueError(\n 'Attempting to restore a TensorFlow checkpoint as a native T5X '\n f'checkpoint. Path: {checkpoint_path}')\n return serialization.msgpack_restore(raw_contents)",
"def _restore_checkpoint(\n state: State,\n logger: Logger,\n composer_states_filepath: str,\n extracted_rank_n: bool,\n extracted_checkpoint_folder: Optional[str],\n load_weights_only: bool,\n strict_model_weights: bool,\n ignore_keys: Optional[Union[List[str], Callable[[Dict], None]]],\n exclude_algorithms: Optional[List[str]],\n algorithm_passes: Optional[List[AlgorithmPass]],\n) -> Optional[List[Dict[str, Any]]]:\n # Now, all ranks load the checkpoint that local rank zero downloaded\n state_dict = safe_torch_load(\n composer_states_filepath=composer_states_filepath,\n load_fsdp_monolith_rank0_only=state.load_fsdp_monolith_rank0_only,\n )\n if ignore_keys:\n # Filter provided list of key paths\n if not callable(ignore_keys):\n ignore_keys = glob_filter(ignore_keys)\n # Call function to modify state_dict\n ignore_keys(state_dict)\n log.debug(f\"Loaded checkpoint with keys {state_dict.keys()} and state keys {state_dict['state'].keys()}\")\n\n if is_model_deepspeed(state.model):\n if extracted_checkpoint_folder is None:\n raise RuntimeError('Deepspeed checkpoints require a tarball, not a weights file.')\n\n global_rank = dist.get_global_rank()\n if global_rank > 0 and not extracted_rank_n:\n raise RuntimeError(f'Deepspeed checkpoint missing for rank {global_rank}')\n\n load_path, _ = state.deepspeed_model.load_checkpoint(\n extracted_checkpoint_folder,\n tag=_DEEPSPEED_TAG,\n load_module_only=load_weights_only,\n load_module_strict=strict_model_weights,\n )\n if load_path is None:\n raise RuntimeError('Failed to load DeepSpeed checkpoint')\n elif load_weights_only:\n state.load_model_state(\n state_dict['state'],\n logger,\n strict=strict_model_weights,\n exclude_algorithms=exclude_algorithms,\n algorithm_passes=algorithm_passes,\n )\n if not load_weights_only:\n state.load_state_dict(\n state_dict['state'],\n logger,\n exclude_algorithms=exclude_algorithms,\n algorithm_passes=algorithm_passes,\n )\n step_to_resume_from = state.timestamp.batch.value\n max_step_to_resume_from = state.device.tensor_to_device(\n torch.tensor(state.timestamp.batch.value, dtype=torch.int64))\n min_step_to_resume_from = state.device.tensor_to_device(\n torch.tensor(state.timestamp.batch.value, dtype=torch.int64))\n dist.all_reduce(max_step_to_resume_from, reduce_operation='MAX')\n dist.all_reduce(min_step_to_resume_from, reduce_operation='MIN')\n if max_step_to_resume_from.data != min_step_to_resume_from.data:\n raise RuntimeError(\n textwrap.dedent(\n f'Timestamp mismatch error: batch to resume from {step_to_resume_from} is not the same on all ranks. '\n 'This usually occurs when at least one rank fails to save the last checkpoint '\n 'while using sharded checkpointing + autoresume. '\n 'Please manually resume by disabling autoresume and explicitly setting load_path '\n 'to the most recent checkpoints that all ranks have saved. '\n 'E.g. for the 10th batch: trainer = Trainer(autoresume=False, load_path=\"/path/to/checkpoint/ba10-rank{rank}.pt\", ...). '\n 'Remember to keep the {rank} placeholder!'))\n return state_dict['rng']",
"def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")",
"def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict",
"def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)",
"def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)",
"def get_checkpoint_file(path, checkpoint):\n if checkpoint is not None:\n return checkpoint\n else:\n file_list = glob.glob(path + \"/*.pt\")\n if not file_list:\n raise AssertionError(\"Please ensure a checkpoint file (.pt)\"\n f\" is present in folder {path}\")\n elif len(file_list) > 1:\n raise AssertionError(\n \"Multiple checkpoint files present. Either ensure only 1 \"\n \"is present or indicate which checkpoint to use \"\n \"via --checkpoint\")\n else:\n return split(file_list[0])[1]",
"def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)",
"def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)",
"def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)",
"def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] Failed to find a checkpoint, Exception!\")\n return False, 0",
"def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state",
"def load_checkpoint(checkpoint_dir, epoch, iteration):\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n if not os.path.isfile(path):\n raise Exception(\"Checkpoint in epoch %d doesn't exist :sob:\" % epoch)\n\n checkpoint = torch.load(path)\n start_epoch = checkpoint['epoch']\n state_dict = checkpoint['state_dict']\n start_iteration = checkpoint['iteration']\n\n assert iteration == start_iteration\n return start_epoch, start_iteration, state_dict",
"def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket",
"def restore_checkpoint(checkpoint_path: str,\n train_state: Optional[TrainState] = None,\n assert_exist: bool = False,\n step: Optional[int] = None) -> Tuple[TrainState, int]:\n if assert_exist:\n glob_path = os.path.join(checkpoint_path, 'checkpoint_*')\n if not gfile.glob(glob_path):\n raise ValueError('No checkpoint for the pretrained model is found in: '\n f'{checkpoint_path}')\n if train_state is None:\n raise ValueError('Please use `restore_pretrained_checkpoint` for loading'\n 'a checkpoint without providing a Scenic TrainState.')\n train_state = checkpoints.restore_checkpoint(checkpoint_path, train_state,\n step)\n return train_state, int(train_state.global_step)",
"def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )",
"def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state",
"def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])",
"def get_latest_checkpoint(cls, experiment_path):\n checkpoints_path = os.path.join(experiment_path, cls.CHECKPOINT_DIR_NAME)\n all_times = sorted(os.listdir(checkpoints_path), reverse=True)\n return os.path.join(checkpoints_path, all_times[0])",
"def restore_checkpoint(self, checkpoint_id, name, path=''):\n\n\t\tself.log.debug(\"restoring Notebook %s from checkpoint %s\", name, checkpoint_id)\n\t\tnb_path = self._get_os_path(name, path)\n\t\tcp_path = self.get_checkpoint_path(checkpoint_id, name, path)\n\n\t\tif not key_exists(self.bucket, cp_path):\n\t\t\tself.log.debug(\"checkpoint file does not exist: %s\", cp_path)\n\t\t\traise web.HTTPError(404,\n\t\t\t\tu'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)\n\t\t\t)\n\t\t# ensure notebook is readable (never restore from an unreadable notebook)\n\t\tkey = self.bucket.get_key(cp_path)\n\t\tnb = current.reads(key.get_contents_as_string(), u'json')\n\t\tself._copy(cp_path, nb_path)\n\t\tself.log.debug(\"copying %s -> %s\", cp_path, nb_path)",
"def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state",
"def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)",
"def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state",
"def load_checkpoint(path: str, use_cuda: bool = True) -> dict:\n assert os.path.isfile(path), \"Checkpoint %s not found\" % path\n checkpoint = torch.load(path, map_location=\"cuda\" if use_cuda else \"cpu\")\n return checkpoint",
"def restore(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step",
"def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))",
"def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']"
] | [
"0.62694466",
"0.61993414",
"0.6029626",
"0.5934021",
"0.5839948",
"0.5779024",
"0.5675783",
"0.5657235",
"0.55893075",
"0.5551518",
"0.5551518",
"0.55440056",
"0.55313367",
"0.5514979",
"0.5417088",
"0.54159427",
"0.5401518",
"0.5353508",
"0.53150314",
"0.529472",
"0.5274455",
"0.5255847",
"0.52468973",
"0.5233274",
"0.5200701",
"0.51954675",
"0.5182463",
"0.5162873",
"0.51401293",
"0.5114447"
] | 0.7012607 | 0 |
Recursively flatten the keys of a dictionary or list into a set of paths. | def _flatten_keys(obj: Any, paths: List[str], existing_path: str):
# Store path when we reach end, which is either non-Dict or empty Dict
if isinstance(obj, list) and len(obj) > 0:
for i, elm in enumerate(obj):
_flatten_keys(elm, paths, f'{existing_path}/{i}')
elif isinstance(obj, dict) and len(obj) > 0:
for k, v in obj.items():
_flatten_keys(v, paths, f'{existing_path}/{k}')
# Remove leading /
paths.append(existing_path.lstrip('/')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flatten_keys(in_keys):\n return_keys = []\n if isinstance(in_keys, str):\n return [in_keys]\n if isinstance(in_keys, Iterable):\n for key in in_keys:\n if isinstance(key, Iterable):\n return_keys += flatten_keys(key)\n else:\n return_keys.append(key)\n else:\n return_keys.append(in_keys)\n return return_keys",
"def flatten(d, path):\n\n if isinstance(d, dict):\n for k, v in d.items():\n yield from flatten(v, path + [k])\n else:\n yield (\".\".join(path), d)",
"def flatten_dict_string_keys(x):\n return {'/'.join(k): v for k, v in flatten_dict(unfreeze(x)).items()}",
"def _flatten(object_, key):\n # Empty object can't be iterated, take as is\n if not object_:\n flattened_dict[key] = object_\n # These object types support iteration\n elif isinstance(object_, dict):\n for object_key in object_:\n if not (not key and object_key in root_keys_to_ignore):\n _flatten(object_[object_key], _construct_key(key,\n separator,\n object_key))\n elif isinstance(object_, (list, set, tuple)):\n for index, item in enumerate(object_):\n _flatten(item, _construct_key(key, separator, index))\n # Anything left take as is\n else:\n flattened_dict[key] = object_",
"def dict_path(my_dict, path=None):\n if path is None:\n path = \"\"\n for k, v in my_dict.items():\n newpath = path + (\".\" if path != \"\" else \"\") + k\n if isinstance(v, dict):\n for u in dict_path(v, newpath):\n yield u\n else:\n yield newpath, v",
"def graph_walk_collection_flat(indict, pre=None):\n pre = pre[:] if pre else []\n \n # if isinstance(indict, dict):\n if type(indict) in [dict, OrderedDict]:\n for key, value in indict.items():\n # if isinstance(value, dict):\n if type(value) in [dict, OrderedDict]:\n for d in graph_walk_collection_flat(value, [key] + pre):\n yield d\n # elif isinstance(value, list) or isinstance(value, tuple):\n elif type(value) in [list, tuple]:\n for v in value:\n for d in graph_walk_collection_flat(v, [key] + pre):\n yield d\n else:\n yield pre + [key, value]\n else:\n yield indict",
"def flat (root, ignore_access_errors=False):\n for key, subkeys, values in walk (root, ignore_access_errors):\n yield key\n for value in values:\n yield value",
"def _recursiveURISearch(self, multidict):\r\n valueList = []\r\n keys = []\r\n\r\n for k, v in multidict.iteritems():\r\n if isinstance(v, dict):\r\n valueList += self._recursiveURISearch(v)\r\n elif k[-1] == '*':\r\n keys.append(k)\r\n\r\n for k in keys:\r\n ele = multidict.pop(k)\r\n\r\n if isinstance(ele, list):\r\n lst = [None] * len(ele)\r\n multidict[k[:-1]] = lst\r\n\r\n for i, uri in enumerate(ele):\r\n valueList.append((uri, lst, i))\r\n else:\r\n valueList.append((ele, multidict, k[:-1]))\r\n\r\n return valueList",
"def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n items = dict(items)\n # remove info like PCA primitive ID\n items_not_strings = {k: v for k, v in items.items() if type(v) != str}\n return dict(items_not_strings)",
"def flatten_object(obj):\n\tpaths = []\n\n\tif isinstance(obj, (dict,)):\n\t\tfor f in obj:\n\t\t\tsub_paths = flatten_object(obj[f])\n\t\t\tfor p in sub_paths:\n\t\t\t\tpaths.append((\"/{}{}\".format(f, p[0]), p[1]))\n\telif isinstance(obj, (list,)):\n\t\tfor i, x in enumerate(obj):\n\t\t\tsub_paths = flatten_object(x)\n\t\t\tfor p in sub_paths:\n\t\t\t\tpaths.append((\"/{}{}\".format(i, p[0]), p[1]))\n\telse:\n\t\tpaths = [(\"\", obj)]\n\n\treturn paths",
"def get_by_list_of_keys(dictionary: Dict, key_path: List[Any]) -> Dict:\n if len(key_path) == 1:\n return dictionary[key_path[0]]\n else:\n return get_by_list_of_keys(dictionary[key_path[0]], key_path[1:])",
"def dict_get_nodekeys_recursive(d):\n nodekeys = set(d.keys())\n for nk in nodekeys:\n # print \"nodekey\", nk\n # print \"graphkeys\", d[nk]['params'].keys()\n if 'graph' in d[nk]['params']:\n # print \"graphkeys\", d[nk]['params']['graph'].keys()\n nodekeys = nodekeys.union(dict_get_nodekeys_recursive(d[nk]['params']['graph']))\n return nodekeys",
"def flatten(cmd, path=\"\", fc={}, sep=\".\"):\n fcmd = fc.copy()\n if isinstance(cmd, dict):\n for k, v in cmd.items():\n k = k.split(\":\")[1] if \":\" in k else k\n fcmd = flatten(v, sep.join((path, k)) if path else k, fcmd)\n elif isinstance(cmd, list):\n for n, v in enumerate(cmd):\n fcmd.update(flatten(v, sep.join([path, str(n)])))\n else:\n fcmd[path] = cmd\n return fcmd",
"def _Flatten(obj):\n\n def Flatten(obj, name, res):\n \"\"\"Recursively appends keys in path from obj into res.\n\n Args:\n obj: The object to flatten.\n name: The key name of the current obj.\n res: The ordered result value list.\n \"\"\"\n if isinstance(obj, list):\n if obj:\n for i, item in enumerate(obj):\n Flatten(item, '{name}[{index}]'.format(name=name, index=i), res)\n else:\n res.append((name, []))\n elif isinstance(obj, dict):\n if obj:\n for k, v in sorted(obj.iteritems()):\n Flatten(v, '{name}{dot}{key}'.format(\n name=name, dot='.' if name else '', key=k), res)\n else:\n res.append((name, {}))\n elif isinstance(obj, float):\n res.append((name, resource_transform.TransformFloat(obj)))\n else:\n res.append((name, obj))\n\n res = []\n Flatten(obj, '', res)\n return res",
"def RecurseKeys(self):\n yield self\n for subkey in self.GetSubkeys():\n for key in subkey.RecurseKeys():\n yield key",
"def _resolve_paths(d, path):\n try:\n if len(path) == 0:\n yield (), d\n elif len(path) == 1:\n yield (path[0],), d[path[0]]\n else:\n if path[0] == '*':\n keys = d.keys()\n else:\n keys = [path[0]]\n for key in keys:\n for p, v in CombinatorialTree._resolve_paths(d[key], path[1:]):\n if v is not None:\n yield (key,) + p, v\n except KeyError:\n yield None, None",
"def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)",
"def expand_flattened_dict(flattened, separator='.'):\n merged = {}\n for key, value in flattened.items():\n expanded = expand_flattened_path(key, value=value, separator=separator)\n merged = merge_dicts(merged, expanded)\n return merged",
"def flatten(items):\n for k, v in items:\n if isinstance(k, (list, tuple)):\n for key in k:\n yield key, v\n else:\n yield k, v",
"def flatten_paths(*items):\r\n\r\n flat = []\r\n\r\n def flatmap(item):\r\n if isinstance(item, Compatibility.string):\r\n flat.append(item)\r\n else:\r\n try:\r\n for i in iter(item):\r\n flatmap(i)\r\n except TypeError:\r\n if callable(item):\r\n flatmap(item())\r\n else:\r\n flat.append(item)\r\n\r\n for item in items:\r\n flatmap(item)\r\n\r\n return flat",
"def get_final_key_paths(\n obj: Union[dict, list, tuple], cur_path: str = '',\n append_values: bool = False,\n paths: list = None, black_list: list = None,\n final_keys_only: bool = False):\n if paths is None:\n paths = []\n\n if isinstance(obj, (dict, list, tuple)):\n if isinstance(obj, dict):\n for key in obj:\n new_path = cur_path + f'[\\'{key}\\']'\n if isinstance(obj[key], dict):\n if black_list is not None and key in black_list:\n continue\n get_final_key_paths(\n obj[key], new_path, append_values, paths, black_list,\n final_keys_only)\n elif isinstance(obj[key], (list, tuple)):\n get_final_key_paths(\n obj[key], new_path, append_values, paths, black_list,\n final_keys_only)\n else:\n if final_keys_only:\n last_bracket = new_path.rfind('[\\'')\n new_path = new_path[\n last_bracket+2:new_path.rfind('\\'')]\n if append_values:\n to_append = [new_path, obj[key]]\n else:\n to_append = new_path\n paths.append(to_append)\n else:\n key_added = False\n for i in range(len(obj)):\n if isinstance(obj[i], (dict, tuple, list)):\n get_final_key_paths(\n obj[i], cur_path + f'[{i}]', append_values,\n paths, black_list, final_keys_only)\n else:\n if not key_added:\n if final_keys_only:\n last_bracket = cur_path.rfind('[\\'')\n cur_path = cur_path[\n last_bracket+2:cur_path.rfind('\\'')]\n if append_values:\n to_append = [cur_path, obj]\n else:\n to_append = cur_path\n paths.append(to_append)\n key_added = True\n\n return paths",
"def dict_flatten(*args):\n hold = []\n for a in args:\n hold.append([i for s in a.values() for i in s])\n return hold",
"def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result",
"def unflatten(\n d: Dict[str, Any],\n base: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n if base is None:\n base = {}\n\n for key, value in d.items():\n root = base\n\n ###\n # If a dotted path is encountered, create nested dicts for all but\n # the last level, then change root to that last level, and key to\n # the final key in the path. This allows one final setitem at the bottom\n # of the loop.\n if '.' in key:\n *parts, key = key.split('.')\n\n for part in parts:\n root.setdefault(part, {})\n root = root[part]\n\n if isinstance(value, dict):\n value = unflatten(value, root.get(key, {}))\n\n root[key] = value\n\n return base",
"def serialize_dict_keys(d, prefix=\"\"):\n keys = []\n for k, v in d.items():\n fqk = \"{}{}\".format(prefix, k)\n keys.append(fqk)\n if isinstance(v, dict):\n keys.extend(serialize_dict_keys(v, prefix=\"{}.\".format(fqk)))\n\n return keys",
"def unflatten(arg):\n if hasattr(arg, \"iteritems\"):\n items = arg.iteritems()\n elif hasattr(arg, \"items\"):\n items = arg.items()\n else:\n items = arg\n\n data = {}\n holders = []\n for flat_key, val in items:\n parsed_key = _parse_key(flat_key)\n obj = data\n for depth, (key, next_key) in enumerate(zip(parsed_key, parsed_key[1:]), 1):\n if isinstance(next_key, string_type):\n holder_type = _dict_holder\n else:\n holder_type = _list_holder\n\n if key not in obj:\n obj[key] = holder_type(_unparse_key(parsed_key[:depth]))\n holders.append((obj, key))\n elif not isinstance(obj[key], holder_type):\n raise ValueError(\n \"conflicting types %s and %s for key %r\"\n % (\n _node_type(obj[key]),\n holder_type.node_type,\n _unparse_key(parsed_key[:depth]),\n )\n )\n obj = obj[key]\n\n last_key = parsed_key[-1]\n if isinstance(obj.get(last_key), _holder):\n raise ValueError(\n \"conflicting types %s and terminal for key %r\"\n % (_node_type(obj[last_key]), flat_key)\n )\n obj[last_key] = val\n\n for obj, key in reversed(holders):\n obj[key] = obj[key].getvalue()\n\n return data",
"def allkeys(self, as_str=False):\n for key in self.__allkeys((\"__ROOT__\",), {\"__ROOT__\": self}):\n yield \".\".join(key) if as_str else key",
"def graph_walk_dict_flat(indict, pre=None):\n pre = pre[:] if pre else []\n if isinstance(indict, dict):\n for key, value in indict.items():\n if isinstance(value, dict):\n for d in dict_generator(value, [key] + pre):\n yield d\n elif isinstance(value, list) or isinstance(value, tuple):\n for v in value:\n for d in dict_generator(v, [key] + pre):\n yield d\n else:\n yield pre + [key, value]\n else:\n yield indict",
"def nested_set(dictionary: dict, keys: list, value):\n nested_dict = dictionary\n for key in keys[:-1]:\n nested_dict = nested_dict[key]\n nested_dict[keys[-1]] = value\n return dictionary",
"def flatten(d: Union[dict, list], parent_key: str = \"\", sep: str = \".\") -> dict:\n items = []\n if isinstance(d, dict):\n for k, v in d.items():\n new_key = f\"{parent_key}{sep}{k}\" if parent_key else str(k)\n items.extend(flatten(v, new_key, sep=sep).items())\n elif isinstance(d, list):\n for i, elem in enumerate(d):\n new_key = f\"{parent_key}{sep}{i}\" if parent_key else str(i)\n items.extend(flatten(elem, new_key, sep).items())\n else:\n items.append((parent_key, d))\n return dict(items)"
] | [
"0.6875952",
"0.6714829",
"0.6393634",
"0.6268066",
"0.6147617",
"0.60040855",
"0.5961839",
"0.58822143",
"0.5856093",
"0.58354765",
"0.5823535",
"0.58075947",
"0.58066595",
"0.5770507",
"0.57541543",
"0.5746323",
"0.574392",
"0.57279015",
"0.5708329",
"0.5703219",
"0.5697986",
"0.56909585",
"0.568402",
"0.5648774",
"0.56477576",
"0.5638721",
"0.5638624",
"0.56355137",
"0.5568469",
"0.555309"
] | 0.7069572 | 0 |
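A short, self-contained usage sketch of the `_flatten_keys` document in the row above; the function body is reproduced from that row, and the sample state dict is invented purely for illustration.

```python
from typing import Any, List


def _flatten_keys(obj: Any, paths: List[str], existing_path: str):
    # Store path when we reach end, which is either non-Dict or empty Dict
    if isinstance(obj, list) and len(obj) > 0:
        for i, elm in enumerate(obj):
            _flatten_keys(elm, paths, f'{existing_path}/{i}')
    elif isinstance(obj, dict) and len(obj) > 0:
        for k, v in obj.items():
            _flatten_keys(v, paths, f'{existing_path}/{k}')
    # Remove leading /
    paths.append(existing_path.lstrip('/'))


state = {'model': {'layers': [{'weight': 1}, {'bias': 2}]}, 'rng': {}}
paths: List[str] = []
_flatten_keys(state, paths, '')
print(paths)
# ['model/layers/0/weight', 'model/layers/0', 'model/layers/1/bias',
#  'model/layers/1', 'model/layers', 'model', 'rng', '']
```

Note that every intermediate container path is recorded in addition to the leaf paths, and the root call contributes a trailing empty string.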
Replace a file with its compressed version. The contents will be called ``basename`` inside the compressed archive. | def _compress_file(filename: str, basename: str):
write_mode = _get_write_mode(filename)
with tempfile.TemporaryDirectory() as tmpdir:
shutil.move(filename, os.path.join(tmpdir, basename))
with tarfile.open(filename, write_mode) as tarball:
tarball.add(tmpdir, arcname='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_file(filename, contents):\n filename = path.join(PATH_ROOT, filename)\n filename_bak = \"%s.release.bak\" % filename\n os.rename(filename, filename_bak)\n with open(filename, \"w\") as out_file:\n out_file.write(\"\".join(contents))\n shutil.copymode(filename_bak, filename)\n os.remove(filename_bak)",
"def gzip_and_name(fname, gzip_file=True) -> str:\n if gzip_file:\n with open(fname, \"rb\") as f_in:\n with gzip.open(fname + \".gz\", \"wb\") as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.unlink(fname)\n fname += \".gz\"\n return fname",
"def replace_extract(self, file_path):\n tmp_file = shutil.copy2(self.path, \"tmpzip\")\n with ZipFile(tmp_file) as src, ZipFile(self.path, \"w\") as dst:\n for src_info in src.infolist():\n _, src_tail = path.split(src_info.filename)\n _, file_tail = path.split(file_path)\n if src_tail == file_tail:\n dst.write(file_path, src_info.filename)\n else:\n with src.open(src_info) as src_file:\n dst.writestr(src_info, src_file.read())\n\n remove(tmp_file)",
"def _gzip_file(filename):\n gzip_filename = filename + '.gz'\n with open(filename, 'rb') as f_in, gzip.open(gzip_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)",
"def compress(filename, remove=False):\n import gzip\n fin = open(filename, 'rb')\n fout = gzip.open(filename+'.gz', 'wb')\n fout.writelines(fin)\n fout.close()\n fin.close()\n if remove == True:\n os.remove(filename)\n return",
"def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)",
"def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)",
"def _uncompress(fname, outdir, msg=msg):\n import os\n assert os.access(fname, os.R_OK), \"could not access [%s]\" % fname\n fname = os.path.abspath(os.path.realpath(fname))\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n orig_dir = os.getcwd()\n try:\n os.chdir(outdir)\n ext = os.path.splitext(fname)[1][1:] # drop the dot\n if ext in ('gz', 'bz2'):\n import tarfile\n f = tarfile.open(fname, 'r:%s'%ext)\n f.extractall()\n else:\n err = 'extension [%s] not handled (yet?)' % ext\n msg.error(err)\n raise ValueError(err)\n finally:\n os.chdir(orig_dir)",
"def gzipFile(aFile):\n f_in = open(aFile, \"rb\")\n f_out = gzip.open(aFile + \".gz\", \"wb\")\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()\n os.remove(aFile)",
"def uncompress(compressed_file, dest_dir = None):\n\n\trouting_pairs = (\n\t\t(\".tar.gz\", _uncompress_targz),\n\t\t(\".tgz\", _uncompress_targz),\n\t\t(\".tar\", _uncompress_tar),\n\t\t(\".zip\", _uncompress_zip)\n\t)\n\n\tfound_handler = None\n\tfor suffix, handler in routing_pairs:\n\t\tif compressed_file.filename.endswith(suffix):\n\t\t\tfound_handler = handler\n\t\t\tbreak\n\telse:\n\t\traise ValueError(\"Compressed file does not have known format.\")\n\n\t# If we didn't get a directory to place the uncompressed files into, create\n\t# a temporary one.\n\tif dest_dir is None:\n\t\tdest_dir = tempfile.mkdtemp()\n\n\ttempfile_handle, tempfile_path = tempfile.mkstemp()\n\tos.close(tempfile_handle)\n\n\ttry:\n\t\tcompressed_file.save(tempfile_path)\n\n\t\tfound_handler(tempfile_path, dest_dir)\n\tfinally:\n\t\tos.remove(tempfile_path)\n\n\treturn dest_dir",
"def extracted_file(fname: str):\n new_fname = extract_gzip(fname)\n gzipped = True\n if new_fname is None:\n new_fname = fname\n gzipped = False\n\n try:\n yield new_fname\n finally:\n if gzipped:\n try:\n bgzip_and_name(new_fname)\n except Exception:\n gzip_and_name(new_fname)",
"def compress_file(netcdf_file_name):\n\n radar_io.compress_file(netcdf_file_name)",
"def zip_file(file_path: str) -> str:\n zip_file_path: str = file_path + \".gz\"\n\n print(f\"Compressing {file_path} into {zip_file_path}\")\n timestamp=path.getmtime(file_path)\n with open(file_path, \"rb\") as read_stream:\n with gzip.open(zip_file_path, \"wb\") as write_stream:\n shutil.copyfileobj(read_stream, write_stream)\n os.utime(zip_file_path, (timestamp,timestamp) )\n\n return zip_file_path",
"def ungz(filepath, compression='rb', deletesource=False):\n\n import gzip\n\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception(\"Could not delete gz archive {0}.\".format(filepath))\n\n return filepath[:-3]",
"def create_compressed_file(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name)\n\t\tself._is_png = 'png' in self.file_extension\n\t\tself._is_jpg = 'jpg' in self.file_extension\n\n\t\timage = Image.open(self.full_path)\n\n\t\tif self._is_png:\n\t\t\timage.save(self._compressed_save_path, quality=85, optimize=False, compress_level=9)\n\t\telif self._is_jpg:\n\t\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\t\telse:\n\t\t\tprint('Non-recognized asset format!!')\n\t\t\texit()\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)",
"def decompressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.decompress(data)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)",
"def replace_file(pattern, substr, filename):\n file_handle = open(filename, \"r\")\n file_string = file_handle.read()\n file_handle.close()\n\n file_string = re.sub(pattern, substr, file_string)\n\n file_handle = open(filename, \"w\", newline=\"\\n\")\n file_handle.write(file_string)\n file_handle.close()",
"def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]",
"def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]",
"def minify(filename):\r\n if not is_min(filename):\r\n new_filename = re.sub(\".js$\", \"-min.js\", filename)\r\n\r\n with open(filename) as shrink_me:\r\n with open(new_filename, 'w') as tobemin:\r\n jsm = JavascriptMinify()\r\n jsm.minify(shrink_me, tobemin)",
"def compress_file(compression, pretty, src, dst):\n str_tail = \"sed 1d\"\n str_cleanup = \";exit\"\n if pretty:\n str_tail = \"tail -n+2\"\n str_cleanup = \";rm ~;exit\"\n if \"lzma\" == compression:\n command = [\"xz\", \"--format=lzma\", \"--lzma1=preset=9e,lc=1,lp=0,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"raw\" == compression:\n command = [\"xz\", \"-9\", \"--extreme\", \"--format=raw\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"xz\" == compression:\n command = [\"xz\", \"--format=xz\", \"--lzma2=preset=9e,lc=1,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n else:\n raise RuntimeError(\"unknown compression format '%s'\" % compression)\n (compressed, se) = run_command(command + [src], False)\n wfd = open(dst, \"wb\")\n wfd.write((header + \"\\n\").encode())\n wfd.write(compressed)\n wfd.close()\n make_executable(dst)\n print(\"Wrote '%s': %i bytes\" % (dst, os.path.getsize(dst)))",
"def compress_file(map_, name, save_path):\n size = os.path.getsize(save_path)\n temp = subprocess.run([\"gzip\", \"-k\", save_path])\n cr_size = os.path.getsize(save_path+\".gz\")\n try:\n map_[name] = cr_size / size\n except Exception as e:\n print(f\"File: {save_path}, Ori:{size}, Compr:{cr_size}\")\n print(e)\n raise ZeroDivisionError\n temp = subprocess.run([\"rm\", save_path])\n temp = subprocess.run([\"rm\", save_path+\".gz\"])",
"def _compress_meds_file(self, ucfilename, fzfilename):\n from os.path import basename\n\n tup=(basename(ucfilename),basename(fzfilename))\n print('compressing file: %s -> %s' % tup)\n tpath=files.expandpath(fzfilename)\n if os.path.exists(tpath):\n os.remove(tpath)\n\n tmpdir = os.path.dirname(ucfilename)\n with StagedOutFile(fzfilename,tmpdir=tmpdir) as sf:\n cmd = self['fpack_command']\n cmd = cmd.format(fname=ucfilename)\n ret=os.system(cmd)\n\n if ret != 0:\n raise RuntimeError(\"failed to compress file\")\n\n print('output is in:',fzfilename)",
"def replace_file(new_content, current_location):\r\n\tif should_replace(new_content, current_location):\r\n\t\tabs_path = os.path.abspath(current_location)\r\n\t\tcurrent_dir, filename = os.path.split(abs_path)\r\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\r\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\r\n\r\n\t\ttry:\r\n\t\t\twith open(tmp_path, 'w') as tmp:\r\n\t\t\t\ttmp.write(new_content.getvalue())\r\n\t\t\tos.rename(tmp_path, abs_path)\t\r\n\t\texcept IOError:\r\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\r\n\t\t\treturn False\r\n\t\treturn True\r\n\treturn False",
"def handle_file(self, path):\n\n if path:\n if not matches_patterns(path, self.gzip_patterns):\n return\n\n try:\n original_file = self.open(path, mode=\"rb\")\n except FileNotFoundError:\n pass\n else:\n gzipped_path = \"{0}.gz\".format(path)\n\n if self.exists(gzipped_path):\n self.delete(gzipped_path)\n\n gzipped_file = self._compress(original_file)\n gzipped_path = self.save(gzipped_path, gzipped_file)\n\n return gzipped_path, gzipped_path, True",
"def debz(oldfn, newfn):\n if os.path.isfile(newfn):\n print(\"Error: refusing to overwrite existing file '%s'\" % (newfn, ))\n return\n output = open(newfn, 'wb')\n fobj = open(oldfn, 'rb')\n\n output.write(fobj.read(24))\n while True:\n sz = struct.unpack('>L', fobj.read(4))[0]\n chunk = fobj.read(sz)\n if not chunk:\n break\n output.write(bz2.decompress(chunk))\n # unsure of this\n if sz != len(chunk):\n break\n\n output.close()",
"def replace_file(new_content, current_location):\n\tif should_replace(new_content, current_location):\n\t\tabs_path = os.path.abspath(current_location)\n\t\tcurrent_dir, filename = os.path.split(abs_path)\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\n\n\t\ttry:\n\t\t\twith open(tmp_path, 'w') as tmp:\n\t\t\t\ttmp.write(new_content.getvalue())\n\t\t\tos.rename(tmp_path, abs_path)\t\n\t\texcept IOError:\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\n\t\t\treturn False\n\t\treturn True\n\treturn False",
"def compress(src,dstfile):\n\tafile = zipfile.ZipFile(dstfile,\"w\",zipfile.ZIP_DEFLATED)\n\tfor root,dirs,files in os.walk(src):\n\t\tfor filename in files:\n\t\t\tabspath = osp.join(root,filename)\n\t\t\trelpath = osp.relpath(abspath,src)\n\t\t\tafile.write(abspath, relpath)\n\tafile.close();",
"def replace_tag(tag, value, file):\r\n with open(file, \"r\") as origin:\r\n with open(file+\".replaced\", \"w\") as dest:\r\n dest.write(origin.read().replace(tag, str(value)))\r\n return file+\".replaced\"",
"def updateFile(filename, content):\n\tfilename = adaptPath(filename)\n\tif filename != None:\n\t\ttry:\n\t\t\toldContent = open(filename, \"r\").read()\n\t\texcept IOError:\n\t\t\toldContent = \"\"\n\t\tif oldContent != content:\n\t\t\tfile = open (filename, \"w\")\n\t\t\tfile.write(content)\n\t\t\tfile.close()\n\treturn content"
] | [
"0.6415052",
"0.6314928",
"0.6199872",
"0.6001604",
"0.5810293",
"0.5780952",
"0.5752636",
"0.57117546",
"0.5672663",
"0.5574416",
"0.55683494",
"0.55642724",
"0.55603486",
"0.5546732",
"0.5530672",
"0.55286574",
"0.550386",
"0.5496359",
"0.5496359",
"0.54615456",
"0.54451406",
"0.54386413",
"0.5422494",
"0.54118186",
"0.54007465",
"0.53968054",
"0.5394201",
"0.5379679",
"0.5343281",
"0.534254"
] | 0.6844481 | 0 |
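The `_compress_file` document in the row above calls a `_get_write_mode` helper that is not shown in this row. A minimal sketch of what such a helper could look like, mapping the archive suffix to a `tarfile` write mode; the exact extension-to-mode mapping is an assumption, not taken from the source.

```python
def _get_write_mode(name: str) -> str:
    # Sketch only: the real helper is not part of this row, and this
    # extension-to-mode mapping is assumed rather than copied from the source.
    if name.endswith('.tar'):
        return 'w'
    if name.endswith('.tar.gz') or name.endswith('.tgz'):
        return 'w:gz'
    if name.endswith('.tar.bz2'):
        return 'w:bz2'
    if name.endswith('.tar.xz'):
        return 'w:xz'
    raise ValueError(f'{name} does not end with a supported tarfile extension.')
```

With a helper like this, calling `_compress_file` on a path ending in `.tar.gz` rewrites the file as a gzip tarball whose single member carries the given `basename`.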
Save Deepspeed model and tarball the files. | def _save_deepspeed_model(model, filename: str):
write_mode = _get_write_mode(filename)
read_mode = f'r{write_mode[1:]}'
with tempfile.TemporaryDirectory() as tmpdir:
model.save_checkpoint(tmpdir, _DEEPSPEED_TAG)
if os.path.exists(filename):
# extract to tmpdir to append below
# not all compression formats support direct append
with tarfile.open(filename, read_mode) as tar:
tar.extractall(tmpdir)
with tarfile.open(filename, write_mode) as tar:
tar.add(tmpdir, arcname='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_model(self, step):\n\n # file_name = params['name']\n # pickle.dump(self, gzip.open(file_name, 'wb'))",
"def save(self, path=None):\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)",
"def save(self, path):\n with tempfile.TemporaryDirectory() as td:\n U.save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n dill.dump((model_data, self._act_params), f)",
"def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()",
"def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")",
"def save_model(self, filename):\r\n pass",
"def save(self, filename):\n # serialize model to JSON\n model_json = self._model.to_json()\n with open('models/' + filename + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n self._model.save_weights('models/' + filename + \".h5\")\n print(\"Saved model to disk\")",
"def save_model(self, filename):\n with open(filename + \".dil\", 'wb') as f:\n dill.dump(self.social_distancing_func, f)\n self.clear_social_distancing_func()\n with open(filename + \".pkl\", 'wb') as f:\n pickle.dump(self, f)",
"def save(self, path=\"word2vec_keras.tar.gz\"):\n tokenizer_path = os.path.join(tempfile.gettempdir(), \"tokenizer.pkl\")\n label_encoder_path = os.path.join(tempfile.gettempdir(), \"label_encoder.pkl\")\n params_path = os.path.join(tempfile.gettempdir(), \"params.pkl\")\n keras_path = os.path.join(tempfile.gettempdir(), \"model.h5\")\n w2v_path = os.path.join(tempfile.gettempdir(), \"model.w2v\")\n\n # Dump pickle\n pickle.dump(self.tokenizer, open(tokenizer_path, \"wb\"))\n pickle.dump(self.label_encoder, open(label_encoder_path, \"wb\"))\n pickle.dump(self.__attributes__(), open(params_path, \"wb\"))\n pickle.dump(self.w2v_model, open(w2v_path, \"wb\"))\n self.k_model.save(keras_path)\n # self.w2v_model.save(w2v_path)\n\n # Create Tar file\n tar = tarfile.open(path, \"w:gz\")\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n tar.add(name, arcname=os.path.basename(name))\n tar.close()\n\n # Remove temp file\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n os.remove(name)",
"def save_model(model, model_filepath):",
"def save_model(model, model_filepath, protocol=0):\n # using pickle to store trained classifier\n #pickle.dump(model,open(model_filepath,'wb'))\n \n file = gzip.GzipFile(model_filepath, 'wb')\n file.write(pickle.dumps(model, protocol))\n file.close()\n \n pass",
"def save(self, main_dir):\n with open(f'{main_dir}/models/model_N{self.N}.pkl', 'wb') as f:\n pickle.dump(self.model, f)",
"def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")",
"def export_model(model, name):\n\tpath = \"data/{}/\".format(name)\n\tfilename = \"{}.model\".format(name)\n\tif os.path.isdir(path):\n\t\tprint(\"model already exists\")\n\t\treturn\n\telse:\n\t\tos.mkdir(path)\n\t\tjoblib.dump(model, path + filename)",
"def save_model(self, output_path):\n joblib.dump(self.dtr, output_path)",
"def save(self, model_name):\n\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Save the Keras models\n if self.mol_to_latent_model is not None:\n self.mol_to_latent_model.save(dirpath + \"/mol_to_latent_model.h5\")\n\n self.latent_to_states_model.save(dirpath + \"/latent_to_states_model.h5\")\n self.batch_model.save(dirpath + \"/batch_model.h5\")\n\n # Exclude unpicklable and unwanted attributes\n excl_attr = [\n \"_DDC__mode\",\n \"_DDC__train_gen\",\n \"_DDC__valid_gen\",\n \"_DDC__mol_to_latent_model\",\n \"_DDC__latent_to_states_model\",\n \"_DDC__batch_model\",\n \"_DDC__sample_model\",\n \"_DDC__multi_sample_model\",\n \"_DDC__model\",\n ]\n\n # Cannot deepcopy self.__dict__ because of Keras' thread lock so this is\n # bypassed by popping and re-inserting the unpicklable attributes\n to_add = {}\n # Remove unpicklable attributes\n for attr in excl_attr:\n to_add[attr] = self.__dict__.pop(attr, None)\n\n # Pickle metadata, i.e. almost everything but the Keras models and generators\n pickle.dump(self.__dict__, open(dirpath + \"/metadata.pickle\", \"wb\"))\n\n # Zip directory with its contents\n shutil.make_archive(model_name, \"zip\", dirpath)\n\n # Finally, re-load the popped elements for the model to be usable\n for attr in excl_attr:\n self.__dict__[attr] = to_add[attr]\n\n print(\"Model saved.\")",
"def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)",
"def save_model(self, dir=\"\", **kwargs):\n ckpt_fn = os.path.join(dir, f\"model.pkl\")\n torch.save(\n {\n \"global_step\": self.global_step_,\n \"epoch\": self.epoch_,\n \"model\": self.net_.state_dict(),\n \"optimizer\": self.optimizer_.state_dict(),\n \"sampler_state\": self.sampler.state_dict(),\n \"model_samples\": list(self.model_samples_),\n \"ais_state\": self.ais_loss.state_dict(),\n \"replay_prob\": self.replay_prob,\n \"max_replay\": self.max_replay,\n },\n ckpt_fn,\n )\n return ckpt_fn",
"def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))",
"def save_model(model):\n\n # model.save(os.path.join(output))\n # model.save(output)\n\n # key = \"{}/{}/examples\".format(prefix,data_partition_name)\n # url = 's3://{}/{}'.format(bucket, key)\n # boto3.Session().resource('s3').Bucket(bucket).Object(key).upload_file('data.csv')\n # print('Done writing to {}'.format(url))\n \n model.save('output/sentiment_model.h5')\n\n s3 = boto3.resource('s3')\n s3.meta.client.upload_file('output/sentiment_model.h5', 'ieor4577-hw4', 'sentiment_model.h5')\n\n # tf.saved_model.save(model, os.path.join(output, \"1\"))\n print(\"Model successfully saved\")",
"def export_model(self, save_path: str, save_format: Optional[str] = None) -> None:",
"def save_model(self, model_path: str):",
"def save_model(model, filename):\n model_dir = \"models\"\n os.makedirs(model_dir,exist_ok=True) #create only if model directory dosent exists\n filePath = os.path.join(model_dir, filename)\n logging.info(filePath)\n joblib.dump(model, filePath)",
"def save_model(self, model):\n # serialize model to JSON\n model_json = model.to_json()\n os.makedirs(os.path.dirname(self.model_json_path), exist_ok=True)\n with open(self.model_json_path, \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n model.save_weights(self.model_weights_path)\n print(\"Saved model to disk\")",
"def export_model(self, output_model_dir):\n logger.info(\"Exporting model to directory : {}\".format(output_model_dir))\n self.model.export(output_model_dir=output_model_dir)",
"def _save_tar(self, path: Path, metadata: Dict, model: Model) -> NoReturn:\n model_file_name = self.repo_config.model_file_name\n\n try:\n # Create a tmp directory\n with tempfile.TemporaryDirectory() as tmp_path:\n os.chdir(tmp_path)\n\n # Save to file the dat\n with open(model_file_name, 'wb') as f:\n pickle.dump(model, f)\n\n with open(self._METADATA_FILENAME, 'w') as f:\n json.dump(metadata, f)\n\n # Tar the tmp directory with all the files\n with tarfile.open(self._TAR_FILE_NAME, \"w\") as tar:\n for file in os.listdir(tmp_path):\n tar.add(file)\n\n # Upload the tar file to S3\n s3_file_path = str(Path(str(path) + '/' + self._TAR_FILE_NAME))\n tmp_file_path = tmp_path + '/' + self._TAR_FILE_NAME\n self.s3_client.upload_file(tmp_file_path, self.s3_bucket, s3_file_path)\n finally:\n del tmp_path",
"def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)",
"def serialize_model(model,model_dir): \n # Make output directory to store model\n pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)\n model_json = model.to_json()\n \n # Serialize model to JSON\n with open(model_dir + '/' + model_dir + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n \n # Serialize weights to HDF5\n model.save_weights(model_dir +'/' + model_dir + \".h5\")\n print(\"Saved model to disk\")\n print(\"Model info stored within local directory: {model_name}/\")\n print(\"Model weights stored as: {model_name}/{model_name}.h5\")\n print(\"Model structure stored as: {model_name}/{model_name}.json\")",
"def save_model(self, directory):\n def serving_input_fn():\n label_ids = tf.placeholder(tf.int32, [None], name='label_ids')\n input_ids = tf.placeholder(tf.int32, [None, self.max_seq_len], name='input_ids')\n input_mask = tf.placeholder(tf.int32, [None, self.max_seq_len], name='input_mask')\n segment_ids = tf.placeholder(tf.int32, [None, self.max_seq_len], name='segment_ids')\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'label_ids': label_ids,\n 'input_ids': input_ids,\n 'input_mask': input_mask,\n 'segment_ids': segment_ids,\n })()\n return input_fn\n\n self.model._export_to_tpu = False # this is important\n self.model.export_savedmodel(directory, serving_input_fn)",
"def save_model(self, name): \n torch.save(dict(params=self.model.encoder.state_dict()), osp.join(self.args.save_path, name + '.pth'))"
] | [
"0.6917545",
"0.6801277",
"0.6786645",
"0.6771546",
"0.65725636",
"0.654351",
"0.6506806",
"0.6503191",
"0.6480949",
"0.6474152",
"0.64712524",
"0.64655024",
"0.6454765",
"0.64464855",
"0.643115",
"0.64104533",
"0.6390846",
"0.6343271",
"0.6322349",
"0.6322143",
"0.63072705",
"0.6305635",
"0.62996155",
"0.629078",
"0.62780285",
"0.6274761",
"0.62698555",
"0.6259175",
"0.6253674",
"0.6227031"
] | 0.7130798 | 0 |
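The `_save_deepspeed_model` document in the row above works around the fact that compressed tarballs cannot be opened in append mode: it extracts any existing archive into a temporary directory and then rewrites the whole tarball. Below is a generic sketch of that extract-then-rewrite pattern; the helper name and default mode are assumptions, and this is not the original implementation.

```python
import os
import shutil
import tarfile
import tempfile


def append_dir_to_tarball(src_dir: str, tarball: str, write_mode: str = 'w:gz') -> None:
    """Rewrite ``tarball`` so that it also contains the contents of ``src_dir``."""
    read_mode = f'r{write_mode[1:]}'  # e.g. 'w:gz' -> 'r:gz'
    with tempfile.TemporaryDirectory() as tmpdir:
        if os.path.exists(tarball):
            # Compressed tar files do not support append mode, so unpack first.
            with tarfile.open(tarball, read_mode) as tar:
                tar.extractall(tmpdir)
        shutil.copytree(src_dir, tmpdir, dirs_exist_ok=True)
        with tarfile.open(tarball, write_mode) as tar:
            tar.add(tmpdir, arcname='')
```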
Get the association type mapping for a given query string, splitting the category and predicate components apart. | def get_association_type_mapping_by_query_string(
query_string: str,
) -> AssociationTypeMapping:
categories = parse_query_string_for_category(query_string)
matching_types = [
a_type for a_type in AssociationTypeMappings.get_mappings() if set(a_type.category) == set(categories)
]
if len(matching_types) == 0:
raise ValueError(f"No matching association type found for query string: [{query_string}]")
elif len(matching_types) > 1:
raise ValueError(f"Too many association types found for query string: [{query_string}]")
else:
return matching_types[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lookup_categories(querystring):\n tokens = tokenize_query(querystring)\n categories = []\n for idx, token in enumerate(tokens):\n if token.type == \"EXTERNAL_COMMAND\":\n categories.append(category.get(token.value, \"Miscellaneous\"))\n elif token.type == \"MACRO\":\n categories.append(\"Macro\")\n elif token.type not in [\"ARGS\", \"PIPE\", \"LBRACKET\", \"RBRACKET\"]:\n command = token.value.lower()\n # Note: This is an imperfect way to detect this.\n # See below for an example.\n if token.value == \"addtotals\":\n if len(tokens) == idx+1:\n command = \"addtotals row\"\n elif tokens[idx+1].value.lower()[:3] == \"row\":\n command = \"addtotals row\"\n else:\n command = \"addtotals col\"\n try:\n categories.append(lookup_category(command))\n except KeyError as e:\n logger.error(\"Unknown command type: %s\" % token.value)\n return categories",
"def classify(self, mutation) -> Set[Category]:\n def normalise(string):\n \"\"\"Remove double spaces, make lower case. Just remove some weirdness\"\"\"\n return re.sub(' +', ' ', string).lower()\n return {cat for string, cat in self.mapping.items()\n if normalise(string) in normalise(mutation.description)}",
"def get_association_dict_split_by_category(protein_ans_list):\n etype_2_association_dict = {}\n for etype in variables.entity_types:\n etype_2_association_dict[etype] = {}\n result = get_results_of_statement(\"SELECT protein_2_function.an, protein_2_function.function, protein_2_function.etype FROM protein_2_function WHERE protein_2_function.an IN({});\".format(str(protein_ans_list)[1:-1]))\n for res in result:\n an, associations_list, etype = res\n etype_2_association_dict[etype][an] = set(associations_list)\n return etype_2_association_dict",
"def category_reducer(category):\n if not \"--\" in category:\n if category in BAD_CATEGORIES:\n return \"Unknown\"\n return category\n\n main, sub = category.split(\"--\")\n\n main = main.strip()\n if main in [\"Science\"]:\n return sub.strip()\n else:\n return main",
"def ccd_mapper(data_type, concept=None, broader=None):\n\n mapper = {\n \"double precision\": \"IntervalA\",\n \"bigint\": \"IntervalA\",\n \"integer\": \"IntervalA\"\n }\n\n if data_type in mapper:\n return mapper.get(data_type)\n\n with psycopg2.connect(\"host=localhost\") as conn:\n with conn.cursor() as cur:\n\n if broader:\n cur.execute(\"select narrower from concepts where uri = %s\", [ broader ])\n concept = cur.fetchone()[0]\n\n cur.execute(\"select count(distinct value), count(*) from concepts__data where uri = %s\", [ concept ])\n unique, total = cur.fetchone()\n\n is_categorical = unique < 20 and unique != total\n is_bool = unique == 2\n\n if is_bool:\n return \"BooleanA\"\n elif is_categorical: # Cannot known ordinal\n return \"NominalA\"",
"def _get_category(identifier: str) -> str:\n for category, keywords in categories.items():\n # Check for each keyword\n for k in keywords:\n # Check if lower-case keyword is substring of lower-case identifier\n if identifier.lower().find(k.lower()) != -1:\n return category\n # Default value if no category was found\n return 'other'",
"def facts_stringquery(querystring, simplequery, **kwargs):\n return _stringquery('facts', querystring, simplequery, **kwargs)",
"def getCategories(URIList, annotatedWords):\n \n L=[]\n wordByCategory=dict()\n i=0\n for URI in URIList:\n sparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\n sparql.setQuery(\"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX dc: <http://purl.org/dc/terms/>\n SELECT ?label\n WHERE { \"\"\"+ \"<\"+ URI + \"> dc:subject ?label }\"\n )\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n for result in results[\"results\"][\"bindings\"]:\n category=result[\"label\"][\"value\"].encode(\"UTF-8\").split(\"/\")[-1].replace(\"_\",\" \").replace(\"Category:\",\"\")\n L.append(category)\n if category in wordByCategory:\n if i>= len(annotatedWords):\n print \"getCategories is computing URI=\",URI\n print \"Trying to append element number\",i,\n print \"from a list having\",len(annotatedWords),\"elements.\"\n wordByCategory[category].append(annotatedWords[i])\n else:\n wordByCategory[category]=[annotatedWords[i]]\n i+=1\n return L, wordByCategory",
"def _attribute_lookup(\n obj, query_string, query_value, sep=SEP,\n operator_collection=OperatorCollection,\n):\n attribute_list = query_string.split(sep)\n if hasattr(operator_collection, attribute_list[-1]) \\\n and callable(getattr(operator_collection, attribute_list[-1])):\n operator_name = attribute_list.pop()\n else:\n operator_name = operator_collection.default_operator\n operator = getattr(operator_collection, operator_name)\n\n value = _get_attribute(obj, attribute_list)\n\n return operator(value, query_value)",
"def _get_categories(cats):\n if \",\" in cats:\n return tuple([c.lower().strip() for c in cats.split(\",\")])\n else:\n return (cats.lower().strip(), )",
"def get_category(self, obj):\n cat_lst = []\n for k, v in obj.items():\n cat_lst = cat_lst + list(v.keys())\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n cat_lst = cat_lst + list(in_v.keys())\n in_k, in_v = list(in_v.items())[-1]\n simpl_lst = [i for n, i in enumerate(cat_lst) if i not in cat_lst[:n]]\n res = []\n for cat in simpl_lst:\n if cat not in self._loop_name:\n re_outer = re.compile(r'([^A-Z ])([A-Z])')\n re_inner = re.compile(r'(?<!^)([A-Z])([^A-Z])')\n res.append(re_outer.sub(r'\\1 \\2', re_inner.sub(r' \\1\\2', cat)))\n self._category = res",
"def _get_categories(category_label):\n if not category_label:\n return None\n return map(lambda x: x if x != '$' else None, category_label.split('###'))",
"def parse_query_spec(self, query_spec):\n try:\n return self.QUERY_TYPE_MAP[query_spec['type']](query_spec)\n except KeyError:\n raise exceptions.QueryError('invalid query spec')\n except TypeError:\n raise exceptions.QueryError('Query must be a dictionary specifyng type and value of the query')",
"def search_categorie(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.categories, f.categories.id, f.categories.name)\n q.where().equal(f.categories.name, _input)\n categorie_data = j.executeQuery(q)\n\n if categorie_data: \n cat_id, cat_name = categorie_data[0]\n examples = _create_examples(j.list_word_by_categorie, cat_name)\n return SelectorResult('categorie', cat_id, cat_name, *examples)",
"def sdcToClassifier_keyword(self, sdc):\n for srname, engine in self.engineMap.iteritems():\n if(srname in sdc[\"spatialRelation\"].text):\n return engine\n return None",
"def _request_category(self, category_str):\n return self._request(self._wikipedia, category_str, query_key='cmtitle')",
"def email_category(x):\n MAP = {'msn':'msn',\n 'yahoo':'yahoo',\n 'gmail':'gmail',\n 'hotmail':'hotmail',\n 'live.com':'hotmail',\n '.edu':'education',\n 'comcast':'comcast_aol_att',\n 'aol.com':'comcast_aol_att',\n 'att.net':'comcast_aol_att',\n 'verizon.net':'comcast_aol_att',\n 'icloud':'apple',\n 'me.com':'apple',\n 'mac.com':'apple',\n 'qq':'qq',\n 'outlook':'outlook'\n }\n for key in MAP.keys():\n if key in x:\n return MAP[key]\n return \"other_email_account\"",
"def _categories(self, txt):\n\n # It is slightly faster like this because we are nto creating\n # a lambda obj each time.\n def first_part(s):\n return s.split(']]', 1)[0].split('|')[0]\n\n return map(first_part, txt.split(\"[[Category:\")[1:]) + \\\n [\"wikibase-article\"]",
"def get_entity_classes(entity, mapping):\n yago_categories = []\n lkif_categories = []\n for category in sorted(entity.categories):\n if not category in mapping:\n print('Error, unmapped category {}'.format(category),\n file=sys.stderr)\n lkif_parents = mapping.get(category, {'Unknown'})\n for lkif_parent in lkif_parents:\n yago_categories.append(category)\n lkif_categories.append(lkif_parent)\n return ('|'.join(yago_categories).encode(\"utf-8\"),\n '|'.join(lkif_categories).encode(\"utf-8\"))",
"def _extract_filter_type_and_value(data):\n if data.startswith(\"in:\"):\n value = list(six.text_type(data[3:]).split(\",\"))\n filter_type = 'in'\n elif data.startswith(\"nin:\"):\n value = list(six.text_type(data[4:]).split(\",\"))\n filter_type = 'nin'\n elif data.startswith(\"neq:\"):\n value = six.text_type(data[4:])\n filter_type = 'neq'\n elif data.startswith(\"gt:\"):\n value = six.text_type(data[3:])\n filter_type = 'gt'\n elif data.startswith(\"gte:\"):\n value = six.text_type(data[4:])\n filter_type = 'gte'\n elif data.startswith(\"lt:\"):\n value = six.text_type(data[3:])\n filter_type = 'lt'\n elif data.startswith(\"lte:\"):\n value = six.text_type(data[4:])\n filter_type = 'lte'\n elif data.startswith(\"eq:\"):\n value = six.text_type(data[3:])\n filter_type = 'eq'\n elif data.startswith(\"has:\"):\n value = six.text_type(data[4:])\n filter_type = 'has'\n else:\n value = data\n filter_type = 'eq'\n\n return filter_type, value",
"def form_cand_queries_amongvocab(self, batch_input, gram):\n N = batch_input['pair_objects'].size(0)\n tensor_type = batch_input['pair_objects'].long().data.type()\n\n M = len(self.vocab[gram])\n cats = self.idx_to_vocab[gram].type(tensor_type)\n queries = cats.unsqueeze(0) \n labels = batch_input['labels_'+gram].type(queries.data.type()) #(N,M)\n\n return (queries, labels)",
"def iParseQuery(queryResults):\n iPaths = []\n results = queryResults.get_results()\n\n for item in results:\n for k in item.keys():\n if k.icat_key == 'DATA_NAME':\n name = item[k]\n elif k.icat_key == 'COLL_NAME':\n coll = item[k]\n else:\n continue\n iPaths.append(coll+'/'+name)\n return iPaths",
"def _category_slugs(self, category):\n key = self._category_key(category)\n slugs = self.r.smembers(key)\n return slugs",
"def map_concept_types (self, thing, object_type=None):\n\n # Try the CURIE approach.\n the_type = self.guess_type (thing.identifier) if thing and thing.identifier else None\n\n # If that didn't work, get candiddate types based on the (abstract) node type.\n if thing and not the_type:\n the_type = self.concepts.get (thing.node_type, None)\n if the_type:\n # Attempt to map them down to IRIs\n the_type = [ self.vocab.get(t,t) for t in the_type ]\n\n # Systematize this:\n # If the concept type is disease but the curie is NAME, we don't have a DOID.\n if isinstance(the_type,str):\n # If we've ended up with just one string, make it a list for conformity of return type\n the_type = [ the_type ]\n\n result = the_type if the_type else self.concepts.get (object_type, [ object_type ])\n\n curie = Text.get_curie (thing.identifier) if thing else None\n if curie:\n result = [ self.make_up_curie (curie) ] #[ self.vocab[curie] ]\n #result = [ self.vocab[curie] ]\n\n return result",
"def parse_category_annotations(self, annotations):\n categories = {}\n category_list, supercategory_list, category_id = [], [], []\n for i, annot in enumerate(annotations['categories']):\n categories[annot['id']] = {\n \"name\": annot['name'],\n \"supercategory\": annot['supercategory'],\n \"id\": annot['id']\n }\n category_id.append(annot['id'])\n category_list.append(annot['name'])\n supercategory_list.append(annot['supercategory'])\n supercategory_list = list(set(supercategory_list))\n\n return categories, category_list, supercategory_list, category_id",
"def __switch_restriction_type(self, categoryType):\n switch = {\n \"user\": self.userRestrictions,\n \"role\": self.roleRestrictions,\n \"channel\": self.channelRestrictions\n }\n return switch[categoryType]",
"def getQueryType(ogcuisine):\n establishmenttype = {}\n establishmenttype['Fast Food'] = 'Fast Food restaurants'\n establishmenttype['Burgers'] = 'Burger places'\n establishmenttype['Cheesesteaks'] = 'Cheesesteak spots'\n establishmenttype['Gastropubs'] = 'Gastropubs'\n establishmenttype['Breakfast'] = 'Breakfast spots'\n establishmenttype['Diner'] = 'Diners'\n establishmenttype['Salad'] = 'Salad places'\n establishmenttype['Sandwiches'] = 'Sandwich places'\n establishmenttype['Soup'] = 'Soup places'\n establishmenttype['Pizza'] = 'Pizza places'\n establishmenttype['Italian'] = 'Italian restaurants'\n establishmenttype['African'] = 'African restaurants'\n establishmenttype['Ethiopian'] = 'Ethiopian restaurants'\n establishmenttype['American'] = 'American restaurants'\n establishmenttype['BBQ'] = 'BBQ restaurants'\n establishmenttype['French'] = 'French restaurants'\n establishmenttype['Belgian'] = 'Belgian restaurants'\n establishmenttype['British'] = 'British restaurants'\n establishmenttype['Irish'] = 'Irish restaurants'\n establishmenttype['Southern'] = 'Southern restaurants'\n establishmenttype['Cajun'] = 'Cajun restaurants'\n establishmenttype['Caribbean'] = 'Caribbean restaurants'\n establishmenttype['Chinese'] = 'Chinese restaurants'\n establishmenttype['Latin American'] = 'Latin restaurants'\n establishmenttype['Cuban'] = 'Cuban restaurants'\n establishmenttype['Latin'] = 'Latin restaurants'\n establishmenttype['Brazilian'] = 'Brazilian'\n establishmenttype['Mexican'] = 'Mexican'\n establishmenttype['Tex-Mex'] = 'Tex-Mex restaurants'\n establishmenttype['Greek'] = 'Greek restaurants'\n establishmenttype['Indian'] = 'Indian restaurants'\n establishmenttype['Japanese'] = 'Japanese restaurants'\n establishmenttype['Sushi'] = 'Sushi restaurants'\n establishmenttype['Mediterranean'] = 'Mediterranean restaurants'\n establishmenttype['Middle Eastern'] = 'Middle Eastern restaurants'\n establishmenttype['Kosher'] = 'Kosher restaurants'\n establishmenttype['Seafood'] = 'Seafood restaurants'\n establishmenttype['Spanish / Tapas'] = 'Spanish / Tapas restaurants'\n establishmenttype['Steakhouse'] = 'Steakhouses'\n establishmenttype['Thai'] = 'Thai restaurants'\n establishmenttype['Vegetarian'] = 'Vegetarian restaurants'\n establishmenttype['Vietnamese'] = 'Vietnamese restaurants'\n establishmenttype['Coffee'] = 'Coffee shops'\n establishmenttype['Bagels'] = 'Bagel shops'\n establishmenttype['Bakeries'] = 'Bakeries'\n establishmenttype['Beer / Wine Stores'] = 'Beer and Wine stores'\n establishmenttype['Cupcakes'] = 'Cupcake shops'\n establishmenttype['Breweries'] = 'Breweries'\n establishmenttype['Desserts'] = 'Dessert spots'\n establishmenttype['Distilleries'] = 'Distilleries'\n establishmenttype['Donuts'] = 'Donut shops'\n establishmenttype['Empanadas'] = 'Empanada spots'\n establishmenttype['Gelato'] = 'Gelato spots'\n establishmenttype['Ice Cream / FroYo'] = 'Ice Cream shops'\n establishmenttype['Beer Bars'] = 'Beer Bars'\n establishmenttype['Cocktail Bars'] = 'Cocktail Bars'\n establishmenttype['Dive Bars'] = 'Dive Bars'\n establishmenttype['Sports Bars'] = 'Sports Bars'\n establishmenttype['Wine Bars'] = 'Wine Bars'\n establishmenttype['Beer Gardens'] = 'Beer Gardens'\n\n return establishmenttype[ogcuisine]",
"def site_to_category():\n return {\"UNEW\": 1, \"USFD\": 2, \"CAU\": 3, \"TASMC\": 4, \"RBMF\": 5}",
"def create_feature_space(sentences):\n splits = [s.split() for s in sentences]\n types = set(reduce(lambda x, y: x + y, splits))\n lookup = dict()\n for i, word in enumerate(types):\n lookup[word] = i\n return lookup",
"def parser(string, queryset):\n QueryObjects.D = {}\n QueryObjects.B = []\n QueryObjects.IND = 0\n QueryObjects.TEMP_FIELD = None\n\n algebra = boolean.BooleanAlgebra()\n query_list = lexer(string)\n query_string = ' '.join(query_list)\n qs = algebra.parse(query_string)\n\n if QueryObjects.TEMP_FIELD:\n queryset = queryset.annotate(**QueryObjects.TEMP_FIELD)\n QueryObjects.TEMP_FIELD = None\n\n locals().update(QueryObjects.D.items())\n query = str(qs)\n query = eval(query)\n queryset = queryset.filter(query)\n return queryset"
] | [
"0.58501035",
"0.52250046",
"0.51798195",
"0.48137656",
"0.4768399",
"0.46968216",
"0.4696489",
"0.46959627",
"0.46701097",
"0.46667367",
"0.46602768",
"0.46147487",
"0.46047112",
"0.45540965",
"0.4524083",
"0.4497096",
"0.44962004",
"0.44849357",
"0.4439901",
"0.443559",
"0.44265905",
"0.44099367",
"0.44089985",
"0.4392169",
"0.4386087",
"0.4385822",
"0.43639722",
"0.4357607",
"0.4353253",
"0.43514642"
] | 0.71058404 | 0 |
Returns the distance between the current entity and the given (x, y) coordinate. | def distance(self, x: int, y: int) -> float:
return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))",
"def distance_from_center(self, x: int, y: int) -> float:\n width, height = self.width, self.height\n dis = distance(x, y, width/2, height/2)\n return dis",
"def distance(self, x, y=None):\n if y is not None:\n pos = TwoDV(x, y)\n if isinstance(x, TwoDV):\n pos = x\n elif isinstance(x, tuple):\n pos = TwoDV(*x)\n elif isinstance(x, TNavigator):\n pos = x._position\n return abs(pos - self._position)",
"def get_dist(self, point_x, point_y):\n dist = sqrt((point_x - self.player_x) ** 2 + (point_y -\n self.player_y) ** 2)\n return dist",
"def distance(self, x2, y2):\r\n return math.sqrt((x2 - self.x) ** 2 + (y2 - self.y) ** 2)",
"def dist(self,x, y):\n\n x1, y1 = x\n x2, y2 = y\n return np.sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2))",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def getDistance(self, x1, x2, y1, y2):\n return ((x1 - x2)**2 + (y1 - y2)**2)**0.5",
"def DistanceFromOrigin(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distanceFromOrigin(self):\n return ((self.x)**2+(self.y)**2)**0.5",
"def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )",
"def dist(self, other: Coordinate) -> int:\n return abs(other.x - self.x) + abs(other.y - self.y)",
"def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)",
"def distance_to(self, obj):\n\t\tx, y = self.position\n\t\tobj_x, obj_y = obj.position\n\t\treturn hypot(x - obj_x, y - obj_y)",
"def __distance(start_x, start_y, end_x, end_y):\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance",
"def calculate_distance(self, other_point):\n return math.sqrt(\n (self._x - other_point._x)**2 +\n (self._y - other_point._y)**2)",
"def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans",
"def distance_to(self, other_particle):\n return sqrt((self.pos_x - other_particle.pos_x) ** 2 + (\n self.pos_y - other_particle.pos_y) ** 2)",
"def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans",
"def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)",
"def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)",
"def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)",
"def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)"
] | [
"0.81523913",
"0.7383806",
"0.72868764",
"0.7262011",
"0.7195622",
"0.7163461",
"0.7131348",
"0.71239984",
"0.71239984",
"0.71239984",
"0.71239984",
"0.71239984",
"0.71239984",
"0.71239984",
"0.7074517",
"0.70256484",
"0.7024614",
"0.70112294",
"0.68983996",
"0.6831905",
"0.67817825",
"0.6780067",
"0.6773637",
"0.67658895",
"0.6763439",
"0.6752022",
"0.67141783",
"0.66960484",
"0.66929966",
"0.66848576"
] | 0.7916495 | 1 |
Create a dictionary of protein IDs and FASTA sequences retrieved from the UniProt site | def open_uniprotsite(prot_names):
fasta_dict = {}
for prot_id in prot_names:
uniprot_link = "https://www.uniprot.org/uniprot/" + prot_id + ".fasta"
uniprot_fasta = urllib.request.urlopen(uniprot_link)
fasta_sequence = uniprot_fasta.readlines()#.decode('utf-8')
fasta_sequence = fasta_sequence[1:]
fasta_sequence = list(f.decode('utf-8') for f in fasta_sequence)
fasta_sequence = ''.join(fasta_sequence)
fasta_sequence = fasta_sequence.replace('\n','')
fasta_dict[prot_id] = fasta_sequence
uniprot_fasta.close()
return fasta_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prot_sequence_finder(protL):\n \n idDict = prot_id_converter(protL, \"9606\", inpDB = \"genesymbol\",outDB=\"refseqproteingi\")\n seqD = prot_entrez_fetch(idDict, retM=\"gb\", retT=\"fasta\")\n \n protD = {}\n \n for keyS, valueS in idDict.items():\n protD[keyS] = seqD[valueS]\n \n return protD",
"def DictProteomeNameToSeq(X, n):\n DictProtToSeq_UP = {}\n for rec2 in SeqIO.parse(X, \"fasta\"):\n UP_seq = str(rec2.seq)\n if n == \"full\":\n UP_name = rec2.description.split(\"HUMAN \")[1].split(\" OS\")[0]\n DictProtToSeq_UP[UP_name] = str(UP_seq)\n if n == \"gene\":\n try:\n UP_name = rec2.description.split(\" GN=\")[1].split(\" \")[0]\n DictProtToSeq_UP[UP_name] = str(UP_seq)\n except BaseException:\n continue\n return DictProtToSeq_UP",
"def produce_proteinSeq_dict (inPath, outPath):\n s = list(SeqIO.parse(str(inPath), 'fasta'))\n proteinSeq = {}\n for _, elm in enumerate(s):\n proteinSeq[elm.id] = str(elm.seq)\n with open(outPath, 'wb') as fOut:\n pickle.dump(proteinSeq, fOut)",
"def getseq(genomefasta):\n genomedict = {}\n for i in SeqIO.parse(open(genomefasta), \"fasta\"):\n genomedict[i.id] = str(i.seq)\n return genomedict",
"def uniprot_txt_parser(uniprot_txt_lines):\n uniprot = {}\n entry_line = [i for i,l in enumerate(uniprot_txt_lines) if l[:2]=='ID']\n entry_line.append(len(uniprot_txt_lines))\n begin_end = [(begin,entry_line[i+1]) for i,begin in enumerate(entry_line[:-1])]\n for begin,end in begin_end:\n for line in uniprot_txt_lines[begin:end]:\n line = line.rstrip('\\r\\n')\n line = line.rstrip('.')\n line = line.replace(';',' ')\n words = line.split()\n if words[0] == 'AC':\n acc = words[1]\n uniprot[acc] = {}\n elif words[0] == 'DR' and words[1] =='InterPro':\n if uniprot[acc].has_key('interpro'):\n uniprot[acc]['interpro'].append((words[2],1))\n else:\n uniprot[acc]['interpro'] = [(words[2],1)]\n elif words[0] == 'DR' and words[1] == 'Pfam':\n if uniprot[acc].has_key('pfam'):\n uniprot[acc]['pfam'].append((words[2],int(words[-1])))\n else:\n uniprot[acc]['pfam'] = [(words[2],int(words[-1]))]\n elif words[0] == 'DR' and words[1] == 'SMART':\n if uniprot[acc].has_key('smart'):\n uniprot[acc]['smart'].append((words[2],words[-1]))\n else:\n uniprot[acc]['smart'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'SUPFAM':\n if uniprot[acc].has_key('supfam'):\n uniprot[acc]['supfam'].append((words[2],words[-1]))\n else:\n uniprot[acc]['supfam'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'PROSITE':\n if uniprot[acc].has_key('prosite'):\n uniprot[acc]['prosite'].append((words[2],words[-1]))\n else:\n uniprot[acc]['prosite'] = [(words[2],words[-1])]\n # elif words[0] == 'DR' and words[1] =='PDB':\n # w = words[-1].replace('/',' ')\n # w = w.replace('=',' ')\n # w = w.replace('-',' ')\n # w = w.split()\n # w = words[2:-1]+w\n\n # if uniprot[acc].has_key('pdb'):\n # uniprot[acc]['pdb'].append(w)\n # else:\n # uniprot[acc]['pdb'] = [w]\n\n return uniprot",
"def return_fasta_dic(file):\n seq_dict = {rec.id: rec.seq for rec in SeqIO.parse(file, \"fasta\")}\n return seq_dict",
"def flowgram_id_to_seq_id_map(seqs):\r\n result = {}\r\n for id_, seq in seqs:\r\n fields = id_.split()\r\n seq_id = id_\r\n flowgram_id = fields[1]\r\n result[flowgram_id] = seq_id\r\n return result",
"def read_fasta_to_dictionary(genome_file):\n filename = genome_file\n dct = {}\n\n id_name = \"\"\n sequence = \"\"\n first_pass = 1\n\n read_fh = open(filename, 'r')\n for i, line in enumerate(read_fh):\n line = line.rstrip()\n if re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r',', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n\n elif re.search(r'^>(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r'(\\d+)_', \"\", id_name)\n id_name = re.sub(r'.*\\|', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n else:\n sequence += line\n dct[id_name] = sequence\n\n return dct",
"def get_pep_seq(pep_path):\n pep_dict = dict()\n j, trans_id, trans_id_else, pep_sequence, pep_id = 0, '', '', '', ''\n pep_pattern = re.compile(r'>([^\\s]+)')\n trans_pattern = re.compile(r'transcript:([^\\s]+)')\n\n with open(pep_path) as f:\n for line in f:\n if not line.strip():\n continue\n if line.startswith('>'):\n j += 1\n if j > 1:\n seq_len = len(pep_sequence)\n pep_dict[trans_id] = dict(name=pep_id, sequence=pep_sequence,\n sequence_length=seq_len)\n pep_dict[trans_id_else] = dict(name=pep_id, sequence=pep_sequence,\n sequence_length=seq_len)\n pep_id = pep_pattern.match(line).group(1)\n trans_id = trans_pattern.search(line).group(1)\n if '.' in trans_id:\n trans_id_else = trans_id[:trans_id.rfind('.')]\n else:\n trans_id_else = trans_id\n pep_sequence = ''\n else:\n pep_sequence += line.strip()\n else:\n seq_len = len(pep_sequence)\n pep_dict[trans_id] = dict(name=pep_id, sequence=pep_sequence,\n sequence_length=seq_len)\n pep_dict[trans_id_else] = dict(name=pep_id, sequence=pep_sequence,\n sequence_length=seq_len)\n if not pep_dict:\n print('提取蛋白序列信息为空')\n print(\"共统计出{}条转录本的蛋白序列信息\".format(len(pep_dict)))\n return pep_dict",
"def map_from_uniprot_pdb(uniprots: List[str])-> pd.DataFrame:\n url: str ='https://www.uniprot.org/uploadlists/'\n # define the query parameters \n q_params: Dict[str, str]={\n 'from': 'ACC+ID', \n 'to': 'PDB_ID',\n 'format': 'tab',\n 'query': ' '.join(uniprots)\n }\n data: bytes =urllib.parse.urlencode(q_params).encode('utf-8')\n request: urllib.request.Request = urllib.request.Request(url,data)\n # read the request\n with urllib.request.urlopen(request) as input_file: \n results: str =input_file.read().decode('utf-8')\n # parse the resulting strings \n mapped_pairs: List[str] = results.split('\\n')\n # pop the first element as it contain the words from and to \n mapped_pairs.pop(0)\n # allocate to lists to hold the results \n unitpot_ids: List[str] = []\n pdb_ids: List[str] = []\n # parse the results \n for pair in mapped_pairs:\n temp_lists: List[str] = pair.split('\\t')\n if len(temp_lists) ==2:\n unitpot_ids.append(temp_lists[0])\n pdb_ids.append(temp_lists[1])\n # combine the data into a dataframe \n results: pd.DataFrame = pd.DataFrame({\n 'Uniprot-ID':unitpot_ids,\n 'PDB':pdb_ids\n })\n # return the results \n return results",
"def sequenceDict(self):\n\t\twith open(self.ff) as fastaFile:\n\t\t\tsequences = {}\n\t\t\tfor name, seq in self.readFasta(fastaFile):\n\t\t\t\tsequences[name] = seq\n\t\treturn sequences",
"def map_from_uniprot_gene(uniprots: List[str])->pd.DataFrame:\n url: str ='https://www.uniprot.org/uploadlists/'\n # define the query parameters \n q_params: Dict[str, str]={\n 'from': 'ACC+ID', \n 'to': 'ENSEMBL_ID',\n 'format': 'tab',\n 'query': ' '.join(uniprots)\n }\n data: bytes =urllib.parse.urlencode(q_params).encode('utf-8')\n request: urllib.request.Request = urllib.request.Request(url,data)\n # read the request\n with urllib.request.urlopen(request) as input_file: \n results: str =input_file.read().decode('utf-8')\n # parse the resulting strings \n mapped_pairs: List[str] = results.split('\\n')\n # pop the first element as it contain the words from and to \n mapped_pairs.pop(0)\n # allocate to lists to hold the results \n unitpot_ids: List[str] = []\n ensemble_ids: List[str] = []\n # parse the results \n for pair in mapped_pairs:\n temp_lists: List[str] = pair.split('\\t')\n if len(temp_lists) ==2:\n unitpot_ids.append(temp_lists[0])\n ensemble_ids.append(temp_lists[1])\n # combine the data into a dataframe \n results: pd.DataFrame = pd.DataFrame({\n 'Uniprot-ID':unitpot_ids,\n 'Gene-ID':ensemble_ids\n })\n # return the results \n return results",
"def produce_rnaToProtein_refseqID_dict (inPath, outPath):\n idMap = {}\n with open(inPath, 'r') as f:\n next(f)\n for line in f:\n tax_id, gene_id, symbol, rsg, lrg, rna, t, protein, p, category = line.strip().split('\\t')\n if (len(rna) > 0) and (len(protein) > 0):\n idMap[rna] = protein\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)",
"def get_protein_fasta(uniprot_id):\r\n url = \"http://www.uniprot.org/uniprot/{}.fasta\".format(uniprot_id)\r\n string = re.split(\"\\n\",ur.urlopen(url).read().decode(),1)[1]\r\n return re.sub(\"\\n\",\"\",string)",
"def extract_uniprot4protein_keys(self, proteins_dict):\n\t\treturn {key.split(\"|\")[1]: value for (key, value) in proteins_dict.items()}",
"def get_uniprots_for_hint():\n initial_dict = {}\n for node in DatabaseGraph.get_all('UNIPROT'):\n initial_dict[node['legacyID']] = node.id\n\n for key in list(initial_dict.keys()):\n initial_dict[key.split('_')[0]] = initial_dict.pop(key)\n return initial_dict",
"def produce_uniprotID_dict(inPath, spList, outPath):\n with open(spList, 'r') as f:\n swissProtIDs = set(f.read().split())\n with open(inPath, 'r') as fIn:\n idMap = {}\n for line in fIn:\n uniprotID, otherIDtype, otherID = line.strip().split('\\t')\n if uniprotID in swissProtIDs:\n if otherIDtype == 'Gene_Name':\n otherID = otherID.upper()\n idMap[otherID] = uniprotID\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)",
"def _add_uniprot_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding UniProt identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n all_uniprot = [k for k in map_dict if k.lower().startswith('uniprot')]\n\n for uniprot_id in tqdm.tqdm(all_uniprot, total=len(all_uniprot)):\n db, uid = uniprot_id.split(':')\n\n try:\n # query UniProt API\n r = r_session.get(\n 'http://www.uniprot.org/uniprot/' + uid + '.xml'\n )\n except Exception as x:\n print(\"%s: %s\" % (uniprot_id, x.__class__.__name__))\n continue\n\n if r.content:\n root = etree.fromstring(r.content)\n if root:\n for s in root[0]:\n if s.tag.endswith('accession'):\n new_id = '{}:{}'.format('UniProt', s.text.split(':')[-1])\n map_dict[uniprot_id].add(new_id)\n else:\n break\n\n return map_dict",
"def extract_seqs(seq_filepath):\n seqs = {}\n for record in SeqIO.parse(seq_filepath.as_posix(), \"fasta\"):\n seqs[record.id] = record\n return seqs",
"def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li # if i use 'id' it is blue; why?\n seq_dict[defline] = \"\"\n else:\n li = li.upper() # just to clean up sequence\n seq_dict[defline] += li\n\n return seq_dict",
"def create_protein_sequences_table(self,fn_proteins_fasta_file):\n log.info(\"Creating table of protein sequences ...\")\n self.create_table(self.SequenceTable,self.SequenceFields,\n self.SequenceTypes)\n parser = SeqIO.parse(fn_proteins_fasta_file, \"fasta\")\n data = []\n n_stored = 0\n chunk_size = 1000\n for seq_record in parser:\n description = seq_record.description\n m = re.match(self.protein_record_pattern,description)\n gene_id = m.group(1)\n locus_tag = m.group(2)\n protein_description = m.group(3)\n table_record = [gene_id, locus_tag, protein_description, seq_record.seq.tostring()]\n data.append(table_record)\n # store chunks of data\n if len(data) > chunk_size:\n self.store_data(self.SequenceTable,data)\n n_stored += chunk_size\n log.info(\"Stored %20d sequences\\r\",n_stored)\n data = [] # empty data to avoid using a lot of memory\n # store last chunk\n if len(data) > 0:\n n_stored += len(data)\n self.store_data(self.SequenceTable,data)\n log.info(\"Stored %20d sequences\\r\",n_stored)",
"def prot_entrez_fetch(proteinDict, retM=\"text\", retT=\"fasta\"):\n from Bio import Entrez\n Entrez.email =\"[email protected]\"\n for i in proteinDict.values():\n try:\n int(i) # test if really a list of UIDs\n except ValueError:\n print(\"was expecting UIDs like \\\"12345678\\\", but got something else instead:\")\n print(i)\n raise\n inpList = list(proteinDict.items())\n proteinIntList = []\n for k in inpList:\n proteinIntList.append(k[1])\n \n proteinList = list(map(str, proteinIntList))\n # print proteinList\n \n print(\"connecting to Entrez...\")\n requestR = Entrez.epost(\"protein\",id=\",\".join(proteinList)) # send all UIDs as a single query to entrez. \n resultO = Entrez.read(requestR)\n webEnv = resultO[\"WebEnv\"]\n queryKey = resultO[\"QueryKey\"]\n handleO = Entrez.efetch(db=\"protein\",retmode=retM, rettype=retT, webenv=webEnv, query_key=queryKey) # retrieve all results in batch\n print(\"connection successful\")\n if retT == \"fasta\":\n fastaL = entrez_fasta_parser(handleO)\n protD = {}\n for j in range(len(proteinIntList)):\n protD[proteinIntList[j]] = fastaL[j].split(\"\\n\")[1]\n return protD\n \n elif retM == \"text\" and retT == \"gp\": # use \"gp\" for genpept flatfile format, and \"gb\" for genbank flatfile for genes\n return handleO.read()\n else:\n print(\"this data format was not expected:\")\n print(\"retmode: \", retM)\n print(\"rettype: \", retT)\n raise ValueError",
"def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r",
"def convertXmlToProtein(self, xml):\n\t\t# XML to dictionary\n\t\tproteinObject = Protein()\n\t\t\n\t\tdictionary = xmltodict.parse(xml)\n\t\troot = dictionary[\"uniprot\"]\n\t\tentry = root[\"entry\"]\n\t\t\n\t\tfor element, value in entry.items():\n\t\t\tif element == \"@accession\":\n\t\t\t\tproteinObject.addAttribute(\"id\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"name\":\n\t\t\t\tproteinObject.addAttribute(\"proteinShortName\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"protein\":\n\t\t\t\tfullname = value[\"recommendedName\"][\"fullName\"]\n\t\t\t\tproteinObject.addAttribute(\"proteinFullName\", \"uniprot\", fullname)\n\t\t\t\t\n\t\t\tif element == \"@created\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"creationDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\t\n\t\t\tif element == \"@modified\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"modifiedDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\n\t\t\tif element == \"comment\":\n\t\t\t\tfor comment in entry[\"comment\"]:\n\t\t\t\t\tif \"text\" in comment:\n\t\t\t\t\t\ttext = comment[\"text\"][\"#text\"] if isinstance(comment[\"text\"], OrderedDict) else comment[\"text\"]\n\t\t\t\t\t\tproteinObject.addAttribute(comment[\"@type\"], \"uniprot\",text)\n\t\t\t\t\t\n\t\t\tif element == \"gene\":\n\t\t\t\tgenes = []\n\t\t\t\tfor gene in value[\"name\"]:\n\t\t\t\t\tif \"#text\" in gene and isinstance(gene, OrderedDict):\n\t\t\t\t\t\tgenes.append(gene[\"#text\"])\n\t\t\t\t\t\n\t\t\t\tproteinObject.addAttribute(\"geneName\", \"uniprot\", genes)\n\t\t\t\t\t\n\t\t\tif element == \"organism\":\n\t\t\t\tif isinstance(value[\"name\"], list):\n\t\t\t\t\torganisms = []\n\t\t\t\t\tfor organism in value[\"name\"]:\n\t\t\t\t\t\torganisms.append(organism[\"#text\"])\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tproteinObject.addAttribute(\"organism\", \"uniprot\", value[\"name\"][\"#text\"])\n\t\t\t\t\n\t\t\t\n\t\t\tif element == \"sequence\":\n\t\t\t\tproteinObject.addAttribute(\"sequence\", \"uniprot\",value[\"#text\"].replace(\"\\n\",\"\"))\n\t\t\t\tproteinObject.addAttribute(\"sequencelength\", \"uniprot\",value[\"@length\"].replace(\"\\n\",\"\"))\n\n\n\t\treturn proteinObject",
"def produce_protein_chain_dict (inPath, outPath):\n chainMap = pd.read_table(inPath, sep=\"\\t\")\n proteins = set(chainMap[\"Query\"])\n proteinChains = {}\n for protein in proteins:\n proteinChains[protein] = set(chainMap.loc[chainMap[\"Query\"]==protein, \"Subject\"])\n with open(outPath, 'wb') as fOut:\n pickle.dump(proteinChains, fOut)",
"def parse_transcripts(trans):\n s = SeqIO.parse(trans, 'fasta')\n seq_dict = SeqIO.to_dict(s)\n # Remove the _whatever at the end\n seq_dict_nosuff = {}\n for seqid in seq_dict:\n seq_dict_nosuff[seqid.split('_')[0]] = seq_dict[seqid]\n return seq_dict_nosuff",
"def dna_to_protein(seq):\n\n # Verify a convertible sequence\n if len(seq) % 3 != 0:\n raise RuntimeError('Total number of bases must be a multiple of 3')\n\n # Iterate through adding the proteins\n protein = ''\n for i in range(0, len(seq), 3):\n protein += bioinfo_dicts.codons[seq[i:i+3]]\n return protein",
"def load_uniprot(filepath):\n print('Loading uniprot dataset')\n with open(filepath) as handle:\n uniprot = [r for r in SeqIO.parse(handle, 'swiss')]\n repeated_seqs = set(seq for seq, count in Counter(u._seq._data for u in uniprot).items() if count > 1)\n return uniprot, repeated_seqs",
"def get_text_mining_mir_dictionary():\n if logger.getEffectiveLevel() == logging.DEBUG or not os.path.exists(OUT_MIR_ALIAS_FILE):\n __create_mir_alias_dictionary__()\n\n mir_alias_to_identifier = {}\n with gzip.open(OUT_MIR_ALIAS_FILE, 'rb') as mir_alias_file:\n for line in mir_alias_file:\n tax_id, mir_id, mir_alias = line.rstrip('\\r\\n').split('\\t')\n mir_alias_to_identifier[(tax_id, mir_alias)] = mir_id\n return mir_alias_to_identifier",
"def uniprot(gene, organism, output_file):\n\n print(\"\\tUniprot ...\")\n\n # Request\n domain = \"https://www.uniprot.org/uniprot\"\n query = f\"?query=gene_exact%3A{gene}+organism%3A{organism}\"\n extend = \"columns=id,protein_names&format=tab\"\n r = requests.get(f\"{domain}/{query}&{extend}\")\n result = r.text.splitlines()\n\n # Extract Uniprot IDs and Offical Protein Names\n uniprot_id = []\n uniprot_name = []\n if result != []:\n del(result[0]) # Remove the header\n for line in result: # Extracting IDs and names\n colonne = line.split('\\t')\n id = colonne[0]\n name = colonne[1]\n uniprot_id.append(id)\n if colonne[1] not in uniprot_name:\n uniprot_name.append(name)\n\n # Write the Uniprot IDs\n output_file.write(\"<td><div class='scroll'>\")\n for id in uniprot_id:\n output_file.write(f'<a href=\"{domain}/{id}\">{id}</a><br>')\n output_file.write(\"</div></td>\")\n\n # Write the Uniprot Offical Names\n output_file.write(\"<td><div class='scroll'>\")\n output_file.write(f\"{'<br>'.join(uniprot_name)}</div></td>\")\n return uniprot_id\n else:\n output_file.write(\"<td><i>No data found</i></td>\"*2)\n return uniprot_id"
] | [
"0.6781977",
"0.6571823",
"0.6411829",
"0.63938284",
"0.6391897",
"0.6324395",
"0.63109064",
"0.6232596",
"0.62234",
"0.6181884",
"0.61223954",
"0.60952413",
"0.6050396",
"0.5965492",
"0.5963545",
"0.5963038",
"0.5939162",
"0.58732736",
"0.5828895",
"0.578113",
"0.57796746",
"0.57611203",
"0.5739137",
"0.5738899",
"0.5735637",
"0.5721851",
"0.5688861",
"0.56881356",
"0.5684088",
"0.5679467"
] | 0.7153276 | 0 |
Create a dictionary of protein IDs and motif positions for N{P}[ST]{P}, including overlapping matches | def search_motif(sequences):
motif = re.compile(r'(?=(N[^P](S|T)[^P]))') #N{P}[ST]{P}
motif_index = {}
for key,value in sequences.items():
match_motif = re.finditer(motif, value)
motif_start_list = []
for i in match_motif:
motif_start_list.append(str(i.start()+1))
motif_index[key] = ' '.join(motif_start_list)
return motif_index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict",
"def findMotifLocus(sequence, id):\n\n # create search range, motif cannot extend beyond the end of the sequence\n searchRange = len(sequence) - 4\n\n # create empty list to store starting amino acid position of motif\n positions = []\n\n # pass through sequence looking for all occurences of mofit, including overlapping occurences, retuning the position\n # of the first amino acid in the protein sequence at which the motif starts\n for j in range(0, searchRange):\n # first, check if S and not T in third position\n if sequence[j] is 'N' and sequence[j+1] is not 'P' and sequence[j+2] is 'S' and sequence[j+3] is not 'P':\n aminoAcidPosition_S = j + 1\n positions.append(aminoAcidPosition_S)\n j += 1\n # second, check if T and not S in third position\n elif sequence[j] is 'N' and sequence[j+1] is not 'P' and sequence[j+2] is 'T' and sequence[j+3] is not 'P':\n aminoAcidPosition_T = j + 1\n positions.append(aminoAcidPosition_T)\n j += 1\n\n motifPositions = str(positions)\n motifPositions = motifPositions.replace(',', '')\n motifPositions = motifPositions.replace('[', '')\n motifPositions = motifPositions.replace(']', '')\n\n if len(positions) != 0:\n print(uniprotIDsLIST[id])\n print(motifPositions)\n\n return",
"def produce_protein_interaction_dict (inPath, outPath): \n PPIs = pd.read_table(inPath, sep=\"\\t\")\n proteins = set(PPIs[[\"Protein_1\", \"Protein_2\"]].values.flatten())\n proteinPartners = {}\n for protein in proteins:\n partners = set(PPIs.loc[(PPIs[[\"Protein_1\", \"Protein_2\"]]==protein).any(1),\n [\"Protein_1\", \"Protein_2\"]].values.flatten()) - {protein}\n if sum((PPIs[[\"Protein_1\", \"Protein_2\"]]==protein).all(1)) > 0:\n partners.add(protein)\n proteinPartners[protein] = partners\n with open(outPath, 'wb') as fOut:\n pickle.dump(proteinPartners, fOut)",
"def findMatches(personDict,personDict2):\n matches = {}\n skepticalMatches = {}\n for i in range(1,19):\n if tags[i] not in ['SSN','PHONE']:\n continue\n\n dictConsidered = personDict[tags[i]]\n done = False\n\n for duplicatedEntry in dictConsidered:\n if duplicatedEntry==\"\":\n #skip the empty entries\n continue\n pairs = itertools.combinations(dictConsidered[duplicatedEntry],2)\n if done:\n break\n for p in pairs:\n if done:\n break\n\n info1 = personDict['EnterpriseID'][p[0]]\n info2 = personDict['EnterpriseID'][p[1]]\n info1b = personDict2['EnterpriseID'][p[0]]\n info2b = personDict2['EnterpriseID'][p[1]]\n k = tuple(sorted(p))\n \n if k not in matches and k not in skepticalMatches:\n if (((info1[1]==info2[1])and info1[1]!='') or((info1[2]==info2[2])and info1[2]!='') or ((info1[5]==info2[5])and info1[5]!='') ):\n score = getScorePair(info1b,info2b)\n \n \n if (abs(int(k[0])-int(k[1]))<10) and score<7:\n #This is likely not a real match\n skepticalMatches[k] = score\n else:\n #This is a real match\n matches[k] = score\n \n return matches,skepticalMatches",
"def motif_count(sequences, start_at=4, stop_at=8):\n motifs = dict()\n for motif_length in range(start_at, stop_at):\n motifs[motif_length] = dict()\n for sequence in sequences:\n for motif in possible_motifs_by_length(motif_length):\n if motif not in motifs[motif_length]:\n motifs[motif_length][motif] = 0\n if sequence.find(motif) != -1:\n motifs[motif_length][motif] += overlap_count(\n sequence, motif)\n return motifs",
"def get_annotation(file, pos_dict, ex_dict, tag):\n results = {}\n with open(file, 'r', encoding='utf-8') as f:\n par = 0\n par_results = []\n for line in f:\n if line is \"\\n\":\n if par_results:\n if \"paragraph\" + str(par) in results:\n results[\"paragraph\" + str(par)].append(par_results)\n else:\n results[\"paragraph\" + str(par)] = par_results\n par += 1\n par_results = []\n continue\n for q in pos_dict:\n qmatches = re.finditer(q, line, re.I)\n for qmatch in qmatches:\n exclude = 0\n for exItem in ex_dict:\n exMatches = re.finditer(exItem.rstrip('\\n'), line, re.I)\n for exMatch in exMatches:\n if exMatch and qmatch.start(1) is exMatch.start(1):\n exclude = 1\n # Save result to list of results with appropriate tag\n if (qmatch and exclude is 0):\n try:\n #results.append((int(qmatch.group(1)),int(qmatch.group(2)), int(qmatch.group(len(qmatch.groups()))), tag))\n par_results.append({\"sentID\": int(qmatch.group(1)), \"spanStart\":int(qmatch.group(2)), \"spanEnd\":int(qmatch.group(len(qmatch.groups()))), \"tag\": tag})\n except TypeError:\n # TypeErrors are usually raised when one of the capture groups of fields is empty (NoneType)\n # Simply throw a warning message and keep going\n print(\"Warning! Something went wrong while matching expression'\" + q + \"' in line '\" + line[0:50] + \"...'\")\n return results",
"def extract_labeled_sequence_gaps(source_seq, test_seq):\n slot_vals = {} \n tmp_gap = []\n prev_word_pos = 0 # the temp value used as a key for the gaps\n pos_in_seq = 0 # position of source_seq of test_seq's current match\n for i, el in enumerate(test_seq):\n if (len(source_seq)-pos_in_seq > len(test_seq)-i) or (pos_in_seq == len(source_seq)):\n return {} \n if el == source_seq[pos_in_seq]:\n # match\n pos_in_seq += 1\n if pos_in_seq != 1 and len(tmp_gap) != 0:\n slot_vals[prev_word_pos] = tmp_gap\n tmp_gap = []\n prev_word_pos = i \n else:\n tmp_gap.append(el)\n if pos_in_seq == len(source_seq):\n return slot_vals\n return {}",
"def find_match(line,dic):\n seqid = line[0:seqid_len]\n sequence = line[(seqid_len + f_primer_len):(len(line) - r_primer_len)]\n if seqid in dic:\n increment(dic[seqid],sequence,1)\n else:\n dic[seqid] = {sequence:1}",
"def create_match_instance_pairs(plant_match_in):\r\n\t## collect all plant name match instances indices\r\n\tjust_indices = [int(indices) for plant_match_in_set in plant_match_in for indices in plant_match_in_set[1]]\r\n\t\r\n\tassert len(just_indices) == len(set(just_indices)) # check there are no exact duplicates in indices\r\n\r\n\tsorted_index = list(sorted(just_indices)) # sort indices small-large\r\n\tprint(f'Length of corpus bigrams BEFORE ambiguous matches removed: {len(sorted_index)}')\r\n\t#print(sorted_index)\r\n\r\n\t# remove all ambiguous matches that are within 1 word of each other\r\n\tprint('Ambiguous plant name matches: ')\r\n\tfor i, index in enumerate(sorted_index): # iterate over all indices in sorted list\r\n\t\t\r\n\t\tif index == sorted_index[i-1]+1: # indices is within 1 of previous indices in list\r\n\t\t\tprint(index, sorted_index[i-1])\r\n\t\t\tsorted_index.remove(index) # remove indices from list\r\n\t\t\tsorted_index.remove(sorted_index[i-1]) # AND remove previous indices from list\r\n\tprint(f'Length of corpus bigrams AFTER ambiguous matches removed: {len(sorted_index)}')\r\n\r\n\t# create indices dict with 'B', 'I' values\r\n\tpaired_finds = {}\r\n\tfor match_index in sorted_index: # iterate over unambiguous match indices list\r\n\t\t\t\t\t\r\n\t\t\tpaired_finds[match_index] = ('B') # WITH value of 'B'\r\n\t\t\t\r\n\t\t\tpaired_finds[match_index+1] = ('I') # WITH value of 'I'\r\n\r\n\treturn paired_finds",
"def findmotif(MS_seq, MS_name, ProteomeDict, motif_size):\n MS_seqU = MS_seq.upper()\n try:\n UP_seq = ProteomeDict[MS_name]\n assert MS_seqU in UP_seq, \"check \" + MS_name + \" with seq \" + MS_seq + \". Protein sequence found: \" + UP_seq\n regexPattern = re.compile(MS_seqU)\n MatchObs = list(regexPattern.finditer(UP_seq))\n if \"y\" in MS_seq:\n pY_idx = list(re.compile(\"y\").finditer(MS_seq))\n assert len(pY_idx) != 0\n center_idx = pY_idx[0].start()\n y_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pY_idx) > 1:\n DoS_idx = pY_idx[1:]\n assert len(DoS_idx) != 0\n elif \"t\" in MS_seq or \"s\" in MS_seq:\n DoS_idx = list(re.compile(\"y|t|s\").finditer(MS_seq))\n assert len(DoS_idx) != 0\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, y_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n if \"y\" not in MS_seq:\n pTS_idx = list(re.compile(\"t|s\").finditer(MS_seq))\n assert len(pTS_idx) != 0\n center_idx = pTS_idx[0].start()\n ts_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pTS_idx) > 1:\n DoS_idx = pTS_idx[1:]\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, ts_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n except BaseException:\n print(MS_name + \" not in ProteomeDict.\")\n raise\n\n return pos, mappedMotif",
"def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)",
"def build_positional_table(profile):\n prop_dict = {'pos': [], 'ref_base': [], 'cov': [], 'mismatch_rate': [], 'a_mism': [], 'g_mism': [], 't_mism': [],\n 'c_mism': [], 'arrest_rate': []}\n\n ref = sys.argv[3]\n print(ref.replace('__tt__', '|'))\n for line in profile:\n line1 = line.strip().split()\n if line1[0] == ref.replace('__tt__', '|') and start <= int(line1[1]) <= end:\n prop_dict['pos'].append(int(line1[1]))\n prop_dict['ref_base'].append(line1[2])\n prop_dict['cov'].append(int(line1[3]))\n prop_dict['mismatch_rate'].append(float(line1[5]))\n prop_dict['a_mism'].append(int(line1[6]) + int(line1[11]))\n prop_dict['g_mism'].append(int(line1[7]) + int(line1[12]))\n prop_dict['t_mism'].append(int(line1[8]) + int(line1[13]))\n prop_dict['c_mism'].append(int(line1[9]) + int(line1[14]))\n prop_dict['arrest_rate'].append(float(line1[-1]))\n\n return prop_dict",
"def _get_ngram_matches(hyp_n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]], ref_n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]]) ->Dict[int, Tensor]:\n matching_n_grams: Dict[int, Tensor] = defaultdict(lambda : tensor(0.0))\n for n in hyp_n_grams_counts:\n matching_n_grams[n] = tensor(sum(torch.min(ref_n_grams_counts[n][n_gram], hyp_n_grams_counts[n][n_gram]) for n_gram in hyp_n_grams_counts[n]))\n return matching_n_grams",
"def cigar_to_map(cigar_text):\n assert 'I' not in cigar_text\n spans, posn = [], 0\n for n, c in pattern.findall(cigar_text):\n if n:\n n = int(n)\n else:\n n = 1\n \n if c == 'M':\n spans.append(Span(posn, posn+n))\n posn += n\n else:\n spans.append(LostSpan(n))\n map = Map(spans = spans, parent_length = posn)\n return map",
"def matching_ends(list_of_ends, max_mis):\n paf(\"starting matching_ends at \" + str(datetime.datetime.now()))\n \n dict_of_matches = {}\n for n in range(len(list_of_ends)):\n seq1 = list_of_ends[n]\n neighbours = {}\n for m in range(n+1, len(list_of_ends)):\n seq2 = list_of_ends[m]\n alignment_worthwhile = ((seq2.seq[:30] in seq1.reverse_complement().seq) \n or (seq2.seq[30:60] in seq1.reverse_complement().seq)) \n if alignment_worthwhile:\n alignments = pairwise2.align.localms(seq1.reverse_complement().seq, seq2.seq, 1, -100, -100, -100)\n gaps = alignments[0][0].count(\"-\")\n if alignments[0][2] >= len(seq1.seq) - max_mis:\n neighbours[seq2.id] = len(seq1.seq) - gaps\n dict_of_matches[seq1.id] = neighbours\n \n for end1 in dict_of_matches:\n for end2 in dict_of_matches[end1]: \n dict_of_matches[end2][end1] = dict_of_matches[end1][end2]\n \n return dict_of_matches",
"def snp_recovery(exp, obs):\n refs = defaultdict(int)\n matches = defaultdict(int)\n for e, o in zip(exp, obs):\n refs[e] += 1\n if o == e: #match\n matches[e] += 1\n result = dict((e, (matches[e], refs[e]))\n for e in refs.iterkeys())\n return result",
"def pslMap( options ):\n\n if options.format == \"gtf\":\n use_copy = False\n else:\n use_copy = True\n\n ninput, noutput, ndiscarded, nskipped, nskipped_small_queries = 0, 0, 0, 0, 0\n\n min_length = options.min_aligned\n\n for match, qx, tx in iterator_psl_intervals( options ):\n\n map_query2target = match.getMapQuery2Target()\n\n ninput += 1\n\n ## if no filter on qx or tx, use full segment\n if qx == None:\n qx = [ (match.mQueryFrom,match.mQueryTo,0) ]\n elif tx == None:\n tx = [ (match.mSbjctFrom,match.mSbjctTo,0) ]\n\n ## if no overlap: return\n if not qx or not tx: \n nskipped += 1\n continue\n\n for query in qx:\n\n qstart, qend, qval = query\n\n # skip elements that are too small\n if qend - qstart < min_length: \n E.debug( \"query too small - skipped at %s:%i-%i\" % (match.mQueryId, qstart, qend) )\n nskipped_small_queries += 1\n continue\n\n E.debug( \"working on query %s:%i-%i\" % (match.mQueryId, qstart, qend) )\n\n mqstart, mqend = ( map_query2target.mapRowToCol(qstart, \n alignlib_lite.py_RIGHT), \n map_query2target.mapRowToCol(qend, \n alignlib_lite.py_LEFT) )\n \n \n if match.strand == \"-\":\n qstart, qend = match.mQueryLength - qend, match.mQueryLength - qstart\n\n for target in tx:\n\n tstart, tend, tval = target\n if tstart >= mqend or tend <= mqstart: continue\n if tend - tstart < min_length: continue\n\n new = alignlib_lite.py_makeAlignmentBlocks()\n \n if use_copy:\n # do copy with range filter\n if options.loglevel >= 3:\n\n mtstart, mtend = map_query2target.mapColToRow(tstart), map_query2target.mapColToRow(tend) \n E.debug( \"query: %i-%i (len=%i)-> %i-%i(len=%i); target: %i-%i (len=%i)-> %i-%i (len=%i)\" % \\\n (qstart, qend,\n qend - qstart,\n mqstart, mqend,\n mqend - mqstart,\n tstart, tend,\n tend - tstart,\n mtstart, mtend,\n mtend - mtstart ) )\n \n alignlib_lite.py_copyAlignment( \n new, \n map_query2target,\n qstart, qend,\n tstart, tend )\n else:\n # do copy with alignment filter\n map_query = qval\n if map_query:\n tmp = alignlib_lite.py_makeAlignmentBlocks() \n alignlib_lite.py_copyAlignment( tmp, map_query2target, map_query, alignlib_lite.py_RR )\n if options.loglevel >= 5:\n options.stdlog.write( \"######## mapping query ###########\\n\" )\n options.stdlog.write( \"# %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_query2target ) ))\n options.stdlog.write( \"# %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_query ) ))\n options.stdlog.write( \"# %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( tmp ) ))\n else:\n tmp = map_query2target\n \n map_target = tval\n if map_target:\n new = alignlib_lite.py_makeAlignmentBlocks()\n alignlib_lite.py_copyAlignment( new, tmp, map_target, alignlib_lite.py_CR ) \n if options.loglevel >= 5:\n options.stdlog.write( \"######## mapping target ###########\\n\" )\n options.stdlog.write( \"# before: %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( tmp ) ))\n options.stdlog.write( \"# map : %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_target ) ))\n options.stdlog.write( \"# after : %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( new ) ))\n else:\n new = tmp\n\n if options.loglevel >= 4:\n E.debug(\"putative match with intervals: %s and %s: %i-%i\" % \\\n (str(query), str(target), qstart, qend ))\n if options.loglevel >= 5:\n E.debug( \"input : %s\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_query2target ) ))\n E.debug( \"final : %s\" % str(alignlib_lite.py_AlignmentFormatEmissions( new ) ) )\n\n if new.getLength() > 0:\n n = match.copy()\n 
n.fromMap( new, use_strand = True )\n E.info( \"match : %s\" % (str(n)))\n\n if new.getNumAligned() > options.min_aligned:\n n = match.copy()\n n.fromMap( new, use_strand = True )\n options.stdout.write( str(n) + \"\\n\" )\n noutput += 1\n else:\n ndiscarded += 1\n\n E.info( \"map: ninput=%i, noutput=%i, nskipped=%i, ndiscarded=%i, nsmall_queries=%i\" % \\\n (ninput, noutput, nskipped, ndiscarded, nskipped_small_queries) )",
"def handle_seq(seq, barcode_map, result_dict):\n for i in range(len(seq)):\n for barcode in barcode_map.keys():\n possible_match = seq[i: i + len(barcode)]\n if possible_match == barcode:\n result_dict[barcode][i] += 1",
"def align_matches(matches):\n # align by diffs\n diff_counter = {}\n largest = 0\n largest_count = 0\n song_id = -1\n for tup in matches:\n sid, diff = tup\n if diff not in diff_counter:\n diff_counter[diff] = {}\n if sid not in diff_counter[diff]:\n diff_counter[diff][sid] = 0\n diff_counter[diff][sid] += 1\n\n if diff_counter[diff][sid] > largest_count:\n largest = diff\n largest_count = diff_counter[diff][sid]\n song_id = sid\n\n # extract idenfication\n song = get_song_by_id(song_id)\n # if song:\n # # TODO: Clarify what `get_song_by_id` should return.\n # songname = song.get(\"title\", None)\n # else:\n # return None\n\n # return match info\n nseconds = round(float(largest) / DEFAULT_FS *\n DEFAULT_WINDOW_SIZE *\n DEFAULT_OVERLAP_RATIO, 5)\n song_info = {\n \"idx\": song[0],\n # \"title\": song[1],\n \"confidence\": largest_count,\n \"total_hash_count\": song[2],\n \"offset\": int(largest),\n \"offset_seconds\": nseconds,\n }\n return song_info",
"def calc_positions(zpoints, dsq_list):\n\n pos_map = {}\n points_map = {}\n\n for z, p in zpoints.items():\n if z in dsq_list:\n p = -1\n if p not in points_map:\n points_map[p] = set()\n points_map[p].add(z)\n\n i = 1\n for p in sorted(list(points_map.keys()), reverse = True):\n pos_map[i] = points_map[p]\n i += len(points_map[p])\n\n return pos_map",
"def gen_map_by_matchblocks(self, cleaned_lvr_items, sovc_items,\n lvr_raceid=None,\n sovc_raceid=None ):\n idmap = set()\n #!print('DBG: init idmap=',pformat(idmap))\n fixed_lvr,fixed_sovc = zip(*self.fixed_mapping)\n lvr_items = [(id,title) for (id,title) in cleaned_lvr_items\n if (title not in fixed_lvr)]\n sovc_items = [(id,title) for (id,title) in sovc_items\n if (title not in fixed_sovc)]\n if len(lvr_items) == 0:\n return [(0,None,sid) for sid,stitle in sovc_items]\n iid,ititle = zip(*lvr_items)\n if len(sovc_items) == 0:\n return [(0,lid,None) for lid in iid]\n jid,jtitle = zip(*sovc_items)\n s = SequenceMatcher(None, ititle, jtitle)\n lvr_unmapped = set(iid)\n sovc_unmapped = set(jid) \n for (lvr_idx, sovc_idx, size) in s.get_matching_blocks():\n for offset in range(size):\n lvr_id = iid[lvr_idx+offset]\n sovc_id = jid[sovc_idx+offset]\n lvr_unmapped.discard(lvr_id)\n sovc_unmapped.discard(sovc_id)\n idmap.add((1.0, lvr_id, sovc_id))\n lvr_lut = dict(cleaned_lvr_items)\n sovc_lut = dict(sovc_items)\n bestlvr = None\n bestsovc = None\n while (len(lvr_unmapped) != 0) and (len(sovc_unmapped) != 0):\n bestconf = -1\n for lvr_id,sovc_id in product(lvr_unmapped,sovc_unmapped):\n conf = similar(lvr_lut[lvr_id], sovc_lut[sovc_id])\n if conf > bestconf:\n bestconf = conf\n bestlvr = lvr_id\n bestsovc = sovc_id\n lvr_unmapped.discard(bestlvr)\n sovc_unmapped.discard(bestsovc)\n idmap.add((bestconf, bestlvr, bestsovc))\n # If any LVR ids were not paired up, map them to NONE\n for lvr_id in lvr_unmapped:\n idmap.add((0, lvr_id, None))\n for sovc_id in sovc_unmapped:\n idmap.add((0, None, sovc_id))\n\n #### Add fixed_map for choices (WRITE-IN, etc.)\n if lvr_raceid != None: \n # rcinv[choiceTitle] = choiceId\n lvr_rcinv = dict([(self.lvr_clut[cid],cid)\n for cid in self.lvr_rclut[lvr_raceid]])\n sovc_rcinv = dict([(self.sovc_clut[cid],cid)\n for cid in self.sovc_rclut[sovc_raceid]])\n for (lvr_title,sovc_title) in self.fixed_mapping:\n lvr_id = lvr_rcinv.get(lvr_title, None)\n sovc_id = sovc_rcinv.get(sovc_title, None)\n if lvr_id and sovc_id:\n idmap.add((1, lvr_id, sovc_id))\n \n return idmap # set([(conf, lvr_id, sovc_id), ...])",
"def iter_protenn_matches(file: str):\n with open(file, \"rt\") as fh:\n for line in fh:\n sequence_id, pfam_acc, start, end = line.rstrip().split(\"\\t\")\n if re.fullmatch(r\"PF\\d+\", pfam_acc):\n yield sequence_id, pfam_acc, int(start), int(end)",
"def flowgram_id_to_seq_id_map(seqs):\r\n result = {}\r\n for id_, seq in seqs:\r\n fields = id_.split()\r\n seq_id = id_\r\n flowgram_id = fields[1]\r\n result[flowgram_id] = seq_id\r\n return result",
"def parse_positions(state):\n entity_positions = dict()\n\n # Go through rows and columns\n for row_nr, row in enumerate(state):\n for col_nr, cell in enumerate(row):\n\n # Go through entities that still have not been found\n for entity_nr, entity in enumerate(EntityNames):\n\n # Check if found\n if entity in cell:\n entity_positions[entity] = (abs(col_nr), abs(row_nr))\n\n # Check if all found\n if len(entity_positions) == 3:\n return entity_positions\n\n return entity_positions",
"def _merge_template_search(self, inputs):\n seq_dict = defaultdict(list)\n # flatten and permute\n for input_dic in inputs:\n for name, x in input_dic.items():\n if name == 'mask':\n seq_dict[name].append(x.flatten(1))\n else:\n seq_dict[name].append(\n x.flatten(2).permute(2, 0, 1).contiguous())\n # concatenate\n for name, x in seq_dict.items():\n if name == 'mask':\n seq_dict[name] = torch.cat(x, dim=1)\n else:\n seq_dict[name] = torch.cat(x, dim=0)\n return seq_dict",
"def makeSNPMap(snpfile, referencemap):\n\tbimfile = open(snpfile, \"r\") # open the input file\n\tmapfile = open(referencemap, \"r\")\n\toutfilename = re.sub(r'\\.bim', '.markerpos', snpfile)\n\tposfilename = re.sub(r'\\.bim', '.snp_locations', snpfile)\n\toutfile = open(outfilename, \"w\")\n\tposfile = open(posfilename, \"w\")\n\t# Initialize variables \n\tpreviousCM = 0\n\tpreviousPos = 0\n\ti=0\n\tbimline = bimfile.readline().strip().split() # Pos 1 is rsID, Pos 3 is location\n\tfor mapline in mapfile:\n\t\tif len(bimline) == 0:\n\t\t\tbreak\t\t\n\t\tif i==0:\n\t\t\ti+=1\n\t\t\tcontinue\n\t\tmapline = mapline.strip().split()\n\t\t# Three cases: 1. SNP pos gt map pos\n\t\twhile int(bimline[3]) < int(mapline[0]): # This means that the BIM file is behind the map file, so need to write output here with the interopolation\n\t\t# of the previous values\n\t\t\tdiffCM = float(mapline[2]) - float(previousCM)\n\t\t\tdiffpos = float(mapline[0]) - float(previousPos)\n\t\t\tmulti = (float(bimline[3]) - float(previousPos))/diffpos\n\t\t\tcmout = multi*diffCM + float(previousCM)\n\t\t\tif cmout < 0: # this should not happen so if it does dump data and quit\n\t\t\t\tprint i\n\t\t\t\tprint cmout\n\t\t\t\tprint diffCM\n\t\t\t\tprint diffpos\n\t\t\t\tprint previousCM\n\t\t\t\tprint previousPos\n\t\t\t\tprint bimline\n\t\t\t\tprint mapline\n\t\t\t\texit()\n\n\t\t\toutfile.write( str(cmout) +\"\\n\")\n\t\t\tposfile.write( str(bimline[3]) + \"\\t\" + str(cmout) + \"\\n\")\n\t\t\tbimline = bimfile.readline().strip().split()\n\t\t\tif len(bimline) == 0:\n\t\t\t\tbreak\t\t\n\t\tif len(bimline) ==0:\n\t\t\tbreak\n\t\tif bimline[3] == mapline[0]: # write out genetic position\n\t\t\toutfile.write( mapline[2]+ \"\\n\")\n\t\t\tposfile.write( str(bimline[3]) + \"\\t\" + mapline[2] + \"\\n\")\n\t\t\tbimline = bimfile.readline().strip().split()\n\t\n\t\t#if bimline[3] > mapline[0]: # read next line in the map file\n\t\t#\tpreviousCM = mapline[2]\n\t\t#\tpreviousPos = mapline[0]\n\t\t#\tcontinue\n\t\t# Hits this and continues if bimline is above mapline\n\t\tpreviousCM = mapline[2]\n\t\tpreviousPos = mapline[0]\n\t\ti += 1\n\toutfile.close()\n\treturn(outfile.name)",
"def pos():\n pos_list = []\n for token in doc:\n pos_list.append(token.pos_)\n setList = list(set(pos_list))\n my_dict = {i: pos_list.count(i) for i in setList}\n print(my_dict)",
"def findCenterSeq(dictofSeq):\n seqLen = len(dictofSeq)\n pwMatrix = [[\"-\"]*seqLen for i in range(seqLen)]\n listofSeq = []\n for key in dictofSeq:\n listofSeq.append(dictofSeq.get(key))\n \n findMin = []\n acc = 0\n for seq in listofSeq:\n for seq2 in listofSeq:\n # in1 gives row, in2 gives column \n in1 = listofSeq.index(seq)\n in2 = listofSeq.index(seq2)\n pwMatrix[in1][in2] = pairwise(seq, seq2)\n acc += pwMatrix[in1][in2]\n #TypeError: 'int' object is not subscriptable\n findMin.append(acc)\n acc = 0\n posSeq = findMin.index(min(findMin))\n refString = listofSeq[posSeq]\n refName = \"\"\n \n for name, seq in dictofSeq.items():\n if seq == refString:\n refName = name\n \n print(refName)\n \n return refName",
"def process_n_hmmer_output(file_name):\n dict = {}\n with file_open(file_name, \"r\") as f:\n for l in f:\n if not l.startswith(\"#\"):\n field = l.split()\n target_name = field[0]\n ali_from = int(field[6])\n ali_to = int(field[7])\n sq_len = field[10]\n # flip the co-ordinates start position is bigger then the stop position\n if ali_from > ali_to:\n ali_from = int(field[7])\n ali_to = int(field[6])\n\n if not target_name in dict:\n dict[target_name] = [sq_len, ali_from, ali_to]\n else:\n if dict[target_name][1] > ali_from:\n dict[target_name][1] = ali_from\n if dict[target_name][2] < ali_to:\n dict[target_name][2] = ali_to\n return dict",
"def find_coordinates(hmms, bit_thresh):\n # get coordinates from cmsearch output\n seq2hmm = parse_hmm(hmms, bit_thresh)\n seq2hmm = best_model(seq2hmm)\n group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]\n for seq, info in list(seq2hmm.items()):\n group2hmm[seq] = {}\n # info = [model, [[hit1], [hit2], ...]]\n for group_num, group in enumerate(hit_groups(info[1])):\n # group is a group of hits to a single 16S gene\n # determine matching strand based on best hit\n best = sorted(group, reverse = True, key = itemgetter(-1))[0]\n strand = best[5]\n coordinates = [i[0] for i in group] + [i[1] for i in group]\n coordinates = [min(coordinates), max(coordinates), strand]\n # make sure all hits are to the same strand\n matches = [i for i in group if i[5] == strand]\n # gaps = [[gstart, gend], [gstart2, gend2]]\n gaps = check_gaps(matches)\n group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps]\n return group2hmm"
] | [
"0.58689076",
"0.5789363",
"0.5755219",
"0.5687147",
"0.5647959",
"0.5635121",
"0.5623256",
"0.56161094",
"0.5606003",
"0.5603484",
"0.5583012",
"0.5560962",
"0.55549896",
"0.55387646",
"0.5521165",
"0.55000585",
"0.54610294",
"0.5446249",
"0.54335797",
"0.5430714",
"0.5427685",
"0.54273766",
"0.5425057",
"0.54106486",
"0.54031205",
"0.5393542",
"0.53815645",
"0.5376837",
"0.5373095",
"0.5361617"
] | 0.71227914 | 0 |
Evaluate the provided object to the condition | def evaluate(self, obj):
#obj._print()
# substitute event's attributes names by their values.
cond = self.condition
for attr in obj._attr_:
cond = re.sub('evt\.%s' % attr, "\"%s\"" % str(obj._attr_[attr]), cond)
# if it remains evt.* objects in the rule, there is a problem
# FIXME: false positive is possible when parsing an url for example containing somethingevt.gif <= 'evt.'
if re.search(r'evt\.', cond):
msg = "Correlation rule (%s) not properly translated. " % self.name
msg += "Please fix the correlation rule and/or parser! Unexpected: %s" % cond
self.logger.error(msg)
return False
# condition_rule = "(f1(1,3) and f1(2,10)) and f2(5)"
# eval(condition_rule, {'f1':fct1, 'f2':fct2})
try:
res = eval(cond, self.FunctionsEntryPoints)
except:
res = False
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self, operand: object) -> bool:\n pass",
"def eval(self, Vobj):\n try:\n return Vobj.evaluated_on(self)\n except AttributeError:\n return self.A() * Vobj + self.b()",
"def condition(self) -> global___Expression:",
"def condition(self) -> global___Expression:",
"def rule_evaluator(response, condition):\n return eval(condition)",
"def evaluate(self, payload, level=0, verbose=True):\n # find the value to compare in the payload dict\n field_value_ = pluck(payload, self.field_)\n if not field_value_:\n raise ValueError(f\"Required field '{self.field_}' not in payload.\")\n\n if self.date_field:\n is_date, datetime_value = detect_date_field(field_value_)\n if not is_date:\n raise ValueError(\"Datetime value expected for this comparison.\")\n field_value_ = datetime_value\n \n if verbose:\n tabs = \"\\t\" * level\n print(tabs + f\"Evaluating {self.field_}: {field_value_} {self.op_str_} {self.value_}\")\n \n # run the comparison operation based on the initialzed operator\n result = self.func_(field_value_, self.value_)\n if verbose: print(tabs + f\"Evaluation Result: {result}\")\n \n return result",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def conditional_value(self) -> global___Expression.ConditionalOperator:",
"def _execute_conditional(self, cond, series, ds):\n\n ret = None\n func = cond['function']\n if func not in ['std', 'mean', 'value']:\n self.debug('invalid function. \"{}\"'.format(func))\n return\n\n attr = cond['attribute']\n action = cond.get('action', 'cancel')\n atypes = cond.get('analysis_types', None)\n bin_hours = cond.get('bin_hours', 6)\n\n tolerance_seconds = 60 * 60 * bin_hours\n dd = ds > tolerance_seconds\n bounds = where(dd)[0]\n itemidx = bounds[-1] if bounds else 0\n\n if atypes:\n series = [si for si in series if si['analysis_type'] in atypes]\n\n series_v = [si[attr] for si in series[itemidx:] if attr in si]\n\n if func == 'value':\n if series_v[-1] != series_v[-2]:\n ret = action\n else:\n minx = cond.get('min_n', 10)\n if len(series) <= minx:\n return\n\n x = array(series_v)\n if func == 'std':\n x = x.std()\n elif func == 'mean':\n x = x.mean()\n\n comp = cond['comparison']\n if eval(comp, {'x': x}):\n ret = action\n\n return ret",
"def visit(obj, visitor: BooleanExpressionVisitor[T]) -> T:\n raise NotImplementedError(f\"Cannot visit unsupported expression: {obj}\")",
"def evaluate(self) :\n pass",
"def evaluate(self, *args, **kwargs) -> Union[str, int, float, bool]:\n return True",
"def evaluate(self):\n raise NotImplementedError()",
"def evaluate(self, attributes):\n return self.predicate(attributes[self.name])",
"def evaluateBoolean(compiled_expression):",
"def evaluate(self, d):\n return bool(eval(self.expr, facts_globals, d))",
"def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)",
"def evaluate(self):\n raise NotImplementedError(\"Abstract method\")",
"def update_with_evaluation(param_object, evaluation_dict, verbose):\n if evaluation_dict: # evaluates true if dict is not empty and the value is not None\n for key, value in evaluation_dict.items():\n try:\n setattr(param_object, key, value)\n TellUser.debug('attribute (' + param_object.name + ': ' + key + ') set: ' + str(value))\n except KeyError:\n TellUser.debug('No attribute ' + param_object.name + ': ' + key)",
"def eval_value(self, v):\n okay = False\n if ast_class(v) == 'Dict':\n # dict\n if self.eval_dict(v):\n okay = True\n elif ast_class(v) == 'List':\n # list\n if self.eval_list(v):\n okay = True\n elif ast_class(v) == 'Str':\n # string\n okay = True\n elif ast_class(v) == 'Name' and v.id in ('True', 'False', 'None'):\n # booleans or None\n okay = True\n elif ast_class(v) == 'Num':\n # numbers\n okay = True\n elif ast_class(v) == 'UnaryOp' and ast_class(v.op) == 'USub' and ast_class(v.operand) == 'Num':\n # negative numbers\n okay = True\n return okay",
"def eval(self, Vobj):\n if is_Vector(Vobj):\n return self.A() * Vobj + self.b()\n return Vobj.evaluated_on(self)",
"def evaluate(self, edict):\n pass",
"def evaluate(self, X):\n\n\t\tpass",
"def cond(conditions, value):\n for predicate, transformer in conditions:\n if predicate(value):\n return transformer(value)",
"def cond_predicate(clause):\n return car(clause)",
"def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)",
"def eval_logic(self, checkDict):\n result = True\n #gets individual evaluations from children\n passList = []\n for child in self.children:\n myVal = child.eval_comparison(checkDict)\n passList.append(child.eval_comparison(checkDict))\n\n #if only one child returns the only boolean available\n if(len(passList) == 1):\n result = passList[0]\n\n #TODO: Combine following cases possibly\n #print(passList)\n #gets resutl if only 2 simple logics\n elif(len(passList) == 2 and len(self.operators) == 1):\n\n result = self.operators[0](passList[0], passList[1])\n else:\n #combines all children logic using the operators\n firstCheck = True\n opIndex = 0\n for i in range(0,len(passList)):\n if(firstCheck):\n firstCheck = False\n result = self.operators[opIndex](passList[0], passList[1])\n i+=1\n else:\n result = self.operators[opIndex](result,passList[i])\n opIndex += 1\n \"\"\"\n print('----------------------')\n print(result)\n \"\"\"\n return result",
"def evaluateValue(compiled_expression):",
"def __call__ (self, item, * args, ** kw) :\n return self.predicate (item, * args, ** kw)"
] | [
"0.6598018",
"0.6156341",
"0.6133143",
"0.6133143",
"0.6017951",
"0.59076935",
"0.5841889",
"0.5841889",
"0.5840776",
"0.5840498",
"0.5802535",
"0.57257354",
"0.5681474",
"0.56519026",
"0.56487983",
"0.5630435",
"0.56184256",
"0.56154966",
"0.56067294",
"0.55853695",
"0.5555239",
"0.5506781",
"0.54989773",
"0.5453073",
"0.54401743",
"0.5432008",
"0.54314554",
"0.5424957",
"0.54099566",
"0.5407937"
] | 0.71140516 | 0 |
Gets the assigned_user of this Workitems. | def assigned_user(self):
return self._assigned_user | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assignee(self):\n membership = UnitMembershipFactory(unit=self.unit)\n return membership.user",
"def assigned_to(self) -> Optional[str]:\n return pulumi.get(self, \"assigned_to\")",
"def assigned_to(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"assigned_to\")",
"def get_user_assignd_identity_from_mc(self) -> Union[str, None]:\n user_assigned_identity = None\n if self.mc and self.mc.identity and self.mc.identity.user_assigned_identities:\n user_assigned_identity = safe_list_get(list(self.mc.identity.user_assigned_identities.keys()), 0, None)\n return user_assigned_identity",
"def get_user_assigned_identity_object_id(self, user_assigned_identity=None) -> str:\n assigned_identity = user_assigned_identity if user_assigned_identity else self.get_assign_identity()\n if assigned_identity is None or assigned_identity == \"\":\n raise RequiredArgumentMissingError(\"No assigned identity provided.\")\n return self.get_identity_by_msi_client(assigned_identity).principal_id",
"def primary_user_assigned_identity(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def assigned_user(self, assigned_user):\n self._assigned_user = assigned_user",
"def user(self):\n return self._forced_user",
"def user(self):\n return self._project.user",
"def get_user(self):\n return self.user",
"def get_user(self):\n return self.user",
"def primary_user_assigned_identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def primary_user_assigned_identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def get_user(self) -> User:\n return self.__user",
"def created_user(self):\n return self._created_user",
"def user(self):\n return self._user",
"def user(self):\n return self._user",
"def user(self):\n return self._user",
"def user(self):\n return self._user",
"def get_assignee_email(self, assignee_id):\n response = self.http_call(\"{0}/users/{1}.json\".format(self.uri, assignee_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))[\"user\"][\"email\"]",
"def user(self):\n return self.owner.user",
"def get_identity_by_msi_client(self, assigned_identity: str) -> Identity:\n return self.external_functions.get_user_assigned_identity_by_resource_id(self.cmd.cli_ctx, assigned_identity)",
"def get_user(self):\n mtool = getToolByName(self.context, 'portal_membership')\n if mtool.isAnonymousUser():\n return\n\n return mtool.getAuthenticatedMember()",
"def get_user_assigned_identity_client_id(self, user_assigned_identity=None) -> str:\n assigned_identity = user_assigned_identity if user_assigned_identity else self.get_assign_identity()\n if assigned_identity is None or assigned_identity == \"\":\n raise RequiredArgumentMissingError(\"No assigned identity provided.\")\n return self.get_identity_by_msi_client(assigned_identity).client_id",
"def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user",
"def get_task_user():\n from olympia.users.models import UserProfile\n\n return UserProfile.objects.get(pk=settings.TASK_USER_ID)",
"def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]",
"def get_user(self):\n return str(self.request.user.id)",
"def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None",
"def get_user_id(self):\n return self.id_user"
] | [
"0.70582396",
"0.70253783",
"0.68387794",
"0.6594624",
"0.64337957",
"0.63194",
"0.62276447",
"0.61899453",
"0.61852264",
"0.617757",
"0.617757",
"0.6168088",
"0.6168088",
"0.6166981",
"0.612094",
"0.6104734",
"0.6104734",
"0.6104734",
"0.6104734",
"0.606838",
"0.60127085",
"0.5997731",
"0.59963024",
"0.59696466",
"0.5917241",
"0.5884707",
"0.5874357",
"0.58715546",
"0.5869668",
"0.5867232"
] | 0.8579693 | 0 |
Sets the assigned_user of this Workitems. | def assigned_user(self, assigned_user):
self._assigned_user = assigned_user | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assigned_by_user(self, assigned_by_user):\n\n self._assigned_by_user = assigned_by_user",
"def assigned_user(self):\n return self._assigned_user",
"def assign_user_to_issue(self, issue, JIRAUsername):\r\n # TODO: Review docs\r\n self.jira.assign_issue(issue=issue, assignee=JIRAUsername)",
"def assure_tender_assigned_to_user(self, tender_new_id, assigned_user):\n tenders_from_admin = ToDoTenders(division_admin_login, universal_password) # only admin see all chains\n\n all_tender_id_responsibles_chains = tenders_from_admin.get_all_assigned_users_for_tenders(\n tenders_from_admin.get_tenders_with_responsibles('in_work'))\n\n for chain in all_tender_id_responsibles_chains:\n if chain['tender_new_id'] == tender_new_id:\n for res in chain['responsibles']:\n if res['emailAddress'] == assigned_user:\n return True\n else:\n pass",
"def get_user_assigned_identity_object_id(self, user_assigned_identity=None) -> str:\n assigned_identity = user_assigned_identity if user_assigned_identity else self.get_assign_identity()\n if assigned_identity is None or assigned_identity == \"\":\n raise RequiredArgumentMissingError(\"No assigned identity provided.\")\n return self.get_identity_by_msi_client(assigned_identity).principal_id",
"def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):\n raise NotImplementedError",
"def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):\n raise NotImplementedError",
"def assign(self, assignee, created_by, unit):\n assignment = ReferralAssignment.objects.create(\n assignee=assignee,\n created_by=created_by,\n referral=self,\n unit=unit,\n )\n ReferralActivity.objects.create(\n actor=created_by,\n verb=ReferralActivityVerb.ASSIGNED,\n referral=self,\n item_content_object=assignee,\n )\n # Notify the assignee by sending them an email\n Mailer.send_referral_assigned(\n referral=self,\n assignment=assignment,\n assigned_by=created_by,\n )\n\n if self.state in [ReferralState.IN_VALIDATION, ReferralState.PROCESSING]:\n return self.state\n\n return ReferralState.ASSIGNED",
"def set_user(self, user):\r\n self.user = user",
"def set_user(self, user):\n self._user = user",
"def assigned_to(self) -> Optional[str]:\n return pulumi.get(self, \"assigned_to\")",
"def test01_assigned_users(self):\n print_ln('test_assigned_users')\n \n try:\n rList = review.find_roles(Role(name='py-role*'))\n for rle in rList: \n print_ln(\"Assigned users role=\" + rle.name)\n uList = review.assigned_users(rle)\n for user in uList: \n print_ln(\"Assigned user=\" + user, 1)\n except Exception as e:\n self.fail('test_assigned_users failed, exception=' + e.msg)",
"def set_user(self, user: User):\n self.__user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def assigned_to_changed(self, ar):\n # self.add_change_watcher(self.assigned_to)\n\n if (self.assigned_to is not None and\n self.assigned_to != ar.user and\n dd.is_installed('notify')):\n ctx = dict(user=ar.user, what=ar.obj2memo(self))\n def msg(user, mm):\n subject = _(\"{user} has assigned you to ticket: {what}\").format(**ctx)\n return (subject , tostring(E.span(subject)))\n\n mt = rt.models.notify.MessageTypes.tickets\n\n rt.models.notify.Message.emit_notification(\n ar, self, mt, msg,\n [(self.assigned_to, self.assigned_to.mail_mode)]\n )",
"def assigned_to(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"assigned_to\")",
"def borrow_user_id(self, borrow_user_id):\n\n self._borrow_user_id = borrow_user_id",
"def issued_by(self, issued_by):\n\n self._issued_by = issued_by",
"def assign_task(user_name, task_name, work_server_ip):\r\n\r\n database_handler.update_records(\"current_tasks\",\r\n {\"server_ip\": work_server_ip, \"Task_status\": TaskStatusNames.in_progress.value},\r\n condition=\"Task_name=$? and user_name=$?\", code_args=[task_name, user_name])",
"def id_user(self, id_user):\n\n self._id_user = id_user",
"def set_assignment(self, updates, original=None):\n if not original:\n original = {}\n\n self.set_type(updates, original)\n\n if not updates.get('assigned_to'):\n if updates.get('priority'):\n # Priority was edited - nothing to set here\n return\n else:\n updates['assigned_to'] = {}\n\n assigned_to = updates.get('assigned_to') or {}\n if (assigned_to.get('user') or assigned_to.get('contact')) and not assigned_to.get('desk'):\n raise SuperdeskApiError.badRequestError(message=\"Assignment should have a desk.\")\n\n # set the assignment information\n user = get_user()\n if original.get('assigned_to', {}).get('desk') != assigned_to.get('desk'):\n if original.get('assigned_to', {}).get('state') in \\\n [ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS, ASSIGNMENT_WORKFLOW_STATE.SUBMITTED]:\n raise SuperdeskApiError.forbiddenError(\n message=\"Assignment linked to content. Desk reassignment not allowed.\")\n\n assigned_to['assigned_date_desk'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_desk'] = user.get(config.ID_FIELD)\n\n if assigned_to.get('user') and original.get('assigned_to', {}).get('user') != assigned_to.get('user'):\n assigned_to['assigned_date_user'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_user'] = user.get(config.ID_FIELD)\n\n if not original.get(config.ID_FIELD):\n updates['original_creator'] = str(user.get(config.ID_FIELD)) if user else None\n updates['assigned_to'][\n ITEM_STATE] = get_next_assignment_status(updates, updates['assigned_to'].get(ITEM_STATE) or\n ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)\n else:\n # In case user was removed\n if not assigned_to.get('user'):\n assigned_to['user'] = None\n else:\n # Moving from submitted to assigned after user assigned after desk submission\n if original.get('assigned_to')['state'] == ASSIGNMENT_WORKFLOW_STATE.SUBMITTED:\n updates['assigned_to']['state'] = get_next_assignment_status(updates,\n ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS)\n\n updates['version_creator'] = str(user.get(config.ID_FIELD)) if user else None",
"def user_capacity(self, user_capacity: SmartSsdUserCapacity):\n\n self._user_capacity = user_capacity"
] | [
"0.8350922",
"0.68189096",
"0.5876797",
"0.58575225",
"0.57323456",
"0.56373864",
"0.56373864",
"0.55778617",
"0.55627865",
"0.55390745",
"0.55118716",
"0.55098593",
"0.54444087",
"0.54271966",
"0.54271966",
"0.54271966",
"0.54271966",
"0.54271966",
"0.54271966",
"0.54271966",
"0.54271966",
"0.54271966",
"0.5406771",
"0.5403722",
"0.53922266",
"0.537916",
"0.5372679",
"0.5357376",
"0.53403354",
"0.52955157"
] | 0.85572076 | 0 |
Gets the developer of this Workitems. | def developer(self):
return self._developer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def developer(self):\n return self.proto.creator",
"def get_maintainer(self):\n return self.paragraphs[0].get(\"Maintainer\")",
"def technical_owner(self):\n return self._technical_owner",
"def user(self):\n return self._project.user",
"def is_developer(self):\n\n return self._is_developer",
"def get_original_maintainer(self):\n return self.paragraphs[0].get(\"XSBC-Original-Maintainer\")",
"def get_customer(self):\n return self._customer",
"def get_customer(self):\n return self._customer",
"def get_maintainer(self, dataset: Dict) -> User:\n maintainer = dataset[\"maintainer\"]\n return self.users.get(maintainer)",
"def get_owner(self):\n return self._creatorsHeap[0][1]",
"def customer(self):\n return self.__customer",
"def vendor(self):\n return self._vendor",
"def getCalendarUser(self):\n # XXX: we assume that calendar are directly stored on the\n # user's workspace\n return aq_parent(self.getCalendar()).getOwnerTuple()[1]",
"def user(self):\n return self._push.get('user', None)",
"def DEVELOPER(cls):\n\n return DataCenter.Environment(\"https://developer.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())",
"def get_building_by_user(self, user):\r\n\t\t\r\n\t\treturn self.transactions[user][1]",
"def getManufacturer(self):\n return self.manufacturer",
"def user(self):\n return self.owner.user",
"def getCustomer(self):\n return self._Customer",
"def getUser(self):\n current_user = self.user\n return current_user",
"def getUser(self):\n current_user = self.user\n return current_user",
"def getUser(self):\n current_user = self.user\n return current_user",
"def user(self):\n return self._user",
"def user(self):\n return self._user",
"def user(self):\n return self._user",
"def user(self):\n return self._user",
"def merchant(self):\n return self.__merchant",
"def owner(self):\n if self.get_team():\n return self.get_team()\n return None",
"def user(self):\n return self.getattr('user')",
"def getUser():\n\t\tuser = users.get_current_user()\n\t\tuserList = db.GqlQuery(\"SELECT * FROM AppUser WHERE id = :1 LIMIT 1\",\n\t\t\t\t\t\t\tuser).fetch(1)\n\t\tif userList == []:\t\t# Wasn't found\n\t\t\treturn AppUser.registerUser()\n\t\treturn userList[0]"
] | [
"0.6467594",
"0.6383037",
"0.59011614",
"0.58858037",
"0.5882263",
"0.58464813",
"0.58373034",
"0.58373034",
"0.58332425",
"0.5788284",
"0.5727513",
"0.5696962",
"0.5679556",
"0.5662634",
"0.56608135",
"0.5650917",
"0.5645489",
"0.5638031",
"0.56320435",
"0.5630453",
"0.5630453",
"0.5630453",
"0.561957",
"0.561957",
"0.561957",
"0.561957",
"0.5606309",
"0.5599774",
"0.5535727",
"0.55343765"
] | 0.77347624 | 0 |
Sets the developer of this Workitems. | def developer(self, developer):
self._developer = developer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def developer_certificate_identity(self, developer_certificate_identity):\n\n self._developer_certificate_identity = developer_certificate_identity",
"def developer(self):\n return self._developer",
"def set_maintainer(self, maintainer):\n self.paragraphs[0][\"Maintainer\"] = maintainer",
"def createDeveloper(self):\n self.createUser()\n self.user.is_developer = True\n self.user.put()",
"def developer_certificate_identity_details(self, developer_certificate_identity_details):\n\n self._developer_certificate_identity_details = developer_certificate_identity_details",
"def is_developer(self):\n\n return self._is_developer",
"def _setbeneficiary_customer_no_option_59(self, val):\n self.swift_obj.BeneficiaryCustomer = val\n self.swift_obj.BeneficiaryCustomer.swiftTag = '59'",
"def buyer(self, buyer):\n\n self._buyer = buyer",
"def set_original_maintainer(self, original_maintainer):\n if \"XSBC-Original-Maintainer\" in self.paragraphs[0]:\n self.paragraphs[0][\"XSBC-Original-Maintainer\"] = original_maintainer\n else:\n _insert_after(self.paragraphs[0], \"Maintainer\",\n \"XSBC-Original-Maintainer\", original_maintainer)",
"def _setbeneficiary_customer_59A(self, val):\n self.swift_obj.BeneficiaryCustomer_A = val\n self.swift_obj.BeneficiaryCustomer_A.swiftTag = '59A'",
"def technical_owner(self, technical_owner):\n\n self._technical_owner = technical_owner",
"def vendor(self, vendor):\n\n self._vendor = vendor",
"def customer(self, customer):\n\n self._customer = customer",
"def customer(self, customer):\n\n self._customer = customer",
"def is_developer(self):\n return int(self.developer_status) == 2",
"def SetCurrentUser(self, email, user_id='123456', is_admin=False):\n email = email or ''\n user_id = user_id or ''\n is_admin = '1' if is_admin else '0'\n self.testbed.setup_env(user_is_admin=is_admin,\n user_email=email,\n user_id=user_id,\n overwrite=True)",
"def user(self, user_token, user_device=None):\n self.set('user', user_token)\n self.set('device', user_device)",
"def merchant(self, merchant):\n if merchant is None:\n raise ValueError(\"Invalid value for `merchant`, must not be `None`\") # noqa: E501\n\n self._merchant = merchant",
"def set_demo_user() -> None:\n g.demo_user = _DEMO_USER",
"def vendor_reference(self, vendor_reference):\n\n self._vendor_reference = vendor_reference",
"def setUser(self, value):\n return self._set(user=value)",
"def _setbeneficiary_customer_59F(self, val):\n self.swift_obj.BeneficiaryCustomer_F = val\n self.swift_obj.BeneficiaryCustomer_F.swiftTag = '59F'",
"def set_owner(self, owner):\n self.__owner = owner",
"def serial_dev(self, serial_dev):\n self._serial_dev = serial_dev\n return self",
"def set_user(self, user):\n self._user = user",
"def set_owner(self, owner):\n self.settings[\"owner\"] = owner",
"def company(self, company):\n self._company = company",
"def set_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.product_kendo_dropdown_locator, product)",
"def setUserCode(self, userCode):\n self.userCode = userCode\n logger.debug('user code set to: %s' % self.userCode)",
"def setusers(self, users=None):\n if users:\n self.users = users\n return\n import jsb.lib.users as u\n if not u.users: u.users_boot()\n self.users = u.users"
] | [
"0.6069986",
"0.6056044",
"0.5680614",
"0.5558243",
"0.5480942",
"0.5418043",
"0.53070056",
"0.5263238",
"0.51771384",
"0.51729125",
"0.5170096",
"0.51270753",
"0.50323236",
"0.50323236",
"0.5031481",
"0.49122766",
"0.48889783",
"0.4874513",
"0.48558733",
"0.4839325",
"0.48194882",
"0.47511476",
"0.47386014",
"0.4735439",
"0.47313073",
"0.47269973",
"0.4688845",
"0.46592656",
"0.4658211",
"0.46525833"
] | 0.78007984 | 0 |
Sets the domain of this Workitems. | def domain(self, domain):
self._domain = domain | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def domain(self, domain):\n\n self._domain = domain",
"def domain(self, domain):\n\n self._domain = domain",
"def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return",
"def set_domain(self, var, domain) :\n if var not in self.variables :\n raise KeyError(str(var) + \" is not a variable in this problem.\")\n self.domains[var] = sorted(domain[:])\n return self",
"def set_domain(self, domain):\n\n self._domain = domain\n\n self.changed = True",
"def set_nisdomain(self, nisdomain):\n\n raise NotImplementedError()",
"def set_axis_domain(self, axis_id, domain):\n\n assert axis_id in self.axes_domains\n\n if axis_id is not None:\n logger.debug('setting domain of axis %s with %s', str(axis_id),\n str(domain))\n if len(domain) != self.data.shape[axis_id]:\n raise Exception('length of domain values (%d) does not '\n ' match length of data (%d) for axis %s'\n % (len(domain), self.data.shape[axis_id],\n self.get_axis_name(axis_id)))\n self.axes_domains[axis_id] = np.array(domain)",
"def domains(self, domains):\n\n self._domains = domains",
"def domain(self, domain):\n # type: (string_types) -> None\n\n if domain is not None:\n if not isinstance(domain, string_types):\n raise TypeError(\"Invalid type for `domain`, type has to be `string_types`\")\n\n self._domain = domain",
"def domain(self, domain):",
"def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)",
"def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)",
"def change_domain(self, new_domain):\n self.domain=new_domain\n for pm in self._maps.values():\n pm.change_domain(new_domain)",
"def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain",
"def domain( self ):\n raise NotImplementedError(\"domain\")",
"def store_domain(self, store_domain):\n self._store_domain = store_domain\n return self",
"def domainRouterSet(self, domain, body):\n pass",
"def domain_id(self, domain_id):\n\n self._domain_id = domain_id",
"def domain(self, value):\n if hasattr(self, \"_domain\"):\n raise ValueError(\"A ServerName's domain cannot be changed.\")\n if value is None:\n raise ValueError(\"A ServerName must be given a domain.\")\n if not isinstance(value, str):\n raise TypeError(\"The domain must be a string, not %s.\" % (type(value)))\n if value is \"\":\n raise ValueError(\"A empty string is not a valid domain.\")\n self._domain = value",
"def set_domain(f, dom):\n if f.dom == dom:\n return f\n else:\n return f.per(dmp_set_domain(f.rep, f.lev, f.dom, dom), dom=dom)",
"def _domain(self):\n if self.__domain is None:\n self.__domain = Domain(\n definition='Need domain definition?',\n updatable='False',\n optional='False',\n )\n self._ident[self._domain_name] = self.__domain\n self._data_record.domain_ids = [self._domain_name,]\n return self.__domain",
"def domain(self, value: ArrayLike):\n\n value = as_float_array(value, self.dtype)\n\n if not np.all(np.isfinite(value)):\n runtime_warning(\n f'\"{self.name}\" new \"domain\" variable is not finite: {value}, '\n f\"unpredictable results may occur!\"\n )\n else:\n attest(\n np.all(value[:-1] <= value[1:]),\n \"The new domain value is not monotonic! \",\n )\n\n if value.size != self._range.size:\n self._range = np.resize(self._range, value.shape)\n\n self._domain = value\n self._function = None # Invalidate the underlying continuous function.",
"def domain(self, domain=None):\n\n return self.domain_class(apiobj=self, domainname=domain)",
"def availability_domain(self, availability_domain):\n self._availability_domain = availability_domain",
"def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )",
"def add_domains_restriction(self, domain_restriction):\n self._domain_restricion = domain_restriction\n self._size_var = self._get_size_var()\n self._nr_of_bits = self._get_nr_of_bits()",
"def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data",
"def domain(self):\n # type: () -> string_types\n return self._domain",
"def SetDomainsList(self, domainsList) :\n\t\t...",
"def domain(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"domain\")"
] | [
"0.6938591",
"0.6938591",
"0.68689",
"0.68229336",
"0.65074587",
"0.647992",
"0.64145446",
"0.6360417",
"0.6316579",
"0.6233607",
"0.62118834",
"0.62118834",
"0.6182328",
"0.61120147",
"0.60630286",
"0.6041117",
"0.6008707",
"0.5991201",
"0.5972056",
"0.596478",
"0.5924517",
"0.5766147",
"0.5714477",
"0.570475",
"0.56425285",
"0.5540074",
"0.553365",
"0.5520162",
"0.55193996",
"0.5478324"
] | 0.6992701 | 1 |
Convert a time-domain array `E` to the frequency domain via 2D FFT. `dx` and `dy` are sample spacing in x (left-right, 1st axis) and y (up-down, 0th axis) directions. An optional `upsample > 1` will zero-pad `E` to obtain an upsampled spectrum. Returns `(spectrum, xf, yf)` where `spectrum` contains the 2D FFT of `E`. If `Ny, Nx = spectrum.shape`, `xf` and `yf` will be vectors of length `Nx` and `Ny` respectively, containing the frequencies corresponding to each pixel of `spectrum`. The returned spectrum is zero-centered (via `fftshift`). The 2D FFT, and this function, assume your input `E` has its origin at the top-left of the array. If this is not the case, i.e., your input `E`'s origin is translated away from the first pixel, the returned `spectrum`'s phase will not match what you expect, since a translation in the time domain is a modulation of the frequency domain. (If you don't care about the spectrum's phase, i.e., only magnitude, then you can ignore all these origin issues.)
zeropadded = np.array(E.shape) * upsample
F = fft.fftshift(fft.fft2(E, zeropadded)) / E.size
xf = fft.fftshift(fft.fftfreq(zeropadded[1], d=dx))
yf = fft.fftshift(fft.fftfreq(zeropadded[0], d=dy))
return (F, xf, yf) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fft2d(pm, win, x, /, detrend='constant'):\n x = signal.detrend(x, type=detrend, axis=0) # remove trend or mean from \"time\"\n x = signal.detrend(x, type='constant', axis=1) # remove mean from \"longitude\"\n\n # Use 1D numpy.fft.rfft (identical)\n # Follows Libby's recipe, where instead real is cosine and imag is\n # sine. Note only need to divide by 2 when conjugates are included.\n # xi = np.fft.rfft(x, axis=1)[:,1:]/x.shape[1]\n # xi = win[:,None]*xi # got a bunch of sines and cosines\n # C = np.fft.rfft(xi.real, axis=0)[1:,:]/x.shape[0]\n # S = np.fft.rfft(xi.imag, axis=0)[1:,:]/x.shape[0]\n # part1 = (C.real + S.imag + 1j * (C.imag - S.real))[::-1, :]\n # part2 = C.real - S.imag + 1j * (-C.imag - S.real)\n # return np.concatenate((part1, part2), axis=0)\n\n # Use 2D numpy.fft.rfft2\n # NOTE: Read documentation regarding normalization. Default leaves forward\n # transform unnormalized, reverse normalized by 1 / n. The ortho option\n # normalizes both by 1/sqrt(n).\n # https://docs.scipy.org/doc/numpy-1.15.1/reference/routines.fft.html#module-numpy.fft\n # last axis specified should get a *real* transform\n X = np.fft.rfft2(win[:, None] * x, axes=(0, 1)) # last axis gets real transform\n X = X[:, 1:] # remove the zero-frequency value\n X = X / (x.shape[0] * x.shape[1]) # normalize by sample size\n return np.concatenate((X[pm:, :], X[1:pm + 1, :]), axis=0)",
"def fft2(X):\r\n # return scipy.fftpack.fft2(X)\r\n return np.fft.fft2(X)",
"def get_fft(self):\n\t\t# Get the \"ideal\" evenly spaced times\n\t\teven_times = numpy.linspace(self.buf[0][0], self.buf[-1][0], len(self.buf))\n\t\t\n\t\t# Interpolate the data to generate evenly temporally spaced samples\n\t\tinterpolated = numpy.interp(even_times, *zip(*self.buf))\n\t\t\n\t\t# Perform the FFT\n\t\tfft = numpy.fft.rfft(interpolated)\n\t\treturn zip(numpy.abs(fft), numpy.angle(fft))",
"def test_fft_complex_2d():\n\ta, b, c = np.meshgrid([0, 1, 0, 0], [0, 1j, 1j], [0, 1, 1, 1])\n\tdummy_array = xr.DataArray(a * b * c, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'y': 6, 'z': 8}, dim=['y', 'z'],\n\t\t dx={'y': 0.01, 'z': 0.02})\n\tassert np.allclose(spectrum_array.compute(),\n\t np.fft.fftn(a * b * c, s=(8, 6), axes=(2, 1)))\n\tassert np.array_equal(spectrum_coords['f_y'], np.fft.fftfreq(6, d=0.01))\n\tassert np.array_equal(spectrum_coords['f_z'], np.fft.fftfreq(8, d=0.02))\n\tassert ('x', 'f_y', 'f_z') == spectrum_dims",
"def FFT(y, t):\n dt = t[2] - t[1]\n Fs = 1.0 / dt\n L = len(y)\n Y = fft(y, L) * dt # dt should mathematically be included in the result!\n #amp=abs(Y)/(L/2) #FFT single sided spectrum\n amp = abs(Y) #or simply take the amplitude only?\n T = L * dt #1/T=Fs/L\n freq = np.arange(0, Fs / 2, 1 / T) # list frequencies up to Nyquist frequency\n # resize result vectors to match their lengths\n if len(freq) < len(amp):\n amp = amp[0:len(freq)] # make both vectors the same size\n elif len(amp) < len(freq):\n freq = freq[0:len(amp)]\n return freq, amp",
"def test_fft_spectrum_02():\n f, t, Sxx = _spectral_helper(x, x, fs=s_freq,\n window='hann',\n nperseg=x.shape[0],\n noverlap=0,\n nfft=None,\n return_onesided=True,\n mode='psd',\n scaling='spectrum')\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', scaling='energy', sides='one')\n\n assert_array_equal(f0, f)\n assert_array_almost_equal(Sxx0, Sxx[:, 0] * CORRECTION_FACTOR)",
"def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False):\n return fftn(x,shape,axes,overwrite_x)",
"def ufft2(inarray):\n return ufftn(inarray, 2)",
"def urfft2(inarray):\n return urfftn(inarray, 2)",
"def _irfft2d(f_x) :",
"def FourierTransform2D(xdata, zdata, nPoints):\r\n freq = FourierFrequency(xdata, nPoints)\r\n tdf = np.zeros_like(zdata, dtype=complex)\r\n for u, i in enumerate(zdata):\r\n tdf[u] = FourierTransform(xdata, i, nPoints)\r\n return freq, tdf",
"def test_fft_real_2d():\n\ta = np.mgrid[:5, :5, :5][0]\n\tdummy_array = xr.DataArray(a, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'y': 14, 'z': 18}, dim=['y', 'z'],\n\t\t dx={'y': 0.01, 'z': 0.02}, sym=False)\n\tassert np.allclose(spectrum_array.compute(),\n\t np.fft.rfftn(a, s=(18, 14), axes=(2, 1)))\n\tassert np.array_equal(spectrum_coords['f_y'], np.fft.rfftfreq(14, d=0.01))\n\tassert np.array_equal(spectrum_coords['f_z'], np.fft.fftfreq(18, d=0.02))\n\tassert ('x', 'f_y', 'f_z') == spectrum_dims",
"def fourier_transform2d(self):\n\n zerofill = np.zeros(1024 * np.array([1,1])) #so it will always be square\n zerofill[:len(self.windowed), :len(self.windowed)] = self.windowed\n transform = np.fft.fft2(zerofill)\n transform = np.fft.fftshift(transform) # shift center to zero\n transformed = np.absolute(transform)\n tmax = transformed.max()\n zdata = (transformed)/(tmax) # normalize to maximum value\n\n return zdata",
"def fft(signal):\r\n if signal.size == 1:\r\n return signal\r\n\r\n even_part = fft(signal[::2]) # Only grab even elements\r\n odd_part = fft(signal[1::2]) # Only grab odd elements\r\n\r\n factor = np.exp(-2j * np.pi * np.arange(signal.size) / signal.size)\r\n return np.concatenate([even_part + factor[:int(signal.size / 2)] * odd_part,\r\n even_part + factor[int(signal.size / 2):] * odd_part])",
"def test_fft_complex_1d():\n\ta = np.exp(2j * np.pi * np.arange(8) / 8)\n\tdummy_array = xr.DataArray(a, dims=['x'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'x': 16}, dim=['x'], dx={'x': 0.5})\n\tassert np.array_equal(spectrum_array.compute(), np.fft.fft(a, n=16))\n\tassert np.array_equal(spectrum_coords['f_x'], np.fft.fftfreq(16, d=0.5))\n\tassert 'f_x' in spectrum_dims",
"def FourierTransform(data, nPoints):\r\n tdf = np.fft.fft(data, nPoints)\r\n return tdf",
"def modified_dft(arr, fs, nfft, window, axis, detrend, scaling):\n\n nsamples = arr.shape[axis]\n\n if nfft < nsamples:\n # crop arr before detrending & windowing; see rfft crop\n arr = slice_along_axis(arr, 0, nfft, axis=-1)\n\n # detrend the array\n arr = sps.detrend(arr, axis=axis, type=detrend)\n\n # fetch and apply window\n coeffs = sps.get_window(window, arr.shape[axis])\n arr = multiply_along_axis(arr, coeffs, axis=axis)\n\n # compute real DFT. Zeropad for nfft > nsamples is automatic\n # rfft uses 'backward' norm default which is no norm on rfft\n arr = np.fft.rfft(arr, nfft, axis=axis)\n freqs = np.fft.rfftfreq(nfft, d=1/fs)\n\n # scale using weighted mean of window values\n if scaling == 'spectrum':\n norm = 1 / np.sum(coeffs)**2\n\n elif scaling == 'density':\n #process loss Shiavi Eqn 7.54\n norm = 1 / (fs * np.sum(coeffs**2))\n \n else:\n msg = 'Unknown scaling: {}'\n raise ValueError(msg.format(scaling))\n \n # before conjugate multiplication unlike scipy\n # see _spectral_helper lines 1808 an 1842.\n arr *= np.sqrt(norm)\n\n return freqs, arr",
"def spectrum_fourier(self):\r\n\r\n data = self.input.data\r\n sampling_rate = self.input.sampling_rate\r\n\r\n fft = fftpack.fft\r\n if np.any(np.iscomplex(data)):\r\n # Get negative frequencies, as well as positive:\r\n f = np.linspace(-sampling_rate/2., sampling_rate/2., data.shape[-1])\r\n spectrum_fourier = np.fft.fftshift(fft(data))\r\n else:\r\n f = tsu.get_freqs(sampling_rate, data.shape[-1])\r\n spectrum_fourier = fft(data)[..., :f.shape[0]]\r\n \r\n return f, spectrum_fourier",
"def calculateenergy_timedomain(input_signal_or_spectrum):\n if isinstance(input_signal_or_spectrum, (sumpf.Spectrum)):\n ip = sumpf.modules.InverseFourierTransform(spectrum=input_signal_or_spectrum).GetSignal()\n else:\n ip = input_signal_or_spectrum\n energy_allchannels = []\n for c in ip.GetChannels():\n energy_singlechannel = []\n for s in c:\n energy_singlechannel.append(abs(s) ** 2)\n energy_allchannels.append(numpy.sum(energy_singlechannel))\n return energy_allchannels",
"def numpyFourierTransform2D(self,graph,**kwargs):\n return np.fft.fft2(graph,**kwargs)",
"def fourier(data, temp_freq, axis, output = 'amplitude'):\n\t\t\n\t\n\t# take largest possible multiple of F1 from PSTH.\n\t# Generate freq and fft\n\t# generate amplitude\n\t# return amplitude, F0, F1 and F2 values",
"def test_fft_spectrum_fieldtrip_02():\n ft_psd_hann = [0.00106465976843528, 0.00562957700710057, 0.00214937527201723, 0.000405599433993590, 0.00506141871942431, 0.00343922341551741, 0.00223373256323887, 0.00767611770955874, 0.0426550524445195, 0.0376986963169514]\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', output='spectraldensity', sides='one', scaling='fieldtrip')\n # less precise because different shape of hann window\n assert_array_almost_equal(Sxx0[100:110], ft_psd_hann, decimal=3)",
"def fft(y, Fs, detrend='constant', hann=True, cons=True, debug=False):\n \n # Copy input array\n y = np.array(y)\n\n # Set variables\n n = y.size\n T = n/Fs\n\n # Check if conservative output is desired\n if cons:\n Fmax = Fs/2.56\n else:\n Fmax = Fs/2.0\n\n # Get number of lines\n LOR = int(T*Fmax)\n\n # Remove mean if desired\n if detrend != 'none':\n y = scipy_detrend(y, type=detrend)\n\n # Apply hanning window\n if hann is True:\n y = np.hanning(y.size)*y\n\n # Perform DFT\n Y = rawfft(y)\n df = 1.0/T\n return np.abs(Y[0:LOR])*2.0/n, df",
"def fft2(a, s=None, axes=(-2, -1), norm=None):\n return image.image(np.fft.fft2(a, s, axes, norm), pixelsize=image.getPixelsize(a))",
"def fft2(data):\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.fft(data, 2, normalized=True)\n data = fftshift(data, dim=(-3, -2))\n return data",
"def fft2(data):\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.fft(data, 2, normalized=False)\n data = fftshift(data, dim=(-3, -2))\n return data",
"def full_spectral_helper(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',\n scale_by_freq=None):\n # The checks for if y is x are so that we can use the same function to\n #implement the core of psd(), csd(), and spectrogram() without doing\n #extra calculations. We return the unaveraged Pxy, freqs, and t.\n same_data = y is x\n\n #Make sure we're dealing with a numpy array. If y and x were the same\n #object to start with, keep them that way\n x = np.asarray(x)\n if not same_data:\n y = np.asarray(y)\n else:\n y = x\n\n # zero pad x and y up to NFFT if they are shorter than NFFT\n if len(x) < NFFT:\n n = len(x)\n x = np.resize(x, (NFFT,))\n x[n:] = 0\n\n if not same_data and len(y) < NFFT:\n n = len(y)\n y = np.resize(y, (NFFT,))\n y[n:] = 0\n\n if pad_to is None:\n pad_to = NFFT\n\n if scale_by_freq is None:\n scale_by_freq = True\n\n # For real x, ignore the negative frequencies unless told otherwise\n if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':\n numFreqs = pad_to\n scaling_factor = 1.\n elif sides in ('default', 'onesided'):\n numFreqs = pad_to // 2 + 1\n scaling_factor = 2.\n else:\n raise ValueError(\"sides must be one of: 'default', 'onesided', or \"\n \"'twosided'\")\n\n if cbook.iterable(window):\n assert (len(window) == NFFT)\n windowVals = window\n else:\n windowVals = window(np.ones((NFFT,), x.dtype))\n\n step = NFFT - noverlap\n ind = np.arange(0, len(x) - NFFT + 1, step)\n n = len(ind)\n Pxx = np.zeros((numFreqs, n), np.float_)\n Pyy = np.zeros((numFreqs, n), np.float_)\n Pxy = np.zeros((numFreqs, n), np.complex_)\n\n # do the ffts of the slices\n for i in range(n):\n thisX = x[ind[i]:ind[i] + NFFT]\n thisX = windowVals * detrend(thisX)\n fx = np.fft.fft(thisX, n=pad_to)\n\n if same_data:\n fy = fx\n else:\n thisY = y[ind[i]:ind[i] + NFFT]\n thisY = windowVals * detrend(thisY)\n fy = np.fft.fft(thisY, n=pad_to)\n Pxy[:, i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]\n Pxx[:, i] = np.conjugate(fx[:numFreqs]) * fx[:numFreqs]\n Pyy[:, i] = np.conjugate(fy[:numFreqs]) * fy[:numFreqs]\n\n # Scale the spectrum by the norm of the window to compensate for\n # windowing loss; see Bendat & Piersol Sec 11.5.2.\n Pxy /= (np.abs(windowVals) ** 2).sum()\n Pxx /= (np.abs(windowVals) ** 2).sum()\n Pyy /= (np.abs(windowVals) ** 2).sum()\n\n # Also include scaling factors for one-sided densities and dividing by the\n # sampling frequency, if desired. Scale everything, except the DC component\n # and the NFFT/2 component:\n Pxy[1:-1] *= scaling_factor\n Pxx[1:-1] *= scaling_factor\n Pyy[1:-1] *= scaling_factor\n\n # MATLAB divides by the sampling frequency so that density function\n # has units of dB/Hz and can be integrated by the plotted frequency\n # values. Perform the same scaling here.\n if scale_by_freq:\n Pxy /= Fs\n Pyy /= Fs\n Pxx /= Fs\n\n t = 1. / Fs * (ind + NFFT / 2.)\n freqs = float(Fs) / pad_to * np.arange(numFreqs)\n\n if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':\n # center the frequency range at zero\n freqs = np.concatenate((freqs[numFreqs // 2:] - Fs, freqs[:numFreqs // 2]))\n Pxy = np.concatenate((Pxy[numFreqs // 2:, :], Pxy[:numFreqs // 2, :]), 0)\n Pxx = np.concatenate((Pxx[numFreqs // 2:, :], Pxx[:numFreqs // 2, :]), 0)\n Pyy = np.concatenate((Pyy[numFreqs // 2:, :], Pyy[:numFreqs // 2, :]), 0)\n\n return Pxx, Pyy, Pxy, freqs, t",
"def fourier_freqs(times):\n # get the number of samples and the sample rate\n N = len(times)\n dt = np.mean(np.diff(times))\n\n # get the Nyquist frequency\n f_nyq = 1.0 / (2 * dt)\n\n # return the frequency array\n return np.linspace(-f_nyq, f_nyq, N, endpoint=False)",
"def FourierDescriptor(type):\n if type == 'temp':\n return np.fft.fft(t_array)\n elif type == 'shapes':\n FDs = []\n for sampleVector in s_arrays:\n sampleFD = np.fft.fft(sampleVector)\n FDs.append(sampleFD)\n\n return FDs",
"def FFT(x):\n x = np.asarray(x, dtype=float)\n N = x.shape[0]\n \n if N % 2 > 0:\n raise ValueError(\"size of x must be a power of 2\")\n elif N <= 32: # this cutoff should be optimized\n return DFT(x)\n else:\n X_even = FFT(x[::2])\n X_odd = FFT(x[1::2])\n factor = np.exp(-2j * np.pi * np.arange(N) / N)\n return np.concatenate([X_even + factor[:int(N / 2)] * X_odd,\n X_even + factor[int(N / 2):] * X_odd])"
] | [
"0.5888486",
"0.56503826",
"0.5601818",
"0.54491526",
"0.5392617",
"0.5390208",
"0.53717124",
"0.5368106",
"0.5360478",
"0.5329879",
"0.5306462",
"0.524618",
"0.5245024",
"0.524321",
"0.5240639",
"0.5181204",
"0.51742285",
"0.51686364",
"0.5167768",
"0.51577264",
"0.5120336",
"0.5055925",
"0.5049158",
"0.5035402",
"0.49645054",
"0.49522766",
"0.49301216",
"0.49102157",
"0.49003598",
"0.48992684"
] | 0.74132943 | 0 |
Output a string to the html file with a trailing newline | def outputHtml(s):
htmlFile.write(s + "\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_html_page(htmldata, filename):\n begin = \"<html>\\n\\n<body>\\n\\n<p>\\n\"\n end = \"\\n</p>\\n\\n</body>\\n\\n</html>\"\n full_text = begin + htmldata + end\n f = open(filename, \"w\")\n f.write(full_text)\n f.close()",
"def finish(self):\r\n\r\n self.text += \"</html>\\n\"\r\n\r\n if self.filename != None:\r\n with open(self.filename, \"w\") as f:\r\n f.write(self.text)\r\n\r\n return self.text",
"def write_html_file(out_table, outpath):\r\n page_out = PAGE_HTML % (outpath, out_table)\r\n out = open(outpath, \"w+\")\r\n out.write(page_out)\r\n out.close()",
"def end_print(outfile: TextIO) -> None:\n outfile.write(\" </body>\\n\")\n outfile.write(\"</html>\\n\")",
"def write_html(self, content):\n self.write(content)",
"def write_to_html_file(self, data: str):\n try:\n os.mkdir(\"../\" + self.uri)\n except FileExistsError:\n pass\n\n f = open(\"../\" + self.uri + self.file_name, \"w\")\n f.write(data)\n print(\"[WRITE] written to .html file\")\n f.close()",
"def footer(self):\n file = open(\"imdb_output.html\", \"a\")\n file.write(\"\\t\\t</table>\\n\\t</body>\\n</html>\\n\")",
"def output_to_html(string_data):\n raise NotImplementedError(\"This function is not yet Implemented!\")",
"def write_output(directory, name, html):\n if not os.path.isdir(directory):\n os.mkdir(directory)\n with open(os.path.join(directory, '.'.join((name, 'html'))), 'w') as f:\n f.write(beautify(html))",
"def saveToFile(html):\n #print(\"Saving to file.\")\n html += \"\\n\"\n #open necessary files to save\n logFile = open(\"postLog_{0}_{1}.txt\".format(os.path.splitext(path)[0], dateTimeNow), \"a\")\n logFile.write(html)\n logFile.close()\n #print(\"Check Point.\")",
"def publish_html(self, readyhtml):\n with open(self.outfile,'w') as f_out:\n f_out.writelines(readyhtml)",
"def output(self, string_to_output):\n\n html = plain_to_html(string_to_output)\n if html == \"\":\n return\n\n self._output_object.add_report(html)",
"def to_file(self, html_file: str = None) -> None:\n if not html_file:\n html_file = f\"{self.id}.html\"\n\n with open(html_file, \"w\") as f:\n f.write(self.soup.html)",
"def write_html_file(out_table, outpath):\r\n page_out = PAGE_HTML % ('Taxa Summaries', out_table)\r\n out = open(outpath, \"w+\")\r\n out.write(page_out)\r\n out.close()",
"def close(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</report_html>\\n\n \"\"\"\n return self.html_doc",
"def write_to_file(fname, html_body):\n dir_path = os.path.dirname(fname)\n ensure_dir_exists(dir_path)\n\n with open(fname, 'w') as html_file:\n html_file.write(html_body)",
"def end_page_division(outfile: TextIO) -> None:\n outfile.write(\" </div>\\n\")",
"def output_raw(self, string_to_output):\n html = plain_to_html(string_to_output)\n if html == \"\":\n return\n html_pre_output = html_pre(html)\n\n self._output_object.add_report(html_pre_output)",
"def write_html(self, filename):\n # todo: allow writing in split mode\n html = self.to_html()\n open(filename, 'wt').write(html)\n print('Exported app to %r' % filename)",
"def output_html(self, path):\n if path is None:\n return\n import os\n fout = codecs.open(os.path.abspath(path), 'w', encoding='utf-8')\n fout.write('<html><body><table>')\n for data in self.datas:\n fout.write('<tr><td>%s</td><td>%s</td><td>%s</td></tr>' % (data['url'], data['title'], data['summary']))\n self.datas.remove(data)\n fout.write('</table></body></html>')\n fout.close()",
"def makeHTML(header, body, footer):\n f = open(\"crimenews.html\", \"w\")\n f.write(header+body+footer)",
"def write(self,out):\n with open( out, \"wb\") as fi:\n fi.write(html.tostring(self.book))",
"def write_page(soup, fileName):\r\n soup.prettify(formatter='html')\r\n\r\n with open(fileName, 'wb') as f:\r\n f.write(str(soup).encode('utf-8'))",
"def save(self, filename):\n outfile = open(filename, \"w\")\n outfile.write(self.html.encode('utf8'))\n outfile.close()",
"def saveHtml(path: str, filename: str, html: str) -> None:\n filepath = os.path.join(path, filename)\n with open(filepath, \"w\") as fileHandle:\n fileHandle.write(html)\n return filepath",
"def html_close():\n return(\"\"\"\n\n </section>\n\n </div>\n\n</main>\n</body>\n</html>\"\"\")",
"def writeln(self, content):\n ...",
"def print_and_append(string, outfile, new_line=False):\n\tif outfile is not None:\n\t\toutfile.write(string)\n\t\toutfile.write('\\n')\n\t\tif new_line:\n\t\t\toutfile.write('\\n')\n\n\tprint(string)\n\tif new_line:\n\t\tprint '\\n'",
"def output(self, string, rewritable=False):\n\n if not self.enabled:\n return\n\n print(string, end=(\"\\r\" if rewritable else \"\\n\"), file=self.stream)",
"def save_into_html_file(path_html_file: str, response):\n html_file = open(path_html_file, 'w')\n html_file.writelines(response)\n html_file.close()\n\n with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:\n zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)\n zf.close()\n os.remove(path_html_file)"
] | [
"0.66473573",
"0.66443795",
"0.6528386",
"0.6365278",
"0.63228464",
"0.6304377",
"0.6303967",
"0.6297656",
"0.62927836",
"0.62136006",
"0.6187122",
"0.61672395",
"0.6166157",
"0.6161343",
"0.6142821",
"0.61315984",
"0.6131482",
"0.6089356",
"0.6076196",
"0.6050305",
"0.6013549",
"0.6007974",
"0.5969798",
"0.5958496",
"0.59498763",
"0.59468335",
"0.59450233",
"0.5926224",
"0.59102285",
"0.5885866"
] | 0.8278069 | 0 |
Transpose a hash of hashes so that the inner keys are now outer | def transpose(h):
res = {}
for i in list(h.keys()):
v = h[i]
for j in list(v.keys()):
if not res.get(j, None):
res[j] = {}
res[j][i] = v[j]
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transpose_2d_table(dicts_within_dict_table):\n transposed_table = {}\n for x2 in dicts_within_dict_table:\n for x1 in dicts_within_dict_table[x2]:\n if x1 not in transposed_table:\n transposed_table[x1] = \\\n {x2: dicts_within_dict_table[x2][x1]}\n else:\n transposed_table[x1][x2] = \\\n dicts_within_dict_table[x2][x1]\n return transposed_table",
"def pivot_nested_dict(nested_dict):\r\n\r\n reverse_nest_dict = {} #Create an empty dictionary\r\n for k, v in nested_dict.items(): #Iterate through each pair of elements\r\n for k2, v2 in v.items(): #Iterate through pair of values\r\n try:\r\n reverse_nest_dict[k2][k] = v2\r\n except KeyError:\r\n reverse_nest_dict[k2] = { k : v2 }\r\n return reverse_nest_dict\r\n \r\n #Create a dictionary that produces a different nested dictionary which\r\n #contains the same values\r",
"def expand(d):\n # make sure everything is a list\n for k, v in d.iteritems():\n if type(v) is not list:\n d[k] = [v]\n\n # take cross product\n product = [x for x in apply(itertools.product, d.values())]\n return flatten([dict(zip(d.keys(), p)) for p in product])",
"def _dict_to_row(val_in):\n out = []\n\n # keep order\n keys = sorted(val_in.keys())\n for k in keys:\n v = val_in[k]\n if not isinstance(v, dict):\n out.append((k, v,))\n else:\n sub_out = _dict_to_row(v)\n for item in sub_out:\n out.append((f'{k}.{item[0]}', item[1],))\n return out",
"def flatten(self):\n flat = {}\n for d in self.dicts:\n flat.update(d)\n return flat",
"def convert(data):\n return {k: [d[k] for d in data] for k in data[0].keys()}",
"def flatten_dict(dict_input):\n flattened_dict = dict()\n\n for key, value in dict_input.items():\n if isinstance(value, dict):\n new_keys = sorted(value.keys())\n for new_key in new_keys:\n entry = {key + '_' + new_key: value[new_key]}\n flattened_dict.update(entry)\n else:\n entry = {key: value}\n flattened_dict.update(entry)\n\n return flattened_dict",
"def _unflatten_dict_by_feature_name(flattened_dict: Dict[str, Any]) ->Dict[str, Dict[str, Any]]:\n outputs: Dict[str, Dict[str, Any]] = {}\n for concat_key, tensor_values in flattened_dict.items():\n feature_name = get_feature_name_from_concat_name(concat_key)\n tensor_name = get_tensor_name_from_concat_name(concat_key)\n feature_outputs: Dict[str, Any] = {}\n if feature_name not in outputs:\n outputs[feature_name] = feature_outputs\n else:\n feature_outputs = outputs[feature_name]\n feature_outputs[tensor_name] = tensor_values\n return outputs",
"def InvertDict(dict_in):\n return dict(zip(dict_in.values(), dict_in.keys()))",
"def _join_dicts(dicts):\n if dicts is None: # pragma: no cover\n return\n assembled_dict = {k: v for D in dicts for k, v in D.items()}\n return assembled_dict",
"def dict_flatten(*args):\n hold = []\n for a in args:\n hold.append([i for s in a.values() for i in s])\n return hold",
"def unflatten_dict(flat):\n unflattened = dict()\n\n for key, value in sorted(flat.items(), key=_key_order):\n if '__' in key:\n key, subkey = key.split('__', 1)\n subkey, name = subkey.rsplit('__', 1)\n\n if name.isdigit():\n column_index = int(name)\n row_index = int(subkey)\n\n array = unflattened.setdefault(key, list())\n\n if len(array) == row_index:\n row = list()\n array.append(row)\n elif len(array) == row_index + 1:\n row = array[row_index]\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n if len(row) == column_index:\n row.append(value)\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n else:\n subdict = unflattened.setdefault(key, dict())\n if subkey.isdigit():\n subkey = int(subkey)\n\n inner = subdict.setdefault(subkey, dict())\n inner[name] = value\n\n else:\n unflattened[key] = value\n\n return unflattened",
"def dict_collapse(d, into=dict):\n d_collapsed = defaultdict(list)\n for k,v in d.items():\n d_collapsed[v].append(k)\n return into(d_collapsed)",
"def product_from_dict(grid):\n buff = [map_key_to_every_value(key, value) for key, value in grid.items()]\n return [merge_dicts(args) for args in itertools.product(*buff)]",
"def product_from_dict(grid):\n buff = [map_key_to_every_value(key, value) for key, value in grid.items()]\n return [merge_dicts(args) for args in itertools.product(*buff)]",
"def transform_dict(dc: dict):\n tmp_dict = dict()\n for k, v in dc.items():\n k1, k2 = k.split(\"|\")\n v1 = {'e': v, 'c': k2}\n v2 = {'e': v, 'c': k1}\n insert_to_dict(tmp_dict, k1, v1)\n insert_to_dict(tmp_dict, k2, v2)\n return tmp_dict",
"def _flatten_dict(x: Dict) ->Dict:\n new_dict = {}\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n new_dict[k] = v\n else:\n new_dict[key] = value\n return new_dict",
"def reverse_dict(d):\r\n result = {}\r\n for key in d:\r\n for val in d[key]:\r\n result[val] = result.get(val, tuple()) + (key, )\r\n return result",
"def transpose(self, p=(1, 0)):\n res = self.empty_like()\n for k, v in self.sects.items():\n kt = tuple(map(k.__getitem__, p))\n res.sects[kt] = v.transpose(p)\n res.shape = list(map(self.shape.__getitem__, p))\n res.qhape = list(map(self.qhape.__getitem__, p))\n res.dirs = list(map(self.dirs.__getitem__, p))\n return res",
"def cartesian_product(dic):\n keys = dic.keys()\n values = dic.values()\n return [dict(zip(keys, x)) for x in itertools.product(*values)]",
"def invert(d):\n if d:\n return [dict(zip(d, i)) for i in zip(*d.values())]",
"def transposeDictionary(scriptByExtension):\n return sorted([[extension, script] \\\n for script, extension in scriptByExtension.items()])",
"def rearrange_subject_data_dict(\n subject_data_dict: SubjectDataDict,\n) -> StudyDataDict:\n dict_flipped = {}\n phases = [np.array(dict_phase.keys()) for dict_phase in subject_data_dict.values()]\n if not all(phases[0] == p for p in phases):\n raise ValueError(\n \"Error rearranging the dictionary! Not all 'PhaseDict's have the same phases. \"\n \"To rearrange the 'SubjectDataDict', \"\n \"the dictionaries of all subjects need to have the exact same phases!\"\n )\n\n for subject, phase_dict in subject_data_dict.items():\n for phase, df in phase_dict.items():\n dict_flipped.setdefault(phase, dict.fromkeys(subject_data_dict.keys()))\n dict_flipped[phase][subject] = df\n\n return dict_flipped",
"def pivot_nested_dict(nested_dict):\n # declare res as the return object which should be a dict\n res = dict()\n # traverse the pollsters\n for pollster in nested_dict:\n \t# travserse the states\n \tfor state in nested_dict[pollster]:\n \t\t# if first meet a state, we need to create a new dict\n \t\tif state not in res:\n \t\t\tres[state] = dict()\n \t\t# put the pollster value in the state dict\n \t\tres[state][pollster] = nested_dict[pollster][state]\n return res",
"def get_transpose_graph(graph: Graph):\n transpose: Graph = {node: set() for node in graph.keys()}\n for node, target_nodes in graph.items():\n for target_node in target_nodes:\n transpose[target_node].add(node)\n return transpose",
"def dict_combine(dicts):\n result = {}\n for dic in dicts:\n for i in dic.keys():\n if i not in result.keys():\n result[i] = dic[i]\n else:\n lst = []\n lst.append(result[i])\n lst.append(dic[i])\n result[i] = lst\n return result",
"def dict_to_row(keys):\n return lambda adict: [adict[k] for k in keys]",
"def aggregate_dict(x):\n agg_x = {}\n\n for ele in x:\n assert isinstance(ele, dict)\n\n for k, v in ele.items():\n if k not in agg_x:\n agg_x[k] = []\n\n if isinstance(v, (tuple, list)):\n agg_x[k].extend(list(v))\n else:\n agg_x[k].append(v)\n\n # Stack if possible\n new_agg_x = {}\n for k, v in agg_x.items():\n try:\n v = torch.cat(v, dim=0)\n except Exception:\n pass\n new_agg_x[k] = v\n\n return new_agg_x",
"def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd",
"def sur_dict2mat(dicts):\n n_dicts = len(dicts.keys())\n mat = np.vstack((dicts[t] for t in range(n_dicts)))\n return(mat)"
] | [
"0.7166209",
"0.65619993",
"0.5991071",
"0.59697735",
"0.5820064",
"0.5810341",
"0.5809102",
"0.57978475",
"0.5764748",
"0.57150954",
"0.5670478",
"0.5656125",
"0.5622417",
"0.55814767",
"0.55814767",
"0.5565617",
"0.5564947",
"0.5563796",
"0.5557678",
"0.5549799",
"0.5546132",
"0.5540304",
"0.5537636",
"0.5512701",
"0.54916203",
"0.5483812",
"0.5461784",
"0.54360914",
"0.54166293",
"0.5394645"
] | 0.7801839 | 0 |
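A minimal usage sketch for the transpose() function in the row above; the sample hash of hashes is invented purely for illustration:

# Hypothetical data: outer keys are implementations, inner keys are problem sizes.
h = {
    "gcc":   {"small": 1.2, "large": 3.4},
    "clang": {"small": 1.1, "large": 3.0},
}
# transpose() swaps the nesting, so the inner keys become the outer keys.
print(transpose(h))
# {'small': {'gcc': 1.2, 'clang': 1.1}, 'large': {'gcc': 3.4, 'clang': 3.0}}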
Set up the X axis, including scaling, labels and max/min values | def setupXAxis(plot, minVal, maxVal, label, logarithmic):
plot.set_xlabel(label)
if logarithmic:
plot.set_xscale("log")
plot.set_xlim(minVal, maxVal)
# plot.set_xscale('log', basex=2)
# tickLabels = [1]
# labelValue = minVal
# while labelValue <= maxVal:
# tickLabels.append (labelValue)
# labelValue = labelValue*2
# # Expand the axis a little above and below the data
# inflationFactor = 0.95
# plot.set_xlim(minVal*inflationFactor, maxVal/inflationFactor)
# # Need a blank label on the front for the added axis point on the left. No need for an extra
# # annotation on the right.
# plot.set_xticklabels([' '] + tickLabels)
else:
plot.set_xlim((0 if minVal == 1 else minVal), maxVal) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __draw_xaxis(self):\n self.ax.set_xlim(self.xlims)\n # put x ticks on top\n xticks = [1]\n xticks.extend(range(5, self.xmax+5, 5))\n fs = self.settings.rcParams[\"axes.labelsize\"] if self.settings.otherParams[\n \"xlabel.fontsize\"] is None else self.settings.otherParams[\"xlabel.fontsize\"]\n color = self.settings.rcParams[\"axes.labelcolor\"] if self.settings.otherParams[\n \"xlabel.color\"] is None else self.settings.otherParams[\"xlabel.color\"]\n self.ax.set_xticks(xticks)\n self.ax.set_xticklabels(xticks[:-1])\n self.ax.set_xlabel(self.xaxis_label, fontsize=fs, color=color)\n self.ax.xaxis.set_label_coords(\n *self.settings.otherParams[\"xlabel.position\"])",
"def xaxis(self,label,units):\n if units != \"\": label = label + \" (\" + units + \")\"\n self.subplot.set_xlabel(label)\n pass",
"def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])",
"def format_x_axis(self, x_tick, x_limits):\n self._fig.update_layout(\n xaxis=dict(\n range=x_limits,\n dtick=x_tick,\n ),\n )",
"def __init__(self, x, ax=None, ticksize=10, normalize_data=False, show_xlabel=True, figsize=None):\n\n self.show_xlabel = show_xlabel\n\n if ax is None:\n f = plt.figure(figsize=figsize)\n self.ax = f.add_subplot(111)\n else:\n self.ax = ax\n\n self.figure = self.ax.figure\n self.x = x.copy()\n self.lines = []\n self.labels = []\n self.ticksize = ticksize\n self.normalize = normalize_data",
"def make_x_axis(**kwargs):\n xmax = kwargs.get('xmax', 40)\n xmin = kwargs.get('xmin', 0)\n stepsize = kwargs.get('stepsize', 5)\n xlabel = kwargs.get('xlabel', 'Generations from\\nSenescence')\n figsize = kwargs.get('figsize', (2.5, 2.5))\n fontsize = kwargs.get('fontsize', 12)\n savepath = kwargs.get('savepath', f'legend_xlim_{xmin}-{xmax}.svg')\n # Create the figure and plot\n fig = plt.figure(figsize=figsize)\n fig.set_dpi(300)\n ax = fig.add_subplot(111)\n ax.set_xlim(xmin, xmax)\n ax.set_xticks(np.arange(xmin, xmax+1, stepsize))\n # Remove spines\n for spine in [ax.spines[key] for key in ['top', 'right', 'left']]:\n spine.set_visible(False)\n ax.set_xlabel(xlabel, fontsize=fontsize)\n # No y ticks!\n ax.set_yticks([])\n fig.savefig(savepath)",
"def set_xscale(self, value):\n if value in [\"linear\", \"lin\"]:\n self._pad.SetLogx(0)\n self._logx = False\n\n elif value in [\"log\", \"logy\"]:\n left, right = self.get_xlim()\n if right <= 0:\n warnings.warn(\n \"Current frame has no positive values, and therefore cannot \"\n \"be log-scaled. Try running ax.set_xlim() first.\"\n )\n elif left <= 0:\n # Arbitrarily set left to 0.1 (or 0.1*right if right <= 0.1)\n # so that the frame can be displayed\n if right <= 0.1:\n self.set_xlim(left=0.1 * right)\n else:\n self.set_xlim(left=0.1)\n\n self._pad.cd()\n self._pad.SetLogx(1)\n self._pad.Modified()\n self._logx = True",
"def to_x_coordinates(self):\n self.plotter.to_x_coordinates(self.ax)\n self.plotter.replot(self.ax)\n self.plotter.cells.draw(self.ax)\n self.x_label.set_text(self.plotter.plot_xlabel)\n self.fig.canvas.draw()",
"def make_XAxis(xaxis_title, xaxis_range):\n xaxis = graph_objs.XAxis(title=xaxis_title,\n range=xaxis_range,\n showgrid=False,\n zeroline=False,\n showline=False,\n mirror=False,\n ticks='',\n showticklabels=False)\n return xaxis",
"def xaxis ( self ) :\n return self.__xaxis",
"def xaxis ( self ) :\n return self.__xaxis",
"def xaxis ( self ) :\n return self.__xaxis",
"def xaxis(self,label,units):\r\n if units != \"\": label = label + \" (\" + units + \")\"\r\n self.xbox.set_text(r\"$%s$\" % (label))\r\n pass",
"def setScaleX(self,startx,endx):\r\n if startx == endx:\r\n endx += 1\r\n self.scaleLock.acquire()\r\n self.scalex = [startx,endx]\r\n self.scaleLock.release()",
"def cla(self):\n # Don't forget to call the base class\n Axes.cla(self)\n \n x_min = 0\n y_min = 0\n x_max = 1\n y_max = 1\n x_spacing = 0.1\n y_spacing = 0.1\n self.xaxis.set_minor_locator(NullLocator())\n self.yaxis.set_minor_locator(NullLocator())\n self.xaxis.set_ticks_position('bottom')\n self.yaxis.set_ticks_position('left')\n Axes.set_xlim(self, x_min, x_max)\n Axes.set_ylim(self, y_min, y_max)\n self.xaxis.set_ticks(np.arange(x_min, x_max+x_spacing, x_spacing))\n self.yaxis.set_ticks(np.arange(y_min, y_max+y_spacing, y_spacing))",
"def set_xunits(self, units, include_brackets):\n if include_brackets:\n plt.xlabel(\n \"x (\" + self.xunits_from_units(units=units) + \")\", fontsize=self.xsize\n )\n else:\n plt.xlabel(self.xunits_from_units(units=units), fontsize=self.xsize)",
"def set_axis_x(self, new_axis_point):\r\n self.__x_axis = new_axis_point",
"def updatePlot(self,*args):\n # set x limits\n timeDisplayOptions = {'10 minutes':10,'1 hour':60,'6 hours':6*60,'24 hours':24*60,'All':0}\n try:\n lastDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[-1])\n firstDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[0])\n except IndexError: # no data yet\n now = datetime.datetime.utcnow().toordinal()\n firstDatetime = mpl.dates.num2date(now)\n lastDatetime = firstDatetime\n xMin = lastDatetime-datetime.timedelta(minutes=timeDisplayOptions[self.wScale.get()])\n xMin = max([ firstDatetime, xMin ])\n if self.wScale.get() == 'All':\n xMin = firstDatetime\n xMinIndex = numpy.searchsorted( self.stage60K.get_xdata(), mpl.dates.date2num(xMin) )\n # rescale axes, with the x being scaled by the slider\n if self.toolbar._active == 'HOME' or self.toolbar._active == None:\n ymin,ymax = 10000000, -10000000\n lineAndVar = { self.stage60K: self.t60K,\n self.stage03K: self.t3K,\n self.stageGGG: self.tGGG,\n self.stageFAA: self.tFAA }\n if len(self.stage60K.get_xdata()) > 1:\n for line in lineAndVar.keys():\n if lineAndVar[line].get() == 0:\n line.set_visible(False)\n else:\n line.set_visible(True)\n ydata = line.get_ydata()[xMinIndex:-1]\n try:\n ymin = min(ymin, numpy.nanmin(ydata))\n ymax = max(ymax, numpy.nanmax(ydata))\n except ValueError as e:\n pass\n self.ax.set_xlim(xMin,lastDatetime)\n self.ax.set_ylim(ymin - (ymax-ymin)/10, ymax + (ymax-ymin)/10)\n hfmt = mpl.dates.DateFormatter('%H:%M:%S', tz=tz.tzlocal())\n self.ax.xaxis.set_major_formatter(hfmt)\n self.fig.autofmt_xdate()\n self.fig.tight_layout()\n #draw\n self.canvas.draw()",
"def get_axis_x(self):\r\n return self.__x_axis",
"def format_x_axis(self, text=None, positionx=None, positiony=None, color=None, fontsize=None):\n if text is not None:\n self.xaxis_label = text\n\n x, y = self.settings.otherParams[\"xlabel.position\"]\n if positionx is not None:\n x = positionx\n if positiony is not None:\n y = positiony\n self.settings.otherParams[\"xlabel.position\"] = (x, y)\n\n if color is not None:\n self.settings.otherParams[\"xlabel.color\"] = color\n\n if fontsize is not None:\n self.settings.otherParams[\"xlabel.fontsize\"] = fontsize",
"def xlabel(self, xlabel):\n self._checkfigure()\n self.axes.set_xlabel(xlabel)",
"def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()",
"def set_xticks(ax, xlabels_log=None, show_log=True):\n if show_log is False:\n ax.set_xscale('log', basex=2) # show regular numbers (512, 1024, ...)\n ax.get_xaxis().set_major_formatter( matplotlib.ticker.ScalarFormatter() )\n else:\n ax.get_xaxis().set_major_formatter( matplotlib.ticker.ScalarFormatter() )\n ax.set_xscale('log', basex=2) # show power number (2^9, 2^10, ...)\n \n if xlabels_log is not None:\n ax.set_xticks(xlabels_log)\n return ax",
"def setup_axes():\n\taxes = visuals.subplots(1, 2, figsize = (14, 7))\n\taxes[1].set_yscale(\"log\")\n\taxes[0].set_xlabel(\"[Fe/H]\")\n\taxes[0].set_ylabel(\"[Sr/Fe]\")\n\taxes[1].set_xlabel(\"[Sr/Fe]\")\n\taxes[1].set_ylabel(\"Stellar Probability Density\")\n\taxes[0].set_xlim([-2.2, 0.2])\n\taxes[0].set_ylim([-2.4, 0.4])\n\taxes[1].set_xlim([-1.4, 0.4])\n\taxes[1].set_ylim([0.05, 50])\n\treturn axes",
"def xscale(self, kind):\n self._xscale = str(kind).strip() # remove whitespace eventually\n if self._xscale not in self.xscaling():\n self._xscale = self.xscaling(1)",
"def format_xaxis (self, axes, \n n_ticks = 10, # Number of ticks we would like\n timestamp_formatting = '(%Y-%m-%d)%H:%M', # Specified formatting \n xaxis_mode = None): # Several automatic modes\n if (self.X_type == \"categorical\"):\n axes.set_xticks(self.X[self.start_indx:self.end_indx], minor=False)\n axes.set_xticklabels(self.Xcategories[self.start_indx:self.end_indx][:,0], minor=False)\n \n elif(self.X_type == \"numerical\"):\n # If regular numerical we just plot the values\n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))\n# ax.get_xaxis().get_major_formatter().set_useOffset(False)\n \n elif(self.X_type == \"timestamp\"):\n axes.xaxis.set_major_formatter(mdates.DateFormatter(timestamp_formatting))\n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))\n axes.xaxis_date()\n # ax.xaxis.set_major_formatter(FuncFormatter(self.ticklabels[val:val + wsize]))\n self.figure.autofmt_xdate()\n# print (type(self.X), type(self.X[0]))\n \n elif(self.formatXaxis == \"intraday\"):\n # If the data is intraday and we want to apply the Gap Remover !!! \n gap_remover_flag = 1;\n if (gap_remover_flag):\n formatter = FuncFormatter(ul.detransformer_Formatter)\n axes.xaxis.set_major_formatter(formatter) \n # mdates.DateFormatter(formatting)\n \n else:\n axes.xaxis.set_major_formatter(mdates.DateFormatter(formatting))\n \n axes.xaxis.set_major_locator(mticker.MaxNLocator(nbins = n_ticks, prune='upper'))",
"def xaxis(self):\n return self._xaxis",
"def setup_axes():\n fig, ax = plt.subplots(1)\n\n ax.set_xlabel(\"Density [$n_H$ cm$^{-3}$]\")\n ax.set_ylabel(\"Temperature [K]\")\n\n ax.loglog()\n\n return fig, ax",
"def __createLimits(self):\r\n self.lowerXLabel = QLabel(\"lower limits of (x)\")\r\n self.lowerXField = QLineEdit(self)\r\n self.lowerXField.setPlaceholderText(\"-10\")\r\n\r\n self.upperXLabel = QLabel(\"upper limits of (x)\")\r\n self.upperXField = QLineEdit(self)\r\n self.upperXField.setPlaceholderText(\"10\")",
"def __init__(self):\n self.pt = Plotter(2, width=self.width, height=self.height)\n self.pt.use_grid()\n self.pt.set_title(\n \"Exponentials plotted from {:.1f} to {:.1f}\", self.xMin, self.xMax)\n self.pt.set_xlabel(\"X\")\n self.pt.set_ylabel(\"a*exp(-b*X)\")"
] | [
"0.76962334",
"0.7142678",
"0.70777565",
"0.7023679",
"0.6677621",
"0.66705173",
"0.6669855",
"0.66405183",
"0.65903085",
"0.6455307",
"0.64306533",
"0.64306533",
"0.6370854",
"0.6364356",
"0.62985694",
"0.6293797",
"0.6283952",
"0.62325585",
"0.6220945",
"0.61975974",
"0.6170396",
"0.61504585",
"0.61481035",
"0.60937816",
"0.6075711",
"0.6074481",
"0.6048463",
"0.6044733",
"0.6042677",
"0.6031892"
] | 0.7718785 | 0 |
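A minimal sketch of driving the setupXAxis() function from the row above; the axis range, label, and output file name are assumptions made only for illustration:

import matplotlib
matplotlib.use("Agg")  # render off-screen, no display required
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# Logarithmic x axis from 1 to 1024, labelled "Threads" (hypothetical values).
setupXAxis(ax, 1, 1024, "Threads", True)
fig.savefig("xaxis_demo.png")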
Add the legend to the plot, either shrinking the plot slightly to make room (since we add the legend outside the plot to the right) or leaving the plot full sized and allowing matplotlib to choose a good placement | def addLegend(ax, lines, impls, legendPos):
# If there's only one piece of data being plotted, there's no need for a legend
# since all the parameters will be in the title.
# Compute the length (in characters) of the longest implementation.
legendLen = max(list(map(len, impls)))
if legendLen == 0:
return
legendItems = len(impls)
fontSize = 10 if legendLen < 20 and legendItems <= 4 else 8
prop = matplotlib.font_manager.FontProperties(size=fontSize)
if legendPos in (
"best",
"upper right",
"upper left",
"lower right",
"lower left",
"right",
"center right",
"center left",
"lower center",
"upper center",
"center",
):
ax.legend(lines, impls, prop=prop, loc=legendPos)
elif legendPos == "below":
# Place the legend below the x-axis
axisShrink = 0.15 if legendItems < 7 else 0.2
box = ax.get_position()
newHeight = box.height * (1 - axisShrink)
ax.set_position([box.x0, box.y0 + box.height - newHeight, box.width, newHeight])
ax.legend(
lines,
impls,
prop=prop,
bbox_to_anchor=(0, -0.1),
borderaxespad=0.0,
loc="upper left",
)
else:
# Place the legend on the right
        # Shrink current axis by 15% to make room for the legend on the right.
# If we were smarter we'd work out how much we need to shrink based on the
# size of the legend box and so on, but this is OK for now.
# See how much we think we need to shrink to fit in the legend
axisShrink = 0.15 if legendLen < 20 else 0.2
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * (1 - axisShrink), box.height])
ax.legend(
lines,
impls,
prop=prop,
bbox_to_anchor=(1.02, 1),
borderaxespad=0.0,
loc="upper left",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def legend(self):\n if self.nplots == 1:\n lax = self.ax\n loff = 0.2\n else:\n lax = self.ax1\n loff = 0.4\n box = lax.get_position()\n\n lax.figure.subplots_adjust(bottom=loff) # make space on bottom for legend\n lax.legend(self.plots, self.labels, loc='upper center', bbox_to_anchor=(0.5, -loff), fancybox=True, shadow=True, ncol=3, prop={'size': 8})",
"def make_final_legend():\n fig = plt.figure(figsize=(10, 1))\n me.get_final_graph_legend(fig)\n fig.savefig(\"cumul_shuttle_leg.pdf\")",
"def legend (self, **kwargs):\n axes = self.twin_axes or self.axes\n self.mpl_legend = axes.legend (self.mpl_lines, self.labels, **kwargs)",
"def add_legend(ax, sf=16, loc='upper right'):\n ax.autoscale(False)\n #CONUS\n #leg_s = np.array([0.1, 0.5, 1.0, 5.0, 10.0])\n #HMA\n leg_s = np.array([0.1, 1.0, 10.0, 100.0])\n leg_x = np.full(leg_s.size, -999999999)\n leg_y = np.full(leg_s.size, -999999999)\n #leg_sc = ax.scatter(leg_x, leg_y, c='0.8', s=leg_s)\n #ax.legend(leg_sc, ['%0.1f km^2' % s for s in leg_s], scatterpoints=1, loc='upper right')\n for i, s in enumerate(leg_s):\n lbl = r'$%0.1f\\/km^2$' % s\n ax.scatter(leg_x[i], leg_y[i], s=s*sf, c='gray', label=lbl)\n legend = ax.legend(title='Glacier Area', scatterpoints=1, loc=loc, prop={'size':7})\n legend.get_title().set_fontsize('8')\n return legend",
"def set_legend(ax):\n l = ax.legend()\n plt.setp(l.get_texts(), fontsize=8)",
"def decorate(**options):\n ax = plt.gca()\n ax.set(**options)\n\n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels)\n\n plt.tight_layout()",
"def plot_legend(ax):\n\tlines = 4 * [None]\n\tcolors = [\"black\", \"deepskyblue\", \"lime\", \"crimson\"]\n\tlabels = [r\"Constant $y_\\text{Sr}^\\text{CC}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto 1 - e^{-kZ}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto Z$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC}$ = 0\"]\n\tfor i in range(4):\n\t\tlines[i] = ax.plot([1, 2], [1, 2], c = visuals.colors()[\"white\"],\n\t\t\tlabel = labels[i])[0]\n\tleg = ax.legend(loc = visuals.mpl_loc()[\"upper left\"], ncol = 1,\n\t\tbbox_to_anchor = (0.0, 0.99), frameon = False, handlelength = 0)\n\tfor i in range(4):\n\t\tlines[i].remove()\n\t\tleg.get_texts()[i].set_color(colors[i])",
"def add_legend_scale(\n self,\n corner_offset_factor=2.0,\n bottom_border_offset=30,\n top_border_offset=30,\n left_border_offset=30,\n right_border_offset=30,\n bottom_axis_visibility=True,\n top_axis_visibility=True,\n left_axis_visibility=True,\n right_axis_visibility=True,\n legend_visibility=True,\n xy_label_mode=False,\n render=True,\n color=None,\n font_size_factor=0.6,\n label_size_factor=1.0,\n label_format=None,\n number_minor_ticks=0,\n tick_length=5,\n minor_tick_length=3,\n show_ticks=True,\n tick_label_offset=2,\n ):\n color = Color(color, default_color=self._theme.font.color)\n\n legend_scale = _vtk.vtkLegendScaleActor()\n legend_scale.SetCornerOffsetFactor(corner_offset_factor)\n legend_scale.SetLegendVisibility(legend_visibility)\n if xy_label_mode:\n legend_scale.SetLabelModeToXYCoordinates()\n else:\n legend_scale.SetLabelModeToDistance()\n legend_scale.SetBottomAxisVisibility(bottom_axis_visibility)\n legend_scale.SetBottomBorderOffset(bottom_border_offset)\n legend_scale.SetLeftAxisVisibility(left_axis_visibility)\n legend_scale.SetLeftBorderOffset(left_border_offset)\n legend_scale.SetRightAxisVisibility(right_axis_visibility)\n legend_scale.SetRightBorderOffset(right_border_offset)\n legend_scale.SetTopAxisVisibility(top_axis_visibility)\n legend_scale.SetTopBorderOffset(top_border_offset)\n\n for text in ['Label', 'Title']:\n prop = getattr(legend_scale, f'GetLegend{text}Property')()\n if color != Color('white'):\n # This property turns black if set\n prop.SetColor(*color.int_rgb)\n prop.SetFontSize(\n int(font_size_factor * 20)\n ) # hack to avoid multiple font size arguments\n\n for ax in ['Bottom', 'Left', 'Right', 'Top']:\n axis = getattr(legend_scale, f'Get{ax}Axis')()\n axis.GetProperty().SetColor(*color.int_rgb)\n if color != Color('white'):\n # This label property turns black if set\n axis.GetLabelTextProperty().SetColor(*color.int_rgb)\n axis.SetFontFactor(font_size_factor)\n axis.SetLabelFactor(label_size_factor)\n if label_format:\n axis.SetLabelFormat(label_format)\n axis.SetNumberOfMinorTicks(number_minor_ticks)\n axis.SetTickLength(tick_length)\n axis.SetMinorTickLength(minor_tick_length)\n axis.SetTickVisibility(show_ticks)\n axis.SetTickOffset(tick_label_offset)\n\n return self.add_actor(\n legend_scale,\n reset_camera=False,\n name='_vtkLegendScaleActor',\n culling=False,\n pickable=False,\n render=render,\n )",
"def add_plot_legend(fig, labright='M.', lableft='S.'):\n #............................................\n _leg = fig.add_axes([0.92, 0.865, 0.055, 0.085])\n _leg.fill((0, 0.5, 0.5, 0), (0, 0, 1, 1), fc=ENSOpolygons['W'])\n _leg.text(0.05, 0.5, 'EN', fontsize='smaller')\n _leg.fill((0.5, 1, 1, 0.5), (0, 0, 1, 1), fc=ENSOpolygons['C'])\n _leg.text(0.6, 0.5, 'LN', fontsize='smaller')\n _leg.set_xticks([])\n _leg.set_yticks([])\n #............................................\n _leg = fig.add_axes([0.92, 0.75, 0.055, 0.085])\n _leg.plot((0, 1,), (0, 1), ls='-', c='k', marker='')\n _leg.set_xticks([])\n _leg.set_yticks([])\n _leg.text(0.6, 0.15, labright, fontsize='smaller')\n _leg.text(0.1, 0.5, lableft, fontsize='smaller')",
"def _draw_legend(self, labels, title=None):\n\n if len(self.pos) < 1:\n print 'Legend can not be plotted for Gleckler, as no data available!'\n return\n\n pmax = max(self.pos.values())\n\n # generate separate figure for legend\n f = plt.figure()\n ax = f.add_subplot(111, frameon=True, aspect='equal', axisbg='grey')\n f.subplots_adjust(bottom=0.25, top=0.75, left=0.25, right=0.75)\n\n for k in labels.keys():\n if k == 1:\n pos = 'top'\n elif k == 2:\n pos = 'bottom'\n elif k == 3:\n pos = 'left'\n elif k == 4:\n pos = 'right'\n else:\n raise ValueError('Can not draw Gleckler legend! Invalid position value! %s' % str(k))\n\n oldval = self.show_value\n self.show_value = False\n self.__plot_triangle(ax, np.random.random(), pos=pos)\n self.show_value = oldval\n ax.set_xticks([])\n ax.set_yticks([])\n\n fontsize = 16\n linewidth = 3\n\n for k in labels.keys():\n if k == 1: # top\n ax.annotate(labels[k], xy=(0.5, 0.9), xycoords='axes fraction', xytext=(0., 1.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 2:\n ax.annotate(labels[k], xy=(0.5, 0.1), xycoords='axes fraction', xytext=(0., -0.3), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 3:\n ax.annotate(labels[k], xy=(0.1, 0.5), xycoords='axes fraction', xytext=(-0.6, 0.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 4:\n ax.annotate(labels[k], xy=(0.9, 0.5), xycoords='axes fraction', xytext=(1.1, 0.8), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n\n if title is not None:\n f.suptitle(title, size=fontsize)\n\n return f",
"def legend(obj, ncol=3, **kwargs):\n # Font size handling here is a bit weird. We specify fontsize=6\n # in legend constructor since that affects spacing. However, we\n # need to manually override with 'small' later, because the original\n # specification did not take effect on whole-figure legends (and for\n # actual text, 6 is a wee bit small). We get a specific cramped\n # appearance and correct behavior for whole-figure legends this way.\n l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)\n plt.setp(l.get_texts(), fontsize='small')",
"def legend(self, legend):\n\n self.container['legend'] = legend",
"def _hr_mean_add_legend(**kwargs):\n ax: plt.Axes = kwargs.get(\"ax\")\n legend_loc = kwargs.get(\"legend_loc\", \"upper left\")\n # get handles\n handles, labels = ax.get_legend_handles_labels()\n # remove the errorbars\n handles = [h[0] for h in handles]\n # use them in the legend\n if legend_loc == \"upper left\":\n bbox_to_anchor = (0.01, 0.90)\n elif legend_loc == \"upper right\":\n bbox_to_anchor = (0.99, 0.90)\n else:\n bbox_to_anchor = None\n ax.legend(\n handles,\n labels,\n loc=legend_loc,\n bbox_to_anchor=bbox_to_anchor,\n numpoints=1,\n )",
"def legend_extras(\n self, handles=None, labels=None, *, loc=None,\n frame=None, frameon=None, ncol=None, ncols=None,\n center=None, order='C', label=None, title=None,\n fontsize=None, fontweight=None, fontcolor=None,\n **kwargs\n):\n # Parse input args\n # TODO: Legend entries for colormap or scatterplot objects! Idea is we\n # pass a scatter plot or contourf or whatever, and legend is generated by\n # drawing patch rectangles or markers using data values and their\n # corresponding cmap colors! For scatterplots just test get_facecolor()\n # to see if it contains more than one color.\n # TODO: It is *also* often desirable to label a colormap object with\n # one data value. Maybe add a legend option for the *number of samples*\n # or the *sample points* when drawing legends for colormap objects.\n # Look into \"legend handlers\", might just want to add own handlers by\n # passing handler_map to legend() and get_legend_handles_labels().\n if order not in ('F', 'C'):\n raise ValueError(\n f'Invalid order {order!r}. Choose from '\n '\"C\" (row-major, default) and \"F\" (column-major).'\n )\n ncol = _not_none(ncols=ncols, ncol=ncol)\n title = _not_none(label=label, title=title)\n frameon = _not_none(frame=frame, frameon=frameon, default=rc['legend.frameon'])\n if handles is not None and not np.iterable(handles): # e.g. a mappable object\n handles = [handles]\n if labels is not None and (not np.iterable(labels) or isinstance(labels, str)):\n labels = [labels]\n if title is not None:\n kwargs['title'] = title\n if frameon is not None:\n kwargs['frameon'] = frameon\n fontsize = kwargs.get('fontsize', None) or rc['legend.fontsize']\n if fontsize is None:\n pass\n elif fontsize in mfonts.font_scalings:\n kwargs['fontsize'] = rc._scale_font(fontsize)\n else:\n kwargs['fontsize'] = units(fontsize, 'pt')\n\n # Handle and text properties that are applied after-the-fact\n # NOTE: Set solid_capstyle to 'butt' so line does not extend past error bounds\n # shading in legend entry. This change is not noticable in other situations.\n kw_text = {}\n for key, value in (('color', fontcolor), ('weight', fontweight)):\n if value is not None:\n kw_text[key] = value\n kw_handle = _pop_props(kwargs, 'lines')\n kw_handle['solid_capstyle'] = 'butt'\n\n # Get axes for legend handle detection\n # TODO: Update this when no longer use \"filled panels\" for outer legends\n axs = [self]\n if self._panel_hidden:\n if self._panel_parent: # axes panel\n axs = list(self._panel_parent._iter_axes(hidden=False, children=True))\n else:\n axs = list(self.figure._iter_axes(hidden=False, children=True))\n\n # Handle list of lists (centered row legends)\n # NOTE: Avoid very common plot() error where users draw individual lines\n # with plot() and add singleton tuples to a list of handles. If matplotlib\n # gets a list like this but gets no 'labels' argument, it raises error.\n list_of_lists = False\n if handles is not None:\n handles = [h[0] if isinstance(h, tuple) and len(h) == 1 else h for h in handles]\n list_of_lists = any(isinstance(h, (list, np.ndarray)) for h in handles)\n if list_of_lists:\n if any(not np.iterable(_) for _ in handles):\n raise ValueError(f'Invalid handles={handles!r}.')\n if not labels:\n labels = [None] * len(handles)\n elif not all(np.iterable(_) and not isinstance(_, str) for _ in labels):\n # e.g. 
handles=[obj1, [obj2, obj3]] requires labels=[lab1, [lab2, lab3]]\n raise ValueError(f'Invalid labels={labels!r} for handles={handles!r}.')\n\n # Parse handles and legends with native matplotlib parser\n if not list_of_lists:\n if isinstance(handles, np.ndarray):\n handles = handles.tolist()\n if isinstance(labels, np.ndarray):\n labels = labels.tolist()\n handles, labels, *_ = mlegend._parse_legend_args(\n axs, handles=handles, labels=labels,\n )\n pairs = list(zip(handles, labels))\n else:\n pairs = []\n for ihandles, ilabels in zip(handles, labels):\n if isinstance(ihandles, np.ndarray):\n ihandles = ihandles.tolist()\n if isinstance(ilabels, np.ndarray):\n ilabels = ilabels.tolist()\n ihandles, ilabels, *_ = mlegend._parse_legend_args(\n axs, handles=ihandles, labels=ilabels,\n )\n pairs.append(list(zip(ihandles, ilabels)))\n\n # Manage pairs in context of 'center' option\n center = _not_none(center, list_of_lists)\n if not center and list_of_lists: # standardize format based on input\n list_of_lists = False # no longer is list of lists\n pairs = [pair for ipairs in pairs for pair in ipairs]\n elif center and not list_of_lists:\n list_of_lists = True\n ncol = _not_none(ncol, 3)\n pairs = [pairs[i * ncol:(i + 1) * ncol] for i in range(len(pairs))]\n ncol = None\n if list_of_lists: # remove empty lists, pops up in some examples\n pairs = [ipairs for ipairs in pairs if ipairs]\n\n # Bail if no pairs\n if not pairs:\n return mlegend.Legend(self, [], [], loc=loc, ncol=ncol, **kwargs)\n # Multiple-legend pseudo-legend\n elif center:\n objs = _multiple_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)\n # Individual legend\n else:\n objs = [_single_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)]\n\n # Add legends manually so matplotlib does not remove old ones\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n continue\n if hasattr(self, 'legend_') and self.legend_ is None:\n self.legend_ = obj # set *first* legend accessible with get_legend()\n else:\n self.add_artist(obj)\n\n # Apply legend box properties\n outline = rc.fill({\n 'linewidth': 'axes.linewidth',\n 'edgecolor': 'axes.edgecolor',\n 'facecolor': 'axes.facecolor',\n 'alpha': 'legend.framealpha',\n })\n for key in (*outline,):\n if key != 'linewidth':\n if kwargs.get(key, None):\n outline.pop(key, None)\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n obj.update(outline) # the multiple-legend bounding box\n else:\n obj.legendPatch.update(outline) # no-op if frame is off\n\n # Apply *overrides* to legend elements\n # WARNING: legendHandles only contains the *first* artist per legend because\n # HandlerBase.legend_artist() called in Legend._init_legend_box() only\n # returns the first artist. Instead we try to iterate through offset boxes.\n # TODO: Remove this feature? Idea was this lets users create *categorical*\n # legends in clunky way, e.g. entries denoting *colors* and entries denoting\n # *markers*. But would be better to add capacity for categorical labels in a\n # *single* legend like seaborn rather than multiple legends.\n for obj in objs:\n try:\n children = obj._legend_handle_box._children\n except AttributeError: # older versions maybe?\n children = []\n for obj in _iter_legend_children(children):\n # Account for mixed legends, e.g. 
line on top of error bounds shading\n if isinstance(obj, mtext.Text):\n obj.update(kw_text)\n else:\n for key, value in kw_handle.items():\n getattr(obj, 'set_' + key, lambda value: None)(value)\n\n # Append attributes and return, and set clip property!!! This is critical\n # for tight bounding box calcs!\n for obj in objs:\n obj.set_clip_on(False)\n if isinstance(objs[0], mpatches.FancyBboxPatch):\n objs = objs[1:]\n return objs[0] if len(objs) == 1 else tuple(objs)",
"def set_legend(self, **lgdkwargs):\n\n if 'loc' not in lgdkwargs.keys(): \n lgdkwargs['loc'] = 'upper right'\n \n if 'scatterpoints' not in lgdkwargs.keys(): \n lgdkwargs['scatterpoints'] = 1 \n\n self.sub.legend(**lgdkwargs) \n \n return None",
"def make_legend_fig(legend: matplotlib.legend.Legend) -> Figure:\n\n # Get the dimensions (in inches) of the legend's bounding box\n legend_inches = legend.get_window_extent().transformed(\n cast(Figure, legend.figure).dpi_scale_trans.inverted())\n\n fig = Figure(\n figsize=(\n legend_inches.width + 0.05,\n legend_inches.height + 0.05,\n ))\n fig.add_axes([0, 0, 1, 1]).axis('off')\n\n fig.legend(\n legend.legendHandles,\n [t.get_text() for t in legend.texts],\n ncol=legend._ncols,\n loc='center',\n bbox_to_anchor=(0.5, 0.5),\n )\n return fig",
"def legend(self, *args, loc=None, width=None, space=None, **kwargs):\n if loc != '_fill':\n loc = self._loc_translate(loc, rc['legend.loc'])\n if isinstance(loc, np.ndarray):\n loc = loc.tolist()\n\n # Generate panel\n if loc in ('left', 'right', 'top', 'bottom'):\n ax = self.panel_axes(loc, width=width, space=space, filled=True)\n return ax.legend(*args, loc='_fill', **kwargs)\n\n # Fill\n if loc == '_fill':\n # Hide content\n for s in self.spines.values():\n s.set_visible(False)\n self.xaxis.set_visible(False)\n self.yaxis.set_visible(False)\n self.patch.set_alpha(0)\n self._panel_filled = True\n # Try to make handles and stuff flush against the axes edge\n kwargs.setdefault('borderaxespad', 0)\n frameon = _notNone(kwargs.get('frame', None), kwargs.get(\n 'frameon', None), rc['legend.frameon'])\n if not frameon:\n kwargs.setdefault('borderpad', 0)\n # Apply legend location\n side = self._panel_side\n if side == 'bottom':\n loc = 'upper center'\n elif side == 'right':\n loc = 'center left'\n elif side == 'left':\n loc = 'center right'\n elif side == 'top':\n loc = 'lower center'\n else:\n raise ValueError(f'Invalid panel side {side!r}.')\n\n # Draw legend\n return legend_wrapper(self, *args, loc=loc, **kwargs)",
"def legend(self, loc, options=\"\", **kwargs):\n self._pad.cd()\n\n if self._legend is not None and isinstance(self._legend, root.TLegend):\n warnings.warn(\"These axes already have a legend, will overwrite\", stacklevel=2)\n self._legend.Delete()\n\n self._legend = root.TLegend(*loc)\n\n # Default formatting options: use transparent background\n # Do this here since this option is not available in the `TStyle` class\n self._legend.SetFillColorAlpha(0, 0)\n\n # Set graphics attributes\n root_helpers.set_graphics_attributes(self._legend, **kwargs)\n\n # Columns\n if \"ncol\" in kwargs:\n self._legend.SetNColumns(kwargs[\"ncol\"])\n\n # Legend border size\n if \"bordersize\" in kwargs:\n self._legend.SetBorderSize(kwargs[\"bordersize\"])\n\n for obj, label, option in self._legend_entries:\n if option is not None:\n self._legend.AddEntry(obj, label, option)\n else:\n self._legend.AddEntry(obj, label)\n\n self._legend.Draw(options)\n\n return self._legend",
"def test_manual_legend(self):\n # Draw a random scatter plot\n random = np.random.RandomState(42)\n\n Ax, Ay = random.normal(50, 2, 100), random.normal(50, 3, 100)\n Bx, By = random.normal(42, 3, 100), random.normal(44, 1, 100)\n Cx, Cy = random.normal(20, 10, 100), random.normal(30, 1, 100)\n\n _, ax = plt.subplots()\n ax.scatter(Ax, Ay, c=\"r\", alpha=0.35, label=\"a\")\n ax.scatter(Bx, By, c=\"g\", alpha=0.35, label=\"b\")\n ax.scatter(Cx, Cy, c=\"b\", alpha=0.35, label=\"c\")\n\n # Add the manual legend\n manual_legend(\n ax, (\"a\", \"b\", \"c\"), (\"r\", \"g\", \"b\"), frameon=True, loc=\"upper left\"\n )\n\n # Assert image similarity\n self.assert_images_similar(ax=ax, tol=0.5)",
"def legend(colors, labels, shapes='box', loc='best', layout='vertical', reverse_vertical=True, ax=None):\n if ax is None:\n ax = plt.gca()\n\n handles = get_handles(shapes, colors, labels)\n if not all(len(handles) == l for l in [len(colors), len(labels)]):\n warnings.warn('Lengths of one or more of colors, labels, and shapes did not match.', UserWarning)\n\n if layout == 'horizontal' or layout == 'h':\n ncol = len(labels)\n else:\n ncol = 1\n if reverse_vertical: #Reverse so that it goes from bottom to top\n handles = handles[-1::-1]\n\n return ax.legend(handles=handles, loc=loc, ncol=ncol, frameon=False)",
"def _LegendAndSave(Fig,SaveName,loc=\"upper right\",frameon=True,close=False,\n tight=True,use_legend=True,handlelength=1,**kwargs):\n if use_legend and legend_is_useable():\n legend(loc=loc,frameon=frameon,handlelength=handlelength)\n savefig(Fig,SaveName,close=close,tight=tight,**kwargs)",
"def _patch_legend(obj, draw_options, legend_type):\n legend = \"\"\n if _is_in_legend(obj):\n # Unfortunately, patch legend entries need \\addlegendimage in Pgfplots.\n do = \", \".join([legend_type] + draw_options) if draw_options else \"\"\n legend += \"\\\\addlegendimage{{{}}}\\n\\\\addlegendentry{{{}}}\\n\\n\".format(\n do, obj.get_label()\n )\n\n return legend",
"def add_legend(\n self,\n labels=None,\n bcolor=(0.5, 0.5, 0.5),\n border=False,\n size=(0.2, 0.2),\n name=None,\n loc='upper right',\n face='triangle',\n ):\n if self.legend is not None:\n self.remove_legend()\n self._legend = _vtk.vtkLegendBoxActor()\n\n if labels is None:\n # use existing labels\n if not self._labels:\n raise ValueError(\n 'No labels input.\\n\\n'\n 'Add labels to individual items when adding them to'\n 'the plotting object with the \"label=\" parameter. '\n 'or enter them as the \"labels\" parameter.'\n )\n\n self._legend.SetNumberOfEntries(len(self._labels))\n for i, (vtk_object, text, color) in enumerate(self._labels.values()):\n if face is None:\n # dummy vtk object\n vtk_object = pyvista.PolyData([0.0, 0.0, 0.0])\n\n self._legend.SetEntry(i, vtk_object, text, color.float_rgb)\n\n else:\n self._legend.SetNumberOfEntries(len(labels))\n\n legend_face = make_legend_face(face)\n for i, (text, color) in enumerate(labels):\n self._legend.SetEntry(i, legend_face, text, Color(color).float_rgb)\n\n if loc is not None:\n if loc not in ACTOR_LOC_MAP:\n allowed = '\\n'.join([f'\\t * \"{item}\"' for item in ACTOR_LOC_MAP])\n raise ValueError(f'Invalid loc \"{loc}\". Expected one of the following:\\n{allowed}')\n x, y, size = map_loc_to_pos(loc, size, border=0.05)\n self._legend.SetPosition(x, y)\n self._legend.SetPosition2(size[0], size[1])\n\n if bcolor is None:\n self._legend.SetUseBackground(False)\n else:\n self._legend.SetUseBackground(True)\n self._legend.SetBackgroundColor(Color(bcolor).float_rgb)\n\n self._legend.SetBorder(border)\n\n self.add_actor(self._legend, reset_camera=False, name=name, pickable=False)\n return self._legend",
"def test_legend_position():\n\n fig = Figure()\n fig.basemap(region=[-2, 2, -2, 2], frame=True)\n positions = [\"jTR+jTR\", \"g0/1\", \"n0.2/0.2\", \"x4i/2i/2i\"]\n for i, position in enumerate(positions):\n fig.plot(x=[0], y=[0], style=\"p10p\", label=i)\n fig.legend(position=position, box=True)\n return fig",
"def draw_legend(self, *drawables):\n # Check if we already have a legend\n if hasattr(self, '_legend'):\n raise RuntimeError('legend already exists on this plot')\n\n # Switch to the context of the main plot\n self._plot.cd()\n\n # Create the legend\n if self._atlas_label_drawn:\n self._legend = TLegend(self.PLOT_LEGEND_LEFT,\n (self.PLOT_LEGEND_BOTTOM_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_BOTTOM),\n self.PLOT_LEGEND_RIGHT,\n (self.PLOT_LEGEND_TOP_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_TOP))\n else:\n # WJF may need customisation with ratio\n self._legend = TLegend(0.15, 0.7, 0.5, 0.88)\n\n SetOwnership(self._legend, False)\n\n # Style it\n self._legend.SetTextSize((\n self.PLOT_LEGEND_TEXT_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_TEXT_SIZE\n ))\n self._legend.SetBorderSize(0)\n self._legend.SetFillStyle(0) # transparent\n self._legend.SetNColumns(self.PLOT_LEGEND_N_COLUMNS)\n\n # Create a chained list of all drawables. We decompose THStack\n # objects in reverse order, i.e. top-to-bottom.\n drawables = \\\n list(chain(*(drawable_iterable(h, True, True)\n for h\n in drawables)))\n\n # Add anything to this list that we created internally\n drawables.extend(self._legend_extras)\n\n # Because ROOT draws legend entries from left-to-right across rows and\n # not top-to-bottom along columns, we need to do a bit of a pivot on\n # the list so that the histograms appear in the vertical order of the\n # stack\n n_entries = len(drawables)\n n_col = self.PLOT_LEGEND_N_COLUMNS\n n_row = int(ceil(float(n_entries) / n_col))\n legend_order = []\n for r in xrange(0, n_row):\n for c in xrange(0, n_col):\n if (r * n_col + c) == n_entries:\n # Don't need an outer break, this would only happen on the\n # last row if n_row * n_col != n_entries\n break\n legend_order.append(drawables[r + c * n_row])\n\n # Add the drawables\n for drawable in legend_order:\n SetOwnership(drawable, False)\n title = drawable.GetTitle()\n # HACK: Convention: legend for drawables with a non-default\n # marker style (data) to be drawn as point, and with\n # empty fill (signal) to be drawn as line\n #print 'Adding plottable {0} to legend. Has MarkerStyle {1} and fill colour {2}'.format(drawable.GetName(), drawable.GetMarkerStyle(), drawable.GetFillColor())\n #self._legend.AddEntry(drawable, title, 'f')\n this_marker = drawable.GetMarkerStyle()\n if this_marker == 20:\n self._legend.AddEntry(drawable, title, 'p')\n #self._legend.AddEntry(drawable, title, 'l')\n elif drawable.GetTitle() == 'Total Background' or drawable.GetTitle() == 'Total background':\n self._legend.AddEntry(drawable, title, 'lf')\n elif drawable.GetFillColor() == 0:\n self._legend.AddEntry(drawable, title, 'l')\n elif this_marker == 21 or this_marker == 3 or this_marker == 22:\n self._legend.AddEntry(drawable, title, 'lp')\n else:\n self._legend.AddEntry(drawable, title, 'f')\n\n # Draw the legend\n self._legend.Draw()",
"def _createLegend(legendMap, collection, size=9, shape=Hexagon):\n\n class AssemblyLegend:\n \"\"\"\n Custom Legend artist handler.\n\n Matplotlib allows you to define a class that implements ``legend_artist`` to give you\n full control over how the legend keys and labels are drawn. This is done here to get\n Hexagons with Letters in them on the legend, which is not a built-in legend option.\n\n See: http://matplotlib.org/users/legend_guide.html#implementing-a-custom-legend-handler\n \"\"\"\n\n def legend_artist(self, _legend, orig_handle, _fontsize, handlebox):\n letter, index = orig_handle\n x0, y0 = handlebox.xdescent, handlebox.ydescent\n width, height = handlebox.width, handlebox.height\n x = x0 + width / 2.0\n y = y0 + height / 2.0\n normVal = collection.norm(index)\n cmap = collection.get_cmap()\n colorRgb = cmap(normVal)\n if shape == Hexagon:\n patch = matplotlib.patches.RegularPolygon(\n (x, y),\n 6,\n height,\n orientation=math.pi / 2.0,\n facecolor=colorRgb,\n transform=handlebox.get_transform(),\n )\n elif shape == Rectangle:\n patch = matplotlib.patches.Rectangle(\n (x - height / 2, y - height / 2),\n height * 2,\n height,\n facecolor=colorRgb,\n transform=handlebox.get_transform(),\n )\n else:\n patch = matplotlib.patches.Circle(\n (x, y),\n height,\n facecolor=colorRgb,\n transform=handlebox.get_transform(),\n )\n\n luminance = numpy.array(colorRgb).dot(LUMINANCE_WEIGHTS)\n dark = luminance < 0.5\n if dark:\n color = \"white\"\n else:\n color = \"black\"\n handlebox.add_artist(patch)\n txt = mpl_text.Text(\n x=x, y=y, text=letter, ha=\"center\", va=\"center\", size=7, color=color\n )\n handlebox.add_artist(txt)\n return (patch, txt)\n\n ax = plt.gca()\n keys = []\n labels = []\n for value, label, description in legendMap:\n keys.append((label, value))\n labels.append(description)\n\n legend = ax.legend(\n keys,\n labels,\n handler_map={tuple: AssemblyLegend()},\n loc=\"center left\",\n bbox_to_anchor=(1.0, 0.5),\n frameon=False,\n prop={\"size\": size},\n )\n return legend",
"def draw_legend(\n data: pd.Series[Any], da: DrawingArea, lyr: Layer\n ) -> DrawingArea:\n msg = \"The geom should implement this method.\"\n raise NotImplementedError(msg)",
"def legend(self, legend):\n\n self._legend = legend",
"def setLegendFrameWidth(w=1):\n dislin.frame(w)",
"def test_legend_entries():\n fig = Figure()\n fig.basemap(projection=\"x1i\", region=[0, 7, 3, 7], frame=True)\n fig.plot(\n data=\"@Table_5_11.txt\",\n style=\"c0.15i\",\n fill=\"lightgreen\",\n pen=\"faint\",\n label=\"Apples\",\n )\n fig.plot(data=\"@Table_5_11.txt\", pen=\"1.5p,gray\", label=\"My lines\")\n fig.plot(data=\"@Table_5_11.txt\", style=\"t0.15i\", fill=\"orange\", label=\"Oranges\")\n fig.legend(position=\"JTR+jTR\")\n\n return fig"
] | [
"0.8135134",
"0.7360067",
"0.7313128",
"0.73119825",
"0.72300524",
"0.7143622",
"0.7033816",
"0.70186085",
"0.69774044",
"0.6967444",
"0.692776",
"0.6845011",
"0.6801535",
"0.6784947",
"0.6779383",
"0.67106515",
"0.65214556",
"0.6519761",
"0.65141386",
"0.6490154",
"0.6467635",
"0.64387244",
"0.64225644",
"0.6346362",
"0.6341243",
"0.63337106",
"0.63262594",
"0.6318476",
"0.62719935",
"0.62282175"
] | 0.76351845 | 1 |
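A small sketch of calling the addLegend() function from the row above; the plotted data and implementation names are made up for illustration:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
impls = ["serial", "openmp"]
# Two hypothetical result lines to be named in the legend.
lines = [ax.plot([1, 2, 4], [1.0, 0.6, 0.4])[0],
         ax.plot([1, 2, 4], [1.0, 0.55, 0.3])[0]]
addLegend(ax, lines, impls, "best")  # "best" lets matplotlib pick the placement
fig.savefig("legend_demo.png")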
Generate a single plot, which has various options. The maximum y axis value can be set. Grid lines can be plotted across the graph. Error bars can be plotted. | def generatePlot(
bmName,
yAxisName,
npl,
sizeValues,
deviations=None,
yMax=None,
yMin=None,
yLines=(),
fileSuffix="",
xMin=None,
xLabel="",
logarithmic=False,
legendPos="best",
sortKeyFn=lambda x: x,
timeUnit=None,
):
print("Plot: '" + bmName + "'")
fig = preparePlot(bmName)
ax = fig.axes[0]
impls = sorted(list(sizeValues.keys()), key=cmp_to_key(compareFn))
# print("xmMin = ",xMin)
setupXAxis(ax, npl[0] if xMin == None else xMin, npl[-1], xLabel, logarithmic)
if False:
print("npl: ", npl)
print("sizeValues: ", sizeValues)
print("impls: ", impls)
lines = []
# Choose a marker size based on the number of points we're plotting in all
numPoints = sum([len(sizeValues[impl]) for impl in impls])
markerSize = 5 if numPoints < 1000 else 2.5
# print ("numPoints: ",numPoints, " markerSize: ",markerSize)
for impl in impls:
dataValues = sizeValues[impl]
nplToUse = npl
if len(dataValues) != len(npl):
# print "impl : " +impl
# print "npl : " + str(npl) + " dataValues: " + str(dataValues)
nplToUse = npl[: len(dataValues)]
# print ("impl: ", str(impl), " " + str(dataValues))
(line,) = ax.plot(nplToUse, dataValues)
lines.append(line)
plt.setp(
line,
marker=implementationStyles[impl][0],
markersize=markerSize,
color=implementationStyles[impl][1],
linestyle=implementationStyles[impl][2],
)
if deviations:
# ax.errorbar is inconsistent with ax.plot, and doesn't ignore None entries
# so we have to add the bars one at a time ignoring the Nones ourself
for x, v, e in zip(nplToUse, dataValues, deviations[impl]):
if v is None:
continue
# print("x:", x, "v:", v)
ax.errorbar(x, v, yerr=e, color=implementationStyles[impl][1])
addLegend(ax, lines, impls, legendPos)
# Round up the yMax value so that it is at the granularity of the y axis tick marks
yTicks = ax.get_yticks()
yTMdelta = yTicks[1] - yTicks[0]
# print(bmName," yMax=",yMax,"yTMdelta = ",yTMdelta)
if yMax == None:
yMax = yTicks[-1]
else:
yMax = yTMdelta * math.ceil(yMax / yTMdelta)
# print("Computed yMax: ", yMax)
ax.set_ylim(yMin, yMax)
# And similarly for xMin
xTicks = ax.get_xticks()
xTMdelta = xTicks[1] - xTicks[0]
xTickMin = int(10 ** xTicks[0]) if logarithmic else xTicks[0]
# print ("Incoming xMin:",xMin, " xTicks[0]: ", xTickMin)
if xMin == None or xMin == xTickMin:
xMin = xTicks[0]
else:
xMin = xTMdelta * math.floor(xMin / xTMdelta)
ax.set_xlim(10 ** xMin if logarithmic else xMin)
# print ("xMin computed as ",xMin)
if yLines:
for l in yLines:
ax.axhline(y=l, color="gray")
else:
ax.grid(True)
finalisePlot(ax, bmName, yAxisName, fig, fileSuffix, timeUnit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def TwoOrOneValuePlot(no_of_sets, Xax, Ydat1, Ydat2, Label1, Label2,\n xmin, xmax, ymin_1, ymax_1, ymin_2, ymax_2,\n XLab, YLab_1, YLab_2, SupTitle, Title, FileName,\n currentDate, currentTime, Software_version):\n\n rc('font', size=6, weight='bold')\n if no_of_sets == 1:\n fig = plt.figure(figsize=(9, 5))\n ax1 = fig.add_subplot(111)\n elif no_of_sets == 2:\n fig = plt.figure(figsize=(9, 9))\n ax1 = fig.add_subplot(211)\n else:\n print(' ERROR !!!')\n if no_of_sets == 2:\n ax1.plot(Xax, Ydat2, color=u'#ff7f0e', linestyle='-', alpha=0.4, linewidth='1.00')\n ax1.plot(Xax, Ydat1, color=u'#1f77b4', linestyle='-', alpha=1.0, linewidth='1.00', label=Label1)\n ax1.legend(loc='upper right', fontsize=6)\n ax1.grid(visible=True, which='both', color='silver', linestyle='-')\n ax1.axis([xmin, xmax, ymin_1, ymax_1])\n ax1.set_ylabel(YLab_1, fontsize=6, fontweight='bold')\n ax1.set_title(Title, fontsize=6)\n if no_of_sets == 2:\n ax1.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n ax2 = fig.add_subplot(212)\n if no_of_sets == 2:\n ax2.plot(Xax, Ydat1, color=u'#1f77b4', linestyle='-', alpha=0.4, linewidth='1.00')\n ax2.plot(Xax, Ydat2, color=u'#ff7f0e', linestyle='-', alpha=1.0, linewidth='1.00', label=Label2)\n ax2.legend(loc='upper right', fontsize=6)\n ax2.grid(visible=True, which='both', color='silver', linestyle='-')\n ax2.axis([xmin, xmax, ymin_2, ymax_2])\n ax2.set_xlabel(XLab, fontsize=6, fontweight='bold')\n ax2.set_ylabel(YLab_2, fontsize=6, fontweight='bold')\n fig.subplots_adjust(hspace=0.05, top=0.94)\n elif no_of_sets == 1:\n ax1.set_xlabel(XLab, fontsize=6, fontweight='bold')\n fig.subplots_adjust(top=0.92)\n else:\n print(' ERROR !!!')\n fig.suptitle(SupTitle, fontsize = 8, fontweight='bold')\n if no_of_sets == 2:\n fig.text(0.73, 0.06, 'Processed ' + currentDate + ' at ' + currentTime,\n fontsize=4, transform=plt.gcf().transFigure)\n fig.text(0.09, 0.06, 'Software version: ' + Software_version + ', [email protected], IRA NASU',\n fontsize=4, transform=plt.gcf().transFigure)\n elif no_of_sets == 1:\n fig.text(0.73, 0.03, 'Processed ' + currentDate + ' at '+currentTime,\n fontsize=4, transform=plt.gcf().transFigure)\n fig.text(0.09, 0.03, 'Software version: ' + Software_version + ', [email protected], IRA NASU',\n fontsize=4, transform=plt.gcf().transFigure)\n else:\n print(' ERROR !!!')\n pylab.savefig(FileName, bbox_inches='tight', dpi=160)\n plt.close('all')\n return",
"def buildPlot(self):\r\n style.use('fivethirtyeight')\r\n self.fig = plt.figure()\r\n self.ax1 = self.fig.add_subplot(1,1,1)\r\n self.ax1.clear()\r\n self.ax1.plot(self.inputValInt,self.inputValInt1)",
"def draw(values):\n\n # Turn on grid with dashed style\n subplot.yaxis.grid(True, linestyle=\"dashed\")\n\n # Get list of new higher values\n new_values = get_new_values(values)\n\n # Plot 2 lines\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n\n # Print left plot title\n pyplot.title(\n \"Press X to exit\\nPress S to save\",\n loc=\"left\",\n fontsize=14,\n color=\"#1F76B4\",\n style=\"italic\",\n pad=20,\n )\n\n # Print right plot title\n pyplot.title(\n f\"{'Max objective:':>25}{max(values):>10.2E}\\n\"\n f\"{'Generation:':>25}{values.index(max(values)):>10}\",\n loc=\"right\",\n fontfamily=\"Lucida Sans Typewriter\",\n fontsize=12,\n color=\"#FF7E0E\",\n pad=20,\n )",
"def plot_graph(self):\r\n A = self.a_grid ; V = self.V1 ; Pol = self.Pol\r\n A_opt = A[Pol.astype(int)]\r\n \r\n fig = plt.subplots(figsize = (8,5))\r\n ax = [None,None]\r\n pltgrid = (1,2)\r\n \r\n ax[0] = plt.subplot2grid(pltgrid, (0,0))\r\n ax[1] = plt.subplot2grid(pltgrid, (0,1))\r\n \r\n ax[0].plot(A[:],V[:,0,0], linewidth = 2, color = 'blue', label = r'$V(a)$: Low $w$')\r\n ax[0].plot(A[:],V[:,0,5], linewidth = 2, color = 'green', label = r'$V(a)$: Median $w$')\r\n ax[0].plot(A[:],V[:,0,-1], linewidth = 2, color = 'red', label = r'$V(a)$: High $w$')\r\n \r\n ax[1].plot(A[:],A_opt[:,0,0], linewidth = 2, color = 'blue', label = r'$a\\'(a)$: Low $w$')\r\n ax[1].plot(A[:],A_opt[:,0,5], linewidth = 2, color = 'green', label = r'$a\\'(a)$: Median $w$')\r\n ax[1].plot(A[:],A_opt[:,0,-1], linewidth = 2, color = 'red', label = r'$a\\'(a)$: High $w$')\r\n ax[1].plot(A[:],A[:], linewidth = 2, color = 'violet', linestyle = 'dashed', zorder = 1)\r\n \r\n \r\n ax[0].set_xlabel(r'$a$') ; ax[0].legend()\r\n ax[1].set_xlabel(r'$a$') ; ax[1].legend()\r\n ax[0].set_title('Value function')\r\n ax[1].set_title('Asset policy')\r\n \r\n plt.tight_layout()\r\n plt.show()",
"def plotDistributionWithLimitsOld(lXs, llYs ,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for val in llYs:\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n ax.plot(lXs,val[0],color=Graphics.lColors[i])\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i])\n for j in val[3:]:\n axCurrent.plot(lXs,j,color='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n# CV = variation(val[0])\n# axCurrent.text(60, ymaxCurrent*0.7, \"CV = {}\".format(CV), fontsize=10)\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n if legend:\n ax.legend(legend)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def __init__(self,\n title = '',\n x_title = None,\n y_title = None,\n plot_header = True,\n ratio = False,\n x_range = None,\n y_max = None,\n y_min = None,\n legendColumns = 1):\n # Store the title\n self._title = title\n self._x_title, self._y_title = x_title, y_title\n\n # Store whether or not the user wants to create a plot header\n self._plot_header = plot_header\n\n # Calculate a unique name for the plot components\n name = _rand_uuid()\n\n # Default logy if off\n self._logy = False\n\n # Default off for integer x-ticks \n self._x_integer_ticks = False \n\n # store n columns for legend\n self.PLOT_LEGEND_N_COLUMNS = legendColumns \n\n # Create a canvas\n self._canvas = TCanvas(name + '_canvas',\n name,\n int(self.PLOT_WIDTH),\n int(self.PLOT_HEIGHT))\n SetOwnership(self._canvas, False)\n\n\n\n # Create the main plot and draw it\n self._plot = TPad(\n 'upperPad',\n 'upperPad',\n #name + '_plot', # WJF: don't need upper pad to have unique name \n #name,\n 0.0,\n (self.PLOT_RATIO_FRACTION\n if ratio\n else 0.0),\n 1.0,\n 1.0\n )\n SetOwnership(self._plot, False)\n self._plot.SetMargin(*(self.PLOT_MARGINS_WITH_RATIO\n if ratio\n else self.PLOT_MARGINS))\n self._plot.Draw()\n\n # Store ranges\n self._x_range = x_range\n if y_max is not None:\n self._set_maximum_value(y_max)\n if y_min is not None:\n self._set_minimum_value(y_min)\n\n # Switch back to the context of the canvas\n self._canvas.cd()\n\n\n # Create a ratio plot and draw it if requested\n if ratio:\n self._ratio_plot = TPad(\n 'lowerPad', # WJF, don't need lower pad to have unique name\n 'lowerPad',\n 0.0,\n 0.0,\n 1.0,\n self.PLOT_RATIO_FRACTION\n )\n SetOwnership(self._ratio_plot, False)\n self._ratio_plot.SetMargin(*self.PLOT_RATIO_MARGINS)\n self._ratio_plot.SetGridy(True)\n self._ratio_plot.Draw()\n else:\n self._ratio_plot = None\n # increase canvas margins\n #self._canvas.SetBottomMargin(1)\n #self._plot.SetMargin\n #self._canvas.SetLeftMargin(\n\n # Track whether or not we've already drawn to the main pad\n self._drawn = False\n\n # Track whether or not we've already drawn to the ratio pad\n self._ratio_drawn = False\n\n # Track that object which sets up the axes in the main plot\n self._axes_object = None\n\n # Track whether or not we've already added the atlas label to the main pad\n self._atlas_label_drawn = False\n\n # Create a structure to track any histograms we generate internally\n # which need to be added to any legends created\n self._legend_extras = []\n \n # Flag if y-axis has been set to a log scale \n self._logy = False",
"def plot(self):\n pass",
"def figure4():\n\n plot_settings = {'y_limits': [-80, -50],\n 'x_limits': None,\n 'y_ticks': [-80, -70, -60, -50],\n 'locator_size': 5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 20,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_4',\n 'legend': ['control', 'apamin'],\n 'legend_size': 8,\n 'y_on': True}\n line_styles = ['-', 'dotted']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate figure 1 (top)\n for ix, g_sk_bar in enumerate([0.3, 0]):\n t, y = solver(100, g_sk_bar=g_sk_bar)\n plt.plot(t, y[:, 0], c='k', linestyle=line_styles[ix])\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 1, 2)\n t1 = 1200\n t, y = solver(t1, t_start=50, duration=t1, i_bias_on=0.33, g_sk_bar=0.03)\n plt.plot(t, y[:, 0], 'k-')\n\n plot_settings['y_limits'] = [-100, 30]\n plot_settings['x_limits'] = [0, t1]\n plot_settings['y_ticks'] = [-80, -60, -40, -20, 0, 20]\n plot_settings['locator_size'] = 10\n plot_settings['scale_size'] = 100\n plot_settings['legend'] = None\n alter_figure(plot_settings, close=True) # Alter plot for publication",
"def __init__(self):\n self.pt = Plotter(2, width=self.width, height=self.height)\n self.pt.use_grid()\n self.pt.set_title(\n \"Exponentials plotted from {:.1f} to {:.1f}\", self.xMin, self.xMax)\n self.pt.set_xlabel(\"X\")\n self.pt.set_ylabel(\"a*exp(-b*X)\")",
"def plotDistribution(lXs, lYs, out=\"\", title=\"\", xax=\"\", yax=\"\", color=\"blue\", legend=\"\", grid=[]):\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n ax = fig.add_subplot(111)\n ax.plot(lXs,lYs, color=color)\n if legend:\n ax.legend(legend, fontsize=22)\n for line in grid:\n ax.axvline(x=line, linestyle='dashed', linewidth=1, color='black')\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def format_axes():\n\n plt.axes(frameon=False)\n plt.axvline(0, PlotParameter.y_axis_bot_lim, PlotParameter.y_axis_top_lim, color='k')\n plt.tick_params(which='both', bottom='off', top='off', right='off', labelbottom='off')\n plt.xlim(0, PlotParameter.x_axis_right_lim)\n plt.ylim(PlotParameter.y_axis_bot_lim, PlotParameter.y_axis_top_lim)\n plt.ylabel(PlotParameter.y_axis_label)",
"def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )",
"def plotDistributionWithLimits(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for val in llYs:\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def plot(self, **kwargs):\n if self.order != None:\n name = str(_constructModelName(self.teff, self.logg, \n self.metal, self.en, self.order, self.path))\n output = kwargs.get('output', str(name) + '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', \n alpha=.8, linewidth=1, label=name)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim) \n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()\n\n else:\n output = kwargs.get('output'+ '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', alpha=.8, linewidth=1)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim)\n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()",
"def beamPlot(beamLength,loadPositions,loadForces,beamSupport):\n res = beamSuperposition(np.arange(0,beamLength*1.01,beamLength/100.0), beamLength, loadPositions, loadForces, beamSupport)\n #TODO: FIX PLOT TITLE\n \n #Get subplots to make two y-axes for one graph\n fig, ax1 = plt.subplots()\n \n #Make position line plot\n ax1.plot(np.arange(0,beamLength*1.01,beamLength/100.0),res)\n ax1.set_xlabel(\"Position [m]\")\n ax1.set_ylabel(\"Deflection at point [m]\")\n ax1.invert_yaxis()\n \n #Print the maximum deflection in scientific notation, at a non-obtrusive location\n maxDeflection = max(res)\n textX = 0 if beamSupport == \"cantilever\" else beamLength/4\n textY = maxDeflection if beamSupport==\"cantilever\" else maxDeflection/4\n exponent = np.floor(np.log10(maxDeflection))\n plt.text(textX,textY,\"Max deflection: \" + str(round(maxDeflection*10**(-exponent), 2)) + \"* 10^\" + str(int(exponent)) +\" m\");\n \n \n #Make load point plot\n ax2 = ax1.twinx()\n ax2.plot(loadPositions, loadForces, 'ro')\n ax2.set_ylabel(\"Load at point [N]\")\n ax2.set_ylim(0, max(loadForces)*1.05)\n \n plt.title(\"Beam Deflection\")\n plt.show()",
"def ploter(self):\n if len(self.dataset[self.first_title]) != 2:\n print('plot is only avilable for two features')\n return\n x_axis = []\n y_axis = []\n for title in self.dataset:\n x_axis.append(self.dataset[title][0])\n y_axis.append(self.dataset[title][1])\n plt.plot(x_axis, y_axis, 'o')\n plt.show()",
"def plot(self, ylog=False, category=\"Accuracy\", figsize=(12, 5)):\n if self.CV == False: # no Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'r-', label='Training Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'r-', label='Training Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n if self.CV == True: # has Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].plot(range(1, len(self.cvError) + 1), self.cvError, 'r-', label='CV Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'g-', label='Training Accuracy')\n ax[1].plot(range(1, len(self.cvAcc) + 1), self.cvAcc, 'r-', label='CV Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'g-', label='Training Error Rate')\n ax[1].plot(range(1, len(self.cvAcc) + 1), 1 - np.array(self.cvAcc), 'r-', label='CV Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n\n return fig, ax",
"def creation_plot(speciation):\n plt.close()\n fig, ax = plt.subplots(figsize=(8,5))\n if speciation == '1':\n xlabel = 'Chemical species'\n else:\n xlabel = 'Coordinating polyhedra'\n ax.set_ylabel(r'Cluster absolute lifetime (fs)', fontweight = 'bold', fontsize = 12)\n ax.set_xlabel(xlabel, fontweight = 'bold', fontsize = 12)\n ax.xaxis.set_label_coords(0.5, -0.3)\n ax.yaxis.set_label_coords(-0.1, 0.5)\n #Adjustment of ticks\n ymajorLocator = AutoLocator()\n yminorLocator = AutoMinorLocator()\n ax.yaxis.set_major_locator(ymajorLocator)\n ax.yaxis.set_minor_locator(yminorLocator)\n ax.tick_params(which = 'both', labelsize = 10, width = 0.5)\n #plt.autoscale(enable=True,axis='y',tight=True)\n #ax.set_ylim(0,770) #for NaAlSi3O8 a19.0\n #ax.set_ylim(0,60) #for NaAlSi3O8 a15.0\n plt.tick_params(bottom = False, top = False, labelbottom = True)\n ax.grid(True, which='major',axis = 'y', linestyle=':', linewidth=0.5 )\n return fig,ax",
"def peek(self, **kwargs):\n\n plt.figure()\n axes = plt.gca()\n data_lab=self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]\n axes.plot(self.data.index,self.data,label=data_lab)\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-4,1)\n axes.set_title('Nobeyama Radioheliograph')\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))\n axes.set_ylabel('Correlation')\n axes.legend()\n plt.show()",
"def makeGraph(xval, yval, title = \"GRAPH\", xlabel=\"X AXIS\", ylabel=\"Y AXIS\", axisRng=None, style='bo', clear=False):\n plt.plot(xval, yval, style)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n if axisRng is not None:\n plt.axis(axisRng)\n plt.show()\n if clear:\n plt.close()",
"def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()",
"def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')",
"def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])",
"def make_plot(x,y):",
"def plot(self, *args, **kwargs):\n pass",
"def multiplot(self, x, y, **kwargs):\n\n # --------------------------------------------------------------------------------------------- #\n # Attributes\n self._evalKwargs(kwargs)\n # Remove the previous and create the new framework\n plt.delaxes(self.ax)\n count = 0\n colcount = 0\n # Get the min and max values of the X-axis\n xmin = []\n xmax = []\n for i in range( len(x) - 1):\n if hasattr(x[i][0], \"__len__\"):\n for j in range( len(x[i]) - 1):\n xmin.append( min(x[i][j]) )\n xmax.append( max(x[i][j]) )\n else:\n xmin.append( min(x[i]) )\n xmax.append( max(x[i]) )\n if self.xmin is not None:\n xmin = [self.xmin]\n if self.xmax is not None:\n xmax = [self.xmax]\n deltaX = max(xmax) - min(xmin)\n xmin = min(xmin) - 0.05*deltaX\n xmax = max(xmax) + 0.05*deltaX\n\n # --------------------------------------------------------------------------------------------- #\n # Iterate over the number of subplots \n for nSP in range( len(self.prop) ):\n # --------------------------------------------------------------------------------------------- #\n # Initialize the subplot properties\n self.ax = plt.subplot2grid( (sum(self.prop), 1), (count, 0), rowspan=self.prop[nSP])\n count += self.prop[nSP] # Keep track of the size of the plot\n # Extract the errors if any are given\n if self.yerr is not None:\n yerrSP = self.yerr[nSP]\n if self.xerr is not None:\n xerrSP = self.xerr[nSP] \n # Set the y-axis and x-axis scales\n try:\n ymode = self.ymode[colcount]\n except:\n ymode = self.ymode\n self.ax.set_yscale(ymode)\n self.ax.set_xscale(self.xmode)\n\n # --------------------------------------------------------------------------------------------- #\n # Iterate over the different curves to plot in the same subplot\n if hasattr(y[nSP][0], \"__len__\"):\n for nCurv in range( len(y[nSP]) ):\n # Read the plot properties\n try: color = self.color[colcount]\n except: color = self.color\n try: mksize = self.mksize[colcount]\n except: mksize = self.mksize\n try: alpha = self.alpha[colcount]\n except: alpha = self.alpha\n try: ncol = self.ncol[colcount]\n except: ncol = self.ncol\n try: loc = self.loc[colcount]\n except: loc = self.loc\n try: legend = self.label[colcount]\n except: legend = self.label \n try: lstyle = self.lstyle[colcount]\n except: lstyle = self.lstyle\n try: mktype = self.mktype[colcount]\n except : mktype= self.mktype\n\n # Extract the errors if any are given\n if (self.yerr is not None) and (hasattr(self.yerr[nSP][nCurv], \"__len__\")):\n yerrnCurv = self.yerr[nSP][nCurv]\n else:\n yerrnCurv = None\n if (self.xerr is not None) and (hasattr(self.xerr[nSP][nCurv], \"__len__\")):\n xerrnCurv = self.xerr[nSP][nCurv] \n else:\n xerrnCurv = None\n\n # Plot limits as down-arraows\n if (self.limit is not None) and (self.limit[nSP][nCurv]):\n self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], xerr=xerrnCurv, \n yerr=[yerrnCurv, np.zeros( len(yerrnCurv) )], fmt='none', \n ecolor=color, elinewidth=0.5, alpha=alpha, capsize=0, \n barsabove=False, lolims=False, uplims=False, xlolims=False, \n xuplims=False, errorevery=1, capthick=None, zorder=nCurv, legend=None)\n self.ax.plot(x[nSP][nCurv], y[nSP][nCurv]-yerrnCurv, marker='v',\n color=color, alpha=alpha, markersize=mksize, linestyle='',\n markeredgecolor=color, zorder=nCurv)\n # Fill an area between y[nSP][0][0] and y[nSP][0][1]\n #elif hasattr(y[nSP][nCurv], \"__len__\"):\n # self.ax.fill_between(x[nSP][nCurv], y[nSP][nCurv][0], y[nSP][nCurv][1], facecolor=self.color, edgecolor='none', alpha=0.5,\n # rasterized=self.raster, zorder=-10)\n # Plot a 'normal' 
curve\n else:\n if (legend is not None) and (legend != 'None') :\n graph = self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], yerr=yerrnCurv, \n xerr=xerrnCurv, fmt=mktype, ecolor=color, elinewidth=0.5, capsize=0,\n linestyle=lstyle, markerfacecolor=color, markeredgecolor=color, \n color=color, markersize=mksize, label=legend, linewidth=self.lwdth, \n barsabove=False, errorevery=1, capthick=None, alpha=alpha, zorder=nCurv)\n # Handling of the labels of the curves\n handles, labels = self.ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for k in xrange( len(labels) ):\n if labels[k] in self.label:\n handle_list.append(handles[k])\n label_list.append(labels[k])\n self.ax.legend(handle_list, label_list, loc=\"best\", prop={'size':self.ftsize2},\n frameon=True, numpoints=1, ncol=ncol, handletextpad=0.1)\n else:\n graph = self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], yerr=yerrnCurv,\n xerr=xerrnCurv, fmt=mktype, ecolor=color, elinewidth=0.5, capsize=0,\n linestyle=lstyle, markerfacecolor=color, markeredgecolor=color, \n color=color, markersize=mksize, alpha=alpha, linewidth=self.lwdth,\n barsabove=False, errorevery=1, capthick=None, zorder=nCurv)\n colcount += 1\n # --------------------------------------------------------------------------------------------- #\n # There is only one curve per subplot\n else:\n # Read the plot properties\n try: color = self.color[colcount]\n except: color = self.color\n try: mksize = self.mksize[colcount]\n except: mksize = self.mksize\n try: alpha = self.alpha[colcount]\n except: alpha = self.alpha\n try: ncol = self.ncol[colcount]\n except: ncol = self.ncol\n try: loc = self.loc[colcount]\n except: loc = self.loc\n try: legend = self.label[colcount]\n except: legend = self.label \n try: lstyle = self.lstyle[colcount]\n except: lstyle = self.lstyle\n try: mktype = self.mktype[colcount]\n except : mktype= self.mktype\n\n # Extract the errors if any are given\n if (self.yerr is not None) and (hasattr(self.yerr[nSP], \"__len__\")):\n yerrSP = self.yerr[nSP]\n else:\n yerrSP = None\n if (self.xerr is not None) and (hasattr(self.xerr[nSP], \"__len__\")):\n xerrSP = self.xerr[nSP] \n else:\n xerrSP = None\n # Plot\n if (self.limit is not None) and (self.limit[nSP]):\n self.ax.errorbar(x[nSP], y[nSP], xerr=xerrSP, \n yerr=[yerrSP, np.zeros( len(yerrSP) )], fmt='none', \n ecolor=color, elinewidth=0.5, alpha=alpha, capsize=0, \n barsabove=False, lolims=False, uplims=False, xlolims=False, \n xuplims=False, errorevery=1, capthick=None, legend=None)\n self.ax.plot(x[nSP], y[nSP]-yerrSP, marker='v',\n color=color, alpha=alpha, markersize=mksize, linestyle='',\n markeredgecolor=color)\n else:\n self.ax.errorbar(x[nSP], y[nSP], yerr=yerrSP, xerr=xerrSP, fmt=mktype, ecolor=color,\n elinewidth=0.5, capsize=0, linestyle=lstyle, markerfacecolor=color, \n markeredgecolor=color, markersize=mksize, label=legend, alpha=alpha, color=color,\n barsabove=False, errorevery=1, capthick=None)\n colcount += 1\n if legend is not None:\n # Handling of the labels of the curves\n self.ax.legend(loc=\"best\", prop={'size':self.ftsize2}, frameon=True, numpoints=1,\n ncol=ncol, handletextpad=0.1)\n handles, labels = self.ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for k in xrange(len(labels)):\n if labels[k] in self.label:\n handle_list.append(handles[k])\n label_list.append(labels[k])\n self.ax.legend(handle_list, label_list, loc=\"best\", prop={'size':self.ftsize2}, \n frameon=True, numpoints=1, ncol=ncol, handletextpad=0.1)\n\n # 
--------------------------------------------------------------------------------------------- #\n # Make pretty each subplot\n\n # Shift the x-label\n self.ax.yaxis.set_label_coords(self.labelx, 0.5)\n # Set the y-label for each subplot\n self.ax.set_ylabel(self.ylabel[nSP], fontsize=self.ftsize1, multialignment='center')\n self._plotDisplay()\n\n # Dimensions\n self.ax.set_xlim(xmin, xmax) # Every subplot has the same x-axis \n ymin, ymax = self.ax.get_ylim()\n try: ymin = self.ymin[nSP]\n except: pass\n try: ymax = self.ymax[nSP]\n except: pass\n self.ax.set_ylim(ymin, ymax) \n\n # Draw a horizontal line\n if (self.hline is not None) and (self.hline[nSP] is not None):\n # Multiple h-line to draw\n self.ax.axhline(y=self.hline[nSP], color='black', linestyle=':')\n # Fill an area\n if self.fill is not None:\n #self.ax.fill_between(x[nSP][nCurv], y[nSP][nCurv][0], y[nSP][nCurv][1], facecolor=self.color, edgecolor='none', alpha=0.5,\n # rasterized=self.raster, zorder=-10)\n for k in range(len(self.fill)/2):\n self.ax.axvspan(self.fill[k*2], self.fill[k*2+1], facecolor=self.shadecol, \n edgecolor=\"none\", linewidth=0., zorder=-10, alpha=0.5)\n # For all upper subplot, remove the last ticks\n if nSP != len(self.prop)-1:\n plt.setp(self.ax.get_xticklabels(), visible=False)\n self.ax.set_xlabel('')\n ymincheck, ymaxcheck=self.ax.get_ylim()\n if ymaxcheck > ymincheck:\n self.ax.get_yticklabels()[0].set_visible(False)\n else: # in case of a revert y axis...\n self.ax.get_yticklabels()[-1].set_visible(False)\n\n self.f.subplots_adjust(hspace=0)",
"def plot(self, nsteps_max=10):\r\n fig = plt.figure()\r\n ax1 = plt.subplot(221)\r\n ax2 = plt.subplot(222)\r\n ax3 = plt.subplot(224)\r\n\r\n if 'fig' in locals(): # assures tight layout even when plot is manually resized\r\n def onresize(event): plt.tight_layout()\r\n try: cid = fig.canvas.mpl_connect('resize_event', onresize) # tighten layout on resize event\r\n except: pass\r\n\r\n self.plot_px_convergence(nsteps_max=nsteps_max, ax=ax1)\r\n\r\n if getattr(self.px_spec, 'ref_tree', None) is None:\r\n self.calc_px(method='LT', nsteps=nsteps_max, keep_hist=True)\r\n\r\n self.plot_bt(bt=self.px_spec.ref_tree, ax=ax2, title='Binary tree of stock prices; ' + self.specs)\r\n self.plot_bt(bt=self.px_spec.opt_tree, ax=ax3, title='Binary tree of option prices; ' + self.specs)\r\n # fig, ax = plt.subplots()\r\n # def onresize(event): fig.tight_layout()\r\n # cid = fig.canvas.mpl_connect('resize_event', onresize) # tighten layout on resize event\r\n # self.plot_px_convergence(nsteps_max=nsteps_max, ax=ax)\r\n\r\n try: plt.tight_layout()\r\n except: pass\r\n plt.show()",
"def plotDistributionWithLimitsRefine(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def setupPlotVariables(self):\n\n ### Borrowed from Thomas' plot routines\n self.plotLabels = [r'$m_1$', r'$m_2$', r'eccentricity', \\\n r'period (days)', \\\n r'inclination (rad)',r'$\\omega$ (rad)',r'$t_0$',r'$\\alpha$ (rad)']\n\n ### Change these to update the plot ranges for each\n ### parameter. \n angOut = np.pi+0.3\n self.plotLimsLo = [1.0, -1.0, -0.2, -1.0, -angOut, -angOut, -10,0]\n self.plotLimsHi = [2.2, 10.0, 1.2, 35.0, angOut, angOut, 10,1.2]\n\n ### We specify the method for the uniformly-spaced grid. If we\n ### want to make one of these logspace (say) we just change\n ### the method identified in the appropriate place in the\n ### list.\n nMeth = len(self.plotLimsLo)\n self.plotSpacerMethods = [np.linspace for i in range(nMeth)]\n\n self.plotNfine = 1000 ### number of fine points to use\n self.plotNcols = 3 ### number of columns in the plot\n\n self.plotNrows = int(np.ceil(nMeth/float(self.plotNcols)) )",
"def draw_plot(yscale='linear'):\n plt.yscale(yscale)\n plt.xticks(list(range(0, 101, 5)))\n plt.xlabel('percentile [%]')\n plt.grid(True)\n plt.ylabel('operation time [ns]')\n plt.legend()\n plt.show()"
] | [
"0.6829554",
"0.661534",
"0.6599279",
"0.6443192",
"0.64005935",
"0.6339009",
"0.6306646",
"0.6288057",
"0.6278511",
"0.62476623",
"0.6240224",
"0.6229401",
"0.62160033",
"0.62088746",
"0.62024546",
"0.6184107",
"0.6181531",
"0.6175892",
"0.61673915",
"0.61650753",
"0.61499107",
"0.61497515",
"0.6148504",
"0.61445856",
"0.6143347",
"0.6133731",
"0.6129831",
"0.6128125",
"0.6109406",
"0.6107705"
] | 0.66698444 | 1 |
Output a MediaWiki-formatted table | def mediaWikiTable(leftmostTitle, array, formatFn=lambda x: str(x)):
columnKeys = extractColumnKeys(array)
print("{|")
    titles = [leftmostTitle] + [str(k) for k in columnKeys]
    print("!" + " !! ".join(titles))
    for k in sorted(array.keys(), key=cmp_to_key(compareFn)):
print("|-")
print("| " + str(k))
v = array[k]
for ck in columnKeys:
            value = v.get(ck, None)
print("| " + (formatFn(value) if value else ""))
print("|}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list",
"def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))",
"def print_movie_table(self):\n self = self\n headers = [\"Votes\", \"Rank\", \"Year\", \"Title\"]\n self.handler.header(headers)\n\n for movie in self.movie_list:\n self.handler.row([str(movie.get_votes()), str(movie.get_rank()),\n str(movie.get_year()), str(movie.get_title())])\n\n self.handler.footer()",
"def printTable(songs, language):\n\n attributes = \"\"\n if language == ENGLISH:\n print \"Song Name\".ljust(55) + \" | URL\".ljust(60) + \" | Status\\t\\t\"\n print \"-\" * 56 + \"+\" + \"-\" * 57 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n rows = rows + songs[\"song\"][i][\"name\"].ljust(55) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows \n else:\n print \"歌曲名稱\".ljust(55) + \" | URL\".ljust(60) + \" | 狀態\\t\\t\"\n print \"-\" * 52 + \"+\" + \"-\" * 59 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n rows = rows + songs[\"song\"][i][\"name\"].ljust(51) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows",
"def markdown_table(self):\n table_data = [\n [i + 1, filt.__class__.__name__, f'{filt.fc:.0f}', f'{filt.q:.2f}', f'{filt.gain:.1f}']\n for i, filt in enumerate(self.filters)\n ]\n return tabulate(\n table_data,\n headers=['#', 'Type', 'Fc (Hz)', 'Q', 'Gain (dB)'],\n tablefmt='github'\n )",
"def table(self, doc, level, output):\n output('<table border=\"1\" cellpadding=\"2\">\\n')\n for row in doc.getRows()[0]:\n output(\"<tr>\\n\")\n for column in row.getColumns()[0]:\n str = ('<%s colspan=\"%s\" align=\"%s\" valign=\"%s\">'\n % (column.getType(),\n column.getSpan(),\n column.getAlign(),\n column.getValign()))\n output(str)\n for c in column.getChildNodes():\n getattr(self, self.element_types[c.getNodeName()]\n )(c, level, output)\n output(\"</\" + column.getType() + \">\\n\")\n output(\"</tr>\\n\")\n output(\"</table>\\n\")",
"def print_table(emojis):\n if len(emojis) > 0:\n table = []\n for i in emojis:\n table.append([i.get('id'), i.get('title'), i.get('emoji')])\n print(tabulate(table, headers=[\"ID\", \"Title\", \"Emoji\"]))\n else:\n print(\"¯\\_(ツ)_/¯ Nothing to see here...\")",
"def format_prettytable(table):\r\n for i, row in enumerate(table.rows):\r\n for j, item in enumerate(row):\r\n table.rows[i][j] = format_output(item)\r\n ptable = table.prettytable()\r\n ptable.hrules = FRAME\r\n ptable.horizontal_char = '.'\r\n ptable.vertical_char = ':'\r\n ptable.junction_char = ':'\r\n return ptable",
"def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])",
"def show_html_tables(html_tables):\n\n for (it,t) in enumerate(html_tables):\n print(f\"Table {it}\")\n for (ir,r) in enumerate(t):\n print(f\" Row {ir}\")\n for (ic,c) in enumerate(r):\n print(f\" Col {ic}: {c}\")",
"def processPage(plugin, page, page_format):\n\n txtstr = unicode('', 'utf-8')\n if page_format == 'single':\n txtstr += '\\n{} Plug-in Results\\n\\n'.format(plugin)\n\n # loop through each table in the page\n for tabledata in sorted(page, key=lambda page: page[2]):\n (title, mytable, index) = tabledata\n\n # first we need to go through the table and find the max length for each column\n col_widths = [ len(getattr(col_name, 'name').replace(masOutput.SPACE, ' ')) for col_name in mytable.header ]\n\n # check to see if it should be printed like a horizontal or vertical table\n if mytable.printVertical is False:\n outlist = list()\n\n for row in mytable:\n # modify the col_widths to set a maximum length of each column to 60 characters\n row_lens = list()\n\n for col in row[1:]:\n try:\n row_lens.append(min(60, len(col)))\n except TypeError:\n # if this isn't a str or unicode value, explicitly convert it\n row_lens.append(min(60, len(str(col))))\n\n col_widths = map(max, zip(col_widths, row_lens))\n\n # format the header\n if mytable.printHeader is not False:\n txtstr += \" \".join((getattr(val, 'name')).replace(masOutput.SPACE, ' ').ljust(length) for val, length in zip(mytable.header, col_widths)) + '\\n'\n txtstr += ' '.join([ '-'*val for val in col_widths ])\n\n # format the data\n for row in mytable:\n # combine the row values together and extend them as needed\n # this may be a confusing statement, but its fast!\n #outlist.append(\"\".join(map(lambda x: _extend(x[0], x[1]+2), zip(row[1:], col_widths))))\n outlist.append(\"\".join([_extend(x[0], x[1]+2) for x in zip(row[1:], col_widths) ]))\n\n txtstr += '\\n'\n txtstr += \"\\n\".join(outlist)\n txtstr += '\\n\\n'\n\n else:\n outlist = list()\n\n # get max column width + 2\n max_col = max(col_widths) + 2\n\n # pre-justify header\n newheader = [ getattr(data,'name').replace(masOutput.SPACE, ' ').ljust(max_col) for data in mytable.header ]\n\n # this adds a slight speed increase for large output\n myappend = outlist.append\n\n # go through each row of data and join the header and values together\n for row in mytable:\n #myappend(\"\\n\".join(map(lambda x: x[0] + _extend(x[1], 0), zip(newheader, row[1:]))))\n myappend(\"\\n\".join([ x[0] + _extend(x[1], 0) for x in zip(newheader, row[1:])]))\n myappend(\"\\n\\n\")\n\n txtstr += \"\".join(outlist)\n txtstr += '\\n'\n\n return txtstr",
"def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))",
"def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')",
"def write_the_table(what):\n global count_row\n count_row += 1\n\n if what.get('rank') == 0:\n background_blue.append(count_row)\n\n struct = what.get('structure')\n link = what.get('link')\n exams_1 = what.get('exams_1')\n exams_2 = what.get('exams_2')\n exams_empty = [['', '', '', '', '', '', '', '', '', '', '', '']] \\\n if self.training.session_type != '1' else \\\n [['', '', '', '', '', '']]\n\n def formated(number):\n \"\"\"\n Remove trailing 0\n \"\"\"\n frac, whole = modf(number)\n if frac == 0:\n return int(whole)\n return str(number).rstrip('0')\n\n def write_exams(list_1, list_2):\n exam_table = []\n for ex_1, ex_2 in itertools.zip_longest(list_1, list_2):\n ex_1_table = [\n formated(ex_1.coefficient) if ex_1 is not None else '',\n [\n Paragraph(filter_content(ex_1.label) if ex_1 else '',\n self.styles['SmallNormal']),\n Paragraph(\n \"<para textColor=grey>\" + filter_content(ex_1.additionnal_info) \\\n if ex_1 and ex_1.additionnal_info \\\n else \"\" + \"</para\\>\",\n self.styles['SmallNormal'])\n ],\n ex_1.type_exam if ex_1 is not None else '',\n ex_1.text_duration if ex_1 is not None else '',\n '' if ex_1 is None \\\n else ex_1.convocation if not training_is_ccct \\\n else ex_1.get_type_ccct_display(),\n ex_1.eliminatory_grade if ex_1 is not None else '',\n ex_1.threshold_session_2 if ex_1 is not None else '',\n ]\n\n ex_2_table = [\n formated(ex_2.coefficient) if ex_2 is not None else '',\n [Paragraph(filter_content(ex_2.label) if ex_2 is not None else '', self.styles[\n 'SmallNormal']), Paragraph(\"<para textColor=grey\\\n >\" + ex_2.additionnal_info + \"</para\\\n >\" if ex_2.additionnal_info is not None else \"\",\n self.styles['SmallNormal'])],\n ex_2.type_exam if ex_2 is not None else '',\n ex_2.text_duration if ex_2 is not None else '',\n ex_2.eliminatory_grade if ex_2 is not None else '',\n ] if ex_2 is not None else ['', '', '', '', '']\n if self.training.session_type != '1':\n ex_1_table.extend(ex_2_table)\n else:\n ex_1_table.pop()\n exam_table.append(ex_1_table)\n exam_table = exam_table if len(exam_table) > 0 else exams_empty\n if exam_table == exams_empty:\n # TODO: calculate empty space to set rowHeights in order to\n # avoid blank in table\n pass\n inner_table = Table(\n exam_table, colWidths=width_exams, rowHeights=None)\n inner_table.setStyle(TableStyle(\n [('INNERGRID', (0, 0), (-1, -1), 0.1, colors.black),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('FONTSIZE', (0, 0), (-1, -1), 8),\n # ('LEFTPADDING', (0, 0), (-1, -1), 0),\n # ('RIGHTPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n ]))\n return inner_table\n\n ref_scol = struct.ref_si_scol if struct.ref_si_scol else \"\" # FIX bug with rof data\n ref_data = (\n Paragraph(struct.ROF_ref, self.styles['CenterSmall']),\n Paragraph(ref_scol, self.styles['CenterSmall'])\n ) if self.reference == 'both' \\\n else Paragraph(struct.ROF_ref, self.styles['CenterSmall']) if self.reference == 'with_rof' \\\n else Paragraph(ref_scol, self.styles['CenterSmall']) if self.reference == 'with_si' \\\n else Paragraph('', self.styles['CenterSmall'])\n\n object_line = [\n Paragraph(\n \"<para leftIndent=%s>%s</para> \" % (what.get('rank')*10, filter_content(struct.label)),\n self.styles['SmallBold'] if what.get('rank') == 0 \\\n or what.get('structure').nature == 'UE' \\\n else self.styles['SmallNormal']\n ),\n Paragraph(\n struct.get_respens_name if not struct.external_name \\\n else struct.external_name,\n 
self.styles['CenterSmall'] if not struct.external_name else \\\n self.styles['CenterSmallItalic']\n ),\n [ref_data],\n '30' if self.training.degree_type.ROF_code in self.training_types_for_which_to_display_30_ects\\\n and struct.nature == 'SE'\\\n else struct.ECTS_credit if struct.ECTS_credit else '-',\n formated(link.coefficient) if link.coefficient else '',\n link.eliminatory_grade,\n write_exams(exams_1, exams_2)\n ]\n if self.respforms:\n if self.reference == 'without':\n object_line.pop(2)\n else:\n object_line.pop(1)\n if self.reference == 'without':\n object_line.pop(1)\n\n big_table.append(object_line)\n\n for e in what.get('children'):\n write_the_table(e)",
"def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')",
"def latex_table():\n \n t = Table.read('../data/stream_origin.fits')\n N = len(t)\n \n f = open('../paper/stream_origin.tex', 'w')\n for i in range(N):\n t_ = t[i]\n for k in t_.colnames:\n if (t_[k]==np.nan) | (t_[k]=='nan'):\n t_[k] = '\\dots'\n #f.write('{:s} & {:s} & {:s} & {:s} & {:.1f}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['type'], t_['feh']))\n line = '{:s} & {:s} & {:s} & {:s} & {:s}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['friends'], t_['type'])\n f.write(line)\n print(line)\n \n f.close()",
"def header(self, hdata):\n self = self\n file = open(\"imdb_output.html\", \"w\")\n file.write(\"<html>\\n\\t<head>\\n\\t<style>\\n\" +\n \"\\t\\t\\ttable, th, td {border: 1px solid\\n\" +\n \"\\t\\t\\tblack;border-collapse: collapse;}\" +\n \"\\n\\t</style>\\n\" +\n \"\\t</head>\\n\\t<body>\\n\\t\\t<table style=\\\"width:100%\\\">\\n\")\n file.write(\"\\t\\t\\t<tr>\\n\")\n for line in hdata:\n file.write(\n \"\\t\\t\\t\\t\\t<th>\\n\\t\\t\\t\\t\\t\\t\"\n + str(line) + \"\\n\\t\\t\\t\\t\\t</th>\\n\")\n file.write(\"\\t\\t\\t</tr>\\n\")",
"def out(lam, eng, mat): # {{{1\n print(\"\\\\begin{table}[!htbp]\")\n print(\" \\\\renewcommand{\\\\arraystretch}{1.2}\")\n txt = \" \\\\caption{{\\\\label{{tab:{0}}}properties of {0}}}\"\n # Raw underscores in LaTeX text mode produce “Missing $” errors.\n texlname = lam.name.replace('_', '\\_')\n print(txt.format(texlname))\n print(\" \\\\centering\\\\footnotesize{\\\\rule{0pt}{10pt}\")\n print(\" \\\\tiny calculated by lamprop {}\\\\\\\\[3pt]}}\".format(__version__))\n if eng:\n _engprop(lam)\n if mat:\n _matrices(lam)\n print(\"\\\\end{table}\\n\") # 1}}}",
"def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')",
"def convert_table(mkd):\n\t\n\tmd_table_codes = re.findall(r\".*\\|.*\\n.*\\-.*(?:\\n.*\\|.*)*\", mkd, re.M)\n\tfor md_code in md_table_codes:\n\t\t\n\t\tmd_rows = re.findall(r\"(.*\\|.*)\", md_code, re.M)\n\t\theader = md_rows.pop(0)\n\t\tcolumn_count = md_rows.pop(0).count(\"-\")\n\n\t\ttex_code = \"\\\\begin{tabular}{|\"+\"l|\"*column_count+\"}\\n\\hline\\n\"\n\t\ttex_code += header.strip(\" |\").replace(\"|\", \"&\")+\" \\\\\\\\\\n\\hline\\n\"\n\t\tfor row in md_rows:\n\t\t\ttex_code += row.strip(\" |\").replace(\"|\", \"&\")+\" \\\\\\\\\\n\"\n\t\ttex_code += \"\\hline\\n\\end{tabular}\"\n\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\treturn mkd",
"def __writeTable(self, title, imgLink, legend=None):\n tags = {'title':title}\n \n if imgLink !=None:\n tags['image'] = self.parseTemplate({'imgLink': imgLink, 'imgWidth':self.imgWidth} ,\"%stemplates/files/qa.image.tpl\"%self.toadDir)\n else:\n tags['image'] = 'Step not performed during the pipeline execution'\n \n if legend != None:\n tags['legend'] = self.parseTemplate({'legend': legend} ,\"%stemplates/files/qa.legend.tpl\"%self.toadDir)\n else:\n tags['legend'] = ''\n \n return self.parseTemplate(tags, os.path.join(self.toadDir, \"templates/files/qa.table.tpl\"))",
"def show(contents_dict):\n rows_list = get_md_table(contents_dict)\n\n IPython.display.display(IPython.display.Markdown('\\n'.join(rows_list)))",
"def write(self, stream, root, order):\n stream.write('[table]\\n')\n if root and isinstance(root[0], dict):\n self.markup(stream, order, '[tr][th]', '[/th][th]', '[/th][/tr]')\n for row in root:\n self.markup(stream, [row.get(col, '') for col in order], '[tr][td]', '[/td][td]', '[/td][/tr]')\n else:\n for row in root:\n self.markup(stream, row, '[tr][td]', '[/td][td]', '[/td][/tr]')\n stream.write('[/table]\\n')",
"def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))",
"def print_table(hdrs, flag=False, data=[],fmt='psql'):\n\tres = cur.fetchall()\n\tif flag:\n\t\tres = data\n\tprint(tabulate(res, headers=hdrs, tablefmt=fmt))",
"def start_table(self):\n self.result = \"<table>\\n\"",
"def write_table(*lists):\n print(\"<table>\")\n for columns in zip(*lists):\n print(\"<tr>\")\n for val in columns:\n print(\"<td>{}</td>\".format(val))\n print(\"</tr>\")\n print(\"</table>\")",
"def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')",
"def printTableOnlyX(songs, language):\n\n attributes = \"\"\n if language == ENGLISH:\n print \"Song Name\".ljust(50) + \" | URL\".ljust(60) + \" | Status\\t\\t\"\n print \"-\" * 51 + \"+\" + \"-\" * 57 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n if songs[\"song\"][i][\"status\"] == 'X':\n rows = rows + songs[\"song\"][i][\"name\"].ljust(50) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows \n else:\n print \"歌曲名稱\".ljust(50) + \" | URL\".ljust(60) + \" | 狀態\\t\\t\"\n print \"-\" * 47 + \"+\" + \"-\" * 59 + \"+\" + \"-\" * 20\n rows = \"\"\n for i in range(len(songs[\"song\"])):\n if songs[\"song\"][i][\"status\"] == 'X':\n rows = rows + songs[\"song\"][i][\"name\"].ljust(46) + \" | \" \\\n + songs[\"song\"][i][\"url\"].ljust(57) + \" | \" \\\n + songs[\"song\"][i][\"status\"] + \"\\n\"\n print rows",
"def write(self, stream, root, order):\n stream.write('<table>\\n')\n stream.write('<tbody>\\n')\n if root and isinstance(root[0], dict):\n self.markup(stream, order, '<tr><th>', '</th><th>', '</th></tr>')\n for row in root:\n self.markup(stream, [row.get(col, '') for col in order], '<tr><td>', '</td><td>', '</td></tr>')\n else:\n for row in root:\n self.markup(stream, row, '<tr><td>', '</td></tr>', '</td><td>')\n stream.write('</tbody>\\n')\n stream.write('</table>\\n')"
] | [
"0.6593098",
"0.65204376",
"0.6463597",
"0.6431361",
"0.643116",
"0.6429846",
"0.63246125",
"0.63038605",
"0.6286171",
"0.626248",
"0.6258474",
"0.6157692",
"0.6144443",
"0.6130239",
"0.6066116",
"0.60581213",
"0.6050008",
"0.60433495",
"0.602486",
"0.60156596",
"0.6002523",
"0.59983295",
"0.5972675",
"0.5966859",
"0.5959078",
"0.594013",
"0.5924151",
"0.5908861",
"0.59080195",
"0.59044003"
] | 0.7025748 | 0 |
Extract a time from a string of the form "%dm %4.2fs" which is what "time" generates. | def extractTime(s):
msRe = r"([0-9]+)m +([0-9]+\.[0-9]+)s"
matched = re.match(msRe, s)
if matched:
return 60 * int(matched.group(1)) + float(matched.group(2))
# Maybe we don't have any minutes
sRe = r"([0-9]+\.[0-9]+)s"
matched = re.match(sRe, s)
if matched:
return float(matched.group(1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)",
"def parse_time(value: str) -> float:\n return float(value[:-1]) * TIME[value[-1]]",
"def _get_time(string):\n string = string[0:7] # Drop day\n return string.replace(\"-\", \"\")",
"def parse_time(s):\n return time.gmtime(float(s))",
"def parse_time(s):\n if s[-1].lower() in secs:\n return int(s[:-1]) * secs[s[-1].lower()]\n else:\n return int(s)",
"def str_to_time(string: str) -> Optional[timedelta]:\n regexp = r\"(?P<sign>[-+])?(?P<h>\\d+):(?P<m>\\d+):(?P<s>\\d+)\\.?(?P<ms>\\d+)?\"\n match = re.match(regexp, string)\n if not match:\n return None\n\n sign = -1 if match.group(\"sign\") == \"-\" else 1\n hours = int(match.group(\"h\"))\n minutes = int(match.group(\"m\"))\n seconds = int(match.group(\"s\"))\n if match.group(\"ms\"):\n msec = int(match.group(\"ms\"))\n else:\n msec = 0\n return sign * timedelta(\n hours=hours, minutes=minutes, seconds=seconds, milliseconds=msec\n )",
"def str_to_time(string: str) -> Optional[timedelta]:\n regexp = r\"(?P<sign>[-+])?(?P<h>\\d+):(?P<m>\\d+):(?P<s>\\d+)\\.?(?P<ms>\\d+)?\"\n match = re.match(regexp, string)\n if not match:\n return None\n\n sign = -1 if match.group('sign') == '-' else 1\n hours = int(match.group('h'))\n minutes = int(match.group('m'))\n seconds = int(match.group('s'))\n if match.group('ms'):\n msec = int(match.group('ms'))\n else:\n msec = 0\n return sign * timedelta(hours=hours, minutes=minutes, seconds=seconds, milliseconds=msec)",
"def _parse_time_str(self, time_str):\n time_fmt = \"%I:%M%p\"\n time_str = re.sub(\n r\":+\",\n \":\",\n re.sub(r\"\\s+\", \"\", re.sub(r\"to|from|\\.\", \"\", time_str.lower())).replace(\n \"o\", \"0\"\n ),\n )\n if \":\" not in time_str:\n time_fmt = \"%I%p\"\n elif len(time_str) < 6:\n time_fmt = \"%I%p\"\n time_str = time_str.replace(\":\", \"\")\n return datetime.strptime(time_str, time_fmt).time()",
"def read_time(time_string):\n factors = {\n \"n\": 1e-9,\n \"u\": 1e-6,\n \"m\": 1e-3,\n \"s\": 1\n }\n \n # Check that the time string is properly formatted, e. g. time part\n # is followed by the unit part. The string should contain at least two\n # character, otherwise splitting it into two parts will raise an IndexError.\n try:\n number, unit = time_string[:-1], time_string[-1]\n except (IndexError, TypeError):\n raise ValueError(\"Invalid time string given.\")\n\n # If the 'time part' cannot be converted to float, this raises a ValueError.\n number = float(number)\n \n if number < 0:\n raise ValueError(\"Negative time values are not allowed.\")\n \n # Check that a valid time unit was specified. If no unit was specified,\n # then what we call 'unit' will in fact be the last digit of the time value\n # and as we do not use numeric unit symbols, we still get an error.\n try:\n factor = factors[unit]\n except KeyError:\n raise ValueError(\"Invalid time unit given.\")\n\n time = number * factor\n return time",
"def parse_time(time_string):\n times = time_string.split(\"\\n\")\n\n user_time_str = times[-2].split(\"\\t\")[-1]\n sys_time_str = times[-1].split(\"\\t\")[-1]\n\n #print user_time_str, sys_time_str\n\n user_time = parse_m_s(user_time_str)\n sys_time = parse_m_s(sys_time_str)\n\n return user_time + sys_time",
"def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]",
"def __find_time(line):\n # Given line \"- Rows available: 3s311ms (2s300ms)\", this function returns \"3s311ms\"\n match = re.search(r': (.*) \\(', line)\n if match is None:\n assert False, \"Failed to find time in runtime profile\"\n return match.group(1)",
"def parse(str):\n if len(str) != 16:\n raise ValueError(\"Invalid time length %d\" % len(str))\n if (str[-1]) == 'R':\n return parse_relative_time(str)\n return parse_absolute_time(str)",
"def get_time_from_string(text):\n field = text.split(':')\n hr = int(field[0])\n mn = int(field[1])\n field = field[2].split('.')\n sec = int(field[0])\n usec = int(field[1])\n return datetime.time(hr, mn, sec, usec)",
"def extract_time(maybe_time_str: str) -> Optional[str]:\n match = TIMESTAMP_RE.search(maybe_time_str)\n if match is not None:\n return match.group()\n return None",
"def time_from_string(time):\n _type = type(time)\n try:\n if _type == datetime.time:\n return time\n elif _type == datetime.datetime:\n return datetime.datetime.time(time)\n else:\n try:\n return datetime.datetime.time(datetime.datetime.strptime(time, '%I:%M %p'))\n except ValueError:\n return datetime.datetime.time(datetime.datetime.strptime(time, '%H:%M:%S'))\n except ValueError:\n return time\n except TypeError:\n return time",
"def sanitize(time_string):\n if '-' in time_string:\n splitter = '-'\n\n elif ':' in time_string:\n splitter = ':'\n\n else:\n return (time_string)\n \n (mins, secs) = time_string.split(splitter)\n\n return (mins + '.' + secs)",
"def parse_time(time_string):\n minutes = re.search(r\"(\\d+) minutes\", time_string)\n if minutes:\n minutes = int(minutes.groups()[0])\n else:\n minutes = 0\n\n seconds = re.search(r\"(\\d+\\.\\d+) seconds\", time_string)\n if seconds:\n seconds = float(seconds.groups()[0])\n else:\n seconds = 0.0\n\n seconds += minutes * 60\n\n return seconds",
"def _parse_name_time(self, name):\n time_match = re.search(r'\\d{1,2}:\\d{2}([ apm.]{3,5})?', name)\n if not time_match:\n return name, None\n time_str = time_match.group()\n name = name.replace(time_str, '').strip()\n time_str = time_str.strip().replace('.', '')\n # Default to PM if not AM/PM not provided\n if 'm' not in time_str:\n time_str = '{} pm'.format(time_str)\n return name, datetime.strptime(time_str, '%I:%M %p').time()",
"def time2secs(time_str):\r\n try:\r\n time_str = float(time_str)\r\n return get_simple_digit_str(time_str)\r\n except:\r\n pass\r\n try:\r\n final_secs = 0\r\n new_time = re.split(\"\\s+\", time_str)\r\n day_hour_min_sec = {\"days\" : 24*3600, \"hrs\" : 3600, \"mins\" : 60, \"secs\" : 1}\r\n _keys = (\"days\", \"hrs\", \"mins\", \"secs\")\r\n if len(new_time) == 1: # 01:02:03:04 or 01h:02m:03s\r\n new_time = re.split(\":\", time_str)\r\n new_time = [re.sub(\"\\s+\", \"\", item) for item in new_time]\r\n new_time = [re.sub(\"\\D\", \"\", item) for item in new_time]\r\n new_time = [float(item) for item in new_time]\r\n dhms_dict = dict(list(zip(_keys[-len(new_time):], new_time)))\r\n else:\r\n dhms_dict = dict.fromkeys(_keys, 0)\r\n for i, t in enumerate(new_time):\r\n if t in dhms_dict:\r\n dhms_dict[t] = float(new_time[i-1])\r\n for key, value in list(day_hour_min_sec.items()):\r\n my_value = dhms_dict.get(key)\r\n if my_value:\r\n final_secs += my_value * value\r\n return get_simple_digit_str(final_secs)\r\n except (KeyError, ValueError):\r\n return time_str",
"def get_time(time):\n regtime = re.compile(r'^([0-1][0-9]|[2][0-3]):([0-5][0-9])$')\n if not regtime.match(time):\n return None\n time_group = regtime.match(time).groups()\n time_final = datetime.time(int(time_group[0]), int(time_group[1]))\n return time_final",
"def sanitize(time_string): # Fix non-uniformity in the athletes data to enable sorting\n if '-' in time_string:\n splitter = '-'\n (mins, secs) = time_string.split(splitter)\n elif ':' in time_string:\n splitter = ':'\n (mins, secs) = time_string.split(splitter)\n else:\n return time_string\n return '{0}.{1}'.format(mins, secs)",
"def time2secs( s ):\n t = s.split( ':' )\n nf = len( t )\n if nf == 1:\n # Seconds only!\n secs = int( t[0] )\n elif nf == 2:\n # Minutes & seconds!\n secs = int( t[1] ) + int( t[0] ) * 60\n elif nf == 3:\n # Hours, minutes & seconds!\n secs = int( t[2] ) + int( t[1] ) * 60 + int( t[0] ) * 60 * 60 \n elif nf == 4:\n # Days, hours, minutes, & seconds!\n secs = int( t[3] ) + int( t[2] ) * 60 + int( t[1] ) * 60 * 60\n secs += int( t[0] ) * 60 * 60 * 24\n\n return secs",
"def extract_time_from_log_path(log_path: str) -> str:\n match = FILE_PATH_TIMESTAMP_RE.search(log_path)\n if match is not None:\n return f\"{match.group(1)}:{match.group(2)}:{match.group(3)}\"\n return \"\"",
"def unpack_time(s, type='I'):\n\ttry:\n\t\t(l,), s = unpack(\"!\"+type, s)\n\texcept TypeError, e:\n\t\traise TypeError(\"Problem unpacking time: %s\" % e)\n\n\tif l < 0:\n\t\treturn None\n\treturn datetime.fromtimestamp(l), s",
"def parse_time(expr):\n # first deal with hour\n hsp = expr.lower().split('h')\n if len(hsp) > 1: h = int(hsp[0])\n else: h = 0\n # now hour is out of the way\n expr = hsp[-1]\n msp = expr.lower().split('m')\n if len(msp) > 1: m = int(msp[0])\n else: m = 0\n return f\"{h:02d}:{m:02d}:00\"",
"def parse_time_detail(self, time):\n\n time = re.search(\"<td class=\\\"timeCol\\\">((?P<time>[\\d]{1,3})\\')?</td>\", time)\n\n try:\n if type(time.groups()) != type(()):\n return \"\"\n except AttributeError:\n return \"\"\n\n minute = time.group('time')\n\n return minute",
"def extract_time(file):\n\n for line in file:\n\n if \"Execution time\" in line:\n # this is of the form: <li>Execution time: 412.930 s\n return float(line.split(\":\")[1].strip().split(\" \")[0])\n\n elif \"(seconds)\" in line:\n # this is the older form -- split on \"=\"\n # form: <p><b>Execution Time</b> (seconds) = 399.414828\n return float(line.split(\"=\")[1])\n\n raise RuntimeError()",
"def parse_time_str(self, time_str):\n try:\n return datetime.strptime(self.force_hour_two_digits(time_str), TIME_FORMAT).time()\n except ValueError:\n return None",
"def str_to_time(str):\n if not str:\n return str\n return datetime.datetime.strptime(str.split(\".\")[0], DEFAULT_SERVER_TIME_FORMAT).time()"
] | [
"0.7376245",
"0.69885296",
"0.69597244",
"0.680901",
"0.6714649",
"0.67008376",
"0.66874033",
"0.6630157",
"0.6610557",
"0.65271956",
"0.65221244",
"0.64632",
"0.6434758",
"0.64113086",
"0.6404067",
"0.6317093",
"0.63148624",
"0.6314434",
"0.631036",
"0.6283578",
"0.6228409",
"0.62017787",
"0.61992216",
"0.6177664",
"0.6176513",
"0.6169847",
"0.6165085",
"0.6160633",
"0.61364245",
"0.61197966"
] | 0.7262653 | 1 |
Format a number in engineering format, where the exponent is a multiple of 3 | def engFormat(f):
if f == 0.0:
value = 0.0
exponent = 0
else:
exponent = math.log10(-f if f < 0 else f)
if exponent < 0:
exponent = -int(math.ceil(-exponent))
else:
exponent = int(math.floor(exponent))
for i in range(3):
if (exponent % 3) == 0:
break
exponent = exponent - 1
value = f * 10 ** -exponent
# Choose a format to maintain the number of useful digits we print.
if abs(value) < 10:
fmt = "%6.3f%s"
elif abs(value) < 100:
fmt = "%6.2f%s"
else:
fmt = "%6.1f%s"
return fmt % (value, ("" if exponent == 0 else "e%d" % exponent)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_engineering( number, unit = \"\" ):\n if math.isnan(number):\n return \"nan\"\n if number == 0.0:\n return 0\n\n convert_table = {-18:'a', -15:'f', -12:'p', -9:'n', -6:'u',\n -3:'m', -2:'c', -1:'d', 0:'', 3:'k',\n 6:'M', 9:'G', 12:'T', 15:'P', 18:'E'}\n l10 = math.log10(abs(number))\n ten_exp = int(l10)\n\n sci_places = int(ten_exp / 3) * 3\n sci_signific = (ten_exp % 3)\n\n expo_char = convert_table[sci_places]\n trailing = number / 10.0 ** sci_places\n\n # print z, ten_exp, sci_places, sci_signific\n if trailing >= 10:\n lead = \"{:d}\".format(int(round(trailing)))\n elif trailing >= 1:\n lead = \"{:.1f}\".format(trailing)\n else:\n lead = \"{:.2f}\".format(trailing)\n return lead + \" \" + expo_char + unit",
"def _fmt(x, pos):\n a, b = '{:.2e}'.format(x).split('e')\n b = int(b)\n return r'${} \\times 10^{{{}}}$'.format(a, b)",
"def HighPrecisionE(number):\n\n return \"%.22e\" % number",
"def scinotation(self, num):\n num = num.replace(\"D\", \"e\")\n return f\"{decimal.Decimal(num):.9e}\"",
"def reformat(number):\n if number.find('E') == -1:\n exponent = \"-101\"\n mantissa = number.split(exponent)\n return float(mantissa[0])*10**float(exponent)\n else:\n mantissa, exponent = number.split('E')\n\n return float(mantissa)*10**float(exponent)",
"def pretty_float(i):\n if i == 0:\n return r'$0^{\\vphantom{0}}$'\n float_exponent = np.floor(np.log10(abs(i)))\n\n if -3 <= float_exponent <= 3:\n return r\"$%s^{\\vphantom{0}}$\" % str(i)[:6]\n lead_digit, exponent = (\"%.0e\" % i).split('e')\n return r\"$%s\\times 10^{%s}$\" % (lead_digit, exponent)",
"def latex_float(input_number):\n float_str = \"{0:.2g}\".format(input_number)\n if \"e\" in float_str:\n base, exponent = float_str.split(\"e\")\n return r\"${0} \\times 10^{{{1}}}$\".format(base, int(exponent))\n else:\n return float_str",
"def float_to_latex(x, format=\"%.2g\"):\n base_str = format % x\n if \"e\" not in base_str:\n return base_str\n mantissa, exponent = base_str.split(\"e\")\n if float(mantissa).is_integer():\n mantissa = int(float(mantissa))\n exponent = exponent.lstrip(\"0+\")\n if exponent.startswith('-0'):\n exponent = '-' + exponent[2:]\n if float(mantissa) == 1.0:\n return r\"10^{%s}\" % exponent\n else:\n return r\"%s\\!\\!\\times\\!\\!10^{%s}\" % (mantissa, exponent)",
"def exp(self, num, zf=2):\n return str(num).zfill(zf)",
"def format_power(value):\n try:\n return \"10^%.1f\" % math.log10(value)\n except:\n return \"\"",
"def format_number(number):\n return f'{number:8,}'",
"def number_formatter(number, pos=None):\n magnitude = 0\n while abs(number) >= 1000:\n magnitude += 1\n number /= 1000.0\n return '%.1f%s' % (number, ['', 'K', 'M', 'B', 'T', 'Q'][magnitude])",
"def latex_number(value):\n\n if isinstance(value, str):\n return value\n vstring = '%.4g' % value\n if vstring.find('e+0') > -1:\n vstring = vstring.replace('e+0', times + '10^{') + '}'\n elif vstring.find('e-0') > -1:\n vstring = vstring.replace('e-0', times + '10^{-') + '}'\n elif 'e' in vstring:\n vstring = vstring.replace('e', times + '10^{') + '}'\n if '.' in vstring and decimal_point != '.':\n vstring = vstring.replace('.', decimal_point)\n\n latex = vstring.replace('+', '')\n if \"^\" in latex:\n latex = '$%s$' % latex\n return latex",
"def get_str_from_expnotation(num):\n return '{0:.15f}'.format(num)",
"def scientific(x):\n return '{:.2e}'.format(x)",
"def scientific_notation(x, sigfigs=4, mode='eng'):\n\n times = u'\\u00d7'\n thinspace = u'\\u2009'\n hairspace = u'\\u200a'\n sups = {u'-': u'\\u207b',\n u'0': u'\\u2070',\n u'1': u'\\xb9',\n u'2': u'\\xb2',\n u'3': u'\\xb3',\n u'4': u'\\u2074',\n u'5': u'\\u2075',\n u'6': u'\\u2076',\n u'7': u'\\u2077',\n u'8': u'\\u2078',\n u'9': u'\\u2079'}\n\n prefixes = {\n -24: u\"y\",\n -21: u\"z\",\n -18: u\"a\",\n -15: u\"f\",\n -12: u\"p\",\n -9: u\"n\",\n -6: u\"\\u03bc\",\n -3: u\"m\",\n 0: u\"\",\n 3: u\"k\",\n 6: u\"M\",\n 9: u\"G\",\n 12: u\"T\",\n 15: u\"P\",\n 18: u\"E\",\n 21: u\"Z\",\n 24: u\"Y\"\n }\n\n if not isinstance(x, float):\n raise TypeError('x must be floating point number')\n if np.isnan(x) or np.isinf(x):\n return str(x)\n if x != 0:\n exponent = int(np.floor(np.log10(np.abs(x))))\n # Only multiples of 10^3\n exponent = int(np.floor(exponent / 3) * 3)\n else:\n exponent = 0\n\n significand = x / 10 ** exponent\n pre_decimal, post_decimal = divmod(significand, 1)\n digits = sigfigs - len(str(int(pre_decimal)))\n significand = round(significand, digits)\n result = str(significand)\n if exponent:\n if mode == 'exponential':\n superscript = ''.join(sups.get(char, char) for char in str(exponent))\n result += thinspace + times + thinspace + '10' + superscript\n elif mode == 'eng':\n try:\n # If our number has an SI prefix then use it\n prefix = prefixes[exponent]\n result += hairspace + prefix\n except KeyError:\n # Otherwise display in scientific notation\n superscript = ''.join(sups.get(char, char) for char in str(exponent))\n result += thinspace + times + thinspace + '10' + superscript\n return result",
"def format_score(original):\n return f\"{original:.3f}\"",
"def convert_to_scientific_notation(number):\n\n number = \"%.2e\" % number\n if \"+\" in number:\n positive_exponent = True\n number, exponent = number.split(\"+\")\n else:\n positive_exponent = False\n number, exponent = number.split(\"-\")\n\n exponent = str(int(exponent)) # Removes leading zeros\n\n if positive_exponent:\n return number + exponent\n else:\n return number + \"-\" + exponent",
"def num_repr(num):\n if num <= 9999:\n return str(num)\n\n def digit_count(x):\n \"\"\" Return number of digits. \"\"\"\n return int(math.floor(math.log10(x)) + 1)\n\n digits = digit_count(num)\n sig = 3 if digits % 3 == 0 else 2\n rounded = int(round(num, int(sig - digits)))\n digits = digit_count(rounded)\n suffix = \"_kmBTqXYX\"[(digits - 1) // 3]\n front = 3 if digits % 3 == 0 else digits % 3\n\n if not front == 1:\n return str(rounded)[0:front] + suffix\n\n return str(rounded)[0] + \".\" + str(rounded)[1] + suffix",
"def formatted_number(number):\n try:\n number = int(number)\n if number < 0:\n return '-' + formatted_number(-number)\n result = ''\n while number >= 1000:\n number, number2 = divmod(number, 1000)\n result = \",%03d%s\" % (number2, result)\n return \"%d%s\" % (number, result)\n except Exception:\n return \"\"",
"def format_integer(num, max_num_chars=15, sci_notation=False):\n abs_num = abs(num)\n orig_str = str(abs_num)\n orig_len = len(orig_str)\n num_commas = (orig_len-1) // 3 if abs_num >= 1000 else 0\n neg_sign_len = 1 if num < 0 else 0\n final_len = orig_len + num_commas + neg_sign_len\n if sci_notation or final_len > max_num_chars:\n num_str = '%.1e' % abs_num\n elif 0 <= abs_num < 1000:\n num_str = orig_str\n else:\n first_chars = ((orig_len - 1) % 3) + 1\n char_list = [orig_str[:first_chars]] + [orig_str[i:i+3] for i in range(first_chars, orig_len, 3)]\n num_str = ','.join(char_list)\n if num < 0:\n num_str = '-' + num_str\n return num_str",
"def MakeHumanReadable(num):\n i = 0\n while i+1 < len(EXP_STRINGS) and num >= (2 ** EXP_STRINGS[i+1][0]):\n i += 1\n rounded_val = round(float(num) / 2 ** EXP_STRINGS[i][0], 2)\n return '%s %s' % (rounded_val, EXP_STRINGS[i][1])",
"def eur(value):\n float(value)\n return f\"€{value:,.2f}\"",
"def format_large_number(num):\n if num < 1e4: # 10,000\n # https://stackoverflow.com/a/10742904\n return '{:,}'.format(num)\n\n for exp, prefix in iteritems(Formatter.NUM_PREFIX):\n fraction = float(num) / float(10**exp)\n if 1 <= fraction < 1e3:\n return '{0:.1f}{1}'.format(fraction, prefix)\n\n # fallback to the highest defined defined prefix\n highest = max(Formatter.NUM_PREFIX)\n # comma separated in case there are more than 1,000 digits\n return '{0:,.1f}{1}'.format(\n float(num) / float(10**highest),\n Formatter.NUM_PREFIX[highest],\n )",
"def format_value(val, dec_places=4):\r\n val_str = \"{:.{dec}E}\".format(val, dec=dec_places)\r\n if val_str.endswith('+00'):\r\n val_str = \"{:.{dec}f}\".format(val, dec=dec_places)\r\n return val_str",
"def format_num(number) -> str:\n should_be_padded = isinstance(number, (float, str))\n if not isinstance(number, str):\n number = tqdm.format_num(number)\n if should_be_padded and 'e' not in number:\n if '.' not in number and len(number) < 5:\n try:\n _ = float(number)\n except ValueError:\n return number\n number += '.'\n number += \"0\" * (5 - len(number))\n return number",
"def format_val(self, val: float) -> str:\n\n val_conv = val / UNITS[self._unit]\n return f\"{val_conv:.3f} {self._unit}\"",
"def latex_format(n,u, precision = 1):\n if u<0:\n raise ValueError(\"uncertainty cannot be negative\")\n x = ufloat(n,u)\n s = (\"{:.\"+str(precision)+\"uS}\").format(x)\n # s = (\"{:.\"+str(precision)+\"L}\").format(x)\n # This removes a decimal point in uncertainty\n # so that flipping LaTex SiunitX can swallow it\n m = re.search('([^\\(]+\\()([^\\(]+\\))', s)\n g1 = m.group(1)\n g2 = m.group(2)\n g2 = re.sub('\\.','',g2)\n s = g1+g2\n return s",
"def formatSI(n: float) -> str:\n s = ''\n if n < 0:\n n = -n\n s += '-'\n if type(n) is int and n < 1000:\n s = str(n) + ' '\n elif n < 1e-22:\n s = '0.00 '\n else:\n assert n < 9.99e26\n log = int(math.floor(math.log10(n)))\n i, j = divmod(log, 3)\n for _try in range(2):\n templ = '%.{}f'.format(2 - j)\n val = templ % (n * 10 ** (-3 * i))\n if val != '1000':\n break\n i += 1\n j = 0\n s += val + ' '\n if i != 0:\n s += 'yzafpnum kMGTPEZY'[i + 8]\n return s",
"def friendly_number(number, base=1000, decimals=0, suffix='',\n\t\t\t\t\tpowers=['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']):\n\tfrom math import log, pow\n\textend = pow(10, decimals)\n\tpower_index = int(log(number * extend, base))\n\tpower = powers[power_index]\n\tif decimals:\n\t\tcut_off_length = base * power_index - decimals\n\t\tstr_num = str(number)[:-cut_off_length]\n\t\tif number[-cut_off_length] >= '5':\n\t\t\tstr_num = str(int(str_num)+1)\n\n\treal = number / power\n\treturn str(number)"
] | [
"0.7254678",
"0.6709356",
"0.65849704",
"0.6535096",
"0.64507467",
"0.64036494",
"0.63798475",
"0.6357471",
"0.6309954",
"0.6143084",
"0.61399496",
"0.6122176",
"0.61172795",
"0.6037605",
"0.59719115",
"0.5958524",
"0.58732384",
"0.5864186",
"0.5839863",
"0.5835042",
"0.58245146",
"0.58160317",
"0.5804051",
"0.577131",
"0.5765837",
"0.57586294",
"0.5744802",
"0.5740902",
"0.57060707",
"0.5704342"
] | 0.6764551 | 1 |
If a measurement has Min and Max, we can convert them into a notional error bar by replacing the name_SD field with a [minNamevalue, maxNamevalue] pair | def convertMinMaxIntoError(m, name, minName, maxName):
minVal = m.__dict__.get(minName, None)
maxVal = m.__dict__.get(maxName, None)
if maxVal == None or minVal == None:
return None
value = m.__dict__[name]
return [[value - minVal], [maxVal - value]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determinePlotLimits(self):\n max_str = \"up99\"\n min_str = \"dn99\"\n if self.keywords.get(\"limit_type\",\"99per\") == \"minmax\":\n max_str = \"max\"\n min_str = \"min\"\n \n # Determine the min/max of variables over all models\n limits = {}\n prune = False\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n if \"MeanState\" not in dataset.groups: continue\n group = dataset.groups[\"MeanState\"]\n variables = [v for v in group.variables.keys() if v not in group.dimensions.keys()]\n for vname in variables:\n var = group.variables[vname]\n pname = vname.split(\"_\")[0]\n region = vname.split(\"_\")[-1]\n if var[...].size <= 1: continue\n if space_opts.has_key(pname):\n if not limits.has_key(pname):\n limits[pname] = {}\n limits[pname][\"min\"] = +1e20\n limits[pname][\"max\"] = -1e20\n limits[pname][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][\"min\"] = min(limits[pname][\"min\"],var.getncattr(min_str))\n limits[pname][\"max\"] = max(limits[pname][\"max\"],var.getncattr(max_str))\n elif time_opts.has_key(pname):\n if not limits.has_key(pname): limits[pname] = {}\n if not limits[pname].has_key(region):\n limits[pname][region] = {}\n limits[pname][region][\"min\"] = +1e20\n limits[pname][region][\"max\"] = -1e20\n limits[pname][region][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][region][\"min\"] = min(limits[pname][region][\"min\"],var.getncattr(\"min\"))\n limits[pname][region][\"max\"] = max(limits[pname][region][\"max\"],var.getncattr(\"max\"))\n if not prune and \"Benchmark\" in fname and pname == \"timeint\":\n prune = True\n self.pruneRegions(Variable(filename = fname,\n variable_name = vname,\n groupname = \"MeanState\"))\n \n # Second pass to plot legends (FIX: only for master?)\n for pname in limits.keys():\n\n try:\n opts = space_opts[pname]\n except:\n continue\n \n # Determine plot limits and colormap\n if opts[\"sym\"]:\n vabs = max(abs(limits[pname][\"min\"]),abs(limits[pname][\"min\"]))\n limits[pname][\"min\"] = -vabs\n limits[pname][\"max\"] = vabs\n\n # if a score, force to be [0,1]\n if \"score\" in pname:\n limits[pname][\"min\"] = 0\n limits[pname][\"max\"] = 1\n\n limits[pname][\"cmap\"] = opts[\"cmap\"]\n if limits[pname][\"cmap\"] == \"choose\": limits[pname][\"cmap\"] = self.cmap\n\n # Plot a legend for each key\n if opts[\"haslegend\"]:\n fig,ax = plt.subplots(figsize=(6.8,1.0),tight_layout=True)\n label = opts[\"label\"]\n if label == \"unit\": label = limits[pname][\"unit\"]\n post.ColorBar(ax,\n vmin = limits[pname][\"min\"],\n vmax = limits[pname][\"max\"],\n cmap = limits[pname][\"cmap\"],\n ticks = opts[\"ticks\"],\n ticklabels = opts[\"ticklabels\"],\n label = label)\n fig.savefig(os.path.join(self.output_path,\"legend_%s.png\" % (pname))) \n plt.close()\n\n # Determine min/max of relationship variables\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n for g in dataset.groups.keys():\n if \"relationship\" not in g: continue\n grp = dataset.groups[g]\n if not limits.has_key(g):\n limits[g] = {}\n limits[g][\"xmin\"] = +1e20\n limits[g][\"xmax\"] = -1e20\n limits[g][\"ymin\"] = +1e20\n limits[g][\"ymax\"] = -1e20\n limits[g][\"xmin\"] = min(limits[g][\"xmin\"],grp.variables[\"ind_bnd\"][ 0, 0])\n limits[g][\"xmax\"] = max(limits[g][\"xmax\"],grp.variables[\"ind_bnd\"][-1,-1])\n limits[g][\"ymin\"] = min(limits[g][\"ymin\"],grp.variables[\"dep_bnd\"][ 0, 0])\n limits[g][\"ymax\"] 
= max(limits[g][\"ymax\"],grp.variables[\"dep_bnd\"][-1,-1])\n\n \n self.limits = limits",
"def draw_errbars(labels, # type: thelper.typedefs.LabelList\n min_values, # type: thelper.typedefs.ArrayType\n max_values, # type: thelper.typedefs.ArrayType\n stddev_values, # type: thelper.typedefs.ArrayType\n mean_values, # type: thelper.typedefs.ArrayType\n xlabel=\"\", # type: thelper.typedefs.LabelType\n ylabel=\"Raw Value\", # type: thelper.typedefs.LabelType\n show=False, # type: Optional[bool]\n block=False, # type: Optional[bool]\n ): # type: (...) -> thelper.typedefs.DrawingType\n if min_values.shape != max_values.shape \\\n or min_values.shape != stddev_values.shape \\\n or min_values.shape != mean_values.shape:\n raise AssertionError(\"input dim mismatch\")\n if len(min_values.shape) != 1 and len(min_values.shape) != 2:\n raise AssertionError(\"input dim unexpected\")\n if len(min_values.shape) == 1:\n np.expand_dims(min_values, 1)\n np.expand_dims(max_values, 1)\n np.expand_dims(stddev_values, 1)\n np.expand_dims(mean_values, 1)\n nb_subplots = min_values.shape[1]\n fig, axs = plt.subplots(nb_subplots)\n xrange = range(len(labels))\n for ax_idx in range(nb_subplots):\n ax = axs[ax_idx]\n ax.locator_params(nbins=nb_subplots)\n ax.errorbar(xrange, mean_values[:, ax_idx], stddev_values[:, ax_idx], fmt='ok', lw=3)\n ax.errorbar(xrange, mean_values[:, ax_idx], [mean_values[:, ax_idx] - min_values[:, ax_idx],\n max_values[:, ax_idx] - mean_values[:, ax_idx]],\n fmt='.k', ecolor='gray', lw=1)\n ax.set_xticks(xrange)\n ax.set_xticklabels(labels, visible=(ax_idx == nb_subplots - 1))\n ax.set_title(\"Band %d\" % (ax_idx + 1))\n ax.tick_params(axis=\"x\", labelsize=\"6\", labelrotation=45)\n fig.set_tight_layout(True)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, axs",
"def derr(min, max):\n return lambda mate: min <= mate['d_err'] <= max",
"def min_max_outliers(res, min=None, max=None):\n min_max_list = []\n if isinstance(min, (int, float)):\n data1 = res[res < min].reset_index()\n data1['limit type'] = 'minimum'\n data1['limit'] = min\n min_max_list.append(data1)\n if isinstance(max, (int, float)):\n data1 = res[res > max].reset_index()\n data1['limit type'] = 'maximum'\n data1['limit'] = max\n min_max_list.append(data1)\n\n min_max1 = pd.concat(min_max_list)\n\n return min_max1",
"def normalize_data(data, min=0, max=1):\r\n import numpy as np\r\n assert isinstance(data, np.ndarray)\r\n\r\n max_value = np.max(data)\r\n min_value = np.min(data)\r\n\r\n scaled = np.interp(data, [min_value, max_value], [min, max])\r\n # convert to float64\r\n scaled = scaled.astype(np.float64)\r\n\r\n return scaled",
"def limits(array, names):\n\n args = ['%s(%s)' % (f, n)\n for n in names\n for f in ['min', 'max']]\n result = array.afl.aggregate(array, *args).toarray()\n return dict((n, (int(result['%s_min' % n][0]), int(result['%s_max' % n][0])))\n for n in names)",
"def collect_rms(self, rms):\n if self._data:\n self._data['min'] = min(rms, self._data['min'])\n self._data['max'] = max(rms, self._data['max'])\n self._data['avg'] = float(rms + self._data['avg']) / 2\n else:\n self._data['min'] = rms\n self._data['max'] = rms\n self._data['avg'] = rms",
"def get_dataset_normalization_mean_std(name):\n if name == 'em-corr-arduino' or name == 'em-cpa-arduino':\n mean = 0.014595353784991782\n std = 0.006548281541447703\n elif name == 'ASCAD':\n mean = -11.587280595238095\n std = 25.75363459386104\n elif name == 'ASCAD_desync50':\n mean = -11.195121833333333\n std = 25.89963055607876\n elif name == 'ASCAD_desync100':\n mean = -11.093145738095238\n std = 26.11483790582092\n else:\n return 0.0, 1.0\n\n return mean, std",
"def describe(name, values):\n log.info(\"Describing %s component.\" % name)\n abs_max_indices = np.unravel_index(np.argmax(np.abs(values), axis=None), values.shape)\n abs_max_polar = zdi_geometry.centers()[0][abs_max_indices]\n abs_max_azimuth = zdi_geometry.centers()[1][abs_max_indices]\n abs_max = np.abs(values[abs_max_indices])\n\n abs_mean = np.sum(np.abs(values) * zdi_geometry.areas()) / (4 * np.pi)\n abs_rms = (np.sum(values**2 * zdi_geometry.areas()) / (4 * np.pi))**.5\n abs_std = (np.sum((np.abs(values) - abs_mean)**2 * zdi_geometry.areas()) / (4 * np.pi))**.5\n\n # This is a statistical identity.\n assert np.isclose(abs_rms**2, abs_mean**2 + abs_std**2), \"RMS does not match mean and std.\"\n\n dest[f\"magnetogram.{name}.abs.max\"] = abs_max\n log.info(f\"{name} |B|_max = %4.4g Gauss\" % abs_max)\n log.info(f\"{name} |B|_max at az=%2.2f deg, pl=%3.2f deg\" % (np.rad2deg(abs_max_azimuth),\n np.rad2deg(abs_max_polar)))\n log.info(f\"{name} |B|_mean = %4.4g Gauss\" % abs_mean)\n log.info(f\"{name} |B|_var = %4.4g Gauss\" % abs_std)\n dest[f\"magnetogram.{name}.abs.mean\"] = abs_mean\n dest[f\"magnetogram.{name}.abs.rms\"] = abs_rms\n dest[f\"magnetogram.{name}.abs.std\"] = abs_std\n\n return abs_mean",
"def standardize(data, params=None, rangeval=1.0):\n\t\n\tif params == None:\n\t\t(mins, maxs) = (np.min(data, axis=0), np.max(data, axis=0))\n\telse:\n\t\t(mins, maxs) = params\n\n\tdelta_vals = maxs-mins\n\tdelta_vals[delta_vals < 1e-10] = 1e-9\n\n\tstd_data = (data - mins)/delta_vals\n\tstd_data = -rangeval + 2.0*rangeval * std_data\n\t\n\tif params == None:\n\t\treturn (std_data, (mins, maxs))\n\telse:\n\t\treturn std_data",
"def getMinMax(self,arr):\n minz=arr['zmg']-arr['sigma_pz']*5\n dmin=self.zcat-5*self.sigmacat\n minz[np.where(minz>dmin)]=dmin\n maxz=arr['zmg']+arr['sigma_pz']*5\n dax=self.zcat+5*self.sigmacat\n maxz[np.where(maxz<dmax)]=dmax\n return dmin,dmax",
"def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n\n return",
"def extreme_values(self, extreme):\n\n\t\tif extreme.lower() == 'min':\n\t\t\treturn data.min()\n\t\telif extreme.lower() == 'max':\n\t\t\treturn data.max()\n\t\telse:\n\t\t\tassert 'Invalid Parameter !'",
"def set_mapping(self, value_min, value_min_raw, value_max, value_max_raw):\n assert value_min <= value_max\n # prevent division by zero.\n if value_min == value_max:\n value_max += 1.\n if value_min_raw == value_max_raw:\n value_max_raw += 1.\n self.value_min = value_min\n self.value_max = value_max\n self.value_min_raw = value_min_raw\n self.value_max_raw = value_max_raw\n self._value_scale = (self.value_max - self.value_min) / (self.value_max_raw - self.value_min_raw)",
"def reset_low_high(self, name):\n self.range_dict[name][\"low\"] = self.range_dict[name][\"low_default\"]\n self.range_dict[name][\"high\"] = self.range_dict[name][\"high_default\"]\n self.limit_dict[name][\"low\"] = 0.0\n self.limit_dict[name][\"high\"] = 100.0\n self.show_image()",
"def normalize_data(mjd, flux, ignore):\n\n t = mjd - np.min(mjd)\n y_err = np.sqrt(flux) \n y = flux[t > ignore]\n y_err = y_err[t > ignore]\n t = t[t > ignore]\n y_err /= np.max(y)\n y /= np.max(y)\n\n sorted_tups = sorted(list(zip(t, y, y_err)), key=lambda x: x[0])\n t = np.array([tup[0] for tup in sorted_tups])\n y = np.array([tup[1] for tup in sorted_tups])\n y_err = np.array([tup[2] for tup in sorted_tups])\n\n return t, y, y_err",
"def __init__(self) -> None:\n self.name = \"minmaxScaler\"\n self.min = 0\n self.max = 0",
"def checkranges(self, var, name):\r\n\r\n # reshape var\r\n assert len(var) == 2, \"%s must have two elements,\" % name\r\n var = np.array([float(v) for v in var])\r\n\r\n # check values\r\n if name in [\"arange\", \"Rprange\", \"Mprange\"]:\r\n assert np.all(var > 0), \"%s values must be strictly positive\" % name\r\n if name in [\"erange\", \"prange\"]:\r\n assert np.all(var >= 0) and np.all(var <= 1), (\r\n \"%s values must be between 0 and 1\" % name\r\n )\r\n\r\n # the second element must be greater or equal to the first\r\n if var[1] < var[0]:\r\n var = var[::-1]\r\n\r\n return var",
"def min_max(self, data, era):\n return 0, np.max(data)",
"def __init__(self, min_value=0.0, max_value=1.0, input_name=\"image\", output_name=\"image\"):\n super().__init__(input_name=input_name, output_names=[output_name])\n self.min_value = min_value\n self.max_value = max_value",
"def get_minmax_stats(dataframe, variable):\n\n print(\"Maximum value of \", variable, \"is: \", dataframe[variable].max())\n print(\"Minimum value of \", variable, \"is: \", dataframe[variable].min())",
"def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []",
"def __init__(self, name, value, description = 'Numeric Data', limits = None, initvalue = None, unit = ''):\n super().__init__(name=name, description=description, value=value, limits=limits , initvalue=initvalue, unit = unit)",
"def ds_preprocessing(x, error_threshold, min_val=0, max_val=1):\n # Scale in range [min_val, max_val]\n scaler = MinMaxScaler((min_val, max_val))\n processed = scaler.fit_transform(x)\n\n # Quantization\n bins = np.arange(min_val, max_val, 2 * error_threshold)\n digitized = np.digitize(processed, bins)\n quantized = (digitized - 1) * (2 * error_threshold) + error_threshold\n\n return quantized, scaler",
"def __str__(self):\n\t\tself.__normalize()\n\t\tmyStr ='# min = %lf\\n'%(float(self._min))\n\t\tmyStr+='# max = %lf\\n'%(float(self._max))\n\t\tmyStr+='# dx = %lf\\n'%(float(self._dx))\n\t\tmyStr+='# av = %lf (sErr = %lf)\\n'%(self.av,self.sErr)\n\t\tmyStr+='# sDev = %lf\\n'%(self.sDev)\n\t\tmyStr+='# xLow xHigh p(xLow <= x < xHigh) Gaussian_error\\n'\n\t\tfor bin in range(self._nBins): \n\t\t\tlow,up=self.__bdry(bin)\n\t\t\tmyStr+='%lf %lf %lf %lf\\n'%(low,up,self._norm[bin], self.__GErr(bin))\n\t\treturn myStr",
"def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n return",
"def _normalize_measure(value, maximum=1.0, center=0.0):\n if isiterable(value):\n value = np.asarray(value)\n if isiterable(center):\n center = np.asarray(center)\n if isiterable(maximum):\n maximum = np.asarray(maximum)\n return np.divide(value - center, maximum - center)",
"def transformMeasurementError(self):\n var = self.model.observationError**2\n self.errShD = self.model.observationError\n self.errSinvD = 1.0/var\n self.errSinvhD = np.sqrt(self.errSinvD)",
"def normalize(s, lo_pctl=0.01, hi_pctl=.99):\n\n data_types = {pd.core.series.Series: (lambda x: x.values),\n np.ndarray: (lambda x: x), \n list: (lambda x: np.array(x))}\n\n this_type = type(s)\n assert this_type in data_types.keys(), 'invalid data type. Enter numpy array, pandas series , or list of float.'\n \n for b in [lo_pctl, hi_pctl]:\n assert (b >= 0) & (b <= 1), 'invalid winsor bound. Value must be fraction: > 0 and < 1.'\n assert lo_pctl < hi_pctl, 'invalid winsor bound. First item '\n\n y = data_types[type(s)](s)\n z = np.empty(y.shape)\n z[:] = np.nan\n\n # Compute mean and stdev excluding outliers defined by lo and hi_pctl\n if len(y) > 1:\n upper_bound = np.nanquantile(y, hi_pctl)\n lower_bound = np.nanquantile(y, lo_pctl)\n with np.errstate(invalid='ignore'): # ignore stupid warning about 'invalid value encountered in less than'\n mu = np.nanmean(y[(y >= lower_bound) & (y <= upper_bound)])\n sigma = np.nanstd(y[(y >= lower_bound) & (y <= upper_bound)])\n\n if sigma == 0:\n sigma = np.nan\n\n # Compute normalized variable\n with np.errstate(invalid='ignore'):\n y[y < lower_bound] = lower_bound\n y[y > upper_bound] = upper_bound\n z = (y - mu) / sigma\n\n return z",
"def check_range(number: object, min_r: float, max_r: float, name: str = \"\") -> float:\n if not isinstance(number, (float, int)):\n raise FFmpegNormalizeError(f\"{name} must be an int or float\")\n if number < min_r or number > max_r:\n raise FFmpegNormalizeError(f\"{name} must be within [{min_r},{max_r}]\")\n return number"
] | [
"0.55634105",
"0.5557625",
"0.5547021",
"0.54915667",
"0.53918475",
"0.5362521",
"0.5332207",
"0.5294759",
"0.5274828",
"0.52670044",
"0.5266675",
"0.5235942",
"0.5220849",
"0.51992804",
"0.51608247",
"0.51373273",
"0.51251656",
"0.5120511",
"0.51145077",
"0.5092589",
"0.5086175",
"0.50773865",
"0.50647813",
"0.5039673",
"0.50254345",
"0.5020943",
"0.501092",
"0.5008461",
"0.50070196",
"0.5000486"
] | 0.66927224 | 0 |
Add an overall mean for the given field | def addOverallMeans(results, fieldNames, fields):
# Work out what the values we already have look like
meanValues = ["Overall Mean"]
geoMeanValues = ["Overall Geometric Mean"]
for name in fieldNames[1:]:
if name in fields:
values = [r.__dict__[name] for r in results]
geoMeanValues.append(geomean(values))
meanValues.append(mean(values))
else:
geoMeanValues.append(0)
meanValues.append(0)
results.append(measurement(fieldNames, meanValues))
results.append(measurement(fieldNames, geoMeanValues))
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getMean(self, field):\n\n return np.mean([self.fitnesses[i][field] for i in range(len(self.fitnesses))])",
"def fmean(field):\n warnings.simplefilter(\"ignore\")\n return np.nanmean(field, axis=3, keepdims=True)",
"def mean(self, field, axis=None, region=None):\n # \n # If a region is given, extract region if shape of field matches that\n # of grid else check that region has already been extracted from field\n if region is None:\n assert field.shape == self.shape\n elif field.shape == self.shape:\n field = region.extract(field)\n else:\n assert field.shape == region.shape\n # Determine area weights for mean calculation\n area = self.gridpoint_area[:,None] if region is None else region.gridpoint_area[:,None]\n # Pick normalization depending on axis over which mean is taken\n if axis is None:\n return (field * area).sum() / area.sum() / field.shape[1]\n elif axis == 0 or axis == -2 or axis == \"meridional\":\n return ((field * area).sum(axis=0) / area.sum(axis=0))\n elif axis == 1 or axis == -1 or axis == \"zonal\":\n return field.mean(axis=1)\n else:\n raise ValueError(\"invalid value for axis parameter: {}\".format(axis))",
"def add_mean(mean):\n return sum(mean)/len(mean)",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def mean(self):\n return self._summarize(lambda c: c.mean)",
"def mean(self, avg=True):\n if not self.fp_init:\n if not avg:\n return self._calc_mean(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_mean(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._mean if not avg else self._mean_avg",
"def mean(self):\n return self._lift(\"mean\")",
"def modelmean(self, model_params, this_data, this_suff_stat):\n pass",
"def avg(self, column):\n self.aggregate(\"AVG\", \"{column}\".format(column=column))\n return self",
"def mean(self):\n return self.aggregate(np.mean)",
"def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)",
"def mean(self):\n return self._mean_func",
"def mean(self, like_params):\n\t\traise NotImplementedError",
"def mean(self, weight_by_area=True):\n if weight_by_area:\n return self.integral() / self.indicator.integral()\n else:\n return self.sum() / self.indicator.sum()",
"def _mean(items):\n return sum(items) / len(items)",
"def get_mean(self):\n average = self.df[self.col_name].mean()\n return average",
"def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)",
"def my_mean(x):\n return my_sum(x) / my_len(x)",
"def mean(self, mean):\n\n self._mean = mean",
"def mean(self):\n return self.vmean",
"def mean(self):\n\n return time_stat(self, stat=\"mean\")",
"def mean(self):\n return self._mean",
"def mean(self):\n return self._mean",
"def avg(self, column):\n\n return self.aggregate('avg', *[column])",
"def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)",
"def findMean (*args):\r\n total = my_module.addStuff(*args)\r\n return total/len(args)",
"def calccalmean(self,blk):\n calind=self.getcalind(blk)\n x=self.spec[calind,:]\n return np.nanmean(x,axis=0)",
"def mean(self):\n return self.sum / self.sum_weights",
"def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu"
] | [
"0.7278746",
"0.7135875",
"0.6684064",
"0.6644748",
"0.6622957",
"0.65778464",
"0.6480201",
"0.644461",
"0.6373308",
"0.6351624",
"0.6294219",
"0.62699544",
"0.62606704",
"0.6239302",
"0.62274325",
"0.6176736",
"0.6157787",
"0.6149142",
"0.6136901",
"0.61185",
"0.6103634",
"0.61033887",
"0.6067405",
"0.6067405",
"0.6053849",
"0.60535693",
"0.60418206",
"0.6036822",
"0.60285956",
"0.6020606"
] | 0.7149263 | 1 |
Find each set of results with the same first parameter, compute the min of their means and then scale all their results by that. The aim here is to scale results for specific processors in a way that is internally consistent. Scaling each run by its own min can be misleading, since then results which were smaller can look larger when compared with others for the same processor where the min was different but the range larger. | def normalizeResults(results, independentVariable, basis):
normValues = {}
if basis == "min":
reduction = min
elif basis == "mean":
reduction = mean
elif basis == "max":
reduction = max
print("Normalising by " + basis)
for k in results.keys():
ik = k.split(",")[0]
if ik not in normValues.keys():
normValues[ik] = []
values = []
for iv in results[k].keys():
values += [results[k][iv].__dict__["Mean"]]
normValues[ik] += values
for ik in normValues.keys():
normValues[ik] = reduction(normValues[ik])
# And now scale everything
for k in results.keys():
ik = k.split(",")[0]
norm = normValues[ik]
experiment = results[k]
for line in experiment.values():
for value in line.__dict__.keys():
if value == independentVariable:
continue
line.__dict__[value] = line.__dict__[value] / norm | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scale_together(data, comp):\n scales = []\n guess = 1.\n s = opt.minimize(sq_residuals_in_range, guess, args = (data, comp), \n method = 'Nelder-Mead').x\n return s",
"def cluster_means_scaled(self):\n if self.evaluate_by is not None:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().transpose())",
"def runmaxmin(self):\n import random\n random.seed(self.seed)\n mindist_ptolandmarkset = np.full(self.pointcloud.size, np.inf)\n self.subsetindices = []\n for i in xrange(self.subsetsize):\n if i == 0:\n selected_index = random.randint(0, self.pointcloud.size - 1)\n # update min for all the rest indices\n # update min for this index to 0.\n for z in xrange(self.pointcloud.size):\n # if z == selected_index:\n # mindist_ptolandmarkset[z] = 0.0\n # else:\n mindist_ptolandmarkset[z] = self.pointcloud.distmat[selected_index][z]\n else:\n selected_index = np.argmax(mindist_ptolandmarkset)\n # update minimum distance for all points\n for z in xrange(self.pointcloud.size):\n mindist_ptolandmarkset[z] = min(mindist_ptolandmarkset[z],\n self.pointcloud.distmat[selected_index][z])\n\n self.subsetindices.append(selected_index)\n\n self.subsetpointcloud = pc.PointCloud(self.pointcloud.points[self.subsetindices])",
"def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))",
"def selectedT1(results, what, doMin=True):\n selectedCount = min(\n [k for k in transpose(results).keys() if not isinstance(k, str)]\n )\n selectedVal = 1e9 if doMin else -1e9\n\n # print \"selectedCount \" + str(selectedCount)\n\n comparison = min if doMin else max\n for k in list(results.keys()):\n res = results[k]\n thisVal = (\n None\n if res.get(selectedCount, None) == None\n else res[selectedCount].__dict__.get(what, None)\n )\n if thisVal != None:\n selectedVal = comparison(selectedVal, thisVal)\n if selectedCount != 1:\n print(\"Using time for \" + str(selectedCount) + \" threads as scale basis\")\n if doMin:\n return (selectedVal * selectedCount, selectedCount)\n else:\n return (selectedVal / selectedCount, selectedCount)",
"def calc_min(data: list) -> float:\n acc = data[0]\n for n in data:\n if n < acc:\n acc = n\n return float(acc)",
"def min_max_normalization(x, min_x = None, max_x = None):\n if min_x is None:\n min_x = np.min(x, axis=0)\n if max_x is None:\n max_x = np.max(x, axis=0)\n return (x - (min_x)) / (max_x - min_x), min_x, max_x",
"def _compute_normalised_scores(self):\n\n results = self.snapshot['results']\n assg = AssignmentConfig().get_assignment()\n\n if results:\n self.snapshot['best_average_bugs_detected'] = \\\n max([results[submitter]['average_bugs_detected'] for submitter in results])\n self.snapshot['best_average_tests_evaded'] = \\\n max([results[submitter]['average_tests_evaded'] for submitter in results])\n\n for submitter in results.keys():\n submitter_bugs_detected = float(results[submitter]['average_bugs_detected'])\n submitter_tests_escaped = float(results[submitter]['average_tests_evaded'])\n\n results[submitter]['normalised_test_score'] = assg.compute_normalised_test_score(\n submitter_bugs_detected, self.snapshot['best_average_bugs_detected'],\n self.snapshot['results'][submitter]['average_tests_per_suite']\n )\n\n results[submitter]['normalised_prog_score'] = assg.compute_normalised_prog_score(\n submitter_tests_escaped, self.snapshot['best_average_tests_evaded']\n )\n\n # The current scoring algo for tests doesn't give the best test suite a maximums score.\n # re-normalise to make this happen\n best_test_score = max([results[submitter]['normalised_test_score'] for submitter in results.keys()])\n if best_test_score == 0:\n best_test_score = 1\n for submitter in results.keys():\n new_score = round(results[submitter]['normalised_test_score'] * (2.5 / best_test_score), 2)\n results[submitter]['normalised_test_score'] = new_score",
"def normalize(scores, default_score):\n if len(scores) > 0:\n max_scores = max(scores)\n min_scores = min(scores)\n\n if max_scores == min_scores:\n return [default_score] * len(scores)\n scores = [float((score - min_scores) / (max_scores - min_scores)) for score in scores]\n return scores\n else:\n return [default_score]",
"def min_max_normalize_one_image(image):\n\n image = image.astype(np.float32)\n for i in range(len(image)):\n max_int = image[i].max()\n min_int = image[i].min()\n image[i] = (image[i] - min_int) / (max_int - min_int)\n\n return image",
"def nudged_min_max_compute(min_broadcast, max_broadcast, num_bits, narrow_range):\n\n\n dtype = min_broadcast.dtype\n quant_min = 1 if narrow_range else 0\n quant_max = (2 ** num_bits) - 1\n\n # because of need compute each channel, so quant_min and quant_max need to broadcast.\n quant_min_float = topi.full(min_broadcast.shape, dtype, tvm.const(quant_min, dtype))\n quant_max_float = topi.full(min_broadcast.shape, dtype, tvm.const(quant_max, dtype))\n\n # caculate each channel max and min difference.\n max_sub_min = topi.subtract(max_broadcast, min_broadcast)\n quant_max_sub_quant_min = topi.subtract(quant_max_float, quant_min_float)\n # compute scale = (max_broadcast - min_broadcast) / (quant_max - quant_min)\n # and min_div_scale = min_broadcast / scale\n if product_is_mini():\n scale = mul(max_sub_min, reciprocal(quant_max_sub_quant_min), target=utils.CCE)\n min_div_scale = Mul(min_broadcast, reciprocal(scale), target=utils.CCE)\n else:\n scale = divide(max_sub_min, quant_max_sub_quant_min, target=utils.CCE)\n min_div_scale = divide(min_broadcast, scale, target=utils.CCE)\n\n # zero_point_from_min = quant_min_float - min_broadcast / scale\n zero_point_from_min = topi.subtract(quant_min_float, min_div_scale)\n # if zero_point_from_min < quant_min_float, bool_less_quant_min_float = 1 else 0\n bool_less_quant_min_float = less_compare_float32(zero_point_from_min, quant_min_float)\n # if quant_max_float < zero_point_from_min, bool_more_quant_max_float = 1 else 0\n bool_more_quant_max_float = less_compare_float32(quant_max_float, zero_point_from_min)\n\n # according to above bool param to select effective value\n less_quant_min_float = topi.multiply(quant_min_float, bool_less_quant_min_float)\n more_quant_max_float = topi.multiply(quant_max_float, bool_more_quant_max_float)\n\n # compute which num is not less than quant_min_float and not large than quant_max_float\n tensor_one = topi.full(min_broadcast.shape, dtype, dc.one_const(dtype))\n bool_not_less_quant_min_float = topi.subtract(tensor_one, bool_less_quant_min_float)\n bool_not_more_quant_max_float = topi.subtract(tensor_one, bool_more_quant_max_float)\n bool_between_min_max = topi.multiply(bool_not_less_quant_min_float, bool_not_more_quant_max_float)\n between_min_max_float = topi.multiply(zero_point_from_min, bool_between_min_max)\n # add 0.5 to num which min <= num <= max and then floor them.\n between_min_max_add_half_one = topi.add(between_min_max_float, dc.half_const(dtype))\n between_min_max_round = akg.lang.ascend.floor(between_min_max_add_half_one)\n if product_is_mini():\n between_min_max_round = topi.cast(between_min_max_round, \"float16\")\n\n between_min_max_round = topi.cast(between_min_max_round, \"float32\")\n\n # calculate the maximum and minimum values of the quantization\n nudged_zero_point_tmp = topi.add(less_quant_min_float, more_quant_max_float)\n nudged_zero_point = topi.add(nudged_zero_point_tmp, between_min_max_round)\n\n nudged_min_tmp = topi.subtract(quant_min_float, nudged_zero_point)\n nudged_max_tmp = topi.subtract(quant_max_float, nudged_zero_point)\n nudged_min = topi.multiply(nudged_min_tmp, scale)\n nudged_max = topi.multiply(nudged_max_tmp, scale)\n res = [nudged_min, nudged_max, scale]\n\n return res",
"def min_scaling_factor(components, default=1, warning=True, hint=None):\n return map_scaling_factor(\n components, default=default, warning=warning, func=min, hint=hint\n )",
"def simple_scaling(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n\n # Simple sclaing \n scaled_input_data = input_data / (Max + sys.float_info.min)\n\n # Return scaled input data\n return scaled_input_data",
"def MIN(*args):\n return _group_function(min, *args)",
"def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S",
"def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))",
"def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data",
"def test_scaling():\n rng = np.random.RandomState(42)\n shape = (400, 10)\n u = rng.standard_normal(size=shape)\n mean = 100 * rng.uniform(size=shape[1]) + 1\n Y = u + mean\n Y_, mean_ = mean_scaling(Y)\n assert_almost_equal(Y_.mean(0), 0, 5)\n assert_almost_equal(mean_, mean, 0)\n assert Y.std() > 1",
"def normalize_scores(scores, small_is_better=0):\n # Avoid division by zero errors\n vsmall = 0.00001\n if small_is_better:\n minscore = min(scores.values())\n return dict([(u, float(minscore) / max(vsmall, l)) for (u, l) in scores.items()])\n else:\n maxscore = max(scores.values())\n if maxscore == 0:\n maxscore = vsmall\n return dict([(u, float(c) / maxscore) for (u, c) in scores.items()])",
"def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])",
"def compute_minimum_scale(self):\n dt = self.dt\n\n def func_to_solve(s):\n return self.wavelet.fourier_period(s) - 2 * dt\n\n return optimize.fsolve(func_to_solve, 1)[0]",
"def minimum_value(drawbles):\n # Loop over histograms\n result = 10**20 \n for drawable in drawbles:\n # Unpack things if there is an error band\n if isinstance(drawable, tuple):\n drawable, error_band = drawable\n else:\n error_band = None\n\n if is_histo(drawable):\n MIN_VAL = 0.0 # found minimum must be larger than 0\n minimum = drawable.GetMinimum(MIN_VAL) \n elif is_stack(drawable):\n minimum = drawable.GetMinimum(\"nostack\")\n elif is_graph(drawable):\n minimum = TMath.MinElement(drawable.GetN(), drawable.GetY())\n elif is_line(drawable):\n minimum = min(drawable.GetY1(), drawable.GetY2()) # not tested\n else:\n print type(drawable)\n raise ValueError('unsupported drawable type')\n\n if minimum < 0.0: continue\n\n # Update the result\n result = min(result, minimum)\n\n return result",
"def scaled(values, output_min, output_max, input_min=0, input_max=1):\n values = _normalize(values)\n if input_min >= input_max:\n raise ValueError('input_min must be smaller than input_max')\n input_size = input_max - input_min\n output_size = output_max - output_min\n for v in values:\n yield (((v - input_min) / input_size) * output_size) + output_min",
"def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)",
"def normalize(first, *others):\n min_ = tf.reduce_min(first)\n max_ = tf.reduce_max(first)\n f = lambda i: ((i - min_) / ((max_ - min_) / 2)) - 1\n return [min_, max_, f(first)] + list(map(f, others))",
"def scale(x):\n min_x, max_x = numpy.min(x), numpy.max(x)\n if min_x != max_x:\n x = (x-min_x)/(max_x-min_x)\n else:\n # all the numbers are the same in x\n x = numpy.asarray([1/len(x) for i in range(len(x)) ])\n return x.tolist()",
"def auxminf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmin_f1_part_i(x,m_ind) \n \n return f",
"def min(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.min, reduce_instance_dims, name)",
"def minmax_normalize(samples, out=None):\n if out is None:\n dtype = np.common_type(np.empty(0, 'float32'), samples)\n out = np.array(samples, dtype=dtype, copy=True)\n else:\n out[:] = samples\n\n sample_mins = np.min(samples, -1)[..., None]\n sample_maxes = np.max(samples, -1)[..., None]\n out -= sample_mins\n out /= (sample_maxes - sample_mins)\n return out",
"def compute_means(runtimes):\n# tmp = runtimes[kernel_name]\n tmp_ = [ (int(key), float(np.mean(val)))\n for key, val in runtimes.iteritems()\n ]\n return sort_fst(tmp_)"
] | [
"0.5730076",
"0.57292515",
"0.5662298",
"0.5552494",
"0.551228",
"0.5499445",
"0.5480341",
"0.5479149",
"0.5476675",
"0.5461815",
"0.5429813",
"0.5428306",
"0.5410855",
"0.5396706",
"0.5393401",
"0.5375831",
"0.5367997",
"0.5347249",
"0.53420913",
"0.5333558",
"0.5322741",
"0.52995825",
"0.52955914",
"0.5289862",
"0.5287421",
"0.52656627",
"0.52428085",
"0.5235547",
"0.5234156",
"0.52297443"
] | 0.6127019 | 0 |
Center the ship on the screen | def center_ship(self):
self.center = self.screen_rect.centerx | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def center_ship(self):\n self.center = self.screen_rect.centerx",
"def center_ship(self):\n self.center = self.screen_rect.centerx",
"def center_ship(self):\n self.center = self.screen_rect.centerx",
"def center_ship(self):\n # Start each new ship at the bottom center\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n\n # Store a decimal value for the ship' center.\n self.center = float(self.rect.centerx)\n self.bottom = float(self.rect.bottom)",
"def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery",
"def positioning_ship(self):\n self.rect.midleft = self.screen_rect.midleft\n self.y = float(self.rect.y)",
"def CenterZombie(self):\n # Requirement ID: 8.0.1\n\n self.center = self.screen_rect.centerx",
"def center_mario(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x, self.y = float(self.rect.x), float(self.rect.y)",
"def center(self):\r\n frameGm = self.frameGeometry()\r\n screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())\r\n centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()\r\n frameGm.moveCenter(centerPoint)\r\n self.move(frameGm.topLeft())",
"def center_ava(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)",
"def center(self):\n if self.pos != 0.0:\n self.pos = 0.0",
"def center_on_screen(self):\n window_frame = self.frameGeometry()\n screen_center = QtGui.QDesktopWidget().availableGeometry().center()\n window_frame.moveCenter(screen_center)\n self.move(window_frame.topLeft())",
"def center_on(self, x, y):\n\n # Mark that we can start actually drawing now\n self.given_center = True\n\n # Center the view\n (ctr_x, ctr_y) = self.ingame_to_scene(x, y)\n self.parent.centerOn(ctr_x, ctr_y)\n\n # Draw what needs drawing\n self.draw_visible_area()",
"def centre(self):\n self.top.update_idletasks()\n # The horizontal position is calculated as (screenwidth - window_width)/2\n hpos = int((self.top.winfo_screenwidth() - self.top.winfo_width())/2)\n # And vertical position the same, but with the height dimensions\n vpos = int((self.top.winfo_screenheight() - self.top.winfo_height())/2)\n # And the move call repositions the window\n self.top.geometry('+{x}+{y}'.format(x=hpos, y=vpos))",
"def center(self):\n self.root.update_idletasks()\n w = self.root.winfo_screenwidth()\n h = self.root.winfo_screenheight()\n size = tuple(int(_) for _ in self.root.geometry().split('+')[0].split('x'))\n x = w/2 - size[0]/2\n y = h/2 - size[1]/2\n self.root.geometry(\"240x80+%d+%d\" % (x, y))",
"def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2",
"def update(self):\n # Update the ship's center, not the rect\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.ai_settings.ship_speed_factor\n if self.moving_left and self.rect.left > 0:\n self.center -= self.ai_settings.ship_speed_factor\n \n # Change the rect according to the self.center\n self.rect.centerx = self.center",
"def update(self):\r\n # Update the center value of the ship instead of rect\r\n if self.moving_right and self.rect.right < self.screen_rect.right:\r\n self.center += self.ai_settings.ship_speed_factor\r\n if self.moving_left and self.rect.left > 0:\r\n self.center -= self.ai_settings.ship_speed_factor\r\n # according to self.center Update rect object\r\n self.rect.centerx = self.center",
"def center(window):\n window.update_idletasks()\n\n # Find the screen resolution\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n\n # Find new (x, y) coordinates\n size = tuple(int(_) for _ in window.geometry().split('+')[0].split('x'))\n x = screen_width/2 - 7 * size[0] / 13\n y = screen_height/2 - 6 * size[1] / 11\n\n # Apply new coordinates\n window.geometry(\"+%d+%d\" % (x, y))",
"def center_screen(self, window_width, window_height):\n offset_right = int(self.winfo_screenwidth()/2 - window_width/2)\n offset_down = int((self.winfo_screenheight()-40)/2 - window_height / 2)\n\n self.geometry('+{}+{}'.format(offset_right, offset_down))",
"def center_screen(self, window_width, window_height):\n offset_right = int(self.winfo_screenwidth()/2 - window_width/2)\n offset_down = int((self.winfo_screenheight()-40)/2 - window_height / 2)\n\n self.geometry('+{}+{}'.format(offset_right, offset_down))",
"def center_window(self):\n\n\t\tframe_geo = self.frameGeometry()\n\t\tcursor_pos = QtWidgets.QApplication.desktop().cursor().pos()\n\t\tscreen = QtWidgets.QApplication.desktop().screenNumber(cursor_pos)\n\t\tcenter_point = QtWidgets.QApplication.desktop().screenGeometry(screen).center()\n\t\tframe_geo.moveCenter(center_point)\n\t\tself.move(frame_geo.topLeft())",
"def center(self):\n # get the compute screen's size\n screen = QDesktopWidget().screenGeometry()\n # get the app windows' size\n size = self.geometry()\n self.move(int((screen.width() - size.width()) / 2), int((screen.height() - size.height()) / 2))",
"def update(self):\r\n \"\"\" Update the ship's center value not the rect beacuse rect attributes can only deal with integers \"\"\"\r\n\r\n if self.moving_right and self.rect.right < self.screen_rect.right: #rect.right returns the x-coordinate value of the right edge of the ship's rect,\r\n #self.rect.centerx += 1 #if this value < than self.screen_rect.right then the ship hasnt reached the right edge.\r\n self.center += self.ai_settings.ship_speed_factor\r\n\r\n if self.moving_left and self.rect.left > 0: #if the value of the left side of the rect is greater than zero,\r\n #self.rect.centerx -= 1 #the ship hasn’t reached the left edge of the screen.\r\n self.center -= self.ai_settings.ship_speed_factor\r\n\r\n \"\"\" Update the rect object from self.center \"\"\"\r\n \"\"\" Only the integer portion of self.center will be stored in self.rect.centerx,\r\n but that’s fine for displaying the ship.\"\"\"\r\n self.rect.centerx = self.center",
"def center_horizontal_paddle(self):\n self.top_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.bot_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)",
"def __moveCenterTo(self, x, y):\n x0, y0, w, h = self.currentBox\n x2, y2 = x - (w/2), y - (h/2)\n self.__moveTo(x2, y2)",
"def wrap(self):\n if self.center.x > SCREEN_WIDTH:\n self.center.x = 0\n if self.center.y > SCREEN_HEIGHT:\n self.center.y = 0\n if self.center.x < 0:\n self.center.x = SCREEN_WIDTH\n if self.center.y < 0:\n self.center.y = SCREEN_HEIGHT",
"def update(self):\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.ai_settings.ship_speed_factor\n if self.moving_left and self.rect.left > 0:\n self.center -= self.ai_settings.ship_speed_factor\n\n self.rect.centerx = self.center",
"def update(self):\n\t\tif self.moving_right and self.rect.right < self.screen_rect.right:\n\t\t\tself.center += self.ai_settings.ship_speed_factor\n\t\tif self.moving_left and self.rect.left > 0:\n\t\t\tself.center -= self.ai_settings.ship_speed_factor\t\t\t\n\n\t\tself.rect.centerx = self.center",
"def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()"
] | [
"0.90181214",
"0.90181214",
"0.90181214",
"0.8599386",
"0.77910644",
"0.761576",
"0.7553747",
"0.7458518",
"0.7252782",
"0.72375983",
"0.7168899",
"0.71423084",
"0.7113291",
"0.7064254",
"0.7018259",
"0.6994591",
"0.6934956",
"0.69271857",
"0.6910503",
"0.69049215",
"0.69049215",
"0.68707186",
"0.68307614",
"0.68140435",
"0.68032366",
"0.6799061",
"0.6780891",
"0.67585427",
"0.6748598",
"0.67466635"
] | 0.90641564 | 1 |
Calculates word frequency for a given text. We don't consider stop words when calculating frequency. | def word_frequency(text):
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(text)
stop = set(stopwords.words('english'))
tokens_without_stop = list(filter(lambda word: word.lower() not in stop, tokens))
counts = Counter(tokens_without_stop)
return counts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))",
"def frequency(text):\n # TODO: change function input to a textfile?\n import collections\n freq = collections.Counter(text)\n # print freq\n return freq",
"def complexity(text:str) -> float:\n words = text.split(' ')\n freqs = [frequency(w) for w in words]\n return sum(freqs) / (len(frequency_list) - freqs.count(0)) #sum of the frequencies / all the words that were in the list",
"def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return word_frequencies",
"def freq(word, document):\n return document.split(None).count(word)",
"def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)",
"def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)",
"def _compute_frequencies( word_sent):\n\t\tfreq = defaultdict(int)\n\t\tfor s in word_sent:\n\t\t\tfor word in s:\n\t\t\t\tif word not in _stopwords:\n\t\t\t\t\tfreq[word] += 1\n\t\t\t\t# frequencies normalization and fitering\n\t\treturn freq",
"def word_frequency_table(self, text_string):\r\n stopWords = set(stopwords.words(\"english\"))\r\n words = word_tokenize(text_string)\r\n ps = PorterStemmer()\r\n\r\n freqTable = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n if word in stopWords:\r\n continue\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n\r\n return freqTable",
"def get_freq_dist_from_corpus(text):\n word_list = tokenize.word_tokenize(text.lower())\n return FreqDist(word_list)",
"def word_frequency(self, document):\n freq_table = {}\n words = nltk.word_tokenize(document)\n for word in words:\n if word in freq_table:\n freq_table[word] = freq_table.get(word) + 1\n else:\n freq_table[word] = 1\n # cut down the frequency table so that only common words are scored for\n freq_table = sorted(freq_table.items(), key=lambda x: x[1], reverse=True)\n scorable_words = []\n for word, occ in freq_table:\n # set threshold as words appearing x times or more - set to optimal valeue = 0\n # in hindsight this can just be deleted\n if int(occ) > 0:\n scorable_words.append(word)\n else:\n break\n self.sent_pos = self.sent_pos + 1 \n return scorable_words",
"def word_frequencies(url):\n\ttexts = get_all_texts(url)\n\tcount = count_words_in_sentence_list(texts)\n\treturn count",
"def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END",
"def computeWordFrequencies(self, tokens: ['token'], frequencies: {'token': int}):\n # project2: update this method to take existing dict as parameter and modify it\n # additionally, stopwords are not inserted in the dict;\n # words shorter than 3 character or contains all digits are ignored\n for token in tokens:\n # if the key is not in dict, dict.setdefault method initiates the value at 0\n # if token not in stopwords and len(token) >= 3 and not token.isdigit():\n frequencies[token] = frequencies.setdefault(token, 0) + 1",
"def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq",
"def count_words_fast(text):\r\n\ttext = text.lower()\r\n\tskips = ['.', ',', ';',':',\"'\", '\"'] # remove all punctuations\r\n\t# punctuations can lead to misleading counting...\r\n\t\r\n\tfrom collections import Counter\r\n\r\n\tfor ch in skips:\r\n\t\ttext = text.replace(ch, \"\")\r\n\r\n\tword_counts = Counter(text.split(\" \"))\r\n\t\r\n\treturn word_counts # Counter object ~ dictionnary\r",
"def word_frequency(words):\r\n frequency = {}\r\n for w in words:\r\n frequency[w] = frequency.get(w, 0) + 1\r\n return frequency",
"def freqWords(self, words):\n return nltk.FreqDist(words)",
"def getFrequencies(tweets):\n total_words = 0\n word_freq = {}\n for tweet in tweets:\n twext = tweet['clean_text']\n for word in twext.split(' '):\n word = word.strip()\n if word:\n total_words += 1\n if word not in word_freq:\n word_freq[word] = float(1)\n else:\n word_freq[word] += 1\n for key in word_freq:\n word_freq[key] = word_freq[key]/total_words\n return word_freq",
"def word_count(text):\n\n # Tokenize text on whitespace / newline\n words = text.strip().split()\n\n # Create a dictionary from the set of tokens, initializing each count to 0\n counts = dict.fromkeys(words, 0)\n\n # Iterate over the text to count occurences of each token\n for word in words:\n counts[word] += 1\n\n # Return the counts\n return counts",
"def process_text(self, text):\n\n flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821\n else 0)\n pattern = r\"\\w[\\w']*\" if self.min_word_length <= 1 else r\"\\w[\\w']+\"\n regexp = self.regexp if self.regexp is not None else pattern\n\n words = re.findall(regexp, text, flags)\n # remove 's\n words = [word[:-2] if word.lower().endswith(\"'s\") else word\n for word in words]\n # remove numbers\n if not self.include_numbers:\n words = [word for word in words if not word.isdigit()]\n # remove short words\n if self.min_word_length:\n words = [word for word in words if len(word) >= self.min_word_length]\n\n stopwords = set([i.lower() for i in self.stopwords])\n if self.collocations:\n word_counts = unigrams_and_bigrams(words, stopwords, self.normalize_plurals, self.collocation_threshold)\n else:\n # remove stopwords\n words = [word for word in words if word.lower() not in stopwords]\n word_counts, _ = process_tokens(words, self.normalize_plurals)\n\n return word_counts",
"def countWords(text):\r\n\r\n\tlistOfWord = []\r\n\tlistOfFrequency = []\r\n\r\n\tfor word in text:\t\t\t\t\t \t# menghitung frekuensi kata\r\n if word == '':\r\n pass\r\n elif word not in listOfWord:\t\t\t\t\t# menyimpan kata ke dalam list\r\n listOfWord.append(word)\r\n listOfFrequency.append(1)\r\n else:\r\n index = listOfWord.index(word)\r\n listOfFrequency[index] = listOfFrequency[index] + 1 # menambah frekuensi kata yang sudah ada\r\n\r\n\r\n\tlst = [listOfWord, listOfFrequency]\r\n\r\n\treturn lst",
"def word_frequency(words):\n freq = {}\n for w in words:\n cur_word = w.lower().strip(punctuation)\n freq[cur_word] = freq.get(cur_word, 0) + 1\n return freq",
"def words(text):\n clean = TextBlob(clean(text))\n sentence_count = len(clean.sentences)\n words = clean.tokenize()\n word_count = len(words)\n avg_len = np.mean([len(word) for word in words])\n words_dict = {'sentence_count': sentence_count, 'word_count': word_count,\n 'avg_len': avg_len}\n return words_dict",
"def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )",
"def word_frequencies(corpus):\n return frequencies(corpus, 1, to_lower=True)",
"def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq",
"def calculate_frequency_for_word(self, input_string: str, word: str) \\\n -> int:\n return self._word_counter(input_string=input_string)[word.lower()]",
"def get_avg_word_len(text):\r\n words = [len(s.translate(str.maketrans('', '', string.punctuation))) for s in text]\r\n return sum(words) / len(words)",
"def word_count(text):\n # Use a dictionary to store the words\n words = {}\n\n # Simple way to strip extra whitespace\n text = ' '.join(text.split())\n\n # Now iterate through, splitting on space\n for word in text.split(\" \"):\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n\n return words"
] | [
"0.8071999",
"0.75517035",
"0.74244624",
"0.7402747",
"0.73363006",
"0.7313311",
"0.730345",
"0.72393984",
"0.72356933",
"0.7208666",
"0.7173497",
"0.7112697",
"0.7105219",
"0.7059449",
"0.7059299",
"0.7014396",
"0.7002395",
"0.69982463",
"0.6997513",
"0.69905484",
"0.6984714",
"0.69711655",
"0.6954166",
"0.6950386",
"0.6948521",
"0.69191736",
"0.6915094",
"0.6893181",
"0.6853664",
"0.6841007"
] | 0.818352 | 0 |
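A self-contained way to run the word_frequency record above: the import lines, the `__main__` demo call, and the sample sentence are assumptions added here for illustration (NLTK plus its `stopwords` corpus must be installed), not part of the original record.

# Illustrative sketch only; imports and the demo call are assumed, not from the record.
from collections import Counter
from nltk.corpus import stopwords          # requires nltk.download('stopwords') once
from nltk.tokenize import RegexpTokenizer

def word_frequency(text):
    tokenizer = RegexpTokenizer(r'\w+')          # keep word characters, drop punctuation
    tokens = tokenizer.tokenize(text)
    stop = set(stopwords.words('english'))
    tokens_without_stop = [w for w in tokens if w.lower() not in stop]
    return Counter(tokens_without_stop)

if __name__ == '__main__':
    counts = word_frequency("The quick brown fox jumps over the lazy dog")
    print(counts)  # counts of quick, brown, fox, jumps, lazy, dog; 'The'/'over'/'the' are stop words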
Check if a mol has 2D coordinates and if not, calculate them. | def check_2d_coords(mol, force=False):
if not force:
try:
mol.GetConformer()
except ValueError:
force = True # no 2D coords... calculate them
if force:
if USE_AVALON_2D:
pyAv.Generate2DCoords(mol)
else:
mol.Compute2DCoords() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_coords():\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed",
"def test_coord_preceding_fs(self):",
"def _has_coordinates_and_gradient(self) -> bool:\n return self._coords is not None and self._coords.g is not None",
"def check_coordinates(X, Y):\n\n # Accounting for elliptical Jupiter disk\n Y *= 1.071374\n\n return sqrt(X ** 2 + Y ** 2)",
"def test_coords():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n\n return x, y",
"def get_allowed_positions(coordXY, grid):\n\n\tsurrounding_coord = []\n\ttesting_coord = []\n\n\t# Get the coordinates of the external square\n\tfor i in range(coordXY[0] - 1, coordXY[0] + 2, 2):\n\t\tfor j in range(coordXY[1] - 1, coordXY[1] +2, 1):\n\t\t\tif (i,j) == coordXY:\n\t\t\t\tpass\n\t\t\telif i < 0 or j < 0:\n\t\t\t\tsurrounding_coord.append('None')\n\t\t\telse:\n\t\t\t\tsurrounding_coord.append((i,j))\n\n\t# Get the coordinates of the internal square\n\tfor i in range(coordXY[0] - 2, coordXY[0] + 3, 4):\n\t\tfor j in range(coordXY[1] - 2, coordXY[1] + 3, 2):\n\t\t\tif i < 0 or j < 0 or i > 7 or j > 7:\n\t\t\t\ttesting_coord.append('None')\n\t\t\telse:\n\t\t\t\ttesting_coord.append((i,j))\n\n\t# Get the position of Bottom and Top of the 2 squares\n\tTC = [(coordXY[0], coordXY[1] + 2), (coordXY[0], coordXY[1] - 2)]\n\tfor elem in TC:\n\n\t\tif elem[0] not in range(8) or elem[1] not in range(8):\n\t\t\ttesting_coord.append('None')\n\t\telse:\n\t\t\ttesting_coord.append(elem)\n\n\n\tSC = [(coordXY[0], coordXY[1] + 1), (coordXY[0], coordXY[1] - 1)]\n\tfor elem in SC:\n\t\tif elem[0] not in range(8) or elem[1] not in range(8):\n\t\t\tsurrounding_coord.append('None')\n\t\telse:\n\t\t\tsurrounding_coord.append(elem)\n\n\treturn testing_coord, surrounding_coord",
"def fix_coords_non_symetric_lon(cube):\n # first fix any completely missing coord var names\n utils.fix_dim_coordnames(cube)\n # fix individual coords\n for cube_coord in cube.coords():\n # fix time\n if cube_coord.var_name == 'time':\n logger.info(\"Fixing time...\")\n cube.coord('time').convert_units(\n Unit('days since 1950-1-1 00:00:00', calendar='gregorian'))\n utils.fix_bounds(cube, cube.coord('time'))\n\n # fix longitude\n if cube_coord.var_name == 'lon':\n logger.info(\"Fixing longitude...\")\n if cube_coord.ndim == 1:\n if cube_coord.points[0] < 0. and \\\n cube_coord.points[-1] < 181.:\n lon_coord = cube.coord('longitude').copy()\n lons_below_0 = lon_coord.points[lon_coord.points < 0.] + \\\n 360.\n lons_above_0 = lon_coord.points[lon_coord.points >= 0.]\n lons = np.hstack((lons_above_0, lons_below_0))\n cube_coord.points = lons\n\n utils.fix_bounds(cube, cube_coord)\n cube.attributes['geospatial_lon_min'] = 0.\n cube.attributes['geospatial_lon_max'] = 360.\n utils.roll_cube_data(cube, len(lons_above_0), -1)\n\n # fix latitude\n if cube_coord.var_name == 'lat':\n logger.info(\"Fixing latitude...\")\n utils.fix_bounds(cube, cube.coord('latitude'))\n\n # fix depth\n if cube_coord.var_name == 'lev':\n logger.info(\"Fixing depth...\")\n utils.fix_bounds(cube, cube.coord('depth'))\n\n # fix air_pressure\n if cube_coord.var_name == 'air_pressure':\n logger.info(\"Fixing air pressure...\")\n utils.fix_bounds(cube, cube.coord('air_pressure'))\n\n # remove CS\n cube.coord('latitude').coord_system = None\n cube.coord('longitude').coord_system = None\n\n return cube",
"def extra_coords(self) -> ExtraCoordsABC:",
"def _calc_coords(self):\n i = num.outer(num.arange(self.size[0]), num.ones(self.size[1]))\n i0 = self.pos[0] + (i * self.space[0])\n i1 = self.pos[0] + (i * self.space[0]) + self.bub[0]\n\n j = num.outer(num.ones(self.size[0]), num.arange(self.size[1]))\n j0 = self.pos[1] + (j * self.space[1])\n j1 = self.pos[1] + (j * self.space[1]) + self.bub[1]\n\n self.coords = num.dstack((i0, i1, j0, j1)).astype('i')",
"def check_2x2_solved(self):\n return self._grid[0][0] == 0 and self._grid[0][1] == 1 \\\n and self._grid[1][0] == self._width*1 and self._grid[1][1] == (1 + self._width * 1)",
"def island_perimeter(grid):\n cx, cy = 0, 0\n len_grid = len(grid)\n for x in range(len_grid):\n for y in range(len(grid[x])):\n if grid[x][y] == 1:\n cx += 1\n if (y != len(grid[x]) - 1 and grid[x][y + 1] == 1):\n cy += 1\n if (x != len(grid) - 1 and grid[x + 1][y] == 1):\n cy += 1\n return 4 * cx - 2 * cy",
"def _calc_coords(self, X, Y, Z):\r\n def _write_coords(coord):\r\n XX.append(X[coord])\r\n YY.append(Y[coord])\r\n ZZ.append(Z[coord])\r\n\r\n def _build_layer():\r\n for j in range(self.size[1]):\r\n for i in range(self.size[0]):\r\n # write NW corner\r\n if i == 0:\r\n nwCoord = 2 * i + 4 * self.size[0] * j + const\r\n _write_coords(nwCoord)\r\n # write NE corner\r\n neCoord = 2 * i + 4 * self.size[0] * j + const + 1\r\n _write_coords(neCoord)\r\n if j == self.size[1] - 1:\r\n for i in range(self.size[0]):\r\n # write SW corner\r\n if i == 0:\r\n swCoord = 2 * i + 4 * self.size[0] * j + 2 * self.size[0] + const\r\n _write_coords(swCoord)\r\n # write SE corner\r\n seCoord = 2 * i + 4 * self.size[0] * j + 2 * self.size[0] + const + 1\r\n _write_coords(seCoord)\r\n\r\n # At this point, we have all points needed for unstructured grid in X,Y,Z\r\n # However, they must be re-arranged so we can define Hexahedrons\r\n # TODO: REFINE CELLS\r\n # PSUEDO:\r\n # find cell to be refined\r\n # add new cells (as easy as pie)\r\n\r\n XX, YY, ZZ = ([] for i in range(3))\r\n const = 0\r\n for k in range(self.size[2]):\r\n _build_layer()\r\n if k == self.size[2] - 1:\r\n const += self.size[0] * self.size[1] * 4\r\n _build_layer()\r\n break\r\n else:\r\n const += self.size[0] * self.size[1] * 8\r\n return XX, YY, ZZ",
"def is_2d(self) -> bool:\n return self.layers == 1 and self.times == 1",
"def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = self.cube.coord('latitude').bounds\n all_lon_bounds = self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean",
"def is_coord_empty(self, data):\n check = False\n if data[\"topic\"] in DRONE_POS_TOPICS:\n check = self.drone.check_if_pos(data[\"coord\"])\n elif data[\"topic\"] in DRONE_VEL_TOPICS:\n check = self.drone.check_if_vel(data[\"coord\"])\n elif data[\"topic\"] in DRONE_ACC_TOPICS:\n check = self.drone.check_if_acc(data[\"coord\"])\n elif data[\"topic\"] in SUBJECT_TOPICS:\n check = self.subject.check_if_pos(data[\"coord\"])\n elif data[\"topic\"] in self.PEDESTRIAN_TOPICS:\n check = self.peds[data[\"pid\"]].check_if_pos(data[\"coord\"])\n return check",
"def test_cell_coordinates(mock_amg):\n expected = [(0, 0), (64, 0), (64, 64), (0, 64)]\n assert mock_amg.cells[0].coordinates == expected",
"def square2_checker(self, x, y, row2, col2):\n \n self.x = x\n self.y = y\n self.row2 = row2\n self.col2 = col2\n\n return abs(self.x - self.row2) == 1 and self.col2 == self.y \\\n or abs(self.y - self.col2) == 1 and self.row2 == self.x",
"def coordinates(self):",
"def validate_in(self, xcoord, ycoord):\r\n x = int(xcoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n y = int(ycoord/(self.tr.bd.TILE_WIDTH + self.tr.bd.LINE_WIDTH))\r\n if not self.tr.turn_tracker and self.tr.bd.disks[x][y].halo_tag:\r\n return True, x, y\r\n else:\r\n return False, x, y",
"def nocoordinate(self):\n return self.__nocoordinate",
"def czyMur(mapObj, x, y):\n if x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):\n return False # (x,y) nie sa na mapie\n elif mapObj[x][y] in ('#'):\n return True # mur na drodze\n return False",
"def check_coords_file(self):\n if path.exists(self.coords_file):\n return True\n return False",
"def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )",
"def inside_square(self, x, y):\n square_centers = self.get_square_centers()\n for i, row in enumerate(square_centers):\n for j, (square_x, square_y) in enumerate(row):\n\n if (square_x - self.square_width_half < x < square_x + self.square_width_half and\n square_y - self.square_width_half < y < square_y + self.square_width_half):\n\n return (i, j), (float(square_x), float(square_y))\n\n return None, None",
"def is_perfect_square():",
"def test_shape_fail():\n lons, lats = np.arange(10), np.arange(10).reshape(5, 2)\n emsg = \"Require longitudes and latitudes with same shape\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)",
"def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy",
"def part2():\r\n my_input = 368078\r\n coords = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\r\n x = y = dx = 0\r\n dy = -1\r\n grid = {}\r\n\r\n while True:\r\n total = 0\r\n for offset in coords:\r\n ox, oy = offset\r\n if (x+ox, y+oy) in grid:\r\n total += grid[(x+ox, y+oy)]\r\n if total > int(my_input):\r\n return total\r\n if (x, y) == (0, 0):\r\n grid[(0, 0)] = 1\r\n else:\r\n grid[(x, y)] = total\r\n if (x == y) or (x < 0 and x == -y) or (x > 0 and x == 1-y):\r\n dx, dy = -dy, dx\r\n x, y = x+dx, y+dy",
"def IsSolid(self,coord):\r\n x,y=coord\r\n if x<0 or x>=self.size[0] or y<0 or y>=self.size[1]: return True \r\n return self.map[x][y].solid",
"def is_square(self):\n lines, columns = self.get_size()\n return lines == columns"
] | [
"0.6219354",
"0.6077779",
"0.6013348",
"0.584337",
"0.58144647",
"0.5808867",
"0.5737401",
"0.57239455",
"0.56685406",
"0.56669194",
"0.56379074",
"0.56226474",
"0.5609471",
"0.56056577",
"0.5604061",
"0.55906796",
"0.55845845",
"0.5557084",
"0.5556743",
"0.55530614",
"0.5545311",
"0.5537153",
"0.5533226",
"0.5491783",
"0.54729927",
"0.54565877",
"0.5453876",
"0.5437473",
"0.5416955",
"0.54162365"
] | 0.7064952 | 0 |
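In the check_2d_coords record above, USE_AVALON_2D and pyAv are module-level globals the record omits. The setup below is only a guess at that surrounding context; the fallback call uses RDKit's documented AllChem.Compute2DCoords rather than the record's mol.Compute2DCoords, and the phenol SMILES is an arbitrary example.

# Assumed module-level context (not the original module).
from rdkit import Chem
from rdkit.Chem import AllChem

try:
    from rdkit.Avalon import pyAvalonTools as pyAv   # Avalon toolkit is optional
    USE_AVALON_2D = True
except ImportError:
    USE_AVALON_2D = False

# A molecule fresh from SMILES has no conformer, so GetConformer() would raise
# ValueError and the record's function would go on to generate 2D coordinates.
mol = Chem.MolFromSmiles('c1ccccc1O')
if USE_AVALON_2D:
    pyAv.Generate2DCoords(mol)
else:
    AllChem.Compute2DCoords(mol)
print(mol.GetNumConformers())  # 1 after coordinate generation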
Returns True, if x is a number (i.e. can be converted to float). | def isnumber(x):
try:
float(x)
return True
except ValueError:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False",
"def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True",
"def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False",
"def isNumber(x):\n return isinstance(x, (int, float))",
"def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True",
"def isNumber(x):\n\treturn type(x) in [int, float]",
"def is_number(n):\n return isinstance(n, (int, float))",
"def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False",
"def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False",
"def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False",
"def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False",
"def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False",
"def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False",
"def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False",
"def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False",
"def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_number(number):\n if type(number) == type(1) or type(number) == type(0.1) or type(number) == type('') or type(u''):\n try:\n float(number)\n return True\n except ValueError:\n return False\n except TypeError:\n return False\n else:\n return False",
"def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False",
"def isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False",
"def is_number(value):\n\n return isinstance(value, (int, long, float))",
"def is_integer(x):\r\n if isinstance(x, float):\r\n return x == int(x)\r\n else:\r\n raise TypeError, \"Input float\"",
"def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_number(s: Union[str, int, float]):\n if isinstance(s, str) and s.lower() == \"nan\":\n return True\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def isfloat(s):\n try:\n x = float(s)\n return True\n except:\n return False"
] | [
"0.8831771",
"0.84678894",
"0.8269232",
"0.8268981",
"0.8215385",
"0.81769127",
"0.8087828",
"0.80597544",
"0.8051537",
"0.8027391",
"0.7998552",
"0.7986285",
"0.7984602",
"0.7984602",
"0.7984602",
"0.79576087",
"0.79458576",
"0.79081887",
"0.78952646",
"0.78746027",
"0.78025687",
"0.7698081",
"0.7688956",
"0.7633072",
"0.7607364",
"0.7599614",
"0.75978893",
"0.7570176",
"0.7553362",
"0.7542159"
] | 0.90320134 | 0 |
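The isnumber record above accepts anything float() can parse; the checks below are illustrative and not part of the record, and they show one caveat: only ValueError is caught, so non-string/non-numeric inputs still raise.

def isnumber(x):
    try:
        float(x)
        return True
    except ValueError:
        return False

print(isnumber("3.14"), isnumber("1e-4"), isnumber("abc"))  # True True False
print(isnumber("nan"), isnumber("inf"))                     # True True (float() parses both)
# Note: float(None) raises TypeError, not ValueError, so isnumber(None) propagates the error.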
For a given cluster_id calculate the distance from each point to the centroid/medoid. | def calculate_distances_for_cluster(self, cluster_id):
cluster_of_interest = self.embedding_df[self.embedding_df['cluster'] == cluster_id].copy()
if cluster_of_interest.empty:
raise ValueError(f'Cluster id {cluster_id} not found')
# Don't calculate distances for the noise cluster
if cluster_id == -1:
return pd.DataFrame(np.nan, columns=['dist_to_rep_point'], index=cluster_of_interest.index)
if self.selection_method == 'centroid':
rep_point = self.clusterer.weighted_cluster_centroid(cluster_id)
if self.selection_method == 'medoid':
rep_point = self.clusterer.weighted_cluster_medoid(cluster_id)
dists = cdist(rep_point.reshape((1,len(self._embedding_cols))), cluster_of_interest[self._embedding_cols].values, metric=self.metric)
return pd.DataFrame(dists[0], columns=['dist_to_rep_point'], index=cluster_of_interest.index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calcDistortion(medoids, clusters, class_header=\"Class\"):\n distortion = 0\n for medoid_row_index, medoid_tuple in enumerate(medoids.iterrows()): # For every Medoid\n for _, datum in clusters[medoid_row_index].iterrows(): # For each point in the medoid cluster\n # Add the distance between medoid and data point squared to total distortion\n distortion += (Cluster.calcDistance(medoid_tuple[1], datum, class_header=class_header)) ** 2\n return distortion",
"def distance(point, cluster):\n return np.sqrt((point[0] - cluster[0])**2 + (point[1] - cluster[1])**2)",
"def get_distance(self, samples, clusters):\n n_samples = samples.shape[0]\n n_features = samples.shape[1]\n n_centroids = clusters.shape[0]\n dist = np.zeros(shape=(n_samples, n_centroids))\n\n # computing squared euclidian distance for each sample-cluster pair\n for i in range(n_samples):\n for j in range(n_centroids):\n for k in range(n_features):\n dist[i, j] += (samples[i, k] - clusters[j, k])**2\n # dist[i, j] = D[i, j]**(1/2)\n\n return np.sqrt(dist)",
"def distance(centroid, datapoint):\r\n d=0\r\n for j in range(len(datapoint)-1):\r\n d=d+(datapoint[j]-centroid[j])**2\r\n d= d**(1/2)\r\n return d",
"def calcAvgDistances(centroids, clusters, class_header=\"Class\"):\n avg_distances = [0] * len(centroids)\n multiprocess_count = multiprocessing.cpu_count() # Find processor count\n for centroid_row_index, centroid_tuple in enumerate(centroids.iterrows()): # For each cluster\n work_list = [] # initialize multiprocessing structures\n set_list = []\n for _, datum in clusters[centroid_row_index].iterrows(): # For each point in the medoid cluster\n work_list.append((centroid_tuple[1], datum, class_header)) # add calculation to work list\n\n partition_size = math.ceil(len(work_list) / multiprocess_count) # find size of each work subeset\n for i in range(multiprocess_count - 1): # repeat for every subset\n sample = work_list[i * partition_size: (i + 1) * partition_size] # break work list into fair subsets\n set_list.append(sample)\n set_list.append((work_list[(multiprocess_count - 1) * partition_size:]))\n pool = multiprocessing.Pool(processes=multiprocess_count) # create multiprocessing pool\n # calculate sum of list of all distances from work list tasks\n avg_distances[centroid_row_index] = sum(sum(pool.map(Cluster.calcDistanceList, set_list), []))\n pool.close()\n pool.join()\n\n if avg_distances[centroid_row_index] is not 0: # make sure we do not divide by 0\n # calculate average of distance list\n avg_distances[centroid_row_index] = avg_distances[centroid_row_index] / len(clusters[centroid_row_index])\n return avg_distances",
"def calculate_distances(data_point, centroids):\n distances = []\n for centroid_index, centroid_value in enumerate(centroids):\n distances.append(distance(data_point, centroid_value))\n return distances",
"def __compute_distance(self, x, centroid):\n \n diff = x - centroid\n return np.sqrt(np.dot(diff.T, diff))",
"def get_distances(centroid, points):\r\n return np.linalg.norm(points - centroid, axis=1)",
"def rank_cluster_points_by_distance(self, cluster_id):\n cluster_of_interest = self.embedding_df[self.embedding_df['cluster'] == cluster_id].copy()\n \n if cluster_of_interest.empty:\n raise ValueError(f'Cluster id {cluster_id} not found')\n \n if 'dist_to_rep_point' not in self.embedding_df.columns:\n distance_df = self.calculate_distances_for_cluster(cluster_id)\n cluster_of_interest = cluster_of_interest.merge(distance_df, left_index=True, right_index=True)\n \n cluster_of_interest.sort_values('dist_to_rep_point', inplace=True)\n return cluster_of_interest",
"def _calc_distance(self, X):\n distances = np.zeros((X.shape[0], self.n_clusters))\n print(distances.shape)\n for i, centroid in enumerate(self.centroids):\n distances[:, i] = np.linalg.norm(X - centroid, axis=1)\n return distances",
"def clusterAndDistance(self, data):\n\t\treturn closestClusterAndDistance(data, self.centers)",
"def _compute_dist(self, K, dist, within_distances, update_within):\r\n\r\n sw = self.sample_weight_\r\n\r\n for j in xrange(self.n_clusters):\r\n mask = self.labels_ == j\r\n if np.sum(mask) == 0:\r\n raise ValueError(\"Empty cluster found, try smaller n_cluster.\")\r\n\r\n denom = sw[mask].sum()\r\n denomsq = denom * denom\r\n if update_within:\r\n KK = K[mask][:, mask] \r\n dist_j = np.sum(np.outer(sw[mask], sw[mask]) * KK / denomsq)\r\n within_distances[j] = dist_j\r\n dist[:, j] += dist_j\r\n else:\r\n dist[:, j] += within_distances[j]\r\n\r\n dist[:, j] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / denom #calculating distance of each point from centroid of cluster j by finding \r\n #diff. b/w centroid of cluster j & similarity of it with points in cluster j\r",
"def calculate_all_distances_to_center(self):\n all_distances = pd.DataFrame()\n for label in np.unique(self.embedding_df['cluster']): \n distance_df = self.calculate_distances_for_cluster(label)\n all_distances = pd.concat([all_distances, distance_df])\n \n self.embedding_df = self.embedding_df.merge(all_distances, left_index=True, right_index=True)",
"def euclidean_distance(self, point, centroid):\n square_diff = 0.0\n for i in range(0, len(point)):\n square_diff += abs(point[i] - centroid[i]) ** 2\n return math.sqrt(square_diff)",
"def calc_distances_from_central(cluster, embedding):\n\n return calc_distances_in_embedding(cluster, embedding)",
"def computeSSE(data, centers, clusterID):\n sse = 0\n nData = len(data)\n for i in range(nData):\n c = clusterID[i]\n sse += squaredDistance(data[i], centers[c]) ** 2\n return sse",
"def compute_distance(X, K_clusters):\n dis = np.linalg.norm((X-K_clusters),2,axis=1)**2\n return dis",
"def distance(self, point):\r\n assert a6checks.is_point(point)\r\n assert len(point)==len(self._centroid)\r\n\r\n sum=0\r\n for i in range (len(self._centroid)):\r\n sum+=(point[i]-self._centroid[i])*(point[i]-self._centroid[i])\r\n dist=math.sqrt(sum)\r\n return dist",
"def find_centroid_for_each(self):",
"def clusters_distance(cluster1, cluster2):\n return max([euclidean_distance(point1, point2) for point1 in cluster1 for point2 in cluster2])",
"def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass",
"def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)",
"def __get_cluster_centroid_distance(self, single_training: np.ndarray, cluster_center: np.ndarray) -> (int, float):\n training_label, training_distance = None, float('inf')\n # Check the distance of this point from all the cluster point.\n # This training point belongs to a cluster, which ever cluster centroid have the lowest distance from this point\n for cluster_label, single_cluster in enumerate(cluster_center):\n # Distance from the this training point to this cluster centroid\n this_distance = self.__get_distance(single_cluster, single_training)\n if this_distance < training_distance:\n training_label = cluster_label\n training_distance = this_distance\n return training_label, training_distance",
"def cluster_partition_distance(individual, test_data, truth_data, name=None):\r\n distance_sum = 0\r\n max_sum = 0\r\n for test_clusters, truth_clusters in zip(test_data, truth_data):\r\n # Get last column of target data\r\n test_clusters = test_clusters[-1].flatten()\r\n\r\n p1_dict = {}\r\n for i, x in enumerate(test_clusters):\r\n if x not in p1_dict:\r\n p1_dict[x] = []\r\n p1_dict[x].append(i)\r\n\r\n p2_dict = {}\r\n for i, x in enumerate(truth_clusters):\r\n if x not in p2_dict:\r\n p2_dict[x] = []\r\n p2_dict[x].append(i)\r\n\r\n p1 = list(p1_dict.values())\r\n p2 = list(p2_dict.values())\r\n d = _fast_partition_distance(p1, p2, len(test_clusters))\r\n if d is None:\r\n d = _partition_distance(p1, p2, len(test_clusters))\r\n distance_sum += d\r\n max_sum += len(test_clusters) - 1\r\n return distance_sum / max_sum",
"def calculate_mahalanobis_distance(point, centroid, sd, dimension):\n x = 0 # initialize\n for i in range(dimension):\n x += ((point[i]-centroid[i]) / sd[i]) ** 2 # calculate sum of squares\n\n return x ** 0.5 # return mahalanobis distance",
"def centroid_link(clusters, i, j, dendrogram):\n n_i, n_j = len(dendrogram[i]), len(dendrogram[j])\n a_i = n_i / (n_i + n_j)\n a_j = n_j / (n_i + n_j)\n b = -(n_i * n_j) / (n_i + n_j)**2\n update_fn = lambda d_ik,d_jk: a_i*d_ik + a_j*d_jk + b*clusters[i,j]\n return _general_link(clusters, i, j, update_fn)",
"def update(self, clusters):\n centroids = {}\n for cluster, coordinates in clusters.iteritems():\n sumLat = 0\n sumLong = 0\n for coordinate in coordinates:\n sumLat += float(coordinate[0])\n sumLong += float(coordinate[1])\n centroids[cluster] = (sumLat/float(len(coordinates)), sumLong/float(len(coordinates)))\n return centroids",
"def calculate_centroids(self, data, clusters):\n centroids = []\n for i in range(self.n_clusters):\n mask = clusters == i \n centroids.append(np.mean(data[mask, :], axis = 0)) \n return centroids",
"def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion",
"def rms(trained_data, dist):\n sum = 0\n for i in trained_data:\n point = i[:-2]\n centroid = i[-1]\n distance = (calculate_distance(point,centroid, dist)**2)\n sum +=distance\n return sum"
] | [
"0.67842567",
"0.67745066",
"0.6764742",
"0.67052865",
"0.6705151",
"0.6693539",
"0.6685048",
"0.65840197",
"0.6572326",
"0.65583515",
"0.64906234",
"0.6264555",
"0.6253788",
"0.6250515",
"0.62433743",
"0.62167305",
"0.6175956",
"0.61737406",
"0.6170333",
"0.61438906",
"0.6141153",
"0.61008596",
"0.6050886",
"0.60416114",
"0.6032168",
"0.6011223",
"0.59763324",
"0.5970216",
"0.5967373",
"0.5914577"
] | 0.7704725 | 0 |
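The calculate_distances_for_cluster record above is a method whose attributes (embedding_df, clusterer, selection_method, metric, _embedding_cols) live on an enclosing class not shown in the record. The standalone sketch below reproduces the same distance computation on made-up blob data; it assumes the hdbscan package (whose fitted HDBSCAN objects expose weighted_cluster_centroid / weighted_cluster_medoid), SciPy, and pandas, and all variable names here are illustrative.

import hdbscan
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist

# Two well-separated blobs; HDBSCAN should label them 0 and 1 (noise is -1).
rng = np.random.default_rng(0)
points = np.vstack([rng.normal(0, 0.5, (50, 2)), rng.normal(5, 0.5, (50, 2))])
clusterer = hdbscan.HDBSCAN(min_cluster_size=5).fit(points)

embedding_df = pd.DataFrame(points, columns=['x', 'y'])
embedding_df['cluster'] = clusterer.labels_

cluster_id = 0
members = embedding_df[embedding_df['cluster'] == cluster_id]
rep_point = clusterer.weighted_cluster_centroid(cluster_id)  # or weighted_cluster_medoid(cluster_id)
dists = cdist(rep_point.reshape(1, -1), members[['x', 'y']].values, metric='euclidean')
dist_df = pd.DataFrame(dists[0], columns=['dist_to_rep_point'], index=members.index)
print(dist_df.describe())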
For a given cluster return a pandas dataframe of points ranked by distance to the cluster centroid/medoid | def rank_cluster_points_by_distance(self, cluster_id):
cluster_of_interest = self.embedding_df[self.embedding_df['cluster'] == cluster_id].copy()
if cluster_of_interest.empty:
raise ValueError(f'Cluster id {cluster_id} not found')
if 'dist_to_rep_point' not in self.embedding_df.columns:
distance_df = self.calculate_distances_for_cluster(cluster_id)
cluster_of_interest = cluster_of_interest.merge(distance_df, left_index=True, right_index=True)
cluster_of_interest.sort_values('dist_to_rep_point', inplace=True)
return cluster_of_interest | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_cluster_rankings(self):\n if 'dist_to_rep_point' not in self.embedding_df.columns:\n self.calculate_all_distances_to_center()\n\n self.embedding_df['rank_in_cluster'] = self.embedding_df.groupby('cluster')['dist_to_rep_point'].rank(method='min')",
"def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. \n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist",
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def k_means_clustering(rows, distance=pearson_distance, k=4):\n # Determine the min and max values for each point\n ranges = [(min(row[i] for row in rows), max([row[i] for row in rows])) for i in range(len(rows[0]))]\n\n # Create k RANDOMLY placed centroids\n clusters = [[random() * (ranges[i][1] - ranges[i][0]) + ranges[i][0] for i in range(len(rows[0]))] for j in\n range(k)]\n distances_from_centroids = {}\n last_matches = None\n best_matches = None\n for t in range(100):\n print ('Iteration {}'.format(t))\n best_matches = [[] for i in range(k)]\n\n # Find the centroid that is the closest for each row\n for j in range(len(rows)):\n row = rows[j]\n best_match = 0\n for i in range(k):\n d = distance(clusters[i], row)\n if d < distance(clusters[best_match], row):\n best_match = i\n best_matches[best_match].append(j)\n\n # if the results are the same as last time, then this is complete\n if best_matches == last_matches:\n break\n last_matches = best_matches\n\n # Move the centroids to the average of their members\n for i in range(k):\n avgs = [0.0] * len(rows[0])\n if len(best_matches[i]) > 0:\n for row_id in best_matches[i]:\n for m in range(len(rows[row_id])):\n avgs[m] += rows[row_id][m]\n for j in range(len(avgs)):\n avgs[j] /= len(best_matches[i])\n clusters[i] = avgs\n\n # Chapter 3 Exercise 5: Return along with the cluster results the total distance between all items\n # and their respective centroids\n for i in range(k):\n for j in range(len(best_matches[i])):\n distances_from_centroids[best_matches[i][j]] = distance(clusters[i],rows[best_matches[i][j]])\n return best_matches, distances_from_centroids",
"def distance(point, cluster):\n return np.sqrt((point[0] - cluster[0])**2 + (point[1] - cluster[1])**2)",
"def calculate_distances_for_cluster(self, cluster_id):\n cluster_of_interest = self.embedding_df[self.embedding_df['cluster'] == cluster_id].copy()\n \n if cluster_of_interest.empty:\n raise ValueError(f'Cluster id {cluster_id} not found')\n \n # Don't calculate distances for the noise cluster\n if cluster_id == -1:\n return pd.DataFrame(np.nan, columns=['dist_to_rep_point'], index=cluster_of_interest.index)\n \n if self.selection_method == 'centroid':\n rep_point = self.clusterer.weighted_cluster_centroid(cluster_id)\n if self.selection_method == 'medoid':\n rep_point = self.clusterer.weighted_cluster_medoid(cluster_id)\n \n dists = cdist(rep_point.reshape((1,len(self._embedding_cols))), cluster_of_interest[self._embedding_cols].values, metric=self.metric)\n return pd.DataFrame(dists[0], columns=['dist_to_rep_point'], index=cluster_of_interest.index)",
"def cluster(players_df, columns):\n\toptimal_n=None\n\toptimal_clusters=None\n\toptimal_clusterer=None\n\toptimal_silhouette=-99\n\tfor n in range(2,9):\n\t\tclusterer=KMeans(n_clusters=n)\n\t\tcluster_labels=clusterer.fit_predict(players_df[columns])\n\t\tavg_silhouette=silhouette_score(players_df[columns], cluster_labels)\n\t\tprint('The avg silhouette score for {} clusters is {}'.format(n, avg_silhouette))\n\t\tif avg_silhouette > optimal_silhouette:\n\t\t\toptimal_silhouette=avg_silhouette\n\t\t\toptimal_clusterer=clusterer\n\t\t\toptimal_clusters=cluster_labels\n\t\t\toptimal_n=n\n\tprint('Returning optimal clusters found with n={}'.format(optimal_n))\n\tclusters = {n: [] for n in range(optimal_n)}\n\tfor i, label in enumerate(optimal_clusters):\n\t\tclusters[label].append(\n\t\t\tdict(\n\t\t\t\tplayer_id=players_df.iloc[i]['PERSON_ID'],\n\t\t\t\tfirst_name=players_df.iloc[i]['DISPLAY_LAST_COMMA_FIRST'].split()[-1],\n\t\t\t\tlast_name=players_df.iloc[i]['DISPLAY_LAST_COMMA_FIRST'].split()[0],\n\t\t\t\t)\n\t\t\t)\n\treturn clusters",
"def cluster_dpc_knn(token_dict, cluster_num, k=5, token_mask=None):\n with torch.no_grad():\n x = token_dict['x']\n B, N, C = x.shape\n dist_matrix = torch.cdist(x, x) / C ** 0.5\n if token_mask is not None:\n token_mask = token_mask > 0\n dist_matrix = dist_matrix * token_mask[:, None, :] + (dist_matrix.max() + 1) * ~token_mask[:, None, :]\n dist_nearest, index_nearest = torch.topk(dist_matrix, k=k, dim=-1, largest=False)\n density = (-(dist_nearest ** 2).mean(dim=-1)).exp()\n density = density + torch.rand(density.shape, device=density.device, dtype=density.dtype) * 1e-06\n if token_mask is not None:\n density = density * token_mask\n mask = density[:, None, :] > density[:, :, None]\n mask = mask.type(x.dtype)\n dist_max = dist_matrix.flatten(1).max(dim=-1)[0][:, None, None]\n dist, index_parent = (dist_matrix * mask + dist_max * (1 - mask)).min(dim=-1)\n score = dist * density\n _, index_down = torch.topk(score, k=cluster_num, dim=-1)\n dist_matrix = index_points(dist_matrix, index_down)\n idx_cluster = dist_matrix.argmin(dim=1)\n idx_batch = torch.arange(B, device=x.device)[:, None].expand(B, cluster_num)\n idx_tmp = torch.arange(cluster_num, device=x.device)[None, :].expand(B, cluster_num)\n idx_cluster[idx_batch.reshape(-1), index_down.reshape(-1)] = idx_tmp.reshape(-1)\n return idx_cluster, cluster_num",
"def density_based_cluster(R, clusters):\n c_points = {}\n i = 0\n for cluster in clusters:\n points = set()\n for attr in cluster:\n for point in R[attr]:\n points.add(point)\n c_points[i] = points\n i += 1\n return c_points",
"def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist",
"def matching_clusterization(self):\n result = []\n self.reclustering(self.groups.copy(deep=True), result)\n self.result = pd.DataFrame(result)\n return self.result.sort_values(by=['cluster_size'], ascending=False)",
"def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings",
"def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion",
"def clusterAndDistance(self, data):\n\t\treturn closestClusterAndDistance(data, self.centers)",
"def fit_predict(self, indexes, dataset_obj, sample_weight=None, sort_by_distance_to_mean=False):\n\n # Query data\n query_data = dataset_obj.data_matx[dataset_obj.query_idx]\n query_ids = dataset_obj.query_idx\n # Gallery data\n gallery_data = dataset_obj.data_matx[indexes]\n gallery_ids = indexes\n\n logging.info('Finding cluster mean positions.')\n # Fitted is the gallery id cluster labels in order\n fitted = sk_kmeans.fit_predict(\n self, dataset_obj.data_matx[indexes], None, sample_weight=sample_weight)\n logging.info('Done')\n cluster_means = self.cluster_centers_\n # Cluster ids for each different class\n cluster_ids = [[x for x in range(len(cluster_means))] for i in range(len(query_ids))]\n\n # Measure distances to cluster centres\n cluster_distance_matrix = pairwise_distances(query_data, cluster_means, metric=self.metric)\n\n cluster_ids_swapped = swap_indices(cluster_ids)\n\n cluster_gallery_ids = []\n cluster_gallery_data = []\n for cluster in range(len(cluster_ids_swapped)):\n valid_cluster_gallery_ids = gallery_ids[fitted == cluster]\n valid_cluster_gallery_data = dataset_obj.data_matx[valid_cluster_gallery_ids]\n cluster_gallery_ids.append(valid_cluster_gallery_ids)\n cluster_gallery_data.append(valid_cluster_gallery_data)\n\n gallery_distances_per_cluster = []\n for cluster in cluster_gallery_data:\n # Take only the gallery ids in the cluster\n gallery_distance_for_cluster = pairwise_distances(query_data, cluster, metric=self.metric)\n gallery_distances_per_cluster.append(gallery_distance_for_cluster)\n\n gallery_distances_per_cluster_swapped = swap_indices(gallery_distances_per_cluster) \n\n cluster_gallery_ids_stacked = [cluster_gallery_ids for i in range(len(gallery_distances_per_cluster_swapped))]\n\n sorted_gallery_distances_per_query = []\n sorted_gallery_ids_per_query = []\n for cluster_distances, gallery_distances, gallery_ids, index in zip(cluster_distance_matrix, gallery_distances_per_cluster_swapped, cluster_gallery_ids_stacked, range(len(cluster_distance_matrix))):\n sorted_gallery_distances_per_query.append(sort_by_another(gallery_distances, cluster_distances))\n sorted_gallery_ids_per_query.append(sort_by_another(gallery_ids, cluster_distances))\n\n num_query_items = len(sorted_gallery_distances_per_query)\n num_clusters = len(gallery_ids)\n num_gallery_items = len(gallery_data)\n\n double_sorted_gallery_distances_per_query = [[] for i in range(num_query_items)]\n double_sorted_gallery_ids_per_query = [[] for i in range(num_query_items)]\n for query_item, query_item_id, index1 in zip(sorted_gallery_distances_per_query, sorted_gallery_ids_per_query, range(len(sorted_gallery_distances_per_query))):\n for cluster, cluster_id, index2 in zip(query_item, query_item_id, range(len(query_item))):\n sorted_gallery_distances = sort_by_another(cluster, cluster)\n sorted_gallery_ids = sort_by_another(cluster_id, cluster)\n double_sorted_gallery_distances_per_query[index1].append(sorted_gallery_distances)\n double_sorted_gallery_ids_per_query[index1].append(sorted_gallery_ids)\n\n final_distance_array = []\n final_ids_array = []\n for distances, indexes in zip(double_sorted_gallery_distances_per_query, double_sorted_gallery_ids_per_query):\n final_distance_array.append([item for sublist in distances for item in sublist])\n final_ids_array.append([item for sublist in indexes for item in sublist])\n\n final_distance_array = np.array(final_distance_array)\n final_ids_array = np.array(final_ids_array)\n\n final_updated_distance_array = []\n final_updated_ids_array = []\n for 
distances, indexes, query_id in zip(final_distance_array, final_ids_array, range(num_query_items)):\n mask = [id_is_valid(gal_id, query_id, dataset_obj) for gal_id in indexes]\n redone_distances = np.append(distances[mask], ([-1] * 20))[:num_gallery_items]\n redone_indexes = np.append(indexes[mask], ([-1] * 20))[:num_gallery_items]\n final_updated_distance_array.append(redone_distances)\n final_updated_ids_array.append(redone_indexes)\n\n final_updated_distance_array = np.array(final_updated_distance_array)\n final_updated_ids_array = np.array(final_updated_ids_array)\n\n def gal_to_label(row_of_ids):\n return dataset_obj.labels[row_of_ids]\n\n final_updated_labels_array = np.stack([gal_to_label(row) for row in final_updated_ids_array])\n tensor_array = torch.tensor(np.array(final_updated_labels_array, dtype=np.int32))\n\n ranks = torch.stack([get_rank(row, i, dataset_obj) for i, row in enumerate(tensor_array)]).numpy()\n ranked_count = np.bincount(ranks.flatten())[1:-1]\n # CMC curve (percentage of query items which were in any particular rank or below)\n self.ranked_acc = np.cumsum(ranked_count / dataset_obj.query_idx.shape[0])\n\n return self",
"def pc_cluster(data, clusters):\n dist = MorningstarPCA.pc_distance(data, clusters)\n return MorningstarPCA.get_column_with_min_value(dist)",
"def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)",
"def predict_cluster(point):\n # assuming point belongs to clusters that were computed by fit functions\n return cluster[tuple(point)]",
"def byMedoids(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n medoids = dataset.sample(number_of_clusters) # randomly select medoids from dataset\n\n if verbosity >= 1:\n print(\"INITIAL MEDOIDS\")\n print(medoids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1:\n print(\"ITERATIONS\")\n print(iterations)\n\n clusters = Cluster.calcClusters(dataset, medoids, number_of_clusters, verbosity=verbosity,\n class_header=class_header) # Assign all points to a cluster\n\n base_distortion = Cluster.calcDistortion(medoids, clusters, class_header=class_header)\n # Find base distortion\n\n set_list = [] # set up multiprocessing structures\n work_list = []\n change_list = []\n\n for medoid_row_index, medoid_tuple in enumerate(medoids.iterrows()): # For each medoid\n medoid_frame_index = medoid_tuple[0]\n for datum_index, datum in clusters[medoid_row_index].iterrows(): # For each point in the medoid cluster\n if medoid_frame_index != datum_index: # Do not try to swap a medoid with itself\n temp = medoids.copy() # Make a copy of the medoids DataFrame\n temp.iloc[medoid_row_index] = datum # Swap the medoid in the copy\n temp.index.values[medoid_row_index] = datum.name\n work_list.append((temp, clusters, class_header)) # add calculation arguments to work list\n change_list.append((medoid_row_index, datum)) # add swap info to change list\n\n multiprocess_count = multiprocessing.cpu_count() # Find cpu count\n partition_size = math.ceil(len(work_list) / multiprocess_count) # find size of work list partitions\n if verbosity >= 1: # optionally print work list length\n print(\"Work list length:\")\n print(len(work_list))\n for i in range(multiprocess_count - 1): # repeat for every subset\n sample = work_list[i * partition_size: (i + 1) * partition_size] # take a subset of the work list\n set_list.append(sample) # add that subset as an item in the set list\n set_list.append((work_list[(multiprocess_count - 1) * partition_size:])) # add tailing subset to set list\n if verbosity > 2: # optionally print entire set list.\n print(\"Set list\")\n print(set_list)\n pool = multiprocessing.Pool(processes=multiprocess_count) # create multiprocessing pool\n distortion_lists = pool.map(Cluster.calcDistortionList, set_list) # map set list to processing pool\n pool.close()\n pool.join()\n #print(distortion_lists)\n distortions = sum(distortion_lists, [])\n #print(distortions)\n\n break_flag = True # set break flag in case there are no good changes\n distortion_index = 0\n for medoid_row_index, _ in enumerate(medoids.iterrows()): # For each medoid\n cluster_size = len(clusters[medoid_row_index])\n distortions_subset = distortions[distortion_index: distortion_index + cluster_size]\n distortion_index += cluster_size # keep track of how far we are through the change list\n if len(distortions_subset) != 0: # did this cluster have any possible changes\n best_distortion = min(distortions_subset) # pick the best distortion\n if best_distortion < base_distortion: # if that distortion is better than our old distortion\n best_dist_index = distortions.index(best_distortion)\n best_change = change_list[best_dist_index] # apply the change for that distortion.\n else:\n best_change = None\n else:\n best_change = None\n if verbosity > 0: # Optionally print best changes\n print(\"MEDOIDS\")\n print(medoids)\n print(\"BEST_CHANGE\")\n print(best_change)\n if best_change is not 
None: # make sure there is a change before trying to make it.\n medoids.iloc[best_change[0]] = best_change[1] # swap best change into medoids list\n medoids.index.values[best_change[0]] = best_change[1].name\n break_flag = False\n\n if break_flag: # if we made no changes then the clustering is settled.\n break\n\n medoids = medoids.drop_duplicates() # make sure we do not duplicate medoids\n if return_clusters is True: # optionally return clusters\n return medoids, clusters\n pass\n else:\n return medoids # return medoids dataframe",
"def get_profit_per_cluster(df: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:\n return pd.DataFrame(df.groupby(by='cluster')['profit'].mean(), columns=['profit']).reset_index()",
"def get_closest_samples_for_cluster(self, cluster_id, n_samples=5):\n return self.rank_cluster_points_by_distance(cluster_id).head(n_samples)",
"def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]",
"def get_cluster_assignments(self, data, n_clusters=10):\n kmeans = KMeans(n_clusters=n_clusters)\n kmeans.fit(data)\n pred = kmeans.predict(data)\n return pd.DataFrame(pred)",
"def closestCluster(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n\treturn bestIndex",
"def nearest_cluster_center(point, cluster_centers):\n def sqr_distance_2D(a, b):\n return abs(a.x - b.x) *abs(a.x - b.x) + abs(a.y - b.y) *abs(a.x - b.x) + abs(a.z-b.z)*abs(a.x - b.x)\n min_index = point.group\n min_dist = FLOAT_MAX\n \n for i, cc in enumerate(cluster_centers):\n d = sqr_distance_2D(cc, point)\n if min_dist > d:\n min_dist = d\n min_index = i\n \n return (min_index, min_dist)",
"def __create_cluster_profiles(self,\n clustered_dataframes,\n shrunken_df,\n numerical_features,\n le_map,\n output_path,\n find_nearest_on_cols=False,\n show=True):\n\n def find_nearest(numbers, target):\n \"\"\"\n Find the closest fitting number to the target number\n \"\"\"\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]\n\n cluster_profiles_df = pd.DataFrame(columns=shrunken_df.columns).drop(\n 'Cluster_Name', axis=1)\n rows_count = 0\n for cluster_identfier, cluster_dataframe in \\\n clustered_dataframes.items():\n df = pd.DataFrame(columns=cluster_dataframe.columns)\n df = df.append(cluster_dataframe.mean(), ignore_index=True)\n df.index = [cluster_identfier]\n\n if cluster_dataframe.shape[0] <= 1:\n continue\n\n # Attempt to convert numbers found within the full set of data\n for col in cluster_dataframe.columns:\n if col not in numerical_features or find_nearest_on_cols:\n df[col] = find_nearest(numbers=shrunken_df[\n col].value_counts().index.tolist(),\n target=df[col].values[0])\n\n # Evaluate cluster dataframe by dataframe\n eval_df = pd.DataFrame(columns=cluster_dataframe.columns)\n eval_df = eval_df.append(\n cluster_dataframe.mean(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.min(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.median(),\n ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.max(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.std(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.var(), ignore_index=True)\n eval_df.index = [\"Mean\", \"Min\", \"Median\",\n \"Max\", \"Standard Deviation\", \"Variance\"]\n\n if show:\n print(\"Total found in {0} is {1}\".format(\n cluster_identfier, cluster_dataframe.shape[0]))\n self.__render_mpl_table(\n df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Means_Rounded_To_Nearest_Real_Numbers\",\n header_columns=0,\n col_width=4.0)\n\n self.__render_mpl_table(\n eval_df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Eval_Df\",\n header_columns=0,\n col_width=4.0)\n display(df)\n display(eval_df)\n self.__vertical_spacing(7)\n\n cluster_profiles_df = cluster_profiles_df.append(\n self.__decode_df(df, le_map))\n\n rows_count += cluster_dataframe.shape[0]\n\n return rows_count, cluster_profiles_df",
"def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters",
"def closestClusterAndDistance(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n return (bestIndex, closest)",
"def out_xi(self, sample, cluster):\n min_distance = 0\n first = True\n for cur_cluster in self.cluster_lst:\n sum_distance = 0\n if cur_cluster.get_c_id() != cluster.get_c_id():\n for cur_sample in cur_cluster.get_samples():\n if cur_sample.get_s_id() < sample.get_s_id():\n sum_distance += self.distance_dict[(cur_sample.get_s_id(), sample.get_s_id())]\n elif cur_sample.get_s_id() > sample.get_s_id():\n sum_distance += self.distance_dict[(sample.get_s_id(), cur_sample.get_s_id())]\n cluster_size = len(cur_cluster.get_samples())\n temp_distance = sum_distance / cluster_size\n if first:\n min_distance = temp_distance\n first = False\n min_distance = min(temp_distance, min_distance)\n return min_distance",
"def distance_scipy_spatial(z, k=4, metric='euclidean'):\n d = scipy.spatial.distance.pdist(z, metric)\n d = scipy.spatial.distance.squareform(d)\n # k-NN graph.\n idx = np.argsort(d)[:, 1:k + 1]\n d.sort()\n d = d[:, 1:k + 1]\n return d, idx"
] | [
"0.74079317",
"0.67105204",
"0.64238435",
"0.63077164",
"0.6306624",
"0.6294445",
"0.62434506",
"0.6157537",
"0.6113591",
"0.61009115",
"0.6098193",
"0.607835",
"0.60371375",
"0.6021003",
"0.60085094",
"0.60084623",
"0.5979887",
"0.59464717",
"0.59397215",
"0.5935677",
"0.59211224",
"0.5918804",
"0.59111226",
"0.5909138",
"0.5905096",
"0.59010565",
"0.59002876",
"0.5871066",
"0.585604",
"0.58478856"
] | 0.74946755 | 0 |
Calculate the rank of each point within a cluster | def get_all_cluster_rankings(self):
if 'dist_to_rep_point' not in self.embedding_df.columns:
self.calculate_all_distances_to_center()
self.embedding_df['rank_in_cluster'] = self.embedding_df.groupby('cluster')['dist_to_rep_point'].rank(method='min') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rank():\n return 0",
"def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist",
"def rank_cluster_points_by_distance(self, cluster_id):\n cluster_of_interest = self.embedding_df[self.embedding_df['cluster'] == cluster_id].copy()\n \n if cluster_of_interest.empty:\n raise ValueError(f'Cluster id {cluster_id} not found')\n \n if 'dist_to_rep_point' not in self.embedding_df.columns:\n distance_df = self.calculate_distances_for_cluster(cluster_id)\n cluster_of_interest = cluster_of_interest.merge(distance_df, left_index=True, right_index=True)\n \n cluster_of_interest.sort_values('dist_to_rep_point', inplace=True)\n return cluster_of_interest",
"def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0",
"def __rank__(self) -> int:",
"def get_rank(self) -> int:\n return dist.get_rank()",
"def get_rank(points: int, cutoffs: List[int]) -> int:\n rank = 0\n for i, cutoff in enumerate(cutoffs):\n if points < cutoff:\n if i == 0:\n break\n else:\n rank = i - 1\n break\n else:\n rank = RANK_COUNT - 1\n\n return rank",
"def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r",
"def rank(self):\n return self.lib.calculate_rank()",
"def get_ranked_points(zpoints, dsq):\n pos_map = calc_positions(zpoints, dsq)\n rpoints = calc_ranked_points(pos_map, dsq)\n return rpoints",
"def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)",
"def rank(self):\n rank = 0\n rho = self.array_form[:]\n n = self.size - 1\n size = n + 1\n psize = int(ifac(n))\n for j in xrange(size - 1):\n rank += rho[j]*psize\n for i in xrange(j + 1, size):\n if rho[i] > rho[j]:\n rho[i] -= 1\n psize //= n\n n -= 1\n return rank",
"def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank",
"def predict_cluster(point):\n # assuming point belongs to clusters that were computed by fit functions\n return cluster[tuple(point)]",
"def get_rank() -> int:\n return collective.get_rank()",
"def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")",
"def points(self):\r\n\t\tif self.rank() in self.point_sysm:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn (self.rank() + 2)",
"def get_num_hit_rank(boxes_truth, boxes_pred, rank):\n\n def is_hit(box_truth, box_pred):\n return is_label_match_rank(box_truth, box_pred, rank)\n\n return get_num_hit(boxes_truth, boxes_pred, is_hit)",
"def determine_rank(self, X, err):\n singularValues,_,_,_ = self.compute_svd(X,k=-1)\n ratio = np.array([np.linalg.norm(singularValues[k:]) / np.linalg.norm(singularValues) for k in\n range(len(singularValues) - 1, 0, -1)])\n find_idx = numpy.nonzero(ratio <= err)\n rank = find_idx[0]\n if self.global_rank==0: print('Estimated rank=',rank)\n return rank",
"def rank(self) -> tskit.Rank:\n return combinatorics.RankTree.from_tsk_tree(self).rank()",
"def get_rank(self) -> int:\r\n return self.rank",
"def inter_cost(cluster):\n def _p2p(point):\n _freq_sum = 0\n for pt in cluster.points:\n if point != pt:\n _freq_sum += point.frequency(pt)\n return _freq_sum\n\n return int(sum(map(_p2p, cluster.points)))",
"def _compute_ranks(df, lower_better=True):\n # return df.rank(axis=1, numeric_only=True, ascending=lower_better)\n return df.rank(axis=1, numeric_only=True, ascending=lower_better, method='min')",
"def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks",
"def spatial_rank(self) -> int:\n return self.shape.spatial.rank",
"def calOffsetOfCluster(color_cluster, cluster):\n offset = 0\n for cc in color_cluster:\n l = len(cluster)\n for i in range(l):\n offset_temp = math.pow( (cc[1][i] - cluster[i]), 2 )\n offset += offset_temp\n return offset",
"def test_rank(self):\n self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)\n self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)",
"def intra_cost(points, cluster):\n def _p2p(point):\n _freq_sum = 0\n for pt in points:\n if point != pt and pt not in cluster.points:\n _freq_sum += point.frequency(pt)\n return _freq_sum\n return int(sum(map(_p2p, cluster.points)))",
"def rank(self):\n return self.matrix().rank()",
"def get_rank(self, points):\n sql_command = \"SELECT * FROM points WHERE amount > ?;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [points])\n\n all = cursor.fetchall()\n cursor.close()\n connection.close()\n return len(all) + 1"
] | [
"0.70503414",
"0.7008832",
"0.6786571",
"0.6764138",
"0.67443216",
"0.66818386",
"0.6654572",
"0.6623286",
"0.6598719",
"0.65756667",
"0.65539867",
"0.6456434",
"0.6411865",
"0.6378525",
"0.63583297",
"0.63149124",
"0.6295258",
"0.62677336",
"0.6262239",
"0.6217441",
"0.6161058",
"0.61578333",
"0.61429185",
"0.6127528",
"0.61243725",
"0.6124239",
"0.61024994",
"0.60743976",
"0.6066121",
"0.6062506"
] | 0.7514025 | 0 |
Returns the object located at given coordinates. | def get_object_at_location(self, x, y):
object_map_at_target_location = self.maps.get((x, y))
if not object_map_at_target_location:
return None
return object_map_at_target_location.get_real_object() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_object_at_location(cls, x, y):\n object_map_at_target_location = cls.query\\\n .filter_by(x=x, y=y).one_or_none()\n if not object_map_at_target_location:\n return None\n return object_map_at_target_location.get_real_object()",
"def get_object_at(self, position, ignore=set()):\n for o in self.objects:\n if o in ignore:\n continue\n if position in o.cells:\n return o",
"def findcoordinate(self, *args, **kwargs):\n return _coordsys.coordsys_findcoordinate(self, *args, **kwargs)",
"def check_by_xyz(cls, x=None, y=None, z=None):\n\n location_obj = cls.query.filter(cls.x_coord == x,\n cls.y_coord == y,\n cls.z_coord == z).first()\n return location_obj",
"def getObjectFromPosition(position):\n for object in self.objects:\n if (int(self.position[0]) == int(object.position[0])) and (int(self.position[1]) == int(object.position[1])):\n return object\n\n return None",
"def get_cell_by_coords(self, coords):\n try:\n cell = GameCell.objects.get(row=coords[0], col=coords[1], game=self)\n return cell\n except GameCell.DoesNotExist:\n return None",
"def get_game_piece_object_at_position(self, position):\n\n column, row = self.transpose_position(position)\n\n return self.get_board()[int(row)][int(column)]",
"def _bycoord(self, coord):\n query = \"\"\"SELECT * \n FROM ppmxl \n WHERE circle(coord,0.0006) @> circle(point(%f,%f),0) LIMIT 1;\"\"\" % coord\n result = self.corot.query(query)\n return result",
"def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]",
"def _resolve_object(self, objectname):\n\n service = 'Mast.Name.Lookup'\n params = {'input': objectname,\n 'format': 'json'}\n\n response = self.service_request_async(service, params)\n\n result = response[0].json()\n\n if len(result['resolvedCoordinate']) == 0:\n raise ResolverError(\"Could not resolve {} to a sky position.\".format(objectname))\n\n ra = result['resolvedCoordinate'][0]['ra']\n dec = result['resolvedCoordinate'][0]['decl']\n coordinates = coord.SkyCoord(ra, dec, unit=\"deg\")\n\n return coordinates",
"def find_coordinates(self):\n\n raise NotImplementedError",
"def find_coordinates(self):\n\n raise NotImplementedError",
"def unit_at(x, y, units):\r\n for u in units:\r\n if u.x == x and u.y == y:\r\n return u\r\n return None",
"def locate(x, y):\n position(x * 6, y)",
"def getCellFromPosition(self, xPos, yPos):\n for cell in self.cells:\n if(xPos == cell.x and yPos == cell.y):\n return cell\n return False",
"def getPoint(self):\n return Point(*self.position)",
"def _get_orf_object_by_coords(self,start,end):\n for orfobj in self.input['orfs'].orfs:\n if orfobj.start == start and orfobj.end == end:\n return orfobj\n elif start == None and orfobj.end == end:\n # used in case of SignalP output -> no Orf start coord given!\n return orfobj\n else:\n pass\n else:\n return None",
"def locate_point(self, coord):\n lowest_lat = self.lower_left[0]\n leftmost_lng = self.lower_left[1]\n dist_lat = utils.haversine((coord[0], leftmost_lng), self.lower_left)*1000 # in meters\n dist_lng = utils.haversine((lowest_lat, coord[1]), self.lower_left)*1000 # in meters\n grid_coord = (floor(dist_lng/self.distance), floor(dist_lat/self.distance))\n if grid_coord in self.cells:\n return grid_coord\n return None",
"def get_object_position(self, object_handle):\n res, object_position = vrep.simxGetObjectPosition(self.client_id, object_handle, -1, \\\n vrep.simx_opmode_blocking)\n if res == vrep.simx_return_ok:\n return Point((object_position[0], object_position[1]))\n else:\n print('Remote function call failed with result {0}.'.format(res))\n return ()",
"def create_general_object(self, x, y):\n return self.img[y-self.rad:y+self.rad, x-self.rad:x+self.rad]",
"def cell_from_xy(self,x,y):\n return self.cell_array.item((x,y))",
"def get_piece(self, selected_piece_coords):\n for piece in self.game_pieces:\n if selected_piece_coords[0] == piece.x and selected_piece_coords[1] == piece.y:\n return piece.clone()\n return None",
"def get_display_object(self, x, y):\n if x < 0 or x >= self.width:\n return DisplayObject.StaticObject(chr(0b11110111))\n if y < 0 or y >= self.height:\n return DisplayObject.StaticObject(chr(0b11110111))\n return self.mapArray[y][x].get_display_object()",
"def getPoint(self, x, y):\n return self._c[x*self.__height + y]",
"def get_coord(self):\n return self.coord",
"def __init__(self, coordinates):\n self.coordinates = coordinates",
"def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]",
"def room_xy(room, x, y, value=None):\n return room[x][y]",
"def object_coordinates(*objs, depsgraph=None):\n\n # To be on the safe side, we use the evaluated object after\n # all modifiers etc. applied (done internally by bmesh)\n\n dg = depsgraph or bpy.context.evaluated_depsgraph_get()\n xyz = []\n for obj in objs:\n eval_obj = obj.evaluated_get(dg)\n xyz_obj = [v.co for v in eval_obj.data.vertices]\n xyz.extend(xyz_obj)\n return np.stack(xyz)",
"def coordinates(self):"
] | [
"0.7401787",
"0.65084153",
"0.64662015",
"0.6462374",
"0.6404084",
"0.62852186",
"0.61791605",
"0.6087064",
"0.5851252",
"0.5840479",
"0.58224994",
"0.58224994",
"0.5814853",
"0.5790992",
"0.57650596",
"0.5756242",
"0.5745209",
"0.5740608",
"0.5726151",
"0.5704214",
"0.5667024",
"0.5658999",
"0.565538",
"0.56328195",
"0.5630551",
"0.5629281",
"0.56275266",
"0.5613274",
"0.55913913",
"0.5557284"
] | 0.71457946 | 1 |
Creates a food object randomly somewhere in this world. | def generate_food(self):
x = random.randint(0, self.width)
y = random.randint(0, self.height)
new_food = Food(self.id, x, y)
food_created = self.add_object(new_food)
if not food_created:
existing_object = self.get_object_at_location(x, y)
if isinstance(existing_object, Food):
existing_object.value += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_food(self):\n self.penup()\n self.shape(\"circle\")\n self.color(\"green\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Food {self.x_cordinates} and {self.y_cordinates}\")\n # self.stamp()",
"def generate_food() -> FoodItem:\n presets = random.choice(FOOD_BANK)\n return FoodItem(presets['name'], presets['hp'], presets['msg'])",
"def foodGen():\n pos_w = int(randint(0, canv_w - pt_dim)/pt_dim) * pt_dim\n pos_h = int(randint(0, canv_h - pt_dim)/pt_dim) * pt_dim\n \n food = pg.rect.Rect((pos_w, pos_h), cell)\n \n food_on_snake = 1\n \n # check to see that the food does not fall on top of the snake\n while food_on_snake:\n for snake in snake_pos:\n # if food falls on snake, generate new food position\n if(food.colliderect(snake)):\n food_on_snake = 1\n pos_w = int(randint(0, canv_w - pt_dim)/pt_dim) * pt_dim\n pos_h = int(randint(0, canv_h - pt_dim)/pt_dim) * pt_dim\n food = pg.rect.Rect((pos_w, pos_h), cell)\n break\n else:\n food_on_snake = 0\n return food",
"def __init__(self):\n super(Food, self).__init__()\n self.radius = random.randint(5, 10)\n self.eaten = False",
"def createtown_random(self):\n town = m.Town()\n town.name = town.name + str(len(self.alltowns))\n self.print_mainlog(\n \"A new town, %s, appeared at %d,%d!\" %\n (town.name, town.pos.x, town.pos.y)\n )\n self.alltowns.append(town)",
"def bonus_food(self):\n self.penup()\n self.shape(\"turtle\")\n self.color(\"red\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Bonus Food {self.x_cordinates} and {self.y_cordinates}\")",
"def generate_random_toy() -> Toy:\n dimensions = round(uniform(5, 100), 2)\n rooms_number = randint(1, 5)\n return SantaWorkShop(dimensions, rooms_number, 5)",
"def make_car():\n car = Car() \n car.drop_val = random.randint(0,1)\n\n if car.drop_val == 0:\n car.drop_x = random.randint(77, 400) * 2\n\n elif car.drop_val == 1:\n car.drop_y = random.randint(62, 300) *2\n\n return car",
"def create_food(self, snake):\n while not self.pos:\n x = random.randint(0, 7)\n y = random.randint(0, 7)\n if [x, y] not in snake:\n self.pos = [x, y]",
"def generate_random_candy() -> Candy:\n contains_nuts = True if randint(0, 1) == 0 else False\n pack_size = randint(1, 5)\n return CremeEggs(contains_nuts, pack_size)",
"def create_goat(name):\n weight = random.randint(50, 100)\n insult_damage = 250 - weight\n\n return Goat(name, weight, insult_damage)",
"def setup_food():\n redbug_new_rect = food_rect.copy()\n pos_x = random.randrange(0,screen_width//40)*40\n pos_y = random.randrange(0,(screen_height-40)//40)*40 \n redbug_new_rect.topleft = (pos_x, pos_y)\n index_redbug = redbug_new_rect.collidelist(snake_body_positions)\n if index_redbug >= 0 or [pos_x,pos_y] in wall_positions or [pos_x,pos_y] in hole_positions or [pos_x,pos_y] in xfood_positions or [pos_x,pos_y] in goodfood_positions or [pos_x,pos_y] in badfood_positions :\n setup_food()\n else :\n food_positions.append(redbug_new_rect)",
"def __init__(self, xPos: int, yPos: int, isPredator = False, food=1):\n\n if xPos >= settings.X_SIZE or yPos >= settings.Y_SIZE:\n raise ValueError(\"xPos/yPos cannot be larger than X/Y_SIZE!\")\n self.xPos = xPos\n self.yPos = yPos\n self.isPredator = isPredator\n self.id = random.randint(0, 1000000)\n self.food = food\n self.age = 0\n self.bred = settings.MAXIMUM_BABIES",
"def __new_apple(self):\n apple_position = Position(randint(0, 7), randint(0, 7))\n while apple_position in self._snake.body:\n apple_position = Position(randint(0, 7), randint(0, 7))\n\n self._apple = self.Apple(apple_position)",
"def create_fish():\n if config.F_LIST == []:\n fitem = scene.Fish(randint(2, common.COLS-2),\n randint(common.MIDS_R + 3, common.ROWS-2))\n config.F_LIST.append(fitem)\n elif randint(0, 10) == 1:\n fitem = scene.Fish(randint(2, common.COLS-2),\n randint(common.MIDS_R + 3, common.ROWS-2))\n config.F_LIST.append(fitem)\n\n for i in config.F_LIST:\n i.move(i.x_pos+1)",
"def default_factory(*args, **kwargs):\n obj = RandomGameEntity()\n obj.build(*args, **kwargs)\n return obj",
"def reset_food_level(self):\n food_level = random() * FOOD_DEFAULT\n self.plant = food_level",
"def generateFood():\n temp = [random.randint(0,7), random.randint(0,7)]\n\n snakePixels = snake.getPixels()\n\n #check that the food does not fall in the snake\n while temp in snakePixels:\n temp = [random.randint(0,7), random.randint(0,7)]\n\n return temp",
"def new_car(self):\r\n random_num = random.randint(1, 3)\r\n if random_num == 1:\r\n new_car = Turtle('square')\r\n new_car.shapesize(stretch_wid=1, stretch_len=2)\r\n new_car.penup()\r\n new_car.color(random.choice(COLOURS))\r\n random_y = random.randint(-240, 270)\r\n new_car.goto(280, random_y)\r\n self.all_cars.append(new_car)",
"def create_the_world(cls):\n from muddery.server.mappings.element_set import ELEMENT\n world = ELEMENT(\"WORLD\")()\n world.setup_element(\"\")\n cls._world_data = world",
"def randomHelmet():\n return random.choice(HELMETS)",
"def random_placement(area):\n\n area.create_houses(True)\n\n for house in area.houses:\n place_house(area, house)",
"def simulate(self):\n self._t = self._t + 1\n if self._t == self._cycle:\n # End of a season, start of the next one. Year is also cyclic that is WINTER -> SPRING.\n self._t = 0\n self._season = self._season.next()\n\n # When the ammount of newly produced food in a cell is over and the cell can seed we\n # randomly choose another spot where some random ammount of newly produced food should\n # be stored.\n for i in range(self._height):\n for j in range(self._width):\n if self._env[i][j].get_newly() == 0 and not self._seeded[i][j]:\n # if the cell become empty just now seed in once in a randomn cell on the grid.\n self._seeded[i][j] = True\n cap = self._height + self._width\n while cap > 0:\n seedi = random.randint(0, self._height - 1)\n seedj = random.randint(0, self._width - 1)\n\n production_cap = self._food_per_season[self._season.value]\n\n production_cap -= self._env[seedi][seedj].get_newly()\n\n if production_cap > 0:\n seed_amount = random.randint(1, production_cap)\n self._env[seedi][seedj].produce(seed_amount)\n self._seeded[seedi][seedj] = False\n break\n\n cap = cap - 1",
"def faker() -> Faker:\n\n return Faker()",
"def fake_init():\n return Faker()",
"def spawn(self, y, x, h, w):\n self.pos = (np.random.randint(y, y + h), np.random.randint(x, x + w))",
"def make_planet():\n dice = random.randint(0, 100)\n if dice < 60:\n status = 'nopressure'\n else:\n status = random.choice(list(constants.STATUS.keys()))\n\n return Planet(\n name=random.choice(constants.NAMES),\n x=random.randint(constants.XMIN, constants.MAXWIDTH - 1),\n y=random.randint(constants.YMIN, constants.MAXHEIGHT - 1),\n system_size=random.choice(list(constants.SYSTEMSIZE.keys())),\n tech_level=random.choice(list(constants.TECHLEVEL.keys())),\n regim=random.choice(list(constants.REGIM.keys())),\n special=random.choice(list(constants.SPECIALRESOURCES.keys())),\n status=status,\n price_slip={},\n shipyard=[])",
"def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory",
"def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory",
"def create_object(object_name):\n if object_name == 'deathstar':\n return Deathstar()\n elif object_name == 'mercury':\n return Mercury()\n elif object_name == 'venus':\n return Venus()\n elif object_name == 'mars':\n return Mars()\n elif object_name == 'earth':\n return Earth()\n elif object_name == 'moon':\n return Moon()\n elif object_name == 'tatooine':\n return Tatooine()\n elif object_name == 'mordor':\n return Mordor()\n elif object_name == 'xwing':\n return Xwing()"
] | [
"0.7924229",
"0.7388662",
"0.68496233",
"0.67906237",
"0.6670745",
"0.66257876",
"0.6595156",
"0.64915293",
"0.63676316",
"0.63369346",
"0.63189137",
"0.6317571",
"0.61416024",
"0.6115891",
"0.6053006",
"0.6016661",
"0.60126746",
"0.6003472",
"0.58886176",
"0.58674264",
"0.5827067",
"0.58264446",
"0.5783772",
"0.5754089",
"0.57299376",
"0.57297933",
"0.56958824",
"0.56951034",
"0.56951034",
"0.5685313"
] | 0.79373443 | 0 |
Return True if the choice's value is empty string or None. | def _choice_has_empty_value(choice):
value, _, crige = choice
return value is None or value == '' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def non_empty(val):\n return val is not None and val != \"\"",
"def is_str_none_or_empty(val):\n if val is None:\n return True\n if isinstance(val, string_types):\n val = val.strip()\n if not val:\n return True\n return False",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def empty(self, value):\r\n return value is None",
"def is_empty_value(cls, value):\n return not value or cls.is_placeholder_value(value)",
"def empty(self, value):\n return value is None",
"def is_empty(self, value, context):\n return (value is None)",
"def is_null_or_empty(string_val):\n if string_val and string_val.strip():\n return False\n return True",
"def is_empty(value):\n logger.info('is_empty value:%s' % value )\n if not value.strip(' '):\n return True\n else:\n return False",
"def check_empty_string(value: str):\n if not value:\n return True\n if not value.strip():\n return True\n else:\n return False",
"def not_set(string):\n if string is None:\n return True\n elif string == '':\n return True\n return False",
"def empty(self, value):\r\n return not value",
"def is_empty(val):\n return val in [None, ''] or val.isspace()",
"def non_empty_string(value):\n return value and bool(value.strip())",
"def is_empty_str(val):\n s = str(val)\n if not isinstance(s, str):\n return False\n if not s.strip():\n return True\n else:\n return False",
"def empty(self, value):\n return not value",
"def _is_bumf(value):\n if type(value) in (unicode, str):\n return value.strip() == ''\n return value is None",
"def is_empty(val):\n if val is None or isinstance(val, Sized) and len(val) == 0: # Empty string is also Sized of len 0\n return True\n return False",
"def is_empty(self):\n return not self._text",
"def query_is_empty(input_string):\n if re.match(r'\\A\\s*\\Z', input_string) is None:\n return True\n else:\n return False",
"def is_empty(val):\n return not bool(val)",
"def _is_null_value(self, value):\n if value is None:\n return True\n\n if IS_PY3:\n # Python 3.X\n if isinstance(value, str) and len(value) == 0:\n return True\n else:\n # Python 2.X\n if isinstance(value, basestring) and len(value) == 0: # NOQA: F821\n return True\n\n # TODO: This should probably be removed when solved in core Solr level?\n return False",
"def IsEmptyString (s) :\n if s is None : return True\n elif isinstance (s, str) :\n return len (s) == 0 \n else :\n raise PQHException (\"the type is unexpected %s\" % str (type (s)))",
"def is_non_empty_value(value):\n if value is None:\n return False\n if isinstance(value, str) and len(value.strip()) == 0:\n return False\n if (isinstance(value, list) or isinstance(value, dict)) and not value:\n return False\n return True",
"def is_empty(self) -> bool:",
"def check_empty(value, label):\n if value == u'':\n flash(label + \" Is Empty\")",
"def is_empty(string):\n return string == None or re.sub(\"\\\\s+\", \"\", string) == \"\""
] | [
"0.7685549",
"0.76462",
"0.74445015",
"0.74445015",
"0.74445015",
"0.74445015",
"0.74445015",
"0.73680663",
"0.7361419",
"0.73225313",
"0.7155328",
"0.7097266",
"0.7065049",
"0.7033588",
"0.70299554",
"0.6994321",
"0.69693965",
"0.691823",
"0.6896403",
"0.687022",
"0.6839705",
"0.6800078",
"0.6761992",
"0.67488956",
"0.6747428",
"0.6737491",
"0.6737387",
"0.6715744",
"0.6683098",
"0.66769934"
] | 0.8938283 | 0 |
Return a list of optgroups for this widget. | def optgroups(self, name, value, attrs=None):
groups = []
has_selected = False
for index, (option_value, option_label, option_crige) in enumerate(self.choices):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_value, option_label, option_crige)]
groups.append((group_name, subgroup, index))
for subvalue, sublabel, subextra in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected))
has_selected |= selected
subgroup.append(
self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, crige=option_crige))
if subindex is not None:
subindex += 1
return groups | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_options(self):\n return [o for g in self.parser.option_groups for o in g.option_list]",
"def optgroups(self, name, value, attrs=None):\n options = []\n\n for index, (name, product_data) in enumerate(self.product_fields.items()):\n quantity = product_data['quantity']\n name = product_data['name']\n price = product_data['price']\n if index:\n label = 'product_{}'.format(str(index))\n else:\n label = 'product'\n\n options.append({\n 'value': quantity,\n 'price': price,\n 'name': 'products',\n 'label': name,\n 'type': self.input_type,\n 'template_name': self.option_template_name,\n 'wrap_label': True,\n 'index': index\n })\n\n return options",
"def groups(self):\n return self.get_data(\"groups\")",
"def groups(self):\n return []",
"def settings_group_options():\n return [('', _('No group')), *[(str(a.id), str(a)) for a in Group.objects.all()]]",
"def list_groups(self):\n return self.get_admin(\"groups\")",
"def getGroups(self):\n return [g[0] for g in grp.getgrall()]",
"def groups(self):\n return self._groups",
"def groups(self):\n return self._groups",
"def groups(self):\n return self._groups",
"def getGroups():\r\n return Group.getGroups()",
"def get_group_options(self, table_id):\n table_url = self.base_url + \"/table?table=\" + str(table_id)\n html_text = self.fetch(table_url)\n restrict_group_select = re.search(r'<select id=\"restrictToGroup\">([\\s\\S]*?)<\\/select>', html_text)[0]\n options = re.findall(r'\"(\\d*)\">([^<]*)', restrict_group_select)\n return options",
"def groups(self):\n # type: (...) -> Set[str]\n return self._groups",
"def get_groups(self):\n return Client._get(self)",
"def get_options(self):\n if self.parent is not None:\n all_options = self.parent.get_options()\n else:\n all_options = []\n all_options.extend(self.options)\n return all_options",
"def get_group_options(\n config_module: str,\n group_name: str,\n results_filter: Optional[ObjectType] = ObjectType.CONFIG,\n) -> List[str]:\n cl = _get_config_loader(config_module)\n group_options = cl.get_group_options(group_name)\n return group_options",
"def all_options(self):\n # More reliable using javascript\n options = self.browser.execute_script(self.ALL_OPTIONS, self.browser.element(self))\n return [self.Option(normalize_space(unescape(option[0])), option[1]) for option in options]",
"def groups(self):\r\n return resources.Groups(self)",
"def groups(self, protocol=None): \n return ProtocolPurpose.group_choices",
"def get_all_options(self): \n return self._options.items()",
"def _all_cli_opts(self):\n for item in self._cli_opts:\n yield item['opt'], item['group']",
"def get_default_options():\n return GROUPS_.values()",
"def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out",
"def get_cli_groups():\n\n return get_component(CLIPackage.COMPONENT_NAME).get_cli_groups()",
"def getListOfGroups(self, *args):\n return _libsbml.GroupsModelPlugin_getListOfGroups(self, *args)",
"def get_pingroups(self):\n return self.groups[:]",
"def get_all_groups(self):\n return self.groups + ['all']",
"def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"groups\")",
"def get_options(self):\n return []",
"def list(self):\n return self._options"
] | [
"0.7063895",
"0.69440085",
"0.67643887",
"0.65815574",
"0.6523809",
"0.6515766",
"0.6399603",
"0.6360311",
"0.6360311",
"0.6360311",
"0.63217825",
"0.63090414",
"0.62958103",
"0.62820065",
"0.6253235",
"0.62510276",
"0.6211395",
"0.62001765",
"0.6191107",
"0.61584747",
"0.61467355",
"0.613493",
"0.6134833",
"0.6084867",
"0.60616654",
"0.60328186",
"0.599813",
"0.598799",
"0.5969412",
"0.59609824"
] | 0.75911134 | 0 |
Sets up the cairo context and pango layout | def set_up_pangocairo(self, widget, event):
# Create the cairo context
self.cr = self.window.cairo_create()
#Create a pango layout
self.pg = self.cr.create_layout()
# Restrict Cairo to the exposed area; avoid extra work
self.cr.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
self.cr.clip()
if self.set_pc:
self.desc = pango.FontDescription("sans normal")
pango.FontDescription.set_size(self.desc, int(self.zoom*1024))
self.pg.set_font_description(self.desc)
#Only reset cairo and pango if new file is opened
self.set_pc = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_on_surface(surface):\n pangocairo_ctx = pangocairo.CairoContext(cairo.Context(surface))\n layout = pangocairo_ctx.create_layout()\n\n pango_ctx = layout.get_context()\n if language is not None:\n pango_ctx.set_language(pango.Language(language))\n\n if rtl:\n if vertical:\n base_dir = pango.DIRECTION_TTB_RTL\n else:\n base_dir = pango.DIRECTION_RTL\n alignment = pango.ALIGN_RIGHT\n else:\n if vertical:\n base_dir = pango.DIRECTION_TTB_LTR\n else:\n base_dir = pango.DIRECTION_LTR\n alignment = pango.ALIGN_LEFT\n\n pango_ctx.set_base_dir(base_dir)\n layout.set_alignment(alignment)\n\n layout.set_width(width * pango.SCALE)\n layout.set_spacing((line_spacing-font_size) * pango.SCALE)\n\n # TODO: use ctypes to wrap fontconfig to avoid using the system's fonts\n font = pango.FontDescription()\n font.set_family(family)\n font.set_size(font_size * pango.SCALE)\n font.set_style(style)\n font.set_weight(weight)\n layout.set_font_description(font)\n\n layout.set_text(text)\n\n# # Doesn't work for some reason\n# pango_ctx.set_base_gravity(pango.GRAVITY_AUTO)\n# matrix = pango_ctx.get_matrix()\n# matrix.rotate(90)\n# pango_ctx.set_matrix(matrix)\n# layout.context_changed()\n\n extents = layout.get_pixel_extents()\n top_usage = min(extents[0][1], extents[1][1], 0)\n bottom_usage = max(extents[0][3], extents[1][3])\n\n pangocairo_ctx.set_antialias(cairo.ANTIALIAS_GRAY)\n pangocairo_ctx.set_source_rgb(1, 1, 1) # White background\n pangocairo_ctx.paint()\n\n pangocairo_ctx.translate(0, -top_usage)\n pangocairo_ctx.set_source_rgb(0, 0, 0) # Black text color\n pangocairo_ctx.show_layout(layout)\n\n return bottom_usage - top_usage",
"def draw(self, context):\n rect = self.get_allocation()\n #initial context settings: line width & font\n context.set_line_width(1)\n font = gtk.Label().style.font_desc.get_family()\n context.select_font_face(font,cairo.FONT_SLANT_NORMAL, \\\n cairo.FONT_WEIGHT_NORMAL)\n \n self.draw_basics(context, rect)\n if self.data:\n self._do_draw(context, rect)",
"def on_draw(self, ctx):\n gradient = cairo.LinearGradient(0, self.height * 2, 0, 0)\n gradient.add_color_stop_rgba(0.1, 0.1, 0.1, 0.1, 0.7)\n gradient.add_color_stop_rgba(0.1, 0.1, 0.1, 0.1, 0.75)\n ctx.set_source(gradient)\n self.draw_rectangle_advanced (ctx, self.pos[0], self.pos[1], self.__width - 20,\n self.__height - 280,\n rounded_angles=(5, 5, 5, 5),\n fill=True, border_size=1,\n border_color=(0, 0, 0, 0.25),\n shadow_size=10,\n shadow_color=(0, 0, 0, 0.25))\n # Make sure we have a pango layout initialized and updated.\n if self.p_layout == None :\n self.p_layout = ctx.create_layout()\n else:\n ctx.update_layout(self.p_layout)\n p_fdesc = pango.FontDescription()\n p_fdesc.set_family(\"Garuda\");\n p_fdesc.set_size(20 * pango.SCALE)\n self.p_layout.set_font_description(p_fdesc);\n pos = [(self.pos[0]+self.__width/2-40), self.pos[1]+5]\n ctx.set_source_rgb(1, 1, 1)\n x=0;\n self.__selected = None\n ctx.save()\n ctx.translate(*pos)\n txt = \"To-Do\";\n self.p_layout.set_markup('%s' % txt)\n ctx.show_layout(self.p_layout)\n ctx.restore()\n x += 1\n p_fdesc.set_family(\"Free Sans\");\n p_fdesc.set_size(10 * pango.SCALE)\n self.p_layout.set_font_description(p_fdesc);\n pos = [self.pos[0]+20, self.pos[1] + 60];\n self.__position = [];\n for item in self.__items:\n ctx.set_source(gradient);\n ctx.set_line_width (10);\n ctx.rectangle(self.pos[0]-20,pos[1]+4,7,7);\n ctx.fill();\n self.__position.append((pos[1]+4,item));\n self.draw_rectangle_advanced (ctx, self.pos[0], pos[1]-14, self.__width - 20,\n\t\t\t\t\t\t\t\t self.__height - (295),\n\t\t\t\t\t\t\t\t rounded_angles=(5, 5, 5, 5),\n\t\t\t\t\t\t\t\t fill=True, border_size=1,\n\t\t\t\t\t\t\t\t border_color=(0, 0, 0, 0.25),\n\t\t\t\t\t\t\t\t shadow_size=10,\n\t\t\t\t\t\t\t\t shadow_color=(0, 0, 0, 0.25))\n ctx.set_source_rgb(0.8,0.8,0.8);\n ctx.save()\n ctx.translate(*pos)\n self.p_layout.set_markup('%s' % item)\n ctx.show_layout(self.p_layout)\n pos[1] += 30\n ctx.restore()\n x += 1",
"def __enter__(self):\n if self.back_flag:\n # Set LaTeX params\n matplotlib.rcParams.update({ \n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n \"pgf.preamble\": \"\\n\".join( self.packages ),\n })\n plt.rc('font', size=self.SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=self.BIGGER_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=self.MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=self.SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=self.SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=self.MEDIUM_SIZE) # legend fontsize\n plt.rc('figure', titlesize=self.BIGGEST_SIZE) # fontsize of the figure title",
"def create_png(text, output_path, family='Noto Sans',\n language=None, rtl=False, vertical=False,\n width=1370, font_size=32, line_spacing=50,\n weight=pango.WEIGHT_NORMAL, style=pango.STYLE_NORMAL):\n\n def draw_on_surface(surface):\n \"\"\"Draw the string on a pre-created surface and return height.\"\"\"\n pangocairo_ctx = pangocairo.CairoContext(cairo.Context(surface))\n layout = pangocairo_ctx.create_layout()\n\n pango_ctx = layout.get_context()\n if language is not None:\n pango_ctx.set_language(pango.Language(language))\n\n if rtl:\n if vertical:\n base_dir = pango.DIRECTION_TTB_RTL\n else:\n base_dir = pango.DIRECTION_RTL\n alignment = pango.ALIGN_RIGHT\n else:\n if vertical:\n base_dir = pango.DIRECTION_TTB_LTR\n else:\n base_dir = pango.DIRECTION_LTR\n alignment = pango.ALIGN_LEFT\n\n pango_ctx.set_base_dir(base_dir)\n layout.set_alignment(alignment)\n\n layout.set_width(width * pango.SCALE)\n layout.set_spacing((line_spacing-font_size) * pango.SCALE)\n\n # TODO: use ctypes to wrap fontconfig to avoid using the system's fonts\n font = pango.FontDescription()\n font.set_family(family)\n font.set_size(font_size * pango.SCALE)\n font.set_style(style)\n font.set_weight(weight)\n layout.set_font_description(font)\n\n layout.set_text(text)\n\n# # Doesn't work for some reason\n# pango_ctx.set_base_gravity(pango.GRAVITY_AUTO)\n# matrix = pango_ctx.get_matrix()\n# matrix.rotate(90)\n# pango_ctx.set_matrix(matrix)\n# layout.context_changed()\n\n extents = layout.get_pixel_extents()\n top_usage = min(extents[0][1], extents[1][1], 0)\n bottom_usage = max(extents[0][3], extents[1][3])\n\n pangocairo_ctx.set_antialias(cairo.ANTIALIAS_GRAY)\n pangocairo_ctx.set_source_rgb(1, 1, 1) # White background\n pangocairo_ctx.paint()\n\n pangocairo_ctx.translate(0, -top_usage)\n pangocairo_ctx.set_source_rgb(0, 0, 0) # Black text color\n pangocairo_ctx.show_layout(layout)\n\n return bottom_usage - top_usage\n\n temp_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 0, 0)\n calculated_height = draw_on_surface(temp_surface)\n\n real_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,\n width, calculated_height)\n draw_on_surface(real_surface)\n\n real_surface.write_to_png(output_path)",
"def setup_draw(self):\n pass",
"def draw(self, ctx):\n self.set_size(self.width, self.available_height) \n #Drawing cell lines\n for i in range(0, (max(self.available_width,int(self.width)) / self.cell_width) + 1):\n ctx.move_to(i * self.cell_width, 0)\n ctx.line_to(i * self.cell_width, self.available_height)\n ctx.set_line_width(1)\n red = float(self.get_style().fg[gtk.STATE_INSENSITIVE].red) / 65535\n green = float(self.get_style().fg[gtk.STATE_INSENSITIVE].green) / 65535\n blue = float(self.get_style().fg[gtk.STATE_INSENSITIVE].blue) / 65535\n ctx.set_source_rgba(red, green, blue, 0.3)\n ctx.stroke()\n greatest = self.calculate_greatest() \n # Drawing scale lines\n step = greatest / 5\n ctx.save()\n ctx.set_dash([5],5)\n for i in range(int(step), int(greatest),5):\n ctx.move_to(0, self.available_height - (self.available_height - 20) * i / greatest)\n ctx.line_to(max(self.available_width,int(self.width)), self.available_height - (self.available_height - 20) * i / greatest)\n ctx.set_source_rgba(red,green,blue,0.3)\n ctx.stroke()\n\n ctx.restore()\n # Drawing the diagram\n loadingCopy = copy.deepcopy(self.loading)\n colorIndex = 0\n loadingKeys = loadingCopy.keys()\n loadingKeys.sort()\n for key in loadingKeys:\n while loadingCopy[key] != []:\n x1, y1 = loadingCopy[key].pop(0)\n if loadingCopy[key] != []:\n x2, y2 = loadingCopy[key][0]\n else:\n x2 = self.duration\n ctx.line_to (x1 * self.cell_width, self.available_height - (self.available_height - 20) * y1 / greatest)\n ctx.line_to (x2 * self.cell_width, self.available_height - (self.available_height - 20) * y1 / greatest)\n \n ctx.set_line_width(2)\n ctx.set_source_rgba(self.colors[colorIndex][0], self.colors[colorIndex][1], self.colors[colorIndex][2],0.5)\n ctx.stroke()\n colorIndex = (colorIndex + 1) % 11",
"def Init(self):\r\n\r\n base_colour = GetBaseColour()\r\n darker1_colour = StepColour(base_colour, 85)\r\n darker2_colour = StepColour(base_colour, 75)\r\n darker3_colour = StepColour(base_colour, 60)\r\n darker4_colour = StepColour(base_colour, 40)\r\n\r\n self._background_colour = base_colour\r\n self._background_gradient_colour = StepColour(base_colour, 180)\r\n\r\n isMac = wx.Platform == \"__WXMAC__\"\r\n\r\n if isMac:\r\n self._active_caption_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)\r\n else:\r\n self._active_caption_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_ACTIVECAPTION)\r\n\r\n self._active_caption_gradient_colour = LightContrastColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n self._active_caption_text_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT)\r\n self._inactive_caption_colour = darker1_colour\r\n self._inactive_caption_gradient_colour = StepColour(base_colour, 97)\r\n self._inactive_caption_text_colour = wx.BLACK\r\n \r\n self._sash_brush = wx.Brush(base_colour)\r\n self._background_brush = wx.Brush(base_colour)\r\n self._border_pen = wx.Pen(darker2_colour)\r\n self._gripper_brush = wx.Brush(base_colour)\r\n self._gripper_pen1 = wx.Pen(darker4_colour)\r\n self._gripper_pen2 = wx.Pen(darker3_colour)\r\n self._gripper_pen3 = wx.WHITE_PEN",
"def draw(font_string,font_size,lang,alphabets,outdir=\".\"): # language, font file name, font full path, font size, characters\n \n \n image_dir=lang+\".\"+\"images\"\n if(os.path.exists(image_dir)):\n pass\n else:\n os.mkdir(image_dir)\n \n #Using a font\n #font= ImageFont.truetype(font,fsz)\n boxfile=image_dir+\"/\"+\"bigimage.box\"\n f=open(boxfile,\"w\")\n wt = 4000\n ht = 4000 #modified later using a separate script\n\t\n bigimage=Image.new(\"L\",(wt,ht),255)\t#change here for inverting\n bigdraw=ImageDraw.Draw(bigimage)\n x=y=10\n count=0\n for akshar in alphabets:\n akshar.strip() #remove nasty characters\n \n #I shall now create an image with black bgc and white font color. One\n #getbbox() determines the bounding box values I shall invert the image.\n #This has to be done since getbbox() only finds bounding box values for\n #non-zero pixels (read as white), but tesseract-ocr runs on the exact\n #opposite bgc fgc combination. Contact [email protected].\n \n \n #The lines below are pango/cairo code \n surface = cairo.ImageSurface(cairo.FORMAT_A8, font_size*4, font_size*3)\n context = cairo.Context(surface)\n\n pc = pangocairo.CairoContext(context)\n\n layout = pc.create_layout()\n layout.set_font_description(pango.FontDescription(font_string))\n layout.set_text(akshar)\n print akshar\n\n # lines take care of centering the text.\n width, height = surface.get_width(), surface.get_height()\n w, h = layout.get_pixel_size()\n position = (10,10) #most likely this part messes up when you try to change the size within this script. It is suggested to use the separate script.\n context.move_to(*position)\n pc.show_layout(layout)\n surface.write_to_png(\"pango.png\")\n\t\n #Here we open the generated image using PIL functions\n temp_image=Image.open(\"pango.png\") #black background, white text\n draw = ImageDraw.Draw(temp_image)\n bbox = temp_image.getbbox()\n deltax=bbox[2]-bbox[0]\n deltay=bbox[3]-bbox[1]\n\n \n print bbox\n new_image=temp_image.crop(bbox)\n temp_image=temp_image.load()\n inverted_image = ImageChops.invert(new_image) #White background, black text\n\t\n\tinverted_image.save(image_dir+\"/\"+str(count)+\".png\")\n\tbigimage.paste(inverted_image,(x,y))\n\tos.unlink(image_dir+\"/\"+str(count)+\".png\")\n\tcount = count+1\n\t#bigimage.load()\n bigbox=(x,y,x+deltax,y+deltay)\n print bigbox\n draw=ImageDraw.Draw(bigimage)\n\t#draw.rectangle(bigbox,None,100)\n x=bigbox[2]+5\n if x>(wt-10):\n x=10; y=y+40\n\n os.unlink(\"pango.png\") #delete the pango generated png\n\n line=akshar+\" \"+str(bigbox[0]-1)+\" \"+str(ht-(bigbox[1]+deltay)-1)+\" \"+str(bigbox[2]+1)+\" \"+str(ht-(bigbox[3]-deltay)+1) # this is the line to be added to the box file\n\tf.write(line+'\\n')\n\n\t#degrade code starts\n\tstrip=[deltax*.2,deltax*.4,deltax*.7]\n\tfor values in range(0,2):\n\t\tdistort2=inverted_image\n\t\tfor wai in range(0,deltay):\n\t\t\tfor ex in range(strip[values],strip[values]+1):\n\t\t\t\tdistort2.putpixel((ex,wai),255)\n\t\tbigbox=(x,y,x+deltax,y+deltay)\n\t\t#draw.rectangle(bigbox,None,10)\n\t\tline=akshar+\" \"+str(bigbox[0]-1)+\" \"+str(ht-(bigbox[1]+deltay)-1)+\" \"+str(bigbox[2]+1)+\" \"+str(ht-(bigbox[3]-deltay)+1) # this is the line to be added to the box file\n \tf.write(line+'\\n')\n\t\tbigimage.paste(distort2,(x,y))\n\t\tx=bigbox[2]+5\n \tif x>(wt-10):\n \t\tx=10; y=y+40\n\t\t\n\t\t\t\n\t#degrade code ends\n \n #distort.distort(filename2,bbox,fsz,akshar)\n \n \n \n #bigimage.save(image_dir+\"/\"+\"bigimage.tif\",\"TIFF\") #useful to generate merged file for all images when using 
default sizes.\n f.close()\n train.train(lang,outdir)",
"def __init__(self, *args, **kwargs):\n _gdi_.GraphicsFont_swiginit(self,_gdi_.new_GraphicsFont(*args, **kwargs))",
"def set_initial_values(self):\n #Stores each line of the text file in a list\n self.text = []\n \n #Scrolling distance\n self.scroll = 0\n\n #Zooming level (font size) \n self.zoom = 12\n\n #Factor by which is decrement self.zoom\n self.factor = 0\n\n #Number of tabs spaces before a line\n self.indent = 0\n\n #Flag to only set up pango descriptions only once \n self.set_pc = 1\n\n #list of indetation level of all lines\n self.tab_index = []\n\n #Total line count\n self.line_count = 0\n\n #line number of line rendered off top of window \n self.min_text = 0\n #line number of line rendered off bottom of window \n self.max_text = 50\n\n #y position for cairo for the text at the top\n self.min_cairo = 20\n\n #y position for text at bottom\n self.max_cairo = 20\n\n #x positiong for indented text\n self.tab_cairo = 20",
"def paint(self):\r\n cbase = self.gox.curr_base\r\n cquote = self.gox.curr_quote\r\n self.sort_currency_list_if_changed()\r\n self.win.bkgd(\" \", COLOR_PAIR[\"status_text\"])\r\n self.win.erase()\r\n\r\n #\r\n # first line\r\n #\r\n line1 = \"Market: %s%s | \" % (cbase, cquote)\r\n line1 += \"Account: \"\r\n if len(self.sorted_currency_list):\r\n for currency in self.sorted_currency_list:\r\n if currency in self.gox.wallet:\r\n line1 += currency + \" \" \\\r\n + goxapi.int2str(self.gox.wallet[currency], currency).strip() \\\r\n + \" + \"\r\n line1 = line1.strip(\" +\")\r\n else:\r\n line1 += \"No info (yet)\"\r\n\r\n #\r\n # second line\r\n #\r\n line2 = \"\"\r\n if self.gox.config.get_bool(\"goxtool\", \"show_orderbook_stats\"):\r\n str_btc = locale.format('%d', self.gox.orderbook.total_ask, 1)\r\n str_fiat = locale.format('%d', self.gox.orderbook.total_bid, 1)\r\n if self.gox.orderbook.total_ask:\r\n str_ratio = locale.format('%1.2f',\r\n self.gox.orderbook.total_bid / self.gox.orderbook.total_ask, 1)\r\n else:\r\n str_ratio = \"-\"\r\n\r\n line2 += \"sum_bid: %s %s | \" % (str_fiat, cquote)\r\n line2 += \"sum_ask: %s %s | \" % (str_btc, cbase)\r\n line2 += \"ratio: %s %s/%s | \" % (str_ratio, cquote, cbase)\r\n\r\n line2 += \"o_lag: %s | \" % self.order_lag_txt\r\n line2 += \"s_lag: %.3f s\" % (self.gox.socket_lag / 1e6)\r\n self.addstr(0, 0, line1, COLOR_PAIR[\"status_text\"])\r\n self.addstr(1, 0, line2, COLOR_PAIR[\"status_text\"])",
"def prepare_canvas(width=350.0, fontsize=12, fontsize_small=10, lw=0.5):\n try:\n width = width.split()\n width_value = float(width[0])\n width_units = width[1]\n except AttributeError:\n width_value = float(width)\n width_units = 'pt' # assume points\n\n inches_per_pt = 1.0/72.27 # According to TeX\n inches_per_cm = 1.0/2.54 #\n if width_units == 'pt':\n fig_width = width_value * inches_per_pt\n elif width_units == 'cm':\n fig_width = width_value * inches_per_cm\n elif width_units == 'in':\n fig_width = width_value\n\n golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio\n fig_height = 0.9*fig_width # height in inches\n fig_size = [fig_width,fig_height]\n\n rc('figure', figsize=fig_size)\n rc('lines', linewidth=lw)\n rc('font', family='sans-serif', serif=['Latin Modern Roman','Palatino'], size=fontsize)\n rc('text', usetex=False)\n rc('legend', fontsize=fontsize)\n rc('axes', labelsize=fontsize)\n rc('xtick', labelsize=fontsize_small)\n rc('ytick', labelsize=fontsize_small)",
"def draw(self, output_file):\n self.calc_width()\n self.calc_height()\n\n surface = cairo.SVGSurface(output_file, self.width, self.height)\n ctx = cairo.Context(surface)\n\n ## change background color\n ctx.rectangle(0, 0, self.width, self.height)\n ctx.set_source_rgb(1, 1, 1)\n ctx.fill()\n\n ## Variables\n line_spacing = 125\n line_depth = 125\n header_depth = 75\n left_spacing = 35\n\n ## Create custom color palette\n color_palette = [[],[],[]]\n num_colors_per = self.number_of_motifs//3\n max_num_colors_per = self.number_of_motifs - (2 * num_colors_per)\n gradient = 1/num_colors_per\n max_gradient = 1/max_num_colors_per\n # color_gradient_value = \n for i in range(3):\n if i == 2:\n for k in range(1,max_num_colors_per + 1):\n color_palette[i].append(k*max_gradient)\n else:\n for k in range(1,num_colors_per + 1):\n color_palette[i].append(k*gradient)\n # print(max_num_colors_per)\n # print(color_palette)\n\n\n ## Legend\n x_legend = self.width - self.width_of_legend\n y_legend = 75\n legend_width = 145\n legend_height = (self.number_of_motifs * 15) + 8\n ctx.rectangle(x_legend,y_legend,legend_width,legend_height)\n ctx.set_source_rgb(0,0,0)\n ctx.stroke()\n legend_line_length = 35\n count = 1\n for i in range(3):\n for j in range(len(color_palette[i])):\n ctx.move_to(x_legend + 5, y_legend + (count*15))\n ctx.line_to(x_legend + legend_line_length, y_legend + (count*15))\n if i == 0:\n ctx.set_source_rgb(color_palette[i][j],0,0)\n if i == 1:\n ctx.set_source_rgb(0,color_palette[i][j],0)\n if i == 2:\n ctx.set_source_rgb(0,0,color_palette[i][j])\n ctx.set_line_width(3)\n ctx.stroke()\n\n ctx.move_to((x_legend + legend_line_length) + 10, y_legend + (count*15))\n ctx.set_font_size(11)\n ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n ctx.set_source_rgb(0,0,0)\n ctx.show_text(self.list_of_motifs[count-1])\n\n count += 1\n\n for i in range(len(self.list_of_motif_objects)):\n current_motif_obj = self.list_of_motif_objects[i]\n current_length_of_seq = len(current_motif_obj.sequence)\n current_motif_coords = current_motif_obj.motif_coordinates\n current_motif_sequences = current_motif_obj.motif_sequences\n current_exon_coords = current_motif_obj.exon_coordinates\n\n width_left = self.width - current_length_of_seq - self.width_of_legend\n \n ## Draw main sequence line\n ctx.move_to(left_spacing,(i*line_spacing) + line_depth) \n ctx.line_to(left_spacing + current_length_of_seq,(i*line_spacing) + line_depth)\n ctx.set_source_rgb(0,0,0)\n ctx.set_line_width(2)\n ctx.stroke()\n\n ## Draw the exon\n x1 = left_spacing + current_exon_coords[0][0]\n y1 = (i*line_spacing) + line_depth - 20\n rec_width = current_exon_coords[0][1] - current_exon_coords[0][0]\n rec_height = 40\n ctx.rectangle(x1,y1,rec_width,rec_height)\n ctx.set_source_rgb(0,0,0)\n ctx.stroke()\n\n ## Loop to draw all motifs\n for j in range(len(current_motif_coords)):\n ctx.move_to(left_spacing + current_motif_coords[j][0],(i*line_spacing) + line_depth) \n ctx.line_to(left_spacing + current_motif_coords[j][0] + 2,(i*line_spacing) + line_depth)\n motif_num = current_motif_coords[j][2]\n if(motif_num < num_colors_per):\n ctx.set_source_rgb(color_palette[0][motif_num],0,0)\n if(motif_num >= num_colors_per and motif_num < (2*num_colors_per)):\n ctx.set_source_rgb(0,color_palette[1][motif_num-num_colors_per],0)\n if(motif_num >= (2*num_colors_per)):\n ctx.set_source_rgb(0,0,color_palette[2][motif_num-(2*num_colors_per)])\n ctx.set_line_width(15)\n ctx.stroke()\n\n ## adding header text\n 
ctx.move_to(left_spacing, (i*line_spacing) + header_depth)\n ctx.set_font_size(17)\n ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n ctx.set_source_rgb(0,0,0)\n ctx.show_text(current_motif_obj.header)\n\n # ## adding sequence text (MAYBE MAKE THIS OPTIONAL FLAG?)\n # disp_length = 80\n # last_k = 0\n # for k in range(len(current_motif_obj.sequence)//disp_length):\n # current_seq = current_motif_obj.sequence[k*disp_length:(k*disp_length)+disp_length]\n # ctx.move_to(50, (i*512) + 125 + (25*k))\n # ctx.set_font_size(14)\n # ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n # ctx.set_source_rgb(0,0,0)\n # ctx.show_text(current_seq)\n # last_k = k\n # final_num = ((len(current_motif_obj.sequence)//disp_length)*disp_length)\n # the_rest = current_motif_obj.sequence[final_num:]\n # ctx.move_to(50, (i*512) + 125 + (25*(last_k + 1)))\n # ctx.set_font_size(14)\n # ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n # ctx.set_source_rgb(0,0,0)\n # ctx.show_text(the_rest)\n\n\n\n surface.finish()",
"def __init__(self, *args, **kwargs):\n _gdi_.GraphicsPen_swiginit(self,_gdi_.new_GraphicsPen(*args, **kwargs))",
"def OnPaint(self, event):\n dc = wx.PaintDC(self)\n self.SetCurrent(self.context)\n if not self.init:\n self.appInit()\n self.init = True\n self.redraw()",
"def setup():\n size(800, 600)\n stroke_weight(3)",
"def basic_char_setup( self ):\n\t\tsmall_bar = 3 # number of points per bar\n\t\twide_bar = round(small_bar * 2.25,0) # 2.25 x small_bar\n\t\tdpl = 50 # dots per line 300dpi/6lpi = 50dpl\n\t\tself._nb = bytes( self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ( '*c%02ia%ib0P' % (small_bar, self.bc_height*dpl) ) + \n\t\t\t\t\t\t self.owner.PRINTER_ESC + \n\t\t\t\t\t\t (\"*p+%02iX\" % small_bar) )\n\t\tself._wb = bytes( self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ('*c%02ia%ib0P' % (wide_bar, self.bc_height*dpl) )+\n\t\t\t\t\t\t self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ('*p+%02iX' % wide_bar ) )\n\t\tself._ns = bytes( self.owner.PRINTER_ESC + ( '*p+%02iX' % small_bar ) )\n\t\tself._ws = bytes( self.owner.PRINTER_ESC + ( '*p+%02iX' % wide_bar ) )\n \n\t\t# DONE nb = bc39_esc+\"*c\"+TRANSFORM(small_bar,'99')+\"a\"+Alltrim(STR(bc39_height*dpl))+\"b0P\"+bc39_esc+\"*p+\"+TRANSFORM(small_bar,'99')+\"X\"\n\t\t# DONE wb = bc39_esc+\"*c\"+TRANSFORM(wide_bar,'99')+\"a\"+Alltrim(STR(bc39_height*dpl))+\"b0P\"+bc39_esc+\"*p+\"+TRANSFORM(wide_bar,'99')+\"X\"\n\t\t# DONE ns = bc39_esc+\"*p+\"+TRANSFORM(small_bar,'99')+\"X\"\n\t\t# DONE ws = bc39_esc+\"*p+\"+TRANSFORM(wide_bar,'99')+\"X\"\n \n\t\t# adjust cusor position to start at top of line and return to bottom of line\n\t\tself._bc_start = bytes( self.owner.PRINTER_ESC + '*p-50Y' )\n\t\tself._bc_end = bytes( self.owner.PRINTER_ESC + '*p+50Y' )\n\t\t# DONE bc39_start = bc39_esc+\"*p-50Y\"\n\t\t# DONE bc39_END = bc39_esc+\"*p+50Y\"\n\n\t\t# setup the structure allowing to print the code codebar section for various LETTERS\n\t\tself._char39 = { u'1' : 'wb+ns+nb+ws+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'2' : 'nb+ns+wb+ws+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'3' : 'wb+ns+wb+ws+nb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'4' : 'nb+ns+nb+ws+wb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'5' : 'wb+ns+nb+ws+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'6' : 'nb+ns+wb+ws+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'7' : 'nb+ns+nb+ws+nb+ns+wb+ns+wb' , \n\t\t\t\t\t\t u'8' : 'wb+ns+nb+ws+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'9' : 'nb+ns+wb+ws+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'0' : 'nb+ns+nb+ws+wb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'A' : 'wb+ns+nb+ns+nb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'B' : 'nb+ns+wb+ns+nb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'C' : 'wb+ns+wb+ns+nb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'D' : 'nb+ns+nb+ns+wb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'E' : 'wb+ns+nb+ns+wb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'F' : 'nb+ns+wb+ns+wb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'G' : 'nb+ns+nb+ns+nb+ws+wb+ns+wb' , \n\t\t\t\t\t\t u'H' : 'wb+ns+nb+ns+nb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'I' : 'nb+ns+wb+ns+nb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'J' : 'nb+ns+nb+ns+wb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'K' : 'wb+ns+nb+ns+nb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'L' : 'nb+ns+wb+ns+nb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'M' : 'wb+ns+wb+ns+nb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'N' : 'nb+ns+nb+ns+wb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'O' : 'wb+ns+nb+ns+wb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'P' : 'nb+ns+wb+ns+wb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'Q' : 'nb+ns+nb+ns+nb+ns+wb+ws+wb' , \n\t\t\t\t\t\t u'R' : 'wb+ns+nb+ns+nb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'S' : 'nb+ns+wb+ns+nb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'T' : 'nb+ns+nb+ns+wb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'U' : 'wb+ws+nb+ns+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'V' : 'nb+ws+wb+ns+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'W' : 'wb+ws+wb+ns+nb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'X' : 'nb+ws+nb+ns+wb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'Y' : 'wb+ws+nb+ns+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'Z' : 'nb+ws+wb+ns+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'-' : 'nb+ws+nb+ns+nb+ns+wb+ns+wb' , \n\t\t\t\t\t\t u'.' 
: 'wb+ws+nb+ns+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u' ' : 'nb+ws+wb+ns+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'*' : 'nb+ws+nb+ns+wb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'$' : 'nb+ws+nb+ws+nb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'/' : 'nb+ws+nb+ws+nb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'+' : 'nb+ws+nb+ns+nb+ws+nb+ws+nb' , \n\t\t\t\t\t\t u'%' : 'nb+ns+nb+ws+nb+ws+nb+ws+nb' }",
"def prepare_canvas( self ):\n self.bottom_text = self.make_bottom_text()\n title = getattr( self, 'title', self.metadata.get('title','') )\n xlabel = getattr( self, 'xlabel', self.metadata.get('xlabel','') )\n ylabel = getattr( self, 'ylabel', self.metadata.get('ylabel','') )\n labels = getattr( self, 'labels', [] )\n colors = getattr( self, 'colors', [] )\n colors = list(colors); colors.reverse()\n x_formatter_cb = getattr( self, 'x_formatter_cb', lambda x: None )\n y_formatter_cb = getattr( self, 'y_formatter_cb', lambda x: None )\n legend = getattr( self, 'legend', self.metadata.get('legend', True) )\n bottom_text = getattr( self, 'bottom_text', None )\n kw = self.kw\n\n if type(legend) == types.StringType and legend.lower().find('f') > -1:\n legend = False\n elif type(legend) == types.StringType:\n legend = True\n\n prefs = self.prefs\n if 'svg' in kw.keys():\n svg = kw['svg']\n else:\n svg = False\n if svg:\n FigureCanvas = FigureCanvasSVG\n else:\n FigureCanvas = FigureCanvasAgg\n\n # Change the preferences based on passed metadata *and* kw keys.\n for key in prefs.keys():\n if key in self.metadata.keys():\n my_type = type( prefs[key] )\n # bool('false') is true! That's\n # why we have to do this override.\n if my_type == types.BooleanType:\n if str(self.metadata[key]).lower().find('f') >= 0:\n prefs[key] = False\n else:\n prefs[key] = True\n else:\n prefs[key] = my_type(self.metadata[key])\n if key in kw.keys():\n my_type = type( prefs[key] )\n # bool('false') is true! That's\n # why we have to do this override.\n if my_type == types.BooleanType:\n if str(self.kw[key]).lower().find('f') >= 0:\n prefs[key] = False\n else:\n prefs[key] = True\n else:\n prefs[key] = my_type(self.kw[key])\n\n self.prefs = prefs\n # Alter the number of label columns, if necessary. 
First,\n # calculate the max length of all the labels we are considering.\n max_length = 0\n for label in labels:\n max_length = max( len(label), max_length )\n\n # This is a hack to change the number of columns if the max_length\n # is very long.\n if max_length > 23:\n prefs['columns'] = min( 4, prefs['columns'] )\n if max_length > 30:\n prefs['columns'] = min( 3, prefs['columns'] )\n if max_length > 37:\n prefs['columns'] = min( 2, prefs['columns'] )\n\n # Figure size\n num_labels = len( labels )\n dpi = prefs['width'] / float(prefs['width_inches'])\n height_inches = prefs['height'] / dpi\n\n # Conversion from pixels to percentage of screen\n figure_padding_perc = prefs['figure_padding'] / float(prefs['height'])\n\n # Calculations for the legend\n rows = 0.0; column_height = 0.0; bottom = 0.0\n # Max number of rows in the legend\n rows = max(1,min( numpy.ceil(num_labels / float(prefs['columns'])), \\\n prefs['max_rows']) + 2*int(bottom_text != None))\n # Width and height for the legend, then converted into pixels.\n legend_width = 1 - 2 * prefs['legend_padding'] # In percent of screen.\n legend_height = (2*prefs['text_padding'] + prefs['text_size']) * \\\n rows/float(prefs['height']) # In percent of screen.\n leg_pix_height = legend_height * height_inches * dpi\n leg_pix_width = legend_width * prefs['width_inches'] * dpi\n self.leg_pix_width = leg_pix_width\n self.leg_pix_height = leg_pix_height\n column_width = 1.0 / float( prefs['columns'] )\n self.column_width = column_width\n\n if legend:\n column_height = (2 * prefs['text_padding'] + prefs['text_size']) / \\\n leg_pix_height\n bottom = 2 * prefs['legend_padding'] + legend_height\n\n box_width = prefs['text_size']\n self.box_width = box_width\n\n # Create our figure and canvas to work with\n fig = Figure()\n canvas = FigureCanvas( fig )\n\n # Set the figure properties we derived above.\n fig.set_size_inches( prefs['width_inches'], height_inches )\n fig.set_dpi( dpi )\n\n fig.set_facecolor('white')\n\n # rect = (left, bottom, width, height)\n legend_rect = prefs['legend_padding'], prefs['legend_padding'], \\\n legend_width, legend_height\n self.legend_rect = legend_rect\n if prefs['square_axis']:\n min_size = min( 1 - 1.5*figure_padding_perc, 1 - bottom - \\\n 2*figure_padding_perc )\n ax_rect = (.5 - min_size/2.0*prefs['height']/float(prefs['width']),\n figure_padding_perc + bottom,\n prefs['height']/float(prefs['width'])*min_size,\n min_size )\n else:\n ax_rect = (figure_padding_perc,\n figure_padding_perc + bottom,\n 1 - 1.5*figure_padding_perc,\n 1 - bottom - 2*figure_padding_perc)\n\n # Add a watermark:\n if 'watermark' in prefs.keys() and str(prefs['watermark']) != 'False':\n watermark_filename = os.path.expandvars( os.path.expanduser( \\\n prefs['watermark'] ) )\n if os.path.exists(watermark_filename):\n try:\n i = PILImage.open(watermark_filename)\n enh = PILImageEnhance.Contrast( i )\n i = enh.enhance( .033 )\n img_size = i.size\n resize = 1.0\n if prefs['width'] < img_size[0]:\n resize = prefs['width'] / float(img_size[0])\n if prefs['height'] < img_size[1]:\n resize = min(resize, prefs['height']/float(img_size[1]))\n box = (0.0, 0.0, img_size[0]/float(prefs['width'])*resize, \\\n img_size[1]/float(prefs['height'])*resize)\n #print box\n ax_wm = fig.add_axes( box )\n im = ax_wm.imshow( i, origin='lower', aspect='equal' )\n ax_wm.axis('off')\n ax_wm.set_frame_on( False )\n ax_wm.set_clip_on( False )\n except Exception, e:\n print e\n pass\n else:\n # Do nothing right now. 
Write a warning sometime?\n pass\n\n # Create our two axes, and set properties\n ax = fig.add_axes( ax_rect )\n\n # If requested, make x/y axis logarithmic\n if find_info('log_xaxis',kw,self.metadata,'False').find('r') >= 0:\n ax.semilogx()\n self.log_xaxis = True\n else:\n self.log_xaxis = False\n if find_info('log_yaxis',kw,self.metadata,'False').find('r') >= 0:\n ax.semilogy()\n self.log_yaxis = True\n else:\n self.log_yaxis = False\n\n setp( ax.get_xticklabels(), family=prefs['font_family'] )\n setp( ax.get_xticklabels(), fontname=prefs['font'] )\n setp( ax.get_xticklabels(), size=prefs['text_size'] )\n\n setp( ax.get_yticklabels(), family=prefs['font_family'] )\n setp( ax.get_yticklabels(), fontname=prefs['font'] )\n setp( ax.get_yticklabels(), size=prefs['text_size'] )\n\n setp( ax.get_xticklines(), markeredgewidth=2.0 )\n setp( ax.get_yticklines(), markeredgewidth=2.0 )\n setp( ax.get_xticklines(), zorder=4.0 )\n\n if legend:\n legend_ax = fig.add_axes( legend_rect )\n legend_ax.set_axis_off()\n\n ax.grid( True, color='#555555', linewidth=0.1 )\n\n # Set text on main axes.\n # Creates a subtitle, if necessary\n title = title.split('\\n',1)\n subtitle_height_pix = (prefs['subtitle_size'] + \\\n 2*prefs['text_padding']) * \\\n (len(title) > 1)\n ax_height_pix = ax_rect[-1] * height_inches * dpi\n ax.title = ax.text( 0.5, 1 + (subtitle_height_pix + \\\n prefs['text_padding'])/ \\\n ax_height_pix, title[0],\n verticalalignment='bottom', \\\n horizontalalignment='center' )\n ax.title.set_transform( ax.transAxes )\n ax.title.set_clip_box( None )\n ax._set_artist_props( ax.title )\n\n if len(title) > 1:\n ax.subtitle = ax.text( 0.5, 1.0 + prefs['text_padding']/\\\n ax_height_pix, title[1],\n verticalalignment='bottom',\n horizontalalignment='center' )\n ax.subtitle.set_family( prefs['font_family'] )\n ax.subtitle.set_fontname( prefs['font'] )\n ax.subtitle.set_size(prefs['subtitle_size'])\n ax.subtitle.set_transform( ax.transAxes )\n ax.subtitle.set_clip_box( None )\n\n ax.title.set_family( prefs['font_family'] )\n ax.title.set_fontname( prefs['font'] )\n ax.title.set_weight('bold')\n ax.title.set_size( prefs['title_size'] )\n\n # Set labels\n t = ax.set_xlabel( xlabel )\n t.set_family(prefs['font_family'])\n t.set_fontname(prefs['font'])\n t.set_size(prefs['text_size'])\n\n t = ax.set_ylabel( ylabel )\n t.set_family(prefs['font_family'])\n t.set_fontname(prefs['font'])\n t.set_size(prefs['text_size'])\n # Now, make the legend.\n offset = 0\n early_stop = False; labels = list(labels)\n labels.reverse()\n zipped = zip(labels,colors); #zipped.reverse()\n\n # Loop over the labels.\n for my_text, my_color in zipped:\n # Size calculations\n left = (box_width+3*prefs['text_padding'])/leg_pix_width + \\\n column_width*(offset % prefs['columns'])\n top = 1 - (column_height)*(numpy.floor( offset / prefs['columns'] ))\n next_bottom = 1 - (column_height)*(numpy.floor((offset+1)/prefs['columns']) + 2*int(bottom_text != None))\n\n # Stop early if we ran out of room.\n if next_bottom < 0 and (num_labels - offset > 1):\n early_stop = True\n break\n\n # Create text\n if legend:\n t = legend_ax.text( left, top, str(my_text), horizontalalignment='left',\n verticalalignment='top', size=prefs['text_size'])\n t.set_fontname( prefs['font'] )\n t.set_family( prefs['font_family'] )\n\n # Create legend rectangle:\n patch = Rectangle( ((column_width*(offset % prefs['columns']) + \\\n 1.2*prefs['text_padding']/leg_pix_width),\n top - box_width/leg_pix_height),\n 1.2*box_width/leg_pix_width, 
1.2*box_width/leg_pix_height )\n patch.set_ec('black')\n patch.set_linewidth(0.25)\n patch.set_fc( my_color )\n legend_ax.add_patch( patch )\n\n offset += 1\n\n # Set some additional text if we stopped early\n if early_stop == True:\n my_text = '... plus %i more' % (num_labels - offset)\n if legend: legend_ax.text( left, top, my_text, horizontalalignment='left',\n verticalalignment='top', size = prefs['text_size'] )\n\n top = 1 - column_height*( rows-1 )\n left = 0.5\n\n if bottom_text != None:\n if legend:\n t = legend_ax.text( left, top, str(bottom_text), horizontalalignment='center',\n verticalalignment='top', size=prefs['text_size'] )\n t.set_family( prefs['font_family'] )\n t.set_fontname( prefs['font'] )\n\n x_formatter_cb( ax )\n y_formatter_cb( ax )\n\n self.ax = ax\n self.canvas = canvas\n self.fig = fig",
"def display_cairo_context(ctx):\n surface = ctx.get_target()\n return display_cairo_surface(surface)",
"def init(self):\n\n # Configuration interface support comes with plasma\n self.setHasConfigurationInterface(False)\n\n # Aspect ratio defined in Plasma\n self.setAspectRatioMode(Plasma.IgnoreAspectRatio)\n\n # Theme is a const variable holds Applet Theme\n self.theme = Plasma.Svg(self)\n\n # It gets default plasma theme's background\n self.theme.setImagePath(\"widgets/background\")\n\n # Resize current theme as applet size\n self.theme.resize(self.size())\n\n self.mainWidget = None\n self.layout = None\n\n self.initPlasmoid()",
"def _create(self):\n if self.h >= 2:\n # Draw standard shape\n for i in range(1, self.h - 1):\n self.window.addch(i, 0, curses.ACS_VLINE | self.colour) # '|'\n\n # Draw scrolling bar if necessary\n if self.size > 0:\n end = min(self.pos + self.size, self.h)\n for i in range(self.pos, end):\n self.window.addch(i, 0, chr(0x2588), self.colour) # '█'\n\n # Draw arrows if necessary\n if self.counter > 0:\n self.window.addch(0, 0, chr(0x25B2), self.colour) # '▲'\n if self.counter < self.content_size - self.h:\n self.window.addch(self.h - 1, 0, chr(0x25BC), self.colour) # '▼'\n\n # Finally refresh window\n self.window.refresh()",
"def draw(self, ctx, centerpoint, basepoint=(0, 0),\n angle=0, scale_x=1.0, scale_y=1.0, \n opacity=1,\n axes=True):\n ctx.set_line_width(3)\n ctx.set_line_join(cairo.LINE_JOIN_ROUND)\n \n ctx.translate(centerpoint[0], centerpoint[1])\n ctx.rotate(angle)\n ctx.scale(scale_x, scale_y)\n\n ctx.translate(basepoint[0], basepoint[1])\n\n # sign panels\n ctx.set_source_rgba(*color_hex_unpack(\"#3165A5\", opacity))\n for c, p in zip([(50, 100), (-50, 100), (-50, -100), (50, -100)], xrange(4)):\n ctx.arc(c[0], c[1], 5, math.radians(p * 90), math.radians((p + 1) * 90)) \n ctx.close_path()\n ctx.fill()\n\n ctx.set_source_rgba(*color_hex_unpack(\"#EFEFEF\", opacity))\n for c, p in zip([(35, 30), (-35, 30), (-35, -70), (35, -70)], xrange(4)):\n ctx.arc(c[0], c[1], 10, math.radians(p * 90), math.radians((p + 1) * 90)) \n ctx.close_path()\n ctx.fill()\n \n # text label\n ctx.set_source_rgba(*color_hex_unpack(\"#293531\", opacity))\n ctx.set_font_size(18)\n ctx.move_to(-ctx.text_extents('Такси')[4] / 2, -50)\n ctx.show_text('Такси')\n\n # car shape\n ctx.move_to(0, -40)\n ctx.curve_to(20, -40, 10, -10, 30, -10)\n ctx.curve_to(40, -10, 40, 15, 30, 15)\n\n # wheels\n ctx.curve_to(15, 15, 30, 30, 15, 30)\n ctx.curve_to(0, 30, 15, 15, 0, 15)\n\n ctx.curve_to(-15, 15, 0, 30, -15, 30)\n ctx.curve_to(-30, 30, -15, 15, -30, 15)\n\n ctx.curve_to(-40, 15, -40, -10, -30, -10)\n ctx.curve_to(-10, -10, -20, -40, 0, -40)\n ctx.close_path()\n ctx.fill()\n\n # windscreen\n ctx.set_source_rgba(*color_hex_unpack(\"#EFEFEF\", opacity))\n ctx.move_to(0, -30)\n for point in [(5, -30), (10, -10), (-10, -10), (-5, -30), (0, -30)]:\n ctx.line_to(point[0], point[1])\n ctx.close_path()\n ctx.fill()\n\n # lights\n for c in 17, -17:\n ctx.move_to(c, -3)\n for point in [(c + 5, -3), (c + 5, 5), (c - 5, 5), (c - 5, -3)]:\n ctx.line_to(point[0], point[1])\n ctx.close_path()\n ctx.stroke()\n\n ctx.translate(-basepoint[0], -basepoint[1])\n\n ctx.scale(1/scale_x, 1/scale_y)\n ctx.rotate(-angle)\n ctx.translate(-centerpoint[0], -centerpoint[1])",
"def __init__(self, *args, **kwargs):\n _gdi_.Pen_swiginit(self,_gdi_.new_Pen(*args, **kwargs))",
"def setup(self):\n self.ca_lines = []\n self.ca_lines.append(self.build_initial_line())\n self.set_display_from_lines()",
"def setup(self):\n\n # push the frame for the toplevel window\n self.lumpy.pushfr(self.tl)\n self.lumpy.col([0,1])\n\n # the frame at the top contains buttons\n self.lumpy.row([0,0,1], bg='white')\n self.lumpy.bu(text='Close', command=self.close)\n self.lumpy.bu(text='Print to file:', command=self.printfile)\n self.en = self.lumpy.en(width=10, text='lumpy.ps')\n self.en.bind('<Return>', self.printfile)\n self.la = self.lumpy.la(width=40)\n self.lumpy.endrow()\n\n # the grid contains the canvas and scrollbars\n self.lumpy.gr(2)\n \n self.ca_width = 1000\n self.ca_height = 500\n self.canvas = self.ca(self.ca_width, self.ca_height, bg='white')\n\n yb = self.lumpy.sb(command=self.canvas.yview, sticky=N+S)\n xb = self.lumpy.sb(command=self.canvas.xview, orient=HORIZONTAL,\n sticky=E+W)\n self.canvas.configure(xscrollcommand=xb.set, yscrollcommand=yb.set,\n scrollregion=(0, 0, 800, 800))\n \n self.lumpy.endgr()\n self.lumpy.endcol()\n self.lumpy.popfr()\n\n # measure some sample letters to get the text height\n # and set the scale factor for the canvas accordingly\n self.canvas.clear_transforms()\n bbox = self.canvas.measure(['bdfhklgjpqy'])\n self.unit = 1.0 * bbox.height()\n transform = ScaleTransform([self.unit, self.unit])\n self.canvas.add_transform(transform)",
"def OnDrawGTKStyle(self, event):\r\n\r\n dc = wx.AutoBufferedPaintDC(self._pButton)\r\n dc.SetBackground(wx.Brush(self.GetBackgroundColour()))\r\n dc.Clear()\r\n \r\n self.OnDrawGTKExpander(dc)\r\n self.OnDrawGTKText(dc)",
"def configure_cb(self, darea, event):\n self.width, self.height = darea.window.get_size()\n self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.width,\n self.height)\n self.cr = cairo.Context(self.surface)\n self.draw(self.cr, self.width, self.height)\n\n return True",
"def setupStyling(self):\n\n\t\tfaces = {\n\t\t\t'times': 'Times New Roman',\n\t\t\t'mono' : 'Courier New',\n\t\t\t'helv' : 'Arial',\n\t\t\t'other': 'Comic Sans MS',\n\t\t\t'size' : 10,\n\t\t\t'size2': 8,\n\t\t}\n\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleClearAll()\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_LINENUMBER, \"fore:#928374,back:#212121,face:%(mono)s,size:%(size2)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEXT, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HEADING, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HIDDEN, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODE, \"fore:#b8bb26,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.SYMBOL, \"fore:#81ac71,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEST, \"fore:#ff00ff,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.STRIKE, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.BOLD, \"fore:#d9a62e,bold,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.UNDERLINE, \"fore:#d9a62e,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.ITALIC, \"fore:#7d9d90,italic,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML_ATTRIBUTE, \"fore:#d9a62e,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.FORMAT, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.COMMENT, \"fore:#928372,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_KEYWORD, \"fore:#569cd6,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_SYMBOL, \"fore:#9cdcfe,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TEXT, \"fore:#F9FFE0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_STRING, \"fore:#d69d73,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_COMMENT, \"fore:#57a64a,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FUNCTION, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_CLASS, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TYPE, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FLOW, \"fore:#d8a0df,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_DIGIT, \"fore:#b5ce92,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.IndicatorSetStyle(0, stc.STC_INDIC_SQUIGGLE)\n\t\tself.edit.IndicatorSetForeground(0, 
wx.RED)",
"def __init__(self, content, template, **styles):\n self.content = content\n self.template = template\n self.styles = DEFAULT_DOCUMENT_STYLE.copy()\n # This is a rendered document ready to be painted on a cairo surface\n self._document = None # weasyprint.document.Document object\n self.compiled_html = None\n # Update the default styles and initialize self._document object\n self.update_style(**styles)"
] | [
"0.6378648",
"0.63250196",
"0.5820568",
"0.5630213",
"0.5574149",
"0.5553529",
"0.5505494",
"0.54557025",
"0.5414631",
"0.54144245",
"0.5352626",
"0.53398526",
"0.53367114",
"0.5331944",
"0.5321142",
"0.53158367",
"0.5298759",
"0.5253618",
"0.52352786",
"0.52059555",
"0.5188168",
"0.5179152",
"0.51635146",
"0.51621383",
"0.5145012",
"0.51306075",
"0.5117274",
"0.51089364",
"0.5100582",
"0.5099557"
] | 0.7677612 | 0 |
Handles the expose event. Sets up cairo and calls draw() to draw the text | def do_expose_event(self, widget, event):
self.set_up_pangocairo(widget, event)
self.draw(*self.window.get_size()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def expose (self,widget,event):\n #Creating Cairo drawing context\n self.ctx = self.bin_window.cairo_create()\n #Setting context size to available size\n self.ctx.rectangle(event.area.x, event.area.y, event.area.width, event.area.height)\n self.ctx.clip()\n self.ctx.translate(20.5,-0.5)\n #Obtaining available width and height\n self.available_width = event.area.width\n self.available_height = event.area.height\n #Drawing\n self.draw(self.ctx)\n return False",
"def expose(self, widget, event):\n cr = widget.window.cairo_create()\n cr.set_source_rgb(0, 0, 0)\n cr.paint()\n for pos, color in self.locked_squares.iteritems():\n self.paint_square(pos, color, cr)\n for pos in self.curr_piece.occupying():\n self.paint_square(pos, self.curr_piece.color, cr)\n ### Easiest to put \"GAME OVER\" message here ###\n if self.over:\n cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_BOLD)\n ### HACK: The following doesn't scale with DOT_SIZE ###\n cr.set_font_size(41)\n cr.move_to(10, 200)\n cr.set_source_rgb(0, 0, 0) # dark drop-shadow\n cr.show_text('GAME OVER')\n cr.move_to(12, 202)\n cr.set_source_rgb(.82, .82, .82) # light main text\n cr.show_text('GAME OVER')\n cr.stroke()",
"def expose (self,widget,event):\n #Creating Cairo drawing context\n self.ctx = self.bin_window.cairo_create()\n #Setting context size to available size\n self.ctx.rectangle(event.area.x, event.area.y, event.area.width, event.area.height)\n self.ctx.clip()\n self.ctx.translate(0.5,-0.5)\n #Obtaining available width and height\n self.available_width = event.area.width\n self.available_height = event.area.height\n #Drawing\n self.draw(self.ctx)\n return False",
"def text_draw(self, x, y, text, style={}):",
"def draw(self, draw_surface):\n super().draw(draw_surface)\n if self.sub_event is not None:\n self.sub_event.draw(draw_surface)\n else:\n self.text_cursor.draw(draw_surface)",
"def OnDrawGTKText(self, dc):\r\n\r\n self._pButton.OnDrawGTKText(dc)",
"def draw(self, surface, offset=(0,0)):\n mouse = pg.mouse.get_pos()\n pos = mouse[0]-offset[0], mouse[1]-offset[1]\n if self.clicked:\n fill_color = pg.Color(\"white\")\n text = self.selected_text\n elif self.rect.collidepoint(pos):\n fill_color = (198, 226, 255)\n text = self.selected_text\n else:\n fill_color = self.color\n text = self.text\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(fill_color, self.rect.inflate(-2,-2))\n surface.blit(text, self.text_rect)",
"def draw(self, win):\n self.rect.draw(win)\n self.text.draw(win)",
"def expose_cb(self, darea, event):\n cr = darea.window.cairo_create()\n cr.rectangle(event.area.x, event.area.y,\n event.area.width, event.area.height)\n cr.clip()\n cr.set_source_surface(self.surface, 0, 0)\n cr.paint()\n\n return False",
"def draw( self, **kw ):\n pass",
"def on_paint(self, event):\n self.SetCurrent(self.context)\n if not self.init:\n # Configure the viewport, modelview and projection matrices\n self.init_gl()\n self.init = True\n\n size = self.GetClientSize()\n text = \"\".join([_(u\"Canvas redrawn on paint event, size is \"),\n str(size.width), \", \", str(size.height)])\n self.render(text)",
"def draw(self):\n if self.dirty:\n self._render()\n for text in self.text_lines:\n text.draw()",
"def draw(self, draw_surface):\n self.give_sub_event.draw(draw_surface)",
"def _render(self):\n self.dirty = False\n self.image = self.font.render(self._text, self.aa, self.color_fg)\n self.rect = self.image.get_rect()",
"def draw(self):\n raise NotImplementedError",
"def draw(self):\n raise NotImplementedError",
"def draw(self):\n raise NotImplementedError",
"def expose(self, widget, event):\n cr = widget.window.cairo_create()\n cr.set_source_rgb(0.05, 0.05, 0.05)\n cr.paint()\n for pos in self.next_piece.occupying():\n self.paint_square(tuple_add(pos, (-1, 1)),\n self.next_piece.color, cr)",
"def draw(self):\r\n pygame.draw.rect(self.screen, self.background_color, self.bounds)\r\n line_window = self.lines[self.scroll_window_top:self.scroll_window_bottom]\r\n for idx,line in enumerate(line_window):\r\n text = self.font.render(line, True, self.foreground_color)\r\n x,y = self._get_x_y_from_pos(self.position[0], self.position[1]+idx)\r\n self.screen.blit(text,(x,y))\r\n \r\n if self.cursor_visible and self.scroll_window_bottom == len(self.lines):\r\n x,y = self._get_x_y_from_pos(len(line_window[-1]), len(line_window))\r\n cursor_rect = pygame.Rect(x,y,\r\n self.text_width,self.text_height)\r\n pygame.draw.rect(self.screen, self.foreground_color, cursor_rect)",
"def draw(self, context):\n rect = self.get_allocation()\n #initial context settings: line width & font\n context.set_line_width(1)\n font = gtk.Label().style.font_desc.get_family()\n context.select_font_face(font,cairo.FONT_SLANT_NORMAL, \\\n cairo.FONT_WEIGHT_NORMAL)\n \n self.draw_basics(context, rect)\n if self.data:\n self._do_draw(context, rect)",
"def draw(self):\n pass",
"def draw(self):\n pass",
"def draw(self):\n pass",
"def draw(self):\n pass",
"def draw_text(self, text, i, j, **params):",
"def draw():",
"def draw_text(self, text, font, color, surface, x, y): #use for narrative in end sequence\n text_obj = font.render(text, True, color)\n text_rect = text_obj.get_rect()\n text_rect.center = (x, y)\n surface.blit(text_obj, text_rect)",
"def draw(self, screen):\n lines = self.text.strip().split('\\n')\n y = self.y\n for line in lines:\n self.ui.show_text(line, (self.x, y), 30)\n y += 32",
"def draw(self):",
"async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")"
] | [
"0.63333666",
"0.6331875",
"0.6298491",
"0.6249288",
"0.62065977",
"0.62057835",
"0.60598",
"0.6035482",
"0.5889406",
"0.58473253",
"0.5832056",
"0.58307505",
"0.5823392",
"0.5807295",
"0.579405",
"0.579405",
"0.579405",
"0.5785034",
"0.57550627",
"0.5724503",
"0.5721456",
"0.5721456",
"0.5721456",
"0.5721456",
"0.5716641",
"0.5703485",
"0.5696948",
"0.56856227",
"0.5680523",
"0.56797194"
] | 0.7019397 | 0 |
Invalidates the canvas to allow cairo to redraw | def invalidate_canvas(self):
if self.window:
x, y, w, h = self.get_allocation()
self.window.invalidate_rect((0,0,w,h), False)
self.cr = self.window.cairo_create()
self.cr.update_layout(self.pg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def undraw(self):\n \n if not self.canvas: return\n if not self.canvas.isClosed():\n #self.canvas.delete(self.id)\n _tkExec(self.canvas.delete, self.id)\n if self.canvas.autoflush:\n #_root.update()\n _tkCall(_root.update)\n pass\n self.canvas = None\n self.id = None",
"def refresh_canvas(self):\n self.canvas.delete('all')\n self.draw_handler(self)\n self.canvas.after(CANVAS[\"REFRESH_TIME\"], self.refresh_canvas)",
"def clear_canvas():\r\n global _canvas\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n _canvas.clear()",
"def clean_canvas(self):\n self.canvas.fill(self.colorPalette.black)\n self.background.to_canvas(canvas=self.canvas)",
"def clear_canvas(self):\n # clear the image for next operation\n # self.axes.hold(False)\n\n # clear image\n self.axes.cla()\n # Try to clear the color bar\n if len(self.fig.axes) > 1:\n self.fig.delaxes(self.fig.axes[1])\n self._colorBar = None\n # This clears the space claimed by color bar but destroys sub_plot too.\n self.fig.clear()\n # Re-create subplot\n self.axes = self.fig.add_subplot(111)\n self.fig.subplots_adjust(bottom=0.15)\n\n # flush/commit\n self._flush()\n\n return",
"def clearCanvas():\n global c, coordinates\n c.delete(\"all\")\n drawMusicLines()\n coordinates.clear()",
"def clear_canvas():\n self.parent_class.canvas.delete(\"all\")",
"def clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n if self.useblit:\n self.background = self.canvas.copy_from_bbox(self.ax.bbox)",
"def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._checks)\n if hasattr(self, '_lines'):\n for l1, l2 in self._lines:\n self.ax.draw_artist(l1)\n self.ax.draw_artist(l2)",
"def _clear(self, event):\n if self.ignore(event) or self._changed_canvas():\n return\n self._background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self._buttons)\n if hasattr(self, \"_circles\"):\n for circle in self._circles:\n self.ax.draw_artist(circle)",
"def invalidate(self):\n\t\tself.invalidate_rect(self.viewed_rect())",
"def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.selected_element = None\n self.pressed_elements.clear()",
"def clear(self):\n for i in range(len(self.canvas)):\n self.canvas[i] = 0",
"def invalidate(self):\n self.set_viewport(self.x, self.y, self.w, self.h, True)",
"def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.subplot2.clear()",
"def redraw(self) -> None:\n self.canvas.draw_idle()\n self.Refresh()",
"def clear(self):\n self.canvas = [[self.style] * self.cols for _ in range(self.lines)]",
"def __del__(self):\n if self._alloc:\n _pychidg.f90wrap_graphics_bc_t_finalise(this=self._handle)",
"def plot_clear():\n plt.cla()",
"def remove_canvas(self,):\r\n # reset plot view beofre change\r\n self.canvas.toolbar.home()\r\n # remove widgets from canvas_vlayout\r\n self.canvas_vlayout.removeWidget(self.toolbar)\r\n self.toolbar.close()\r\n self.canvas_vlayout.removeWidget(self.canvas)\r\n self.canvas.close()",
"def new_canvas(self):\n libtcod.console_clear(self.console)",
"def invalidate(self):\n self._valid = False",
"def invalidate(self):\n self.valid = False",
"def on_draw(self):\n self.clear()\n self.manager.draw()",
"def redraw(self):\n self.vispy_widget.canvas.update()",
"def clear(self):\n self.image = None\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n self.update()",
"def unbind(self, *args, **kwargs):\n self._canvas.unbind(*args, **kwargs)",
"def clear(self):\n self.animation.stop()\n self.draw(0, 0, 0, 0, 0)",
"def remove_drawing_rect(self):\n self.drawing_rect = QPolygonF()\n if self.connecting_rect:\n self.connecting_rect.setVisible(False)\n self.connecting_rect = None\n self.first_draw = True",
"def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)"
] | [
"0.7170448",
"0.70177877",
"0.6811474",
"0.67474014",
"0.6741104",
"0.65798086",
"0.6572473",
"0.651364",
"0.64646524",
"0.6450033",
"0.64039034",
"0.6386848",
"0.6327218",
"0.62746847",
"0.62502813",
"0.6237061",
"0.6207101",
"0.612459",
"0.6124122",
"0.6107939",
"0.6056458",
"0.60497254",
"0.60440826",
"0.59680444",
"0.5939536",
"0.5917331",
"0.59095526",
"0.5883173",
"0.5867043",
"0.5832837"
] | 0.8453374 | 0 |
Calls continuous_scroll every 38 ms until the drag stops and the gobject timeout source is removed | def start_refresh(self, widget, context):
self.source_id = gobject.timeout_add(38, self.continuous_scroll, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True",
"def on_scroll(self, event):\n if event.button == 'up':\n self.generations += 4000\n elif event.button == 'down':\n if self.generations >= 4000:\n self.generations -= 4000\n self.redraw()",
"def stop_drag_motion(self, widget, context):\n gobject.source_remove(self.source_id)\n self.mouse_click_point = 0",
"def __stopScrolling(self):\n self.__view.inputWidget().releaseMouse()\n QApplication.restoreOverrideCursor()\n \n self.__indicator.hide()\n self.__indicator.setParent(None)\n self.__scroller.stopScrolling()",
"def stop_scroll():\n send_command(0x2E)",
"def idle_loop(self):\n sleep(0.1)\n self.scroll()",
"def on_scroll(event):\n if event.step > 0:\n if plot_mode == 'time_cut':\n sld['time'].set_val( min( sld['time'].val+1, sld['time'].valmax ) )\n else:\n sld['freq'].set_val( min(sld['freq'].val + scale_freq, sld['freq'].valmax) )\n else:\n if plot_mode == 'time_cut':\n sld['time'].set_val( max( sld['time'].val-1, sld['time'].valmin ) )\n else:\n sld['freq'].set_val( max(sld['freq'].val - scale_freq, sld['freq'].valmin) )",
"def _on_scroll(self, event):",
"def drag(self, event):\n self.yview('scroll', self.ypos - event.y, 'units')\n self.xview('scroll', self.xpos - event.x, 'units')\n self.ypos = event.y\n self.xpos = event.x",
"def on_scroll(self, event):\n if not self.ignore(event):\n self._on_scroll(event)",
"def scroll(self, direction):\n\n self.counter += direction # Counter of 'up' and 'down'\n do_redraw = self.counter == self.content_size - self.h\n\n if self.size > 0:\n self.count += direction\n pos = self.pos\n if math.fabs(self.count) == math.floor(self.content_size / self.h):\n pos += direction\n self.count = 0\n\n pos = max(0, pos) # Top limit\n pos = min(pos, self.h - self.size) # Bottom limit\n do_redraw = pos != self.pos # Redraw if pos has changed\n self.pos = pos\n\n if do_redraw:\n self._create()",
"def lulz(self):\n self.reset()\n self.scrollproc = threading.Thread(target=self.lulzloop)\n self.killedevent.wait()\n self.scrollproc.start()",
"def page_down(self):\n counter = self.get_entry_count_per_screen()\n while counter != 0 and self.pointer < (len(self.contents)-1):\n logging.debug(\"moved down\")\n self.pointer += 1\n counter -= 1\n self.refresh()\n self.reset_scrolling()\n return True",
"def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )",
"def slider_dragged(self):\n pass",
"def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r",
"def _on_scroll(self, event):\n self._zoom(event.step, draw=True)",
"def cb_move(self, event):\n if not self.move_timer.IsRunning():\n self.move_timer.StartOnce(2000)",
"def move_down(self):\n if self.pointer < (len(self.contents)-1):\n logging.debug(\"moved down\")\n self.pointer += 1\n self.reset_scrolling()\n self.refresh()\n return True\n else:\n return False",
"def start_scroll():\n send_command(0x2F)",
"def do_scroll_event(self, event):\n\t\tif event.state & gtk.gdk.CONTROL_MASK:\n\t\t\tif event.direction == gtk.gdk.SCROLL_UP:\n\t\t\t\tself.zoom *= 1.1\n\t\t\telif event.direction == gtk.gdk.SCROLL_DOWN:\n\t\t\t\tself.zoom /= 1.1",
"def mouse_wheel(self, event):\n\n if event.num == 5 or event.delta == -120:\n event.widget.yview_scroll(1, UNITS)\n self.tablerowheader.yview_scroll(1, UNITS)\n if event.num == 4 or event.delta == 120:\n if self.canvasy(0) < 0:\n return\n event.widget.yview_scroll(-1, UNITS)\n self.tablerowheader.yview_scroll(-1, UNITS)\n self.redrawVisible()\n return",
"def remove_slide_timeout(self, widget):\n try:\n gobject.source_remove(self.timeouts.pop(widget)[0])\n except KeyError:\n pass",
"def move_move(self, event):\n self.canvas.scan_dragto(event.x, event.y, gain=1)",
"def stopPaging(self):\n self._stillPaging = 0",
"def _scrollEvent(self, widget, event, adj):\n if event.direction in (gtk.gdk.SCROLL_UP, gtk.gdk.SCROLL_LEFT):\n inc = -adj.step_increment\n elif event.direction in (gtk.gdk.SCROLL_DOWN, gtk.gdk.SCROLL_RIGHT):\n inc = adj.step_increment\n else:\n inc = 0\n adj.set_value(min(adj.upper - adj.page_size, adj.value + inc))\n return False",
"def Scroll(self, steps):\n self._EnsureHIDValueInRange(steps)\n self._kit.MouseScroll(steps)\n time.sleep(self.send_delay)",
"def stopGTK( ):\n if LOOP_TRACKER:\n LOOP_TRACKER.decrement()",
"def dnd_motion(self, source, event):",
"def mouseMove(self, evt):\n if self.__enabled and self.__indicator.isVisible():\n rect = self.__indicatorGlobalRect()\n xlen = 0\n ylen = 0\n egp = evt.globalPos()\n \n if rect.left() > egp.x():\n xlen = egp.x() - rect.left()\n elif rect.right() < egp.x():\n xlen = egp.x() - rect.right()\n \n if rect.top() > egp.y():\n ylen = egp.y() - rect.top()\n elif rect.bottom() < egp.y():\n ylen = egp.y() - rect.bottom()\n \n self.__scroller.startScrolling(xlen, ylen)\n \n return False"
] | [
"0.67773175",
"0.65299225",
"0.6115261",
"0.6041647",
"0.5851163",
"0.57974243",
"0.5567167",
"0.5549752",
"0.5510689",
"0.54062074",
"0.5401114",
"0.5395652",
"0.53080606",
"0.5282082",
"0.5279963",
"0.52645594",
"0.519223",
"0.5188579",
"0.517752",
"0.51675165",
"0.5145534",
"0.5145527",
"0.5141829",
"0.51399153",
"0.5126277",
"0.5103978",
"0.5063557",
"0.50625086",
"0.5042306",
"0.503752"
] | 0.65430206 | 1 |
CPU kernel for 3D mesh-to-particles quantity interpolation | def mesh_to_particles_CPU_3d(mesh, mesh_quantity, indices, weights):
ip, jp, kp = indices
stridex = mesh.nx
stridey = mesh.ny
mq = np.ravel(mesh_quantity)
@np.vectorize
def check_outside(ip, jp, kp):
outside_idx = (jp < 0 or jp >= mesh.nx - 1 or
ip < 0 or ip >= mesh.ny - 1 or
kp < 0 or kp >= mesh.nz - 1)
return outside_idx
outside_idx = check_outside(ip, jp, kp)
inside_idx = ~outside_idx
ip, jp, kp = ip[inside_idx], jp[inside_idx], kp[inside_idx]
weights = [w[inside_idx] for w in weights]
particles_quantity = np.empty(len(indices[0]), dtype=mesh_quantity.dtype)
particles_quantity[inside_idx] = (
mq[jp + stridex*ip + stridex*stridey*kp ] * weights[0]
+ mq[jp + stridex*(ip+1) + stridex*stridey*kp ] * weights[1]
+ mq[jp+1 + stridex*ip + stridex*stridey*kp ] * weights[2]
+ mq[jp+1 + stridex*(ip+1) + stridex*stridey*kp ] * weights[3]
+ mq[jp + stridex*ip + stridex*stridey*(kp+1)] * weights[4]
+ mq[jp + stridex*(ip+1) + stridex*stridey*(kp+1)] * weights[5]
+ mq[jp+1 + stridex*ip + stridex*stridey*(kp+1)] * weights[6]
+ mq[jp+1 + stridex*(ip+1) + stridex*stridey*(kp+1)] * weights[7])
particles_quantity[outside_idx] = 0
return particles_quantity | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def mesh_to_particles_CPU_2d(mesh, mesh_quantity, indices, weights):\n ip, jp = indices\n stridex = mesh.nx\n mesh_quantity = np.ravel(mesh_quantity)\n\n @np.vectorize\n def check_outside(ip, jp):\n outside_idx = (jp < 0 or jp >= mesh.nx - 1 or\n ip < 0 or ip >= mesh.ny - 1)\n return outside_idx\n outside_idx = check_outside(ip, jp)\n inside_idx = ~outside_idx\n ip, jp = ip[inside_idx], jp[inside_idx]\n weights = [w[inside_idx] for w in weights]\n\n particles_quantity = np.empty(len(indices[0]), dtype=mesh_quantity.dtype)\n particles_quantity[inside_idx] = (\n mesh_quantity[jp + stridex*ip ] * weights[0]\n + mesh_quantity[jp + stridex*(ip+1)] * weights[1]\n + mesh_quantity[jp+1 + stridex*ip ] * weights[2]\n + mesh_quantity[jp+1 + stridex*(ip+1)] * weights[3])\n\n particles_quantity[outside_idx] = 0\n return particles_quantity",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test",
"def __init__process(self, n_cpu):\n global shared_slices\n global shared_data\n\n shared_slices_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_slices = np.frombuffer(shared_slices_base)\n shared_slices = shared_slices.reshape((len(self._q.R), -1))\n\n shared_grad_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_grad = np.frombuffer(shared_grad_base)\n shared_grad = shared_grad.reshape((len(self._q.R), -1))\n\n shared_data_base = mp.Array(ctypes.c_double,\n self._data.size,\n lock=False)\n shared_data = np.ctypeslib.as_array(shared_data_base)\n shared_data = shared_data.reshape(self._data.shape)\n shared_data[:] = self._data\n\n self._pool = mp.Pool(n_cpu)",
"def calculate_potential_3D_parallel(true_csd, ele_xx, ele_yy, ele_zz, \n csd_x, csd_y, csd_z):\n\n xlin = csd_x[:,0,0]\n ylin = csd_y[0,:,0]\n zlin = csd_z[0,0,:]\n xlims = [xlin[0], xlin[-1]]\n ylims = [ylin[0], ylin[-1]]\n zlims = [zlin[0], zlin[-1]]\n sigma = 1.0\n #tic = time.time()\n pots = Parallel(n_jobs=num_cores)(delayed(integrate_3D)(ele_xx[ii],ele_yy[ii],ele_zz[ii],\n xlims, ylims, zlims, true_csd,\n xlin, ylin, zlin,\n csd_x, csd_y, csd_z) for ii in range(len(ele_xx)))\n pots = np.array(pots)\n pots /= 4*np.pi*sigma\n #toc = time.time() - tic\n #print toc, 'Total time taken - parallel, sims '\n return pots",
"def TST_MMD_u_linear_kernel(Fea, N_per, N1, alpha, device, dtype):\r\n mmd_vector = np.zeros(N_per)\r\n TEMP = MMDu_linear_kernel(Fea, N1)\r\n mmd_value = get_item(TEMP[0], is_cuda)\r\n Kxyxy = TEMP[2]\r\n count = 0\r\n nxy = Fea.shape[0]\r\n nx = N1\r\n\r\n for r in range(N_per):\r\n # print r\r\n ind = np.random.choice(nxy, nxy, replace=False)\r\n # divide into new X, Y\r\n indx = ind[:nx]\r\n # print(indx)\r\n indy = ind[nx:]\r\n Kx = Kxyxy[np.ix_(indx, indx)]\r\n # print(Kx)\r\n Ky = Kxyxy[np.ix_(indy, indy)]\r\n Kxy = Kxyxy[np.ix_(indx, indy)]\r\n\r\n TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)\r\n mmd_vector[r] = TEMP[0]\r\n if mmd_vector[r] > mmd_value:\r\n count = count + 1\r\n if count > np.ceil(N_per * alpha):\r\n h = 0\r\n threshold = \"NaN\"\r\n break\r\n else:\r\n h = 1\r\n if h == 1:\r\n S_mmd_vector = np.sort(mmd_vector)\r\n # print(np.int(np.ceil(N_per*alpha)))\r\n threshold = S_mmd_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n return h, threshold, mmd_value.item()",
"def c_src_kernel_tiling(self, node, nodename):\r\n\r\n #The kernel is intended to be structured roughly like this:\r\n \"\"\"\r\n static __global__ void kernel()\r\n {\r\n for (int v = blockIdx.y; v < dim0; v += gridDim.x)\r\n {\r\n for (int w = blockIdx.y; w < dim1; w += gridDim.y)\r\n {\r\n for (int x = threadIdx.x; x < dim2; x += blockDim.x)\r\n {\r\n for (int y = threadIdx.y; y < dim3; y += blockDim.y)\r\n {\r\n for (int z = threadIdx.z; z < dim4; z += blockDim.z)\r\n {\r\n out[v * out_stride[0] + ...] = f(in1[...], in2[...])\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n\r\n \"\"\"\r\n\r\n nd = node.outputs[0].type.ndim\r\n sio = StringIO()\r\n #print 'C_SRC_KERNEL', sio.getvalue()\r\n\r\n if nd in (4,):\r\n # print some leading comments to make the code easier to read\r\n for ipos, i in enumerate(node.inputs):\r\n print >> sio, \"// Input \", ipos, str(i.type)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \"// Output \", ipos, str(i.type)\r\n print >> sio, \"static __global__ void kernel_%s_%s_%s(unsigned int numEls\" %(\r\n self.scalar_op.__class__.__name__,\r\n nodename,\r\n 'tiling%i'%nd)\r\n if (nd):\r\n print >> sio, \"\\t,\", \", \".join(\"const int dim%i\" % i for i in xrange(nd))\r\n #declare inputs\r\n for ipos, i in enumerate(node.inputs):\r\n s = \", \".join([\"const float * i%i_data\" % ipos] + list(\"int i%i_str_%i\" % (ipos, d) for d in xrange(nd)))\r\n print >> sio, \"\\t,\", s\r\n #declare outputs\r\n for ipos, i in enumerate(node.outputs):\r\n s = \", \".join([\"float * o%i_data\" % ipos] + list(\"int o%i_str_%i\" % (ipos, d) for d in xrange(nd)))\r\n print >> sio, \"\\t,\", s\r\n #print >> sio, \"\\t,\", \", \".join(\"int o%i_str_%i\" % (ipos, d) for d in xrange(nd))\r\n #print >> sio, \"\\t,\", \"float * o%i_data\" % ipos\r\n print >> sio, \"\\t)\\n{\"\r\n\r\n # For each input that is a scalar which has been broadcasted to a tensor,\r\n # load it into a local variable\r\n print >> sio, \" __shared__ float value0[%i];\" % len(node.inputs)\r\n print >> sio, \" __shared__ int shared_dims[%(nd)s];\" % locals()\r\n #print >> sio, \" __shared__ int shared_i_str[%(n_in)s][%(nd)s]\"\r\n print >> sio, \" if ((threadIdx.x == 0) && (threadIdx.y == 0)) {\"\r\n for ipos, i in enumerate(node.inputs):\r\n if _logical_scalar(i):\r\n print >> sio, \" value0[%i] = i%i_data[0];\" % (ipos, ipos)\r\n for ipos in xrange(nd):\r\n print >> sio, \" shared_dims[%i] = dim%i;\" % (ipos, ipos)\r\n print >> sio, \" }\"\r\n print >> sio, \" __syncthreads();\"\r\n\r\n\r\n if (nd == 4):\r\n print >> sio, \"\"\"\r\n for (int pos0 = blockIdx.x; pos0 < shared_dims[0]; pos0 += gridDim.x)\r\n {\r\n for (int pos1 = blockIdx.y; pos1 < shared_dims[1]; pos1 += gridDim.y)\r\n {\r\n //for (int pos2 = threadIdx.x; pos2 < shared_dims[2]; pos2 += blockDim.x)\r\n for (int pos2 = threadIdx.y; pos2 < shared_dims[2]; pos2 += blockDim.y)\r\n {\r\n //for (int pos3 = threadIdx.y; pos3 < shared_dims[3]; pos3 += blockDim.y)\r\n for (int pos3 = threadIdx.x; pos3 < shared_dims[3]; pos3 += blockDim.x)\r\n {\r\n \"\"\"\r\n else:\r\n raise NotImplementedError()\r\n\r\n for ipos, i in enumerate(node.inputs):\r\n if not _logical_scalar(i):\r\n print >> sio, \" const float * ii_i%i_data = i%i_data;\" % (ipos, ipos)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \" float * ii_o%i_data = o%i_data;\" % (ipos, ipos)\r\n for d in xrange(nd):\r\n for ipos, i in enumerate(node.inputs):\r\n if not _logical_scalar(i):\r\n print >> sio, \" ii_i%i_data += pos%i * i%i_str_%i;\" % (ipos, d, ipos, d)\r\n for ipos, i 
in enumerate(node.outputs):\r\n print >> sio, \" ii_o%i_data += pos%i * o%i_str_%i;\" % (ipos, d, ipos, d)\r\n\r\n # perform the scalar operation on the input and output references\r\n #TODO: What if the scalar_op needs support_code??\r\n task_code = self.scalar_op.c_code(\r\n Apply(self.scalar_op,\r\n [scalar.Scalar(dtype = input.type.dtype)() for input in node.inputs],\r\n [scalar.Scalar(dtype = output.type.dtype)() for output in node.outputs])\r\n , nodename + '_scalar_'\r\n , get_str_list_logical_scalar(node, value_str='value0[%i]')\r\n , ['ii_o%i_data[0]'%ipos for ipos, i in enumerate(node.outputs)]\r\n , sub=dict(fail='return;')) #TODO: set a failure code somehow!!!\r\n print >> sio, \" \", task_code\r\n\r\n print >> sio, \" }\" * nd\r\n\r\n #TODO: insert runtime stride checks that select the best loop order either here, or in\r\n # the host code that launched the kernel (host code probably better spot)\r\n\r\n #indent = \" \"*(4*d+7)\r\n #for ipos, i in enumerate(node.inputs):\r\n #print >> sio, indent, \"const float * i%i\" % ipos, '= i%i_data', ''\r\n print >> sio, \"}\"\r\n\r\n print sio.getvalue()\r\n return sio.getvalue()",
"def _interpolate_scalar_3d(volume, dkk, dii, djj):\n ns = volume.shape[0]\n nr = volume.shape[1]\n nc = volume.shape[2]\n\n if not (-1 < dkk < ns and -1 < dii < nr and -1 < djj < nc):\n out = 0\n return 0\n # find the top left index and the interpolation coefficients\n kk = np.floor(dkk).astype('int')\n ii = np.floor(dii).astype('int')\n jj = np.floor(djj).astype('int')\n # no one is affected\n cgamma = (dkk - kk).astype('float32')\n calpha = (dii - ii).astype('float32')\n cbeta = (djj - jj).astype('float32')\n alpha = (1.0 - calpha).astype('float32')\n beta = (1.0 - cbeta).astype('float32')\n gamma = (1.0 - cgamma).astype('float32')\n\n inside = 0\n # ---top-left\n if (ii >= 0) and (jj >= 0) and (kk >= 0):\n out = alpha * beta * gamma * volume[kk, ii, jj]\n inside += 1\n else:\n out = 0\n # ---top-right\n jj += 1\n if (ii >= 0) and (jj < nc) and (kk >= 0):\n out += alpha * cbeta * gamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-right\n ii += 1\n if (ii < nr) and (jj < nc) and (kk >= 0):\n out += calpha * cbeta * gamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-left\n jj -= 1\n if (ii < nr) and (jj >= 0) and (kk >= 0):\n out += calpha * beta * gamma * volume[kk, ii, jj]\n inside += 1\n kk += 1\n if(kk < ns):\n ii -= 1\n if (ii >= 0) and (jj >= 0):\n out += alpha * beta * cgamma * volume[kk, ii, jj]\n inside += 1\n jj += 1\n if (ii >= 0) and (jj < nc):\n out += alpha * cbeta * cgamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-right\n ii += 1\n if (ii < nr) and (jj < nc):\n out += calpha * cbeta * cgamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-left\n jj -= 1\n if (ii < nr) and (jj >= 0):\n out += calpha * beta * cgamma * volume[kk, ii, jj]\n inside += 1\n\n # assert that inside == 8\n #return 1 if inside == 8 else 0\n return out",
"def update_particle_cloud(self, scan):\n\n \"\"\"\n Initialise arrays for the new particle cloud,\n particle weights and cummulative weights\n \"\"\"\n newParticleCloud = []\n particleWeights = []\n \n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n\n sensorSigma=0.1 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n\n for p in self.particlecloud.poses:\n particleWeights.append(self.sensor_model.get_weight(scan, p))\n\n for i in range(len(self.particlecloud.poses)):\n randomSelection = numpy.random.random()\n csum = 0\n for p in self.particlecloud.poses:\n weight = self.sensor_model.get_weight(scan, p) / sum(particleWeights)\n csum += weight\n if csum >= randomSelection:\n newParticle = copy.deepcopy(p)\n newParticle.position.x = newParticle.position.x + (gaussianRandomNumX[i] * noise)\n newParticle.position.y = newParticle.position.y + (gaussianRandomNumY[i] * noise)\n newParticle.position.z = newParticle.position.z\n newParticleCloud.append(newParticle)\n break\n self.particlecloud.poses = newParticleCloud\n\n pass",
"def deposit_J_gpu(x, y, z, w,\r\n ux, uy, uz, inv_gamma,\r\n invdz, zmin, Nz,\r\n invdr, rmin, Nr,\r\n J0, J1,\r\n J2, J3,\r\n cell_idx, prefix_sum):\r\n # Get the 1D CUDA grid\r\n i = cuda.grid(1)\r\n # Deposit the field per cell in parallel (for threads < number of cells)\r\n if i < prefix_sum.shape[0]:\r\n # Calculate the cell index in 2D from the 1D threadIdx\r\n iz = int(i / Nr)\r\n ir = int(i - iz * Nr)\r\n # Calculate the inclusive offset for the current cell\r\n # It represents the number of particles contained in all other cells\r\n # with an index smaller than i + the total number of particles in the\r\n # current cell (inclusive).\r\n incl_offset = np.int32(prefix_sum[i])\r\n # Calculate the frequency per cell from the offset and the previous\r\n # offset (prefix_sum[i-1]).\r\n if i > 0:\r\n frequency_per_cell = np.int32(incl_offset - prefix_sum[i - 1])\r\n if i == 0:\r\n frequency_per_cell = np.int32(incl_offset)\r\n # Initialize the local field value for\r\n # all four possible deposition directions\r\n # Mode 0, 1 for r, t, z\r\n # 1 : lower in r, lower in z\r\n # 2 : lower in r, upper in z\r\n # 3 : upper in r, lower in z\r\n # 4 : upper in r, upper in z\r\n Jr1_m0 = 0. + 0.j\r\n Jr2_m0 = 0. + 0.j\r\n Jr3_m0 = 0. + 0.j\r\n Jr4_m0 = 0. + 0.j\r\n # -------------\r\n Jr1_m1 = 0. + 0.j\r\n Jr2_m1 = 0. + 0.j\r\n Jr3_m1 = 0. + 0.j\r\n Jr4_m1 = 0. + 0.j\r\n # -------------\r\n Jt1_m0 = 0. + 0.j\r\n Jt2_m0 = 0. + 0.j\r\n Jt3_m0 = 0. + 0.j\r\n Jt4_m0 = 0. + 0.j\r\n # -------------\r\n Jt1_m1 = 0. + 0.j\r\n Jt2_m1 = 0. + 0.j\r\n Jt3_m1 = 0. + 0.j\r\n Jt4_m1 = 0. + 0.j\r\n # -------------\r\n Jz1_m0 = 0. + 0.j\r\n Jz2_m0 = 0. + 0.j\r\n Jz3_m0 = 0. + 0.j\r\n Jz4_m0 = 0. + 0.j\r\n # -------------\r\n Jz1_m1 = 0. + 0.j\r\n Jz2_m1 = 0. + 0.j\r\n Jz3_m1 = 0. + 0.j\r\n Jz4_m1 = 0. + 0.j\r\n # Loop over the number of particles per cell\r\n for j in range(frequency_per_cell):\r\n # Get the particle index\r\n # ----------------------\r\n # (Since incl_offset is a cumulative sum of particle number,\r\n # and since python index starts at 0, one has to add -1)\r\n ptcl_idx = incl_offset - 1 - j\r\n\r\n # Preliminary arrays for the cylindrical conversion\r\n # --------------------------------------------\r\n # Position\r\n xj = x[ptcl_idx]\r\n yj = y[ptcl_idx]\r\n zj = z[ptcl_idx]\r\n # Velocity\r\n uxj = ux[ptcl_idx]\r\n uyj = uy[ptcl_idx]\r\n uzj = uz[ptcl_idx]\r\n # Inverse gamma\r\n inv_gammaj = inv_gamma[ptcl_idx]\r\n # Weights\r\n wj = w[ptcl_idx]\r\n\r\n # Cylindrical conversion\r\n rj = math.sqrt(xj**2 + yj**2)\r\n # Avoid division by 0.\r\n if (rj != 0.):\r\n invr = 1. 
/ rj\r\n cos = xj * invr # Cosine\r\n sin = yj * invr # Sine\r\n else:\r\n cos = 1.\r\n sin = 0.\r\n exptheta_m0 = 1.\r\n exptheta_m1 = cos + 1.j * sin\r\n\r\n # Get linear weights for the deposition\r\n # --------------------------------------------\r\n # Positions of the particles, in the cell unit\r\n r_cell = invdr * (rj - rmin) - 0.5\r\n z_cell = invdz * (zj - zmin) - 0.5\r\n # Original index of the uppper and lower cell\r\n # in r and z\r\n ir_lower = int(math.floor(r_cell))\r\n ir_upper = ir_lower + 1\r\n iz_lower = int(math.floor(z_cell))\r\n iz_upper = iz_lower + 1\r\n # Linear weight\r\n Sr_lower = ir_upper - r_cell\r\n Sr_upper = r_cell - ir_lower\r\n Sz_lower = iz_upper - z_cell\r\n Sz_upper = z_cell - iz_lower\r\n # Set guard weights to zero\r\n Sr_guard = 0.\r\n\r\n # Treat the boundary conditions\r\n # --------------------------------------------\r\n # guard cells in lower r\r\n if ir_lower < 0:\r\n Sr_guard = Sr_lower\r\n Sr_lower = 0.\r\n ir_lower = 0\r\n # absorbing in upper r\r\n if ir_lower > Nr - 1:\r\n ir_lower = Nr - 1\r\n if ir_upper > Nr - 1:\r\n ir_upper = Nr - 1\r\n # periodic boundaries in z\r\n # lower z boundaries\r\n if iz_lower < 0:\r\n iz_lower += Nz\r\n if iz_upper < 0:\r\n iz_upper += Nz\r\n # upper z boundaries\r\n if iz_lower > Nz - 1:\r\n iz_lower -= Nz\r\n if iz_upper > Nz - 1:\r\n iz_upper -= Nz\r\n\r\n # Calculate the currents\r\n # --------------------------------------------\r\n # Mode 0\r\n Jr_m0 = wj * c * inv_gammaj * (cos * uxj + sin * uyj) * exptheta_m0\r\n Jt_m0 = wj * c * inv_gammaj * (cos * uyj - sin * uxj) * exptheta_m0\r\n Jz_m0 = wj * c * inv_gammaj * uzj * exptheta_m0\r\n # Mode 1\r\n Jr_m1 = wj * c * inv_gammaj * (cos * uxj + sin * uyj) * exptheta_m1\r\n Jt_m1 = wj * c * inv_gammaj * (cos * uyj - sin * uxj) * exptheta_m1\r\n Jz_m1 = wj * c * inv_gammaj * uzj * exptheta_m1\r\n\r\n # Caculate the weighted currents for each\r\n # of the four possible direction\r\n # --------------------------------------------\r\n if ir_lower == ir_upper:\r\n # In the case that ir_lower and ir_upper are equal,\r\n # the current is added only to the array corresponding\r\n # to ir_lower.\r\n # (This is the case for the boundaries in r)\r\n Jr1_m0 += Sz_lower * Sr_lower * Jr_m0\r\n Jr1_m0 += Sz_lower * Sr_upper * Jr_m0\r\n Jr3_m0 += Sz_upper * Sr_lower * Jr_m0\r\n Jr3_m0 += Sz_upper * Sr_upper * Jr_m0\r\n # -------------------------------\r\n Jr1_m1 += Sz_lower * Sr_lower * Jr_m1\r\n Jr1_m1 += Sz_lower * Sr_upper * Jr_m1\r\n Jr3_m1 += Sz_upper * Sr_lower * Jr_m1\r\n Jr3_m1 += Sz_upper * Sr_upper * Jr_m1\r\n # -------------------------------\r\n Jt1_m0 += Sz_lower * Sr_lower * Jt_m0\r\n Jt1_m0 += Sz_lower * Sr_upper * Jt_m0\r\n Jt3_m0 += Sz_upper * Sr_lower * Jt_m0\r\n Jt3_m0 += Sz_upper * Sr_upper * Jt_m0\r\n # -------------------------------\r\n Jt1_m1 += Sz_lower * Sr_lower * Jt_m1\r\n Jt1_m1 += Sz_lower * Sr_upper * Jt_m1\r\n Jt3_m1 += Sz_upper * Sr_lower * Jt_m1\r\n Jt3_m1 += Sz_upper * Sr_upper * Jt_m1\r\n # -------------------------------\r\n Jz1_m0 += Sz_lower * Sr_lower * Jz_m0\r\n Jz1_m0 += Sz_lower * Sr_upper * Jz_m0\r\n Jz3_m0 += Sz_upper * Sr_lower * Jz_m0\r\n Jz3_m0 += Sz_upper * Sr_upper * Jz_m0\r\n # -------------------------------\r\n Jz1_m1 += Sz_lower * Sr_lower * Jz_m1\r\n Jz1_m1 += Sz_lower * Sr_upper * Jz_m1\r\n Jz3_m1 += Sz_upper * Sr_lower * Jz_m1\r\n Jz3_m1 += Sz_upper * Sr_upper * Jz_m1\r\n # -------------------------------\r\n if ir_lower != ir_upper:\r\n # In the case that ir_lower and ir_upper are different,\r\n # add the 
current to the four arrays according to\r\n # the direction.\r\n Jr1_m0 += Sz_lower * Sr_lower * Jr_m0\r\n Jr2_m0 += Sz_lower * Sr_upper * Jr_m0\r\n Jr3_m0 += Sz_upper * Sr_lower * Jr_m0\r\n Jr4_m0 += Sz_upper * Sr_upper * Jr_m0\r\n # -------------------------------\r\n Jr1_m1 += Sz_lower * Sr_lower * Jr_m1\r\n Jr2_m1 += Sz_lower * Sr_upper * Jr_m1\r\n Jr3_m1 += Sz_upper * Sr_lower * Jr_m1\r\n Jr4_m1 += Sz_upper * Sr_upper * Jr_m1\r\n # -------------------------------\r\n Jt1_m0 += Sz_lower * Sr_lower * Jt_m0\r\n Jt2_m0 += Sz_lower * Sr_upper * Jt_m0\r\n Jt3_m0 += Sz_upper * Sr_lower * Jt_m0\r\n Jt4_m0 += Sz_upper * Sr_upper * Jt_m0\r\n # -------------------------------\r\n Jt1_m1 += Sz_lower * Sr_lower * Jt_m1\r\n Jt2_m1 += Sz_lower * Sr_upper * Jt_m1\r\n Jt3_m1 += Sz_upper * Sr_lower * Jt_m1\r\n Jt4_m1 += Sz_upper * Sr_upper * Jt_m1\r\n # -------------------------------\r\n Jz1_m0 += Sz_lower * Sr_lower * Jz_m0\r\n Jz2_m0 += Sz_lower * Sr_upper * Jz_m0\r\n Jz3_m0 += Sz_upper * Sr_lower * Jz_m0\r\n Jz4_m0 += Sz_upper * Sr_upper * Jz_m0\r\n # -------------------------------\r\n Jz1_m1 += Sz_lower * Sr_lower * Jz_m1\r\n Jz2_m1 += Sz_lower * Sr_upper * Jz_m1\r\n Jz3_m1 += Sz_upper * Sr_lower * Jz_m1\r\n Jz4_m1 += Sz_upper * Sr_upper * Jz_m1\r\n # -------------------------------\r\n if ir_lower == ir_upper == 0:\r\n # Treat the guard cells.\r\n # Add the current to the guard cells\r\n # for particles that had an original\r\n # cell index < 0.\r\n Jr1_m0 += -1. * Sz_lower * Sr_guard * Jr_m0\r\n Jr3_m0 += -1. * Sz_upper * Sr_guard * Jr_m0\r\n # -----------------------------------\r\n Jr1_m1 += -1. * Sz_lower * Sr_guard * Jr_m1\r\n Jr3_m1 += -1. * Sz_upper * Sr_guard * Jr_m1\r\n # -----------------------------------\r\n Jt1_m0 += -1. * Sz_lower * Sr_guard * Jt_m0\r\n Jt3_m0 += -1. * Sz_upper * Sr_guard * Jt_m0\r\n # -----------------------------------\r\n Jt1_m1 += -1. * Sz_lower * Sr_guard * Jt_m1\r\n Jt3_m1 += -1. * Sz_upper * Sr_guard * Jt_m1\r\n # -----------------------------------\r\n Jz1_m0 += -1. * Sz_lower * Sr_guard * Jz_m0\r\n Jz3_m0 += -1. * Sz_upper * Sr_guard * Jz_m0\r\n # -----------------------------------\r\n Jz1_m1 += -1. * Sz_lower * Sr_guard * Jz_m1\r\n Jz3_m1 += -1. * Sz_upper * Sr_guard * Jz_m1\r\n # Write the calculated field values to\r\n # the field arrays defined on the interpolation grid\r\n J0[iz, ir, 0] = Jr1_m0\r\n J0[iz, ir, 1] = Jr1_m1\r\n J0[iz, ir, 2] = Jt1_m0\r\n J0[iz, ir, 3] = Jt1_m1\r\n J0[iz, ir, 4] = Jz1_m0\r\n J0[iz, ir, 5] = Jz1_m1\r\n # --------------------\r\n J1[iz, ir, 0] = Jr2_m0\r\n J1[iz, ir, 1] = Jr2_m1\r\n J1[iz, ir, 2] = Jt2_m0\r\n J1[iz, ir, 3] = Jt2_m1\r\n J1[iz, ir, 4] = Jz2_m0\r\n J1[iz, ir, 5] = Jz2_m1\r\n # --------------------\r\n J2[iz, ir, 0] = Jr3_m0\r\n J2[iz, ir, 1] = Jr3_m1\r\n J2[iz, ir, 2] = Jt3_m0\r\n J2[iz, ir, 3] = Jt3_m1\r\n J2[iz, ir, 4] = Jz3_m0\r\n J2[iz, ir, 5] = Jz3_m1\r\n # --------------------\r\n J3[iz, ir, 0] = Jr4_m0\r\n J3[iz, ir, 1] = Jr4_m1\r\n J3[iz, ir, 2] = Jt4_m0\r\n J3[iz, ir, 3] = Jt4_m1\r\n J3[iz, ir, 4] = Jz4_m0\r\n J3[iz, ir, 5] = Jz4_m1",
"def parallel_generate_particle_distribution(self, max_loop = np.inf, Ncore = 1, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n # start running\n nmax = self.N_part / Ncore\n #pool = Pool(processes = Ncore)\n #pool.apply_async(_while_loop,)\n #result = pool.map(_while_loop, args=(self, nmax, max_loop,))\n #print result.get(timeout = 100)\n #p = Process(target=_while_loop, args=(nmax, max_loop,))\n jobs = []\n for i in np.arange(Ncore):\n p = multiprocessing.Process(target=_while_loop, args=(self, nmax, max_loop, \n Ncore, outfile,))\n jobs.append(p)\n p.start()\n \n for p in jobs:\n p.join()\n \n #results = [None]*self.N_part\n #results = [OUTPUT.get() for p in jobs]\n \n #results = np.array(results)\n \n #pos = results[:,0]\n #pos = pos.reshape(self.N_part,3)\n #self.pos = pos\n \n #vel = results[:,1]\n #vel = vel.reshape(self.N_part,3)\n #self.vel = vel\n \n \n #if (not outfile == None):\n # self.write_pd(outfile)\n # combine to a single output\n bash_command = \"cat \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n bash_command = bash_command + \"> \" + outfile\n os.system(bash_command)\n \n # now remove temporary files\n bash_command = \"rm \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n os.system(bash_command)\n \n bash_command = \"sed -i -e '1i#m x y z vx vy vz\\' \" + outfile\n os.system(bash_command)\n self.load_particle_ic(outfile)\n \n return self.pos, self.vel",
"def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary condition are not periodic, the grid_size is double but particle kept in the first quadrant so \n #that the particles cannot feel the effect of the particles closed to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the partticle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial position are givem, place the particle to the right place on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial mometa are given, intialize it \n if len(momentum) != 0:\n self.particles.momentum = momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))",
"def main():\n\n # Create an empty array to hold our points.\n n = gpuarray.zeros(shape=(x, y, z),\n dtype=gpuarray.vec.float3)\n\n # Populate the array with randomized points from the search space.\n for k in range(z):\n for j in range(y):\n for i in range(x):\n n[i, j, k] = gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-depth, depth))\n\n # Declare our elementwise CUDA kernel.\n mod = Elementwise(\n arguments=\"float3 pt, float3 *ns, float *rs\",\n operation=\"rs[i] = sqrt(pow(pt.x-ns[i].x,2)+pow(pt.y-ns[i].y,2)+pow(pt.z-ns[i].z,2))\",\n name=\"euclidean_distance\",\n preamble=\"#include <math.h>\"\n )\n\n # Declare an empty results array.\n r = gpuarray.zeros(shape=(50, 50, 2), dtype=numpy.float32)\n start = cuda.Event()\n end = cuda.Event()\n start.record()\n # Call the kernel with a randomize point from the search space.\n mod(gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-width, width)), n, r)\n end.record()\n end.synchronize()\n print((start.time_till(end)))\n print(r)",
"def calc_x(x, ALD,PL): ## jit works\n\n row, col = cuda.grid(2)\n if row < ALD.shape[0] and col < ALD.shape[1]:\n if PL[row,col] != 0 :\n x[row,col] = (ALD[row,col] / PL[row,col]) - 1",
"def f(x):\n n_particles = x.shape[0]\n j = [f_per_particle(x[i]) for i in range(n_particles)]\n #print(\"f j: \", j)\n return np.array(j)",
"def mesh_uniform(N_e, d, Omega):",
"def E_step(X, pi, mu, sigma):\n N = X.shape[0] # number of objects\n C = pi.shape[0] # number of clusters\n d = mu.shape[1] # dimension of each object\n gamma = np.zeros((N, C)) # distribution q(T)\n\n ### YOUR CODE HERE\n # For all objects in dataset X\n for i in range(N):\n z = 0\n # Likelihood: P(x_i|t_i=c,theta) = N(x_i|mu_c, sigma_c²)\n # N(x_i|mu_c, sigma_c²) = (1/sqrt((2pi)^n*sigma_c_det)) * exp(-0.5*(x_i-mu_c).T*sigma_c⁻1*(x_i-mu_c))\n x_i = X[i]\n # For all clusters in mixture distribution\n for c in range(C):\n # parameters for cluster c\n pi_c = pi[c] # Prior prob. p(ti=c)\n mu_c = mu[c, :] # vector of means\n sigma_c = sigma[c, :] # covariance matrix\n # Covariance matrix determinant\n sigma_c_det = np.linalg.det(sigma_c)\n # Compute inverse as y = A⁻1*x (trick2)\n x = x_i - mu_c\n y = np.linalg.solve(sigma_c, x)\n exp = np.exp(-0.5*np.matmul(x, y))\n # Constant term\n norm_ct_c = pi_c / np.sqrt(sigma_c_det)\n # c component of q distribution for x_i\n gamma[i, c] = norm_ct_c * exp\n z += gamma[i, c]\n for c in range(C):\n gamma[i, c] /= z\n # # Normalize cluster distribution q(t_i=c): Softmax (trick1)\n # numerator = np.exp(gamma[i, :] - np.max(gamma[i, :]))\n # denominator = numerator.sum()\n # gamma[i, :] = numerator / denominator\n \n return gamma",
"def computeNodeVolumes(self):\n for i in np.arange(0,self.ni):\n for j in np.arange(0,self.nj):\n for k in np.arange(0,self.nk):\n \n V = self.dh[0]*self.dh[1]*self.dh[2]\n if (i==0 or i==self.ni-1): V*=0.5\n if (j==0 or j==self.nj-1): V*=0.5\n if (k==0 or k==self.nk-1): V*=0.5\n \n self.node_vol[i][j][k] = V",
"def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. - r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. Input your own kernel in `inp_kernel`\")\n exit()",
"def transform_pc3d(pcl_c3d, Ts, seq_n, K_cur, batch_n):\n\n ## need to transform: flat.uvb, flat.feature['xyz'], flat.feature['normal']\n ## no need to transform grid features\n \n assert batch_n % seq_n == 0 # mode==0\n n_group = batch_n // seq_n\n\n ## get relative pose\n T, R, t, target_id = relative_T(Ts, seq_n, batch_n)\n\n ## get accumulative length\n nb = pcl_c3d.flat.nb\n acc_b = []\n acc = 0\n acc_b.append( acc )\n for ib in range(batch_n):\n acc = acc + nb[ib]\n acc_b.append( acc )\n\n ## process flat features\n flat_xyz = pcl_c3d.flat.feature['xyz'] # 1*C*NB\n flat_normal = pcl_c3d.flat.feature['normal']\n trans_normal_list = []\n trans_xyz_list = []\n uvb_list = []\n new_nb = []\n for ib in range(batch_n):\n ## xyz\n trans_xyz = torch.matmul(R[ib], flat_xyz[:, :, acc_b[ib]:acc_b[ib+1]]) + t[ib]\n mask_positive = trans_xyz[0, 2, :] > 0\n trans_xyz = trans_xyz[:, :, mask_positive]\n trans_xyz_list.append(trans_xyz)\n new_nb.append(trans_xyz.shape[2])\n\n ## normal\n trans_normal = torch.matmul(R[ib], flat_normal[:, :, acc_b[ib]:acc_b[ib+1]])\n trans_normal = trans_normal[:, :, mask_positive]\n trans_normal_list.append(trans_normal)\n\n ## project to uv, add b\n uvb = torch.matmul(K_cur[ib], trans_xyz)\n uvb[:, :2] = uvb[:, :2] / uvb[:, [2]] #- 1 , commented because in dataset_read.py there is a K_mat2py() function converting K from matlab to python coordinate\n uvb[:, 2, :] = target_id[ib]\n uvb_list.append(uvb)\n\n ## construct the new object\n tr_pcl_c3d = PCL_C3D_Flat()\n tr_pcl_c3d.feature['xyz'] = torch.cat(trans_xyz_list, dim=2)\n tr_pcl_c3d.feature['normal'] = torch.cat(trans_normal_list, dim=2)\n tr_pcl_c3d.uvb = torch.cat(uvb_list, dim=2)\n tr_pcl_c3d.nb = new_nb\n\n for feat_key in pcl_c3d.flat.feature:\n if feat_key not in ['xyz', 'normal']:\n tr_pcl_c3d.feature[feat_key] = pcl_c3d.flat.feature[feat_key]\n\n return tr_pcl_c3d",
"def sphere_cart()\ndef simulator(nparticles, ninteractions, vacradius, vesradius):\n for i in range(nparticles):\n #neutron = neutron_func(i)\n energy = 14E6\n phi = calc_phi()\n theta = calc_theta()\n xneut = 0\n yneut = 0\n zneut = 0\n d = collision_distance(phi, theta, xneut, zneut)\n r = -np.log(random.random(seed))/sigma_t(energy)\n j = 0\n while (j <= ninteractions)\n xneut = sphere_cart(scatter(energy, A)[0:2])",
"def _launch_particles(self, istep):\n for i in range(self.grid.NX-1):\n for j in range(self.grid.NY-1):\n INDX = i\n INDY = j\n cell = self.grid.CELLS[INDX, INDY]\n TLOCAL = self.TIME[istep] - cell.CLOCK\n TCRIT = cell.TIGNTR * (1 + RELT*normal())\n if cell.BURNSTAT == 1 and TLOCAL > TCRIT and cell.BURNSTAT2 == 1:\n LOCALF = LANGFACTOR\n indp = (INDX*(self.grid.NY - 1) + INDY)*2*Cell.NPARTMAX - 1\n for k in range(cell.NPARTTR):\n self.particles[k + indp].update(state=1.0, factor=LOCALF)\n for k in range(cell.NPARTRAD):\n self.particles[k + cell.NPARTTR + indp].update(state=1.0, factor=LOCALF)\n cell.BURNSTAT2 = 0",
"def optimizeFluidArray(self):\n print(\"Run the function for optimization.\")\n self.fluidNodes = np.empty(self.voidSpace, dtype = np.int64)\n ySize = self.ny; xSize = self.nx\n print(\"Start to fill effective fluid nodes.\")\n tmpIndicesDomain = -np.ones(self.isDomain.shape, dtype = np.int64)\n tmpIndicesFN = 0\n for i in sp.arange(ySize):\n for j in sp.arange(xSize):\n if (self.isDomain[i, j] == 1):\n# if (self.effectiveDomain[i, j] == 255.):\n tmpIndices = i * xSize + j\n self.fluidNodes[tmpIndicesFN] = tmpIndices\n tmpIndicesDomain[i, j] = tmpIndicesFN\n tmpIndicesFN += 1\n self.neighboringNodes = np.zeros(self.fluidNodes.size * 8, dtype = np.int64)\n if self.interactionType == \"'EFS'\":\n if self.explicitScheme == 8:\n self.neighboringNodesISO8 = np.zeros(self.fluidNodes.size * 24, \\\n dtype = np.int64)\n elif self.explicitScheme == 10:\n self.neighboringNodesISO10 = np.zeros(self.fluidNodes.size * 36, \\\n dtype = np.int64)\n totalNodes = self.fluidNodes.size\n #use cuda to generate the array for neighboring nodes\n print(\"Start to fill neighboring nodes\")\n deviceFluidNodes = cuda.to_device(self.fluidNodes)\n devicetmpIndicesDomain = cuda.to_device(tmpIndicesDomain)\n# deviceIsDomain = cuda.to_device(self.isDomain)\n deviceNeighboringNodes = cuda.to_device(self.neighboringNodes)\n blockNumX = int(self.xDimension / self.threadNum) \n blockNumY = math.ceil(self.fluidNodes.size / self.xDimension)\n threadPerBlock1D = (self.threadNum, 1)\n grid = (blockNumX, blockNumY)\n\n fillNeighboringNodes[grid, threadPerBlock1D](totalNodes, self.nx, self.ny, \\\n self.xDimension, deviceFluidNodes, devicetmpIndicesDomain, \\\n deviceNeighboringNodes)\n self.neighboringNodes = deviceNeighboringNodes.copy_to_host()\n if self.interactionType == \"'EFS'\":\n if self.explicitScheme == 8:\n deviceNeighboringNodesISO8 = cuda.to_device(self.neighboringNodesISO8)\n fillNeighboringNodesISO8[grid, threadPerBlock1D](totalNodes, self.nx, self.ny, \\\n self.xDimension, deviceFluidNodes, devicetmpIndicesDomain, \\\n deviceNeighboringNodesISO8)\n self.neighboringNodesISO8 = deviceNeighboringNodesISO8.copy_to_host()\n elif self.explicitScheme == 10:\n deviceNeighboringNodesISO10 = cuda.to_device(self.neighboringNodesISO10)\n fillNeighboringNodesISO10[grid, threadPerBlock1D](totalNodes, self.nx, self.ny, \\\n self.xDimension, deviceFluidNodes, devicetmpIndicesDomain, \\\n deviceNeighboringNodesISO10)\n self.neighboringNodesISO10 = deviceNeighboringNodesISO10.copy_to_host()\n \n print(\"Redefine the fluid nodes.\")\n# cuda.current_context().trashing.clear()\n self.optFluidPDF = np.empty([self.typesFluids, self.fluidNodes.size, 9])\n self.optFluidRho = np.empty([self.typesFluids, self.fluidNodes.size])\n self.optMacroVelocity = np.zeros(self.fluidNodes.size)\n self.optMacroVelocityX = np.zeros(self.fluidNodes.size, dtype = np.float64)\n self.optMacroVelocityY = np.zeros(self.fluidNodes.size, dtype = np.float64)\n self.optForceX = np.zeros([self.typesFluids, self.fluidNodes.size], \\\n dtype = np.float64)\n self.optForceY = np.zeros([self.typesFluids, self.fluidNodes.size], \\\n dtype = np.float64)\n tmpDomain = np.array([i == 1 for i in self.isDomain.reshape(ySize * xSize)])\n for i in sp.arange(self.typesFluids):\n self.optFluidRho[i] = self.fluidsDensity.reshape(self.typesFluids, \\\n ySize * xSize)[i, tmpDomain]\n self.optFluidPDF[i] = self.fluidPDF.reshape(self.typesFluids, ySize * \\\n xSize, 9)[i, tmpDomain]",
"def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n if k!=i:\n i_axis0.append(particles[k])\n if k!=j:\n i_axis1.append(particles[k])\n dist = np.delete(dist,np.hstack(i_axis0), axis=0)\n dist = np.delete(dist,np.hstack(i_axis1), axis=1)\n\n\n\n bin_count = np.zeros((nbins,3))\n bin_ends = -rmax*np.cos(np.linspace(np.pi/2,np.pi,num=nbins+1))\n\n vol_old=0\n for i in range(nbins):\n bin_count[i,0]=0.5*(bin_ends[i+1]+bin_ends[i]) #Count position in the middle of the bin only needed in the first\n rmax_bin=bin_ends[i+1]\n indexes=np.where(dist<=rmax_bin)\n dist[indexes]=1000\n bin_count[i,1]=len(indexes[0])/len(particles[j])\n print(len(particles[j]))\n vol_new=4/3*np.pi*rmax_bin**3\n bin_count[i,2]=bin_count[i,1]/(vol_new-vol_old)\n\n rho_ave=256/6.71838**3 #np.sum(bin_count[:,1])/(4/3*np.pi*rmax**3)\n\n print(rho_ave)\n\n bin_count[:,2]=bin_count[:,2]/rho_ave**2 #g(r)=rho(r)/rho_ave\n\n return bin_count",
"def compute(self): \n Ex=np.zeros((self.nx,self.ny+1))\n Ey=np.zeros((self.nx+1,self.ny))\n Hz=np.zeros((self.nx,self.ny))\n Hzx=np.zeros((self.nx,self.ny))\n Hzy=np.zeros((self.nx,self.ny))\n \n imx = []\n #eps, mu = self.makeenv()\n mu=np.ones((self.nx,self.ny))*const.mu_0\n eps = self.luneberg(int(self.nx/2), int(self.ny*2/3), self.R)\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n\n c = self.dt/(eps*self.ds)\n d = self.dt/(mu* self.ds)\n \n sigma = self.pml(eps, mu, 20)\n cax = 1 - (sigma[0] * self.dt / eps)\n cay = 1 - (sigma[1] * self.dt / eps)\n dax = 1 - (sigma[2] * self.dt / mu) \n day = 1 - (sigma[3] * self.dt / mu)\n \n bar = progressbar.ProgressBar()\n for n in bar(range(self.nt+1)):\n Ex[:,1:-1] = (cay[:,1:]+cay[:,:-1])/2*Ex[:,1:-1] + (c[:,1:]+c[:,:-1])/2*(Hz[:,1:]-Hz[:,:-1])\n Ey[1:-1,:] = (cax[1:,:]+cax[:-1,:])/2*Ey[1:-1,:] - (c[1:,:]+c[:-1,:])/2*(Hz[1:,:]-Hz[:-1,:])\n \n Hzx = dax*Hzx - d*(Ey[1:,:] - Ey[:-1,:])\n Hzy = day*Hzy + d*(Ex[:,1:] - Ex[:,:-1]) \n Hz = Hzx + Hzy + self.actualsource(self.source, self.f, n, self.dt) \n \n if(n%self.interval == 0): imx.append(Ex[:self.nx,:self.ny]**2 + Ey[:self.nx, :self.ny]**2)\n\n return imx",
"def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n #start = time.time()\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n output = np.zeros([ish[0],(ish[1]-fsh[0])//strides[1]+1,(ish[2]-fsh[1])//strides[2]+1,fsh[3]]).astype(float32)\r\n osh = output.shape\r\n\r\n assert c_kernel.conv2d_c(get_pointer(input), ish[0],ish[1],ish[2],ish[3],get_pointer(filter),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output), osh[0],osh[1],osh[2],osh[3])==0\r\n #print(\"conv2d\") \r\n #end = time.time()\r\n\r\n #print(end - start) \r\n return output\r\n \r\n '''\r\n rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n print(input[m,strides[1]*i+di,strides[2]*j+dj,:])\r\n print(filter[di,dj,:,:])\r\n t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\r\n output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\r\n #print(\"type(output)\")\r\n #print(type(output))\r\n return output\r\n '''",
"def compute_force(X, V, bl, ip, box, gamma, kT, dt):\n N = len(X)\n F = np.zeros((N, 3))\n Fcube = np.zeros((N, N, 3))\n inv_box = np.zeros((3, 3))\n for i in range(3): inv_box[i, i] = 1.0 / box[i, i]\n g = np.zeros(3)\n rij = np.zeros(3)\n vij = np.zeros(3)\n a = 0.0\n nr = 0.0\n fpair = 0.0\n\n vir = 0.0\n sigma = np.zeros(3)\n volume = np.linalg.det(box)\n\n for i in range(N):\n for j in range(i):\n rij = X[i] - X[j]\n g = matvecmul(inv_box, rij)\n g = g - np.round_(g, 0, np.empty_like(g))\n rij = matvecmul(box, g)\n vij = V[i] - V[j]\n\n a = ip[bl[i]-1, bl[j]-1]\n nr = norm_numba(rij)\n\n fc = a * wr(nr)\n fpair = fc \\\n - gamma * wr(nr)**2 * dot_numba(rij, vij) / nr \\\n + sqrt(2.0*gamma*kT) * wr(nr) * np.random.randn() / sqrt(dt)\n Fcube[i, j, :] = fpair / nr * rij\n Fcube[j, i, :] = -fpair / nr * rij\n\n vir += Fcube[i, j, :] @ rij\n sigma += Fcube[i, j, :] * rij\n\n # kinetic part of stress tensor\n for i in range(N):\n sigma += V[i] * V[i]\n\n sigma = sigma / volume\n F = np.sum(Fcube, 1)\n\n return F, vir, sigma",
"def flow_pc3d(pcl_c3d, flow_grid, flow_mask_grid, K_cur, feat_comm_keys, use_normal, sparse_nml_opts=None, return_stat=False, timer=None):\n if timer is not None:\n timer.log(\"flow_pc3d start\", 1, True)\n\n batch_size = flow_grid.shape[0]\n\n ### compose the flow to xyz\n xyz_grid = pcl_c3d.grid.feature['xyz']\n xyz_flat = xyz_grid.reshape(batch_size, 3, -1)\n flow_flat = flow_grid.reshape(batch_size, 3, -1)\n flow_flat = torch.cat([flow_flat[:,:2].detach(), flow_flat[:, 2:]], dim=1) # detach the x and y dimension of the flow\n xyz_flowed_flat = xyz_flat.detach() + flow_flat # detach so that the flowed c3d loss only affects the flow gradient instead of both flow and depth. Otherwise depth could be confused. \n # logging.info(\"xyz_flat.detach(): %s\"%(xyz_flat.detach().requires_grad))\n\n ### mask out invalid pixels and project to image uv coordinate\n xyz_mask_grid = pcl_c3d.grid.mask\n # if False:\n if flow_mask_grid is not None:\n mask_grid = xyz_mask_grid & flow_mask_grid\n else:\n mask_grid = xyz_mask_grid \n mask_flat = mask_grid.reshape(batch_size, 1, -1)\n\n xyz_flowed_flat_list = [None]*batch_size\n uvb_list = [None]*batch_size\n new_nb = [None]*batch_size\n inview_mask_list = [None]*batch_size\n \n for ib in range(batch_size):\n if timer is not None:\n timer.log(\"uvb, inview_mask ib=%d\"%ib, 2, True)\n mask_vec = mask_flat[ib, 0]\n xyz_flowed_flat_cur = xyz_flowed_flat[[ib]][:,:,mask_vec] # 1*3*N\n\n uvb = torch.matmul(K_cur[ib], xyz_flowed_flat_cur) # 1*3*N\n uvb_1 = ( uvb / torch.clamp(torch.abs(uvb[:, [2]]), min=1e-6) ).round() #- 1 , commented because in dataset_read.py there is a K_mat2py() function converting K from matlab to python coordinate\n uvb_1[:, 2] = ib\n # uvb_list[ib] = uvb\n\n # assert (uvb[:,2] == xyz_flowed_flat_cur[:,2]).all(), \"{} {}\".format(uvb[0,2,0], xyz_flowed_flat_cur[0,2,0])\n # logging.info( \"{} {}\".format(uvb[0,2,0], xyz_flowed_flat_cur[0,2,0]) )\n ### check whether the new points are in the view of camera\n inview_mask = (uvb_1[0,0,:] > 0) & (uvb_1[0,0,:] < mask_grid.shape[3]) & (uvb_1[0,1,:] > 0) & (uvb_1[0,1,:] < mask_grid.shape[2]) & (xyz_flowed_flat_cur[0,2,:] > 0.1)\n inview_mask_list[ib] = inview_mask\n\n xyz_flowed_flat_cur = xyz_flowed_flat_cur[:,:,inview_mask]\n uvb_1 = uvb_1[:,:,inview_mask]\n # logging.info(\"diff between uvb2: {}, {}, {}\".format((uvb_1-uvb_2).max(), (uvb_1-uvb_2).min(), (uvb_1[:,:2]-uvb_2[:,:2]).mean()) )\n # logging.info(\"uvb_1.shape: {} {}\".format(uvb_1.shape, uvb.shape))\n xyz_flowed_flat_list[ib] = xyz_flowed_flat_cur\n uvb_list[ib] = uvb_1\n\n new_nb[ib] = uvb_1.shape[2]\n \n # print(\"new_nb:\", new_nb)\n if timer is not None:\n timer.log(\"cat xyz, uvb\", 1, True)\n\n xyz_flowed_flat = torch.cat(xyz_flowed_flat_list, dim=2)\n uvb_flat = torch.cat(uvb_list, dim=2)\n\n ### The occlusion check is the speed bottleneck (>0.4s), and the effect is similar to flow_mask_grid, therefore disabled\n # if timer is not None:\n # timer.log(\"occlu_mask\", 1, True)\n # ### find the duplicate points and filter out those not close to the camera\n # occlu_mask = torch.ones(uvb_flat.shape[2], dtype=torch.bool, device=mask_grid.device)\n\n # uvb_dim = [xyz_grid.shape[0], xyz_grid.shape[2], xyz_grid.shape[3]]\n # velo_proj_lin = sub2ind(uvb_dim, uvb_flat[0, 2, :], uvb_flat[0, 1, :], uvb_flat[0, 0, :] ) # B, H, W\n # dupe_proj_lin = [item for item, count in Counter(velo_proj_lin).items() if count > 1]\n # # print(\"# or dupe_proj_lin:\", len(dupe_proj_lin))\n # for dd in dupe_proj_lin:\n # pts = torch.where(velo_proj_lin == 
dd)[0] ### torch.where() [actually torch.nonzero(condition, as_tuple=True)] returns a tuple. [0] takes the array of the first dim.\n # z_min = 1e7\n # for pt_idx in pts:\n # z_cur = xyz_flowed_flat[0, 2, pt_idx]\n # if z_cur < z_min:\n # z_min = z_cur\n # min_idx = pt_idx\n # else:\n # occlu_mask[pts] = False\n # ib = uvb_flat[0, 2, pt_idx]\n # new_nb[ib] -= 1\n \n # # print(\"before occlu_mask:\", xyz_flowed_flat.shape[2])\n # xyz_flowed_flat = xyz_flowed_flat[:,:,occlu_mask]\n # uvb_flat = uvb_flat[:,:,occlu_mask]\n # # print(\"after occlu_mask:\", xyz_flowed_flat.shape[2])\n\n if timer is not None:\n timer.log(\"PCL_C3D_Flat\", 1, True)\n ### construct PCL_C3D_Flat\n flow_pcl_c3d_flat = PCL_C3D_Flat()\n flow_pcl_c3d_flat.uvb = uvb_flat\n flow_pcl_c3d_flat.feature['xyz'] = xyz_flowed_flat\n flow_pcl_c3d_flat.nb = new_nb\n\n ### need to exit early if empty, otherwise later processing will produce unpredicted result and failure in next iteration\n if any(n <= 0 for n in new_nb):\n return flow_pcl_c3d_flat, None\n # raise ValueError(\"empty pcl: {}\".format(new_nb))\n\n if timer is not None:\n timer.log(\"feat_flat\", 1, True)\n ### copy those shared features from original point cloud. Remember to apply the same masking.\n for feat in feat_comm_keys:\n feat_flat = pcl_c3d.grid.feature[feat].reshape(batch_size, 3, -1)\n feat_flat_list = [None]*batch_size\n for ib in range(batch_size):\n mask_vec = mask_flat[ib, 0]\n feat_flat_list[ib] = feat_flat[[ib]][:,:,mask_vec]\n\n ### filter out out-of-view points\n feat_flat_list[ib] = feat_flat_list[ib][:,:,inview_mask_list[ib]]\n\n feat_flat_concat = torch.cat(feat_flat_list, dim=2)\n ### filter out points duplicated on image\n # flow_pcl_c3d_flat.feature[feat] = feat_flat_concat[:,:,occlu_mask]\n flow_pcl_c3d_flat.feature[feat] = feat_flat_concat\n\n if timer is not None:\n timer.log(\"feat_grid\", 1, True)\n ### prepare xyz_grid of the flowed point cloud\n uvb_split = uvb_flat.to(dtype=torch.long).squeeze(0).transpose(0,1).split(1,dim=1) # a tuple of 3 elements of tensor N*1, only long/byte/bool tensors can be used as indices\n xyz_flowed_grid = grid_from_concat_flat_func(uvb_split, xyz_flowed_flat, xyz_grid.shape)\n mask_flowed_grid = (xyz_flowed_grid != 0).any(1, keepdim=True)\n\n if timer is not None:\n timer.log(\"calc_normal\", 1, True)\n ### calculate sparse normal\n if use_normal:\n if return_stat:\n normal_flat, nres_flat, dist_stat_flat = calc_normal(flow_pcl_c3d_flat.uvb, xyz_flowed_grid, mask_flowed_grid, sparse_nml_opts.normal_nrange, sparse_nml_opts.ignore_ib, sparse_nml_opts.min_dist_2, return_stat=return_stat)\n else:\n normal_flat, nres_flat = calc_normal(flow_pcl_c3d_flat.uvb, xyz_flowed_grid, mask_flowed_grid, sparse_nml_opts.normal_nrange, sparse_nml_opts.ignore_ib, sparse_nml_opts.min_dist_2, return_stat=return_stat)\n \n flow_pcl_c3d_flat.feature['normal'] = normal_flat\n flow_pcl_c3d_flat.feature['nres'] = nres_flat\n\n if return_stat:\n flow_pcl_c3d_flat.feature['dist_stat'] = dist_stat_flat\n\n if timer is not None:\n timer.log(\"PCL_C3D_Grid\", 1, True)\n ### construct PCL_C3D_Grid\n flow_pcl_c3d_grid = PCL_C3D_Grid()\n flow_pcl_c3d_grid.mask = mask_flowed_grid\n flow_pcl_c3d_grid.feature['xyz'] = xyz_flowed_grid\n\n for feat in feat_comm_keys:\n flow_pcl_c3d_grid.feature[feat] = grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature[feat], pcl_c3d.grid.feature[feat].shape)\n\n if use_normal:\n flow_pcl_c3d_grid.feature['normal'] = grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature['normal'], 
pcl_c3d.grid.feature['normal'].shape)\n flow_pcl_c3d_grid.feature['nres'] = grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature['nres'], pcl_c3d.grid.feature['nres'].shape)\n if return_stat:\n flow_pcl_c3d_grid.feature['dist_stat'] = grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature['dist_stat'], pcl_c3d.grid.feature['dist_stat'].shape) \n\n return flow_pcl_c3d_flat, flow_pcl_c3d_grid",
"def compute_graphlet_kernel(graphs):\n start_time = time.time()\n\n N = len(graphs)\n\n phi = np.zeros((N, 2))\n\n ind = 0\n for G in graphs:\n for node1 in G.nodes():\n for node2 in G.neighbors(node1):\n for node3 in G.neighbors(node2):\n if node1 != node3:\n if node3 in G.neighbors(node1):\n increment = 1.0 / 2.0\n phi[ind, 0] += increment\n else:\n increment = 1.0 / 6.0\n phi[ind, 1] += increment\n\n ind += 1\n\n K = np.dot(phi, phi.T)\n end_time = time.time()\n print \"Total time for Graphlet kernel: \", (end_time - start_time)\n\n return K"
] | [
"0.6445836",
"0.6372974",
"0.61668825",
"0.59983677",
"0.5791244",
"0.5653549",
"0.5652757",
"0.56389385",
"0.56339914",
"0.55467594",
"0.55444217",
"0.55392367",
"0.5479442",
"0.5449648",
"0.5441298",
"0.5424582",
"0.54130393",
"0.53808963",
"0.5377156",
"0.5375097",
"0.5365496",
"0.53516036",
"0.53505725",
"0.53346366",
"0.53034264",
"0.52912366",
"0.5283205",
"0.52688444",
"0.52674884",
"0.5257063"
] | 0.6884111 | 0 |
Make a short score with pickup and two voices. | def makeScoreWithPickup(self):
    sc = stream.Score()
    num_voices = 2
    pitches = ['C', 'A-']
    for i in range(num_voices):
        part = stream.Part()
        part.id = 'part %d' % i
        time_sig = meter.TimeSignature('4/4')
        key_sig = key.Key('c')
        # Add pickup measure.
        pickup = stream.Measure()
        pickup.append(time_sig)
        pickup.append(key_sig)
        n1 = music21_note.Note(pitches[i])
        n1.duration.quarterLength = 1
        pickup.append(n1)
        part.append(pickup)
        # Add full measure.
        full_m = stream.Measure()
        full_m.append(n1)
        n2 = n1.transpose('M2')
        full_m.append(n2)
        full_m.repeatAppend(n1, 2)
        part.append(full_m)
        sc.insert(0, part)
    # Show the full score and all score elements in indented text.
    # sc.show('text')
    return sc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeScore(self):\n sc = stream.Score()\n num_voices = 2\n pitches = ['C', 'A-']\n for i in range(num_voices):\n part = stream.Part()\n part.id = 'part %d' % i\n time_sig = meter.TimeSignature('4/4')\n key_sig = key.Key('c')\n\n # Make a note.\n n1 = music21_note.Note(pitches[i])\n n1.duration.quarterLength = 1\n\n # Add full measure.\n full_m = stream.Measure()\n full_m.append(time_sig)\n full_m.append(key_sig)\n full_m.append(n1)\n n2 = n1.transpose('M2')\n full_m.append(n2)\n full_m.repeatAppend(n1, 2)\n part.append(full_m)\n\n # Add another full measure.\n full_m = stream.Measure()\n full_m.append(n1)\n n2 = n1.transpose('M2')\n full_m.append(n2)\n full_m.repeatAppend(n1, 2)\n part.append(full_m)\n\n sc.insert(0, part)\n\n # Show the full score and all score elements in indented text.\n # sc.show('text')\n return sc",
"def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])",
"def score_notify(score1, score2):\n\tif score1 > score2 :\n\t\tresult = \"Player A won\"\n\telif score1 < score2 : \n\t\tresult = \"Player B won\"\n\telse :\n\t\tresult = \"Tied Score\"\n\treturn result",
"def disp_score():",
"def say_scores(score0, score1):\n print(\"Player 0 now has\", score0, \"and Player 1 now has\", score1)\n return say_scores",
"def _score_a_quartet(self, num_one, num_two):\n score = 0\n if num_one > 0 and num_two > 0: return 0 #no one can win here, or nothing is here yet\n elif num_one == 0 and num_two == 0: return 0\n\n elif num_two == 4 or num_one == 4: score = 100000000 #someone wins\n\n elif num_two == 3 or num_one == 3: score = 100\n\n elif num_two == 2 or num_one == 2: score = 10\n\n elif num_two == 1 or num_one == 1: score = 1\n\n else: #This should never happen\n print(\"That's not right. There are \" + str(num_one) + \" ones and \" + str(num_two) + \" twos here.\")\n return None\n\n if self.who_played != our_player: return score * -1\n return score",
"def score(self):",
"def vanilaScore(self,attended,state,W):",
"def start_game(answer, session):\n\n print(\"start_game, answer: \", answer)\n\n attributes = reset_attributes()\n\n if answer == \"einem spieler\":\n answer = \"1\"\n if answer == \"vier spieler\":\n answer = \"4\"\n\n if answer in [str(x) for x in range(1, 5)]:\n curr_round = 1\n curr_player = 1\n state = \"Gameon\"\n scores = {x:0 for x in range(1, int(answer)+1)}\n sess_fragen = populate_questions(scores)\n \n attributes[\"question_index\"] = 0\n attributes[\"current_round\"] = curr_round\n attributes[\"current_player\"] = curr_player\n attributes[\"state\"] = state\n attributes[\"scores\"] = scores\n attributes[\"sess_questions\"] = sess_fragen\n\n if answer == \"1\":\n text = \"<s>Alles klar. \"+ TEXT_BREAK + \"Wir beginnen ein Spiel mit einem Spieler.\"+\\\n \"</s> <s>Das Quiz enthält {} Fragen.\\\n </s>\".format(TOTAL_ROUNDS)\n else:\n text = \"<s>Alles klar.\" + TEXT_BREAK + \"Wir beginnen ein Spiel mit {} Spielern\"\\\n .format(answer) +\\\n \"</s><s> Es werden jeweils {} Fragen an jeden Spieler gestellt.\\\n </s>\".format(TOTAL_ROUNDS)\n\n frage1 = ask_question(0, attributes)\n text += TICK_HELP_MESSAGE\n text += frage1\n card_text = \"Spiel mit {0} Spielern begonnen.\\n\".format(len(scores)) + clear_tags(frage1)\n\n else:\n richtige_zahl_prompt = \"Sag eine Nummer zwischen 1 und 4.\"\n text = \"Ungültige Spielerzahl. \" + richtige_zahl_prompt\n frage1 = SPIELER_PROMPT_TEXT\n card_text = text\n\n attributes[\"current_question\"] = frage1\n attributes[\"speech_output\"] = text\n attributes[\"reprompt_text\"] = frage1\n \n return response(text, should_end_session=False, reprompt_text=frage1, \\\n attributes=attributes, card_text=card_text)",
"def reward_conversion(self, hero_selection):\n count = sum([sum(rank.values()) for rank in hero_selection.itervalues()])\n combo = None\n\n if count == 2:\n # Only queens currently\n combo = Hero.QUEEN\n\n elif count == 3:\n if self._calc_same(hero_selection, 3):\n combo = 'same'\n else:\n combo = '_'.join(sorted([k for k in hero_selection.iterkeys()]))\n\n elif count == 4:\n pairs = [rank for rank, sub in hero_selection.iteritems() if sum(sub.values()) >= 2]\n if len(pairs) == 2:\n combo = 'double'\n elif self._calc_same(hero_selection, 4):\n combo = 'same'\n\n elif count == 5:\n if self._calc_same(hero_selection, 5):\n combo = 'same'\n\n elif len(hero_selection.keys()) == 5:\n faces = set([face for k, v in hero_selection.iteritems() \\\n for face in v.iterkeys()])\n if len(faces) == 1:\n combo = 'all_same'\n else:\n combo = 'different'\n\n elif len(hero_selection.keys()) == 2:\n cards = set([max(face.values()) for rank, face in \\\n hero_selection.iteritems()])\n\n if set([3,2]) == cards:\n combo = 'three'\n\n\n if not combo:\n self.log.warning('Unable to calculate hero price')\n return\n\n price = '%d_%s' % (count, combo)\n self.log.debug('Calculated price: {0}'.format(price))\n\n return price",
"def silence(score0, score1):\n return silence",
"def start(scale, entry, label, v):\r\n\r\n # The following variables are common across all the 5 different voices selected and so, will only be changed there for space considerations\r\n CHANNELS = 1\r\n RATE = 8000\r\n DURATION = 0\r\n WIDTH = 2\r\n BLOCKLEN = 1024\r\n\r\n if len(\r\n entry.get()) == 0: # can try and get rid of invalid characters when saving file too but that won't be necessary\r\n label['text'] = 'File name cannot be empty!'\r\n else:\r\n DURATION = scale.get()\r\n output_wavfile = entry.get()\r\n\r\n label['text'] = 'You will be recording for ' + str(DURATION) + ' seconds.'\r\n\r\n if v.get() == 1:\r\n voice1(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"1\")\r\n elif v.get() == 2:\r\n voice2(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"2\")\r\n elif v.get() == 3:\r\n voice3(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"3. Roger, roger!\")\r\n elif v.get() == 4:\r\n voice4(output_wavfile, DURATION, RATE, WIDTH, CHANNELS)\r\n print(\"4\")\r\n elif v.get() == 5:\r\n manualControl(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"5\")\r\n\r\n # after whatever operation we do\r\n label['text'] = 'Successfully saved ' + output_wavfile + '.wav file'\r\n\r\n pass",
"def update_score():\n pass",
"def _score_to_decision(self, score):",
"def score():\r\n\r\n point_1 = 0\r\n point_2 = 0\r\n print(term.move_xy(82,15) + term.white + 'Score joueur 1 : ', end='')\r\n print(point_1)\r\n print(term.move_xy(82,16) + term.white + 'Score joueur 2 : ', end='' )\r\n print(point_2)",
"def quality(self):\n return self.plays * self.number",
"def get_score(self, a, b):\n ### FILL IN ###",
"def to_score(self):\n self._bottom_tab(2)\n self._goto(\"score\")",
"def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))",
"def separate_voices_score(score, compare=True, parameters=Parameters(), verbose=True):\n nb_measures = get_number_measures(score)\n new_score = m21.stream.Score()\n total_cost = 0\n\n for nb in range(0, nb_measures):\n measure, start_offset, end_offset = get_measure(score, nb)\n # if measure doesn't exist, continue (for example : no 0 measure (no anacrusis))\n if measure is None:\n continue\n\n voices = separate_voices(measure, start_offset, end_offset, parameters)\n new_score.append(m21.stream.Measure(voices, number=nb))\n\n if compare:\n total_cost += score_compare(measure, voices)\n if verbose:\n print(\"Measure :\", nb)\n print(\"\\tCost :\", total_cost)\n\n print(\"Result for this score :\", total_cost)\n\n if compare:\n return (new_score, total_cost)\n return new_score",
"def announce_highest(who, previous_high=0, previous_score=0):\n assert who == 0 or who == 1, 'The who argument should indicate a player.'\n if who ==0 :\n if previous_score> previous_high:\n previous_high=previous_score\n if previous_score>1:\n print(previous_score,'points! Thats the biggest gain yet for Player 1')\n #print('Thats the biggest gain yet for Player 1')\n elif previous_score==1:\n print(previous_score,'point! Thats the biggest gain yet for Player 1')\n print('Thats the biggest gain yet for Player 1')\n else: \n print('Player 1 gets ',previous_score,'point ; not enough for a new high')\n elif who==1:\n if previous_score> previous_high:\n previous_high=previous_score\n if previous_score>1:\n print(previous_score,'points! Thats the biggest gain yet for Player 2')\n #print('Thats the biggest gain yet for Player 2')\n elif previous_score==1:\n print(previous_score,'point! Thats the biggest gain yet for Player 2')\n #print('Thats the biggest gain yet for Player 2')\n else:\n print('Player 2 gets ',previous_score,'point ; not enough for a new high')",
"def pro() -> None:\n global player\n global points\n global comp_points\n while (points < 3) and (comp_points < 3):\n choice = str(input(\"rock...paper...scissors...SHOOT!!!: \"))\n computer = game[randint(0, 2)]\n print(f\"My turn: {computer}\")\n if choice == rock and computer == paper:\n points = points\n comp_points = comp_points + 1\n if choice == rock and computer == scissors:\n points = points + 1\n comp_points = comp_points\n if choice == paper and computer == rock:\n points = points + 1\n comp_points = comp_points\n if choice == paper and computer == scissors:\n points = points\n comp_points = comp_points + 1\n if choice == scissors and computer == rock:\n points = points\n comp_points = comp_points + 1\n if choice == scissors and computer == paper:\n points = points + 1\n comp_points = comp_points\n if choice == computer:\n points = points\n comp_points = comp_points\n print(f\"{player}'s score: {points}\")\n print(f\"My score: {comp_points}\")\n if points == 3:\n print(f\"Good job {player}! YOU WIN {STAR_EYES}{STAR_EYES}{STAR_EYES}\")\n if comp_points == 3:\n print(f\"Sorry, {player}. YOU LOSE {SAD_FACE}{SAD_FACE}{SAD_FACE}\")",
"def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)",
"def _adv_counter(winrate_together, winrate_hero1, winrate_hero2):\n return winrate_together",
"async def strange(self, ctx, number, option=0):\n user = ctx.author\n dice = random.randint(1, 20)\n raw = dice\n if option != 0:\n dice = dice + option\n\n print(\"option: {}, raw: {}, dice: {}\".format(option, raw, dice))\n voice = get(self.bot.voice_clients, guild=ctx.guild)\n\n if dice >= int(number) * 3:\n if dice == 20:\n write_history(\"good job \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n if voice and voice.is_connected():\n voice.play(discord.FFmpegPCMAudio(cfg.PATH + \"sound/20.mp3\"))\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.4\n await ctx.send(\"good job \" + user.name + \" (\" + str(dice) + \")\")\n else:\n write_history(\"good job \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n await ctx.send(\"good job \" + user.name + \" (\" + str(dice) + \")\")\n else:\n if dice == 1:\n write_history(\"Ohoh :hot_face: \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n if voice and voice.is_connected():\n voice.play(discord.FFmpegPCMAudio(cfg.PATH + \"sound/1.mp3\"))\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.4\n await ctx.send(\"Ohoh :hot_face: \" + user.name + \" (\" + str(dice) + \"). Setzt lieber ein XP du Noob\")\n else:\n write_history(\"Ohoh \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n await ctx.send(\"Ohoh \" + user.name + \" (\" + str(dice) + \")\")",
"def update_points(self, correct):\n\n if correct:\n self.points += 10\n \n if self.points > ((self.current_level + 1) * 100):\n self.play_sound('level_up', self.standard_sfx, True)\n self.play_sound(choice(self.correct_voice),self.standard_voice, wait=True)\n self.play_sound('combinations',self.game_sounds, wait=True)\n self.current_level += 1\n print(self.current_level)\n if self.current_level > 4:\n self.current_level = 4",
"async def rps(self, ctx, your_choice : RPSParser):\r\n author = ctx.message.author\r\n player_choice = your_choice.choice\r\n red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))\r\n cond = {\r\n (RPS.rock, RPS.paper) : False,\r\n (RPS.rock, RPS.scissors) : True,\r\n (RPS.paper, RPS.rock) : True,\r\n (RPS.paper, RPS.scissors) : False,\r\n (RPS.scissors, RPS.rock) : False,\r\n (RPS.scissors, RPS.paper) : True\r\n }\r\n\r\n if red_choice == player_choice:\r\n outcome = None # Tie\r\n else:\r\n outcome = cond[(player_choice, red_choice)]\r\n\r\n if outcome is True:\r\n await self.bot.say(\"{} You win {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n elif outcome is False:\r\n await self.bot.say(\"{} You lose {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n else:\r\n await self.bot.say(\"{} We're square {}!\"\r\n \"\".format(red_choice.value, author.mention))",
"def set_score(self, a, b, score):\n ### FILL IN ###",
"def updateScore(score):\n return score + 1",
"def f1_score(self):"
] | [
"0.6722317",
"0.58068377",
"0.5768385",
"0.57021904",
"0.56786025",
"0.56589353",
"0.5603751",
"0.55136603",
"0.549756",
"0.5485879",
"0.54847825",
"0.5401103",
"0.5356562",
"0.5330451",
"0.53275734",
"0.53220344",
"0.52828926",
"0.5266709",
"0.52593845",
"0.5259334",
"0.52449524",
"0.52364606",
"0.52332526",
"0.5231776",
"0.52313554",
"0.5229256",
"0.5222751",
"0.5207222",
"0.51982087",
"0.51967174"
] | 0.70795625 | 0 |
Make a short score with pickup and two voices. | def makeScore(self):
sc = stream.Score()
num_voices = 2
pitches = ['C', 'A-']
for i in range(num_voices):
part = stream.Part()
part.id = 'part %d' % i
time_sig = meter.TimeSignature('4/4')
key_sig = key.Key('c')
# Make a note.
n1 = music21_note.Note(pitches[i])
n1.duration.quarterLength = 1
# Add full measure.
full_m = stream.Measure()
full_m.append(time_sig)
full_m.append(key_sig)
full_m.append(n1)
n2 = n1.transpose('M2')
full_m.append(n2)
full_m.repeatAppend(n1, 2)
part.append(full_m)
# Add another full measure.
full_m = stream.Measure()
full_m.append(n1)
n2 = n1.transpose('M2')
full_m.append(n2)
full_m.repeatAppend(n1, 2)
part.append(full_m)
sc.insert(0, part)
# Show the full score and all score elements in indented text.
# sc.show('text')
return sc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeScoreWithPickup(self):\n sc = stream.Score()\n num_voices = 2\n pitches = ['C', 'A-']\n for i in range(num_voices):\n part = stream.Part()\n part.id = 'part %d' % i\n time_sig = meter.TimeSignature('4/4')\n key_sig = key.Key('c')\n\n # Add pickup measure.\n pickup = stream.Measure()\n pickup.append(time_sig)\n pickup.append(key_sig)\n n1 = music21_note.Note(pitches[i])\n n1.duration.quarterLength = 1\n pickup.append(n1)\n part.append(pickup)\n\n # Add full measure.\n full_m = stream.Measure()\n full_m.append(n1)\n n2 = n1.transpose('M2')\n full_m.append(n2)\n full_m.repeatAppend(n1, 2)\n part.append(full_m)\n\n sc.insert(0, part)\n\n # Show the full score and all score elements in indented text.\n # sc.show('text')\n return sc",
"def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])",
"def score_notify(score1, score2):\n\tif score1 > score2 :\n\t\tresult = \"Player A won\"\n\telif score1 < score2 : \n\t\tresult = \"Player B won\"\n\telse :\n\t\tresult = \"Tied Score\"\n\treturn result",
"def disp_score():",
"def say_scores(score0, score1):\n print(\"Player 0 now has\", score0, \"and Player 1 now has\", score1)\n return say_scores",
"def _score_a_quartet(self, num_one, num_two):\n score = 0\n if num_one > 0 and num_two > 0: return 0 #no one can win here, or nothing is here yet\n elif num_one == 0 and num_two == 0: return 0\n\n elif num_two == 4 or num_one == 4: score = 100000000 #someone wins\n\n elif num_two == 3 or num_one == 3: score = 100\n\n elif num_two == 2 or num_one == 2: score = 10\n\n elif num_two == 1 or num_one == 1: score = 1\n\n else: #This should never happen\n print(\"That's not right. There are \" + str(num_one) + \" ones and \" + str(num_two) + \" twos here.\")\n return None\n\n if self.who_played != our_player: return score * -1\n return score",
"def score(self):",
"def vanilaScore(self,attended,state,W):",
"def start_game(answer, session):\n\n print(\"start_game, answer: \", answer)\n\n attributes = reset_attributes()\n\n if answer == \"einem spieler\":\n answer = \"1\"\n if answer == \"vier spieler\":\n answer = \"4\"\n\n if answer in [str(x) for x in range(1, 5)]:\n curr_round = 1\n curr_player = 1\n state = \"Gameon\"\n scores = {x:0 for x in range(1, int(answer)+1)}\n sess_fragen = populate_questions(scores)\n \n attributes[\"question_index\"] = 0\n attributes[\"current_round\"] = curr_round\n attributes[\"current_player\"] = curr_player\n attributes[\"state\"] = state\n attributes[\"scores\"] = scores\n attributes[\"sess_questions\"] = sess_fragen\n\n if answer == \"1\":\n text = \"<s>Alles klar. \"+ TEXT_BREAK + \"Wir beginnen ein Spiel mit einem Spieler.\"+\\\n \"</s> <s>Das Quiz enthält {} Fragen.\\\n </s>\".format(TOTAL_ROUNDS)\n else:\n text = \"<s>Alles klar.\" + TEXT_BREAK + \"Wir beginnen ein Spiel mit {} Spielern\"\\\n .format(answer) +\\\n \"</s><s> Es werden jeweils {} Fragen an jeden Spieler gestellt.\\\n </s>\".format(TOTAL_ROUNDS)\n\n frage1 = ask_question(0, attributes)\n text += TICK_HELP_MESSAGE\n text += frage1\n card_text = \"Spiel mit {0} Spielern begonnen.\\n\".format(len(scores)) + clear_tags(frage1)\n\n else:\n richtige_zahl_prompt = \"Sag eine Nummer zwischen 1 und 4.\"\n text = \"Ungültige Spielerzahl. \" + richtige_zahl_prompt\n frage1 = SPIELER_PROMPT_TEXT\n card_text = text\n\n attributes[\"current_question\"] = frage1\n attributes[\"speech_output\"] = text\n attributes[\"reprompt_text\"] = frage1\n \n return response(text, should_end_session=False, reprompt_text=frage1, \\\n attributes=attributes, card_text=card_text)",
"def reward_conversion(self, hero_selection):\n count = sum([sum(rank.values()) for rank in hero_selection.itervalues()])\n combo = None\n\n if count == 2:\n # Only queens currently\n combo = Hero.QUEEN\n\n elif count == 3:\n if self._calc_same(hero_selection, 3):\n combo = 'same'\n else:\n combo = '_'.join(sorted([k for k in hero_selection.iterkeys()]))\n\n elif count == 4:\n pairs = [rank for rank, sub in hero_selection.iteritems() if sum(sub.values()) >= 2]\n if len(pairs) == 2:\n combo = 'double'\n elif self._calc_same(hero_selection, 4):\n combo = 'same'\n\n elif count == 5:\n if self._calc_same(hero_selection, 5):\n combo = 'same'\n\n elif len(hero_selection.keys()) == 5:\n faces = set([face for k, v in hero_selection.iteritems() \\\n for face in v.iterkeys()])\n if len(faces) == 1:\n combo = 'all_same'\n else:\n combo = 'different'\n\n elif len(hero_selection.keys()) == 2:\n cards = set([max(face.values()) for rank, face in \\\n hero_selection.iteritems()])\n\n if set([3,2]) == cards:\n combo = 'three'\n\n\n if not combo:\n self.log.warning('Unable to calculate hero price')\n return\n\n price = '%d_%s' % (count, combo)\n self.log.debug('Calculated price: {0}'.format(price))\n\n return price",
"def silence(score0, score1):\n return silence",
"def start(scale, entry, label, v):\r\n\r\n # The following variables are common across all the 5 different voices selected and so, will only be changed there for space considerations\r\n CHANNELS = 1\r\n RATE = 8000\r\n DURATION = 0\r\n WIDTH = 2\r\n BLOCKLEN = 1024\r\n\r\n if len(\r\n entry.get()) == 0: # can try and get rid of invalid characters when saving file too but that won't be necessary\r\n label['text'] = 'File name cannot be empty!'\r\n else:\r\n DURATION = scale.get()\r\n output_wavfile = entry.get()\r\n\r\n label['text'] = 'You will be recording for ' + str(DURATION) + ' seconds.'\r\n\r\n if v.get() == 1:\r\n voice1(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"1\")\r\n elif v.get() == 2:\r\n voice2(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"2\")\r\n elif v.get() == 3:\r\n voice3(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"3. Roger, roger!\")\r\n elif v.get() == 4:\r\n voice4(output_wavfile, DURATION, RATE, WIDTH, CHANNELS)\r\n print(\"4\")\r\n elif v.get() == 5:\r\n manualControl(output_wavfile, DURATION, BLOCKLEN, RATE, WIDTH, CHANNELS)\r\n print(\"5\")\r\n\r\n # after whatever operation we do\r\n label['text'] = 'Successfully saved ' + output_wavfile + '.wav file'\r\n\r\n pass",
"def update_score():\n pass",
"def _score_to_decision(self, score):",
"def score():\r\n\r\n point_1 = 0\r\n point_2 = 0\r\n print(term.move_xy(82,15) + term.white + 'Score joueur 1 : ', end='')\r\n print(point_1)\r\n print(term.move_xy(82,16) + term.white + 'Score joueur 2 : ', end='' )\r\n print(point_2)",
"def quality(self):\n return self.plays * self.number",
"def get_score(self, a, b):\n ### FILL IN ###",
"def to_score(self):\n self._bottom_tab(2)\n self._goto(\"score\")",
"def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))",
"def separate_voices_score(score, compare=True, parameters=Parameters(), verbose=True):\n nb_measures = get_number_measures(score)\n new_score = m21.stream.Score()\n total_cost = 0\n\n for nb in range(0, nb_measures):\n measure, start_offset, end_offset = get_measure(score, nb)\n # if measure doesn't exist, continue (for example : no 0 measure (no anacrusis))\n if measure is None:\n continue\n\n voices = separate_voices(measure, start_offset, end_offset, parameters)\n new_score.append(m21.stream.Measure(voices, number=nb))\n\n if compare:\n total_cost += score_compare(measure, voices)\n if verbose:\n print(\"Measure :\", nb)\n print(\"\\tCost :\", total_cost)\n\n print(\"Result for this score :\", total_cost)\n\n if compare:\n return (new_score, total_cost)\n return new_score",
"def announce_highest(who, previous_high=0, previous_score=0):\n assert who == 0 or who == 1, 'The who argument should indicate a player.'\n if who ==0 :\n if previous_score> previous_high:\n previous_high=previous_score\n if previous_score>1:\n print(previous_score,'points! Thats the biggest gain yet for Player 1')\n #print('Thats the biggest gain yet for Player 1')\n elif previous_score==1:\n print(previous_score,'point! Thats the biggest gain yet for Player 1')\n print('Thats the biggest gain yet for Player 1')\n else: \n print('Player 1 gets ',previous_score,'point ; not enough for a new high')\n elif who==1:\n if previous_score> previous_high:\n previous_high=previous_score\n if previous_score>1:\n print(previous_score,'points! Thats the biggest gain yet for Player 2')\n #print('Thats the biggest gain yet for Player 2')\n elif previous_score==1:\n print(previous_score,'point! Thats the biggest gain yet for Player 2')\n #print('Thats the biggest gain yet for Player 2')\n else:\n print('Player 2 gets ',previous_score,'point ; not enough for a new high')",
"def pro() -> None:\n global player\n global points\n global comp_points\n while (points < 3) and (comp_points < 3):\n choice = str(input(\"rock...paper...scissors...SHOOT!!!: \"))\n computer = game[randint(0, 2)]\n print(f\"My turn: {computer}\")\n if choice == rock and computer == paper:\n points = points\n comp_points = comp_points + 1\n if choice == rock and computer == scissors:\n points = points + 1\n comp_points = comp_points\n if choice == paper and computer == rock:\n points = points + 1\n comp_points = comp_points\n if choice == paper and computer == scissors:\n points = points\n comp_points = comp_points + 1\n if choice == scissors and computer == rock:\n points = points\n comp_points = comp_points + 1\n if choice == scissors and computer == paper:\n points = points + 1\n comp_points = comp_points\n if choice == computer:\n points = points\n comp_points = comp_points\n print(f\"{player}'s score: {points}\")\n print(f\"My score: {comp_points}\")\n if points == 3:\n print(f\"Good job {player}! YOU WIN {STAR_EYES}{STAR_EYES}{STAR_EYES}\")\n if comp_points == 3:\n print(f\"Sorry, {player}. YOU LOSE {SAD_FACE}{SAD_FACE}{SAD_FACE}\")",
"def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)",
"def _adv_counter(winrate_together, winrate_hero1, winrate_hero2):\n return winrate_together",
"async def strange(self, ctx, number, option=0):\n user = ctx.author\n dice = random.randint(1, 20)\n raw = dice\n if option != 0:\n dice = dice + option\n\n print(\"option: {}, raw: {}, dice: {}\".format(option, raw, dice))\n voice = get(self.bot.voice_clients, guild=ctx.guild)\n\n if dice >= int(number) * 3:\n if dice == 20:\n write_history(\"good job \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n if voice and voice.is_connected():\n voice.play(discord.FFmpegPCMAudio(cfg.PATH + \"sound/20.mp3\"))\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.4\n await ctx.send(\"good job \" + user.name + \" (\" + str(dice) + \")\")\n else:\n write_history(\"good job \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n await ctx.send(\"good job \" + user.name + \" (\" + str(dice) + \")\")\n else:\n if dice == 1:\n write_history(\"Ohoh :hot_face: \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n if voice and voice.is_connected():\n voice.play(discord.FFmpegPCMAudio(cfg.PATH + \"sound/1.mp3\"))\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.4\n await ctx.send(\"Ohoh :hot_face: \" + user.name + \" (\" + str(dice) + \"). Setzt lieber ein XP du Noob\")\n else:\n write_history(\"Ohoh \" + user.name + \". dice = \" + str(dice) + \", strange = \" + str(number))\n await ctx.send(\"Ohoh \" + user.name + \" (\" + str(dice) + \")\")",
"def update_points(self, correct):\n\n if correct:\n self.points += 10\n \n if self.points > ((self.current_level + 1) * 100):\n self.play_sound('level_up', self.standard_sfx, True)\n self.play_sound(choice(self.correct_voice),self.standard_voice, wait=True)\n self.play_sound('combinations',self.game_sounds, wait=True)\n self.current_level += 1\n print(self.current_level)\n if self.current_level > 4:\n self.current_level = 4",
"async def rps(self, ctx, your_choice : RPSParser):\r\n author = ctx.message.author\r\n player_choice = your_choice.choice\r\n red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))\r\n cond = {\r\n (RPS.rock, RPS.paper) : False,\r\n (RPS.rock, RPS.scissors) : True,\r\n (RPS.paper, RPS.rock) : True,\r\n (RPS.paper, RPS.scissors) : False,\r\n (RPS.scissors, RPS.rock) : False,\r\n (RPS.scissors, RPS.paper) : True\r\n }\r\n\r\n if red_choice == player_choice:\r\n outcome = None # Tie\r\n else:\r\n outcome = cond[(player_choice, red_choice)]\r\n\r\n if outcome is True:\r\n await self.bot.say(\"{} You win {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n elif outcome is False:\r\n await self.bot.say(\"{} You lose {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n else:\r\n await self.bot.say(\"{} We're square {}!\"\r\n \"\".format(red_choice.value, author.mention))",
"def set_score(self, a, b, score):\n ### FILL IN ###",
"def updateScore(score):\n return score + 1",
"def f1_score(self):"
] | [
"0.70795625",
"0.58068377",
"0.5768385",
"0.57021904",
"0.56786025",
"0.56589353",
"0.5603751",
"0.55136603",
"0.549756",
"0.5485879",
"0.54847825",
"0.5401103",
"0.5356562",
"0.5330451",
"0.53275734",
"0.53220344",
"0.52828926",
"0.5266709",
"0.52593845",
"0.5259334",
"0.52449524",
"0.52364606",
"0.52332526",
"0.5231776",
"0.52313554",
"0.5229256",
"0.5222751",
"0.5207222",
"0.51982087",
"0.51967174"
] | 0.6722317 | 1 |
Check the key, mode, tonic pitch class extraction from key signature. | def testExtractionOfKeySignatureAttributes(self):
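    # Expected key names indexed by number of sharps (positive) or flats (negative).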
num_to_major_key = {0: 'C',
1: 'G',
2: 'D',
3: 'A',
4: 'E',
5: 'B',
6: 'F#',
7: 'C#',
8: 'G#',
9: 'D#',
10: 'A#',
11: 'E#',
12: 'B#',
-2: 'Bb',
-12: 'Dbb',
-11: 'Abb',
-10: 'Ebb',
-9: 'Bbb',
-8: 'Fb',
-7: 'Cb',
-6: 'Gb',
-5: 'Db',
-4: 'Ab',
-3: 'Eb',
-1: 'F'}
num_to_minor_key = {0: 'a',
1: 'e',
2: 'b',
3: 'f#',
4: 'c#',
5: 'g#',
6: 'd#',
7: 'a#',
8: 'e#',
9: 'b#',
10: 'f##',
11: 'c##',
12: 'g##',
-2: 'g',
-12: 'bbb',
-11: 'fb',
-10: 'cb',
-9: 'gb',
-8: 'db',
-7: 'ab',
-6: 'eb',
-5: 'bb',
-4: 'f',
-3: 'c',
-1: 'd'}
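    # Cover a wide range of sharps/flats counts, including values outside the usual -7..7.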
for test_mode in ['major', 'minor']:
for i in range(-12, 13):
ks = key.KeySignature(i)
ks.mode = test_mode
if test_mode == 'major':
key_map = num_to_major_key
else:
key_map = num_to_minor_key
try:
key_name, num_sharps, mode, tonic_pitchclass = (
pretty_music21._extract_key_signature_attributes(ks))
except pretty_music21.PrettyMusic21Error:
          self.assertTrue(i < -7 or i > 7)
continue
self.assertEqual(key_name, key_map[i])
if mode == 'minor':
self.assertEqual(
key.sharpsToPitch(num_sharps + 3).name,
key.convertKeyStringToMusic21KeyString(key_name).upper())
else:
self.assertEqual(
key.sharpsToPitch(num_sharps).name,
key.convertKeyStringToMusic21KeyString(key_name).upper())
self.assertEqual(mode, ks.mode)
check_pitch = pitch.Pitch(
key.convertKeyStringToMusic21KeyString(key_map[i]))
check_pitchclass = check_pitch.pitchClass
self.assertEqual(tonic_pitchclass, check_pitchclass) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_key(self, key):\n raise NotImplementedError",
"def __getKeyInformation( self , flaglist ):\n\t\tkeyinfo = 0\n\t\tif 'HMAC_MD5_RC4' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 0 )\n\t\tif 'HMAC_SHA1_AES' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 1 )\n\t\tif 'group' in flaglist:\n\t\t\tpass\n\t\tif 'pairwise' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 3 )\n\t\tif 'idx0' in flaglist:\n\t\t\tpass\n\t\tif 'idx1' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 4 )\n\t\tif 'idx2' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 5 )\n\t\tif 'install' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 6 )\n\t\tif 'ack' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 7 )\n\t\tif 'mic' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 8 )\n\t\tif 'secure' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 9 )\n\t\tif 'error' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 10 )\n\t\tif 'request' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 11 )\n\t\tif 'encrypted' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 12 )\n\t\treturn keyinfo",
"def check_keys(self):",
"def verify_signature(self, key, data):\n verify_signature(self, key, data)",
"def isValidKey(key):\n return True",
"def verify(key, file, sign):\n\n try:\n key = TomlKeyFormatter().from_string(key.read())\n signature = TomlSignatureFormatter().from_string(sign.read())\n\n if signature.verify(SignableBinaryIO(file), key):\n click.echo(\"---verified---\")\n exit(0)\n else:\n click.echo(\"---denied---\")\n exit(1)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except SignatureFormatError:\n click.echo(\"ERROR: Signature is in bad format\")",
"def test_signature_verification(self):\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n\n lines = SIGNATURES.split('\\n')\n\n # empty keyring\n keyring = ima_file_signatures.ImaKeyring()\n self.assertTrue(ima.process_measurement_list(lines, ima_keyring=keyring) is None)\n\n # add key for 1st entry; 1st entry must be verifiable\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:1], ima_keyring=keyring) is not None)\n self.assertTrue(ima.process_measurement_list(lines[1:2], ima_keyring=keyring) is None)\n\n # add key for 2nd entry; 1st & 2nd entries must be verifiable\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:2], ima_keyring=keyring) is not None)",
"def checkKeyFile(file : str, typ : str) -> bool:\n return True\n with open(file, \"r\") as file:\n first_line = file.readline()\n for last_line in file:\n pass\n \n if typ == \"private\" :\n if(first_line == \"---begin monRSA private key---\\n\"):\n if(last_line == \"---end monRSA key---\"):\n return True\n return False\n elif typ == \"public\" :\n if(first_line == \"---begin monRSA public key---\\n\"):\n if(last_line == \"---end monRSA key---\"):\n return True\n return False\n else :\n print(\"wrong type\")\n return False",
"def check_sig(self):\n check_sig(self.path)\n dsc = self.get_dsc()\n if dsc is not None:\n check_sig(dsc)",
"def _checkKey(self, key):\n x, y = self._convertNegativeTupleKeyToPositiveTupleKey(key)\n return x, y",
"def checksignature(self):\n if(self.name=='ORBIT'): return\n if(self.ctpnum==0): return\n cmd=\"CheckSignature(\"+self.board+\",\"+self.signature+\",\"+self.ctpnum+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n print \"input checksignature: \",output\n #self.signatureM=",
"def checkKeys( ):\n\n if (HMACKey is None) or (AESKey is None):\n loadKeys()\n\n if (int(time.time()) - creationTime) > const.KEY_ROTATION_TIME:\n rotateKeys()",
"def test_key(score1, score2, measure=0, part=0):\n\n\tdiff = ScoreDiff(score1, score2, path)\n return diff.have_same_key_signature(measure, part)",
"def is_key(v) -> bool:\n try:\n _validate(v, prefixes=[b\"edsk\", b\"edpk\", b\"spsk\", b\"p2sk\", b\"sppk\", b\"p2pk\"])\n except (ValueError, TypeError):\n return False\n return True",
"def verify_signatures(params, signed_fields_key='signedFields', full_sig_key='signedDataPublicSignature'):\r\n signed_fields = params.get(signed_fields_key, '').split(',')\r\n data = u\",\".join([u\"{0}={1}\".format(k, params.get(k, '')) for k in signed_fields])\r\n signed_fields_sig = processor_hash(params.get(signed_fields_key, ''))\r\n data += u\",signedFieldsPublicSignature=\" + signed_fields_sig\r\n returned_sig = params.get(full_sig_key, '')\r\n if processor_hash(data) != returned_sig:\r\n raise CCProcessorSignatureException()",
"def check_key(key, options):\n animal_id, exp_date, exp_type = key.split('_')\n if ((options.animal_id is None or animal_id == options.animal_id)\n and (options.exp_date is None or exp_date == options.exp_date)\n and (options.exp_type is None or exp_type == options.exp_type)):\n return True\n else:\n return False",
"def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True",
"def check_key(self):\n\n if self.type == \"RSA\" and self.size < 1024:\n raise HostkeyError(\"RSA keys must at least be 1024 bits.\")\n elif self.type == \"DSA\" and self.size != 1024:\n raise HostkeyError(\"DSA keys can only be 1024 bits.\")\n elif self.type == \"ECDSA\" and self.size not in [256, 384, 521]: # yes, that is *really* 521 bits, not a typo!\n raise HostkeyError(\"ECDSA key must be either 256, 384 or 521 bits (yes, 521 not 512!)\")\n elif self.type ==\"ED25519\" and self.size != 128:\n raise HostkeyError(\"ED25519 keys have a fixed size, which cannot be altered.\") # can't really happen, size is ignored for ED25519\n\n # if privkey is already there check size\n self.key_exists = False\n self.key_current_size = 0\n if os.path.exists(self.fullpath):\n self.key_exists = True\n if self.type == \"ED25519\":\n self.curve = \"EC25519\"\n self.key_current_size = 128 # somewhat erbitrary, attack complexity on ED25519 is larger that brute forcing a 128bit key\n self.key_exists = True\n elif self.type == \"RSA1\":\n self.key_exists = True\n self.key_current_size = 1024\n else:\n try:\n with open(self.fullpath, \"rb\") as key_file:\n self.privkey = crypto_serialization.load_pem_private_key(key_file.read(), password=None, backend=crypto_default_backend())\n except IOError:\n raise HostkeyError(get_exception())\n\n if self.type == \"DSA\" or self.type == \"RSA\":\n self.key_current_size = self.privkey.key_size\n elif self.type == \"ED25519\":\n self.key_current_size = 128\n elif self.type == \"ECDSA\":\n self.pubkey = self.privkey.public_key()\n if self.pubkey.curve.name == \"secp256r1\":\n self.key_current_size = 256\n elif self.pubkey.curve.name == \"secp384r1\":\n self.key_current_size = 384\n elif self.pubkey.curve.name == \"secp521r1\":\n self.key_current_size = 521\n else:\n self.curve = self.pubkey.curve.name",
"def verify_kpoints_content(kpoints):\n assert kpoints['mode'] == 'automatic'\n assert kpoints['comment'] == 'Example file'\n assert kpoints['divisions'] == [4, 4, 4]\n assert kpoints['shifts'] == [0.0, 0.0, 0.0]\n assert kpoints['points'] == None\n assert kpoints['centering'] == 'Gamma'\n assert kpoints['tetra'] == None\n assert kpoints['tetra_volume'] == None\n assert kpoints['num_kpoints'] == 0",
"def _determineSiginfo(self):\n return self.scanhandler.getSigInfo()",
"def check_key(self, key, key_pkl):\r\n start_time = time.time()\r\n # Verify that when we reload the KeyData from the pickled file, the\r\n # same key can be found in it, and is not equal to more than one\r\n # other key.\r\n key_data = cPickle.load(open(key_pkl, 'rb'))\r\n found = sum(key == other_key for other_key in key_data.keys)\r\n msg = ''\r\n if found == 0:\r\n msg = 'Key not found in unpickled KeyData file'\r\n if key_data.keys:\r\n # This is to make debugging in pdb easier, by providing\r\n # the offending keys in the local context.\r\n # key_data_keys = list(key_data.keys)\r\n ## import pdb; pdb.set_trace()\r\n pass\r\n elif found > 1:\r\n msg = 'Multiple equal keys found in unpickled KeyData file'\r\n if msg:\r\n raise AssertionError(\r\n \"%s. Verify the __eq__ and __hash__ functions of your \"\r\n \"Ops. The file is: %s. The key is: %s\" %\r\n (msg, key_pkl, key))\r\n # Also verify that there exists no other loaded key that would be equal\r\n # to this key. In order to speed things up, we only compare to keys\r\n # with the same version part and config md5, since we can assume this\r\n # part of the key is not broken.\r\n for other in self.similar_keys.get(get_safe_part(key), []):\r\n if other is not key and other == key and hash(other) != hash(key):\r\n raise AssertionError(\r\n \"Found two keys that are equal but have a different hash. \"\r\n \"Verify the __eq__ and __hash__ functions of your Ops. \"\r\n \"The keys are:\\n %s\\nand\\n %s\\n(found in %s).\" %\r\n (other, key, key_pkl))\r\n\r\n self.time_spent_in_check_key += time.time() - start_time",
"def vscf_raw_private_key_is_valid(self, ctx):\n vscf_raw_private_key_is_valid = self._lib.vscf_raw_private_key_is_valid\n vscf_raw_private_key_is_valid.argtypes = [POINTER(vscf_raw_private_key_t)]\n vscf_raw_private_key_is_valid.restype = c_bool\n return vscf_raw_private_key_is_valid(ctx)",
"def test_compare_signatures_string_key(self):\n provider = CreditProviderFactory(\n provider_id='asu',\n active=False,\n )\n\n # Create a serializer that has a signature which was created with a key\n # that we do not have in our system.\n sig = signature.signature({}, 'iamthewrongkey')\n serializer = serializers.CreditProviderCallbackSerializer(\n data={'signature': sig}\n )\n with pytest.raises(PermissionDenied):\n # The first arg here is key we have (that doesn't match the sig)\n serializer._compare_signatures('abcd1234', provider.provider_id) # lint-amnesty, pylint: disable=protected-access",
"def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True",
"def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:\n if len(s) != b // 4:\n raise ValueError(\"signature length is wrong\")\n\n if len(pk) != b // 8:\n raise ValueError(\"public-key length is wrong\")\n\n R = decodepoint(s[: b // 8])\n A = decodepoint(pk)\n S = decodeint(s[b // 8 : b // 4])\n h = Hint(encodepoint(R) + pk + m)\n\n (x1, y1, z1, _) = P = scalarmult_B(S)\n (x2, y2, z2, _) = Q = edwards_add(R, scalarmult(A, h))\n\n if (\n not isoncurve(P)\n or not isoncurve(Q)\n or (x1 * z2 - x2 * z1) % q != 0\n or (y1 * z2 - y2 * z1) % q != 0\n ):\n raise SignatureMismatch(\"signature does not pass verification\")",
"def keyIsValid(key):\n\n isValid = 1\n \n try:\n temp = getParam(key)\n\n except ValueError:\n isValid = 0\n warning(\" WARNING: %s not set\" % (key))\n\n return isValid",
"def _verify_key_exists(self, key, stack_path=[]):\r\n error_msg = (\r\n \"Could not find the {key_type} key '{key}' in: {stack_path}. \"\r\n \"Found {keys_found} instead.\"\r\n )\r\n try:\r\n dk = stack_path[0]\r\n fk = stack_path[1]\r\n xk = stack_path[2]\r\n yk = stack_path[3]\r\n vk = stack_path[4]\r\n except:\r\n pass\r\n try:\r\n if len(stack_path) == 0:\r\n if key not in self:\r\n key_type, keys_found = 'data', self.keys()\r\n stack_path = 'stack'\r\n raise ValueError\r\n elif len(stack_path) == 1:\r\n if key not in self[dk]:\r\n key_type, keys_found = 'filter', self[dk].keys()\r\n stack_path = 'stack[{dk}]'.format(\r\n dk=dk)\r\n raise ValueError\r\n elif len(stack_path) == 2:\r\n if key not in self[dk][fk]:\r\n key_type, keys_found = 'x', self[dk][fk].keys()\r\n stack_path = 'stack[{dk}][{fk}]'.format(\r\n dk=dk, fk=fk)\r\n raise ValueError\r\n elif len(stack_path) == 3:\r\n if key not in self[dk][fk][xk]:\r\n key_type, keys_found = 'y', self[dk][fk][xk].keys()\r\n stack_path = 'stack[{dk}][{fk}][{xk}]'.format(\r\n dk=dk, fk=fk, xk=xk)\r\n raise ValueError\r\n elif len(stack_path) == 4:\r\n if key not in self[dk][fk][xk][yk]:\r\n key_type, keys_found = 'view', self[dk][fk][xk][yk].keys()\r\n stack_path = 'stack[{dk}][{fk}][{xk}][{yk}]'.format(\r\n dk=dk, fk=fk, xk=xk, yk=yk)\r\n raise ValueError\r\n except ValueError:\r\n print error_msg.format(\r\n key_type=key_type,\r\n key=key,\r\n stack_path=stack_path,\r\n keys_found=keys_found\r\n )",
"def is_signature_valid(self, data, sig):\n if self.verified == False:\n return False\n\n key = self.publickey_set.filter(\n fingerprint=PublicKey.verify(data, sig).fingerprint,\n ).first()\n return key",
"def handle_key(self, key):\n keymodes = {'[': 'left', ']': 'right', 'l': 'location'}\n\n def _handle_key():\n if self.aperture_id is None or self.mode == '':\n # get closest one\n self.aperture_id = self.aperture_model.find_closest(\n self.last_x, self.fig.x_range.start, self.fig.x_range.end)\n if self.aperture_id is None:\n return False\n self.mode = keymodes[key]\n return False\n else:\n self.stop_aperture()\n return True\n\n if key in '[l]':\n return _handle_key()\n elif key == 's':\n self.aperture_id = self.aperture_model.find_closest(\n self.last_x, self.fig.x_range.start, self.fig.x_range.end, prefer_selected=False)\n self.aperture_model.select_aperture(self.aperture_id)\n return False\n elif key == 'c':\n self.aperture_id = None\n self.aperture_model.select_aperture(None)\n return False\n elif key == 'a':\n if self.aperture_id is None:\n self.start_aperture(self.last_x, self.last_y)\n return False\n else:\n self.stop_aperture()\n return True\n elif key == 'f':\n if self.aperture_id is None:\n self.aperture_model.find_peak(self.last_x)\n return True\n elif key == 'd':\n if self.aperture_id is None:\n # get closest one\n self.aperture_id = self.aperture_model.find_closest(\n self.last_x, self.fig.x_range.start, self.fig.x_range.end)\n if self.aperture_id is None:\n return False\n self.aperture_model.delete_aperture(self.aperture_id)\n self.stop_aperture()\n return True\n return False",
"def validate_license(key: str) -> bool:\r\n return bool(\r\n re.match(r'^PB-[A-Z0-9]{8}(?:-[A-Z0-9]{8}){3}$', key)\r\n )"
] | [
"0.5945108",
"0.57941103",
"0.57145506",
"0.56614727",
"0.55908537",
"0.557824",
"0.5512754",
"0.55084723",
"0.54874986",
"0.54785895",
"0.5465614",
"0.5448584",
"0.53911096",
"0.536824",
"0.53185785",
"0.5297847",
"0.529589",
"0.5281158",
"0.52718353",
"0.52686393",
"0.5246781",
"0.52276653",
"0.5210392",
"0.5207058",
"0.520597",
"0.5179743",
"0.5158527",
"0.513471",
"0.51275826",
"0.51181996"
] | 0.66755116 | 0 |
Test pretty_music21 score by comparing to music21 score. | def testCompareScores(self):
for score_type, source in self.sources.iteritems():
simple_score = self.simple_scores[score_type]
# Check overall length.
self.assertAlmostEqual(source.duration.quarterLength / 2.0,
simple_score.total_time)
# Check number of parts.
self.assertEqual(len(source.parts), len(simple_score.parts))
# Check the notes.
# TODO(annahuang): Don't assume note lengths are in quarter units.
for part_num in range(len(source.parts)):
part_flat = source.parts[part_num].flat
for note, simple_note in zip(
part_flat.getElementsByClass('Note'), simple_score.parts[part_num]):
self.assertEqual(note.pitch.midi, simple_note.pitch_midi)
self.assertEqual(
note.pitch.name.replace('-', 'b'), simple_note.pitch_name)
note_start = note.getOffsetBySite(part_flat)
self.assertEqual(note_start / 2.0, simple_note.start_time)
self.assertEqual((note_start + note.duration.quarterLength) / 2.0,
simple_note.end_time)
self.assertEqual(part_num, simple_note.part_index)
# Check the time signature.
if 'pickup' in score_type:
self.assertEqual(len(simple_score.time_signature_changes), 2)
# Pickup measure of 1/4, and then a full measure of 4/4.
correct_time_sigs = [(0.0, 1, 4), (0.5, 4, 4)]
else:
self.assertEqual(len(simple_score.time_signature_changes), 1)
correct_time_sigs = [(0.0, 4, 4)]
for i, time_sig in enumerate(simple_score.time_signature_changes):
self.assertAlmostEqual(time_sig.time, correct_time_sigs[i][0])
self.assertEqual(time_sig.numerator, correct_time_sigs[i][1])
self.assertEqual(time_sig.denominator, correct_time_sigs[i][2])
# Check the key signature.
retrieved_key_sigs = simple_score.key_signature_changes
self.assertEqual(len(retrieved_key_sigs), 1)
self.assertEqual(retrieved_key_sigs[0].time, 0.0)
self.assertEqual(retrieved_key_sigs[0].key, 'c')
self.assertEqual(retrieved_key_sigs[0].mode, 'minor')
self.assertEqual(retrieved_key_sigs[0].tonic_pitchclass, 0)
# TODO(annahuang): Check tempo. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_music21(music: \"Music\") -> Score:\n # Create a new score\n score = Score()\n\n # Metadata\n if music.metadata:\n score.append(to_music21_metadata(music.metadata))\n\n # Tracks\n for track in music.tracks:\n # Create a new part\n part = Part()\n part.partName = track.name\n\n # Add tempos\n for tempo in music.tempos:\n part.append(to_music21_metronome(tempo))\n\n # Add time signatures\n for time_signature in music.time_signatures:\n part.append(to_music21_time_signature(time_signature))\n\n # Add key signatures\n for key_signature in music.key_signatures:\n part.append(to_music21_key(key_signature))\n\n # Add notes to part\n for note in track.notes:\n m21_note = M21Note(_get_pitch_name(note.pitch))\n m21_note.quarterLength = note.duration / music.resolution\n offset = note.time / music.resolution\n part.insert(offset, m21_note)\n\n # Append the part to score\n score.append(part)\n\n return score",
"def artists_match_fixup1(song: Song, result: Result, score: float) -> float:\n\n # If we have a verified result, we don't have to fix anything\n if result.verified or score > 50:\n return score\n\n # If we didn't find any artist match,\n # we fallback to channel name match\n channel_name_match = ratio(\n slugify(song.artist),\n slugify(\", \".join(result.artists)) if result.artists else \"\",\n )\n\n if channel_name_match > score:\n score = channel_name_match\n\n # If artist match is still too low,\n # we fallback to matching all song artist names\n # with the result's title\n if score <= 50:\n artist_title_match = 0.0\n for artist in song.artists:\n slug_artist = slugify(artist).replace(\"-\", \"\")\n\n if slug_artist in slugify(result.name).replace(\"-\", \"\"):\n artist_title_match += 1.0\n\n artist_title_match = (artist_title_match / len(song.artists)) * 100\n\n if artist_title_match > score:\n score = artist_title_match\n\n return score",
"def artists_match_fixup3(song: Song, result: Result, score: float) -> float:\n\n if (\n score > 70\n or not result.artists\n or len(result.artists) > 1\n or len(song.artists) == 1\n ):\n # Don't fixup the score\n # if the score is already high\n # or if the result has more than one artist\n # or if the song has only one artist\n return score\n\n artists_score_fixup = ratio(\n slugify(result.name),\n slugify(create_song_title(song.name, [song.artist])),\n )\n\n if artists_score_fixup >= 80:\n score = (score + artists_score_fixup) / 2\n\n # Make sure that the score is not higher than 100\n score = min(score, 100)\n\n return score",
"def artists_match_fixup2(\n song: Song, result: Result, score: float, search_query: Optional[str] = None\n) -> float:\n\n if score > 70 or not result.verified:\n # Don't fixup the score\n # if the artist match is already high\n # or if the result is not verified\n return score\n\n # Slugify some variables\n slug_song_artist = slugify(song.artists[0])\n slug_song_name = slugify(song.name)\n slug_result_name = slugify(result.name)\n slug_result_artists = slugify(\", \".join(result.artists)) if result.artists else \"\"\n\n # Check if the main artist is simlar\n has_main_artist = (score / (2 if len(song.artists) > 1 else 1)) > 50\n\n match_str1, match_str2 = create_match_strings(song, result, search_query)\n\n # Add 10 points to the score\n # if the name match is greater than 75%\n if ratio(match_str1, match_str2) >= 75:\n score += 10\n\n # If the result doesn't have the same number of artists but has\n # the same main artist and similar name\n # we add 25% to the artist match\n if (\n result.artists\n and len(result.artists) < len(song.artists)\n and slug_song_artist.replace(\"-\", \"\")\n in [\n slug_result_artists.replace(\"-\", \"\"),\n slug_result_name.replace(\"-\", \"\"),\n ]\n ):\n score += 25\n\n # Check if the song album name is very similar to the result album name\n # if it is, we increase the artist match\n if result.album:\n if (\n ratio(\n slugify(result.album),\n slugify(song.album_name),\n )\n >= 85\n ):\n score += 10\n\n # Check if other song artists are in the result name\n # if they are, we increase the artist match\n # (main artist is already checked, so we skip it)\n artists_to_check = song.artists[int(has_main_artist) :]\n for artist in artists_to_check:\n artist = slugify(artist).replace(\"-\", \"\")\n if artist in match_str2.replace(\"-\", \"\"):\n score += 5\n\n # if the artist match is still too low,\n # we fallback to matching all song artist names\n # with the result's artists\n if score <= 70:\n # Artists from song/result name without the song/result name words\n artist_list1 = create_clean_string(song.artists, slug_song_name, True)\n artist_list2 = create_clean_string(\n list(result.artists) if result.artists else [result.author],\n slug_result_name,\n True,\n )\n\n artist_title_match = ratio(artist_list1, artist_list2)\n\n if artist_title_match > score:\n score = artist_title_match\n\n return score",
"def strict_score(gold, prediction):\n intersection_uids = set(gold.keys()).intersection(set(prediction.keys()))\n equal_count = 0\n for key in intersection_uids:\n if gold[key] == prediction[key]:\n equal_count += 1\n precision = 100 * equal_count / len(prediction)\n recall = 100 * equal_count / len(gold)\n print('Strict score')\n print('{:05.2f} {:05.2f} {:05.2f}'.format(precision, recall, f1_score(precision, recall)))",
"def _test_scores(lines):\n y_true, y_pred = zip(*[l.split()[-2:] for l in lines if len(l) > 0])\n res = report(score(y_true, y_pred))\n assert res.strip() == gold(lines).decode().strip()",
"def test_score_text3(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\t_, obj_ut = test.score_text(matches)\n\t\tself.assertEqual(obj_ut, {'not good': [[2, -1, 0]],\n\t\t\t'not very good': [[4, -1, 0]]})",
"def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)",
"def test_PhredValueTest(self):\n phredscore=0\n realscore=[6.197200000000001 ,0.08488999999999997]\n for line in self.phredlines:\n phredscore=phredvalues_version8(line)\n self.assertEqual(phredscore, realscore)\n #since the function is used in errorproducer, the absurdly high value of 6.197 is needed to match self.fastQ's phred score",
"def test_score_text4(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\t_, obj_ut = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, {'not good': [[2, -1, 0]], \n\t\t\t'not very good': [[4, -1.5, 0]]})",
"def test_scoring(self):\n scores = score_words(['foo', 'far', 'has', 'car'])\n expected = [(7, 'far'), (6, 'car'), (5, 'has'), (4 , 'foo')]\n self.assertEqual(scores, expected)",
"def test_score_text2(self):\n\t\t#import pdb; pdb.set_trace()\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, -1.25)",
"def test_get_simple_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n\n simple_score = self.annot_scorer._get_simple_score(\n classes, negated_classes, self.ic_store.statistics.mean_mean_ic,\n self.ic_store.statistics.mean_max_ic, self.ic_store.statistics.mean_sum_ic,\n self.negation_weight, self.mock_ic_values\n )\n assert simple_score == 0.7276770236073753",
"def test_high_score(self):\n player_score_expected = [(\"Rick Grimes\", 22, 22), # new entry\n (\"Rick Grimes\", 33.0, 33), # new high score\n (\"Daryl Dixon\", 22.6, 22), # new entry & high score\n (\"Daryl Dixon\", 4, 22), # lower score\n (\"Sophia Peletier\", -1, -1), #new entry; negative\n (\"Sophia Peletier\", 0, 0), # new high score\n (\"Merle Dixon\", \"16\", 16), # new entry; score as string\n (\"Judith Grimes\", \"five\", # non-valid entry\n \"NaN: Not a valid score\")\n ] \n msg = \"THESE SHOULD BE EQUAL!\"\n for player, score, exp in player_score_expected:\n observed = high_score(player, score) # function call\n self.assertEqual(exp, \n observed, \n msg)",
"def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)",
"def test_raw_score(atoms):\n err_msg = \"raw_score not put in atoms.info['key_value_pairs']\"\n assert 'raw_score' in atoms.info['key_value_pairs'], err_msg",
"def test_score_text1(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches)\n\t\tself.assertEqual(obj_ut, -1)",
"def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)",
"def compare(reference, catalog_record, title_similarity_score):\n\tscore_explanation = \"(%s=%s\"%(\"title_similarity\",title_similarity_score)\n\tscores = [title_similarity_score]\n\t# TODO: this needs to be improved! right now returns too highly a value for wrong matches\n\t# compare the `author` field\n\tif(\"author\" in reference and \"author\" in catalog_record and catalog_record[\"author\"] is not None \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tand len(catalog_record[\"author\"])>2):\n\t\tscore = fuzzyContainment(reference[\"author\"],catalog_record[\"author\"])\n\t\tscores.append(score)\n\t\tlogger.debug(\"[author] The score of fuzzyContainment between %s and %s is %s\"%(reference[\"author\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , catalog_record[\"author\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , score))\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s\"%(score_explanation,\"+ author_similarity\",score)\n\t# compare the `year` field\n\tif(\"year\" in reference and catalog_record[\"year\"] is not None and len(catalog_record[\"year\"])>2):\n\t\tif(\"-\" in reference[\"year\"]):\n\t\t\tfirst_part = reference[\"year\"].split(\"-\")[0].replace(\" \",\"\")\n\t\t\tsecond_part = reference[\"year\"].split(\"-\")[1].replace(\" \",\"\")\n\t\t\tscore = first_part in catalog_record[\"year\"] or second_part in catalog_record[\"year\"]\n\t\telse:\n\t\t\tscore = reference[\"year\"] == catalog_record[\"year\"]\n\t\tlogger.debug(\"[year] The similarity between %s and %s is %s\"%(reference[\"year\"], catalog_record[\"year\"], score))\n\t\tscores.append(score)\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s\"%(score_explanation,\"+ year_similarity\",score)\n\tif(\"place\" in reference and \"place\" in catalog_record and catalog_record[\"place\"] is not None \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t and len(catalog_record[\"place\"])>2):\n\t\tscore = fuzzyContainment(reference[\"place\"], catalog_record[\"place\"])\n\t\tlogger.debug(\"[publicationplace] The score of fuzzyContainment between %s and %s is %s\"%(reference[\"place\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t, catalog_record[\"place\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t, score))\n\t\tscores.append(score)\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s\"%(score_explanation,\"+ publplace_similarity\",score)\n\tif(\"publisher\" in reference and \"place\" in catalog_record[\"publisher\"] \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t and catalog_record[\"publisher\"] is not None \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t and len(catalog_record[\"publisher\"])>2):\n\t\tscore = fuzzyContainment(reference[\"publisher\"], catalog_record[\"publisher\"])\n\t\tlogger.debug(\"[publisher] The score of fuzzyContainment between %s and %s is %s\"%(reference[\"publisher\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , catalog_record[\"publisher\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , score))\n\t\tscores.append(score)\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s)\"%(score_explanation,\"+ publisher_similarity\",score)\n\tglobal_score = sum(scores)/len(reference)\n\tscore_explanation = \"%s / %s = %s\"%(score_explanation,len(reference),global_score)\n\tmessage = \"\"\"\n\tInput reference: %s\n\tRecord compared: %s\n\tGlobal score: %s\n\tScore's explanation: %s\n\t\"\"\"%(reference, catalog_record, global_score, score_explanation)\n\treturn global_score, score_explanation",
"def test_double_sharp_2():\n assert note_to_frequency(\"Ax4\") == note_to_frequency(\"B4\")",
"def is_good_qualtiative_example(iaa_score, ann1_total, ann2_total):\n return iaa_score > .3 and iaa_score < 1 and ann1_total > 3 and ann2_total > 3",
"def test_score():\n print(\"Tests for 'score' function\")\n test_suite = TestSuite()\n\n # Testing with empty hand\n result = score([])\n test_suite.run_test(result, 0, '0')\n # Testing with non-empty hand\n result = score([1, 3])\n test_suite.run_test(result, 3, '1')\n # Testing with non-empty hand\n result = score([1, 3, 1, 1])\n test_suite.run_test(result, 3, '2')\n # Testing with non-empty hand\n result = score([4, 3, 4, 3, 3])\n test_suite.run_test(result, 9, '3')\n\n # Show report\n test_suite.report_results()",
"def disp_score():",
"def test_artists_match_diff_styles(self):\r\n gm_artists = ['Walter Bishop Jr.']\r\n sp_artists = ['Walter Bishop Jr']\r\n self.assertTrue(gmspotify.artists_match(gm_artists, sp_artists))",
"def score_match(phrase, song):\n return SequenceMatcher(None, phrase, song.title).ratio()\n ## Examples of other score metrics and modifiers:\n ## Penalize based on difference in phrase length (word count)\n # return -abs(len(song.split()) - len(phrase.split()))\n ## Penalize based on missing words\n # return -len([w for w in phrase.split() if w not in song.split()])",
"def compare_spectrum(spectrum0, spectrum1):\n title0 = spectrum0.get_title() \n title1 = spectrum1.get_title() \n if(title0 < title1): \n return -1\n elif(title0 > title1): \n return 1\n else:\n return 0",
"def test_quality(approach):\n tp, fn, fp, tn = 0, 0, 0, 0\n with open(\"test.txt\", \"r\") as f:\n test_data = json.load(f)\n for i in test_data:\n rez = approach(i[0][0], i[0][1])\n if rez == 1 and i[1] == 1:\n tp += 1\n elif rez == 0 and i[1] == 1:\n fn += 1\n elif rez == 1 and i[1] == 0:\n fp += 1\n else:\n tn += 1\n print \"Precision: {}%.\\nRecall: {}%.\".format(\n round(tp / (tp + fp), 2), round(tp / (tp + fn), 2))",
"def loose_micro(gold, prediction):\n\n count_n = 0\n count_d = 0\n for key in prediction:\n count_n += len(prediction[key].intersection(gold.get(key, set())))\n count_d += len(prediction[key])\n\n precision = 100 * count_n / count_d\n\n count_n = 0\n count_d = 0\n for key in gold:\n count_n += len(gold[key].intersection(prediction.get(key, set())))\n count_d += len(gold[key])\n\n recall = 100 * count_n / count_d\n\n print('loose micro')\n print('{:05.2f} {:05.2f} {:05.2f}'.format(precision, recall, f1_score(precision, recall)))",
"def loose_macro(gold, prediction):\n count = 0\n for key in prediction:\n count += len(prediction[key].intersection(gold.get(key, set()))) / len(prediction[key])\n precision = 100 * count / len(prediction)\n\n count = 0\n for key in gold:\n count += len(gold[key].intersection(prediction.get(key, set()))) / len(gold[key])\n recall = 100 * count / len(gold)\n print('loose macro')\n print('{:05.2f} {:05.2f} {:05.2f}'.format(precision, recall, f1_score(precision, recall)))",
"def test_titles_match_diff_ft_styles(self):\r\n gm_title = 'Stretch Deep (feat. Eve Essex)'\r\n sp_title = 'Stretch Deep - feat. Eve Essex'\r\n self.assertTrue(gmspotify.titles_match(gm_title, sp_title))"
] | [
"0.6397026",
"0.61292386",
"0.60153556",
"0.5894335",
"0.5888303",
"0.584743",
"0.58399576",
"0.58220565",
"0.5753058",
"0.57522446",
"0.5727253",
"0.5703115",
"0.565216",
"0.5651552",
"0.5611833",
"0.5602852",
"0.5571321",
"0.5554371",
"0.54933465",
"0.544632",
"0.5432133",
"0.5423866",
"0.5419078",
"0.5409884",
"0.5408189",
"0.54012996",
"0.53904575",
"0.5350439",
"0.5341123",
"0.53265226"
] | 0.6417285 | 0 |
Test if notes are sorted by start time. | def testSortedNotes(self):
for simple_score in self.simple_scores.values():
notes = simple_score.sorted_notes
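      # Start times should be monotonically non-decreasing.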
assert all(notes[i].start_time <= notes[i + 1].start_time
for i in range(len(notes) - 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _can_add_note(self, start_step):\n return self.last_on is None or start_step - self.offset > self.last_on",
"def order_by_start(self):\n return self.order_by(\"start_time\")",
"def cmpBeginDate(artist1, artist2):\n return int(artist1['BeginDate']) < int(artist2['BeginDate'])",
"def cmpArtistsByDate(artist1, artist2):\n return int(artist1['BeginDate']) < int(artist2['BeginDate'])",
"def has_time(self):\n return isinstance(self._start, datetime.datetime)",
"def toc(self,timestamp):\n return self._timestamp > timestamp",
"def _started(inConf):\n timings = inConf.get('_params', {}).get('existsDuring', {})\n if not timings:\n return True\n if 'start' in timings and getUTCnow() < timings['start']:\n return False\n return True",
"def has_ops_before(self, ts):\n spec = {'ts': {'$lt': ts}}\n return bool(self.coll.find_one(spec))",
"def starts_with_tonic(a_list):\n key = a_list.track.bars[0].key.name\n note = a_list.get_first_actual_note()\n if note.name == key:\n return []\n else:\n return [note.start]",
"def _check_dates_tarea_start(self, cr, uid, ids, context=None):\n for leave in self.read(cr, uid, ids, ['date_start_tarea', 'date_start_proyecto'], context=context):\n if leave['date_start_tarea'] and leave['date_start_proyecto']:\n if leave['date_start_tarea'] < leave['date_start_proyecto']:\n return False\n return True",
"def __contains__(self, ts):\n if not isinstance(ts, datetime.datetime):\n return False\n base_key = self.floor_time(key)\n return self.first_timestamp <= base_key <= self.last_timestamp",
"def sort(self):\n self.notes.sort()",
"def test_sort(self):\n expected = [\n self.TDTT(when=self.dt_when - (3*self.SORT_DELTA)),\n self.TDTT(when=self.dt_when - self.SORT_DELTA),\n self.TDTT(when=self.dt_when),\n self.TDTT(when=self.dt_when + self.SORT_DELTA),\n self.TDTT(when=self.dt_when + (2*self.SORT_DELTA)),\n ]\n self.assertTrue(self.is_sorted_ascending_by_when(expected))\n\n unsorted = [\n expected[3], expected[2], expected[4], expected[1], expected[0],\n ]\n self.assertFalse(self.is_sorted_ascending_by_when(unsorted))\n self.assertNotEquals(\n [str(dt) for dt in expected],\n [str(dt) for dt in unsorted])\n\n now_sorted = self.TDTT.sort(unsorted)\n self.assertTrue(self.is_sorted_ascending_by_when(now_sorted))\n self.assertEquals(\n [str(dt) for dt in expected],\n [str(dt) for dt in now_sorted])",
"def test_sort_data_by_time():\n data = race.read_file_to_list()\n sorted_data = race.sort_data_by_time(data)\n assert data != sorted_data\n assert len(data) == len(sorted_data)\n assert type(sorted_data) == list\n for lines in sorted_data:\n assert type(lines) == dict",
"def is_sorted(seq):\n return all(seq[i-1] < seq[i] for i in range(1, len(seq)))",
"def is_rejoinee(self):\n return len(self._start_date) > 1",
"def test_correctly_recorded_start(self):\n code, out, err = self.t(\"_get 1.start\")\n self.assertEqual(out, \"2008-12-22T00:00:00\\n\")\n\n code, out, err = self.t(\"_get 2.start\")\n self.assertEqual(out, \"2009-04-17T00:00:00\\n\")",
"def test_models_meetings_ordering_first(self):\n MeetingFactory.create_batch(3)\n meetings = Meeting.objects.all()\n self.assertGreaterEqual(meetings[0].start, meetings[1].start)\n self.assertGreaterEqual(meetings[1].start, meetings[2].start)",
"def hasStartedOrEnded(obj):\n return ((obj.end_time is None and obj.start_time <= timezone.now()) or \n (obj.end_time is not None and timezone.now() >= obj.end_time))",
"def is_sorted(self):\n previous = 0 # Setting to 0 shouldn't be an issue aslong as MIN_VALUE is at least 0\n for value in self.data:\n if value < previous:\n return False\n previous = value\n return True",
"def higher_than(self, note):\n if ALL_NOTES.index(self.name) > ALL_NOTES.index(note.name):\n return True\n else:\n return False",
"def test_files(self):\r\n\r\n for path in self.get_files():\r\n self.assertTrue(datetime.fromtimestamp(os.path.getmtime(path)) > self.start_time,\r\n msg='File not recently modified: %s' % os.path.basename(path))",
"def todo(self):\n # sort events with eventid using datetime string\n pass",
"def _should_start(self, setlink):\n return setlink.step_record.should_start()",
"def cmpArtworkByDate(artwork1, artwork2):\n return (lt.firstElement(artwork1)['Date'] < lt.firstElement(artwork2)['Date'])",
"def starts_with_tonic_or_fifth(a_list):\n key = a_list.track.bars[0].key\n note = a_list.get_first_actual_note()\n possible_notes = [key.name, Note(key).transpose('5', True).name]\n if note.name in possible_notes:\n return []\n else:\n return [note.start]",
"def ascendingTimeOrder(t1, t2):\n return cmp(t1['total_seconds'], t2['total_seconds'])",
"def is_started(self):\n return self.currIndex >= 0",
"def is_sorted_list(list_):\n prev = -1\n for item in list_:\n if item < prev:\n return False\n prev = item\n return True",
"def is_sorted(self):\n cur_list = []\n cur_node = self.head\n while cur_node is not None:\n cur_list.append(cur_node.data.number())\n cur_node = cur_node.next\n if cur_list == sorted(cur_list):\n return True\n return False"
] | [
"0.61224854",
"0.5865765",
"0.57295513",
"0.55280834",
"0.5441577",
"0.5399074",
"0.53915083",
"0.5378046",
"0.5370285",
"0.53318506",
"0.53177214",
"0.52837706",
"0.52716595",
"0.52472234",
"0.5236269",
"0.52277195",
"0.52230084",
"0.5207091",
"0.5160097",
"0.51134276",
"0.50987726",
"0.5077195",
"0.5070845",
"0.50664806",
"0.5043993",
"0.5031588",
"0.5017905",
"0.50112927",
"0.5000681",
"0.4986333"
] | 0.74918807 | 0 |
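A minimal, self-contained sketch of the start-time ordering check used by testSortedNotes above; the Note dataclass and the sample values are hypothetical stand-ins for whatever score model the test actually exercises.

from dataclasses import dataclass

@dataclass
class Note:
    start_time: float

def is_sorted_by_start(notes):
    # True when every note starts no later than the note that follows it.
    return all(notes[i].start_time <= notes[i + 1].start_time
               for i in range(len(notes) - 1))

assert is_sorted_by_start([Note(0.0), Note(1.5), Note(1.5), Note(3.0)])
assert not is_sorted_by_start([Note(2.0), Note(1.0)])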
Runs the given command and gathers the output. If a callback is provided, then the output is sent to it, otherwise it is just returned. Optionally, the output of the command can be "watched" and whenever new output is detected, it will be sent to the given `callback`. | def run_cmd(cmd, callback=None, watch=False, background=False, shell=False):
if watch and not callback:
raise RuntimeError(
"You must provide a callback when watching a process."
)
output = None
if shell:
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
else:
proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
if background:
        # Let the task run in the background and return its pid for monitoring:
return proc.pid, proc
if watch:
while proc.poll() is None:
line = proc.stdout.readline()
if line != "":
callback(line)
# Sometimes the process exits before we have all of the output, so
# we need to gather the remainder of the output.
remainder = proc.communicate()[0]
if remainder:
callback(remainder)
else:
output = proc.communicate()[0]
if callback and output is not None:
return callback(output)
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AddOutputCallback(self, callback):\n self.output_callbacks.append(callback)",
"def execute():\n command_line_args = argv[1:]\n args = cli(command_line_args)\n\n callback = args.callback\n kwargs = {\n k: v\n for k, v in args.__dict__.items()\n if k != \"callback\"\n }\n\n main(callback, **kwargs)",
"def send_output_event(self, message, callback=None):\n\n def pipeline_callback(call):\n if call.error:\n # TODO we need error semantics on the client\n exit(1)\n if callback:\n callback()\n\n self._pipeline.run_op(\n pipeline_ops_iothub.SendOutputEvent(message=message, callback=pipeline_callback)\n )",
"def run_with_output(self, cmd, end_strs=None, timeout=301, timeout_exception=True, api_call='write'):\n if api_call == 'write':\n self.write(cmd)\n out = ''\n else:\n out = self.runsingle(cmd)\n time.sleep(1)\n out += self.gather_output(cmd, out, end_strs, timeout, timeout_exception) # gather last of data buffer\n return out",
"def check_output(command, timeout=None):\n return CheckOutputHelper().run(command, timeout)",
"def run(self, cmd, out_display=None, err_display=None, **kwargs):\n if os.name == 'nt':\n loop = asyncio.ProactorEventLoop() # for subprocess' pipes on Windows\n asyncio.set_event_loop(loop)\n else:\n loop = asyncio.get_event_loop()\n result = loop.run_until_complete(self.arun(cmd, out_display, err_display, **kwargs))\n return result",
"def get_output(self, cmd, *args, **kwargs):\n return self.exec(cmd, *args, **kwargs, capture_output=True)",
"def do_command(cmd, output_file):\n global txt_output_dir\n output_path = os.path.join(txt_output_dir, output_file)\n print \"doing: %s > %s\" % (cmd, output_path)\n output = check_output(cmd.split(\" \"))\n with open(output_path, \"w\") as f:\n f.write(output)",
"def run_command(command):\n\n return subprocess.run(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)",
"def register_command_callback(self, cmd, callback):\n if not self.configured:\n return\n self.bcp_receive_commands[cmd] = callback",
"def run_command(shell_command, get_output):\n command_ran = subprocess.run(shell_command, capture_output=get_output)\n return command_ran",
"def run(command):\n\n out = \"\"\n try:\n out = str(subprocess.check_output(command,\n shell=True,\n universal_newlines=True))\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\n 'Failed to execute command %s: %s' % (e.cmd, e.returncode))\n else:\n return out",
"def output(cmd):\n return subprocess.check_output(cmd, shell=True)",
"def _call_command(wrapper, command, no_out=False):\n\n child = subprocess.Popen(command.split(),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (out, err) = child.communicate()\n ret = child.returncode\n\n if not (no_out and ret == 0):\n for line in (out + err).splitlines():\n wrapper.pm(line.decode(\"utf-8\"))\n\n if ret != 0:\n if ret < 0:\n cause = \"signal\"\n ret *= -1\n else:\n cause = \"status\"\n\n wrapper.pm(messages[\"process_exited\"].format(command, cause, ret))\n\n return (ret, out)",
"def run_with_output(self, cmd, end_strs, timeout=310):\n self.write(cmd)\n out = self.gather_output(cmd, end_strs, timeout)\n return out",
"def runcmd_output(cmd, **kwargs):\n kwargs[\"raise_err\"] = True\n return execWithCapture(cmd[0], cmd[1:], **kwargs)",
"def RunCommand(command, parser_func=None, filter_obj=None, pipes=None,\n print_cmd=True, timeout=None, max_time=None, **kwargs):\n\n def TimedFlush(timeout, fh, kill_event):\n \"\"\"Flush fh every timeout seconds until kill_event is true.\"\"\"\n while True:\n try:\n fh.flush()\n # File handle is closed, exit.\n except ValueError:\n break\n # Wait for kill signal or timeout.\n if kill_event.wait(timeout):\n break\n print threading.currentThread(), 'TimedFlush: Finished'\n\n # TODO(all): nsylvain's CommandRunner in buildbot_slave is based on this\n # method. Update it when changes are introduced here.\n def ProcessRead(proc, writefh, parser_func=None, filter_obj=None,\n log_event=None, debug=False):\n writefh.flush()\n\n # Python on Windows writes the buffer only when it reaches 4k. Ideally\n # we would flush a minimum of 10 seconds. However, we only write and\n # flush no more often than 20 seconds to avoid flooding the master with\n # network traffic from unbuffered output.\n kill_event = threading.Event()\n flush_thread = threading.Thread(\n target=TimedFlush, args=(20, writefh, kill_event))\n flush_thread.daemon = True\n flush_thread.start()\n\n try:\n in_byte = proc.stdout.read(1)\n in_line = cStringIO.StringIO()\n while in_byte:\n # Capture all characters except \\r.\n if in_byte != '\\r':\n in_line.write(in_byte)\n\n # Write and flush on newline.\n if in_byte == '\\n':\n if log_event:\n log_event.set()\n if parser_func:\n parser_func(in_line.getvalue().strip())\n\n if filter_obj:\n filtered_line = filter_obj.FilterLine(in_line.getvalue())\n if filtered_line is not None:\n writefh.write(filtered_line)\n else:\n writefh.write(in_line.getvalue())\n in_line = cStringIO.StringIO()\n if debug and proc.poll() is not None:\n print 'Child process has terminated'\n in_byte = proc.stdout.read(1)\n\n print threading.currentThread(), 'ProcessRead: proc.stdout finished.'\n\n if log_event and in_line.getvalue():\n log_event.set()\n\n # Write remaining data and flush on EOF.\n if parser_func:\n parser_func(in_line.getvalue().strip())\n\n if filter_obj:\n if in_line.getvalue():\n filtered_line = filter_obj.FilterDone(in_line.getvalue())\n if filtered_line is not None:\n writefh.write(filtered_line)\n else:\n if in_line.getvalue():\n writefh.write(in_line.getvalue())\n finally:\n print threading.currentThread(), 'ProcessRead: cleaning up.'\n kill_event.set()\n flush_thread.join()\n writefh.flush()\n print threading.currentThread(), 'ProcessRead: finished.'\n\n pipes = pipes or []\n\n # Print the given command (which should be a list of one or more strings).\n if print_cmd:\n print '\\n' + subprocess.list2cmdline(command) + '\\n',\n for pipe in pipes:\n print ' | ' + subprocess.list2cmdline(pipe) + '\\n',\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n if not (parser_func or filter_obj or pipes or timeout or max_time):\n # Run the command. The stdout and stderr file handles are passed to the\n # subprocess directly for writing. 
No processing happens on the output of\n # the subprocess.\n proc = subprocess.Popen(command, stdout=sys.stdout, stderr=sys.stderr,\n bufsize=0, **kwargs)\n\n # Wait for the command to terminate.\n proc.wait()\n assert proc.returncode is not None\n return proc.returncode\n\n else:\n if not (parser_func or filter_obj):\n filter_obj = RunCommandFilter()\n\n # Start the initial process.\n proc = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, bufsize=0, **kwargs)\n proc_handles = [proc]\n\n if pipes:\n pipe_number = 0\n for pipe in pipes:\n pipe_number = pipe_number + 1\n if pipe_number == len(pipes) and not (parser_func or filter_obj):\n # The last pipe process needs to output to sys.stdout or filter\n stdout = sys.stdout\n else:\n # Output to a pipe, since another pipe is on top of us.\n stdout = subprocess.PIPE\n pipe_proc = subprocess.Popen(pipe, stdin=proc_handles[0].stdout,\n stdout=stdout, stderr=subprocess.STDOUT)\n proc_handles.insert(0, pipe_proc)\n\n # Allow proc to receive a SIGPIPE if the piped process exits.\n for handle in proc_handles[1:]:\n handle.stdout.close()\n\n log_event = threading.Event()\n\n # Launch and start the reader thread.\n thread = threading.Thread(target=ProcessRead,\n args=(proc_handles[0], sys.stdout),\n kwargs={'parser_func': parser_func,\n 'filter_obj': filter_obj,\n 'log_event': log_event})\n\n kill_lock = threading.Lock()\n\n def term_then_kill(handle, initial_timeout, numtimeouts, interval):\n def timed_check():\n for _ in range(numtimeouts):\n if handle.poll() is not None:\n return True\n time.sleep(interval)\n\n handle.terminate()\n time.sleep(initial_timeout)\n timed_check()\n if handle.poll() is None:\n handle.kill()\n timed_check()\n return handle.poll() is not None\n\n\n def kill_proc(proc_handles, message=None):\n with kill_lock:\n if proc_handles:\n killed = term_then_kill(proc_handles[0], 0.1, 5, 1)\n\n if message:\n print >> sys.stderr, message\n\n if not killed:\n print >> sys.stderr, 'could not kill pid %d!' 
% proc_handles[0].pid\n else:\n print >> sys.stderr, 'program finished with exit code %d' % (\n proc_handles[0].returncode)\n\n # Prevent other timeouts from double-killing.\n del proc_handles[:]\n\n def timeout_func(timeout, proc_handles, log_event, finished_event):\n while log_event.wait(timeout):\n log_event.clear()\n if finished_event.is_set():\n return\n\n message = ('command timed out: %d seconds without output, attempting to '\n 'kill' % timeout)\n kill_proc(proc_handles, message)\n\n def maxtimeout_func(timeout, proc_handles, finished_event):\n if not finished_event.wait(timeout):\n message = ('command timed out: %d seconds elapsed' % timeout)\n kill_proc(proc_handles, message)\n\n timeout_thread = None\n maxtimeout_thread = None\n finished_event = threading.Event()\n\n if timeout:\n timeout_thread = threading.Thread(target=timeout_func,\n args=(timeout, proc_handles, log_event,\n finished_event))\n timeout_thread.daemon = True\n if max_time:\n maxtimeout_thread = threading.Thread(target=maxtimeout_func,\n args=(max_time, proc_handles,\n finished_event))\n maxtimeout_thread.daemon = True\n\n thread.start()\n if timeout_thread:\n timeout_thread.start()\n if maxtimeout_thread:\n maxtimeout_thread.start()\n\n # Wait for the commands to terminate.\n for handle in proc_handles:\n handle.wait()\n assert handle.returncode is not None\n\n # Wake up timeout threads.\n finished_event.set()\n log_event.set()\n\n thread.join()\n\n # Check whether any of the sub commands has failed.\n for handle in proc_handles:\n assert handle.returncode is not None\n if handle.returncode:\n return handle.returncode\n\n assert proc.returncode is not None\n return proc.returncode",
"def runCommand(command, outputPrefix=\"ProcessRunner> \"):\n proc = ProcessRunner(command)\n proc.mapLines(WriteOut(sys.stdout, outputPrefix=outputPrefix), procPipeName=\"stdout\")\n proc.mapLines(WriteOut(sys.stderr, outputPrefix=outputPrefix), procPipeName=\"stderr\")\n proc.wait()\n returnCode = proc.poll()\n\n # proc.terminate()\n # proc.shutdown()\n\n return returnCode",
"def runCallback(self, callback=\"help\"):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n config = self._request.getConfig()\n data = self._request.getData()\n\n # invoke all callbacks for the 'callback'\n handled = tools.run_callback(callback,\n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n # do end callback\n tools.run_callback(\"end\", {'request': request})",
"def run_command(self, command, timeout=None, stdout=True):\n print('Running \"{}\"...'.format(command))\n output = self._shell.run_command(\n command, timeout=timeout, async_=False\n )\n if stdout:\n print(output)\n print(\"Done!\")\n return output",
"def run(self, command):\n try:\n print(f\"RUNNING: {command}\")\n print(\"-\" * 80)\n print(subprocess.check_output(command, shell=True).decode('utf-8'))\n except subprocess.CalledProcessError as e:\n print(f\"ERROR calling '{command}'\")\n print(\"-\" * 20)\n print(e.output and e.output.decode('utf-8'))\n sys.exit(-1)",
"def run(self, command):\n log.debug(\"Executing command: \" + str(command))\n\n output, error = \"\", \"\"\n p = subprocess.Popen(command.full_command, stdout=subprocess.PIPE)\n\n for line in p.stdout:\n output += line\n log.debug(line)\n stdout, error = p.communicate()\n\n return output, error",
"def invoke(*args, cmd=None, **kwargs):\n self, callback = args[:2]\n ctx = self\n\n # It's also possible to invoke another command which might or\n # might not have a callback. In that case we also fill\n # in defaults and make a new context for this command.\n if isinstance(callback, Command):\n # log.info('is Command')\n other_cmd = callback\n callback = other_cmd.callback\n ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)\n if callback is None:\n raise TypeError('The given command does not have a '\n 'callback that can be invoked.')\n\n for param in other_cmd.params:\n if param.name not in kwargs and param.expose_value:\n kwargs[param.name] = param.get_default(ctx)\n\n args = args[2:]\n with click.core.augment_usage_errors(self):\n with ctx:\n # log.info('running callback ', term.cyan(callback), ' cmd=', cmd)\n if hasattr(cmd, '__is_subcommand_manager__'):\n # log.info(term.cyan('IS SUBCOMMAND MANAGER '))\n real_callback = cmd.callback(*args, **kwargs)\n result = next(real_callback)\n # log.info(self.obj)\n if result is not None:\n self.obj = result\n def process_teardown(*a, **kw):\n try:\n real_callback.send(a)\n except StopIteration as e:\n return e.value\n cmd.result_callback = process_teardown\n return result\n\n return callback(*args, **kwargs)",
"def _run(cmd, **kwargs):\n kwargs.setdefault('combine_stdout_stderr', True)\n kwargs.setdefault('capture_output', True)\n kwargs.setdefault('check', False)\n # Make sure hooks run with stdin disconnected to avoid accidentally\n # interactive tools causing pauses.\n kwargs.setdefault('input', '')\n return rh.utils.run(cmd, **kwargs)",
"def call(command):\n cmd = join_and_sanitize(command)\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True)\n result, _err = proc.communicate()\n return result",
"def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output",
"def run(cmd: str, verbose: bool = False):\n\n if verbose:\n print(cmd)\n\n out = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n if verbose:\n print(out)\n\n return out",
"def run_and_log_output(cmd_string):\n logging.info('Running %s', cmd_string)\n c = iterpipes.cmd(cmd_string)\n out = iterpipes.run(c)\n for line in out:\n logging.info(line)",
"def execute(self):\n return self.callback(*self.args)",
"def run_output(mocker):\n return mocker.patch('d.subprocess.check_output')"
] | [
"0.60572314",
"0.58634955",
"0.58029765",
"0.5787524",
"0.573274",
"0.5612282",
"0.5592784",
"0.5578011",
"0.5532359",
"0.5510294",
"0.55036664",
"0.54866433",
"0.5473412",
"0.5423298",
"0.5419067",
"0.5387715",
"0.53372276",
"0.5322813",
"0.53072083",
"0.53039205",
"0.5271837",
"0.5269564",
"0.524028",
"0.5222583",
"0.5216371",
"0.5210117",
"0.520064",
"0.5173157",
"0.517302",
"0.51724285"
] | 0.61989796 | 0 |
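Illustrative calls to the run_cmd helper above; the shell commands and the log_line callback are placeholders, and the callback receives raw stdout bytes because the Popen pipes are not opened in text mode.

def log_line(chunk):
    # Decode the bytes handed over by run_cmd before printing them.
    print(chunk.decode(errors="replace"), end="")

# One-shot capture: returns the full stdout of the command.
out = run_cmd("echo hello")

# Streamed mode: each line is pushed to the callback while the process runs.
run_cmd("ping -c 3 localhost", callback=log_line, watch=True)

# Background mode: returns (pid, Popen handle) immediately without waiting.
pid, proc = run_cmd("sleep 5", background=True)
proc.wait()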
Check that proportions in the composition file sum to 1 | def check_proportions(self):
proportions = [
v['proportion'] for k, v in self.composition.items()
]
if sum(proportions) < 1.0:
raise ValueError('Sum of proportions between host and pathogen must be 1.0.')
elif sum(proportions) > 1.0:
raise ValueError('Sum of proportions between host and pathogen allocations cannot exceed 1.0')
else:
self.logger.info('Sum of proportions equals 1.0 - proceeding') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_proportion(control, test):\n return set(control) == set(test) == {0, 1}",
"def __call__(self, read, info: ModificationInfo):\n n_count = read.sequence.lower().count('n')\n if self.is_proportion:\n if len(read) == 0:\n return False\n return n_count / len(read) > self.cutoff\n else:\n return n_count > self.cutoff",
"def check_compositionality(cls, fraction_total_reads: Series[float]) -> bool:\n # Bracken reports fractions with five decimals but rounding errors accumulate.\n return fraction_total_reads.empty or bool(\n np.isclose(fraction_total_reads.sum(), 1.0, atol=0.02)\n )",
"def test_composition_adds_to_100_percent(self):",
"def test(self, filename):\n hit = 0\n total = 0\n n = self.n\n for sent in open(filename):\n samp = sent.rstrip('\\n')\n# samp = '~' + samp + '~' \n for i in range(len(samp) - n):\n total = total + 1\n prev = samp[i:i + n - 1]\n pred = self.pred(prev)\n if pred == samp[i + n - 1]:\n hit = hit + 1\n \n return hit/total",
"def check_prize(correct_num):",
"def check_proportion_list(proportions):\r\n \r\n if str(type(proportions[0])) == \"<class 'float'>\":\r\n prop_type = 'list'\r\n count = 0.00\r\n for element in proportions:\r\n count += float(element)\r\n \r\n if count != float(1):\r\n diff = 1 - count\r\n bad_prop = proportions[-1]\r\n proportions[-1] = round(float(proportions[-1]) + diff,6)\r\n print('Proportion Set 0:\\n----------------\\n' +\r\n 'Entered proportions not equivalent to 1,\\n' \r\n + str(bad_prop) + ' changed to ' + str(proportions[-1])\r\n + '\\n')\r\n \r\n \r\n \r\n \r\n elif str(type(proportions[0])) == \"<class 'list'>\":\r\n for i in range(len(proportions)):\r\n prop_type = 'list/list'\r\n count = 0.00\r\n for element in proportions[i]:\r\n count += float(element)\r\n \r\n if count != float(1):\r\n diff = 1 - count\r\n bad_prop = proportions[i][-1]\r\n proportions[i][-1] = round(float(proportions[i][-1]) + diff,6)\r\n print('Proportion Set ' + str(i) + ':\\n----------------\\n' +\r\n 'Entered proportions not equivalent to 1,\\n' \r\n + str(bad_prop) + ' changed to ' + str(proportions[i][-1])\r\n + '\\n')\r\n \r\n \r\n\r\n return proportions, prop_type",
"def GetProportion(self):\r\n\r\n return self.proportion",
"def direct(self):\n n_notches = sum([photo.has_notch() for photo in self.photos])\n if n_notches > 0:\n return 1\n else:\n return 0",
"def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))",
"def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0",
"def validate(self):\n if self.isEmpty(): return False\n\n sum = 0\n for item in self.mask:\n sum += item.prob\n return sum == 1",
"def check_homogeneity(pid, data_dir=CORPUS_DIR, decimals=2):\n if not os.path.exists(data_dir+pid):\n print('no', pid)\n return\n sliceThickness = np.around(get_thickness(data_dir+pid), decimals=decimals)\n OK = np.all(sliceThickness==sliceThickness[0])\n if OK:\n print(pid, sliceThickness[0])\n else:\n print(pid, sliceThickness)",
"def get_verified_ratio(self):\n if len(self.pages) == 0: # There are no pages in this journal \n return 0, 0, 0\n verified = (1, 2, 4) \n numVerified = 0 \n numSeen = 0 \n for page in self.pages: \n numSeen += len(page.names) # page.names is a list of Name objects \n for name in page.names: \n if name.match in verified: \n numVerified += 1\n if numSeen == 0: # No names in any of the pages of the journal \n return 0, 0, 0\n return numVerified, numSeen, numVerified / numSeen",
"def has_column_proportion(self, index):\n\n return index in self._proportions[0]",
"def test_mixing_ratio():\n p = 998. * units.mbar\n e = 73.75 * units.mbar\n assert_almost_equal(mixing_ratio(e, p), 0.04963, 2)",
"def test_total_scattering_cross_section():\n structure = Material(input)\n assert (structure.total_scattering_cross_section == 31.880000000000003)",
"def get_proportion_of_unique_lemmas(self):\n lemmas = self.blob.words.lemmatize()\n return len(set(lemmas)) / float(len(self.blob.words))",
"def discrepancy(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result -= value * math.log(self.betP(focal), 2)\n return round(result, 6)",
"def runpreprocessing(numofcountryfiles, numofsexfiles):\r\n\r\n if numofsexfiles/float(numofcountryfiles) == 2.0:\r\n return False\r\n else:\r\n return True",
"def test_concentration_profile(self):\n # TODO: add an output for average particle concentration",
"def permutate_genome_percent(human, phix, bacteria):\n \n per = list(itertools.product(human, phix, bacteria))\n sum_per = [sum(i) for i in zip(*per)]\n \n #check percentage sum < 1\n if all(i > 1 for i in sum_per):\n print \"Some combinations of human, phix and bacteria greater than 1\"\n sys.exit(0)\n \n return per",
"def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0",
"def has_row_proportion(self, index):\n\n return index in self._proportions[1]",
"def part1(fname: str) -> int:\n return sum(len(set(''.join(group))) for group in get_data(fname))",
"def completeness_of_game(game):\n spaces = game.width * game.height\n played_spaces = len([x for x in game._board_state[:-3] if x == 1])\n return float(played_spaces / spaces)",
"def pe_ratio(self):\n if self._pe_ratio == None:\n return float('inf')\n return self._pe_ratio",
"def feasible_ratio(self, solutions):\r\n count = np.zeros(len(solutions[0]))\r\n for x in solutions:\r\n count += x.unrepaired == x\r\n return count / float(len(solutions))",
"def PmfCorrect(efficacy, difficulties):\n pmf0 = thinkbayes2.Pmf([0])\n\n ps = [ProbCorrect(efficacy, difficulty) for difficulty in difficulties]\n pmfs = [BinaryPmf(p) for p in ps]\n dist = sum(pmfs, pmf0)\n return dist",
"def count_cop(self, infile):\n n_cop = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n if dg.has_cop_deprel():\n n_cop += 1\n return n_cop, len(dgs_in)"
] | [
"0.6806141",
"0.6376372",
"0.63674873",
"0.6106981",
"0.59485376",
"0.59462976",
"0.5868693",
"0.58353674",
"0.5807831",
"0.5794899",
"0.57598245",
"0.57089925",
"0.56516993",
"0.5600838",
"0.55719936",
"0.5553505",
"0.5544299",
"0.5544022",
"0.5519452",
"0.55115443",
"0.54876643",
"0.5465283",
"0.54362434",
"0.54284996",
"0.54054505",
"0.5395701",
"0.53935254",
"0.5366575",
"0.5361467",
"0.5360628"
] | 0.7553237 | 0 |
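A stand-alone sketch of the same proportion check as check_proportions above, assuming a composition dict of the shape it iterates over; unlike the original's exact < and > comparisons, this sketch uses math.isclose to tolerate floating-point rounding.

import math

composition = {
    'host': {'file': 'host.fq', 'proportion': 0.9},
    'pathogen': {'file': 'pathogen.fq', 'proportion': 0.1},
}

total = sum(v['proportion'] for v in composition.values())
if not math.isclose(total, 1.0):
    raise ValueError(f'Proportions must sum to 1.0, got {total}')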
Clean up the Fastq index files from Pyfastx | def clean(self):
for _, data in self.composition.items():
index_file = Path(data['file'] + '.fxi')
if index_file.exists():
index_file.unlink() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_index(self):\n if self.index_module:\n self.index_module = None\n gc.collect()",
"def cleanup(self):\n index_id = self.params[\"index_id\"]\n\n # Remove the index document from the database.\n self.db.indexes.delete_one({\"_id\": index_id})\n\n self.dispatch(\"indexes\", \"delete\", [index_id])\n\n query = {\n \"_id\": {\n \"$in\": self.db.history.distinct(\"_id\", {\"index.id\": index_id})\n }\n }\n\n # Set all the otus included in the build to \"unbuilt\" again.\n self.db.history.update_many(query, {\n \"$set\": {\n \"index\": {\n \"id\": \"unbuilt\",\n \"version\": \"unbuilt\"\n }\n }\n })\n\n id_list = self.db.history.distinct(\"_id\", query)\n\n self.dispatch(\"history\", \"update\", id_list)\n\n virtool.utils.rm(self.params[\"index_path\"], True)",
"def reindex(self):\n self.index.drop_db()\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n blob_uuid = name\n self.index.update_from_metadata(self.load_blob_metadata(blob_uuid))",
"def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass",
"def _clean_up_project_file(self):\n\n print \"Reading in project file...\"\n with open(self.project_file,'r') as f_in:\n project_json = json.load(f_in)\n\n # Go through design_files references\n configurations = project_json['Project']['Configurations']\n n = len(configurations)\n indices_to_delete = []\n for i in range(n):\n if not os.path.basename(configurations[i]) in self.design_files:\n indices_to_delete.append(i)\n\n indices_to_delete.reverse()\n for i in indices_to_delete:\n del configurations[i]\n\n # Go through design_space_files references\n design_space_models = project_json['Project']['DesignSpaceModels']\n n = len(design_space_models)\n indices_to_delete = []\n for i in range(n):\n if not os.path.basename(design_space_models[i]) in self.design_space_files:\n indices_to_delete.append(i)\n\n indices_to_delete.reverse()\n for i in indices_to_delete:\n del design_space_models[i]\n\n # Go through test_bench_files references\n #test_benches = project_json['Project']['TestBenches']\n #n = len(test_benches)\n #indices_to_delete = []\n #for i in range(n):\n # if not os.path.basename(test_benches[i]) in self.test_bench_files:\n # indices_to_delete.append(i)\n #\n #for i in indices_to_delete.reverse():\n # del test_benches[i]\n\n # Write out the new, reduced in size, project dictionary\n with open(self.project_file,'wb') as f_out:\n json.dump(project_json, f_out, indent=4)\n\n print \"Written out cleaned up project dictionary.\"",
"def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)",
"def removeRtree(self):\n try:\n os.remove(str(self.dim)+'d_index.data')\n os.remove(str(self.dim)+'d_index.index')\n print('Files removed')\n except:\n print('No such files')",
"def _reset_index():\r\n WIX = create_in(INDEX_NAME, BmarkSchema) # noqa\r",
"def clean():\n clean_files()",
"def unindexReverseIndex(alphabet,reverseIndex,path):\n\tdef _deleteDocumentTermCounterString(docCount,termCount):\n\t\tdeleteString = \"[Document %8d Terms %8d]\" % (docCount,termCount)\n\t\tsys.stdout.write(\"\\b\" * len(deleteString))\n\tdef _writeDocumentTermCounterString(docCount,termCount):\n\t\tsys.stdout.write(\"[Document %8d Terms %8d]\" % (docCount,termCount))\n\toutputFileHash = dict()\n\tfor termWord,termId in alphabet.iteritems():\n\t\tdocCounter = 0\n\t\tdisplayTermWord = termWord[0:14]\n\t\tif len(displayTermWord) == 14: displayTermWord = \"\".join([\"<\",displayTermWord[:-2],\">\"])\n\t\tsys.stdout.write(\"Unindexing term %14s \" % displayTermWord)\n\t\t_writeDocumentTermCounterString(0,0)\n\t\tfor docIdTermInstanceVector in reverseIndex.lookupTermId(termId):\n\t\t\ttermCounter = 0\n\t\t\t_deleteDocumentTermCounterString(docCounter,termCounter)\n\t\t\tdocCounter += 1\n\t\t\t_writeDocumentTermCounterString(docCounter,termCounter)\n\t\t\tdocId = docIdTermInstanceVector.docId\n\t\t\tif docId not in outputFileHash:\n\t\t\t\toutputFileName = os.sep.join([path,str(docId) + \".fwd\"])\n\t\t\t\toutputFileHash[docId] = outputFileName\n\t\t\tfp = open(outputFileHash[docId],\"ab\")\n\n\t\t\tfor termInstance in docIdTermInstanceVector.termInstancesGenerator:\n\t\t\t\t_deleteDocumentTermCounterString(docCounter,termCounter)\n\t\t\t\ttermCounter += 1\n\t\t\t\t_writeDocumentTermCounterString(docCounter,termCounter)\n\t\t\t\tprint >> fp, \"%d %s\" % (termInstance.position,termWord)\n\t\t\tfp.close()\n\n\t\tsys.stdout.write(\" DONE\\n\")\n\t\n\tfor fileName in outputFileHash.values():\n\t\tfp = open(fileName,\"rb\")\n\t\tfileTerms = sorted([(int(position),word[:-1]) for position,word in [line.split(\" \",1) for line in fp]])\n\t\tfp.close()\n\t\tprint >> sys.stdout, \"Reorganizing: %s\" % fileName\n\t\tfp = open(fileName,\"wb\")\n\t\tfor termPosition,termWord in fileTerms:\n\t\t\tfp.write(termWord + \" \")\n\t\tfp.close()",
"def clean(self):\n cursor = self.cnx.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS FileNameFilter_scores\")\n cursor.execute(\"DROP TABLE IF EXISTS FileNameFilter_unique_name\")\n self.cnx.commit()\n cursor.close()",
"def unindex_later(self):\n return",
"def reset_file_index_cache() -> None:\n fileindex_cache_five_minutes.invalidate()",
"def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space",
"def clear_indexes(self):\n for keypoints in self:\n keypoints.clear_index()",
"def remove_intermediate_files(self):\r\n\r\n # tmp files are written in the current dir,\r\n # app controller always jumps into dir specified via exec_dir\r\n # Note: blast intermediates are not removed\r\n exec_dir = str(self.Parameters['--exec_dir'].Value)\r\n inp_file_name = str(self.Parameters['--query_NAST'].Value)\r\n\r\n exec_dir = exec_dir.rstrip('\"')\r\n exec_dir = exec_dir.lstrip('\"')\r\n\r\n inp_file_name = inp_file_name.rstrip('\"')\r\n inp_file_name = inp_file_name.lstrip('\"')\r\n\r\n tmp_suffixes = [\".CPS\", \".CPS.CPC\", \".CPS_RENAST\", \".CPS_RENAST.cidx\",\r\n \".CPS.CPC.wTaxons\", \".cidx\"]\r\n cs_tmp_files = [\r\n exec_dir +\r\n '/' +\r\n inp_file_name +\r\n x for x in tmp_suffixes]\r\n remove_files(cs_tmp_files, error_on_missing=False)\r\n\r\n db_param = self.Parameters['--db_NAST']\r\n if db_param.isOn():\r\n nast_db_name = str(db_param.Value)\r\n nast_db_name = nast_db_name.rstrip('\"')\r\n nast_db_name = nast_db_name.lstrip('\"')\r\n\r\n # Better do not remove this file since other ChimeraSlayer\r\n # instances running on the same ref set might use this file\r\n # Should be rather deleted in the calling function\r\n# remove_files([nast_db_name + \".cidx\"],\r\n# error_on_missing=False)\r\n\r\n fasta_param = self.Parameters['--db_FASTA']\r\n if fasta_param.isOn():\r\n fasta_name = str(fasta_param.Value)\r\n fasta_name = fasta_name.rstrip('\"')\r\n fasta_name = fasta_name.lstrip('\"')\r\n\r\n blast_db_files = [\r\n fasta_name +\r\n x for x in [\r\n \".nsq\",\r\n \".nin\",\r\n \".nhr\",\r\n \".cidx\"]]\r\n remove_files(blast_db_files, error_on_missing=False)",
"def clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)",
"def clear(self):\n self.solr.delete_query(\"%s:%s\"\n % (self.index_uuid_field, self.index_uuid))\n self.solr.commit()",
"def clean(self):\n\t\tself.archiver.closeFile()",
"def _clean_up_meta_results_file(self):\n\n print \"Reading in meta-results file...\"\n with open(self.meta_results_file, 'r') as f_in:\n meta_results_json = json.load(f_in)\n\n results = meta_results_json['Results']\n n = len(results)\n indices_to_delete = []\n for i in range(n):\n # Assumption if any file is missing skip entire dictionary item.\n design_valid = results[i]['Design'] in self.design_files\n test_bench_valid = os.path.basename(results[i]['TestBench']) in self.test_bench_files\n sum_rep_valid = results[i]['Summary'] in self.result_files\n if not (design_valid and test_bench_valid and sum_rep_valid):\n indices_to_delete.append(i)\n\n indices_to_delete.reverse()\n for i in indices_to_delete:\n del results[i]\n\n # Write out the new, reduced in size, results dictionary\n with open(self.meta_results_file,'wb') as f_out:\n json.dump(meta_results_json, f_out, indent=4)\n\n print \"Written out cleaned up results dictionary.\"",
"def cleanup(destination_subdir):\n sp.check_call(f\"rm {destination_subdir}/*.bam\", shell=True)\n sp.check_call(f\"rm {destination_subdir}/*.sam\", shell=True)\n sp.check_call(f\"rm -rf ./index_files\", shell=True)",
"def remove_unused_index_files(reference_path: str, active_index_ids: list):\n for index_id in os.listdir(reference_path):\n if index_id not in active_index_ids:\n try:\n virtool.utils.rm(os.path.join(reference_path, index_id), recursive=True)\n except FileNotFoundError:\n pass",
"def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)",
"def _delete_index( env, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n adapter.delete( queries=[\"*:*\"] )\n adapter.commit()\n logger.info(u\"Deleted index\")",
"def clean_files(self):\n self.filenames.clear()",
"def cleanup_precluster_intermediate_files(batch_index):\n files = [\"seed{0}.S.fasta\".format(batch_index),\n \"seed{0}.orphans.fasta\".format(batch_index),\n \"batch{0}.fasta\".format(batch_index),\n \"batch{0}.remains.fasta\".format(batch_index),\n \"batch{0}.remains2.fasta\".format(batch_index)]\n\n files += glob.glob(\"batch{0}*.minimap\".format(batch_index))\n for file in files:\n try:\n os.remove(file)\n except:\n print >> sys.stderr, \"Failure to remove {0}. Ignore.\".format(file)",
"def _clean_up(self):",
"def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass",
"def cleanup():",
"def _clean_up_optimization():\n for (root, dirs, files) in walk(TEMP_MODULES_DIR_PATH, topdown=False):\n for file in files:\n if file.startswith(\"__temp_\"):\n remove(f\"{root}/{file}\")\n try:\n rmdir(root)\n except OSError:\n G.warn_(f\"Unidentified file found in temporary directory: {root}\")"
] | [
"0.6486027",
"0.6483935",
"0.61997026",
"0.6186733",
"0.6184694",
"0.6120797",
"0.61112803",
"0.61034113",
"0.6067713",
"0.6043162",
"0.6007758",
"0.60064405",
"0.5943808",
"0.59250754",
"0.59225947",
"0.5922426",
"0.59017795",
"0.5857676",
"0.58406806",
"0.583989",
"0.58378386",
"0.5835527",
"0.5832723",
"0.58205944",
"0.57949173",
"0.5792342",
"0.57868683",
"0.57581484",
"0.5748318",
"0.5742757"
] | 0.7508633 | 0 |
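A stand-alone equivalent of the clean step above, assuming the same composition mapping of organism name to FASTQ path; as in the original, the Pyfastx index is expected next to the input file with a .fxi suffix, and that is the file removed here.

from pathlib import Path

composition = {
    'host': {'file': 'host.fq'},
    'pathogen': {'file': 'pathogen.fq'},
}

for data in composition.values():
    index_file = Path(data['file'] + '.fxi')
    if index_file.exists():
        index_file.unlink()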
Rename read headers from the Pyfastx reads (readonly) | def rename_headers(reads: list, organism: str):
i = 0
read_strings = []
for read in reads:
read_str = read.raw.splitlines()
read_str[0] = f'@{organism}_{i}'
read_str = '\n'.join(read_str)
read_strings.append(read_str)
i += 1
return read_strings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header",
"def modify_bam_header(self, in_bam, out_bam):\n #bam_header = pysam.Samfile(in_bam,'rb',check_header=False, check_sq=False).header\n bam_header_raw = pysam.Samfile(in_bam,'rb',check_header=False, check_sq=False).text.replace(\"\\t\\n\",\"\\n\")\n temp_header = in_bam + \".tempheader\"\n with open(temp_header ,\"w\") as f:\n f.write(bam_header_raw)\n\n bam_header = pysam.Samfile(temp_header,'r', check_header=False, check_sq=False).header\n sample_id = os.path.basename(in_bam).replace(\".pre.bam\", \"\")\n try:\n original_SM = list(set([x[\"SM\"] for x in bam_header[\"RG\"]]))[0]\n except:\n raise PipelineException(\"@RG header line not found in %s!\" % bam_in)\n\n # make sure SM tags in RG line are consistent with sample_id\n rgs = copy.copy(bam_header[\"RG\"])\n bam_header[\"RG\"] = []\n for rg in rgs:\n rg[\"SM\"] = sample_id\n bam_header[\"RG\"].append(rg)\n\n # save original SM tage\n if \"CO\" not in bam_header:\n bam_header[\"CO\"] = [\"Original RG/SM tag: %s\" % original_SM]\n else:\n bam_header[\"CO\"].append(\"Original RG/SM tag: %s\" % original_SM)\n\n # write out header\n header_filename = self.as_temp(\"%s.header\" % in_bam)\n header_file = pysam.Samfile(header_filename, 'wh', header=bam_header)\n header_file.close()\n\n self.cmd(\"{samtools} reheader \\\n {header_file} \\\n {in_bam} > {out_bam}\"\n .format(\n samtools = self.cmds[\"samtools\"],\n in_bam=in_bam,\n out_bam=out_bam,\n header_file=header_filename,\n ),\n shell=True)\n\n self.rm(in_bam)",
"def _read_header(self, stream):\n return",
"def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' % i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')",
"def fix_headers(filename):\n\n counter = 1\n\n for line in fileinput.input(filename, inplace = True):\n if '>' in line:\n line = line.replace(line, '>'+str(counter)+'\\n')\n counter += 1\n sys.stdout.write(line)",
"def fasta_header(path, new_path):\n with open(path, 'r') as f_in:\n with open(new_path, 'w+') as f_out:\n records = SeqIO.parse(f_in, 'fasta')\n for record in records:\n record.id = record.id.split(\" \")[0]\n record.description = record.id.split(\" \")[0]\n SeqIO.write(record, f_out, 'fasta')\n return new_path",
"def seqIo_newHeader(fName, info):\n d, n = os.path.split(fName)\n if d==[]:d='./'\n tName=fName[:-4] + '_new' + time.strftime(\"%d_%m_%Y\") + fName[-4:]\n sr = seqIo_reader(fName)\n sw = seqIo_writer(tName,info)\n n=sr.header['numFrames']\n for f in range(n):\n I,ts=sr.getFrame(f)\n sw.addFrame(I,ts)\n sr.close()\n sw.close()",
"def testReadWriteHeaders(self):\n\n\t\tcache = AttributeCache(\"./test/AttributeCache.fio\", IndexedIO.OpenMode.Write)\n\n\t\tfor obj in self.cachedHeaderNames:\n\t\t\t# Make some random data\n\n\t\t\tdataWritten = V3fVectorData()\n\n\t\t\tnumPts = int(random.random())\n\t\t\tnumPts = numPts * numPts * 100\n\n\t\t\tfor i in range(0, numPts):\n\t\t\t\tdataWritten.append( V3f( random.random(), random.random(), random.random() ) )\n\n\t\t\tcache.writeHeader(obj, dataWritten)\n\n\t\t\tdataRead = cache.readHeader(obj)\n\n\t\t\tself.assertEqual( dataWritten, dataRead )\n\n\t\t\tdataRead = cache.readHeader()\n\n\t\t\tself.assertEqual( dataWritten, dataRead[ obj ] )\n\n\t\tself.assertEqual( set( self.cachedHeaderNames ).intersection( cache.headers() ), set( self.cachedHeaderNames ) )",
"def get_refactor_header(self, reffile, extra_keys=()):\n # Since expansion rules may depend on keys not used in matching, get entire header\n from crds import data_file\n header = data_file.get_header(reffile, observatory=self.observatory)\n needed_keys = tuple(self.get_reference_parkeys()) + tuple(extra_keys)\n header = data_file.ensure_keys_defined(header, needed_keys=needed_keys)\n # NOTE: required parkeys are in terms of *dataset* headers, not reference headers.\n log.verbose(\"insert_reference raw reffile header:\\n\",\n log.PP([ (key,val) for (key,val) in header.items() if key in self.get_reference_parkeys() ]),\n verbosity=70)\n header = self.reference_to_dataset_header(header)\n log.verbose(\"insert_reference transformed-to-dataset header:\\n\",\n log.PP([ (key,val) for (key,val) in header.items() if key in self.get_reference_parkeys() ]),\n verbosity=70)\n return header",
"def new_run_header(self, changed):\n self.header = changed['value']",
"def read_headers(filelike):\n return reader.Reader.read_headers(filelike).datafile",
"def update_header(fopen):\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start)",
"def _change_header(self, add=False):\n if self.data['history_file'] is None:\n return\n good_heading = self.data['history_header'] % self.data\n # ^^^ history_header is a string with %(abc)s replacements.\n headings = self.data['headings']\n history_lines = self.data['history_lines']\n previous = ''\n underline_char = '-'\n empty = False\n if not history_lines:\n # Remember that we were empty to start with.\n empty = True\n # prepare header line\n history_lines.append('')\n if len(history_lines) <= 1:\n # prepare underline\n history_lines.append(underline_char)\n if not headings:\n # Mock a heading\n headings = [{'line': 0}]\n inject_location = 0\n first = headings[0]\n inject_location = first['line']\n underline_line = first['line'] + 1\n try:\n underline_char = history_lines[underline_line][0]\n except IndexError:\n logger.debug(\"No character on line below header.\")\n underline_char = '-'\n previous = history_lines[inject_location]\n if add:\n inject = [\n good_heading,\n underline_char * len(good_heading),\n '',\n self.data['nothing_changed_yet'],\n '',\n '',\n ]\n if empty:\n history_lines = []\n history_lines[inject_location:inject_location] = inject\n else:\n # edit current line\n history_lines[inject_location] = good_heading\n logger.debug(\"Set heading from %r to %r.\", previous, good_heading)\n history_lines[underline_line] = utils.fix_rst_heading(\n heading=good_heading,\n below=history_lines[underline_line])\n logger.debug(\"Set line below heading to %r\",\n history_lines[underline_line])\n # Setting history_lines is not needed, except when we have replaced the\n # original instead of changing it. So just set it.\n self.data['history_lines'] = history_lines",
"def _read_old_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.version = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.revision = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 26\n self.date = struct.unpack('<26s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.file_format = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.original_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.reference_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_a = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_b = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_c = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_d = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 82\n self.annotate = struct.unpack('<82s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_model = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_serial_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.software_version_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.crystal_material = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_wavelength_microns = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.laser_null_doubling = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.optical_ratio = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xc = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xm = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xb = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_size = struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n 
byte_count += data_size\n\n data_size = 2\n self.acquire_mode = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.emissivity = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.apodization = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.zero_fill = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.run_time_math = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.fft_size = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_coadds = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_igrams = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.amb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.inst_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.wbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.cbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 20\n self.spare_i = struct.unpack('<hhhhhhhhhh',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_f = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_l = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 65\n self.spare_na = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nb = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nc = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nd = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_ne = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size",
"def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n headers, chanHeaders = self.readHeaderXTR(headerFile)\n self.headersList.append(headers)\n self.chanHeadersList.append(chanHeaders)\n\n # check to make sure no gaps, calculate out the sample ranges and list the data files for each sample\n self.mergeHeaders(self.headersList, self.chanHeadersList)",
"def rename_records(f, fh, i):\n from Bio import SeqIO\n import gzip as gz\n for record in SeqIO.parse(gz.open(f, 'rt'), 'fastq'):\n record.id = \"{}_{}\".format(i, record.id)\n SeqIO.write(record, fh, \"fastq\")\n return fh",
"def remove_header( self, *names ):\n for name in names:\n del self[ name.strip() ]",
"def prepend_header(filename, header=None, drop=0):\n for no, line in enumerate(fileinput.input(filename, inplace=True)):\n # it's meaningless to set drop to -1, -2, ...\n if no == 0 and drop == 0:\n if header:\n print(header)\n print(line, end='')\n # replace\n elif no + 1 == drop:\n if header:\n print(header)\n elif no >= drop:\n print(line, end='')\n else:\n # no + 1 < drop\n continue",
"def _readCommonHeader(self):\n for i in range(self.ignore_header_lines):\n self.ignored_header_lines.append(nappy.utils.text_parser.readItemFromLine(self.file.readline()))\n \n self._readTopLine()\n self.ONAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.ORG = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.SNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.MNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n (self.IVOL, self.NVOL) = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 2, int)\n dates = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 6, int)\n (self.DATE, self.RDATE) = (dates[:3], dates[3:])\n self.NLHEAD += self.ignore_header_lines",
"def _read_headers(self):\n # Read the textual header.\n self._read_textual_header()\n # The next 400 bytes are from the Binary File Header.\n binary_file_header = self.file.read(400)\n bfh = SEGYBinaryFileHeader(binary_file_header, self.endian)\n self.binary_file_header = bfh\n self.data_encoding = self.binary_file_header.data_sample_format_code\n # If bytes 3506-3506 are not zero, an extended textual header follows\n # which is not supported so far.\n if bfh.number_of_3200_byte_ext_file_header_records_following != 0:\n msg = 'Extended textual headers are supported yet. ' + \\\n 'Please contact the developers.'\n raise NotImplementedError(msg)",
"def _read_header(self, line):\n try:\n creation_date = datetime.strptime(line[23:33], '%y%m%d%H%M')\n except ValueError as err:\n print('Error parsing file creation date -> ' + str(err))\n creation_date = '000000'\n\n self.file_header = {'Priority Code': line[1:3],\n 'Immediate Destination': line[3:13].strip(),\n 'Immediate Origin': line[13:23].strip(),\n 'Creation Date': creation_date,\n 'File ID Modifier': line[33],\n 'Record Size': int(line[34:37].strip()),\n 'Blocking Factor': int(line[37:39]),\n 'Format Code': line[39],\n 'Immediate Destination Name': line[40:63].strip(),\n 'Immediate Origin Name': line[63:86].strip(),\n 'Reference Code': line[86:93]}",
"def deserialize(self, reader: serialization.BinaryReader) -> None:\n self.headers = reader.read_serializable_list(Header)",
"def set_header( name, value ):",
"def _fixHeaderLength(self):\n self.header.seek(0)\n lines = self.header.readlines()\n headlength = len(lines)\n lines[0] = wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (headlength, self.delimiter, self.FFI))\n self.header = StringIO(\"\".join(lines))\n self.header.seek(0)",
"def _update_headers(self):\n if not self._header_updated:\n headers = self.head_obj(self._client, self._spec)\n self._headers.update(headers)\n self._header_updated = True",
"def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous column header to it\n if j > 0 and \"Unc\" in hd:\n hd = headers[j - 1] + \" \" + hd\n if \"Unc\" in hd and \"Unc.\" not in hd:\n hd = hd.replace(\"Unc\", \"Unc.\")\n # expand abbreviated headers\n if \"Energy\" in hd and \"Energy Level\" not in hd:\n hd = hd.replace(\"Energy\", \"Energy Level\")\n if \"Par. Elevel\" in hd:\n hd = hd.replace(\"Par. Elevel\", \"Parent Energy Level\")\n if \"Abund.\" in hd:\n hd = hd.replace(\"Abund.\", \"Abundance (%)\")\n if \"Ene.\" in hd:\n hd = hd.replace(\"Ene.\", \"Energy\")\n if \"Int.\" in hd:\n hd = hd.replace(\"Int.\", \"Intensity (%)\")\n if \"Dec\" in hd and \"Decay\" not in hd:\n hd = hd.replace(\"Dec\", \"Decay\")\n if \"Rad\" in hd and \"Radiation\" not in hd:\n hd = hd.replace(\"Rad\", \"Radiation\")\n if \"EP\" in hd:\n hd = hd.replace(\"EP\", \"Endpoint\")\n if \"Mass Exc\" in hd and \"Mass Excess\" not in hd:\n hd = hd.replace(\"Mass Exc\", \"Mass Excess\")\n headers_new.append(hd)\n if len(set(headers_new)) != len(headers_new):\n raise NNDCRequestError(\n \"Duplicate headers after parsing\\n\"\n + f' Original headers: \"{headers}\"\\n'\n + f' Parsed headers: \"{headers_new}\"'\n )\n return headers_new",
"def parse_header(self):",
"def _read_new_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.version = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.revision = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 28\n self.date = struct.unpack('<28s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_format = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.original_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.reference_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_a = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_b = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_c = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 84\n self.annotate = struct.unpack('<84s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_model = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_serial_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.software_version_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.crystal_material = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_wavelength_microns = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_null_doubling = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.padding = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xc = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xm = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xb = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.num_chan = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.interferogram_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.scan_direction = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.acquire_mode = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n 
self.emissivity = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.apodization = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.zero_fill = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.run_time_math = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.fft_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.number_of_coadds = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.single_sided = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.chan_display = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.amb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.inst_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.wbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.cbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.temperature_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.emissivity_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 40\n self.spare_i = struct.unpack('<llllllllll',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 80\n self.spare_f = struct.unpack('<dddddddddd',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 68\n self.spare_na = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nb = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nc = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nd = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_ne = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size",
"def test_normalize_headers():\n headers = [\n 'AllocationTransferAgencyIdentifier', 'BeginningPeriodOfAvailability', 'flex_mycol', 'FLEX_ANOTHER'\n ]\n mapping = {'allocationtransferagencyidentifier': 'ata', 'beginningperiodofavailability': 'boa'}\n\n result = csvReader.normalize_headers(headers, False, mapping)\n assert list(result) == [\n 'allocationtransferagencyidentifier', 'beginningperiodofavailability', 'flex_mycol', 'flex_another'\n ]\n result = csvReader.normalize_headers(headers, True, mapping)\n assert list(result) == ['ata', 'boa', 'flex_mycol', 'flex_another']",
"def __delitem__(self, name):\n name = name.lower()\n newheaders = []\n for k, v in self._headers:\n if k.lower() <> name:\n newheaders.append((k, v))\n self._headers = newheaders"
] | [
"0.60041726",
"0.5952062",
"0.59352136",
"0.59297824",
"0.59085053",
"0.588155",
"0.58796203",
"0.5815924",
"0.5812585",
"0.56827",
"0.5642944",
"0.56334716",
"0.56306666",
"0.56273365",
"0.56152153",
"0.56103003",
"0.55797523",
"0.5561919",
"0.5546493",
"0.55092734",
"0.54695374",
"0.5468915",
"0.5452742",
"0.54387933",
"0.54205596",
"0.5417583",
"0.5416696",
"0.5414983",
"0.54144466",
"0.54095745"
] | 0.7097539 | 0 |
Sample a list of Fastq reads / read names | def sample(fastq: list, reads: int = None, replacement: bool = False):
if replacement:
sampled_reads = random.choices(fastq, k=reads)
else:
sampled_reads = random.sample(fastq, k=reads)
return sampled_reads | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_read_list(samfile):\n read_sampler = ReadSampler()\n for line in samfile:\n line = sam_utils.SamAlignment(line)\n vals = line.get_aligned_blocks()\n if len(vals) > 1:\n logging.info(\"Skipping gapped read %s %s\"%(line.QNAME, str(vals))) \n read_sampler.add_read(vals[0])\n return read_sampler",
"def sample(read_sampler, n, array, res=1.0, prng = np.random.RandomState()):\n for read in read_sampler.pull_reads(n, prng):\n map_read(array, read, res)",
"def sample(f, n):\n entries = list(SeqIO.parse(f, 'fasta'))\n for seqnum in range(n):\n loc = round(random.uniform(0, len(entries) - 1))\n entry = entries[loc] # get index of randomly-selected FASTA entry\n header = '>' + str(seqnum + 1) + '-' + entry.description # header\n print(header + '\\n' + str(entry.seq)) # print-out entire entry",
"def get_fastq_files(wildcards):\n return expand(os.path.join(fastq_dir, \"{sample}_{readpair}.fastq\"), readpair=[1, 2], **wildcards)",
"def get_fastq(wildcards):\n return units.loc[(wildcards.sample, wildcards.unit), [\"fq1\", \"fq2\"]].dropna()",
"def get_fastq(wildcards):\n fastqs = caseinfo.loc[(wildcards.sample, wildcards.unit), [\"fq1\", \"fq2\"]].dropna()\n if len(fastqs) == 2:\n return {\"R1\": fastqs.fq1, \"R2\": fastqs.fq2}\n return {\"R1\": fastqs.fq1, \"R2\": fastqs.fq2}",
"def get_sample(self, path, prefix) -> List:\n pass",
"def find_fast5s_from_ids_readdb(readdb, read_ids, read_dirs, recursive=False):\n for name, fast5 in parse_read_name_map_file(readdb, read_dirs, recursive=recursive):\n if name.split(\"_\")[0] in read_ids:\n yield name, fast5",
"def data_sample(complexe_list, taille):\n\n indices = random.sample(range(len(complexe_list)), taille)\n\n complex_file_names = [complexe_list[i] for i in indices]\n\n return(complex_file_names)",
"def samples():\n f = open(config['samples'], \"r\")\n samp=[]\n for line in f:\n samp.append(line.strip().split()[0])\n return samp",
"def fastq_reader(fastq):\n group_gen = grouper(fastq, 4)\n for record in group_gen:\n # drop the @ before the name and any text after a whitespace\n name = record[0].split(' ')[0][1:].strip()\n seq = record[1].strip()\n yield name, seq",
"def get_fastq(wildcards):\n if sample_is_single_end(wildcards.sample):\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\"]].dropna()\n else:\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\", \"fq2\"]].dropna()",
"def fixture_sample_single() -> dict:\n _sample = {\n \"fastq\": \"<( zcat read_R1.fastq.gz )\",\n \"single_end\": True,\n \"sample_id\": \"single\",\n }\n return _sample",
"def sample_names(self):\n with open(self.sample_sheet) as sample_sheet:\n for line in sample_sheet:\n if 'Sample_ID' in line:\n for subline in sample_sheet:\n data = subline.split(',')\n self.samples.append(data[0])",
"def fixture_samples(sample_single) -> Iterator[dict]:\n _samples = []\n sample_id = sample_single[\"sample_id\"]\n for number in range(3):\n sample = copy.deepcopy(sample_single)\n sample[\"sample_id\"] = \"_\".join([sample_id, str(number)])\n _samples.append(sample)\n return _samples",
"def getOneRead(self, f, q, s):\n probs = np.power(10, q / -10)\n bases = []\n f.seek(s)\n n = 0\n while True:\n b = f.read(1)\n if b == \"\\n\":\n continue\n if random.random() < probs[n]:\n b = random.choice('ACGT')\n else:\n b = self.getAllele(b, f.tell() - 1)\n bases.append(b)\n n += 1\n if n == self.readlen:\n break\n return bases",
"def getSubsampleList(vcfname, ss_count):\n\n vcf_o = pysam.VariantFile(vcfname)\n rec = next(vcf_o)\n vcf_o.close()\n lst = []\n for samp in rec.samples:\n lst.append(samp)\n return lst[:int(ss_count)]",
"def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")",
"def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n name, subreads = name_and_reads\n subreads_copy = copy.deepcopy(subreads)\n pad_reads(subreads_copy)\n yield name, subreads_copy",
"def create_read_list_paired(samfile):\n read_sampler = ReadSampler()\n while True: \n line1 = samfile.readline()\n line2 = samfile.readline()\n if not line2: \n break\n line1 = sam_utils.SamAlignment(line1)\n line2 = sam_utils.SamAlignment(line2)\n if line1.QNAME != line2.QNAME:\n raise ValueError(\"Unpaired read or read with more than one pair\\\n encountered. Check your input file. File must\\\n be sorted by read name, every read must have\\\n a single pair and each pair must have one\\\n mapping. %s %s\"%(line1.QNAME, line2.QNAME))\n try:\n read_sampler.add_read(get_paired_blocks(line1,line2))\n except ValueError as err:\n logging.error(\"Skipping pair %s\"%err)\n except RuntimeError as err:\n logging.error(\"Skipping pair %s\"%err)\n return read_sampler",
"def pull_reads(self, n, prng): \n if not self.sampling:\n self.convert_to_array()\n index = prng.randint(0, self.total, size=n)\n index = np.sort(index)\n return self.reads[index,:]",
"def readFastq(filename):\n\tsequences = []\n\tqualities = []\n\twith open(filename, 'r') as f:\n\t\twhile True: \n\t\t\tf.readline() # skip name line\n\t\t\tseq = f.readline().rstrip()\n\t\t\tf.readline() # skip place holder line \n\t\t\tq = f.readline().rstrip()\n\t\t\tif len(seq) ==0:\n\t\t\t\tbreak \n\t\t\tsequences.append(seq)\n\t\t\tqualities.append(q)\n\treturn sequences, qualities",
"def get_human_reads(percent, size, dir, isfastq):\n \n for i in range(0,int(size * percent)):\n seq = get_random_sequence(human_genome)\n pair = make_paired_end_reads(seq)\n \n global errr\n \n if errr:\n pair = introduce_errors(errr, pair)\n \n if isfastq:\n make_fastq(pair, dir + \"human\" + str(i+1), \"human\" + str(i+1))\n else:\n make_fasta(pair, dir + \"human\" + str(i+1), \"human\" + str(i+1))",
"def get_fw_reads(config, samples, p):\n files = []\n for sample in samples.keys():\n for unit in samples[sample].keys():\n if \"R1\" in samples[sample][unit].keys():\n r=\"R1\"\n else:\n r=\"se\"\n f = config[\"paths\"][\"results\"]+\"/intermediate/preprocess/{sample}_{unit}_{r}{p}.fastq.gz\".format(sample=sample,\n unit=unit, r=r, p=p)\n files.append(f)\n reads_string = \"\"\n for i, f in enumerate(files, start=1):\n reads_string += \"-reads{i} {f} \".format(i=i, f=f)\n return reads_string",
"def readFastq(filename):\n sequences = []\n qualities = []\n \n with open(filename) as fh:\n while True:\n fh.readline() # skip name line\n seq = fh.readline().rstrip() #read base sequence\n fh.readline() # skip placeholder line\n qual = fh.readline().rstrip() # base quality line\n if len(seq) == 0:\n break\n sequences.append(seq)\n qualities.append(qual)\n \n return sequences, qualities",
"def get_samplers(test, count):\n samplers = []\n for i in range(count):\n create = require(test.next_call_of(\"vkCreateSampler\"))\n require_equal(VK_SUCCESS, int(create.return_val))\n require_not_equal(0, create.int_device)\n require_not_equal(0, create.hex_pSampler)\n sampler = little_endian_bytes_to_int(require(create.get_write_data(\n create.hex_pSampler, NON_DISPATCHABLE_HANDLE_SIZE)))\n require_not_equal(0, sampler)\n samplers.append(sampler)\n return samplers",
"def create_sample_listing():\n entries = []\n for task_name, dataset in chain(MCBackgroundsSampleDictionaryUnordered, SignalMCSampleDictionaryUnordered, DataDictionaryMuonChannelUnordered):\n matching_output = [x for x in TaskDictionaryNameUnordered if x[0] == task_name]\n if len(matching_output) > 1:\n raise RuntimeError(\"More than 1 match for %s\" % task_name)\n if len(matching_output) == 0:\n print \"No match for task %s\" % task_name\n continue\n output_name = matching_output[0][1]\n entries.append(Sample(task_name, dataset, output_name))\n\n return entries",
"def prepare_fastq(Fastq_Root=\"2.Fastq/\", ):\n fastqs = glob.glob(Fastq_Root + \"*.fastq\")\n data = {}\n for fq in fastqs:\n s = os.path.split(fq)[1]\n s = s.replace(\".fastq\", \"\")\n if s.endswith(\"_1\"):\n sample = s.replace(\"_1\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][0] = fq\n if s.endswith(\"_2\"):\n sample = s.replace(\"_2\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][1] = fq\n if not s.endswith(\"_1\") and not s.endswith(\"_2\"):\n data[s] = [fq]\n return data",
"def samples(self):\n pass",
"def get_samples(self, sample_names):\n return [s for s in self.samples if s[SAMPLE_NAME_ATTR] in sample_names]"
] | [
"0.6420504",
"0.6299379",
"0.5943049",
"0.5809539",
"0.5767165",
"0.5739098",
"0.57060987",
"0.5696739",
"0.5689907",
"0.56789887",
"0.55646765",
"0.55259854",
"0.5509567",
"0.5490724",
"0.54807556",
"0.5477975",
"0.54732245",
"0.547267",
"0.5464326",
"0.54481435",
"0.54406804",
"0.5431611",
"0.5417391",
"0.53860706",
"0.53817165",
"0.5380757",
"0.5373318",
"0.537317",
"0.53657776",
"0.53471136"
] | 0.68244636 | 0 |
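A minimal usage sketch for the sample() helper in the record above. It assumes the reads have already been loaded into a list, here with Biopython's SeqIO; the file name reads.fastq is a hypothetical placeholder.

import random
from Bio import SeqIO

# Load the FASTQ records into memory so they can be passed to sample() (hypothetical input file).
read_list = list(SeqIO.parse("reads.fastq", "fastq"))

# Draw 100 reads without replacement, or with replacement when duplicate reads are acceptable.
subset = sample(read_list, reads=100, replacement=False)
bootstrap = sample(read_list, reads=100, replacement=True)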
Set item in nested dictionary | def set_nested_item(data_dict: dict, key_list: tuple or list, value):
reduce(getitem, key_list[:-1], data_dict)[key_list[-1]] = value
return data_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __setitem__(self, key, value):\n self.tree[key] = value",
"def visit_dict(self, sydict):\n self.current.update(sydict)",
"def set(cls, hierarchical_dict: dict, key: str, value: Any) -> None:\n # split according to '.'\n hierarchical_key = key.split(\".\")\n\n # go over the the dictionary according to the path, create the nodes that does not exist\n element = hierarchical_dict\n for key in hierarchical_key[:-1]:\n if key not in element:\n element[key] = {}\n element = element[key]\n\n # set the value\n element[hierarchical_key[-1]] = value",
"def set(self, name1, natl, item):\n if name1 not in self.data: self.data[name1] = {}\n self.data[name1][natl] = item",
"def _set_item(dic: dict, keys: list, value):\n\tdic = _get_item(dic, keys[:-1])\n\tdic[keys[-1]] = value",
"def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value",
"def _set(self, ikey, value):\n obj = self\n keys = ikey.split('.')\n for idx in range(0, len(keys)):\n key = keys[idx]\n if not obj.has_key(key):\n ckey = '.'.join(keys[idx:])\n nkey, nval = convert_dot_notation(ckey, value)\n if isinstance(obj, DotDict):\n super(DotDict, obj).__setitem__(nkey, nval)\n else:\n obj.__setitem__(nkey, nval)\n return\n if key != keys[-1]:\n try:\n obj = super(DotDict, obj).__getitem__(key)\n except:\n try:\n obj = obj[key]\n except:\n raise\n if not isinstance(obj, dict):\n msg = 'Cannot assign new value, internal obj is not dict'\n raise Exception(msg)\n if isinstance(obj, DotDict):\n super(DotDict, obj).__setitem__(key, value)\n else:\n obj.__setitem__(key, value)",
"def __setChildDict(self, child):\n \n d = self[self._name]\n d[child.getName()] = child.getDict()",
"def set_field(key, obj, val):\n\n o = obj\n subkeys = key.split('.')\n\n for subkey in subkeys[:-1]:\n if subkey not in o:\n o[subkey] = {}\n\n o = o[subkey]\n\n o[subkeys[-1]] = val",
"def _single_setitem(self, key, item):\n self._dict[key] = item",
"def __setitem__(self, key, item):\n self.set_field(key, item)",
"def __setitem__(self, key, val):\r\n super(Stack, self).__setitem__(key, val)\r\n\r\n # The 'meta' portion of the stack is a standar dict (not Stack)\r\n try:\r\n if isinstance(val, Stack) and val.stack_pos is \"stack_root\":\r\n val.parent = self\r\n val.key = key\r\n\r\n # This needs to be compacted and simplified.\r\n if self.stack_pos is \"stack_root\":\r\n val.stack_pos = \"data_root\"\r\n elif self.stack_pos is \"data_root\":\r\n val.stack_pos = \"filter\"\r\n elif self.stack_pos is \"filter\":\r\n val.stack_pos = \"x\"\r\n\r\n except AttributeError:\r\n pass",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def __setitem__(self, key, value):\n while self is not None:\n if key in self._dict:\n self._dict[key] = value\n return\n else:\n self = self.parent\n raise KeyError(\"%s was not declared\" % key)",
"def set_dict(self, dic): # -> None:\n ...",
"def set_upward(self, key, value):\n context = self.dicts[-1]\n for d in reversed(self.dicts):\n if key in d:\n context = d\n break\n context[key] = value",
"def __setitem__(self, path, value):\n\n path = self.__check_path__(path)\n\n # d - dict, p - path (keys sequence)\n def set_key(d, p):\n k = p[0]\n\n if len(p) == 1:\n d[k] = value\n else:\n if not isinstance(d.setdefault(k, self._factory()), dict):\n d[k] = self._factory()\n set_key(d[k], p[1:])\n\n set_key(self.__dict__, path)",
"def set_element(d, path, value, default_dict=None):\n # type: (Dict, Tuple, Any, Optional[Dict]) -> None\n if default_dict is None:\n default_dict = dict()\n\n if len(path) == 0:\n raise ValueError('Path length cant be 0')\n elif len(path) == 1:\n d[path[0]] = value\n else:\n DictUtil.set_element(d.setdefault(path[0], default_dict), path[1:], value, default_dict)",
"def __setattr__(self, key, value):\n if isinstance(value, DotDict) and key != '_parent':\n value.__dict__['_parent'] = weakref.proxy(self)\n super(DotDictWithAcquisition, self).__setattr__(key, value)",
"def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()",
"def test_deep_set_create(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.set_dict_key_value(mdict, \"K:L:M\", \"Q\")\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\"}},\n \"K\": {\"L\": {\"M\": \"Q\"}},\n },\n res,\n )",
"def set_data(self,key='',val=None):\n parent_itm = self._root\n if '.' in key:\n parent_itm = self.get_data(self.parent_key(key))\n itm_key = key.split('.')[-1]\n if itm_key:\n try: \n parent_itm[itm_key] = val\n except:\n try: \n parent_itm[int(itm_key)] = val # list case\n except:\n parent_itm.append(val) # append to list case",
"def _update(self, db_item, update_dict, unset=None, pull=None, push=None, push_list=None, pull_list=None):\n def _iterate_keys(k, db_nested, populate=True):\n k_list = k.split(\".\")\n k_item_prev = k_list[0]\n populated = False\n if k_item_prev not in db_nested and populate:\n populated = True\n db_nested[k_item_prev] = None\n for k_item in k_list[1:]:\n if isinstance(db_nested[k_item_prev], dict):\n if k_item not in db_nested[k_item_prev]:\n if not populate:\n raise DbException(\"Cannot set '{}', not existing '{}'\".format(k, k_item))\n populated = True\n db_nested[k_item_prev][k_item] = None\n elif isinstance(db_nested[k_item_prev], list) and k_item.isdigit():\n # extend list with Nones if index greater than list\n k_item = int(k_item)\n if k_item >= len(db_nested[k_item_prev]):\n if not populate:\n raise DbException(\"Cannot set '{}', index too large '{}'\".format(k, k_item))\n populated = True\n db_nested[k_item_prev] += [None] * (k_item - len(db_nested[k_item_prev]) + 1)\n elif db_nested[k_item_prev] is None:\n if not populate:\n raise DbException(\"Cannot set '{}', not existing '{}'\".format(k, k_item))\n populated = True\n db_nested[k_item_prev] = {k_item: None}\n else: # number, string, boolean, ... or list but with not integer key\n raise DbException(\"Cannot set '{}' on existing '{}={}'\".format(k, k_item_prev,\n db_nested[k_item_prev]))\n db_nested = db_nested[k_item_prev]\n k_item_prev = k_item\n return db_nested, k_item_prev, populated\n\n updated = False\n try:\n if update_dict:\n for dot_k, v in update_dict.items():\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item)\n dict_to_update[key_to_update] = v\n updated = True\n if unset:\n for dot_k in unset:\n try:\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item, populate=False)\n del dict_to_update[key_to_update]\n updated = True\n except Exception:\n pass\n if pull:\n for dot_k, v in pull.items():\n try:\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item, populate=False)\n except Exception:\n continue\n if key_to_update not in dict_to_update:\n continue\n if not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot pull '{}'. Target is not a list\".format(dot_k))\n while v in dict_to_update[key_to_update]:\n dict_to_update[key_to_update].remove(v)\n updated = True\n if pull_list:\n for dot_k, v in pull_list.items():\n if not isinstance(v, list):\n raise DbException(\"Invalid content at pull_list, '{}' must be an array\".format(dot_k),\n http_code=HTTPStatus.BAD_REQUEST)\n try:\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item, populate=False)\n except Exception:\n continue\n if key_to_update not in dict_to_update:\n continue\n if not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot pull_list '{}'. Target is not a list\".format(dot_k))\n for single_v in v:\n while single_v in dict_to_update[key_to_update]:\n dict_to_update[key_to_update].remove(single_v)\n updated = True\n if push:\n for dot_k, v in push.items():\n dict_to_update, key_to_update, populated = _iterate_keys(dot_k, db_item)\n if isinstance(dict_to_update, dict) and key_to_update not in dict_to_update:\n dict_to_update[key_to_update] = [v]\n updated = True\n elif populated and dict_to_update[key_to_update] is None:\n dict_to_update[key_to_update] = [v]\n updated = True\n elif not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot push '{}'. 
Target is not a list\".format(dot_k))\n else:\n dict_to_update[key_to_update].append(v)\n updated = True\n if push_list:\n for dot_k, v in push_list.items():\n if not isinstance(v, list):\n raise DbException(\"Invalid content at push_list, '{}' must be an array\".format(dot_k),\n http_code=HTTPStatus.BAD_REQUEST)\n dict_to_update, key_to_update, populated = _iterate_keys(dot_k, db_item)\n if isinstance(dict_to_update, dict) and key_to_update not in dict_to_update:\n dict_to_update[key_to_update] = v.copy()\n updated = True\n elif populated and dict_to_update[key_to_update] is None:\n dict_to_update[key_to_update] = v.copy()\n updated = True\n elif not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot push '{}'. Target is not a list\".format(dot_k),\n http_code=HTTPStatus.CONFLICT)\n else:\n dict_to_update[key_to_update] += v\n updated = True\n\n return updated\n except DbException:\n raise\n except Exception as e: # TODO refine\n raise DbException(str(e))",
"def set(self, key, value):\r\n self.set_many({key: value})",
"def set_by_path(data: Dict[str, T], path: Sequence[str], value: T):\n get_by_path(data, path[:-1])[path[-1]] = value",
"def visit_record(self, syrecord):\n for other_key, other_value in syrecord.items():\n try:\n getattr(self.current, other_key).update(other_value)\n except KeyError:\n setattr(self.current, other_key, other_value)",
"def set_in_dict(data_dict, map_list, value):\n target = get_from_dict(data_dict, map_list[:-1])\n if isinstance(target, dict):\n if len(target[map_list[-1]]) == 0 and isinstance(value, str):\n target[map_list[-1]] = value\n else:\n target[map_list[-1]]['value'] = value",
"def setItem(self,row,column,value):\n data = self.data\n if row not in data:\n data[row] = {}\n data[row][column] = value\n self.hasChanged = True",
"def assign(self, key, value):\n key_split = key.split('.')\n cur_dict = self\n for k in key_split[:-1]:\n try:\n cur_dict = cur_dict[k]\n except KeyError:\n cur_dict[k] = self.__class__() # so that derived classes\n # remain true to type\n cur_dict = cur_dict[k]\n cur_dict[key_split[-1]] = value",
"def set_key(self, key, value):\n if '.' in key:\n key, remainder = key.split('.', 1)\n try:\n self[key].set_key(remainder, value)\n except KeyError:\n self[key] = AttrDict()\n self[key].set_key(remainder, value)\n except AttributeError:\n if self[key] is None: # If the value is None, we replace it\n self[key] = AttrDict()\n self[key].set_key(remainder, value)\n # Else there is probably something there, and we don't just\n # want to overwrite so stop and warn the user\n else:\n raise KeyError('Cannot set nested key on non-dict key.')\n else:\n self[key] = value"
] | [
"0.66427284",
"0.65705705",
"0.6513432",
"0.650163",
"0.6474375",
"0.64307684",
"0.6408831",
"0.63907933",
"0.63844234",
"0.6381398",
"0.6353006",
"0.63405037",
"0.6324578",
"0.63165903",
"0.63117933",
"0.63100886",
"0.63035226",
"0.6276476",
"0.62579644",
"0.6254745",
"0.6222477",
"0.62025076",
"0.61990833",
"0.6189454",
"0.6162546",
"0.61621433",
"0.61485016",
"0.61372495",
"0.6126555",
"0.611567"
] | 0.77281743 | 0 |
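An illustrative sketch of the set_nested_item() helper from the record above. The helper relies on functools.reduce and operator.getitem, which its snippet does not import; the configuration dictionary here is made up for the example.

from functools import reduce
from operator import getitem

config = {"database": {"connection": {"host": "localhost", "port": 5432}}}
# reduce(getitem, ...) walks down to config["database"]["connection"], then "port" is overwritten in place.
set_nested_item(config, ["database", "connection", "port"], 5433)
assert config["database"]["connection"]["port"] == 5433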
returns a list of axes of a variable mv | def allAxes( mv ):
if mv is None: return None
return mv.getAxisList() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_axes(self) -> VGroup:\n return self.axes",
"def get_axes(self) -> VGroup:\n return self.axes",
"def axes(self):\n return self._axes",
"def axes(self):\n return self._axes",
"def axes(*x: Iterable[int]):\n return [_ti_core.Axis(i) for i in x]",
"def axes(self) -> np.ndarray: # array[Axes]\n return self._axes",
"def axesnames(self):\n return self._axesnames",
"def common_axes( mv1, mv2 ):\n axes1 = [a[0] for a in mv1.getDomain()]\n axes2 = [a[0] for a in mv2.getDomain()]\n if len(axes1)!=len(axes2):\n print \"ERROR. common_axes requires same number of axes in\",mv1,\" and\",mv2\n return None\n axes3 = []\n for i in range(len(axes1)):\n axes3.append(common_axis( axes1[i], axes2[i] ))\n return axes3",
"def _get_axes_numbers(self, axes):\n if axes is None:\n return [0, 1]\n\n if isinstance(axes, str):\n return [self._get_axis_number(axes)]\n elif hasattr(axes, '__len__'):\n return [self._get_axis_number(ax) for ax in axes]\n return [axes]",
"def getaxeslist(pidevice, axes):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = pidevice.axes if axes is None else axes\n if not axes:\n return []\n if not isinstance(axes, (list, set, tuple)):\n axes = [axes]\n return list(axes) # convert tuple to list",
"def get_data(self):\n return [self.axes]",
"def axesNames(self, data, info):\n return []",
"def coordAxis(bases):\n \n axis_of_coord = []\n for ax,b in enumerate(bases):\n if np.isscalar(b):\n axis_of_coord.append(ax) # Singleton scalar\n else:\n for i in range(b.nd): # For each coordinate represented by this factor\n axis_of_coord.append(ax)\n \n return axis_of_coord",
"def axes_active(self) -> np.ndarray: # array[Axes]\n return self.axes.flat[:self.n_plots]",
"def twin_axes (self):\n return self._twin_axes",
"def feature_axes(self):\n raise NotImplementedError()",
"def _generate_axes_(self):\n\n return AxesTuple(self._axis(i) for i in range(self.ndim))",
"def mesh_axes(mesh) :\n \n if (mesh.dimension() == 1) :\n # for 1D, we take the cell center points\n x = np.zeros(mesh.number_cells_x())\n x[0] = mesh.dx(0) * 0.5\n for i in range(0, mesh.number_cells_x()-1) :\n x[i + 1] = x[i] + 0.5*(mesh.dx(i) + mesh.dx(i+1))\n return x \n \n else :\n # for 2D, we take the mesh edges\n x = np.zeros(mesh.number_cells_x()+1)\n y = np.zeros(mesh.number_cells_y()+1)\n for i in range(0, mesh.number_cells_x()) :\n x[i + 1] = x[i] + mesh.dx(i)\n for j in range(0, mesh.number_cells_y()) :\n y[j + 1] = y[j] + mesh.dy(j)\n return (x, y)",
"def naxes(self, world=True):\n return _coordsys.coordsys_naxes(self, world)",
"def get_axes(self, model):\n is_failed = True\n\n check_offt(self)\n is_failed = True\n ihat = None\n yhat = None\n zhat = None\n\n eid = self.eid\n (nid1, nid2) = self.node_ids\n node1 = model.nodes[nid1]\n node2 = model.nodes[nid2]\n xyz1 = node1.get_position()\n xyz2 = node2.get_position()\n\n elem = model.elements[eid]\n pid_ref = elem.pid_ref\n if pid_ref is None:\n pid_ref = model.Property(elem.pid)\n assert not isinstance(pid_ref, integer_types), elem\n\n is_failed, (wa, wb, ihat, yhat, zhat) = self.get_axes_by_nodes(\n model, pid_ref, node1, node2, xyz1, xyz2, model.log)\n return is_failed, (wa, wb, ihat, yhat, zhat)",
"def princ_axes(self):\r\n # get coordinates of mesh\r\n coords = BoundaryMesh(self.mesh,\"exterior\",True).coordinates()\r\n\r\n # get distances\r\n dist = np.sqrt(np.einsum('ij->i', np.square(coords)))\r\n\r\n # get maximal value\r\n maxind = np.argmax(dist)\r\n maxdist = dist[maxind]\r\n\r\n # get minimal value\r\n minind = np.argmin(dist)\r\n mindist = dist[minind]\r\n\r\n # find coordinates of maximal and minimal points\r\n maxax = coords[maxind, :]\r\n minax = coords[minind, :]\r\n\r\n # get the cross product of these vectors,\r\n # which is the ideal mid-size axis\r\n idealax = np.cross(maxax,minax)\r\n\r\n # get the dot product of this ideal axis with the coordinates,\r\n # take the absolute value, and find the index of the maximum\r\n secind = np.argmax(np.abs(np.einsum('j,ij->i',idealax,coords)))\r\n\r\n # get the second-axis distance\r\n secdist = dist[secind]\r\n\r\n return([maxdist, secdist, mindist], [\"a\", \"b\", \"c\"])",
"def listInputDeviceAxes(*args, **kwargs)->List[AnyStr]:\n pass",
"def process_custom_axes(axis_names):\n return axis_names.strip().strip(\"'\").strip('\"').split(',')",
"def get_machinekit_position():\n return settings.controller.axes_position()",
"def axes_inactive(self) -> np.ndarray:\n return self.axes.flat[self.n_plots:]",
"def returnJointMovers(self):\n\n name = self.groupBox.title()\n\n # select global movers\n cmds.select(name + \"*_mover\")\n globalMovers = cmds.ls(sl=True)\n\n # select offset movers\n cmds.select(name + \"*_mover_offset\")\n offsetMovers = cmds.ls(sl=True)\n\n # mesh movers\n cmds.select(name + \"*_mover_geo\")\n geoMovers = cmds.ls(sl=True)\n\n return [globalMovers, offsetMovers, geoMovers]",
"def _axes(self, X):\n \n return np.arange(len(X.shape) - 1) + 1",
"def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.deconv_output_dim(ax.length,\n self.filter_shape[name],\n pad_int[name],\n self.strides[name],\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n\n return output_axes",
"def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.conv_output_dim(\n ax.length,\n self.filter_spatial_shape[name],\n pad_int[name],\n self.strides[name],\n False,\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n return output_axes",
"def extract_labels_xyz(scene: \"Scenemaker\") -> List[Tuple[int, np.ndarray]]:\r\n objects = utils.select_collection(scene.target_collection)\r\n boxes_list = []\r\n\r\n for obj in objects:\r\n objclass = obj.name.split(\".\")[0]\r\n xyz = np.array(obj.dimensions)\r\n boxes_list.append((scene.name2num[objclass], xyz))\r\n\r\n return boxes_list"
] | [
"0.6138203",
"0.6138203",
"0.6132842",
"0.6132842",
"0.6125584",
"0.6117667",
"0.6023415",
"0.59184104",
"0.5736616",
"0.5717504",
"0.57096046",
"0.55749345",
"0.54218215",
"0.5417657",
"0.54154396",
"0.5380405",
"0.52959144",
"0.5287406",
"0.52542454",
"0.5213043",
"0.5198154",
"0.51901317",
"0.51627874",
"0.5160633",
"0.51588225",
"0.5157839",
"0.5156403",
"0.51516575",
"0.51277155",
"0.51138663"
] | 0.7132127 | 0 |
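A hedged usage sketch for allAxes() above, assuming the CDAT cdms2 library is available; the NetCDF file name and variable name are hypothetical.

import cdms2

f = cdms2.open("example.nc")    # hypothetical NetCDF file
tas = f("tas")                  # hypothetical variable name
axes = allAxes(tas)             # list of axis objects, e.g. time, latitude, longitude
if axes is not None:
    print([ax.id for ax in axes])
f.close()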
Sometimes we get time units which aren't compatible with cdtime. This function will (try to) fix them. The input argument is a string, e.g. "months since Jan 1979" and the return value is another string, e.g. "months since 1979-01-01 00:00:00". | def fix_time_units( timeunits ):
imon = timeunits.find("months since ")
if imon==0:
since="months since "
else:
iday = timeunits.find("days since ")
if iday==0:
since="days since "
else:
ihour = timeunits.find("hours since ")
if ihour==0:
since="hours since "
else:
return timeunits
date = timeunits[len(since):]
date_is_bc = False
if date.find('B.C.')>0: # I've seen one example like this!
# B.C. fixup isn't tested!
date_is_bc = True
# e.g. "January 1, 4713 B.C." Note exactly one space before B. And not BC etc.
        matchobject = re.search( r"\d+\sB\.C\.", date )  # not tested
if matchobject is None:
return timeunits
pre_yr = matchobject.start()
pre_bc = matchobject.end() - 5 #2 spaces before B.C. would need -6 or another re
yr_bc = date[pre_yr:pre_bc]
        yr_ad = str(1 - int(yr_bc))
# The parser won't understand negative years, but cdtime will. So don't
# fix the date quite yet...
date = date[0:pre_bc]
new_date = str( dateutil.parser.parse( date, default=datetime(1850,1,1,0,0)) )
if date_is_bc:
pre_yr = new_date.find(yr_bc)
        new_date = new_date[0:pre_yr]+yr_ad+new_date[pre_yr+len(yr_bc):]  # slice past the B.C. year so the rest of the date is kept
return since+new_date | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(string):\n units = {'s':1, 'm':60, 'h':60*60, 'd':24*60*60, 'M':30*24*60*60}\n string = string.replace(' ','')\n p = re.compile('(\\d+)\\s*(\\w+)')\n num, unit = p.match(string).groups()\n num = float(num)\n return num * units[unit]",
"def clean_unit(unit):\n return 'M' if unit.lower() == 'month' else unit[0].lower()",
"def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)",
"def tidy_time_string(time):\n\n # TODO - :return date_range: Where date_status is \"centred\", date_range is a tuple (`first_date`, `last_date`) of\n # `datetime64[D]` objects. Otherwise will return a tuple of Not a Time objects.\n # TODO - warnings/logging\n # TODO - change date offsets to rounding using MonthEnd/MonthBegin\n # https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n # TODO - allow mulitple `date_status`es (circa and centred).\n\n date_status = 'not_converted'\n date = pd.NaT\n original_time_string = str(time)\n\n # IS THE STRING ALREADY PARSABLE AS AN EXACT TIME:\n if '-' not in time: # to avoid accidentally parsing ranges as exact times. e.g. \"25-27 june\".\n\n try:\n date = pd.to_datetime(time)\n date_status = 'exact'\n return date, date_status\n except:\n pass\n\n # IS THE STRING \"CIRCA\" SOMETHING:\n if (('c' in time) or (('[' in time) or (']' in time))):\n if 'c' in time: # contains 'c' (not in a month, e.g. Dec), so \" c. \", \"c \", t\n time = re.sub(r'(?<!\\w)(c[.]?\\s?)', '', time)\n\n if ('[' in time) and (']' in time): # contains square brackets\n\n # We don't attempt to fix multiple pairs of brackets with one missing bracket\n num_sq_brackets = time.count(']') + time.count(']')\n if num_sq_brackets >= 3 and (num_sq_brackets % 2) != 0:\n logging.info(\"Cannot fix multiple pairs of brackets with one missing bracket.\")\n return date, date_status\n\n reg2 = re.findall(r'\\[(.*?)\\]', time)\n if reg2 is not None:\n # remove square brackets\n for in_brackets in reg2:\n time = time.replace(f\"[{in_brackets}]\", in_brackets)\n elif '[' in time:\n time = time.replace('[', '')\n elif ']' in time:\n time = time.replace(']', '')\n\n time = time.strip()\n\n try:\n date = pd.to_datetime(time)\n date_status = 'circa'\n return date, date_status\n except:\n pass\n\n # IS THE STRING A RANGE OF DATES? WHICH WE CAN AVERAGE OR CENTRE:\n # We are assuming an '[1,2]\\d{2}0)s' pattern (e.g. 
1970s, 1980s, 1730s, 1900s) implies a decade.\n if ('s' in time) or ('-') in time:\n if ('s' in time) and ('-' not in time):\n reg3 = re.findall(r'([1,2]\\d{2}0)s', time)\n for reg in reg3:\n time = time.replace(f\"{reg}s\", str(int(reg) + 5)) # centre is 5 years later\n date = pd.to_datetime(time, format='%Y')\n date_status = 'centred'\n\n elif ('-' in time):\n if time.count('-') > 1:\n print('many hyphens', original_time_string)\n # Not attempting to deal with multiple hyphens at the moment.\n pass\n else:\n time = re.sub(r'\\s?-\\s?', '-', time)\n reg4 = re.match(r'(.*?)-(.*)$', time)\n\n first = time.replace(reg4.group(0), reg4.group(1))\n last = time.replace(reg4.group(0), reg4.group(2))\n\n if 's' in first:\n reg5 = re.findall(r'([1,2]\\d{2}0)s', time)\n for reg in reg5:\n first = first.replace(f\"{reg}s\", reg)\n\n if not re.search(r'[1,2]\\d{3}', first): # no year:\n if not re.search(r'\\d+', first): # no days in `first` => varying month:\n # Take the year from last and add it on\n reg5 = re.findall(r'[1,2]\\d{3}', last)\n first = f\"{first} {reg5[0]}\"\n else: # days in `first` => varying days:\n # Take the month and year from last and add it on.\n reg6 = re.findall(r'\\w+ [1,2]\\d{3}', last)\n if len(reg6) > 0:\n first = f\"{first} {reg6[0]}\"\n\n if 's' in last:\n reg7 = re.findall(r'([1,2]\\d{2}0)s', time)\n for reg in reg7:\n last = last.replace(f\"{reg}s\", str(int(reg) + 10)) # end is 10 years later.\n\n if re.match(r'\\w+\\s\\d+', last): # assuming month and year\n time_delta = pd.tseries.offsets.DateOffset(months=1)\n elif re.match(r'[a-zA-Z]', last): # assuming it's a month\n time_delta = pd.tseries.offsets.DateOffset(months=1)\n elif re.match(r'[1,2]\\d{3}', last): # assuming it's a year\n time_delta = pd.tseries.offsets.DateOffset(months=12)\n elif re.match(r'\\d+', last).span()[1] - re.match(r'\\d+', last).span()[0] <= 2: # assuming it's a day:\n time_delta = pd.tseries.offsets.DateOffset(months=0)\n else:\n logging.info(f\"Can't guess format of {last} from {original_time_string}\")\n return date, date_status\n\n try:\n last = pd.to_datetime(last)\n except:\n logging.info(f\"Could not parse `last` ({last}) into `datetime` format.\")\n\n return date, date_status\n\n last = last + time_delta\n\n try:\n first = pd.to_datetime(first)\n except:\n logging.info(f\"Could not parse `first` ({first}) into `datetime` format.\")\n\n return date, date_status\n\n centre_date = first + (last - first) / 2\n date_status = 'centred'\n return centre_date, date_status\n\n return date, date_status",
"def parse_time(s):\n if s[-1].lower() in secs:\n return int(s[:-1]) * secs[s[-1].lower()]\n else:\n return int(s)",
"def normalize_time_string(duration: str) -> str:\n no_ws_duration = duration.replace(' ', '')\n duration_split = [el for el in re.split(r'(\\D+)', no_ws_duration) if el]\n\n if len(duration_split) != 2:\n raise ValueError(\n f\"Invalid duration string: '{duration}'. Expected one value (as integer in string) and one unit, such as '1 hour'.\"\n )\n\n value = duration_split[0]\n unit = duration_split[1]\n\n first_letter_of_unit = unit[0]\n return value + first_letter_of_unit",
"def string_to_timedelta(time_string: str) -> relativedelta:\n\n replace_dict = {\"years\": \"yrs\",\n \"yrs\": \"y\",\n \"months\": \"mon\",\n \"mon\": \"m\",\n \"days\": \"d\",\n \"hours\": \"H\",\n \"h\": \"H\",\n \"minutes\": \"min\",\n \"min\": \"M\",\n \"seconds\": \"sec\",\n \"sec\": \"S\",\n \"s\": \"S\",\n \" \": \"\"}\n\n for old in replace_dict.keys():\n new = replace_dict[old]\n time_string = time_string.replace(old, new)\n\n time_units = {\"y\": 0, \"m\": 0, \"d\": 0, \"H\": 0, \"M\": 0, \"S\": 0}\n\n # Extract all different time units from string\n for char in time_string:\n if char not in list(time_units):\n if not char.isdigit():\n raise ValueError(\"Invalid character in timedelta string.\")\n continue\n\n char_idx = time_string.find(char)\n time_units[char] = int(time_string[:char_idx])\n\n target_substring = time_string[:char_idx + 1]\n time_string = time_string.replace(target_substring, \"\")\n\n timedelta = relativedelta(years=time_units[\"y\"],\n months=time_units[\"m\"],\n days=time_units[\"d\"],\n hours=time_units[\"H\"],\n minutes=time_units[\"M\"],\n seconds=time_units[\"S\"])\n return timedelta",
"def replace_timestr(t):\n if isinstance(t, float):\n return None\n if '-' == t:\n return None\n return t",
"def sanitize(time_string): # Fix non-uniformity in the athletes data to enable sorting\n if '-' in time_string:\n splitter = '-'\n (mins, secs) = time_string.split(splitter)\n elif ':' in time_string:\n splitter = ':'\n (mins, secs) = time_string.split(splitter)\n else:\n return time_string\n return '{0}.{1}'.format(mins, secs)",
"def _get_time(string):\n string = string[0:7] # Drop day\n return string.replace(\"-\", \"\")",
"def test_ambiguous_m(self):\n with self.assertRaises(ValueError):\n util.parse_relative_time_string(\"+3m\")",
"def parse_sfx_relative_time(input_time: str) -> int:\n match = re.match(r\"-([0-9]+)([a-zA-z])\", input_time)\n if match:\n unit = match.group(2)\n if unit in SFX_TIME_MULT:\n delta = int(match.group(1)) * SFX_TIME_MULT[unit]\n return int(time.time()) * 1000 - delta\n allowed = \", \".join(SFX_TIME_MULT.keys())\n print(f'ERROR: SignalFx time syntax accepts only {allowed} time units. Provided: {unit}.')\n raise ValueError(f\"{input_time} is not a SignalFx relative time.\")",
"def sanitize(time_string):\n if '-' in time_string:\n splitter = '-'\n\n elif ':' in time_string:\n splitter = ':'\n\n else:\n return (time_string)\n \n (mins, secs) = time_string.split(splitter)\n\n return (mins + '.' + secs)",
"def refine_date(c):\n return strip_some_punct(c)",
"def _duration_from_string() -> str:\n return textwrap.dedent(\n '''\\\n _DURATION_RE = re.compile(\n r'^(?P<sign>\\\\+|-)?P'\n r'((?P<years>(0|[1-9][0-9]*)(\\.[0-9]+)?)Y)?'\n r'((?P<months>(0|[1-9][0-9]*)(\\.[0-9]+)?)M)?'\n r'((?P<weeks>(0|[1-9][0-9]*)(\\.[0-9]+)?)W)?'\n r'((?P<days>(0|[1-9][0-9]*)(\\.[0-9]+)?)D)?'\n r'(T'\n r'((?P<hours>(0|[1-9][0-9]*)(\\.[0-9]+)?)H)?'\n r'((?P<minutes>(0|[1-9][0-9]*)(\\.[0-9]+)?)M)?'\n r'(((?P<seconds>0|[1-9][0-9]*)(\\.(?P<fraction>[0-9]+))?)S)?'\n r')?$')\n\n\n def _duration_from_string(text: str) -> datetime.timedelta:\n \"\"\"\n parses the duration from the string in ISO 8601 format.\n\n Following C++ chrono library, the following units are counted as:\n\n * years as 365.2425 days (the average length of a Gregorian year),\n * months as 30.436875 days (exactly 1/12 of years) and\n * weeks as 7 days.\n\n :param text: string to be parsed\n :return: duration\n :raise:\n ValueError if the string could not be parsed,\n ValueError if the fraction precision is higher than microseconds\n OverflowError if the duration does not fit into datetime.timedelta\n\n\n >>> _duration_from_string('P10Y')\n datetime.timedelta(3652, 36720)\n\n >>> _duration_from_string('P1M')\n datetime.timedelta(30, 37746)\n\n >>> _duration_from_string('P1W')\n datetime.timedelta(7)\n\n >>> _duration_from_string('P1D')\n datetime.timedelta(1)\n\n >>> _duration_from_string('PT1H1M1S')\n datetime.timedelta(0, 3661)\n\n >>> _duration_from_string('PT1H1M1.1S')\n datetime.timedelta(0, 3661, 100000)\n\n >>> _duration_from_string('PT')\n datetime.timedelta(0)\n\n >>> _duration_from_string('P1.1Y1.1M1.1W1.1DT1.1H1.1M1.1S')\n datetime.timedelta(444, 8114, 900000)\n\n >>> _duration_from_string('PT0.000001S')\n datetime.timedelta(0, 0, 1)\n\n >>> _duration_from_string('PT1.000S')\n datetime.timedelta(0, 1)\n\n >>> _duration_from_string('-P1D')\n datetime.timedelta(-1)\n\n \"\"\"\n match = _DURATION_RE.match(text)\n\n if not match:\n raise ValueError(\n 'Failed to match the duration: {!r}'.format(\n text))\n\n sign_grp = match.group('sign')\n if not sign_grp or sign_grp == '+':\n sign = 1\n else:\n sign = -1\n\n years_grp = match.group('years')\n years = float(years_grp) if years_grp else 0.0\n\n months_grp = match.group('months')\n months = float(months_grp) if months_grp else 0.0\n\n weeks_grp = match.group('weeks')\n weeks = float(weeks_grp) if weeks_grp else 0.0\n\n days_grp = match.group('days')\n days = float(days_grp) if days_grp else 0.0\n\n hours_grp = match.group('hours')\n hours = float(hours_grp) if hours_grp else 0.0\n\n minutes_grp = match.group('minutes')\n minutes = float(minutes_grp) if minutes_grp else 0.0\n\n seconds_grp = match.group('seconds')\n seconds = int(seconds_grp) if seconds_grp else 0\n\n fraction_grp = match.group('fraction')\n if not fraction_grp:\n microseconds = 0\n\n elif len(fraction_grp) > 6:\n raise ValueError(\n ('Precision only up to microseconds supported, '\n 'but got: {}').format(text))\n\n else:\n stripped = fraction_grp.lstrip('0')\n if stripped:\n count = int(stripped)\n order = 6 - len(fraction_grp)\n microseconds = count * (10 ** order)\n else:\n microseconds = 0\n try:\n return sign * datetime.timedelta(\n days=years * 365.2425 + months * 30.436875 + weeks * 7 + days,\n seconds=seconds,\n minutes=minutes,\n hours=hours,\n microseconds=microseconds)\n\n except OverflowError as err:\n raise OverflowError(\n 'Creating a timedelta overflowed from: {!r}'.format(\n text)) from err''')",
"def parse_input(s):\n if isinstance(s, six.integer_types):\n s = str(s)\n elif not isinstance(s, six.string_types):\n raise ValueError(s)\n\n original = s\n\n if s[-1:] == 'L':\n s = s[:-1]\n\n sign = {'-': -1, '=': 0, '+': 1}.get(s[0], None)\n if sign is not None:\n s = s[1:]\n\n ts = 0\n for unit in _SORTED_UNITS:\n pos = s.find(unit[0])\n if pos == 0:\n raise ValueError(original)\n elif pos > 0:\n # If we find a unit letter, we're dealing with an offset. Default\n # to positive offset if a sign wasn't specified.\n if sign is None:\n sign = 1\n ts += int(s[:pos]) * __timedelta_millis(unit[1])\n s = s[min(len(s), pos + 1):]\n\n if s:\n ts += int(s)\n\n return date_from_utc_ts(ts) if not sign else \\\n utc() + sign * delta(milliseconds=ts)",
"def compact_timesince(timesince):\n\n # Replace 'an hour', 'ago'.\n timesince = timesince.replace('an hour', '1h').replace('a minute ago', '1m').replace('ago', '')\n\n # Replace long words with letters. (2 days, 3 hours -> 2 d, 3 h)\n timesince = timesince.replace('seconds', 's').replace('second', 's')\n timesince = timesince.replace('minutes', 'm').replace('minute', 'm')\n timesince = timesince.replace('hours', 'h').replace('hour', 'h')\n timesince = timesince.replace('days', 'd').replace('day', 'd')\n timesince = timesince.replace('weeks', 'w').replace('week', 'w')\n timesince = timesince.replace('months', 'mo').replace('month', 'mo')\n timesince = timesince.replace('years', 'y').replace('year', 'y')\n\n # Remove space between digit and unit. (2 d, 3h -> 2d, 3h)\n timesince = timesince.replace('\\xa0', '')\n\n # Take only the first, usually interesting part. (2d, 3h -> 2d)\n timesince = timesince.split(',', 1)[0]\n return timesince",
"def converttime(time, currentformat, newformat):\n\n # Define conversion dictionary\n conversions = {\n \"milliseconds\": {\n \"milliseconds\": \"time\",\n \"seconds\": \"time / 1000\",\n \"minutes\": \"time / 1000 / 60\",\n \"hours\": \"time / 1000 / 60 / 60\",\n \"days\": \"time / 1000 / 60 / 60 / 24\",\n \"weeks\": \"time / 1000 / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 1000 / 60 / 60 / 24 / 14\",\n \"years\": \"time / 1000 / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 1000 / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 1000 / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 1000 / 60 / 60 / 24 / 365 / 1000\"\n },\n \"seconds\": {\n \"milliseconds\": \"time * 1000\",\n \"seconds\": \"time\",\n \"minutes\": \"time / 60\",\n \"hours\": \"time / 60 / 60\",\n \"days\": \"time / 60 / 60 / 24\",\n \"weeks\": \"time / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 60 / 24 / 14\",\n \"years\": \"time / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 60 / 24 / 365 / 1000\"\n },\n \"minutes\": {\n \"milliseconds\": \"time * 60 * 1000\",\n \"seconds\": \"time * 60\",\n \"minutes\": \"time\",\n \"hours\": \"time / 60\",\n \"days\": \"time / 60 / 24\",\n \"weeks\": \"time / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 24 / 14\",\n \"years\": \"time / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 24 / 365 / 1000\"\n },\n \"hours\": {\n \"milliseconds\": \"time * 60 * 60 * 1000\",\n \"seconds\": \"time * 60 * 60\",\n \"minutes\": \"time * 60\",\n \"hours\": \"time\",\n \"days\": \"time / 24\",\n \"weeks\": \"time / 24 / 7\",\n \"fortnights\": \"time / 24 / 14\",\n \"years\": \"time / 24 / 365\",\n \"decades\": \"time / 24 / 365 / 10\",\n \"centuries\": \"time / 24 / 365 / 100\",\n \"millenniums\": \"time / 24 / 365 / 1000\"\n },\n \"days\": {\n \"milliseconds\": \"time * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 24 * 60 * 60\",\n \"minutes\": \"time * 24 * 60\",\n \"hours\": \"time * 24\",\n \"days\": \"time\",\n \"weeks\": \"time / 7\",\n \"fortnights\": \"time / 14\",\n \"years\": \"time / 365\",\n \"decades\": \"time / 365 / 10\",\n \"centuries\": \"time / 365 / 100\",\n \"millenniums\": \"time / 365 / 1000\"\n },\n \"weeks\": {\n \"milliseconds\": \"time * 7 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 7 * 24 * 60 * 60\",\n \"minutes\": \"time * 7 * 24 * 60\",\n \"hours\": \"time * 7 * 24\",\n \"days\": \"time * 7\",\n \"weeks\": \"time\",\n \"fortnights\": \"time / 2\",\n \"years\": \"time / 52\",\n \"decades\": \"time / 52 / 10\",\n \"centuries\": \"time / 52 / 100\",\n \"millenniums\": \"time / 52 / 1000\"\n },\n \"fortnights\": {\n \"milliseconds\": \"time * 14 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 14 * 24 * 60 * 60\",\n \"minutes\": \"time * 14 * 24 * 60\",\n \"hours\": \"time * 14 * 24\",\n \"days\": \"time * 14\",\n \"weeks\": \"time * 2\",\n \"fortnights\": \"time\",\n \"years\": \"time / 26\",\n \"decades\": \"time / 26 / 10\",\n \"centuries\": \"time / 26 / 100\",\n \"millenniums\": \"time / 26 / 1000\"\n },\n \"years\": {\n \"milliseconds\": \"time * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 256 * 24 * 60\",\n \"hours\": \"time * 256 * 24\",\n \"days\": \"time * 256\",\n \"weeks\": \"time * 52\",\n \"fortnights\": \"time * 26\",\n \"years\": \"time\",\n \"decades\": \"time 
/ 10\",\n \"centuries\": \"time / 100\",\n \"millenniums\": \"time / 1000\"\n },\n \"decades\": {\n \"milliseconds\": \"time * 10 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 10 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 10 * 256 * 24 * 60\",\n \"hours\": \"time * 10 * 256 * 24\",\n \"days\": \"time * 10 * 256\",\n \"weeks\": \"time * 10 * 52\",\n \"fortnights\": \"time * 10 * 26\",\n \"years\": \"time * 10\",\n \"decades\": \"time\",\n \"centuries\": \"time / 10\",\n \"millenniums\": \"time / 100\"\n },\n \"centuries\": {\n \"milliseconds\": \"time * 100 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 100 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 100 * 256 * 24 * 60\",\n \"hours\": \"time * 100 * 256 * 24\",\n \"days\": \"time * 100 * 256\",\n \"weeks\": \"time * 100 * 52\",\n \"fortnights\": \"time * 100 * 26\",\n \"years\": \"time * 100\",\n \"decades\": \"time * 10\",\n \"centuries\": \"time\",\n \"millenniums\": \"time / 10\"\n },\n \"millenniums\": {\n \"milliseconds\": \"time * 1000 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 1000 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 1000 * 256 * 24 * 60\",\n \"hours\": \"time * 1000 * 256 * 24\",\n \"days\": \"time * 1000 * 256\",\n \"weeks\": \"time * 1000 * 52\",\n \"fortnights\": \"time * 1000 * 26\",\n \"years\": \"time * 1000\",\n \"decades\": \"time * 100\",\n \"centuries\": \"time * 10\",\n \"millenniums\": \"time\"\n }\n }\n\n # Return evaluated value\n return eval(conversions[currentformat][newformat])",
"def str_to_seconds(tstring):\n if tstring.endswith('m'):\n secs = 60 * int(tstring.replace('m', ''))\n elif tstring.endswith('h'):\n secs = 60 * 60 * int(tstring.replace('h', ''))\n elif tstring.endswith('d'):\n secs = 24 * 60 * 60 * int(tstring.replace('d', ''))\n elif tstring.endswith('y'):\n secs = 365 * 24 * 60 * 60 * int(tstring.replace('y', ''))\n else:\n secs = 60 * int(tstring)\n if secs < 0:\n secs = -1\n\n return secs",
"def get_time_with_delta(string):\n\n # If it looks like an ISO time, return that.\n try:\n absolute = pscheduler.iso8601_as_datetime(string)\n # Default behavior is to localize naive times.\n if absolute.tzinfo is None:\n absolute = pytz.utc.localize(absolute)\n return pscheduler.datetime_as_iso8601(absolute)\n except ValueError:\n pass\n\n try:\n if string[0:1] == \"+P\":\n delta = pscheduler.iso8601_as_timedelta(string[1:])\n elif string[0:1] == \"-P\":\n delta = -1 * pscheduler.iso8601_as_timedelta(string[1:])\n else:\n pass\n except ValueError:\n pscheduler.fail(\"Invalid time delta '%s'\" % (string))\n\n # Let this throw what it's going to throw.\n delta = pscheduler.iso8601_as_timedelta(string)\n\n return pscheduler.datetime_as_iso8601(\n pscheduler.time_now() + delta)",
"def __get_duration_from_string(cls, dstr):\n mtch = re.search(r'^(\\d+)$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)s(?:ec(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)m(?:in(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 60\n mtch = re.search(r'^(\\d+)h(?:r(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 3600\n mtch = re.search(r'^(\\d+)d(?:ay(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 86400\n raise FlashFileException(('String \"%s\" is not a known duration'\n ' format. Try 30sec, 10min, 2days etc.') %\n str(dstr))",
"def date_specificity(date_string):\n length = len(date_string)\n if length == 10:\n return 'ymd'\n elif length == 7:\n return 'ym'\n elif length == 4:\n return 'y'\n return None",
"def convert_time(slog_time_str):\n \n base_time = datetime.datetime(2007, 1, 1)\n delta = datetime.timedelta(0, float(slog_time_str))\n \n timestamp = base_time + delta\n taml_dtg = timestamp.strftime('%Y-%m-%dT%H:%M:%S')\n return taml_dtg",
"def get_time_with_unit(time):\n sec_in_min = 60\n sec_in_hour = 60 * 60\n sec_in_day = 24 * 60 * 60\n\n if time % sec_in_day == 0:\n time = time / sec_in_day\n unit = 'days'\n\n elif time % sec_in_hour == 0:\n time = time / sec_in_hour\n unit = 'hours'\n\n else:\n time = time / sec_in_min\n unit = 'minutes'\n return \"%s %s\" % (time, unit)",
"def test_ms2min(self):\n result = TimeUnit(-50, 'ms', 'min')\n self.assertRaises(ValueError, lambda: result.doconvert())",
"def timeConversion(s):\n new_s = ''\n if s[-2:] == 'PM' and s[:2] != '12':\n new_s = str(int(s[:2]) + 12) + s[2:-2]\n elif s[-2:] == 'AM' and s[:2] == '12':\n new_s = '0' + str(int(s[:2]) - 12) + s[2:-2]\n else:\n new_s = s[:-2]\n return new_s",
"def tedoius_time(time_string):\n start = ['start', 'begin', 'beginning', 'head', 'first']\n end = ['slut', 'end', 'tail', 'finish',\n 'finito', 'fin', 'done', 'finished']\n\n if time_string.lower() in start:\n time_string = \"00:00:00\"\n # We need this exact string for later\n elif time_string.lower() in end:\n return time_string\n elif len(time_string) == 1:\n time_string = f\"00:00:0{time_string}\"\n elif len(time_string) == 2:\n time_string = f\"00:00:{time_string}\"\n elif len(time_string) == 3:\n time_string = f\"00:00{time_string}\"\n elif len(time_string) == 4:\n time_string = f\"00:0{time_string}\"\n elif len(time_string) == 5:\n time_string = f\"00:{time_string}\"\n elif len(time_string) == 6:\n time_string = f\"00{time_string}\"\n elif len(time_string) == 7:\n time_string = f\"0{time_string}\"\n elif len(time_string) > 8:\n raise('Time string too long!')\n return time_string",
"def clean_minutes_of_goals(temp_string):\n minute = \"\"\n for c in temp_string:\n if c.isnumeric():\n minute += c\n elif c == \"+\":\n minute += c\n return minute",
"def duration_string_to_time_delta(s):\n clauses = s.split()\n if len(clauses) % 2:\n raise ValueError(\"odd number of clauses: \" + s)\n pairs = zip(clauses[::2], clauses[1::2])\n d = {p[1]: float(p[0]) for p in pairs}\n if len(d) != len(pairs):\n raise ValueError(\"duplicated clauses: \" + s)\n return datetime.timedelta(**d)",
"def infer_time_unit(time_seconds_arr: Collection[float]) -> TimeUnit:\n if not time_seconds_arr:\n return \"hours\"\n max_time_seconds = max(time_seconds_arr)\n if max_time_seconds <= 60 * 2:\n return \"seconds\"\n elif max_time_seconds <= 60 * 60 * 2:\n return \"minutes\"\n elif max_time_seconds <= 24 * 60 * 60 * 2:\n return \"hours\"\n else:\n return \"days\""
] | [
"0.62871355",
"0.62632513",
"0.6058695",
"0.60543835",
"0.60210484",
"0.6020246",
"0.59578264",
"0.5928724",
"0.5863947",
"0.585928",
"0.5834382",
"0.57287663",
"0.56812316",
"0.5627189",
"0.5626828",
"0.5603379",
"0.5575414",
"0.55661714",
"0.5550087",
"0.5532709",
"0.5508526",
"0.5484529",
"0.54621035",
"0.5455659",
"0.5453484",
"0.5453067",
"0.54509205",
"0.54476833",
"0.54459816",
"0.54412174"
] | 0.80670094 | 0 |
Input is a variable which depends on latitude. This function will copy it to a new variable, except that the new variable's latitude axis will be restricted to latmin<=lat<=latmax; and of course the data will be restricted to correspond. | def restrict_lat( mv, latmin, latmax ):
if latmin==-90: latmin = -91 # just to make sure
if latmax==90: latmax = 91
# axes
latax,idx = latAxis2(mv)
if latax is None: return None
imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )
imax = max( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )
newlatax = latax.subaxis( imin, imax+1 )
# TO DO: use latax.bounds (if present) for newlatax.bounds
# At the moment, I'm working with data for which latax.bounds doesn't exist.
# At the moment, we don't need bounds. This would get us through if necessary:
# newlatax.bounds = newlatax.genGenericBounds()
newaxes = list( allAxes(mv) ) # shallow copy
newaxes[idx] = newlatax
# shrink the data to match the shrunk lat axis
newmv_shape = list( mv.shape )
newmv_shape[idx] = imax+1 - imin
if imin>0:
nd = numpy.delete( mv.data, slice(0,imin), idx ) # doesn't change mv
else:
nd = mv
lenidx = nd.shape[idx]
if lenidx > newmv_shape[idx]:
newdata = numpy.delete( nd.data, slice(imax+1-imin,lenidx), idx )
else:
newdata = nd
# new variable
newmv = cdms2.createVariable( newdata, copy=True, axes=newaxes, id=mv.id )
newmv.units = mv.units
return newmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n return type(self)(self.lat_lon[0], self.lat_lon[1], **self._attrs)",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def apply_bound(x, var_min, var_max):\n x.position = np.maximum(x.position, var_min)\n x.position = np.minimum(x.position, var_max)",
"def broadcast_lonlat(ds, verbose=True):\n if 'lon' not in ds.variables:\n ds.coords['lon'] = ds['x']\n if 'lat' not in ds.variables:\n ds.coords['lat'] = ds['y']\n \n if len(ds['lon'].dims) < 2:\n ds.coords[\"lon\"] = ds[\"lon\"] * xr.ones_like(ds[\"lat\"])\n if len(ds['lat'].dims) < 2:\n ds.coords[\"lat\"] = xr.ones_like(ds[\"lon\"]) * ds[\"lat\"]\n return ds",
"def _set_coords_copy(self,coords):\n return self.copy()._set_coords_inplace(coords)",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def process_pain(x, lb, ub):\n x = x.abs()\n x.loc[(x > ub)] = 8\n x.loc[(x < lb) | (x > ub)] = np.nan\n return x",
"def map(self, x, y):\n if near(x[0], xmax) and near(x[1], ymax):\n y[0] = x[0] - xmax\n y[1] = x[1] - ymax\n elif near(x[0], xmax):\n y[0] = x[0] - xmax\n y[1] = x[1]\n elif near(x[1], ymax):\n y[0] = x[0]\n y[1] = x[1] - ymax\n else:\n y[0] = x[0]\n y[1] = x[1]",
"def lift(point):\n return gs.copy(point)",
"def _fixup_coords(self, var):\n for coord_name, data_array in var.coords.items():\n if data_array.attrs.get('standard_name') in ('projection_x_coordinate',\n 'projection_y_coordinate'):\n try:\n var.coords[coord_name].metpy.convert_units('meters')\n except DimensionalityError: # Radians!\n new_data_array = data_array.copy()\n height = var.coords['crs'].item()['perspective_point_height']\n scaled_vals = new_data_array.metpy.unit_array * (height * units.meters)\n new_data_array.metpy.unit_array = scaled_vals.to('meters')\n var.coords[coord_name] = new_data_array",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def composite(lon, lat):\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return (~gta * transform.TransformPoint(lat, lon)[:2])",
"def reverse(self, lon, lat):",
"def proj(self,lon,lat):\n x, y = self(np.atleast_1d(lon),np.atleast_1d(lat))\n x[x > 1e29] = None\n y[y > 1e29] = None\n #return np.ma.array(x,mask=x>1e2),np.ma.array(y,mask=y>1e2)\n return x, y",
"def localize(self, new_data, gps_guess=False):\r\n if gps_guess: \r\n mapping_img, _ = self.mapdata.extract_from_map(new_data.gps_pos, new_data.attitude, np.shape(new_data.img))\r\n gps_pos, attitude = projection(self.mapdata.gps_pos, self.mapdata.attitude, new_data.gps_pos, new_data.attitude)\r\n mapping_data = RadarData(None, mapping_img, gps_pos, attitude) \r\n else:\r\n mapping_img, _ = self.mapdata.extract_from_map(self.position, self.attitude, np.shape(new_data.img))\r\n mapping_data = RadarData(None, mapping_img, self.position, self.attitude) \r\n\r\n self.position, self.attitude = new_data.image_position_from(mapping_data)\r\n self.last_data = RadarData(new_data.id, new_data.img, self.position, self.attitude)\r\n\r\n if self.mapping: \r\n self.mapdata.add_data(self.last_data)\r\n \r\n return deepcopy(self.position), deepcopy(self.attitude)",
"def correct_lon(ds):\n ds = ds.copy()\n x = ds['x'].data\n ds['x'].data = np.where(x < 0 , 360 + x, x)\n\n lon = ds['lon'].data\n ds['lon'].data = np.where(lon < 0 , 360 + lon, lon)\n \n ds = ds.sortby('x')\n return ds",
"def replace_x_y_nominal_lat_lon(ds):\n ds = ds.copy()\n if 'x' in ds.dims and 'y' in ds.dims:\n \n nominal_y = ds.lat.mean('x')\n # extract the equatorial lat and take those lon values as nominal lon\n eq_ind = abs(ds.lat.mean('x')).load().argmin().data\n nominal_x = ds.lon.isel(y=eq_ind)\n ds.coords['x'].data = nominal_x.data\n ds.coords['y'].data = nominal_y.data\n\n ds = ds.sortby('x')\n ds = ds.sortby('y')\n \n else:\n warnings.warn('No x and y found in dimensions for source_id:%s. This likely means that you forgot to rename the dataset or this is the German unstructured model' %ds.attrs['source_id'])\n return ds",
"def interpolate(self, var, time, lat, lon):\n\n # Get the nearest four points in space\n # Check to see if lat/lons are 2d or 1d\n if len(self['lat'].shape) == 2:\n closey, closex, distances = self.nearest_points(lat, lon, npt=4)\n # Distances in km\n# distances = np.array([self.haversine(\n# (self['lat'][y,x].values, self['lon'][y,x].values),\n# (lat, lon)) for y,x in \n# zip(list(closey), list(closex))])\n else:\n closen = self.nearest_points(lat, lon, npt=4)\n closey = closen\n closex = closen\n # Distances in km\n distances = np.array([self.haversine(\n (self['lat'][n].values, self['lon'][n].values),\n (lat, lon)) for n in list(closen)])\n # Check for exact match (within some tolerance)\n spaceweights = np.zeros(distances.shape)\n if (distances < 1.0).sum() > 0:\n spaceweights[distances.argmin()] = 1\n else:\n # Here, inverse distance weighting (for simplicity)\n spaceweights = 1.0 / distances\n spaceweights /= spaceweights.sum()\n # Get weights in time\n #time64 = np.datetime64(time)\n #all the valid times in the ensemble\n valids = self['validtime'].values\n timeweights = np.zeros(valids.shape)\n # Check if we are outside the valid time range\n if (time < valids[0]) or (time > valids[-1]):\n print(\"Interpolation is outside of time range in state!\")\n return None\n # Find where we are in this list\n #index after the time of the observation\n lastdex = (valids >= time).argmax()\n # If we match a particular time value, then\n # this is just an identity\n if valids[lastdex] == time:\n # Just make a one at this time\n timeweights[lastdex] = 1\n else:\n # Linear interpolation\n #often going to be 6 hours, subtracts datetime objects I think\n diff = (valids[lastdex] - valids[lastdex-1])\n #print(valids[lastdex], valids[lastdex-1], diff)\n #often going to be 21600 seconds\n totsec = diff.seconds\n #totsec = np.abs(diff / np.timedelta64(1, 's'))\n #ST\n #calculate time difference between time after and time of observation\n #the abs will make this positive definite, which is okay since\n #the difference will always be negative\n thisdiff = abs(time - valids[lastdex])\n #thissec = np.abs(thisdiff / np.timedelta64(1,'s'))\n thissec = thisdiff.seconds\n # Put in appropriate weights\n #ST switched the -1 between the two lines to match up with the positive-\n #definite thisdiff\n timeweights[lastdex-1] = float(thissec) / totsec\n timeweights[lastdex] = 1.0 - (float(thissec)/totsec)\n # Now that we have the weights, do the interpolation\n #ST an ntimes x 4 x nens array\n interp = self.variables[var].values[:,closey,closex,:]\n # Do a dot product with the time weights\n # And with the space weights\n if len(interp.shape) == 3:\n interp = (timeweights[:,None,None] * interp).sum(axis=0)\n else:\n interp = (timeweights[:,None,None,None] * interp).sum(axis=0)\n \n if len(interp.shape) == 3:\n #ST Changed 2nd : to None\n interp = (spaceweights[:,None,None] * interp).sum(axis=1)\n else:\n interp = (spaceweights[:,None] * interp).sum(axis=0)\n # Return estimate from all ensemble members\n return interp",
"def locality_copy(self):\n new = copy(self)\n new.north = copy(self.north)\n new.south = copy(self.south)\n new.east = copy(self.east)\n new.west = copy(self.west)\n return new",
"def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units} )\n return lonmv",
"def expand_var(nc, out, name, direction):\n if name == direction:\n return\n\n var1 = nc.variables[name]\n\n print(\"Processing %s...\" % name)\n\n # Copy coordinate variables and stop:\n if name in ['t', 'z', 'y', 'x', 'zb']:\n var2 = out.createVariable(name, var1.dtype, (name,))\n var2[:] = var1[:]\n copy_attributes(var1, var2)\n return\n\n dims = var1.dimensions\n if len(dims) == 1:\n dims = ('y', 'x')\n elif len(dims) == 2:\n dims = ('t', 'y', 'x')\n elif len(dims) == 3:\n if name == \"litho_temp\": # litho_temp is the only variable depending on 'zb'.\n dims = ('t', 'zb', 'y', 'x')\n else:\n dims = ('t', 'z', 'y', 'x')\n\n var2 = out.createVariable(name, var1.dtype, dims)\n copy_attributes(var1, var2)\n\n for j in range(3):\n if direction == 'x':\n var2[get_slice(var2.dimensions, x=j)] = permute(var1)\n elif direction == 'y':\n var2[get_slice(var2.dimensions, y=j)] = permute(var1)",
"def _update_non_learnable_var(old_var: NestedMap, new_var: NestedMap,\n var_params: ParamsT) -> NestedMap:\n if not base_layer.var_not_trainable(var_params):\n assert new_var is None\n return old_var\n elif not in_pmap:\n # No aggregation is needed.\n assert new_var is not None\n return new_var\n elif base_layer.var_requires_mean_sync(var_params):\n assert new_var is not None\n return _synchronize_vars_using_mean(new_var, old_var)\n else:\n raise ValueError('Non-trainable variables must have a cross-replica '\n 'synchronization method specified.')",
"def varcopy(self, vars):",
"def normalize_longitude(lon: np.ndarray,\n lon_min: Optional[float] = -180.0) -> np.ndarray:\n return ((lon - lon_min) % 360) + lon_min",
"def assign(self, dst, req, src):\n if req == 'null':\n return\n if req in ('write', 'inplace'):\n dst[:] = src\n elif req == 'add':\n dst[:] += src",
"def _prepare_with_copy(geometry):\n geometry = pygeos.apply(geometry, lambda x: x) # makes a copy\n pygeos.prepare(geometry)\n return geometry",
"def get_latitude(self, latitude):\n self.Latitude = latitude\n return self.Latitude",
"def lat_bias_correction(self, obs_date, obs_lat, mod_data, prior_data):\n return 0.0",
"def tunnel_fast1d(latvar, lonvar, lonlat):\n\n rad_factor = np.pi / 180.0 # for trignometry, need angles in radians\n # Read latitude and longitude from file into numpy arrays\n latvals = latvar[:] * rad_factor\n lonvals = lonvar[:] * rad_factor\n\n # Compute numpy arrays for all values, no loops\n clat, clon = np.cos(latvals), np.cos(lonvals)\n slat, slon = np.sin(latvals), np.sin(lonvals)\n\n clat_clon = clat * clon\n clat_slon = clat * slon\n\n lat0_rad = lonlat[1, :] * rad_factor\n lon0_rad = lonlat[0, :] * rad_factor\n\n delX_pre = np.cos(lat0_rad) * np.cos(lon0_rad)\n delY_pre = np.cos(lat0_rad) * np.sin(lon0_rad)\n delZ_pre = np.sin(lat0_rad)\n\n nodes = np.zeros((lonlat.shape[1]))\n for i in range(lonlat.shape[1]):\n delX = delX_pre[i] - clat_clon\n delY = delY_pre[i] - clat_slon\n delZ = delZ_pre[i] - slat\n dist_sq = delX ** 2 + delY ** 2 + delZ ** 2\n minindex_1d = dist_sq.argmin() # 1D index of minimum element\n node = np.unravel_index(minindex_1d, latvals.shape)\n nodes[i] = node[0]\n\n return nodes"
] | [
"0.58231395",
"0.5813714",
"0.56509984",
"0.52055085",
"0.5196696",
"0.51875114",
"0.51552486",
"0.5142588",
"0.5094556",
"0.50887316",
"0.5085515",
"0.50473696",
"0.504018",
"0.49421754",
"0.4902313",
"0.48965225",
"0.4887742",
"0.486856",
"0.48649704",
"0.48557973",
"0.48485732",
"0.48039362",
"0.48034886",
"0.4803233",
"0.47986263",
"0.47972438",
"0.4786574",
"0.4774295",
"0.4771255",
"0.47708166"
] | 0.7229858 | 0 |
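A minimal usage sketch for the restrict_lat document above, kept separate from the record itself: the file name 'sample.nc' and the variable id 'T' are hypothetical placeholders, and cdms2 plus the restrict_lat definition are assumed to be importable.

# Hypothetical usage sketch (cdms2 assumed; 'sample.nc' and 'T' are placeholder names).
import cdms2
f = cdms2.open('sample.nc')               # any file holding a (time, lat, lon) variable
T = f('T')                                 # read as a TransientVariable
T_tropics = restrict_lat(T, -30., 30.)     # keep only latitudes between 30S and 30N
print(T.shape, '->', T_tropics.shape)      # only the lat dimension shrinks
f.close()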
returns the mean of the variable over the supplied latitude range (in degrees, based on values of lat, not lat_bnds). The computed quantity is a scalar but is returned as a cdms2 variable, i.e. a MV. The input mv is a cdms2 variable, assumed to be indexed as is usual for CF-compliant variables, i.e. mv(time,lat,lon). At present, no other axes (e.g. level) are supported. At present mv must depend on all three axes. ....This function is deprecated; use the version which uses the averager() function.... | def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):
# For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.
# And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)
# If they aren't, it's best to use area from cell_measures attribute if available; otherwise
# compute it with lat_bnds, lon_bnds etc.
if vid==None:
vid = 'reduced_'+mv.id
time,lat,lon = tllAxes(mv)
if hasattr(mv.parent,'variables'):
fil = mv.parent # mv is a fileVariable and fil is a file.
lat_bnds = fil[lat.bounds]
else:
lataxis = latAxis(mv) # mv is a TransientVariable
lat_bnds = lataxis._bounds_
mvta = timeave_old( mv )
# In computing the average, we use area weighting.
# Sometimes the area is available in cell_measures, but for now I'll just use the backup method:
# The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).
# With equally spaced lon, we don't need delta(lon) for weights.
# I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.
wgtsum = 0
zm = 0
for i,lati in enumerate(lat):
# The following test could be sped up a lot, because lat[i] is ordered...
# >>> to do: partial overlaps
if latmin<=lati and lati<latmax:
latlo = lat_bnds[i,0]
lathi = lat_bnds[i,1]
wgti = sin(radians(lathi))-sin(radians(latlo))
zi = 0.0
for j in range(len(lon)):
zi += mvta[i,j]
zi *= wgti
wgtsum += wgti*len(lon)
zm += zi
zm /= wgtsum
# zm is a scalar, so createVariable gets no axes argument:
zmv = cdms2.createVariable( zm, id=vid )
return zmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume",
"def mean_average_position():\n pass",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def glcm_stat_mean(glcm_matrix):\n return np.mean(glcm_matrix)",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def geoMeanAxisPoints(self, var):\n varID = var.id\n var = genutil.statistics.geometricmean(var, axis=\"(%s)\" % self.axis.id)\n var.id = varID\n return var",
"def _compute_mean(self, C, mag, rjb, rake):\n mean = (C['a1'] +\n self._compute_linear_magnitude_term(C, mag) +\n self._compute_quadratic_magnitude_term(C, mag) +\n self._compute_logarithmic_distance_term(C, mag, rjb) +\n self._compute_faulting_style_term(C, rake))\n\n return mean",
"def disk_average(self, var, r_lim):\n # change the mask for the one in Flux\n npx = self.params['npx']\n npy = self.params['npy']\n npz = self.params['npz']\n number_domains = npx*npy*npz # so far only works for number_domains<100\n Lx = self.params['Lx']\n Ly = self.params['Ly']\n Lz = self.params['Lz']\n x0 = Lx/2 # center point in the x domain.\n y0 = Ly/2 # center point in the y domain.\n nz = self.params['nz']\n\n if var == 'NN': # maybe interpolate is field...\n nz = nz - 1\n\n t = self.read_vars(['t'])['t']\n n_time = t.shape[0]\n\n r_max = r_lim #0.45 # as in forced_plume_nudging.py\n z_max = 0.95\n\n means = np.zeros((n_time, nz))\n\n fields = self.read_vars([var, 'x', 'y'])\n\n if var in ['u', 'v', 'w']:\n axis_vel = {'u': 3, 'v': 2, 'w':1}\n fields[var] = velocity_interpolation(fields[var], axis=axis_vel[var])\n\n XX, YY = np.meshgrid(fields['x']/Lx - 0.5,\n fields['y']/Ly - 0.5)\n\n r = np.sqrt(XX**2 + YY**2)\n mask = ma.masked_outside(r, 0, r_max)\n #mask_2 = ma.masked_outside(ZZ, 0, z_max)\n\n for t in range(n_time):\n for z_lvl in range(nz):\n field_new = ma.masked_array(fields[var][t, z_lvl, :, :], mask.mask)\n means[t, z_lvl] = field_new.mean()\n\n #means = means/number_domains\n return means",
"def reprojection_error_mean(*args, **kwargs):\n return np.mean(reprojection_error_vector(*args, **kwargs))",
"def mean(self):\n return self.vmean",
"def average(a1, a2, coord1, coord2, dim1, dim2, unit):\r\n \r\n avg = (a1 + a2)/2\r\n \r\n avg_xr = xr.DataArray(avg, coords=[coord1, coord2], dims=[dim1, dim2])\r\n avg_xr.attrs['units'] = unit\r\n \r\n return avg_xr",
"def mean(vals):",
"def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga",
"def geo_mean(num_list):\n np_array = np.array(num_list)\n return np_array.prod() ** (1.0 / len(np_array))",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def area_average(mycube, coord1, coord2):\n import iris.analysis.cartography\n #mycube.coord(coord1).guess_bounds()\n #mycube.coord(coord2).guess_bounds()\n grid_areas = iris.analysis.cartography.area_weights(mycube)\n result = mycube.collapsed([coord1, coord2], iris.analysis.MEAN, weights=grid_areas)\n return result",
"def mean_velocity(self, ax):\n u_mod_bar = self.mean_f(self.uf_abs)\n contourf = ax.contourf(u_mod_bar, self.levels_u)\n ax.set_title(r'Mean speed $\\overline{|u|_t}(x, z)$')\n ax.set_xlabel('horizontal')\n ax.set_ylabel('vertical')\n return contourf",
"def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean",
"def restrict_lat( mv, latmin, latmax ):\n if latmin==-90: latmin = -91 # just to make sure\n if latmax==90: latmax = 91\n\n # axes\n latax,idx = latAxis2(mv)\n if latax is None: return None\n imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n imax = max( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )\n newlatax = latax.subaxis( imin, imax+1 )\n # TO DO: use latax.bounds (if present) for newlatax.bounds\n # At the moment, I'm working with data for which latax.bounds doesn't exist.\n # At the moment, we don't need bounds. This would get us through if necessary:\n # newlatax.bounds = newlatax.genGenericBounds()\n newaxes = list( allAxes(mv) ) # shallow copy\n newaxes[idx] = newlatax\n\n # shrink the data to match the shrunk lat axis\n newmv_shape = list( mv.shape )\n newmv_shape[idx] = imax+1 - imin\n if imin>0:\n nd = numpy.delete( mv.data, slice(0,imin), idx ) # doesn't change mv\n else:\n nd = mv\n lenidx = nd.shape[idx]\n if lenidx > newmv_shape[idx]:\n newdata = numpy.delete( nd.data, slice(imax+1-imin,lenidx), idx )\n else:\n newdata = nd\n\n # new variable\n newmv = cdms2.createVariable( newdata, copy=True, axes=newaxes, id=mv.id )\n newmv.units = mv.units\n return newmv",
"def mean_velocity_(self, ax):\n u_mod_bar = self.mean_f(self.uf__abs)\n contourf = ax.contourf(u_mod_bar, self.levels_u_)\n ax.set_title(r'Mean speed $\\overline{|u|_t}(x, z)$')\n ax.set_xlabel('horizontal')\n ax.set_ylabel('vertical')\n return contourf"
] | [
"0.67416596",
"0.6548467",
"0.6509797",
"0.62889326",
"0.6226794",
"0.6226794",
"0.6008768",
"0.59134054",
"0.5877358",
"0.57791483",
"0.5682933",
"0.5665746",
"0.55882084",
"0.55698764",
"0.55633837",
"0.55565643",
"0.55452055",
"0.55107474",
"0.5478772",
"0.54400945",
"0.5365574",
"0.53577656",
"0.5301823",
"0.52972037",
"0.52939856",
"0.5289109",
"0.5280203",
"0.5275417",
"0.52556854",
"0.5254213"
] | 0.6658991 | 1 |
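The area weighting described in the comments of the reduce2scalar_zonal_old document above (each latitude band weighted by sin(lat_hi) - sin(lat_lo), with all longitudes contributing equally) can be sketched in plain numpy; the three bands and their values below are made-up illustration data, not part of the record.

# Sketch of the latitude-band weighting, weight_i = sin(lat_hi) - sin(lat_lo), in plain numpy.
import numpy as np
lat_bnds = np.array([[-90., -30.], [-30., 30.], [30., 90.]])  # band edges in degrees
band_vals = np.array([250., 300., 260.])                      # made-up zonal means per band
wgts = np.sin(np.radians(lat_bnds[:, 1])) - np.sin(np.radians(lat_bnds[:, 0]))
weighted_mean = (wgts * band_vals).sum() / wgts.sum()
print(weighted_mean)  # 277.5 -- the wide tropical band dominates, as area weighting should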
returns the mean of the variable over the supplied latitude range (in degrees, based on values of lat, not lat_bnds). The computed quantity is a scalar but is returned as a cdms2 variable, i.e. a MV. The input mv is a cdms2 variable too. This function uses the cdms2 averager() function to handle weights and do the averaging. | def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):
if vid==None:
vid = 'reduced_'+mv.id
axes = allAxes( mv )
ilat = None
for i,ax in enumerate(axes):
if ax.id=='lat': ilat = i
# reduce size of lat axis to (latmin,latmax)
    # Let's hope a direct search will be fast enough:
lataxis = latAxis( mv )
lmin = -1
lmax = len(lataxis)
if lataxis[0]>=latmin: lmin = 0
if lataxis[-1]<=latmax: lmax = len(lataxis)-1
if lmin==-1 or lmax==len(lataxis):
for l,ax in enumerate(lataxis):
if lmin==-1 and ax>=latmin: lmin = max( 0, l )
if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )
lataxis_shrunk = lataxis.subaxis(lmin,lmax)
mv2shape = list(mv.shape)
mv2shape[ilat] = lmax-lmin+1
axes[ilat] = lataxis_shrunk
mvd1 = numpy.delete( mv, slice(0,lmin), ilat )
mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )
mv2 = cdms2.createVariable( mvdata, axes=axes )
axis_names = [ a.id for a in axes ]
axes_string = '('+')('.join(axis_names)+')'
avmv = averager( mv2, axis=axes_string )
avmv.id = vid # Note that the averager function returns a variable with meaningless id.
    avmv.units = mv.units
return avmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n # If I base another reduction function on this one, it's important to note that an average\n # in the lat direction will unavoidably need weights, because of the geometry.\n\n if vid==None:\n vid = 'reduced_'+mv.id\n time_axis, lat_axis, lon_axis = tllAxes( mv )\n\n mvta = timeave_old( mv )\n\n zm = numpy.zeros( mvta.shape[0] )\n for i in range(len(lat_axis)):\n for j in range(len(lon_axis)):\n zm[i] += mvta[i,j]\n zm[i] /= len(lon_axis)\n zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )\n return zmv",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def _compute_mean(self, C, mag, rjb, rake):\n mean = (C['a1'] +\n self._compute_linear_magnitude_term(C, mag) +\n self._compute_quadratic_magnitude_term(C, mag) +\n self._compute_logarithmic_distance_term(C, mag, rjb) +\n self._compute_faulting_style_term(C, rake))\n\n return mean",
"def mean_average_position():\n pass",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units} )\n return latmv",
"def area_average_obs(cube, region, model_units):\n \n # Specify the latitudes and longitudes starting from the smallest number to largest or in latitude and longitude from south to north and east to west\n lon1, lon2, lat1, lat2 = region[0], region[1], region[2], region[3]\n\n print(cube.coord('latitude').var_name)\n print(cube.coord('latitude').units.modulus)\n cube.coord('latitude').units = model_units\n cube.coord('longitude').units = model_units\n print(cube.coord('latitude').units.modulus)\n\n # Then intersect the data at these points\n cube = cube.intersection(longitude=(lon1, lon2),latitude=(lat1, lat2))\n\n # cube.coord('latitude').guess_bounds()\n # cube.coord('longitude').guess_bounds()\n\n # area weighting\n weights = iris.analysis.cartography.area_weights(cube)\n # Average that area by latitude and longitudes by the weighted mean\n cube = cube.collapsed(['latitude','longitude'], iris.analysis.MEAN, weights=weights)\n\n return cube",
"def geo_mean(num_list):\n np_array = np.array(num_list)\n return np_array.prod() ** (1.0 / len(np_array))",
"def glcm_stat_mean(glcm_matrix):\n return np.mean(glcm_matrix)",
"def average(a1, a2, coord1, coord2, dim1, dim2, unit):\r\n \r\n avg = (a1 + a2)/2\r\n \r\n avg_xr = xr.DataArray(avg, coords=[coord1, coord2], dims=[dim1, dim2])\r\n avg_xr.attrs['units'] = unit\r\n \r\n return avg_xr",
"def area_average(mycube, coord1, coord2):\n import iris.analysis.cartography\n #mycube.coord(coord1).guess_bounds()\n #mycube.coord(coord2).guess_bounds()\n grid_areas = iris.analysis.cartography.area_weights(mycube)\n result = mycube.collapsed([coord1, coord2], iris.analysis.MEAN, weights=grid_areas)\n return result",
"def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga",
"def mean(self):\n return self.vmean",
"def disk_average(self, var, r_lim):\n # change the mask for the one in Flux\n npx = self.params['npx']\n npy = self.params['npy']\n npz = self.params['npz']\n number_domains = npx*npy*npz # so far only works for number_domains<100\n Lx = self.params['Lx']\n Ly = self.params['Ly']\n Lz = self.params['Lz']\n x0 = Lx/2 # center point in the x domain.\n y0 = Ly/2 # center point in the y domain.\n nz = self.params['nz']\n\n if var == 'NN': # maybe interpolate is field...\n nz = nz - 1\n\n t = self.read_vars(['t'])['t']\n n_time = t.shape[0]\n\n r_max = r_lim #0.45 # as in forced_plume_nudging.py\n z_max = 0.95\n\n means = np.zeros((n_time, nz))\n\n fields = self.read_vars([var, 'x', 'y'])\n\n if var in ['u', 'v', 'w']:\n axis_vel = {'u': 3, 'v': 2, 'w':1}\n fields[var] = velocity_interpolation(fields[var], axis=axis_vel[var])\n\n XX, YY = np.meshgrid(fields['x']/Lx - 0.5,\n fields['y']/Ly - 0.5)\n\n r = np.sqrt(XX**2 + YY**2)\n mask = ma.masked_outside(r, 0, r_max)\n #mask_2 = ma.masked_outside(ZZ, 0, z_max)\n\n for t in range(n_time):\n for z_lvl in range(nz):\n field_new = ma.masked_array(fields[var][t, z_lvl, :, :], mask.mask)\n means[t, z_lvl] = field_new.mean()\n\n #means = means/number_domains\n return means",
"def geoMeanAxisPoints(self, var):\n varID = var.id\n var = genutil.statistics.geometricmean(var, axis=\"(%s)\" % self.axis.id)\n var.id = varID\n return var",
"def reprojection_error_mean(*args, **kwargs):\n return np.mean(reprojection_error_vector(*args, **kwargs))",
"def metric_average(val, name, hvd):\n tensor = val.clone().detach()\n avg_tensor = hvd.allreduce(tensor, name=name)\n return avg_tensor.item()",
"def mean(vals):",
"def coarsen_byavg(invar,lat,lon,deg,tol,latweight=True,verbose=True,ignorenan=False):\n\n # Make new Arrays\n lon5 = np.arange(0,360+deg,deg)\n lat5 = np.arange(-90,90+deg,deg)\n \n \n # Set up latitude weights\n if latweight:\n _,Y = np.meshgrid(lon,lat)\n wgt = np.cos(np.radians(Y)) # [lat x lon]\n invar *= wgt[None,:,:] # Multiply by latitude weight\n \n # Get time dimension and preallocate\n nt = invar.shape[0]\n outvar = np.zeros((nt,len(lat5),len(lon5)))\n \n # Loop and regrid\n i=0\n for o in range(len(lon5)):\n for a in range(len(lat5)):\n lonf = lon5[o]\n latf = lat5[a]\n \n lons = np.where((lon >= lonf-tol) & (lon <= lonf+tol))[0]\n lats = np.where((lat >= latf-tol) & (lat <= latf+tol))[0]\n \n varf = invar[:,lats[:,None],lons[None,:]]\n \n if latweight:\n wgtbox = wgt[lats[:,None],lons[None,:]]\n if ignorenan:\n varf = np.nansum(varf/np.nansum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n else:\n varf = np.sum(varf/np.sum(wgtbox,(0,1)),(1,2)) # Divide by the total weight for the box\n \n \n else:\n if ignorenan: \n varf = np.nanmean(varf,axis=(1,2))\n else:\n varf = varf.mean((1,2))\n \n outvar[:,a,o] = varf.copy()\n i+= 1\n msg=\"\\rCompleted %i of %i\"% (i,len(lon5)*len(lat5))\n print(msg,end=\"\\r\",flush=True)\n return outvar,lat5,lon5",
"def compute(dm,do):\n mae = MV.average(MV.absolute(MV.subtract(dm,do)))\n return float(mae)",
"def area_average(cube, region):\n \n # Specify the latitudes and longitudes starting from the smallest number to largest or in latitude and longitude from south to north and east to west\n lon1, lon2, lat1, lat2 = region[0], region[1], region[2], region[3] \n # Then intersect the data at these points\n cube = cube.intersection(longitude=(lon1, lon2),latitude=(lat1, lat2))\n\n #cube.coord('latitude').guess_bounds()\n #cube.coord('longitude').guess_bounds()\n\n # area weighting\n weights = iris.analysis.cartography.area_weights(cube)\n # Average that area by latitude and longitudes by the weighted mean\n cube = cube.collapsed(['latitude','longitude'], iris.analysis.MEAN, weights=weights)\n\n return cube",
"def spatial_avg(self, input_layer):\n return tf.reduce_mean(input_layer, [2, 3], name='spatial_avg')"
] | [
"0.6483892",
"0.6449936",
"0.63286823",
"0.6259683",
"0.6259683",
"0.60817444",
"0.59398943",
"0.5792978",
"0.5686484",
"0.5672774",
"0.5669752",
"0.5493353",
"0.548496",
"0.5436766",
"0.541976",
"0.541753",
"0.5412601",
"0.54037213",
"0.53908765",
"0.53008914",
"0.529571",
"0.5265717",
"0.52464926",
"0.5245556",
"0.52381605",
"0.5230718",
"0.5211916",
"0.5188137",
"0.5148418",
"0.5122673"
] | 0.65369064 | 0 |
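The '(id1)(id2)...' axis-string convention passed to averager() in the reduce2scalar_zonal document above can be shown in isolation. This sketch assumes the averager comes from cdutil (cdutil.averager accepts this axis-string form); where the documents above import it from is not shown in the records, and the averager generally requires the averaged axes to carry bounds, as several of the functions above note.

# Sketch: build the '(id1)(id2)...' axis string from a cdms2 variable's axes (cdutil assumed).
from cdutil import averager

def average_over_axes(var, skip=()):
    # e.g. skip=('lat',) on a (time, lat, lon) variable gives the string "(time)(lon)"
    axis_names = [ax.id for ax in var.getAxisList() if ax.id not in skip]
    axes_string = '(' + ')('.join(axis_names) + ')'
    return averager(var, axis=axes_string)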
averages mv over the full range of all axes, to a single scalar. Uses the averager module for its greater capabilities. | def reduce2scalar( mv, vid=None ):
if vid==None: # Note that the averager function returns a variable with meaningless id.
vid = 'reduced_'+mv.id
axes = allAxes( mv )
axis_names = [ a.id for a in axes ]
axes_string = '('+')('.join(axis_names)+')'
avmv = averager( mv, axis=axes_string )
avmv.id = vid
avmv.units = mv.units
return avmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def manual_mean(arr):\n my_sum = 0\n for i in range(0, arr.shape[0]):\n for j in range(0, arr.shape[1]):\n my_sum += arr[i,j]\n return my_sum / arr.size",
"def _ave(self):\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()",
"def _ave(self):\n\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()",
"def metric_average(val, name, hvd):\n tensor = val.clone().detach()\n avg_tensor = hvd.allreduce(tensor, name=name)\n return avg_tensor.item()",
"def calcVmoy(self, V, idx, n):\n i = max(idx-n, 0)\n f = min(idx+n+1, V.shape[0])\n av = np.mean(V[i:f])\n return av",
"def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def calc_mean(sig):\n # m = mean(sig)\n return np.mean(sig)",
"def average(data):\n return np.average(data)",
"def average(self):\n return np.mean(self.buf[:self._size], axis=0)",
"def average(self):\n s = self.sum()\n flat_shape = self.flatten_shape(self.shape)\n num_of_elements = fct.reduce(opr.mul, flat_shape, 1)\n average = s / num_of_elements\n return average",
"def mean(arr) -> float:\n return sum(arr) / len(arr)",
"def mae(x_pred, x_target, dim=0):\n if dim == 0:\n return x_pred.sub(x_target).abs().mean().item()\n elif dim == 1:\n return x_pred.sub(x_target).abs().mean((0,1))\n elif dim == 2:\n return x_pred.sub(x_target).abs().mean((0,2))\n else:\n raise ValueError(\"Not a valid dimension\")",
"def reprojection_error_mean(*args, **kwargs):\n return np.mean(reprojection_error_vector(*args, **kwargs))",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def avg(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n \n return (u + v) / 2.0",
"def mean(series):\n return fsum(series) / len(series)",
"def mean(vals):",
"def avgAxisPoints(self, var):\n varID = var.id\n var = cdutil.averager(var, axis=\"(%s)\" % self.axis.id, weight='equal')\n var.id = varID\n return var",
"def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm",
"def average(self):\n if self._average is None: # only first time\n self._average = self._obj.mean(dim='t')\n self._average.attrs = self._obj.attrs # we need units in quiver\n\n return self._average",
"def mean(self):\n mean=np.zeros(self.shape)\n if self.Fourier:\n ind=self.mean_index()\n for di in np.ndindex(*self.shape):\n mean[di]=np.real(self.val[di][ind])/self.fft_coef\n else:\n for di in np.ndindex(*self.shape):\n mean[di]=np.mean(self.val[di])\n return mean",
"def numpy_mean(arr):\n return arr.mean()",
"def make_average(self, arr):\n\n if not self.degen:\n self.get_degen()\n\n nkpt, nband = arr.shape[-2:]\n \n for ikpt in range(nkpt):\n for group in self.degen[ikpt]:\n average = copy(arr[...,ikpt,group[0][1]])\n for ispin, iband in group[1:]:\n average += arr[...,ikpt,iband]\n \n average /= len(group)\n for ispin, iband in group:\n arr[...,ikpt,iband] = average\n \n return arr",
"def with_sum_mean_reduction(self):\n return self.with_reduction(lambda x: x.sum(1).mean(0))",
"def wo_mean(arr):\n\n return np.array(arr) - np.mean(arr, axis=0)",
"def reduce_time( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id=='time' ]\n axes_string = '('+')('.join(axis_names)+')'\n if len(axes_string)>2:\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n avmv = averager( mv, axis=axes_string )\n else:\n avmv = mv\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv"
] | [
"0.6626889",
"0.63578427",
"0.625352",
"0.6220642",
"0.619339",
"0.6153601",
"0.61507607",
"0.61103404",
"0.61103404",
"0.6092938",
"0.60919166",
"0.6082729",
"0.602311",
"0.6023065",
"0.60227084",
"0.6014731",
"0.59958345",
"0.59730744",
"0.5966355",
"0.5960948",
"0.5960868",
"0.59581125",
"0.5952828",
"0.5938839",
"0.59293985",
"0.5923902",
"0.59011996",
"0.58990544",
"0.58918524",
"0.5890645"
] | 0.6819693 | 0 |
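For comparison with the explicit i/j loops of the reduce2lat_old function that appears among these records, the longitude-only average of a time-averaged field is a one-liner in plain numpy; the grid size below is an arbitrary example, not data from any record.

# Sketch: vectorized zonal (longitude) mean of a time-averaged (lat, lon) field.
import numpy as np
field = np.random.rand(96, 144)     # arbitrary lat x lon grid, stand-in for timeave_old(mv)
zonal_mean = field.mean(axis=1)     # one value per latitude, same result as the j-loop
print(zonal_mean.shape)             # (96,)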
returns the mean of the variable over all axes but latitude, as a cdms2 variable, i.e. a MV. The input mv is also a cdms2 variable, assumed to be indexed as is usual for CF-compliant variables, i.e. mv(time,lat,lon). At present, no other axes (e.g. level) are supported. At present mv must depend on all three axes. | def reduce2lat_old( mv, vid=None ):
# >>> For now, I'm assuming that the only axes are time,lat,lon
# And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)
# If they aren't, it's best to use area from cell_measures attribute if available; otherwise
# compute it with lat_bnds, lon_bnds etc.
# If I base another reduction function on this one, it's important to note that an average
# in the lat direction will unavoidably need weights, because of the geometry.
if vid==None:
vid = 'reduced_'+mv.id
time_axis, lat_axis, lon_axis = tllAxes( mv )
mvta = timeave_old( mv )
zm = numpy.zeros( mvta.shape[0] )
for i in range(len(lat_axis)):
for j in range(len(lon_axis)):
zm[i] += mvta[i,j]
zm[i] /= len(lon_axis)
zmv = cdms2.createVariable( zm, axes=[lat_axis], id=vid )
return zmv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2latlon( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' ]\n axes_string = '('+')('.join(axis_names)+')'\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv",
"def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv",
"def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def reduce2levlat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes_string )\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',\n attributes={'units':lat_axis.units},\n copy=True )\n return latmv",
"def glcm_stat_mean(glcm_matrix):\n return np.mean(glcm_matrix)",
"def reduce_time( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id=='time' ]\n axes_string = '('+')('.join(axis_names)+')'\n if len(axes_string)>2:\n for ax in axes:\n # The averager insists on bounds. Sometimes they don't exist, especially for obs.\n if ax.id!='lat' and ax.id!='lon' and not hasattr( ax, 'bounds' ):\n ax.setBounds( ax.genGenericBounds() )\n avmv = averager( mv, axis=axes_string )\n else:\n avmv = mv\n avmv.id = vid\n avmv.units = mv.units\n\n return avmv",
"def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lon_axis = lonAxis(mv)\n latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',\n attributes={'units':lon_axis.units},\n copy=True )\n return latmv",
"def unconditional_x2_mean(self):\n mu2s = np.array([d._mu_2() for d in self.conditionalMVNs])\n return self.weights * mu2s",
"def mean_velocity(self, ax):\n u_mod_bar = self.mean_f(self.uf_abs)\n contourf = ax.contourf(u_mod_bar, self.levels_u)\n ax.set_title(r'Mean speed $\\overline{|u|_t}(x, z)$')\n ax.set_xlabel('horizontal')\n ax.set_ylabel('vertical')\n return contourf",
"def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n for ax in mv.getAxisList():\n if ax.getBounds() is None:\n ax._bounds_ = ax.genGenericBounds()\n timeax = timeAxis(mv)\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n if mvseas is None:\n # Among other cases, this can happen if mv has all missing values.\n return None\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def reprojection_error_mean(*args, **kwargs):\n return np.mean(reprojection_error_vector(*args, **kwargs))",
"def mean_velocity_(self, ax):\n u_mod_bar = self.mean_f(self.uf__abs)\n contourf = ax.contourf(u_mod_bar, self.levels_u_)\n ax.set_title(r'Mean speed $\\overline{|u|_t}(x, z)$')\n ax.set_xlabel('horizontal')\n ax.set_ylabel('vertical')\n return contourf",
"def conditional_component_means(self, x2 = None):\n return np.array([d.conditional_mean(x2) for d in self.conditionalMVNs])",
"def mean(self):\n return self.vmean",
"def timeave_old( mv ):\n # I haven't thought yet about how missing values would work with this...\n # If time intervals be unequal, this will have to be changed...\n sh = mv.shape # e.g. [312,90,144] for t,lat,lon\n n = sh[0]\n # BTW, this is the size of everything else:\n # n2 = reduce( operator.mul, sh[1:] ) # e.g. 90*144=12960\n mvta = numpy.sum( mv.__array__(), axis=0 )\n mvta /= n\n return mvta",
"def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n if levAxis(mv) is None: return None\n if latAxis(mv) is None: return None\n axes = allAxes( mv )\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n\n if timeax.units=='months':\n # Special check necessary for LEGATES obs data, because\n # climatology() won't accept this incomplete specification\n timeax.units = 'months since 0001-01-01'\n mvseas = seasons.climatology(mv)\n\n axis_names = [ a.id for a in axes if a.id!='lev' and a.id!='lat' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n avmv.id = vid\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n\n return avmv",
"def MeanCenter(X, mc_row, mc_col):\n data_headers = X.select_dtypes(include=[\"float64\"]).columns\n if mc_row:\n X[data_headers] = X[data_headers].sub(X[data_headers].mean(axis=1), axis=0)\n if mc_col:\n X[data_headers] = X[data_headers].sub(X[data_headers].mean(axis=0), axis=1)\n return X",
"def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require getBounds() to work.\n # If it doesn't, we'll have to give it one.\n # Setting the _bounds_ attribute will do it.\n timeax = timeAxis(mv)\n if timeax.getBounds()==None:\n timeax._bounds_ = timeax.genGenericBounds()\n mvseas = seasons.climatology(mv)\n \n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' and a.id!='lon' and a.id!='time']\n axes_string = '('+')('.join(axis_names)+')'\n\n if len(axes_string)>2:\n for axis in mvseas.getAxisList():\n if axis.getBounds() is None:\n axis._bounds_ = axis.genGenericBounds()\n avmv = averager( mvseas, axis=axes_string )\n else:\n avmv = mvseas\n if avmv is None: return avmv\n avmv.id = vid\n if hasattr(mv,'units'): avmv.units = mv.units\n avmv = delete_singleton_axis( avmv, vid='time' )\n avmv.units = mv.units\n return avmv",
"def calcVmoy(self, V, idx, n):\n i = max(idx-n, 0)\n f = min(idx+n+1, V.shape[0])\n av = np.mean(V[i:f])\n return av",
"def geoMeanAxisPoints(self, var):\n varID = var.id\n var = genutil.statistics.geometricmean(var, axis=\"(%s)\" % self.axis.id)\n var.id = varID\n return var",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def getMeanRMS (self,arr):\n # in base class we return redshift and zero varinace\n # repeat that here because mean RMS is meaningless for Template SED PDFs\n N=len(arr)\n return arr[\"z\"],np.zeros(N)",
"def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean",
"def V_mean(self) -> Optional[np.ndarray]:\n\n def _retrieve(fm: VariationalFM) -> np.ndarray:\n return fm.V\n\n return runtime_error_to_optional(self, _retrieve)"
] | [
"0.6196711",
"0.61690545",
"0.6022679",
"0.5879041",
"0.57352066",
"0.56729776",
"0.5658441",
"0.56343806",
"0.56343806",
"0.5478811",
"0.54540074",
"0.5406501",
"0.5377368",
"0.53606015",
"0.53185415",
"0.5281884",
"0.52813756",
"0.5268177",
"0.525204",
"0.52350086",
"0.51889944",
"0.51657677",
"0.5136341",
"0.5127716",
"0.5122016",
"0.5113879",
"0.5089201",
"0.5075762",
"0.5042679",
"0.50215214"
] | 0.6457875 | 0 |