query            stringlengths    9 .. 9.05k
document         stringlengths    10 .. 222k
metadata         dict
negatives        sequencelengths  30 .. 30
negative_scores  sequencelengths  30 .. 30
document_score   stringlengths    4 .. 10
document_rank    stringclasses    2 values
Add the content to a specific waiting area
def addContent(self, wait, start, content):
    if self.checkAddContent(wait, start, content):
        for i in range(content.length):
            self.w[wait][start+i] = (start+i, content)
    else:
        print content, "can not override existing content"
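For context, this addContent method (and the delContent, printWaiting, __eq__, and select_bruteforce documents in the rows below) all belong to the same small waiting-area scheduler. The sketch below reconstructs the surrounding Content and Area classes from snippets visible elsewhere in this dump: checkAddContent appears verbatim among this row's negatives, and Content(1, 4, 20) / Area('a1', 1.0) appear in the mainSelection snippet among the last row's negatives. The three-row default and the keyword name rows are assumptions, not part of the dataset.

class Content(object):
    def __init__(self, id, length, value):
        # id, length and value are the three fields compared in __eq__
        self.id = id
        self.length = length
        self.value = value

class Area(object):
    def __init__(self, name, weight, rows=3):
        # each waiting row holds 20 slots; checkAddContent rejects start+i > 19
        self.name = name
        self.weight = weight
        self.w = [[None] * 20 for _ in range(rows)]

    def checkAddContent(self, wait, start, content):
        for i in range(content.length):
            if start + i > 19:
                return False
            elif self.w[wait][start + i] != None:
                return False
        return True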
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleContentComplete():", "def addMainWindow(self,appendToTask):\n self.appendToTask = appendToTask", "def set_content(self, widget):\n\t\tpass", "def _add_content_to_hex_view(self) -> None:\r\n \r\n try:\r\n queue_item = self.hex_thread_queue.get(block = False)\r\n\r\n if queue_item is None or self.abort_load: \r\n self._cleanup_hex_content(is_success = not self.abort_load)\r\n return\r\n elif isinstance(queue_item, Exception):\r\n raise queue_item\r\n\r\n textbox_address_content, textbox_hex_content, textbox_ascii_content = queue_item\r\n\r\n self.textbox_hex.insert(tk.END, textbox_hex_content.getvalue())\r\n self.textbox_ascii.insert(tk.END, textbox_ascii_content.getvalue())\r\n self.textbox_address.insert(tk.END, textbox_address_content.getvalue())\r\n self.textbox_address.tag_add(TAG_JUSTIFY_RIGHT, 1.0, tk.END)\r\n self.root.after_idle(self._add_content_to_hex_view)\r\n except tk.TclError as e:\r\n self._cleanup_hex_content(is_success = False)\r\n if not self.abort_load:\r\n raise e\r\n except queue.Empty:\r\n self.root.after_idle(self._add_content_to_hex_view)\r\n except Exception as e:\r\n self._cleanup_hex_content(is_success = False)\r\n self.callbacks[Events.SHOW_ERROR](f\"Error: {str(e)}\")", "def addContent(text):", "def checkAddContent(self,wait,start,content):\n\t\tfor i in range(content.length):\n\t\t\tif start+i > 19:\n\t\t\t\treturn False\n\t\t\telif self.w[wait][start+i] != None:\n\t\t\t\treturn False\n\t\treturn True", "def wait_for_overlay(self,message=None):\n\n loctype,loctext = self._po._split_locator(self.locators['shareoverlay'])\n WebDriverWait(self._browser,10).until(\n EC.visibility_of_element_located((loctype,loctext)),\n message=message)\n WebDriverWait(self._browser,10).until_not(\n EC.visibility_of_element_located((loctype,loctext)),\n message=message)", "def update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))", "def _add_contents(self, window, contents):\r\n\r\n # If we are adding specific contents then we ignore any default view\r\n # visibility.\r\n #\r\n # fixme: This is a bit ugly! 
Why don't we pass the visibility in to\r\n # 'window.add_view'?\r\n for view in window.views:\r\n view.visible = False\r\n \r\n for item in contents:\r\n self._add_perspective_item(window, item)\r\n \r\n return", "def wait(self):\n self.mainloop().wait()", "def visit(self, node):\n self.body.append('<div id =\"discourse-comments\"></div>')", "def add_to(self, main_lay):\n cont = self._cont\n lay = qt.QHBoxLayout(cont)\n lay.addWidget(GC.create_icon_button(\"wizards/add.png\", self.add_cond))\n lab = qt.QLabel(self._title)\n lab.setSizePolicy(Policy.Expanding, Policy.Preferred)\n lay.addWidget(lab)\n if not self._default_cond:\n cont.setEnabled(False)\n\n main_lay.addWidget(cont)\n main_lay.addWidget(self._tab)\n self.setParent(main_lay.parentWidget())", "def replace_wait_box(self, text):\n assert QT_AVAILABLE, \"This function can only be used in a Qt runtime\"\n self._waitbox.set_text(text)", "def massage_addcontent(self) -> str:\n pass", "def waiting_confirmation(self):", "def createDialogBox(self, x=-0.1, y=-0.85, texts=['Insert Dialog Here'], \n textColors=['orange'],displayNextMessage=False):\n if self.dialogBox == None:\n if globals.isTutorial:\n texts[0] = \" ====================== Cosmica Tutorial Step: %s of %s ======================\\n\\n%s\" % (globals.tutorialStep, globals.tutorialTotalSteps, texts[0])\n self.dialogBox = dialogbox.DialogBox(path=self.guiMediaPath, x=x, y=y, texts=texts, textColors=textColors)\n self.dialogBox.setMyMode(self)\n self.gui.append(self.dialogBox)", "def spawn( self, event=None ):\n #print \"a\", self.msgVar.get(), \"\\n\"\n self.visible = 1\n self.after( int( self.delay * 1000 ), self.show ) # The after function takes a time argument in miliseconds\n #self.show() #ohnheiser hack", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait(self):\n pass", "def wait(self):\n pass", "def refresh_content(self, language):\r\n self.content = _get_writing_content(self.button.liste, language, self.button.number) # updates content\r\n self.active = self.button.active # writing is dispalyed if button is displayed\r", "def fill_active(self, layout):\n self.progress_triggers = QLineEdit()\n layout.addRow(\"progress triggers\", self.progress_triggers)", "def set_content(self, widget):\n\t\tself.content = widget", "def append(self, content):\n if hasattr(content, 'render'):\n self.content.append(content)\n else:\n self.content.append(TextWrapper(str(content)))", "def _credits_dialogue(self):\n credits_dialogue = _Credits(self)\n self.root.wait_window(credits_dialogue.top)", "def add_content(self, addition):\n self.content = self.content + addition", "def XPPlaceWidgetWithin(inSubWidget, inContainer):\n pass" ]
[ "0.5674969", "0.53723633", "0.5367427", "0.525665", "0.5225978", "0.5200504", "0.51720566", "0.50997674", "0.5084396", "0.5079925", "0.5061745", "0.5059036", "0.50525177", "0.50514406", "0.504467", "0.50442374", "0.5042535", "0.50417733", "0.50417733", "0.50417733", "0.50417733", "0.5040843", "0.5040843", "0.50037134", "0.50015736", "0.49968603", "0.4985419", "0.49626914", "0.49087572", "0.49078646" ]
0.635612
0
Delete the content from a specific waiting area
def delContent(self, wait, start, content):
    for i in range(content.length):
        self.w[wait][start+i] = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_delete():\r\n del win.box[-1] # delete last line\r\n #del win.box[0:-1] # delete all lines \r", "def delete():", "def delete_this_region(self):", "def del_mail(self,box):\n self.logger.debug('delete the mail of %s',box)\n if not self.device(text=box).exists:\n self.device(description='Open navigation drawer').click()\n if not self.device(text=box).wait.exists(timeout = self.timeout):\n self.device.swipe(250,300,250,1200)\n self.device.delay(2)\n self.device(text=box).click()\n self.device.delay(1)\n if self.loading() == False:\n self.logger.debug('loading mail Failed')\n return False\n if self.device(text = \"Empty Trash\").exists:\n self.device(text = \"Empty Trash\").click()\n if self.device(text = \"Delete\").wait.exists(timeout = self.timeout):\n self.device(text = \"Delete\").click()\n self.device.delay(2)\n if self.device(text='No connection.').exists:\n return False \n if self.device(text='No connection.').wait.exists(timeout = 30000):\n return False\n else: \n maxtime=0\n# while not self.device(textContains = self.appconfig(\"Email\",\"empty_text\")).exists:\n while not self.device(resourceId = self.appconfig.id(\"id_empty\")).exists:\n if self.device(className='android.widget.ListView').child(className='android.widget.FrameLayout',index=0).exists:\n self.device(className='android.widget.ListView').child(className='android.widget.FrameLayout',index=0).long_click()\n if self.device(description='Delete').wait.exists(timeout = self.timeout):\n self.device(description='Delete').click()\n self.device.delay(2)\n if self.device(text= self.appconfig.id(\"no_connection\")).exists:\n return False\n if maxtime>100:\n return False\n maxtime+=1\n\n self.logger.debug('mail of the %s has delete complete',box)\n return True", "def delete(self):\n for i, message in enumerate(self.owner.messages):\n if message == self.body:\n del self.owner.messages[i]\n break", "def delete_box(self):\n\n # Confirm user want to delete box\n delete = self.delete_box_confirmation(self.data[0])\n\n # Ask if user want to delete all files inside \n delete_files = self.delete_all_files_confirmation()\n\n # If user wants to delete the box and all files inside:\n if delete_box and delete_files:\n try:\n # Delete all files\n delete_all_files(self.data[0])\n # Delete box\n delete_box(self.data[0])\n except:\n self.show_popup(\"Error\", \"Problem deleting box.\")\n else:\n self.show_popup(\"Success\", f\"You have deleted Box {self.data[0]} and all files inside.\")\n # User just wants to delete the box and leave the files\n elif delete_box:\n try:\n # Delete box\n delete_box(int(self.data[0]))\n set_box_number_null(int(self.data[0]))\n except:\n self.show_popup(\"Error\", \"Problem deleting box.\")\n else:\n self.show_popup(\"Success\", f\"You have deleted Box {self.data[0]}.\")\n\n self.close()\n self.parent().load_data()", "def delete_leader(self):", "def delete_selection():\r\n add_question_Frame.grid_forget()\r\n quiz_frame.grid_forget()\r\n one_person_quiz_frame.grid_forget()\r\n question_list_frame.grid_forget()\r\n search_question_frame.grid_forget()\r\n\r\n select_question_frame.grid(row=0, column=0, rowspan=10, columnspan=10, sticky=N + E + S + W)\r\n\r\n with open('Question_pool.txt', 'r') as fp:\r\n line = fp.readline()\r\n while line:\r\n list_delete.insert(END,line.split(',')[0])\r\n line = fp.readline()\r\n list_delete.grid()\r\n Scrollbar(add_question_Frame,orient=\"vertical\")", "def delete_part():\n if request.method==\"POST\":\n if request.form['del']==\"\":\n return render_template(\n 
\"delete-parterre.html\",\n liste = get_parterres(),\n title = \"Supprimer un parterre\")\n else:\n a = get_parterre(int(request.form['del']))\n a.clear_datas()\n for capteur in a.get_capteurs():\n a.delete_capteur(capteur)\n capteur.set_parterre(1)\n p = Actions(\n contenu = \"Suppresion du parterre \"+a.get_name(),\n liste = 1\n )\n db.session.add(p)\n db.session.delete(a)\n db.session.commit()\n return render_template(\n \"delete-parterre.html\",\n liste = get_parterres(),\n title = \"Supprimer un parterre\")", "def delete_item_complete():\n curItem = complete_tereeview.focus().strip('#')\n\n with open(\"Other_title_categories.json\", \"r\") as other_categories_fo:\n other_categories_foData = json.load(other_categories_fo)\n completeDict = other_categories_foData[\"complete\"]\n selectetItemData_complete = completeDict[curItem]\n\n print(\"deleting : \", curItem)\n\n del completeDict[curItem]\n other_categories_foData[\"complete\"] = completeDict\n\n with open('Other_title_categories.json', 'w') as f:\n json.dump(other_categories_foData, f, indent=2)\n print(\"done deleting the title \", curItem)\n\n complete_tereeview.delete(complete_tereeview.focus())", "def del_line(self, buttoninstance):\r\n try:\r\n widgets = self.timecompoundlist.pop()\r\n except IndexError:\r\n return\r\n for w in widgets:\r\n self.ids.inlayout.remove_widget(w)\r\n #del the line in the jsonfile\r\n store = get_store()\r\n lastval = store.get('Nbtimecompound')[\"value\"]\r\n store.delete('Timecompound'+str(lastval))\r\n store.put('Nbtimecompound', value=lastval-1)\r\n self.ids.inlayout.rows = 5 + store.get('Nbtimecompound')[\"value\"]\r\n #force the good size\r\n self.ids.tscrollview.change_child_height(self.ids.tscrollview.height)", "async def delAllMsg(self, waitingTime=0):\n for msg in self.msgToDelete:\n await msg.delete(delay=waitingTime)", "def delete_mail(self, box):\n self._logger.debug('delete the mail of %s', box)\n\n if not self.enter_mailbox(box):\n return False\n maxtime = 0\n while self.check_not_empty(100):\n if self.refresh_emailbox(60) <> 2:\n return False\n if box == 'Trash':\n if self._device(resourceId='com.tct.email:id/empty_trash').exists:\n self._device(resourceId='com.tct.email:id/empty_trash').click()\n self._device.delay(2)\n if self._device(resourceId='android:id/button1', text='DELETE').exists:\n self._device(resourceId='android:id/button1', text='DELETE').click()\n self._device.delay(3)\n else:\n index = 1\n if self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n className='android.widget.FrameLayout', instance=index).exists:\n if self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n resourceId='com.tct.email:id/outbox').exists:\n if self._device(resourceId='com.tct.email:id/conversation_list_view').getChildCount() == 1:\n return True\n else:\n self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n className='android.widget.FrameLayout', instance=index).long_click()\n self._device.delay(2)\n else:\n self._device(resourceId='com.tct.email:id/conversation_list_view').child(\n className='android.widget.FrameLayout', instance=index).long_click()\n self._device.delay(2)\n if self._device(description='Select all').exists:\n self._device(description='Select all').click()\n self._device.delay(2)\n if self._device(description='Delete').exists:\n self._device(description='Delete').click()\n self._device.delay(2)\n if self._device(description='Discard failed').exists:\n self._device(description='Discard failed').click()\n 
self._device.delay(2)\n if self._device(resourceId='android:id/button1', text='OK').exists:\n self._device(resourceId='android:id/button1', text='OK').click()\n self._device.delay(2)\n if self._device(resourceId='com.tct.email:id/empty_text').exists:\n break\n if maxtime > 30:\n break\n maxtime += 1\n if self._device(resourceId='com.tct.email:id/empty_text').exists:\n self._logger.debug('mail of the %s has delete complete', box)\n return True\n else:\n return False", "def deleteOrDelay(self):\n self.delete()", "def delete_menu():", "def test_move_delete(self):\r\n source_course = CourseLocator(org='testx', offering='GreekHero', branch='draft')\r\n dest_course = CourseLocator(org='testx', offering='GreekHero', branch=\"published\")\r\n head = source_course.make_usage_key('course', \"head12345\")\r\n chapter2 = source_course.make_usage_key('chapter', 'chapter2')\r\n problem1 = source_course.make_usage_key('problem', 'problem1')\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\", \"problem3_2\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\"])\r\n # now move problem1 and delete problem3_2\r\n chapter1 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter1\"))\r\n chapter3 = modulestore().get_item(source_course.make_usage_key(\"chapter\", \"chapter3\"))\r\n chapter1.children.append(problem1)\r\n chapter3.children.remove(problem1.map_into_course(chapter3.location.course_key))\r\n modulestore().delete_item(source_course.make_usage_key(\"problem\", \"problem3_2\"), self.user)\r\n modulestore().xblock_publish(self.user, source_course, dest_course, [head], [chapter2])\r\n expected = [\"head12345\", \"chapter1\", \"chapter3\", \"problem1\"]\r\n self._check_course(source_course, dest_course, expected, [\"chapter2\", \"problem3_2\"])", "def test_upload_area_cleanup(self):\n vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'\n p = Path('import')\n files = list(p.glob('**/urn:mrn:s124:*'))\n for item in files:\n print(item)\n os.remove(str(item))\n pass", "def delete_activity():\n pass", "def remove():", "async def wait(self, ctx):\n msg = await ctx.channel.send(self.progression + \", veuillez patientez.\")\n self.msgToDelete.append(msg)", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_delete_activity_template(self):\n pass", "def clear_message_area( self, ):\n self.msg_text.delete( 1.0, Tk.END )", "def test_meeting_poll_delete(self):\n pass", "def DelDiv(self):\n if self.created:\n self.CloseImage()\n command = \"\"\"$('#{}').remove();\"\"\".format(self.wid)\n get_ipython().run_cell_magic('javascript', '', command)\n self.created = False\n self.wid = uuid.uuid4().hex", "def delete_question(event):\r\n with open('Question_pool.txt', 'r+') as fp:\r\n w = event.widget\r\n value = w.get(ANCHOR)\r\n if edit_mode == FALSE:\r\n list_delete.delete(ANCHOR)", "def delete_plugin_data(self):", "def delete_selection(self):\n assert len(self.selection) == 1\n\n widget = self.selection[0]\n gadget = Gadget.from_widget(widget)\n if gadget:\n gapi.delete_gadget(self, gadget)\n\n elif (isinstance(widget, Placeholder) and widget.is_deletable()):\n if len(widget.get_parent().get_children()) > 1:\n cmd = CommandBoxDeletePlaceholder(widget)\n command_manager.execute(cmd, self)", "def on_leave(self):\r\n for widgets in self.timecompoundlist:\r\n for w in widgets:\r\n 
self.ids.inlayout.remove_widget(w)\r\n self.ids.inlayout.remove_widget(self.add_button)\r\n self.ids.inlayout.remove_widget(self.del_button)", "async def delete(self):\n return await self.set_message(text='')" ]
[ "0.60315347", "0.58551013", "0.58246267", "0.5749008", "0.5742068", "0.56990933", "0.5674887", "0.5603075", "0.5589578", "0.5526692", "0.5517584", "0.5516292", "0.5492436", "0.5492046", "0.5476662", "0.5476108", "0.5449119", "0.54362226", "0.5420728", "0.5418204", "0.5415539", "0.54097635", "0.54017526", "0.53970766", "0.53938353", "0.5372496", "0.5362493", "0.53624433", "0.53474474", "0.5342696" ]
0.7333313
0
Print the content in each waiting area
def printWaiting(self):
    for wait in self.w:
        w_print = ""
        for c in wait:
            if c:
                w_print += str(c[1])
            else:
                w_print += 'NO'
            w_print += " "
        print w_print
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_contents(self):\n try:\n # We only wait for 0.001 seconds.\n self.print_all_contents(indef_wait=False)\n except NotYourTurnError:\n # It's not our turn, so try again the next time this function is called.\n pass", "def print_all_contents(self, *args, **kwargs):\n while self.has_to_print():\n # Try to print the first element in the queue.\n tar_to_print: str = self.print_queue[0].tar\n self.print_monitor.wait_turn(self, tar_to_print, *args, **kwargs)\n\n # Print all applicable values in the print_queue.\n while self.print_queue and (self.print_queue[0].tar == tar_to_print):\n msg: str = self.print_queue.popleft().msg\n print(msg, end=\"\", flush=True)\n\n # If True, then all of the output for extracting tar_to_print was in the queue.\n # Since we just finished printing all of it, we can move onto the next one.\n if self.is_output_done_enqueuing[tar_to_print]:\n # Let all of the other workers know that this worker is done.\n self.print_monitor.done_dequeuing_output_for_tar(self, tar_to_print)", "def do_block():\n print_column()\n print_rows()", "def do_block():\n print_column()\n print_rows()", "def show_quest(self):\n for quest_line in self.qtext:\n print(quest_line)\n time.sleep(1)", "def print_queue(self):\n for i in self.Obs:\n print(i)", "def print_queue(self):\n for value in self.data:\n element = f'| {value} |'\n print(element)", "def print_simulation_sequence(self):\n print('-----------------------------------------------')\n for msg_group in self.msg_group_list:\n msg_group.print()\n print('-----------------------------------------------')", "def display_agents(self):\n for agent in self.scheduler.agents:\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n r = to_geometry(agent.range_)\n print('define agent{} ellipse 4 4 white {} {}'.format(id_, x, y))\n print('define agentr{0} ellipse {1} {1} white {2} {3}'.format(\n id_, r, x, y))\n self.change_agent_status(agent)", "def visualizar(self):\n print(self.queue)", "def display_loop(self):\n from time import sleep\n self.displaying = True\n while self.displaying:\n print self\n sleep(.083)\n print loc(self.y, self.x) + ' '*self.size", "def print_all_buffers(self): \n for robot in range(self.no_robots):\n print(\"Buffer for robot: \" + str(robot) + \":\")\n print(\"Task ids: X, Y goal: Z orientation: Deadline:\")\n individual_buffer = self.all_buffers[robot]\n if isinstance(individual_buffer, np.float) or individual_buffer == []:\n print(\"Buffer is empty for robot \" + str(robot) + \"!\")\n else:\n for buff_row in range(individual_buffer.shape[0]):\n task = individual_buffer[buff_row]\n print(\"%d %f, %f %f %f\" % (int(task[0]), task[1], task[2], task[3], task[4]))", "async def print_processor(self) -> None:\n try:\n while True:\n while self.print_queue.empty() is not True:\n stub = await self.print_queue.get()\n if isinstance(stub, str):\n print(stub)\n elif isinstance(stub, tuple):\n if stub[0] == \"error\":\n print(f\"{r}{stub[1]}{reset}\")\n elif stub[0] == \"warning\":\n print(f\"{y}{stub[1]}{reset}\")\n elif stub[0] == \"success\":\n print(f\"{g}{stub[1]}{reset}\")\n elif stub[0] == \"bold\":\n print(f\"{bold}{stub[1]}{reset}\")\n else:\n print(f\"{stub[1]}\")\n self.print_queue.task_done()\n await asyncio.sleep(0.002)\n except asyncio.CancelledError:\n print('Closing the RedCisco application... 
Cleaning up running tasks...\\n')", "def loading(delay):\r\n\r\n for i in range(3):\r\n\r\n print \".\",\r\n sys.stdout.flush()\r\n time.sleep(delay)\r\n\r\n print(\"\")", "def displayThreads(self):\n print('{:18} {:20} {}'.format('THREAD NAME','INFO','IS ALIVE'))\n for key in sorted(list(self.threadlist.keys())):\n print('{!s:18}: {!s:20} {}'.format(key,\n self.threadlist[key], self.threadlist[key].isAlive()))", "def render_content(tab):\n # rander_holder = True\n for index in range(len(figs)):\n \"\"\"Render by start and callback.\"\"\"\n tabbi = f'tab-{index+1}'\n print(tabbi, 'in tabbi')\n if tab == tabbi:\n # returns the complete content for the browser\n return get_content(key_list[index], index)", "def process_list(self):\n for p in self._queue:\n print \"%-5d %-10s %-10s %2d %10s %10s\" % (p.id, p.name,\n p.status['type'], p.priority, \n self.print_cr_tree(p.creation_tree['parent']), \n self.print_cr_tree(p.creation_tree['child']))", "def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"", "def print_content(self):\n if self.piece!=None:\n print('%s : %s %s' % (self.name, self.piece.color, self.piece.piece_type))\n else:\n print('%s : empty' % (self.name))", "def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()", "def display_collected():\n os.system('clear') # clearscreen\n print('BS4 widget generator')\n print('-' * 20)\n print('options selected:')\n for col in collected:\n print(col)\n\n print('-' * 20)\n\n return", "def show_current(self):\n for packet in self.station.genLoopPackets():\n print(packet)\n break", "def __write_all_places(self):\n place_nbr = 1\n self.doc.start_paragraph(\"Eclair-Report\")\n self.progress.set_pass(_(\"Generating report\"), len(self.place_handles))\n self.result=[]\n for handle in self.place_handles:\n city = self.__write_place(handle, place_nbr)\n self.__write_referenced_events(handle, city)\n place_nbr += 1\n # increment progress bar\n self.progress.step()\n self.result.sort()\n for msg in self.result:\n # AttributeError: 'GtkDocDocument' object has no attribute 'add_text\n self.doc.write_text(\"%s\\n\" % msg)\n self.doc.end_paragraph()", "def PrintProgress(self):\n print ' Examined %d nodes, found %d unique...' % (\n self.nodes_examined, self.unique_nodes\n )", "def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". \", task.name, \"(\", task.priority, \")\")\n i += 1", "def prnt(self):\n print \"%s %s %s %s\" % (time.ctime(), self.time, self.who, self.region)\n print \"%s %s %s\" % (time.ctime(), ' ' * len(self.time), self.text)\n for r in self.recipients:\n print \"%s %s %s\" % (time.ctime(), ' ' * len(self.time), r)", "def display(self):\n for row in self._board_area:\n print(row, end=\"\\n\")", "def WalkAll(self):\n while self.Next():\n pass\n if self.interval:\n self.PrintProgress()", "def print_block():\n do_twice(do_block)\n do_twice(do_block)\n print_column()", "def list_blocks(self, _):\n print(self.data.name)" ]
[ "0.68055356", "0.63277084", "0.6271929", "0.6271929", "0.6091832", "0.60454845", "0.5984652", "0.5973602", "0.59443873", "0.59372497", "0.59012204", "0.58839536", "0.58236563", "0.57862055", "0.57712454", "0.57689494", "0.5733052", "0.56877106", "0.5687649", "0.5642108", "0.5634161", "0.5614449", "0.56139654", "0.56138396", "0.55961484", "0.55843616", "0.55830294", "0.55531865", "0.55463207", "0.5542718" ]
0.7349046
0
Two content objects are equal only when their id, length, and value are all the same
def __eq__(self, other):
    if other != None:
        return self.id == other.id and \
               self.length == other.length and \
               self.value == other.value
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_identical(self):\n write this test!", "def test_id(self):\n self.assertNotEqual(self.bm1.id, self.bm2.id)\n self.assertEqual(type(self.bm1.id), str)", "def __eq__(self, other):\n contentsmatchfail = False\n equal = False\n for i in self.contents:\n if i in other.contents:\n pass\n else:\n contentsmatchfail = True\n for i in other.contents:\n if i in self.contents:\n pass\n else:\n contentsmatchfail = True\n if self.name == other.name and self.name == other.name and contentsmatchfail == False:\n equal = True\n return equal", "def isEqual (self, other) :\n return self.id == other.getIdent ()", "def __eq__(self, other):\n return isinstance(other, self.__class__) and \\\n self.content == other.content and self.justification == other.justification", "def testEquality(self):\n pass", "def test_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def testEquality(self):\n list1 = [1,2]\n list2 = [1,2]\n \n # not the same object\n self.assertIsNot(list1,list2)\n self.assertNotEqual(id(list1),id(list2))\n self.assertFalse(list1 is list2)\n \n # but content is equal\n self.assertEqual(list1,list2)\n self.assertTrue(list1 == list2)\n \n ### \n list1 = []\n list2 = []\n self.assertFalse(list1 is list2) \n self.assertEqual(list1,list2)", "def testEquality(self) -> None:\n r = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id')\n other = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id')\n self.assertEqual(r, other)\n\n other = data_types.Result('test_2', ('win', 'x86'), (1, 10), 'id')\n self.assertNotEqual(r, other)\n\n other = data_types.Result('test_1', ('win', 'arm64'), (1, 10), 'id')\n self.assertNotEqual(r, other)\n\n other = data_types.Result('test_1', ('win', 'x86'), (2, 11), 'id')\n self.assertNotEqual(r, other)\n\n other = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id_2')\n self.assertNotEqual(r, other)\n\n other = None\n self.assertNotEqual(r, other)", "def test_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def __eq__(self, other: 'StorageRecord') -> bool:\n\n return self.type == other.type and self.id == other.id and self.value == other.value and self.tags == other.tags", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def testCreateOpaqueValueWithSameContent(self):\n user = createUser(u'name', u'password', u'User', u'[email protected]')\n user.namespaceID = createNamespace(user, user.username, None).id\n tag = createTag(user, user.namespace, u'tag')\n value1 = createTagValue(user.id, tag.id, uuid4(), None)\n value2 = createTagValue(user.id, tag.id, uuid4(), None)\n self.store.commit()\n createOpaqueValue(value1.id, 'content')\n createOpaqueValue(value2.id, 'content')\n fileID = sha256('content').hexdigest()\n result = self.store.find(OpaqueValue, OpaqueValue.fileID == fileID)\n self.assertNotIdentical(None, result.one())\n result = self.store.find(OpaqueValueLink,\n OpaqueValueLink.fileID == fileID)\n self.assertEqual(2, result.count())", "def __eq__( self, other ):\n return self.data == other.data", "def __eq__(self, other):\n if not isinstance(other, PropertyContent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\r\n if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or 
self.attrs != other.attrs or len(self) != len(other):\r\n return False\r\n for i in range(0, len(self.contents)):\r\n if self.contents[i] != other.contents[i]:\r\n return False\r\n return True", "def test_compare() -> None:\n\n obj = SpecificLocation()\n obj2 = SpecificLocation()\n\n assert obj != obj2\n\n obj._id = obj2.id\n\n assert obj == obj2", "def test_different_id(self):\n base1 = BaseModel(89)\n self.assertNotEqual(base1.id, 89)\n base1 = BaseModel(\"hello\")\n self.assertNotEqual(base1.id, \"hello\")\n base1 = BaseModel([1, 2, 3])\n self.assertNotEqual(base1.id, [1, 2, 3])", "def test_attributes_equal(self):\n test1 = self.Test({ 'id': 2, 'name': 'Poop Head' })\n test2 = self.Test({ 'id': 2, 'name': 'Poop Head' })\n self.assertEqual(test1, test2)", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self.type == other.type and\n self.data == other.data)", "def test_record_eq_record_different_values(self):\n zone = Zone('test.example.com')\n data = {'type': 'A', 'ttl': 30, 'values': ['1.1.1.1', '2.2.2.2']}\n record_current = Record(zone, 'test-record', data)\n data = {'type': 'A', 'ttl': 30, 'values': ['1.1.1.1', '3.3.3.3']}\n record_desired = Record(zone, 'test-record', data)\n self.assertTrue(record_current != record_desired)", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def test_difference_id(self):\n self.assertFalse(\n self.factory.create_type('iphone') is self.factory.create_type(\n 'iphone'))", "def test_instance_equality(self):\r\n class EqualityModel(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel(pk=0)\r\n m1 = EqualityModel(pk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)", "def test_instance_equality(self):\n class EqualityModel(Model):\n pk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel(pk=0)\n m1 = EqualityModel(pk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def test_ordered_has_same_value_diff_id(cls):\n sl = orderedstructs.SkipList(object)\n obj_a = cls(0)\n sl.insert(obj_a)\n assert sl.has(obj_a)\n obj_b = cls(0)\n assert id(obj_a) != id(obj_b)\n assert sl.has(obj_b)", "def _id_of_DataArrays_equal(da1, da2):\n if id(da1) == id(da2):\n raise ValueError(\n f\"Cannot add the same object ({da1.name}) twice! Create a copy first.\"\n )\n if id(da1.values) == id(da2.values):\n raise ValueError(\n f\"DataArrays {da1.name} and {da2.name} refer to the same data! Create a copy first.\"\n )", "def __eq__(self, other):\n return isinstance(other, type(self)) and self.size == other.size", "def __eq__(self, other):\n return int(self.id) == int(other.id)" ]
[ "0.64666635", "0.6464577", "0.642242", "0.63855004", "0.63848305", "0.6381619", "0.6336744", "0.62777966", "0.6222118", "0.62202317", "0.62084025", "0.6182196", "0.6180592", "0.61721146", "0.61274123", "0.61194813", "0.60897243", "0.6081066", "0.6071086", "0.6068542", "0.606203", "0.6061425", "0.6054601", "0.60491776", "0.604825", "0.60466707", "0.6040703", "0.6000434", "0.59999776", "0.5999223" ]
0.6664983
0
Combinatorial search algorithm. Given area1, area2, ... and a given time, return a selection that maximizes the total weight, in O(n^k), where k is the number of areas and n is the number of waiting contents in each area at one time. Backtrack to generate all solutions and compare each with the current best solution; if it is better, replace the current best with the new one.
def select_bruteforce(self, time, a1, a2, *args):
    areas = []
    areas.append(a1)
    areas.append(a2)
    areas.extend(args)
    candidates = [[wait[time][1] if wait[time] != None else None \
                   for wait in area.w] for area in areas]
    weights = [area.weight for area in areas]
    input = (candidates, weights)
    best = [None] * len(candidates)
    a = [-1] * len(areas)
    k = -1
    self.backtrack(a, k, input, best)
    print 'bruteforce optimal solution,', zip(areas, best)
    print 'bruteforce optimal cost,', self.calculateCost(best, weights)
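The select_bruteforce document above relies on two helpers, backtrack and calculateCost, that are not included in this dump. Below is a hedged sketch of what they might look like, following the backtracking description in this row's query and the weight * value cost used by the select_greedy snippet among this row's negatives; the exact signatures, the no-duplicate-content rule, and the in-place update of best are assumptions.

def calculateCost(self, selection, weights):
    # weighted sum of the selected contents' values; an empty pick counts as 0
    return sum(w * c.value if c != None else 0
               for w, c in zip(weights, selection))

def backtrack(self, a, k, input, best):
    # enumerate one candidate (or nothing) per area, O(n^k) overall,
    # keeping the assignment with the highest weighted value in best
    candidates, weights = input
    if k == len(candidates) - 1:
        current = [candidates[i][a[i]] if a[i] >= 0 else None
                   for i in range(len(candidates))]
        ids = [c.id for c in current if c != None]
        if len(ids) != len(set(ids)):
            return  # assumed rule: a content may be shown in only one area
        if self.calculateCost(current, weights) > self.calculateCost(best, weights):
            best[:] = current  # mutate in place so the caller sees the result
        return
    for j in range(-1, len(candidates[k + 1])):
        if j >= 0 and candidates[k + 1][j] == None:
            continue  # skip empty waiting slots; j == -1 means pick nothing
        a[k + 1] = j
        self.backtrack(a, k + 1, input, best)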
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_greedy(self,time,a1,a2,*args):\n\t\tareas = []\n\t\tareas.append(a1)\n\t\tareas.append(a2)\n\t\tareas.extend(args)\n\t\tareas_sorted = sorted(areas,reverse=True)\n\t\tresult = []\n\t\tcandidates = [[wait[time][1] if wait[time]!=None else None \\\n\t\t\t\t\t for wait in area.w] for area in areas]\n\t\tused_content = set()\n\t\tfor area,cands in zip(areas_sorted,candidates):\n\t\t\tcands.sort(reverse=True)\n\t\t\tfor i in range(len(cands)):\n\t\t\t\tif cands[i] == None:\n\t\t\t\t\tresult.append((area,None))\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tif cands[i].id not in used_content:\n\t\t\t\t\t\tresult.append((area,cands[i]))\n\t\t\t\t\t\tused_content.add(cands[i].id)\n\t\t\t\t\t\tbreak\n\t\tprint \"greedy best solution:\",result\n\t\tprint \"greedy best cost:\",sum([x.weight*y.value if y!= None \\\n\t\t\t\t\t\t\t\t\telse 0 for x,y in result])", "def find_optimal_strategy(iters=50000, n=30):\n # Get each total\n total_1, total_2 = full_game(iters=iters, n=n)\n # Since Player 2's choice depends on Player 1's choice, we will consider the net profit of Player 2\n # as the total winnings for Player 2 minues the total winnings for Player 1.\n # Get the best strategy of Player 2 for each choice of Player 1\n best_js = np.argmax(total_2, axis=1)\n # Create empty array for each value\n best_strats = np.zeros((n, n))\n # Get each best strat\n for i in range(n):\n best_strats[i, best_js[i]] = total_1[i, best_js[i]]\n # Find optimal strategy:\n optimal_1, optimal_2 = np.unravel_index(np.argmax(best_strats, axis=None), \\\n best_strats.shape)\n # Return expected payoff and optimal strategy (corrected for zero indexing)\n return total_1[optimal_1, optimal_2]/iters, \\\n total_2[optimal_1, optimal_2]/iters, \\\n (optimal_1 + 1, optimal_2 + 1)", "def greedy(constraint, indexes, m_l, parallel=False):\n\n selected = np.array([])\n plot = False\n choices = np.array(indexes)\n bar = ChargingBar(\"Calculating index set with greedy method\", max=m_l)\n\n for i in range(len(selected), m_l):\n # print(\"i = %d\" % i)\n start = time.time()\n\n def calc(node):\n return constraint(np.union1d(selected, node))\n\n if parallel:\n pickle_fix.calc = calc\n available_cores = odin.config.get(\"available_cores\", 4)\n pool = multiprocessing.Pool(processes=available_cores)\n values = pool.map(pickle_fix, choices)\n pool.close()\n else:\n # values: [float]\n values = list(map(calc, choices))\n\n greedy_choice = choices[np.argmax(values)]\n\n if plot:\n values = np.sort(values)\n oplt.plot(values)\n oplt.show()\n # current_best = np.max(values)\n\n selected = np.union1d(selected, [greedy_choice])\n choices = np.setdiff1d(choices, [greedy_choice])\n logging.debug(\"selected = %s; choice = %s; time = %.5f\" % (\n selected, greedy_choice, time.time() - start))\n bar.next()\n bar.finish()\n\n return selected", "def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path", "def kbest_matches(self, k=1, minlen=2, buffer=0):\n ki = 0\n while k is None or ki < k:\n idx = None\n lcm = None\n while idx is None:\n idx = np.unravel_index(np.argmax(self._wp, axis=None), self._wp.shape)\n if idx[0] == 0 or idx[1] == 0:\n return None\n r, c = idx\n lcm = 
LCMatch(self, r, c)\n for (x, y) in lcm.path:\n x += 1\n y += 1\n if len(self._wp.mask.shape) > 0 and self._wp.mask[x, y] is True: # True means invalid\n # print('found path contains masked, restart')\n lcm = None\n idx = None\n break\n else:\n self._wp[x, y] = ma.masked\n if len(lcm.path) < minlen:\n # print('found path too short, restart')\n lcm = None\n idx = None\n if buffer > 0 and lcm is not None:\n miny, maxy = 0, self._wp.shape[1] - 1\n minx, maxx = 0, self._wp.shape[0] - 1\n for (x, y) in lcm.path:\n xx = x + 1\n for yy in range(max(miny, y + 1 - buffer), min(maxy, y + 1 + buffer)):\n self._wp[xx, yy] = ma.masked\n yy = y + 1\n for xx in range(max(minx, x + 1 - buffer), min(maxx, x + 1 + buffer)):\n self._wp[xx, yy] = ma.masked\n if lcm is not None:\n ki += 1\n yield lcm", "def try_optimal_solution(module, n_of_players_with_vote):\n\n nonlocal all_lineups\n nonlocal final_field\n nonlocal malus\n\n # For each candidate\n for candidate in all_lineups:\n\n # We create the list where each player in the combination has only\n # 1 role\n candidates_single_role = all_lineups_single_role(candidate)\n\n # And test each of these combinations\n for new_cand in candidates_single_role:\n\n # If we find a solution we store the result\n if find_solution(new_cand, module, n_of_players_with_vote):\n final_field = new_cand\n break\n\n # And stop the iteration over the other condidates\n if final_field:\n malus = 0\n break", "def neighbour_search(\n orders_dict: dict,\n algorithm: str,\n factories_dict: dict,\n sku_dict: dict,\n time_limit=600000,\n iteration_limit=4,\n):\n\n algorithm = algorithm.lower()\n if algorithm not in [\"naive\", \"max\", \"min\"]:\n raise ValueError(\"Please only select one of naive, max or min as the algorithm\")\n orders_rank = list(orders_dict.items())\n timeout_start = time.time()\n it = int(0) # iteration count\n best_order = list(orders_dict.keys())\n best = generate_solutions(orders_dict, algorithm, factories_dict, sku_dict)\n while time.time() < timeout_start + time_limit:\n if it > iteration_limit:\n return (\n best,\n best_order,\n ) # if 5 iterations go by without improvement, return current best\n if len(orders_dict) < 2:\n return (\n generate_solutions(orders_dict, algorithm, factories_dict, sku_dict),\n best_order,\n )\n else:\n distance_dict = {}\n\n for order in orders_dict:\n distance_dict[order] = orders_dict[order].calculate_difference(\n factories_dict\n )\n orders_rank = dict(sorted(distance_dict.items(), key=lambda item: item[1]))\n\n n = int(len(orders_dict) / 4)\n\n orders_head = list(orders_rank.items())[:n]\n orders_tail = list(orders_rank.items())[n:]\n np.random.shuffle(orders_tail)\n orders_head.extend(orders_tail)\n new_orders = dict(orders_head)\n gen_orders = {}\n for i in list(new_orders.keys()):\n gen_orders[i] = orders_dict[i]\n\n results = generate_solutions(gen_orders, algorithm, factories_dict, sku_dict)\n if results[0] < best[0]:\n best = results\n best_order = list(new_orders.keys())\n it = 0 # iteration count resets when improvement found\n else:\n it += int(1)\n\n return best, best_order", "def strategy_best(cookies, cps, time_left, build_info):\n items = build_info.build_items()\n result = None\n cost = 0\n overall = cookies + time_left * cps\n for item in items:\n temp_cost = build_info.get_cps(item) / build_info.get_cost(item)\n if temp_cost <= overall and cost < temp_cost:\n result = item\n cost = temp_cost\n return result", "def solution(n: int, k: int, coins: list):\n \n path_best = deque([0])\n \n buff = Queue()\n 
buff.put(0)\n \n profit_curr = 0\n n_jumps = 0\n while not buff.empty():\n curr = buff.get()\n profit_max_ = None\n jump_best = None\n for delta in range(1, k+1):\n jump = delta + curr\n if jump >= n:\n break\n profit = profit_curr + coins[jump]\n if profit_max_ is None or profit_max_ < profit:\n jump_best = jump\n profit_max_ = profit\n if coins[jump] >= 0:\n break\n if jump_best is not None:\n profit_curr = profit_max_\n buff.put(jump_best)\n path_best.append(jump_best)\n n_jumps += 1\n return n_jumps, profit_curr, [x for x in path_best]", "def find_best_k(data, anots, neibhours_range):\r\n \r\n best_k = 0\r\n best_acc = 0\r\n for n_neighbors in neibhours_range:\r\n accur = iterate_over_chanels(data, anots, n_neighbors)\r\n mean_acc = accur.mean()\r\n if mean_acc > best_acc:\r\n best_acc = mean_acc\r\n best_k = n_neighbors\r\n return best_k", "def uniform_cost_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n fringe.push(stat[0], stat[1].piece.get_num_tiles()) #problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def strategy_best(cookies, cps, time_left, build_info):\n return_item = None\n highest_icr = float('-inf')\n item_list = build_info.build_items()\n cookies_potential = cookies + time_left * cps\n for item in item_list:\n cost = build_info.get_cost(item)\n curr_icr = build_info.get_cps(item) / cost \n if cookies_potential >= cost and curr_icr > highest_icr:\n return_item = item\n highest_icr = curr_icr\n return return_item", "def optimal(count):\n\n return _optimal(start, count)[0]", "def best_combination_dynamic(dataset, max_investment):\n # Finding best ROI (return On Investment):\n max_investment_in_cents = int(max_investment * 100)\n dataset_length = len(dataset)\n matrix = [[0 for x in range(max_investment_in_cents + 1)] for x in range(dataset_length + 1)]\n for share in range(1, dataset_length + 1):\n for budget in range(1, max_investment_in_cents + 1):\n current_share = dataset[share - 1]\n if current_share[1] <= budget:\n matrix[share][budget] = max(current_share[3] + matrix[share - 1][budget - current_share[1]],\n matrix[share - 1][budget])\n else:\n matrix[share][budget] = matrix[share - 1][budget]\n\n # Retrieving best combination from matrix:\n best_combination = []\n budget_remaining = max_investment_in_cents\n while budget_remaining >= 0 and dataset_length >= 0:\n current_share = dataset[dataset_length - 1]\n if matrix[dataset_length][budget_remaining] == \\\n matrix[dataset_length - 1][budget_remaining - current_share[1]]\\\n + current_share[3]:\n best_combination.append(current_share)\n budget_remaining -= current_share[1]\n dataset_length -= 1\n\n return matrix[-1][-1], best_combination", "def solve(self):\n\n n = 1\n\n # Get the n-length combinations (if any) that can accommodate the\n # defined load.\n combos_n = self.fit_n_bins(n=n)\n\n while 
True:\n # Get the n+1-length combinations (if any) that can accommodate the\n # defined load.\n combos_np1 = self.fit_n_bins(n=n + 1)\n\n # If no viable combinations were found for both n and n + 1 lengths\n # the increment the defined length, swap the combinations and skip\n # to the next iteration\n if not combos_n or not combos_np1:\n combos_n = combos_np1\n n += 1\n continue\n\n # if the n + 1 length combinations happen to offer decreased\n # capacity then prefer those combinations over the shorter yet more\n # wasteful combinations\n if sum(combos_n[0]) > sum(combos_np1[0]):\n return combos_np1\n else:\n return combos_n", "def mainSelection():\n\timport time\n\tc1 = Content(1,4,20)\n\tc2 = Content(2,6,30)\n\tc3 = Content(3,5,25)\n\tc1_ = Content(1,1,20)\n\tc5 = Content(5,3,29)\n\tc6 = Content(6,11,50)\n\tc7 = Content(7,7,34)\n\tc1__ = Content(1,3,20)\n\tc8 = Content(8,6,10)\n\ta1 = Area('a1',1.0)\n\ta1.addContent(0,0,c1)\n\ta1.addContent(0,6,c2)\n\ta1.addContent(0,12,c3)\n\ta1.addContent(0,18,c1_)\n\ta1.addContent(1,2,c5)\n\ta1.addContent(1,9,c6)\n\ta1.addContent(2,0,c7)\n\ta1.addContent(2,9,c1__)\n\ta1.addContent(2,14,c8)\n\ta1.printSchedule()\n\tprint \"\\n\"\n\ta2 = Area('a2',0.5)\n\ta2.addContent(0,0,c6)\n\ta2.addContent(0,11,c7)\n\ta2.addContent(1,0,c1)\n\ta2.addContent(1,6,c8)\n\ta2.addContent(1,15,c3)\n\ta2.addContent(2,0,c3)\n\ta2.addContent(2,7,c5)\n\ta2.addContent(2,13,c2)\n\ta2.addContent(2,19,c1_)\n\ta2.printSchedule()\n\ta3 = Area('a3',1.0)\n\ta4 = Area('a4',0.9)\n\ta3.addContent(0,0,c2)\n\ta3.addContent(1,0,c5)\n\ta4.addContent(0,0,c2)\n\tsol_select = Selection_solution()\n\ttime_b = time.time()\n\tsol_select.select_bruteforce(time=12,a1=a1,a2=a2)\n\tprint \"running time: \",time.time()-time_b\n\ttime_g = time.time()\n\tsol_select.select_greedy(time=12,a1=a1,a2=a2)\n\tprint \"running time: \",time.time()-time_g\n\ttime_b = time.time()\n\tsol_select.select_bruteforce(time=0,a1=a3,a2=a4)\n\tprint \"running time: \",time.time()-time_b\n\ttime_g = time.time()\n\tsol_select.select_greedy(time=0,a1=a3,a2=a4)\n\tprint \"running time: \",time.time()-time_g", "def get_combination(class_, originals='./data/CASIA1_originals', fakes_ela='./data/CASIA1_fakes_ela'):\n medians_ = [0,3,5,7,9,11,13,15,17,19]\n\n iterations_ = []\n for i in range(21):\n iterations_.append(i)\n\n threshold_ = []\n for i in range(40):\n threshold_.append(i)\n\n fakes_list = os.listdir(class_)\n fakes = load_fakes(fakes_list, class_, originals)\n\n best = 0\n best_median_filter_size = 0\n best_number_of_iterations = 0\n best_thresh = 0\n for x, median_filter_size in enumerate(medians_):\n for y, number_of_iterations in enumerate(iterations_):\n for t, thresh in enumerate(threshold_):\n whole_score = 0\n for e, elem in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, elem.path.split('\\\\')[-1]))\n\n if thresh > 0:\n image_ = pywt.threshold(image, thresh, 'soft')\n image = cv2.normalize(image_, image, 0, 1, cv2.NORM_MINMAX)\n image = 255 * image\n image = image.astype(np.uint8)\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n\n if median_filter_size > 0:\n image = cv2.medianBlur(image, median_filter_size)\n\n kernel = np.ones((3, 3), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel, iterations=number_of_iterations)\n\n cnts = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n\n max_idx = 0\n max_pnts = 
0\n for u, ulem in enumerate(cnts):\n if cv2.contourArea(ulem) < max_pnts:\n continue\n else:\n max_idx = u\n max_pnts = cv2.contourArea(ulem)\n\n if len(cnts) > 0:\n (x, y, w, h) = cv2.boundingRect(cnts[max_idx])\n pred = {\n \"x\": x,\n \"y\": y,\n \"w\": w,\n \"h\": h\n }\n else:\n pred = None\n\n whole_score += evaluate_augmentation_fit(pred, elem)\n if best < whole_score:\n best = whole_score\n best_median_filter_size = median_filter_size\n best_number_of_iterations = number_of_iterations\n best_thresh = thresh\n print(\"Class: {}; MedianFilterSize: {}; Iterations: {}; Thresh: {}; Score: {}\" .format(class_, median_filter_size, number_of_iterations, thresh, round(whole_score, 2)))\n print(\"###########\")\n print(\"Best: {} -> {} % ({}, {}, {})\" .format(round(best, 2), round((best/len(fakes)), 2), best_median_filter_size, best_number_of_iterations, best_thresh))\n print(\"###########\")", "def random_choose_candidate_solve (x_v, C, A, S, budgets, start_time, verbose=True):\n A = A.copy()\n edges_removed = []\n budget = np.max(budgets)\n results_info = []\n for i in range(budget):\n if (len(C) == 0):\n # Maximum balance achieved -> budget high.\n results_info = update_res(results_info, budgets, time.time() - start_time, len(x_v.nonzero()[0]) - len(S))\n break\n while (True):\n try:\n e_chosen = C[np.random.choice(range(len(C)))]\n except:\n results_info = update_res(results_info, budgets, time.time() - start_time, len(x_v.nonzero()[0]) - len(S))\n return results_info, np.nonzero(x_v)[0], A, edges_removed\n if (is_connected_postdel(delete_edge(A.copy(), e_chosen), e_chosen)):\n break\n else:\n C.remove(e_chosen)\n edges_removed.append(e_chosen)\n try:\n ue = node_out (x_v, e_chosen)\n except:\n print(e_chosen, \" is not on the periphery\")\n return\n A = delete_edge (A, e_chosen)\n x_v[ue] = find_label (A, ue, x_v)\n C.remove(e_chosen)\n if (verbose):\n print(e_chosen, \" is chosen\")\n if (x_v[ue] != 0):\n C, C_i = update_chosen(ue, x_v, A, C)\n # if (verbose):\n # print(\"Edges added to C: \", C_i)\n C = C + C_i\n if (len(edges_removed) in budgets):\n select_time = time.time() - start_time\n results_info.append({\"Budget\": len(edges_removed), \"RT\": select_time, \"Delta\": len(np.nonzero(x_v)[0]) - len(S)})\n if (verbose):\n print(\"\\n\")\n return results_info, np.nonzero(x_v)[0], A, edges_removed", "def savings_algorithm(self):\n self.generate_trivial_tours() # generate trivial solution\n while True: # endless loop\n maxSavings = 0 # values for best savings decision\n bestr1 = None\n bestr2 = None\n for r1 in self.routes: # loop through all route combinations\n for r2 in self.routes:\n if r1 != r2:\n currentSavings = self.savings2routes(r1,r2)\n if currentSavings > maxSavings: # if the savings are greater than the so far best savings\n bestr1 = r1 # store the routes and the savings value\n bestr2 = r2\n maxSavings = currentSavings\n if (bestr1 == None): # if no savings or no feasible joins exist break out of the loop\n break\n newRoute = VRP_Route(bestr1.route+bestr2.route) # generate new route and delete old routes\n self.routes.remove(bestr1)\n self.routes.remove(bestr2)\n self.routes.append(newRoute)\n self.get_objective()\n return self.objective", "def find_combination(grid,pt):\n\n # find simplex interpolation coefficients\n coefs,tracks = find_interpolation_coefficients(grid,pt,grid.tessellation,grid.ndx)\n if (coefs is None): return None\n\n # find ages:\n ages = find_ages(coefs,tracks,pt[-1])\n if (ages is None): return None\n\n n = len(tracks)\n\n # combine multiple 
models:\n results = ()\n for coef,track,age in zip(coefs,tracks,ages):\n if (coef < 0.0): return None # make sure we're not outside the grid\n result = track.find_combination(age,coef)\n if (result is None): return None\n results += result\n return results", "def optimal_solution_multiple_pickup(memo):\n # Calculates what the maximum value is and saves which row and [col][energy] index\n maxvalue = None\n for i in range(len(memo)+1):\n # Sets up initial value\n if maxvalue is None:\n # Sets initial value to first non empty cell[1]\n if memo[len(memo)-1][i][1] is not None:\n maxvalue = (memo[len(memo)-1][i][1], i, 1)\n # Compares first non empty cell[1] with first non empty cell[0]\n if memo[len(memo)-1][i][0] is not None:\n if maxvalue is not None:\n if memo[len(memo) - 1][i][0] > maxvalue[0]:\n maxvalue = (memo[len(memo) - 1][i][0], i, 0)\n # In case first non empty cell[1] was None\n else:\n maxvalue = (memo[len(memo) - 1][i][0], i, 0)\n # After initial value is set. Compares it the other value in that cell to get maximum\n else:\n if memo[len(memo)-1][i][1] is not None:\n if memo[len(memo)-1][i][1] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i][1], i, 1)\n if memo[len(memo)-1][i][0] is not None:\n if memo[len(memo)-1][i][0] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i][0], i, 0)\n\n # Goes back and calculates how the optimal solution was formed\n optimal_solution = [0] * len(memo)\n current_row = maxvalue[1]\n current_index = maxvalue[2]\n # Goes backwards through the array starting at the best value\n for col in range(len(memo)-1, 0, -1):\n # For energy > 0 where it has the choice to pick up or not\n if current_row > 0:\n # Checks if it did pick up. If current cell does not have the same value as the previous column with\n # 1 less energy[current_index] then it must have picked up\n if memo[col][current_row][current_index] != memo[col-1][current_row-1][1]:\n optimal_solution[col] = 1\n\n # Picks the maximum number from previous column and 1 more energy\n if memo[col-1][current_row+1][0] is None:\n current_index = 1\n elif memo[col-1][current_row+1][1] is None:\n current_index = 0\n else:\n if memo[col-1][current_row+1][0] > memo[col-1][current_row+1][1]:\n current_index = 0\n else:\n current_index = 1\n current_row += 1\n # otherwise it did not pick up\n else:\n current_row -= 1\n current_index = 1\n # If at 0 energy then it must have picked up\n else:\n optimal_solution[col] = 1\n current_row += 1\n if memo[col - 1][1][0] is None:\n current_index = 1\n elif memo[col - 1][1][1] is None:\n current_index = 0\n else:\n if memo[col - 1][1][0] > memo[col - 1][1][1]:\n current_index = 0\n else:\n current_index = 1\n return maxvalue[0], optimal_solution", "def algorithm(self):\n t = time.clock()\n self.calculateFirstPath()\n improve = True\n while improve and (self.allowedTime > (time.clock() - t)):\n improve = False\n\n for i in range(self.NB_OF_NODES):\n for j in range(self.NB_OF_NODES):\n if j in [(i - 1) % self.NB_OF_NODES, i, (i + 1) % self.NB_OF_NODES]:\n continue\n\n if self.getDistance(i, i + 1) + self.getDistance(j, j + 1) > self.getDistance(i, j) + self.getDistance(i + 1, j + 1):\n self.exchange(i, j)\n improve = True", "def search_minimum_coloring(self,alpha,Beta):\n bestSol=[]\n bestK=0\n k= self.g.n\n iter = 0\n global encore\n encore = True\n timer = threading.Timer(200, findeboucle)\n timer.start()\n while(encore):\n tabus_search = self.compute_solution(k,alpha,Beta)\n if(tabus_search[1]==0):\n bestSol= copyMatrix(tabus_search[0])\n #tmax=tabus_search[2]\n bestK=k\n 
k=k-1\n return(bestK,bestSol)", "def search_optimal_capacities(network, step_size, tolerance, filename):\r\n ## Initialization\r\n # Initialize the value of total flow over the network\r\n totalflow = max(network.lb_totalflow, step_size)\r\n \r\n # An auxiliary threshold of the total flow computed based on the capacity upper bounds, used in Line 4 of Algorithm 3.\r\n aux_bound = 1 - np.exp(network.beta - network.b + network.phi/network.u)\r\n \r\n \r\n # Initialize the bounds for flow over each route\r\n ub_flow = np.zeros(network.num_routes)\r\n lb_flow = np.zeros(network.num_routes)\r\n \r\n # Initialize the optimal solution over the network\r\n opt_socialwelfare = np.array([])\r\n opt_totalflow = 0\r\n opt_flows = np.array([])\r\n opt_capacity = np.zeros(network.num_routes)\r\n \r\n\r\n# # For debugging only\r\n# lower_bound = np.zeros(network.num_routes)\r\n# upper_bound = np.zeros(network.num_routes)\r\n# count = 0\r\n \r\n # Try to plot out the (totalflow, social_welfare) scatter plot\r\n z = []\r\n hz = []\r\n# # End of debugging\r\n\r\n ## Start the search\r\n while totalflow < 1 - tolerance:\r\n flag_nofeasibleflow = False\r\n \r\n # Compute the bounds for the flow.\r\n for i in range(network.num_routes):\r\n # Line 3-8 of Algorithm 3. Compute the upper bounds for the flow.\r\n if totalflow >= aux_bound[i]: \r\n x3_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 3) \r\n if x3_star > network.u[i]:\r\n flag_nofeasibleflow = True\r\n break \r\n else:\r\n ub_flow[i] = x3_star \r\n else: \r\n ub_flow[i] = 1 \r\n # Line 9-10 of Algorithm 3. Compute the lower bounds of the flow.\r\n x1_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 1)\r\n x2_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 2)\r\n lb_flow[i] = max(x1_star, x2_star)\r\n \r\n \r\n if not flag_nofeasibleflow:\r\n # Check feasibility of the flow based on the current total flow, lower and upper bounds of the flow\r\n if totalflow < np.sum(lb_flow) or totalflow > np.sum(ub_flow): \r\n totalflow += step_size \r\n\r\n# # For debugging only\r\n# print(\"\\nThe current total flow is: \" + str(totalflow))\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # Eng of debugging\r\n# \r\n continue\r\n \r\n # The implementation of line 11 to 18. Find the optimal flow given the current value of z.\r\n [opt_obj, opt_x] = ip.max_sum_xlogx(network.num_routes, totalflow, lb_flow, ub_flow) \r\n \r\n \r\n # Line 18 of Algorithm 3. 
Compute the social welfare given the current z and optimal q(z).\r\n temp = opt_obj - totalflow * np.log(1-totalflow)\r\n\r\n ##### Testing: to plot out the function of h(z)\r\n z.append(totalflow)\r\n hz.append(temp)\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n if opt_socialwelfare.size == 0 or temp > opt_socialwelfare:\r\n opt_socialwelfare = temp\r\n opt_flows = opt_x\r\n opt_totalflow = totalflow \r\n \r\n # For debugging only\r\n# print(\"\\nUpdate optimal flow\")\r\n# print(opt_x)\r\n# print(lb_flow)\r\n# print(ub_flow)\r\n# print(\"Total flow is \" + str(opt_totalflow)) \r\n \r\n # For debugging\r\n# np.copyto(lower_bound, lb_flow) \r\n# np.copyto(upper_bound, ub_flow) \r\n# count += 1\r\n# print(\"The lower and upper bounds are: \")\r\n# print(lb_flow)\r\n# print(lower_bound)\r\n# print(\"\\n\")\r\n# print(ub_flow)\r\n# print(upper_bound)\r\n# print(\"\\n\")\r\n \r\n totalflow += step_size \r\n\r\n \r\n \r\n# # For debugging only\r\n# print(\"\\n----------------\\n Exiting the while loop.\")\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count)) \r\n# # Eng of debugging\r\n \r\n # Line 20 of ALgorithm 3\r\n if opt_flows.size > 0:\r\n network.update_flow(opt_flows) \r\n for i in range(network.num_routes): \r\n network.compute_capacity(opt_totalflow, i)\r\n opt_capacity[i] = network.capacity[i]\r\n print(\"\\n--------------\\nThe optimal flow is: \")\r\n print(opt_flows)\r\n print(\"\\n--------------\\nThe optimal parking capacity is: \")\r\n print(opt_capacity) \r\n print(\"\\n--------------\\nThe optimal total flow is \" + str(opt_totalflow))\r\n print(\"\\n--------------\\nThe maximum social welfare is \" + str(opt_socialwelfare) +\".\")\r\n \r\n \r\n ##### Testing: to plot out the function of h(z)\r\n #plt.scatter(z, hz, c='r', marker='r')\r\n plt.plot(z, hz, '-', linewidth=0.5)\r\n #plt.xlim(0.5, 1)\r\n plt.savefig(filename + '.png', bbox_inches='tight')\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n \r\n \r\n# # For debugging\r\n# temp1 = np.zeros(network.num_routes)\r\n# temp2 = np.zeros(network.num_routes)\r\n# temp3 = np.zeros(network.num_routes)\r\n# for i in range(network.num_routes): \r\n# temp1[i] = zeta(network, i, opt_flows[i], opt_totalflow, 1)\r\n# temp2[i] = zeta(network, i, opt_flows[i], opt_totalflow, 2)\r\n# temp3[i] = zeta(network, i, opt_flows[i], opt_totalflow, 3)\r\n# print(\"The function value of zeta at the optimal flow: \")\r\n# print(temp1)\r\n# print(temp2)\r\n# print(temp3)\r\n# \r\n# # For debugging\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # End of debugging\r\n \r\n return opt_flows, opt_capacity, opt_socialwelfare \r\n else:\r\n print(\"\\nNo optimal solution is found!\")\r\n return np.array([]), opt_capacity, opt_socialwelfare", "def get_area(area=None):\n # Set global vars that can be accessed outside of function for debuging\n global sorted_areas, chosen_area, mask\n # Create a dicionary where keys --> brain areas and values --> list of neurons\n sorted_areas = sort_by_area()\n # Choose area of interest\n chosen_area = sorted_areas[area]\n # Create a mask to keep only rows of interest\n mask = np.isin(spiketimes[:,0], chosen_area)\n # Fetch only desired spiketimes\n 
area_spikes = spiketimes[mask]\n # Find number of neurons\n N_neurons = len(chosen_area)\n \n return area_spikes", "def calculateOptimal(self) -> (list, int):\n\t\tcombinations = list(itertools.product(*self.clusters))\n\t\tmin_dist = 1000000\n\t\tmin_combination = None\n\t\tfor combination in combinations:\n\t\t\tdist = super().step(combination)\n\t\t\tif(dist < min_dist):\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_combination = combination\n\t\treturn (min_combination, min_dist)", "def find_opt_size(instance, maxtime):\n if maxtime is None:\n maxtime = -1\n print(\"Searching for minimum-sized set of weights, timeout set at {}\"\n \"\".format(maxtime))\n try:\n with timeout(seconds=maxtime):\n while True:\n print(\"# \\tCall guess_weight with k = {}\".format(instance.k))\n solutions = solve(instance, silent=True)\n if bool(solutions):\n break\n instance.try_larger_k()\n elapsed = time.time() - start\n print(\"# Weights computation took {:.2f} seconds\".format(elapsed))\n print(\"# Solution:\", solutions)\n return solutions, elapsed\n except TimeoutError:\n print(\"Timed out after {} seconds\".format(maxtime))\n return set(), maxtime", "def solve(self):\n\n tracks_copy = self.tracks.copy()\n vehicles_sorted = sorted(self.vehicles, key=lambda x: x.departure_time)\n\n vehicles_added = 0\n while len(vehicles_sorted) != 0:\n best_ratio = - sys.maxsize - 1\n best_track = None\n best_vehicle = None\n shuffle(tracks_copy)\n\n for vehicle in vehicles_sorted:\n for track in tracks_copy:\n if track.add_vehicle(vehicle, self.tracks):\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n ratio = goal2 / goal1\n if ratio > best_ratio:\n best_ratio = ratio\n best_track = track\n best_vehicle = vehicle\n\n track.remove_last()\n\n if best_vehicle is not None and best_track is not None:\n vehicles_added += 1\n best_track.add_vehicle(best_vehicle, self.tracks)\n vehicles_sorted.remove(best_vehicle)\n else:\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n if goal1 < self.optimal_gg1 and goal2 > self.optimal_gg2:\n self.optimal_gg1 = goal1\n self.optimal_gg2 = goal2\n self.optimal_tracks = self.tracks\n return False\n\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n print(\"Success:\", goal1, goal2)\n if goal1 < self.best_gg1 and goal2 > self.best_gg2:\n self.best_gg1 = goal1\n self.best_gg2 = goal2\n self.best_tracks = self.tracks\n\n return True", "def cuckoo_search(n=None, nd=None, Lb=None, Ub=None, pa=None):\n\tif n is None:\n\t\tn =25\n\n\tif nd is None:\n\t\tnd=21\n\n\tif Lb is None:\n\t\tLb = np.ones(nd)*0\n\tif Ub is None:\n\t\tUb = np.ones(nd)*5\n\n\tif pa is None:\n\t\tpa = 0.25\n\n\t# creation of the list for parameter pairs \n\t\n\tstep = 1\n\n # initialization of the nests\n\tnests = np.zeros((n,nd))\n\tfor i in range(n):\n\t\tnests[i,:] = Lb + (Ub-Lb)*np.random.rand(len(Lb))\n\n\tfitness = 10**10 * np.ones((n,1))\n\tbest_nest, fmin, nest, fitness, N_iter = single_cuckoo_search(nests,fitness,Lb,Ub,pa,step) \n\n\treturn best_nest, fmin, nest, fitness, N_iter", "def create_knapsack_packing_problems_with_manual_solutions(can_print=False):\n\n problems, solutions = list(), list()\n\n start_time = time.time()\n\n # Problem 11\n\n max_weight = 200.\n container_shape = Polygon([(0, 0), (0, 2330), (5867, 2330), (5867, 0)])\n container = 
Container(max_weight, container_shape)\n items = [Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.)]\n problem = Problem(container, items)\n problems.append(problem)\n\n solution = Solution(problem)\n 
solutions.append(solution)\n\n print_if_allowed(solution.add_item(0, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(1, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(2, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(3, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(4, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(5, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(6, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(7, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(8, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(9, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(10, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(11, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(12, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(13, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(14, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(15, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(16, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(17, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(18, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(19, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(20, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(21, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(22, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(23, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(24, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(25, (5., 5.), 0.), can_print)\n\n # show elapsed time\n elapsed_time = get_time_since(start_time)\n print_if_allowed(\"Manual elapsed time: {} ms\".format(round(elapsed_time, 3)), can_print)\n\n return problems, [str(i + 1) for i in range(len(problems))], solutions" ]
[ "0.71428865", "0.58655334", "0.57385504", "0.5726437", "0.5719283", "0.5708327", "0.5681849", "0.5659449", "0.56578714", "0.56546485", "0.5613668", "0.5609115", "0.55987585", "0.5598416", "0.5590872", "0.5587801", "0.55872273", "0.55849886", "0.55798554", "0.5577924", "0.557098", "0.55529255", "0.5542213", "0.5527108", "0.5505404", "0.5485425", "0.5455674", "0.54548", "0.5452188", "0.54396284" ]
0.728321
0
Given a solution and weights, calculate the total cost; the higher, the better
def calculateCost(self, sol, weights):
    return sum([x.value * y if x != None else 0
                for x, y in zip(sol, weights)])
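For illustration, a minimal self-contained sketch of the same weighted-cost computation; the Content class and the sample numbers below are hypothetical stand-ins for demonstration, not part of the dataset record.

class Content:
    # Hypothetical stand-in: only the .value attribute matters for the cost.
    def __init__(self, value):
        self.value = value

def calculate_cost(sol, weights):
    # Total cost = content value * area weight, with empty slots counting as 0.
    return sum(x.value * y if x is not None else 0
               for x, y in zip(sol, weights))

solution = [Content(10), None, Content(3)]     # one slot left empty
area_weights = [0.5, 0.3, 0.2]
print(calculate_cost(solution, area_weights))  # 0.5*10 + 0 + 0.2*3 = 5.6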
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cost_and_weight_of_knapsack(solution,weight_cost):\r\n cost,weight = 0,0 \r\n for item in solution:\r\n weight += weight_cost[item][0]\r\n cost += weight_cost[item][1]\r\n return cost,weight", "def cost_total(X, cost_weights=(1.0, 1.0, 1.0)):\n return cost_weights[0] * cost_distance(X) + \\\n cost_weights[1] * cost_same_team_by_distance(X) + \\\n cost_weights[2] * cost_previous_neighbour_by_distance(X, normalize=True)", "def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost", "def dp_make_weight(egg_weights, target_weight, memo = {}):\r\n # construct table. outer loop: egg weights. inner loop: 0-target_weight\r\n # table will be stored in memo. key=egg_weight, value=list, indexed from 0-target_weight\r\n for i, w in enumerate(egg_weights):\r\n # initialize key-value pair for a given egg weight. Value is empty list to be filled in inner loop.\r\n memo[w] = []\r\n for j in range(target_weight + 1):\r\n # if weight is 0, no eggs\r\n if j == 0:\r\n memo[w].append(0)\r\n # if egg_weight is less than weight, minimize number of eggs\r\n elif w <= j:\r\n # to minimize: take the min of (using prior denomination to get same weight, using current denomation to get weight)\r\n # first item=prior egg value, same weight\r\n # second item=\"sub\" current egg value by subtracting it from weight and adding 1 to egg total\r\n \r\n # if first egg weight, no need to look at \"row\" above to minimize\r\n if i == 0:\r\n min_eggs = memo[w][j-w] + 1\r\n else:\r\n min_eggs = min(memo[egg_weights[i-1]][j], memo[w][j-w] + 1)\r\n memo[w].append(min_eggs)\r\n # else if egg_weight is more than weight, take prior denomination min number of eggs at j\r\n else:\r\n memo[w].append(memo[egg_weights[i-1]][j])\r\n\r\n # access bottom right value to get minimum number of coins (largest egg_weight at target_weight)\r\n # uncomment below to only returns min number of eggs\r\n #return memo[egg_weights[-1]][target_weight]\r\n\r\n # determine makeup of min number of egg: \r\n # cur_weight to keep track as we subtract from total weight\r\n cur_weight = target_weight\r\n \r\n # egg_choices: a dict that holds how many of each egg_weight are in the optimal solution\r\n egg_choices = {}\r\n \r\n #print(memo)\r\n \r\n # outer loop goes backwards from highest to smallest egg weight\r\n for i in range(len(egg_weights)-1, -1, -1):\r\n # check if equal to memo[i-1][j] (row above, same column). 
if not equal, i is in the set.\r\n while egg_weights[i] <= cur_weight:\r\n # also if smallest egg weight, keep subtracting until we get 0\r\n if i == 0 or (memo[egg_weights[i]][cur_weight] != memo[egg_weights[i-1]][cur_weight]):\r\n # if they are not equal, add to the count of i in the egg_choices dict\r\n if egg_weights[i] in egg_choices.keys():\r\n egg_choices[egg_weights[i]] += 1\r\n else:\r\n egg_choices[egg_weights[i]] = 1\r\n # subtract from current weight the egg weight accounted for\r\n cur_weight -= egg_weights[i]\r\n \r\n # break if all weight accounted for\r\n if cur_weight == 0:\r\n break\r\n \r\n # string together the min number of eggs and the composition\r\n out = str(memo[egg_weights[-1]][target_weight]) + ' ('\r\n \r\n # list of formatted value * key pairs\r\n eggs = []\r\n for key, value in egg_choices.items():\r\n eggs.append(str(value) + ' * ' + str(key))\r\n \r\n # join key/value pairs together\r\n out += ' + '.join(eggs)\r\n \r\n # finish off the string\r\n out += ' = ' + str(target_weight) + ')'\r\n return out", "def calculate_weighted_results():\n pass", "def fitness_score(sack, individual, max_weight):\n profit = sack[0] \n weight = sack[1] \n total_profit = 0\n total_weight = 0\n \n for i in range(len(individual)):\n if individual[i] == 1:\n total_profit += profit[i]\n total_weight += weight[i]\n \n if total_weight > max_weight:\n total_profit = 0\n\n return total_profit, total_weight", "def cost(self) -> float:", "def total_profit(knapsack, items, weight):\n return knapsack[items][weight]", "def solve_brute_force(n: int, W: int, weight: List[int], value: List[int]) -> int:\n mapped_items = [{\"w\": w, \"v\": v} for i, (w, v) in enumerate(zip(weight, value))]\n\n maximum_value: int = 0\n updated: bool = False\n for i in range(1, n + 1):\n if i > 1 and not updated:\n break\n\n updated = False\n for chosen_items in list(combinations(mapped_items, i)):\n sum_weight = 0\n sum_value = 0\n for item in chosen_items:\n sum_weight += item[\"w\"]\n sum_value += item[\"v\"]\n\n if sum_weight <= W and maximum_value < sum_value:\n updated = True\n maximum_value = sum_value\n return maximum_value", "def dp_make_weight(egg_weights, target_weight, memo={}):\n\n \"\"\"\n 根据提示: 每个pound类型的蛋是无限的。\n 问题是提供一种蛋的组合,最好pound数等于或是接近总的weight 并且要满足数量要越少越好。\n 这是两个限制条件。但是提示也给了总是有egg为value1的,那么难度小了很多。\n 现在是怎样让蛋的数量越少越好。\n \n 1.最优子结构\n egg_weights 现在假设是(1, 5, 10, 25)\n dp_make_weight((1, 5, 10, 25),x,memo) , 当x - n >= 0 时(n代表 1,5,10,25),\n 然后在 dp_make_weight((1,5,10,25,x-n,memo) +1 中 挑选最小值。+1的原因是包含本次\n 2.重叠子问题\n 详见ps1b的图片。\n 那么memo记录的key 为 avail(即剩余的容量) ,value 为avail下最小的蛋的数量n。\n \n 那么base_case是什么?\n target == 0时,返回0\n 现在按照深度优先的思路思考\n \"\"\"\n\n if target_weight == 0:\n return 0\n\n if target_weight in memo:\n return memo[target_weight]\n\n result = None # 占位符,没有多大用\n\n for elt in egg_weights:\n if target_weight - elt >= 0: # 这样才有继续探索的必要\n tmp_result = dp_make_weight(egg_weights, target_weight - elt, memo) + 1\n if result is None or tmp_result < result:\n result = tmp_result\n memo[target_weight] = result\n return result", "def costFunction(R, W):\n costFunc = 0\n for i in range(0, len(R)):\n for j in range(i, len(R)):\n costFunc += costBetweenNodes(R, W, i, j)\n return costFunc", "def _compute_fitness(self, solution, bias=5):\n\n solution_fitness = 0.0\n\n for index in range(len(solution)):\n waypoint1 = solution[index - 1]\n waypoint2 = solution[index]\n solution_fitness += self._waypoint_distances[frozenset([waypoint1, waypoint2])] - bias * abs(\n self._dicAvailability[waypoint2] - 
self._dicAvailability[waypoint1])\n\n return solution_fitness", "def cost(self, output, labels, weights):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def knapsack_solution(v: List[int], w: List[float], c: float) -> int:\n pass", "def cost_function(V, W, H):\r\n cost = 0\r\n # return the coordinate matrix of some compelte data matrix\r\n modded = V.tocoo() \r\n # add each row * column set\r\n for row, col, v in zip(modded.row, modded.col, modded.data):\r\n cost += np.square(v - np.inner(W[row], H[:,col]))\r\n return cost", "def evaluate(self, representativeness: float, weight: float) -> float:\n pass", "def cost(self, output, labels, weights):\n return tf.multiply(0.5 * tf.square(output - labels), weights)", "def compute_fitness(solution):\n\n solution_fitness = 0.0\n\n for index in range(len(solution)):\n waypoint1 = solution[index - 1]\n waypoint2 = solution[index]\n solution_fitness += waypoint_distances[frozenset([waypoint1, waypoint2])]\n\n return solution_fitness", "def knapsack(items: List[Item], capacity: float) -> Tuple[float, List[int]]: \n values = []\n weights = []\n capacities = []\n for item in items:\n values.append(item.value)\n\n temp_weights = []\n for item in items:\n temp_weights.append(item.weight)\n weights.append(temp_weights)\n \n capacities = [capacity]\n\n # print(f\"Values: {values}\")\n # print(f\"Weights: {weights}\")\n\n # http://google.github.io/or-tools/python/ortools/algorithms/pywrapknapsack_solver.html\n # Dynamic Programming Solver is also available through `KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER`\n solver = pywrapknapsack_solver.KnapsackSolver(\n pywrapknapsack_solver.KnapsackSolver.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER,\n 'Knapsack'\n )\n\n # You can also set a time limit here to make sure that the solution is terminated if it takes too long\n # Use `set_time_limit()` method for this\n\n\n solver.Init(values, weights, capacities)\n computed_value = solver.Solve()\n \n taken: List[int] = []\n \n for i in range(len(values)):\n if solver.BestSolutionContains(i):\n taken.append(1)\n else:\n taken.append(0)\n\n # print('Taken:', taken)\n # print('Total weight:', computed_value)\n \n return computed_value, taken", "def calculate_best_way(self) -> int:\n node = self._find_lowest_cost_node()\n while node:\n cost = self.costs[node]\n neighbors = self.graph[node]\n for neighbor in neighbors.keys():\n node_cost = cost + neighbors[neighbor]\n if self.costs[neighbor] > node_cost:\n self.costs[neighbor] = node_cost\n self.parents[neighbor] = node\n self.closed_nodes.append(node)\n node = self._find_lowest_cost_node()\n\n return self.costs[\"fin\"]", "def get_cost_weights(X, n_trials=1000):\n\n names = X.names\n seats = X.seats\n\n c1,c2,c3 = [],[],[]\n\n rand_X = X.copy()\n\n # def get_average_cost_random(ntrials=100, seats, names):\n for ind in range(n_trials):\n # make a new random arrangement\n new_names_seats_dict = generate_random_assignment(names, seats)\n # get the new X\n _rand_X = names_seats_dict_to_X(new_names_seats_dict, names, seats)\n\n # copy and update to random\n rand_X.update(_rand_X)\n\n # max this - hence -ve sign\n _c1 = cost_distance(rand_X)\n\n # min this\n _c2 = cost_same_team_by_distance(rand_X)\n\n # max this, hence -ve sign\n # pij_current = get_person_person_distance(new_names_seats_dict, seat_distances, names)\n _c3 = cost_previous_neighbour_by_distance(rand_X)\n\n c1.append(_c1)\n c2.append(_c2)\n c3.append(_c3)\n\n lambda_1 = np.mean(c1)\n lambda_2 = np.mean(c2)\n lambda_3 = np.mean(c3)\n\n lambdas = 
[lambda_1, lambda_2, lambda_3]\n tot_lambda = sum(abs(lam) for lam in lambdas)\n\n cost_weights = [1. / (lam / tot_lambda) for lam in lambdas]\n return tuple([cost / sum(cost_weights) for cost in cost_weights])", "def compute_optimalReward(task):\n\n\tT = 15.0\n\tweights = 0\n\tif task == TABLE_TASK or task == COFFEE_TASK:\n\t\tweights = 1\n\telif task == LAPTOP_TASK:\n\t\tweights = 10\n\n\t# initialize start/goal based on task \n\tif task == COFFEE_TASK or task == HUMAN_TASK:\n\t\tpick = pick_shelf\n\telse:\n\t\tpick = pick_basic\n\n\tif task == LAPTOP_TASK:\n\t\tplace = place_higher\n\telse:\n\t\tplace = place_lower\n\t\t\n\tstartRad = np.array(pick)*(math.pi/180.0)\n\tgoalRad = np.array(place)*(math.pi/180.0)\n\tstart = startRad\n\tgoal = goalRad\n\n\tplan = Planner(task)\t\n\tfilename = None\n\tif task == 1:\n\t\tfilename = \"task1.csv\"\n\telif task == 2:\n\t\tfilename = \"task2.csv\"\n\telif task == 3:\n\t\tfilename = \"task3.csv\"\n\t\t\n\t# get optimal waypts from file\n\twaypts = get_opt_waypts(filename)\n\tr = plan.featurize(waypts)\n\tRvel = r[0]\n\tRfeat = np.sum(r[1])\n\n\tplan.kill_planner()\n\treturn (Rvel, Rfeat)", "def costBetweenNodes(R, W, i, j):\n if W[i][j] >= 0:\n return W[i][j] * distance(R[i], R[j])\n else:\n return W[i][j] * min(distance(R[i], R[j]), DMAX)", "def get_score(self, solution: np.array) -> float:\n score = 0\n for vehicle_count, vehicle_solution in enumerate(solution):\n distances = self.distance_matrix[vehicle_solution[0:-1], vehicle_solution[1:]]\n costs = distances * self.selected_transportation_cost[vehicle_count]\n score += np.sum(costs)\n return score", "def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score", "def get_expected_cost(self):", "def knapsack(capacity: int, weights: List[int], vals: List[int]):\n num_vals = len(vals)\n\n # create a capacity x num_vals solution matrix to store the sub-problem results\n grid = [[0 for _ in range(capacity + 1)] for _ in range(num_vals + 1)]\n\n for i in range(num_vals + 1):\n for j in range(capacity + 1):\n if i == 0 or j == 0:\n grid[i][j] = 0\n continue\n\n item_idx = i - 1\n previous_max = grid[i - 1][j] # the previous max for given weight class\n item_weight = weights[item_idx] # the current item's weight\n item_val = vals[item_idx] # the current item's value\n\n if item_weight <= j:\n val_with_cur_item = item_val + grid[i - 1][j - item_weight]\n grid[i][j] = max(previous_max, val_with_cur_item)\n else:\n grid[i][j] = previous_max\n\n return grid[num_vals][capacity]", "def knapsack(weights):\n\n n = len(weights)\n max_sum = sum(weights)\n\n result = []\n\n dp = [False for _ in range(max_sum + 1)]\n dp[0] = True\n\n for i in range(1, n + 1):\n # update dp from right to left for each new weight\n for x in range(max_sum, -1, -1):\n if dp[x]:\n dp[x + weights[i - 1]] = True\n\n for i in range(len(dp)):\n if dp[i]:\n result.append(i)\n\n return result # returns all possible sums that can be constructed given a list of weights\n\n # return dp", "def T(w):\n\tif w < 1: return 0 # Less than 1 penny has no possible combinations\n\tif counts.has_key(w): return counts[w] # If already in dict, return that sum\n\n\t# Solve by taking all possible combinations recursively\n\t\n\tval = 0\n\tfor coin in coins:\n\t\tval += T(w-coin)\n\tval += w in coins\n\treturn val", "def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in 
self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj" ]
[ "0.78241587", "0.7021364", "0.7016164", "0.68439823", "0.6772242", "0.66974974", "0.6579343", "0.65689176", "0.6504397", "0.6427664", "0.6424668", "0.641193", "0.64020264", "0.63749844", "0.6369628", "0.6342488", "0.6327875", "0.63274056", "0.63259715", "0.6325849", "0.6323128", "0.63016325", "0.62872267", "0.6281931", "0.62818074", "0.62664217", "0.6264704", "0.62476677", "0.6231101", "0.61911374" ]
0.7787033
1
greedy algorithm: take the area with the highest weight first and give it the most valuable content, then the second area, and so on; O(k log k + k n log n), not optimal, but efficient. Sort the areas by weight in decreasing order, pick the most valuable content for the first area, then the second, and so on, each time checking whether the content has already been selected by a previous area
def select_greedy(self, time, a1, a2, *args):
    areas = []
    areas.append(a1)
    areas.append(a2)
    areas.extend(args)
    areas_sorted = sorted(areas, reverse=True)
    result = []
    candidates = [[wait[time][1] if wait[time] != None else None
                   for wait in area.w] for area in areas]
    used_content = set()
    for area, cands in zip(areas_sorted, candidates):
        cands.sort(reverse=True)
        for i in range(len(cands)):
            if cands[i] == None:
                result.append((area, None))
                break
            else:
                if cands[i].id not in used_content:
                    result.append((area, cands[i]))
                    used_content.add(cands[i].id)
                    break
    print "greedy best solution:", result
    print "greedy best cost:", sum([x.weight * y.value if y != None
                                    else 0 for x, y in result])
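As a usage illustration, a minimal runnable sketch of the same greedy assignment with hypothetical Area/Content stand-ins; the class names, fields, and sample values are assumptions for demonstration only, not the dataset's own classes.

from dataclasses import dataclass, field

@dataclass(order=True)
class Content:
    value: float
    id: int = field(compare=False)   # identity used to avoid reusing content

@dataclass(order=True)
class Area:
    weight: float
    candidates: list = field(compare=False, default_factory=list)

def select_greedy(areas):
    result, used = [], set()
    # Heaviest area first; give it the best content not yet taken: O(k log k + k n log n).
    for area in sorted(areas, reverse=True):
        chosen = None
        for cand in sorted(area.candidates, reverse=True):
            if cand.id not in used:
                chosen = cand
                used.add(cand.id)
                break
        result.append((area, chosen))
    return result

areas = [Area(0.2, [Content(5.0, 1), Content(2.0, 2)]),
         Area(0.8, [Content(5.0, 1), Content(4.0, 3)])]
assignment = select_greedy(areas)
print(sum(a.weight * c.value if c is not None else 0
          for a, c in assignment))  # 0.8*5.0 + 0.2*2.0 = 4.4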
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def TightStrategy(I_list,box_list):\n iso = 0\n lemon = []\n SortedItems = quick_sort(I_list)\n for element in range(0, len(SortedItems)):\n w = SortedItems[element].weight\n x = FindTightFit(box_list, w)\n if x == None:\n iso+=1\n pass\n else:\n if w <= x.max_cap - x.curr_cap:\n x.curr_cap += w\n x.items_list.append(SortedItems[element])\n lemon.append(SortedItems[element])\n else:\n pass\n print('Results from Greedy Strategy 2')\n if iso > 0:\n print('Unable to pack all items!')\n else:\n print('All items were successfully packed!')\n for s in box_list:\n print('Box',s.id,'of weight capacity',s.max_cap,'contains:')\n for item in s.items_list:\n print(item.name,'of weight',item.weight)\n for item in SortedItems:\n if item not in lemon:\n print(item.name,'of weight',item.weight,'got left behind')\n print('\\n')", "def greedy_variable_order(primal_graph:PrimalGraph, pvo:List[List[int]]=None, pool_size=8, cutoff=INF):\n def fill_count(nid):\n \"\"\"\n count number of fill-in edges after removing nid\n number of combinations of nhd - existing edges (nodes in the subgraph of nhd)\n \"\"\"\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill\n\n def remove_fill_in_edges(nid):\n G.add_edges_from(itertools.combinations(G.neighbors(nid), 2)) # adding edge twice? no effect\n G.remove_node(nid)\n\n G = primal_graph.copy() # G = copy.deepcopy(primal_graph)\n if pvo is None:\n pvo = [list(G.nodes())] #[ [all in one block] ]\n ordering = []\n induced_width = 0\n for each_block in pvo:\n processing_nodes = SortedList( [(fill_count(nid), nid) for nid in each_block] ) # ascending order\n while processing_nodes:\n fill, selected_nid = processing_nodes[0]\n if fill != 0: # don't add any edge\n # pick a node in random from a pool of best nodes; each node has prob 1/(fill_in edges)\n scores, candidates = zip(*processing_nodes[:pool_size])\n probs = np.power(np.array(scores), -1.0)\n selected_ind = np.random.choice(len(probs), p=probs/(np.sum(probs)))\n selected_nid = candidates[selected_ind]\n ordering.append(selected_nid)\n # current_width = len(G.neighbors(selected_nid))\n current_width = G.degree[selected_nid]\n if current_width > cutoff:\n return None, induced_width\n if current_width > induced_width:\n induced_width = current_width\n remove_fill_in_edges(selected_nid)\n # recompute score after removing the selected node from primal graph\n processing_nodes = SortedList( [(fill_count(nid), nid) for _, nid in processing_nodes if nid != selected_nid] )\n return ordering, induced_width", "def RoomyStrategy(I_list,box_list):\n SortedItems = quick_sort(I_list)\n lemon = []\n iso = 0\n for element in range(0, len(SortedItems)):\n w = SortedItems[element].weight\n x = FindMaxCap(box_list)\n if w <= x.max_cap - x.curr_cap:\n x.curr_cap += w\n x.items_list.append(SortedItems[element])\n lemon.append(SortedItems[element])\n iso+=1\n else:\n pass\n print('Results from Greedy Strategy 1')\n if len(SortedItems) == iso:\n print('All items successfully packed into boxes!')\n else:\n print('Unable to pack all items!')\n for box in box_list:\n print('Box',box.id,'of weight capacity',box.max_cap,'contains:')\n for item in box.items_list:\n print(item.name,'of weight',item.weight)\n for item in SortedItems:\n if item not in lemon:\n print(item.name,'of weight',item.weight,'got left behind')\n print('\\n')", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = 
range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)", "def knapval_norep(W, wt):\n # choose to use item.weight and get item.value + optimal from what's left\n # last_item = items[-1]\n # other_items = items[:-1]\n # options = list(\n # knpaval_norep(capacity, other_items))\n # if last_item.weight <= capacity:\n # options.append(last_item.value +\n # knapval_norep(capacity-last_item.weight, other_items),\n # )\n #\n\n \"\"\"Find max weight that can fit in knapsack size W.\"\"\"\n # Create n nested arrays of 0 * (W + 1)\n max_vals = [[0] * (W + 1) for x in range(len(wt))]\n # Set max_vals[0] to wt[0] if wt[0] <= j\n max_vals[0] = [wt[0] if wt[0] <= j else 0 for j in range(W + 1)]\n for i in range(1, len(wt)):\n for j in range(1, W + 1):\n value = max_vals[i - 1][j] # previous i @ same j\n if wt[i] <= j:\n val = (max_vals[i - 1][j - wt[i]]) + wt[i]\n if value < val:\n value = val\n max_vals[i][j] = value\n else:\n max_vals[i][j] = value # set to [i - 1][j]\n else:\n max_vals[i][j] = value # set to [i - 1][j]\n\n return max_vals[-1][-1]", "def OneByOneStrategy(I_list,box_list):\n SortedItems = quick_sort(I_list)\n lemon = []\n for i in box_list:\n for item in range(len(SortedItems)):\n if i.max_cap - i.curr_cap == 0:\n break\n if SortedItems[item].weight <= i.max_cap - i.curr_cap:\n if SortedItems[item] not in lemon:\n lemon.append(SortedItems[item])\n i.items_list.append(SortedItems[item])\n i.curr_cap += SortedItems[item].weight\n else:\n pass\n print('Results from Greedy Strategy 3')\n if len(lemon) != len(SortedItems):\n print('Unable to pack all items')\n else:\n print('All items successfully packed!')\n for s in box_list:\n print('Box',s.id,'of weight capacity',s.max_cap,'contains:')\n for item in s.items_list:\n print(item.name,'of weight',item.weight)\n for item in SortedItems:\n if item not in lemon:\n print(item.name,'of weight',item.weight,'got left behind')\n print('\\n')", "def greedy_order(dict_prefs, list_els):\n ordering=list()\n els=deepcopy(list_els)\n while els!=[]:\n best_score=float(\"-infinity\")\n for e1 in els:\n score_el=0\n for e2 in els:\n if e1==e2:\n continue\n score_el+=_score_pref(e1,e2,dict_prefs)\n if score_el>best_score:\n best_score=score_el\n best_el=e1\n ordering.append(best_el)\n els.remove(best_el)\n return ordering", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def best_B(Ag):\n top = 0\n for i in range(len(Ag)):\n etop = np.min(cf.TD20[int(Ag[i]) - 1])\n top += etop\n return top", "def nms(dets, thresh=0.5, mode=\"Union\"):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 
4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n if mode == \"Union\":\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == \"Minimum\":\n ovr = inter / np.minimum(areas[i], areas[order[1:]])\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n \n #step 2: filter the word space \n inds = range(len(x1))\n keep_ori = keep\n for k in keep_ori:\n inds_exp = list(set(inds) - set([k]))\n xx1 = np.maximum(x1[k], x1[inds_exp])\n yy1 = np.maximum(y1[k], y1[inds_exp])\n xx2 = np.minimum(x2[k], x2[inds_exp])\n yy2 = np.minimum(y2[k], y2[inds_exp])\n \n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[k] + areas[inds_exp] - inter)\n ind_max = np.argmax(ovr)\n if ovr[ind_max] > thresh:\n keep.append(inds_exp[ind_max])\n\n #step 3: merge \n retain = []\n for i in range(len(keep) - 1):\n xx1 = np.maximum(x1[keep[i]], x1[keep[i+1:]])\n yy1 = np.maximum(y1[keep[i]], y1[keep[i+1:]])\n xx2 = np.maximum(x2[keep[i]], x2[keep[i+1:]])\n yy2 = np.maximum(y2[keep[i]], y2[keep[i+1:]])\n\n \n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[keep[i]] + areas[keep[i+1:]] - inter)\n inds = np.where(ovr<0.2)[0]\n for j in inds:\n retain.append(keep[i+1+j])\n return dets[retain]", "def lazy_greedy_max(self, budget):\r\n\r\n classes, no_elements = torch.unique(self.y_trn, return_counts=True)\r\n len_unique_elements = no_elements.shape[0]\r\n per_class_bud = int(budget / len(classes))\r\n final_per_class_bud = []\r\n _, sorted_indices = torch.sort(no_elements, descending = True)\r\n\r\n if self.selection_type == 'PerClass':\r\n \r\n total_idxs = 0\r\n for n_element in no_elements:\r\n final_per_class_bud.append(min(per_class_bud, torch.IntTensor.item(n_element)))\r\n total_idxs += min(per_class_bud, torch.IntTensor.item(n_element))\r\n \r\n if total_idxs < budget:\r\n bud_difference = budget - total_idxs\r\n for i in range(len_unique_elements):\r\n available_idxs = torch.IntTensor.item(no_elements[sorted_indices[i]])-per_class_bud \r\n final_per_class_bud[sorted_indices[i]] += min(bud_difference, available_idxs)\r\n total_idxs += min(bud_difference, available_idxs)\r\n bud_difference = budget - total_idxs\r\n if bud_difference == 0:\r\n break\r\n\r\n total_greedy_list = []\r\n for i in range(len_unique_elements):\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n \r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = 
apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=final_per_class_bud[i])\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn[idxs].numpy())\r\n greedyList = self.get_index(self.x_trn[idxs].numpy(), x_sub)\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n greedyList = list(np.argmax(sim_sub, axis=1))\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n elif self.selection_type == 'Supervised':\r\n \r\n \r\n if self.submod == 'feature_based':\r\n \r\n class_map = {}\r\n for i in range(len_unique_elements):\r\n class_map[torch.IntTensor.item(classes[i])] = i #Mapping classes from 0 to n\r\n \r\n sparse_data = torch.zeros([self.x_trn.shape[0], self.x_trn.shape[1]*len_unique_elements])\r\n for i in range(self.x_trn.shape[0]):\r\n \r\n start_col = class_map[torch.IntTensor.item(self.y_trn[i])]*self.x_trn.shape[1]\r\n end_col = start_col+self.x_trn.shape[1]\r\n sparse_data[i, start_col:end_col] = self.x_trn[i, :]\r\n\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n x_sub = fl.fit_transform(sparse_data.numpy())\r\n total_greedy_list = self.get_index(sparse_data.numpy(), x_sub)\r\n\r\n else:\r\n for i in range(len(classes)):\r\n \r\n if i == 0:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = idxs.repeat_interleave(N)\r\n col = idxs.repeat(N)\r\n data = self.dist_mat.cpu().numpy().flatten()\r\n else:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\r\n col = torch.cat((col, idxs.repeat(N)), dim=0)\r\n data = np.concatenate([data, self.dist_mat.cpu().numpy().flatten()], axis=0)\r\n \r\n \r\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\r\n #self.dist_mat = sparse_simmat\r\n\r\n if self.submod == 'facility_location':\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n sim_sub = fl.fit_transform(sparse_simmat)\r\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\r\n\r\n\r\n if self.selection_type == 'Full':\r\n \r\n\r\n total_greedy_list = []\r\n idx_end = self.x_trn.shape[0] - 1\r\n idxs = torch.linspace(0, idx_end, self.x_trn.shape[0]).long()\r\n\r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n 
n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn.numpy())\r\n total_greedy_list = self.get_index(self.x_trn.numpy(), x_sub)\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n total_greedy_list = list(np.argmax(sim_sub, axis=1))\r\n\r\n return total_greedy_list", "def top_n_satisfy2(content, n):\n #print(n)\n sum_satisfy = 0.0\n query_num = 0.0\n for qid in content:\n label_sort = []\n score = []\n all_info = content[qid]\n num_label1 = 0\n for info in all_info:\n if info[0] > 0:\n num_label1 += 1\n label_sort.append([info[0], info[1]])\n label_sort.sort(key=take_second, reverse=True)\n satisfy = 0.0\n count = 0\n size = len(label_sort)\n for i in range(min(n, size)):\n cur_label = label_sort[i][0]\n if cur_label > 0:\n satisfy += 1\n cur_satisfy = satisfy / min(n, num_label1)\n sum_satisfy += cur_satisfy\n query_num += 1\n return sum_satisfy / query_num", "def nms(boxes, scores, overlap=0.5, top_k=200):\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. 
after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count", "def nms(boxes, scores, overlap=0.5, top_k=200):\n\n keep = torch.Tensor(scores.size(0)).fill_(0).long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count", "def visit_k_nearest(node, pt, k, result):\n # rather brute force but because cut off and k expected to be rather small\n # not further optimized\n # (result could instead of list be a bin heap with at most k items)\n for active, item in zip(node.active, node.items):\n # check active items\n if active:\n d = distance2(pt, item)\n result.append( (d, item) )\n # sort on distance\n result.sort(key=lambda x: x[0])\n # keep max k items\n while len(result) > k:\n result.pop()", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def greedy(items, maxCost, keyFunction):\n result = []\n itemsCopy = sorted(items, key=keyFunction, reverse=True)\n totalValue , totalCalories = 0.0, 0.0\n for i in range(len(itemsCopy)):\n item = itemsCopy[i]\n if (totalCalories + item.getCalories()) <= maxCost:\n result.append(item)\n totalCalories += item.getCalories()\n totalValue += item.getValue()\n return result, totalValue", "def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"", "def quickbb(graph, fast=True):\n\n \"\"\"Given a permutation of the nodes (called an elimination ordering),\n for each node, remove the node and make its 
neighbors into a clique.\n The maximum degree of the nodes at the time of their elimination is\n the width of the tree decomposition corresponding to that ordering.\n The treewidth of the graph is the minimum over all possible\n permutations.\n \"\"\"\n\n best = Solution() # this gets around the lack of nonlocal in Python 2\n best.count = 0\n\n def bb(graph, order, f, g):\n best.count += 1\n if len(graph) < 2:\n if f < best.ub:\n assert f == g\n best.ub = f\n best.order = list(order) + list(graph)\n\n else:\n vs = []\n for v in graph:\n # very important pruning rule\n if simplicial(graph, v) or almost_simplicial(graph, v) and len(graph[v]) <= lb:\n vs = [v]\n break\n else:\n vs.append(v)\n\n for v in vs:\n graph1 = copy_graph(graph)\n eliminate_node(graph1, v)\n order1 = order + [v]\n # treewidth for current order so far\n g1 = max(g, len(graph[v]))\n # lower bound given where we are\n f1 = max(g, lower_bound(graph1))\n if f1 < best.ub:\n bb(graph1, order1, f1, g1)\n return\n\n graph = {u: set(graph[u]) for u in graph}\n\n order = []\n best.ub, best.order = upper_bound(graph)\n lb = lower_bound(graph)\n\n # This turns on the branch and bound algorithm that\n # gets better treewidth results, but takes a lot\n # longer to process\n if not fast:\n if lb < best.ub:\n bb(graph, order, lb, 0)\n\n # Build the tree decomposition\n tree = defaultdict(set)\n\n def build(order):\n if len(order) < 2:\n bag = frozenset(order)\n tree[bag] = set()\n return\n v = order[0]\n clique = graph[v]\n eliminate_node(graph, v)\n build(order[1:])\n for tv in tree:\n if clique.issubset(tv):\n break\n bag = frozenset(clique | {v})\n tree[bag].add(tv)\n tree[tv].add(bag)\n\n build(best.order)\n return tree", "def get_best_split(rows):\n best_gain = 0\n best_question = None\n current_impurity = get_gini(rows)\n n_features = len(rows[0])\n\n for col in range(n_features):\n\n for row in rows:\n question = Question(col, row[col])\n true_rows, false_rows = partition(rows, question)\n\n if len(true_rows) == 0 or len(false_rows) == 0:\n break\n\n question_gain = get_info_gain(true_rows, false_rows, current_impurity)\n\n if question_gain >= best_gain:\n best_gain = question_gain\n best_question = question\n\n print(best_gain)\n print(best_question)\n return best_gain, best_question", "def solve(instance, silent=True, max_weight_lower=1,\n max_weight_upper=float('inf'), scoring=\"sink distance\"):\n flow = instance.flow\n k = instance.k\n\n # quit right away if the instance has weight bounds that can't be satisfied\n if instance.has_bad_bounds():\n return set()\n\n # if k equals the size of the largest edge cut, the weights are\n # predetermined\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n # Important: path weights must be sorted, otherwise our\n # subsequent optimizations will remove this constraint.\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight,\n instance.weights))\n\n if not silent:\n print(instance.weights, feasible_weights)\n\n # figure out whether we get the first or last positions for free\n largest_free = False\n smallest_free = False\n # check largest weight first\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n\n positions 
= list(range(int(smallest_free), k-int(largest_free)))\n\n # iterate over the number of unguessed weights\n for diff in range(k+1):\n if not silent:\n print(\"Diff =\", diff)\n # iterate over positions of guessed weights. We want them to be\n # ordered, but choose the smallest first to be removed\n for rev_indices in itertools.combinations(reversed(positions), k-diff):\n indices = list(reversed(rev_indices))\n p = len(indices)\n # when k-1 values are determined, it also determines the kth value\n if p == k-1:\n continue\n # iterate over choices for those guessed weights\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n\n # assign the chosen weights to the guessed positions\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n\n # add in free values\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k-1] = largest\n\n # quit if this didn't work\n if not is_feasible(weights, flow, max_weight):\n continue\n\n if not silent:\n print(\"Trying weights\", weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print(\"Unterdetermined solution\")\n return sol", "def maxWeightMatching(edges, maxcardinality=False):\n\n #\n # Vertices are numbered 0 .. (nvertex-1).\n # Non-trivial blossoms are numbered nvertex .. (2*nvertex-1)\n #\n # Edges are numbered 0 .. (nedge-1).\n # Edge endpoints are numbered 0 .. (2*nedge-1), such that endpoints\n # (2*k) and (2*k+1) both belong to edge k.\n #\n # Many terms used in the comments (sub-blossom, T-vertex) come from\n # the paper by Galil; read the paper before reading this code.\n #\n\n # Deal swiftly with empty graphs.\n if not edges:\n return [ ]\n\n # Count vertices.\n nedge = len(edges)\n nvertex = 0\n for (i, j, w) in edges:\n assert i >= 0 and j >= 0 and i != j\n if i >= nvertex:\n nvertex = i + 1\n if j >= nvertex:\n nvertex = j + 1\n\n # Find the maximum edge weight.\n maxweight = max(0, max([ wt for (i, j, wt) in edges ]))\n\n # If p is an edge endpoint,\n # endpoint[p] is the vertex to which endpoint p is attached.\n # Not modified by the algorithm.\n endpoint = [ edges[p//2][p%2] for p in range (2*nedge) ]\n\n # If v is a vertex,\n # neighbend[v] is the list of remote endpoints of the edges attached to v.\n # Not modified by the algorithm.\n neighbend = [ [ ] for i in range(nvertex) ]\n for k in xrange(len(edges)):\n (i, j, w) = edges[k]\n neighbend[i].append(2*k+1)\n neighbend[j].append(2*k)\n\n # If v is a vertex,\n # mate[v] is the remote endpoint of its matched edge, or -1 if it is single\n # (i.e. 
endpoint[mate[v]] is v's partner vertex).\n # Initially all vertices are single; updated during augmentation.\n mate = nvertex * [ -1 ]\n\n # If b is a top-level blossom,\n # label[b] is 0 if b is unlabeled (free);\n # 1 if b is an S-vertex/blossom;\n # 2 if b is a T-vertex/blossom.\n # The label of a vertex is found by looking at the label of its\n # top-level containing blossom.\n # If v is a vertex inside a T-blossom,\n # label[v] is 2 iff v is reachable from an S-vertex outside the blossom.\n # Labels are assigned during a stage and reset after each augmentation.\n label = (2 * nvertex) * [ 0 ]\n\n # If b is a labeled top-level blossom,\n # labelend[b] is the remote endpoint of the edge through which b obtained\n # its label, or -1 if b's base vertex is single.\n # If v is a vertex inside a T-blossom and label[v] == 2,\n # labelend[v] is the remote endpoint of the edge through which v is\n # reachable from outside the blossom.\n labelend = (2 * nvertex) * [ -1 ]\n\n # If v is a vertex,\n # inblossom[v] is the top-level blossom to which v belongs.\n # If v is a top-level vertex, v is itself a blossom (a trivial blossom)\n # and inblossom[v] == v.\n # Initially all vertices are top-level trivial blossoms.\n inblossom = range(nvertex)\n\n # If b is a sub-blossom,\n # blossomparent[b] is its immediate parent (sub-)blossom.\n # If b is a top-level blossom, blossomparent[b] is -1.\n blossomparent = (2 * nvertex) * [ -1 ]\n\n # If b is a non-trivial (sub-)blossom,\n # blossomchilds[b] is an ordered list of its sub-blossoms, starting with\n # the base and going round the blossom.\n blossomchilds = (2 * nvertex) * [ None ]\n\n # If b is a (sub-)blossom,\n # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).\n blossombase = range(nvertex) + nvertex * [ -1 ]\n\n # If b is a non-trivial (sub-)blossom,\n # blossomendps[b] is a list of endpoints on its connecting edges,\n # such that blossomendps[b][i] is the local endpoint of blossomchilds[b][i]\n # on the edge that connects it to blossomchilds[b][wrap(i+1)].\n blossomendps = (2 * nvertex) * [ None ]\n\n # If v is a free vertex (or an unreached vertex inside a T-blossom),\n # bestedge[v] is the edge to an S-vertex with least slack,\n # or -1 if there is no such edge.\n # If b is a (possibly trivial) top-level S-blossom,\n # bestedge[b] is the least-slack edge to a different S-blossom,\n # or -1 if there is no such edge.\n # This is used for efficient computation of delta2 and delta3.\n bestedge = (2 * nvertex) * [ -1 ]\n\n # If b is a non-trivial top-level S-blossom,\n # blossombestedges[b] is a list of least-slack edges to neighbouring\n # S-blossoms, or None if no such list has been computed yet.\n # This is used for efficient computation of delta3.\n blossombestedges = (2 * nvertex) * [ None ]\n\n # List of currently unused blossom numbers.\n unusedblossoms = range(nvertex, 2*nvertex)\n\n # If v is a vertex,\n # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual\n # optimization problem (multiplication by two ensures integer values\n # throughout the algorithm if all edge weights are integers).\n # If b is a non-trivial blossom,\n # dualvar[b] = z(b) where z(b) is b's variable in the dual optimization\n # problem.\n dualvar = nvertex * [ maxweight ] + nvertex * [ 0 ]\n\n # If allowedge[k] is true, edge k has zero slack in the optimization\n # problem; if allowedge[k] is false, the edge's slack may or may not\n # be zero.\n allowedge = nedge * [ False ]\n\n # Queue of newly discovered S-vertices.\n queue = [ ]\n\n # 
Return 2 * slack of edge k (does not work inside blossoms).\n def slack(k):\n (i, j, wt) = edges[k]\n return dualvar[i] + dualvar[j] - 2 * wt\n\n # Generate the leaf vertices of a blossom.\n def blossomLeaves(b):\n if b < nvertex:\n yield b\n else:\n for t in blossomchilds[b]:\n if t < nvertex:\n yield t\n else:\n for v in blossomLeaves(t):\n yield v\n\n # Assign label t to the top-level blossom containing vertex w\n # and record the fact that w was reached through the edge with\n # remote endpoint p.\n def assignLabel(w, t, p):\n if DEBUG: DEBUG('assignLabel(%d,%d,%d)' % (w, t, p))\n b = inblossom[w]\n assert label[w] == 0 and label[b] == 0\n label[w] = label[b] = t\n labelend[w] = labelend[b] = p\n bestedge[w] = bestedge[b] = -1\n if t == 1:\n # b became an S-vertex/blossom; add it(s vertices) to the queue.\n queue.extend(blossomLeaves(b))\n if DEBUG: DEBUG('PUSH ' + str(list(blossomLeaves(b))))\n elif t == 2:\n # b became a T-vertex/blossom; assign label S to its mate.\n # (If b is a non-trivial blossom, its base is the only vertex\n # with an external mate.)\n base = blossombase[b]\n assert mate[base] >= 0\n assignLabel(endpoint[mate[base]], 1, mate[base] ^ 1)\n\n # Trace back from vertices v and w to discover either a new blossom\n # or an augmenting path. Return the base vertex of the new blossom or -1.\n def scanBlossom(v, w):\n if DEBUG: DEBUG('scanBlossom(%d,%d)' % (v, w))\n # Trace back from v and w, placing breadcrumbs as we go.\n path = [ ]\n base = -1\n while v != -1 or w != -1:\n # Look for a breadcrumb in v's blossom or put a new breadcrumb.\n b = inblossom[v]\n if label[b] & 4:\n base = blossombase[b]\n break\n assert label[b] == 1\n path.append(b)\n label[b] = 5\n # Trace one step back.\n assert labelend[b] == mate[blossombase[b]]\n if labelend[b] == -1:\n # The base of blossom b is single; stop tracing this path.\n v = -1\n else:\n v = endpoint[labelend[b]]\n b = inblossom[v]\n assert label[b] == 2\n # b is a T-blossom; trace one more step back.\n assert labelend[b] >= 0\n v = endpoint[labelend[b]]\n # Swap v and w so that we alternate between both paths.\n if w != -1:\n v, w = w, v\n # Remove breadcrumbs.\n for b in path:\n label[b] = 1\n # Return base vertex, if we found one.\n return base\n\n # Construct a new blossom with given base, containing edge k which\n # connects a pair of S vertices. 
Label the new blossom as S; set its dual\n # variable to zero; relabel its T-vertices to S and add them to the queue.\n def addBlossom(base, k):\n (v, w, wt) = edges[k]\n bb = inblossom[base]\n bv = inblossom[v]\n bw = inblossom[w]\n # Create blossom.\n b = unusedblossoms.pop()\n if DEBUG: DEBUG('addBlossom(%d,%d) (v=%d w=%d) -> %d' % (base, k, v, w, b))\n blossombase[b] = base\n blossomparent[b] = -1\n blossomparent[bb] = b\n # Make list of sub-blossoms and their interconnecting edge endpoints.\n blossomchilds[b] = path = [ ]\n blossomendps[b] = endps = [ ]\n # Trace back from v to base.\n while bv != bb:\n # Add bv to the new blossom.\n blossomparent[bv] = b\n path.append(bv)\n endps.append(labelend[bv])\n assert (label[bv] == 2 or\n (label[bv] == 1 and labelend[bv] == mate[blossombase[bv]]))\n # Trace one step back.\n assert labelend[bv] >= 0\n v = endpoint[labelend[bv]]\n bv = inblossom[v]\n # Reverse lists, add endpoint that connects the pair of S vertices.\n path.append(bb)\n path.reverse()\n endps.reverse()\n endps.append(2*k)\n # Trace back from w to base.\n while bw != bb:\n # Add bw to the new blossom.\n blossomparent[bw] = b\n path.append(bw)\n endps.append(labelend[bw] ^ 1)\n assert (label[bw] == 2 or\n (label[bw] == 1 and labelend[bw] == mate[blossombase[bw]]))\n # Trace one step back.\n assert labelend[bw] >= 0\n w = endpoint[labelend[bw]]\n bw = inblossom[w]\n # Set label to S.\n assert label[bb] == 1\n label[b] = 1\n labelend[b] = labelend[bb]\n # Set dual variable to zero.\n dualvar[b] = 0\n # Relabel vertices.\n for v in blossomLeaves(b):\n if label[inblossom[v]] == 2:\n # This T-vertex now turns into an S-vertex because it becomes\n # part of an S-blossom; add it to the queue.\n queue.append(v)\n inblossom[v] = b\n # Compute blossombestedges[b].\n bestedgeto = (2 * nvertex) * [ -1 ]\n for bv in path:\n if blossombestedges[bv] is None:\n # This subblossom does not have a list of least-slack edges;\n # get the information from the vertices.\n nblists = [ [ p // 2 for p in neighbend[v] ]\n for v in blossomLeaves(bv) ]\n else:\n # Walk this subblossom's least-slack edges.\n nblists = [ blossombestedges[bv] ]\n for nblist in nblists:\n for k in nblist:\n (i, j, wt) = edges[k]\n if inblossom[j] == b:\n i, j = j, i\n bj = inblossom[j]\n if (bj != b and label[bj] == 1 and\n (bestedgeto[bj] == -1 or\n slack(k) < slack(bestedgeto[bj]))):\n bestedgeto[bj] = k\n # Forget about least-slack edges of the subblossom.\n blossombestedges[bv] = None\n bestedge[bv] = -1\n blossombestedges[b] = [ k for k in bestedgeto if k != -1 ]\n # Select bestedge[b].\n bestedge[b] = -1\n for k in blossombestedges[b]:\n if bestedge[b] == -1 or slack(k) < slack(bestedge[b]):\n bestedge[b] = k\n if DEBUG: DEBUG('blossomchilds[%d]=' % b + repr(blossomchilds[b]))\n\n # Expand the given top-level blossom.\n def expandBlossom(b, endstage):\n if DEBUG: DEBUG('expandBlossom(%d,%d) %s' % (b, endstage, repr(blossomchilds[b])))\n # Convert sub-blossoms into top-level blossoms.\n for s in blossomchilds[b]:\n blossomparent[s] = -1\n if s < nvertex:\n inblossom[s] = s\n elif endstage and dualvar[s] == 0:\n # Recursively expand this sub-blossom.\n expandBlossom(s, endstage)\n else:\n for v in blossomLeaves(s):\n inblossom[v] = s\n # If we expand a T-blossom during a stage, its sub-blossoms must be\n # relabeled.\n if (not endstage) and label[b] == 2:\n # Start at the sub-blossom through which the expanding\n # blossom obtained its label, and relabel sub-blossoms untili\n # we reach the base.\n # Figure out through which 
sub-blossom the expanding blossom\n # obtained its label initially.\n assert labelend[b] >= 0\n entrychild = inblossom[endpoint[labelend[b] ^ 1]]\n # Decide in which direction we will go round the blossom.\n j = blossomchilds[b].index(entrychild)\n if j & 1:\n # Start index is odd; go forward and wrap.\n j -= len(blossomchilds[b])\n jstep = 1\n endptrick = 0\n else:\n # Start index is even; go backward.\n jstep = -1\n endptrick = 1\n # Move along the blossom until we get to the base.\n p = labelend[b]\n while j != 0:\n # Relabel the T-sub-blossom.\n label[endpoint[p ^ 1]] = 0\n label[endpoint[blossomendps[b][j-endptrick]^endptrick^1]] = 0\n assignLabel(endpoint[p ^ 1], 2, p)\n # Step to the next S-sub-blossom and note its forward endpoint.\n allowedge[blossomendps[b][j-endptrick]//2] = True\n j += jstep\n p = blossomendps[b][j-endptrick] ^ endptrick\n # Step to the next T-sub-blossom.\n allowedge[p//2] = True\n j += jstep\n # Relabel the base T-sub-blossom WITHOUT stepping through to\n # its mate (so don't call assignLabel).\n bv = blossomchilds[b][j]\n label[endpoint[p ^ 1]] = label[bv] = 2\n labelend[endpoint[p ^ 1]] = labelend[bv] = p\n bestedge[bv] = -1\n # Continue along the blossom until we get back to entrychild.\n j += jstep\n while blossomchilds[b][j] != entrychild:\n # Examine the vertices of the sub-blossom to see whether\n # it is reachable from a neighbouring S-vertex outside the\n # expanding blossom.\n bv = blossomchilds[b][j]\n if label[bv] == 1:\n # This sub-blossom just got label S through one of its\n # neighbours; leave it.\n j += jstep\n continue\n for v in blossomLeaves(bv):\n if label[v] != 0:\n break\n # If the sub-blossom contains a reachable vertex, assign\n # label T to the sub-blossom.\n if label[v] != 0:\n assert label[v] == 2\n assert inblossom[v] == bv\n label[v] = 0\n label[endpoint[mate[blossombase[bv]]]] = 0\n assignLabel(v, 2, labelend[v])\n j += jstep\n # Recycle the blossom number.\n label[b] = labelend[b] = -1\n blossomchilds[b] = blossomendps[b] = None\n blossombase[b] = -1\n blossombestedges[b] = None\n bestedge[b] = -1\n unusedblossoms.append(b)\n\n # Swap matched/unmatched edges over an alternating path through blossom b\n # between vertex v and the base vertex. 
Keep blossom bookkeeping consistent.\n def augmentBlossom(b, v):\n if DEBUG: DEBUG('augmentBlossom(%d,%d)' % (b, v))\n # Bubble up through the blossom tree from vertex v to an immediate\n # sub-blossom of b.\n t = v\n while blossomparent[t] != b:\n t = blossomparent[t]\n # Recursively deal with the first sub-blossom.\n if t >= nvertex:\n augmentBlossom(t, v)\n # Decide in which direction we will go round the blossom.\n i = j = blossomchilds[b].index(t)\n if i & 1:\n # Start index is odd; go forward and wrap.\n j -= len(blossomchilds[b])\n jstep = 1\n endptrick = 0\n else:\n # Start index is even; go backward.\n jstep = -1\n endptrick = 1\n # Move along the blossom until we get to the base.\n while j != 0:\n # Step to the next sub-blossom and augment it recursively.\n j += jstep\n t = blossomchilds[b][j]\n p = blossomendps[b][j-endptrick] ^ endptrick\n if t >= nvertex:\n augmentBlossom(t, endpoint[p])\n # Step to the next sub-blossom and augment it recursively.\n j += jstep\n t = blossomchilds[b][j]\n if t >= nvertex:\n augmentBlossom(t, endpoint[p ^ 1])\n # Match the edge connecting those sub-blossoms.\n mate[endpoint[p]] = p ^ 1\n mate[endpoint[p ^ 1]] = p\n if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (endpoint[p], endpoint[p^1], p//2))\n # Rotate the list of sub-blossoms to put the new base at the front.\n blossomchilds[b] = blossomchilds[b][i:] + blossomchilds[b][:i]\n blossomendps[b] = blossomendps[b][i:] + blossomendps[b][:i]\n blossombase[b] = blossombase[blossomchilds[b][0]]\n assert blossombase[b] == v\n\n # Swap matched/unmatched edges over an alternating path between two\n # single vertices. The augmenting path runs through edge k, which\n # connects a pair of S vertices.\n def augmentMatching(k):\n (v, w, wt) = edges[k]\n if DEBUG: DEBUG('augmentMatching(%d) (v=%d w=%d)' % (k, v, w))\n if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (v, w, k))\n for (s, p) in ((v, 2*k+1), (w, 2*k)):\n # Match vertex s to remote endpoint p. Then trace back from s\n # until we find a single vertex, swapping matched and unmatched\n # edges as we go.\n while 1:\n bs = inblossom[s]\n assert label[bs] == 1\n assert labelend[bs] == mate[blossombase[bs]]\n # Augment through the S-blossom from s to base.\n if bs >= nvertex:\n augmentBlossom(bs, s)\n # Update mate[s]\n mate[s] = p\n # Trace one step back.\n if labelend[bs] == -1:\n # Reached single vertex; stop.\n break\n t = endpoint[labelend[bs]]\n bt = inblossom[t]\n assert label[bt] == 2\n # Trace one step back.\n assert labelend[bt] >= 0\n s = endpoint[labelend[bt]]\n j = endpoint[labelend[bt] ^ 1]\n # Augment through the T-blossom from j to base.\n assert blossombase[bt] == t\n if bt >= nvertex:\n augmentBlossom(bt, j)\n # Update mate[j]\n mate[j] = labelend[bt]\n # Keep the opposite endpoint;\n # it will be assigned to mate[s] in the next step.\n p = labelend[bt] ^ 1\n if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (s, t, p//2))\n\n # Verify that the optimum solution has been reached.\n def verifyOptimum():\n if maxcardinality:\n # Vertices may have negative dual;\n # find a constant non-negative number to add to all vertex duals.\n vdualoffset = max(0, -min(dualvar[:nvertex]))\n else:\n vdualoffset = 0\n # 0. all dual variables are non-negative\n assert min(dualvar[:nvertex]) + vdualoffset >= 0\n assert min(dualvar[nvertex:]) >= 0\n # 0. all edges have non-negative slack and\n # 1. 
all matched edges have zero slack;\n for k in xrange(nedge):\n (i, j, wt) = edges[k]\n s = dualvar[i] + dualvar[j] - 2 * wt\n iblossoms = [ i ]\n jblossoms = [ j ]\n while blossomparent[iblossoms[-1]] != -1:\n iblossoms.append(blossomparent[iblossoms[-1]])\n while blossomparent[jblossoms[-1]] != -1:\n jblossoms.append(blossomparent[jblossoms[-1]])\n iblossoms.reverse()\n jblossoms.reverse()\n for (bi, bj) in zip(iblossoms, jblossoms):\n if bi != bj:\n break\n s += 2 * dualvar[bi]\n assert s >= 0\n if mate[i] // 2 == k or mate[j] // 2 == k:\n assert mate[i] // 2 == k and mate[j] // 2 == k\n assert s == 0\n # 2. all single vertices have zero dual value;\n for v in xrange(nvertex):\n assert mate[v] >= 0 or dualvar[v] + vdualoffset == 0\n # 3. all blossoms with positive dual value are full.\n for b in xrange(nvertex, 2*nvertex):\n if blossombase[b] >= 0 and dualvar[b] > 0:\n assert len(blossomendps[b]) % 2 == 1\n for p in blossomendps[b][1::2]:\n assert mate[endpoint[p]] == p ^ 1\n assert mate[endpoint[p ^ 1]] == p\n # Ok.\n\n # Check optimized delta2 against a trivial computation.\n def checkDelta2():\n for v in xrange(nvertex):\n if label[inblossom[v]] == 0:\n bd = None\n bk = -1\n for p in neighbend[v]:\n k = p // 2\n w = endpoint[p]\n if label[inblossom[w]] == 1:\n d = slack(k)\n if bk == -1 or d < bd:\n bk = k\n bd = d\n if DEBUG and (bestedge[v] != -1 or bk != -1) and (bestedge[v] == -1 or bd != slack(bestedge[v])):\n DEBUG('v=' + str(v) + ' bk=' + str(bk) + ' bd=' + str(bd) + ' bestedge=' + str(bestedge[v]) + ' slack=' + str(slack(bestedge[v])))\n assert (bk == -1 and bestedge[v] == -1) or (bestedge[v] != -1 and bd == slack(bestedge[v]))\n\n # Check optimized delta3 against a trivial computation.\n def checkDelta3():\n bk = -1\n bd = None\n tbk = -1\n tbd = None\n for b in xrange(2 * nvertex):\n if blossomparent[b] == -1 and label[b] == 1:\n for v in blossomLeaves(b):\n for p in neighbend[v]:\n k = p // 2\n w = endpoint[p]\n if inblossom[w] != b and label[inblossom[w]] == 1:\n d = slack(k)\n if bk == -1 or d < bd:\n bk = k\n bd = d\n if bestedge[b] != -1:\n (i, j, wt) = edges[bestedge[b]]\n assert inblossom[i] == b or inblossom[j] == b\n assert inblossom[i] != b or inblossom[j] != b\n assert label[inblossom[i]] == 1 and label[inblossom[j]] == 1\n if tbk == -1 or slack(bestedge[b]) < tbd:\n tbk = bestedge[b]\n tbd = slack(bestedge[b])\n if DEBUG and bd != tbd:\n DEBUG('bk=%d tbk=%d bd=%s tbd=%s' % (bk, tbk, repr(bd), repr(tbd)))\n assert bd == tbd\n\n # Main loop: continue until no further improvement is possible.\n for t in xrange(nvertex):\n\n # Each iteration of this loop is a \"stage\".\n # A stage finds an augmenting path and uses that to improve\n # the matching.\n if DEBUG: DEBUG('STAGE %d' % t)\n\n # Remove labels from top-level blossoms/vertices.\n label[:] = (2 * nvertex) * [ 0 ]\n\n # Forget all about least-slack edges.\n bestedge[:] = (2 * nvertex) * [ -1 ]\n blossombestedges[nvertex:] = nvertex * [ None ]\n\n # Loss of labeling means that we can not be sure that currently\n # allowable edges remain allowable througout this stage.\n allowedge[:] = nedge * [ False ]\n\n # Make queue empty.\n queue[:] = [ ]\n \n # Label single blossoms/vertices with S and put them in the queue.\n for v in xrange(nvertex):\n if mate[v] == -1 and label[inblossom[v]] == 0:\n assignLabel(v, 1, -1)\n\n # Loop until we succeed in augmenting the matching.\n augmented = 0\n while 1:\n\n # Each iteration of this loop is a \"substage\".\n # A substage tries to find an augmenting path;\n # if found, the 
path is used to improve the matching and\n # the stage ends. If there is no augmenting path, the\n # primal-dual method is used to pump some slack out of\n # the dual variables.\n if DEBUG: DEBUG('SUBSTAGE')\n\n # Continue labeling until all vertices which are reachable\n # through an alternating path have got a label.\n while queue and not augmented:\n\n # Take an S vertex from the queue.\n v = queue.pop()\n if DEBUG: DEBUG('POP v=%d' % v)\n assert label[inblossom[v]] == 1\n\n # Scan its neighbours:\n for p in neighbend[v]:\n k = p // 2\n w = endpoint[p]\n # w is a neighbour to v\n if inblossom[v] == inblossom[w]:\n # this edge is internal to a blossom; ignore it\n continue\n if not allowedge[k]:\n kslack = slack(k)\n if kslack <= 0:\n # edge k has zero slack => it is allowable\n allowedge[k] = True\n if allowedge[k]:\n if label[inblossom[w]] == 0:\n # (C1) w is a free vertex;\n # label w with T and label its mate with S (R12).\n assignLabel(w, 2, p ^ 1)\n elif label[inblossom[w]] == 1:\n # (C2) w is an S-vertex (not in the same blossom);\n # follow back-links to discover either an\n # augmenting path or a new blossom.\n base = scanBlossom(v, w)\n if base >= 0:\n # Found a new blossom; add it to the blossom\n # bookkeeping and turn it into an S-blossom.\n addBlossom(base, k)\n else:\n # Found an augmenting path; augment the\n # matching and end this stage.\n augmentMatching(k)\n augmented = 1\n break\n elif label[w] == 0:\n # w is inside a T-blossom, but w itself has not\n # yet been reached from outside the blossom;\n # mark it as reached (we need this to relabel\n # during T-blossom expansion).\n assert label[inblossom[w]] == 2\n label[w] = 2\n labelend[w] = p ^ 1\n elif label[inblossom[w]] == 1:\n # keep track of the least-slack non-allowable edge to\n # a different S-blossom.\n b = inblossom[v]\n if bestedge[b] == -1 or kslack < slack(bestedge[b]):\n bestedge[b] = k\n elif label[w] == 0:\n # w is a free vertex (or an unreached vertex inside\n # a T-blossom) but we can not reach it yet;\n # keep track of the least-slack edge that reaches w.\n if bestedge[w] == -1 or kslack < slack(bestedge[w]):\n bestedge[w] = k\n\n if augmented:\n break\n\n # There is no augmenting path under these constraints;\n # compute delta and reduce slack in the optimization problem.\n # (Note that our vertex dual variables, edge slacks and delta's\n # are pre-multiplied by two.)\n deltatype = -1\n delta = deltaedge = deltablossom = None\n\n # Verify data structures for delta2/delta3 computation.\n if CHECK_DELTA:\n checkDelta2()\n checkDelta3()\n\n # Compute delta1: the minumum value of any vertex dual.\n if not maxcardinality:\n deltatype = 1\n delta = min(dualvar[:nvertex])\n\n # Compute delta2: the minimum slack on any edge between\n # an S-vertex and a free vertex.\n for v in xrange(nvertex):\n if label[inblossom[v]] == 0 and bestedge[v] != -1:\n d = slack(bestedge[v])\n if deltatype == -1 or d < delta:\n delta = d\n deltatype = 2\n deltaedge = bestedge[v]\n\n # Compute delta3: half the minimum slack on any edge between\n # a pair of S-blossoms.\n for b in xrange(2 * nvertex):\n if ( blossomparent[b] == -1 and label[b] == 1 and\n bestedge[b] != -1 ):\n kslack = slack(bestedge[b])\n if type(kslack) in (int, long):\n assert (kslack % 2) == 0\n d = kslack // 2\n else:\n d = kslack / 2\n if deltatype == -1 or d < delta:\n delta = d\n deltatype = 3\n deltaedge = bestedge[b]\n\n # Compute delta4: minimum z variable of any T-blossom.\n for b in xrange(nvertex, 2*nvertex):\n if ( blossombase[b] >= 0 and 
blossomparent[b] == -1 and\n label[b] == 2 and\n (deltatype == -1 or dualvar[b] < delta) ):\n delta = dualvar[b]\n deltatype = 4\n deltablossom = b\n\n if deltatype == -1:\n # No further improvement possible; max-cardinality optimum\n # reached. Do a final delta update to make the optimum\n # verifyable.\n assert maxcardinality\n deltatype = 1\n delta = max(0, min(dualvar[:nvertex]))\n\n # Update dual variables according to delta.\n for v in xrange(nvertex):\n if label[inblossom[v]] == 1:\n # S-vertex: 2*u = 2*u - 2*delta\n dualvar[v] -= delta\n elif label[inblossom[v]] == 2:\n # T-vertex: 2*u = 2*u + 2*delta\n dualvar[v] += delta\n for b in xrange(nvertex, 2*nvertex):\n if blossombase[b] >= 0 and blossomparent[b] == -1:\n if label[b] == 1:\n # top-level S-blossom: z = z + 2*delta\n dualvar[b] += delta\n elif label[b] == 2:\n # top-level T-blossom: z = z - 2*delta\n dualvar[b] -= delta\n\n # Take action at the point where minimum delta occurred.\n if DEBUG: DEBUG('delta%d=%f' % (deltatype, delta))\n if deltatype == 1: \n # No further improvement possible; optimum reached.\n break\n elif deltatype == 2:\n # Use the least-slack edge to continue the search.\n allowedge[deltaedge] = True\n (i, j, wt) = edges[deltaedge]\n if label[inblossom[i]] == 0:\n i, j = j, i\n assert label[inblossom[i]] == 1\n queue.append(i)\n elif deltatype == 3:\n # Use the least-slack edge to continue the search.\n allowedge[deltaedge] = True\n (i, j, wt) = edges[deltaedge]\n assert label[inblossom[i]] == 1\n queue.append(i)\n elif deltatype == 4:\n # Expand the least-z blossom.\n expandBlossom(deltablossom, False)\n\n # End of a this substage.\n\n # Stop when no more augmenting path can be found.\n if not augmented:\n break\n\n # End of a stage; expand all S-blossoms which have dualvar = 0.\n for b in xrange(nvertex, 2*nvertex):\n if ( blossomparent[b] == -1 and blossombase[b] >= 0 and\n label[b] == 1 and dualvar[b] == 0 ):\n expandBlossom(b, True)\n\n # Verify that we reached the optimum solution.\n if CHECK_OPTIMUM:\n verifyOptimum()\n\n # Transform mate[] such that mate[v] is the vertex to which v is paired.\n for v in xrange(nvertex):\n if mate[v] >= 0:\n mate[v] = endpoint[mate[v]]\n totalweight = 0\n for v in xrange(nvertex):\n assert mate[v] == -1 or mate[mate[v]] == v \n if v < mate[v]:\t\n \tfor (i, j, w) in edges:\n \t\tif i == v and j == mate[v]:\n \tprint (v, mate[v], w)\n #maxweighthere = max(0, max([ wt for (i == v, j == mate[v], wt) in edges ]))\n #print maxweighthere\n print (\"totalweight = \",totalweight)\n return mate", "def strategy_best(cookies, cps, time_left, build_info):\n items = build_info.build_items()\n result = None\n cost = 0\n overall = cookies + time_left * cps\n for item in items:\n temp_cost = build_info.get_cps(item) / build_info.get_cost(item)\n if temp_cost <= overall and cost < temp_cost:\n result = item\n cost = temp_cost\n return result", "def knapsack(W, items): \n n = len(items)\n k = [[0 for x in range(W+1)] for x in range(n+1)]\n\n for i in range(n+1):\n for w in range(W+1):\n if i == 0 or w == 0:\n k[i][w] = 0\n elif items[i-1][2] <= w:\n k[i][w] = max(items[i-1][1] + k[i-1][w-items[i-1][2]], k[i-1][w])\n else:\n k[i][w] = k[i-1][w]\n\n picked = []\n set_trace(k, n, W, items, picked)\n return k[n][W], picked", "def strategy_best(cookies, cps, time_left, build_info):\n return_item = None\n highest_icr = float('-inf')\n item_list = build_info.build_items()\n cookies_potential = cookies + time_left * cps\n for item in item_list:\n cost = build_info.get_cost(item)\n curr_icr = 
build_info.get_cps(item) / cost \n if cookies_potential >= cost and curr_icr > highest_icr:\n return_item = item\n highest_icr = curr_icr\n return return_item", "def strategy(hand, num_die_sides):\r\n \r\n best_value = 0.0\r\n best_hold = ()\r\n \r\n possible_holds = gen_all_holds(hand)\r\n \r\n for hold in possible_holds:\r\n current_value = expected_value(hold, num_die_sides, len(hand) - len(hold))\r\n if current_value > best_value:\r\n best_value = current_value\r\n best_hold = hold\r\n \r\n return (best_value, best_hold)", "def limit_weight(self, weight_max):\n # remove items with low values\n if self.total_weight > weight_max:\n items_sorted_by_fitness = sorted(self.items, key=lambda item: item.fitness, reverse=False)\n while items_sorted_by_fitness and self.total_weight > weight_max:\n least_fit_item = items_sorted_by_fitness.pop(0)\n if self.item_stats[least_fit_item.id] == 1:\n self.item_stats[least_fit_item.id] = 0\n self.update_values() # have to update each time an item is change to recompute weight", "def knapsack(items, maxweight):\n @lru_cache(maxsize=None)\n def bestvalue(i, j):\n # Return the value of the most valuable subsequence of the first\n # i elements in items whose weights sum to no more than j.\n if j < 0:\n return float('-inf')\n if i == 0:\n return 0\n value, weight = items[i - 1]\n return max(bestvalue(i - 1, j), bestvalue(i - 1, j - weight) + value)\n\n j = maxweight\n result = []\n for i in reversed(range(len(items))):\n if bestvalue(i + 1, j) != bestvalue(i, j):\n result.append(items[i])\n j -= items[i][1]\n result.reverse()\n return bestvalue(len(items), maxweight), result", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, 
next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s" ]
[ "0.6643813", "0.6288931", "0.625988", "0.61211926", "0.6088565", "0.5934902", "0.58888906", "0.572068", "0.57132936", "0.5708484", "0.56786495", "0.56729484", "0.56564474", "0.5643351", "0.5632886", "0.5576396", "0.55385184", "0.55242544", "0.5523174", "0.5466446", "0.5464137", "0.54572743", "0.5453999", "0.54467857", "0.5440771", "0.54332393", "0.5431393", "0.54303557", "0.5428858", "0.54284626" ]
0.6629955
1
Randomly generate a schedule based on given contents
def randomSchedule(self,contents): import random as ran import copy contents_copy = copy.deepcopy(contents) sol = Area('sb',ran.random()) while contents_copy: cont = ran.choice(contents_copy) i = 0 while True: ran_waiting = ran.randint(0,2) ran_start = ran.randint(0,19) if sol.checkAddContent(ran_waiting,ran_start,cont): sol.addContent(ran_waiting,ran_start,cont) contents_copy.remove(cont) break i += 1 if i>150: #print "cut" sol = Area('sb',ran.random()) contents_copy = contents[:] break #print "generate new schedule\n",sol.printSchedule() return sol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomize_schedule(self):\n #Creates a new Schedule Object\n new_schedule = Schedule(len(self.chromo_list),self.config)\n\n #For all of the entries in the hash map\n for classes,index in self.hash_map.items():\n #Get New Random Position\n rand = random.randint(0,len(new_schedule.chromo_list))\n total_duration = 0\n temp_index = rand\n\n #Adds the Class for the whole Duration\n while total_duration < classes.duration\\\n and temp_index < len(new_schedule.chromo_list):\n new_chromo = new_schedule.insert_chromosome(Chromosome(),\\\n temp_index)\n new_schedule.number_chromosomes += 1\n #Enters the new class into the hash map\n if not new_schedule.hash_map.has_key(classes):\n new_schedule.hash_map[classes] = temp_index\n #Assigns the class\n new_chromo._class = classes\n new_schedule.calculate_fitness(new_chromo,temp_index)\n total_duration += 1\n temp_index += 1\n\n return new_schedule", "def schedule_randomSampling(self,contents,areas):\n\t\tsolutions = []\n\t\tfor i in range(len(areas)):\n\t\t\twhile True:\n\t\t\t\tschedule = self.randomSchedule(contents)\n\t\t\t\tif self.validSchedule(schedule)[0]:\n\t\t\t\t\tschedule.id = \"area\" + str(i+1)\n\t\t\t\t\tsolutions.append(schedule)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\t\t\t\t#print \"not valid\"\n\t\tfor sol in solutions:\n\t\t\tsol.printSchedule()\n\t\treturn solutions", "def mainSchedule():\n\timport time\n\tc1 = Content(1,5,20)\n\tc2 = Content(2,6,30)\n\tc3 = Content(3,5,25)\n\tc1_ = Content(1,1,20)\n\tc5 = Content(5,3,29)\n\tc6 = Content(6,11,50)\n\tc7 = Content(7,7,34)\n\tc1__ = Content(1,3,20)\n\tc8 = Content(8,6,10)\n\ta1 = Area('a1',1.0)\n\ta2 = Area('a2',0.5)\n\ta3 = Area('a3',0.8)\n\tcontents = [c1,c2,c3,c1_,c5,c6,c7,c1__,c8]\n\tareas = [a1,a2,a3]\n\tsol_schedule = Schedule_solution()\n\tprint \"random sampling schedule:\\n\"\n\ttime_r = time.time()\n\tschedule_sols = sol_schedule.schedule_randomSampling(contents,areas)\n\tprint \"running time,\",time.time()-time_r\n\tprint \"local search schedule:\"\n\ttime_l = time.time()\n\tschedule_sols_local = sol_schedule.schedule_localSearch(contents,areas)\n\tprint \"running time,\",time.time()-time_l\n\tsol_selection = Selection_solution()\n\tsol_selection.select_bruteforce(4,*schedule_sols) #argument unpacking", "def simulate(self):\n\t\tcounter = 1\n\t\tweek = 1\n\t\twhile counter != 0:\n\t\t\tself.oneWeek(week)\n\t\t\tweek += 1\n\t\t\tcounter = len(self.teams[0].schedule)", "def schedule_task(self, Tau):\n return random.choice(self.tasks)", "def schedule_paragraph():", "def test_schedule_generator(self):\n\n dur_val = 10\n amp = 1.0\n\n def test_func(dur: int):\n sched = Schedule()\n sched += Play(library.constant(int(dur), amp), DriveChannel(0))\n return sched\n\n expected_sched = Schedule()\n expected_sched += Play(library.constant(dur_val, amp), DriveChannel(0))\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"f\", (0,), test_func)\n self.assertEqual(inst_map.get(\"f\", (0,), dur_val), expected_sched)\n\n self.assertEqual(inst_map.get_parameters(\"f\", (0,)), (\"dur\",))", "def _create_schedules(self):\n\n ''''''", "def schedule_text():", "def _get_day_attack_schedule(self):\n planer_args = self.planner_config[\"args\"]\n start_time = datetime.strptime(planer_args[\"min_time\"], \"%H:%M\").time()\n start_date = datetime.combine(datetime.today().date(), start_time)\n end_time = datetime.strptime(planer_args[\"max_time\"], \"%H:%M\").time()\n end_date = datetime.combine(datetime.today().date(), end_time)\n\n random.seed()\n attack_schedule = []\n for start, 
end in self._split_date_range(start_date, end_date, planer_args[\"times\"]):\n attack_schedule.append(random.uniform(start, end))\n\n return attack_schedule", "def simulate(self):\n self._t = self._t + 1\n if self._t == self._cycle:\n # End of a season, start of the next one. Year is also cyclic that is WINTER -> SPRING.\n self._t = 0\n self._season = self._season.next()\n\n # When the ammount of newly produced food in a cell is over and the cell can seed we\n # randomly choose another spot where some random ammount of newly produced food should\n # be stored.\n for i in range(self._height):\n for j in range(self._width):\n if self._env[i][j].get_newly() == 0 and not self._seeded[i][j]:\n # if the cell become empty just now seed in once in a randomn cell on the grid.\n self._seeded[i][j] = True\n cap = self._height + self._width\n while cap > 0:\n seedi = random.randint(0, self._height - 1)\n seedj = random.randint(0, self._width - 1)\n\n production_cap = self._food_per_season[self._season.value]\n\n production_cap -= self._env[seedi][seedj].get_newly()\n\n if production_cap > 0:\n seed_amount = random.randint(1, production_cap)\n self._env[seedi][seedj].produce(seed_amount)\n self._seeded[seedi][seedj] = False\n break\n\n cap = cap - 1", "def create_schedule(amount):\n matches = []\n if amount % 2 == 0:\n matches = pick_pairs(amount)\n else:\n twoRoundMathces = amount - 3\n if twoRoundMathces > 0:\n matches = pick_pairs(twoRoundMathces)\n # Add last 3 matches of 1 round each.\n i = twoRoundMathces\n matches += [(i,i+1,1), (i,i+2,1), (i+1,i+2,1)]\n return matches", "def create_population(num_schedules, size=10, rate_range=(-6, 0)):\n pop = []\n for _ in range(0, num_schedules):\n exponents = np.random.uniform(rate_range[0], rate_range[1], size)\n schedule = np.power(10, exponents).tolist()\n pop.append(schedule)\n return pop", "def get_task(self, locations):\n if self.current_location == self.desk_location:\n self.task_location = locations[random.randint(0, len(locations)-1)]\n self.task_duration = random.randint(1, 10)\n else:\n self.task_location = self.desk_location\n self.task_duration = random.randint(50, 100)", "def get_schedule(self, status):\n if status == self.S_LATE and self.delay_interval:\n return (datetime.datetime.now() +\n datetime.timedelta(\n seconds=random.random() * self.delay_interval))\n return None # Remove schedule on complete", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 30\n s1.hour_to = 23\n s1.min_to = 30\n s1.interval = 60*30\n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 30\n s2.hour_to = 23\n s2.min_to = 30\n s2.interval = 60*60\n\n s3 = Schedule()\n s3.hour_from = 22\n s3.min_from = 0\n s3.hour_to = 23\n s3.min_to = 30\n s3.interval = 60*5\n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 25 )", "def get_driver_schedule(self, duration_mins=60):\n elapsed_mins, schedule = 0, []\n while elapsed_mins < duration_mins:\n elapsed_mins += expon.ppf(random()) * self.mins_between_drivers\n schedule.append(elapsed_mins)\n return schedule", "def generate_with_predefined_sequences(opts, TR, sched_group, group = 'experimental'):\n # get config\n config = get_config()\n type_data = get_seq_types(opts.type_file)\n \n seq_file = \\\n opts.seq_file + \".json\".format(sched_group) \\\n if 
opts.seq_file else \"./scheduling/sequences.json\"\n# opts.seq_file + \"_{}.json\".format(sched_group) \\\n color_list = config[\"COLOR_LIST\"]\n \n # create sequences\n row_list = []\n sess_num = 0 # 0 is baseline session\n\n np.random.seed(config[\"RND_SEED\"] + 1000*sched_group)\n \n for index, row in type_data.iterrows(): \n\n seq_type, seq_length, max_chord_size, seq_keys, n_free_trials, \\\n n_paced_trials, n_free_trials_testing, n_paced_trials_testing, \\\n blocks, n_seqs_trained, n_seqs_untrained, n_seqs_fmri, \\\n n_sess, testing_sessions, n_runs = row\n testing_session_list = \\\n [int(x) for x in str(testing_sessions).split(\",\")]\n \n seq_keys = seq_keys.split(\" \")\n blocks = [int(x) for x in blocks.split(\",\")]\n \n mygenerator = Generator(\n set=seq_keys, \n size=seq_length,\n maxchordsize=max_chord_size)\n trained_seqs, untrained_seqs \\\n = mygenerator.read_grouped(seq_file, seq_type)\n \n if opts.cycle_offset:\n trained_seqs = trained_seqs[opts.cycle_offset:] + trained_seqs[:opts.cycle_offset]\n untrained_seqs = untrained_seqs[opts.cycle_offset:] + untrained_seqs[:opts.cycle_offset]\n\n n_trained = len(trained_seqs)\n n_untrained = len(untrained_seqs)\n reorder_trained = list(permutations(range(n_trained))) \n reorder_trained_fmri = list(combinations(range(n_trained), n_seqs_fmri)) \n# reorder_untrained = list(combinations(range(n_untrained), n_seqs_untrained)) if not opts.no_untrained else [] \n reorder_untrained = []\n \n untrained_list = range(n_untrained)\n #one = untrained_list[0]\n #twos = untrained_list[1:3]\n #rest = untrained_list[3:]\n untrained_groups = []\n for j in range(n_seqs_untrained):\n untrained_groups.append(untrained_list[j::n_seqs_untrained])\n\n for k in range(len(testing_session_list)):\n# mycombination = [one, twos[k % 2], rest[k % len(rest)]]\n mycombination = [x[k % len(x)] for x in untrained_groups]\n random.shuffle(mycombination)\n reorder_untrained.append(tuple(mycombination))\n\n # n_seqs: how many are presented\n # get colors\n seq_color = {}\n for myseq in trained_seqs: \n index = random.randint(0, len(color_list) - 1)\n seq_color[myseq[1]] = color_list[index]\n del color_list[index] \n\n for myseq in untrained_seqs: \n index = random.randint(0, len(color_list) - 1)\n seq_color[myseq[1]] = color_list[index]\n del color_list[index] \n\n# untrained_index = 0 \n trained_comb_num = 0\n untrained_comb_num = 0\n \n for sess in range(n_sess):\n\n # controls the order across sessions\n trained_combination = list(reorder_trained[trained_comb_num \\\n % len(reorder_trained)]) \n \n trained_fmri_combination = list(reorder_trained_fmri[trained_comb_num \\\n % len(reorder_trained_fmri)]) \n trained_comb_num = trained_comb_num + 1\n \n for paced in range(2):\n\n myruns = n_runs if paced and \\\n sess_num in testing_session_list else 1 # sess+1\n \n if not sess_num in testing_session_list: # training sess + 1\n sess_type = \"training\"\n n_trials = n_free_trials if paced == 0 else \\\n n_paced_trials\n \n for seq in range(n_seqs_trained): \n instruct = 1 if seq == 0 else 0\n seq_index = trained_combination[seq]\n seq_train = \"trained\"\n sequence, sequence_string = \\\n trained_seqs[seq_index]\n\n if n_trials and group == 'experimental'> 0:\n row_list.append([\n sess_num,\n sess_type,\n n_trials,\n \" \".join(seq_keys),\n seq_type,\n sequence_string, \n seq_train,\n seq_color[sequence_string],\n trained_combination,\n seq_index,\n paced,\n instruct,\n 1, #run\n 1 # block\n ])\n\n\n else: # testing / fmri\n untrained_combination = \\\n 
list(reorder_untrained[untrained_comb_num \\\n % len(reorder_untrained)]) if not \\\n opts.no_untrained > 0 else [] \n# print(untrained_combination)\n# print(reorder_untrained)\n \n if paced == 0:\n sess_type = \"testing\"\n n_trials = n_free_trials_testing\n\n for seq in range(n_seqs_trained + n_seqs_untrained): # trained and untrained \n instruct = 1 if seq == 0 else 0\n\n # interleave trained/untrained \n if seq % 2 == 1 and not opts.no_untrained: \n seq_index = untrained_combination[(seq - 1)/2]\n shuffled_combination = untrained_combination\n seq_train = \"untrained\"\n sequence, sequence_string = \\\n untrained_seqs[seq_index]\n \n else :\n seq_index = trained_combination[seq/2]\n shuffled_combination = trained_combination\n seq_train = \"trained\"\n sequence, sequence_string = \\\n trained_seqs[seq_index]\n \n if n_trials > 0:\n row_list.append([\n sess_num,\n sess_type,\n n_trials,\n \" \".join(seq_keys),\n seq_type,\n sequence_string, \n seq_train,\n seq_color[sequence_string],\n shuffled_combination, \n seq_index,\n paced,\n instruct,\n 1, #run\n 1 # block\n ])\n\n\n else:\n untrained_comb_num = untrained_comb_num + 1\n\n sess_type = \"fmri\"\n\n combination_index = trained_fmri_combination + \\\n untrained_combination\n combination_type = \\\n len(trained_fmri_combination)*[\"trained\"] + \\\n len(trained_fmri_combination)*[\"untrained\"] # same amount of trained and untrained\n combination = zip(combination_type, combination_index)\n print(combination)\n n_trials = np.sum(np.array(blocks))\n # compute run statistics\n nbeats = config[\"MAX_CHORD_SIZE\"] + \\\n config[\"EXTRA_BEATS\"]\n\n ITI = list(generate_ITIs(config[\"ITIMEAN_FMRI\"], \n config[\"ITIRANGE_FMRI\"], \n 'exp')) \n trial_duration = config[\"BEAT_INTERVAL\"]*nbeats + \\\n config[\"BUFFER_TIME\"] + config[\"FIXATION_TIME\"] + \\\n np.mean(ITI) #config[\"ITIMEAN_FMRI\"] \n run_duration = trial_duration*n_trials*\\\n (len(combination)) + config[\"START_TIME_FMRI\"] + \\\n (len(combination)*n_trials/config[\"STRETCH_TRIALS\"]-1)*config[\"STRETCH_TIME\"]\n \n total_duration = run_duration*n_runs \n total_trials = n_runs*n_trials\n \n print(\"Trial duration: %.2f s; \"\n %(trial_duration) +\n \"Run duration: %.2f s (%.2f m, %d frames); \"\n %(run_duration, run_duration/60, np.ceil(run_duration/TR)) +\n \"Total duration: %.2f m; \"\n %(total_duration/60) + \n \"Total trials per sequence: %d\"\n %(total_trials)\n ) \n\n\n for run in range(myruns):\n \n shuffled_combination_run = \\\n shuffle_order(combination)\n last_seq = 0 \n for block, n_group in enumerate(blocks):\n shuffled_combination = \\\n shuffle_order(shuffled_combination_run)\n # avoid repetitions\n while last_seq == shuffled_combination[0]:\n \n shuffled_combination = \\\n shuffle_order(shuffled_combination_run)\n \n last_seq = shuffled_combination[-1]\n \n \n # shuffle trained and untrained\n for seq in range(len(shuffled_combination)): \n instruct = 1 if seq == 0 and \\\n block == 0 else 0\n \n combination_type, combination_index = \\\n shuffled_combination[seq]\n if combination_type == \"untrained\": \n seq_train = \"untrained\"\n sequence, sequence_string = \\\n untrained_seqs[combination_index]\n \n else:\n seq_train = \"trained\"\n sequence, sequence_string = \\\n trained_seqs[combination_index]\n \n if n_trials > 0:\n row_list.append([\n sess_num,\n sess_type,\n n_group,\n \" \".join(seq_keys),\n seq_type,\n sequence_string, \n seq_train,\n seq_color[sequence_string],\n shuffled_combination,\n seq_index,\n paced,\n instruct,\n run + 1, #run\n block + 1 
# block\n ])\n \n \n sess_num = sess_num + 1\n\n schedule = pd.DataFrame(row_list, columns = (\n \"sess_num\",\n \"sess_type\",\n \"n_trials\", \n \"seq_keys\", \n \"seq_type\", \n \"sequence_string\", \n \"seq_train\",\n \"seq_color\",\n \"combination\",\n \"seq_order\",\n \"paced\",\n \"instruct\",\n \"run\",\n \"block\"\n )\n )\n \n # schedule.loc[schedule[\"sess_num\"] == 0, \"sess_num\"] = \\\n# np.max(schedule[\"sess_num\"]) + 1\n# schedule.sort_values(by = [\"sess_num\", \"paced\", \"seq_train\"], \n# inplace = True)\n\n if opts.schedule_file:\n schedulefilename = opts.schedule_file + \"_s{}\".format(sched_group)\n else: \n schedulefilename = \"./scheduling/schedule{}\".format(sched_group) \n\n if opts.split:\n schedule_home = \\\n schedule.loc[schedule[\"sess_type\"] != \"fmri\", :]\n schedule_fmri = \\\n schedule.loc[schedule[\"sess_type\"] == \"fmri\", :]\n \n schedule_home.to_csv(schedulefilename + \".csv\", \n sep =\";\", index=False)\n schedule_fmri.to_csv(schedulefilename + \"_fmri.csv\", \n sep =\";\", index=False)\n else:\n schedule.to_csv(schedulefilename + \".csv\", sep =\";\", index=False)", "def create_random_time_interval_gaps(days, gaps_as_percent, avg_gap_as_percent):\n \n print \"days \",days\n #first find the total number of gap days by calculating it as percent of the total days\n total_size_of_missing_days = round(days * (float(gaps_as_percent)/100))\n print \"total size of missing days \", total_size_of_missing_days\n #now, calculate the average gap size as percent of the total number of gap days\n average_gap_in_days = round(total_size_of_missing_days * (float(avg_gap_as_percent)/100)) \n num_of_time_gaps = round(float(days)/average_gap_in_days)\n\n #number of fills is a space around the gaps, from day 0 to last day\n # there are num_of_gaps + 1\n num_of_time_fills = num_of_time_gaps + 1\n size_of_fill = int(round((float(days)-total_size_of_missing_days)/num_of_time_fills))\n \n print \"num_of_time_gaps \", num_of_time_gaps\n print \"num_of_time_files \", num_of_time_fills\n print \"size_of_fill \", size_of_fill\n\n start_indices = []\n begin = 0\n for i in range(0,int(num_of_time_gaps)):\n begin = randint(begin,int(size_of_fill)-1)\n start_indices.append(begin)\n begin = begin + size_of_fill\n if begin + average_gap_in_days > total_size_of_missing_days:\n break\n\n \n print start_indices \n print \"num of time gaps \", num_of_time_gaps \n print \"num of time fills \", num_of_time_fills \n\n interval_tuples = []\n credit_space = days - total_size_of_missing_days\n start = 0\n end = randint(0,avg_size)\n #algorithm - take the size of present data and, using it as a reminder, create a distribution of days", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n s3 = Schedule()\n s3.hour_from = 0\n s3.min_from = 0\n s3.hour_to = 21\n s3.min_to = 59\n s3.interval = 60*60*3 \n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n 
self.assertEqual(r, 2 )", "def test_list_schedules(self):\n pass", "def create( self ):\r\n for rsrc in self.ee.getRsrcs( ):\r\n self.schedule[rsrc.getid( )] = [ ]", "def __fill_person (self, person):\n \n scheduled = True\n while person.get_monthly_hours_difference (self.date) > 0 and scheduled:\n scheduled = False\n #schedule person\n scheduling_units = self.__get_emptiest (person)\n dates = self.workers.get_dates ( )\n for date in dates:\n for scheduling_unit in scheduling_units:\n \n turnuses = list (self.mapper.get_turnuses (scheduling_unit, person))\n random.shuffle (turnuses)\n \n for turnus in turnuses:\n if self.__is_valid_move (scheduling_unit, turnus, date, person, True):\n person.schedule_turnus (date, turnus, scheduling_unit)\n \n #block the previous day, if it was the night turnus\n prev_date = date - datetime.timedelta(days=1)\n if turnus.code[0] == 'N' and not person.is_blocked(prev_date, turnus):\n person.add_invalid_turnus(prev_date, turnus)\n \n # the is valid move has taken care of any potential violations, so that you\n # can just schedule turnuses\n if person.packet_night_turnuses and turnus.code[0] == 'N':\n next_date = date + datetime.timedelta(days=1)\n person.schedule_turnus (next_date, turnus, scheduling_unit)\n #if it is Saturday, schedule one more\n if next_date.weekday() == 5:\n next_date += datetime.timedelta(days=1)\n #find the workfree night turnus\n night_turnus = None\n for temp_turnus in self.mapper.get_turnuses (scheduling_unit, person):\n if temp_turnus.holiday and temp_turnus.code[0] == 'N':\n night_turnus = temp_turnus\n break\n else:\n raise Exception ('Napaka pri dodajanju osebe z zdruzenimi nocnimi turnusi.')\n person.schedule_turnus(next_date, night_turnus, scheduling_unit)\n if turnus.code[0] == 'N' and not person.is_blocked(next_date + datetime.timedelta(days=1), turnus):\n person.add_invalid_turnus(next_date + datetime.timedelta(days=1), turnus)\n else:\n if turnus.code[0] == 'N' and not person.is_blocked(next_date + datetime.timedelta(days=1), turnus):\n person.add_invalid_turnus(next_date + datetime.timedelta(days=1), turnus) \n \n if holiday.is_workfree(date):\n schedule_utils.add_free_day (person, date)\n scheduled = True\n if person.get_monthly_hours_difference (self.date) <= 0:\n return\n else:\n scheduled = False\n if person.get_monthly_hours_difference (self.date) <= 0:\n return", "def simulate(seconds):\n\n #Grab the start time\n start_time = dt.datetime.now()\n\n # fill list with the start\n times_on_the_second = [start_time + dt.timedelta(seconds=x) for x in range(seconds + 1)]\n\n #end_time = start_time + dt.timedelta(seconds=seconds)\n\n end_time = times_on_the_second[-1]\n epochs = 0\n\n\n\n print(f\"Simulation started at {start_time}\")\n\n while dt.datetime.now() < end_time:\n\n while dt.datetime.now() < times_on_the_second[epochs]:\n pass\n\n for asteroid in Controller.currentAsteroids:\n asteroid.move()\n print(asteroid, F\"time: {dt.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]}\")\n epochs += 1\n\n\n\n # time.sleep(1)", "def generate_program():\n sun = get_sun()\n sunrise = sun.get_sunrise_time() + timedelta(minutes=30)\n sunset = sun.get_sunset_time() - timedelta(minutes=30)\n\n program = {}\n\n # Iterate over each switch\n for id, config in switches.items():\n program[id] = OrderedDict()\n # morning\n if config['morning']:\n ts_on = get_random_time_in_range(days_events['getting_up'])\n ts_off = get_random_time_around(sunrise, interval_minutes=15)\n\n if ts_off > ts_on:\n 
program[id][get_random_time_in_range(days_events['getting_up'])] = 'on'\n program[id][get_random_time_around(sunrise, interval_minutes=15)] = 'off'\n else:\n logging.info('Sun goes up earlier %s than %s. No switching on in the morning',\n datetime.fromtimestamp(ts_on, tz=tzlocal()).strftime(\"%H:%M:%S %z\"),\n datetime.fromtimestamp(ts_off, tz=tzlocal()).strftime(\"%H:%M:%S %z\"),\n )\n\n # evening\n if config['evening']:\n program[id][get_random_time_around(sunset, interval_minutes=15)] = 'on'\n program[id][get_random_time_in_range(days_events['bedtime'])] = 'off'\n # program[id][datetime.now().timestamp() + 1*60 ] = 'off'\n\n return program", "def random_ticket():\n ts = time.time()\n return \"%s_%s\" % (ts, random_str(6, string.digits))", "def sample_times(times, num_examples):\n sample = random.sample(times, num_examples)\n make_hist(sample, 10, 'Sample of Size ' + str(num_examples),\n 'Minutes to Complete Race', 'Number of Runners')", "def create_empty_schedule():\n\n\t# create empty dictionary with all room-timelock combinations (roomlocks) as keys\n\troomlocks = list(range(0, 140))\n\tschedule = dict.fromkeys(roomlocks)\n\n\treturn schedule", "def schedule_localSearch(self,contents,areas):\n\t\tsolutions = []\n\t\tfor i in range(len(areas)):\n\t\t\tschedule = self.randomSchedule(contents)\n\t\t\tcount = 0\n\t\t\twhile True:\n\t\t\t\tif self.validSchedule(schedule)[0]:\n\t\t\t\t\tschedule.id = \"area\"+str(i+1)\n\t\t\t\t\tsolutions.append(schedule)\n\t\t\t\t\tcount = 0\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tschedule = self.transition(schedule)\n\t\t\t\t\tcount += 1\n\t\t\t\t\tif count > 10:\n\t\t\t\t\t\tcount = 0\n\t\t\t\t\t\tschedule = self.randomSchedule(contents)\n\t\tfor sol in solutions:\n\t\t\tsol.printSchedule()" ]
[ "0.7032516", "0.7032402", "0.6492854", "0.6483966", "0.6446885", "0.6158377", "0.60977435", "0.60693073", "0.6060243", "0.58782053", "0.58338886", "0.57139707", "0.5703477", "0.5686992", "0.5677032", "0.56692207", "0.56571686", "0.5630544", "0.5605559", "0.55920166", "0.5562734", "0.55571896", "0.55559564", "0.55545825", "0.5551681", "0.5551629", "0.5525937", "0.54973614", "0.54918945", "0.5482757" ]
0.79615617
0
Check whether the given schedule is a valid schedule
def validSchedule(self,schedule): def validRow(content,start,row): """ part of valid Schedule, only check whether a given row is valid @param start: the start position @param row: given waiting area @return: a boolean value """ cur_id = content[1].id try: next_c = row[start+content[1].length] except IndexError: return True if next_c != None: if cur_id != next_c[1].id: return True else: #print "row not valid" return False else: return True def validCol(content,start,schedule): """ Similar to validRow,but only check whether the given Column is valid @param start: the start position @param schedule: given schedule @return: a boolean value """ cur_id = content[1].id #print "cur_id,length,start",cur_id,content[1].length,start flag = 0 for i in range(content[1].length): for j in range(len(schedule.w)): #print start,i,content[1] if schedule.w[j][start+i]!=None and \ schedule.w[j][start+i][1].id == cur_id: flag += 1 if flag != content[1].length: #print "col not valid",flag,content[1].length,cur_id return False else: return True def validRowCol(content,start,row,schedule): """ Simply combine validRow and validCol """ if validRow(content,start,row) and \ validCol(content,start,schedule): return True else: return False i = 0 while i < len(schedule.w): j = 0 while j < len(schedule.w[i]): c = schedule.w[i][j] if c != None: if not validRowCol(c,j,schedule.w[i],schedule): return False,(c,i) else: j += c[1].length else: j += 1 i += 1 return True,None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schedule_exist(self, schedule_name):\r\n schedule = self.find(\"schedules\", schedule_name, attribute=\"name\")\r\n if schedule is not None:\r\n return True\r\n else:\r\n return False", "def setSchedule(self, schedule):\r\n if isinstance(schedule, list) == False:\r\n print(\"Schedule needs to be of type list\")\r\n return False\r\n elif len(schedule) != 0 and any(isinstance(entry, dict) == False for entry in schedule):\r\n print(\"Entries in schedule need to be of type dict\")\r\n return False\r\n else:\r\n self.schedule = schedule\r\n return True", "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)", "def is_valid_calendar(self, calendar):\n\n return calendar in self.available_calendars() or calendar == \"\"", "def found_schedules(self) -> bool:\n return self._schedule_list != []", "def schedule_monitor(schedule):\n if schedule[\"state\"] == EC2State.STOPPED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= 7 - schedule[\n \"schedule\"\n ]:\n schedule[\"state\"] = EC2State.STARTED\n elif schedule[\"state\"] == EC2State.STARTED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= schedule:\n schedule[\"state\"] = EC2State.STOPPED\n else:\n return schedule, False\n\n return schedule, True", "async def _check_schedule(self, now, last):\n\n if self._schedule is None:\n return\n\n for event in self._schedule.events:\n if event.begin <= now:\n if event.begin > last:\n await self._announce_event(event)", "def valid_format(self):\n\n # If candidate is None, return true\n if not self.dt:\n print \"dt empty\"\n return True\n\n # Verify if time format is ok and stores in into a time-tuple format\n try:\n stime = datetime.strptime(self.dt, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n return False\n else:\n return True", "def __is_valid_move(self, scheduling_unit, turnus, date, person, overtime, depth=0, check_turnuses=[]):\n \n \n if not schedule_utils.is_valid_move (scheduling_unit, turnus, date, person, overtime):\n return False\n \n # if the workplace has the special rule: work in the afternoon, if the next\n # day is a work free day and you will work the next day, and you won't work\n # the next day, work in the morning or not at all\n if scheduling_unit.has_holiday_rule ( ):\n if holiday.is_workfree(date):\n prev_date = date - datetime.timedelta(days=1)\n prev_turnus = person.get_turnus(prev_date) \n if prev_turnus:\n # all afternoon codes start with P\n # all double shift codes start with C\n # TODO: document this\n if prev_turnus.code[0] != 'P' or prev_turnus.code[0] != 'C':\n return False\n else:\n return False\n else:\n next_date = date + datetime.timedelta(days=1)\n if holiday.is_workfree(next_date):\n # this bottom condition is enough, because the dates are added ascending\n if not person.is_free_day(next_date):\n return False\n \n # if the person schedules night turnuses in packages: \n # (Monday + Tuesday)\n # (Tuesday + Wednesday)\n # (Wednesday + Thursday)\n # (Friday + Saturday + Sunday)\n if person.packet_night_turnuses and turnus.code[0] == 'N':\n if depth == 0 and (date.weekday() == 0 or date.weekday() == 2 or date.weekday() == 4):\n return self.__is_valid_move(scheduling_unit, turnus, date + datetime.timedelta(days=1), person, overtime, depth + 1, check_turnuses + [turnus])\n #if this is the second day in the packet continue validation only if it is a Saturday\n elif depth == 1 and date.weekday() == 5:\n # TODO: allow only one holiday turnus per shift type (document 
this)\n sunday_night_turnus = None\n for alternative_turnus in self.mapper.get_turnuses (scheduling_unit, person):\n if alternative_turnus.holiday and alternative_turnus.code[0] == 'N':\n sunday_night_turnus = alternative_turnus\n break\n else:\n return False\n \n return self.__is_valid_move(scheduling_unit, sunday_night_turnus, date + datetime.timedelta(days=1), person, overtime, depth + 1, check_turnuses + [turnus])\n #Thursday to Friday combination does not exist\n elif depth == 1 and date.weekday() == 4:\n return False\n elif depth == 1:\n return True\n elif depth == 2:\n return True\n \n else:\n return False\n \n \n return True", "def _validate_schedulers(config, schedulers):\n for scheduler in schedulers:\n if scheduler not in PCLUSTER_SCHEDULERS:\n error = f\"Invalid scheduler ({scheduler}) found in config.\"\n logging.error(error)\n raise AssertionError(error)", "def load(self, schedule):\n result = True\n if not isinstance(schedule, PlutoSchedule):\n raise Exception(\n f\"Cannot load a schedule of type {type(schedule)}\")\n self._schedules.append(schedule)\n return result", "def check_inc_sched(self, sched):\r\n\r\n for i in range(1, len(sched)):\r\n if sched[i] <= sched[i - 1]:\r\n return False\r\n \r\n return True", "def validRowCol(content,start,row,schedule):\n\t\t\tif validRow(content,start,row) and \\\n\t\t\t\tvalidCol(content,start,schedule):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False", "def validCol(content,start,schedule):\n\t\t\tcur_id = content[1].id\n\t\t\t#print \"cur_id,length,start\",cur_id,content[1].length,start\n\t\t\tflag = 0\n\t\t\tfor i in range(content[1].length):\n\t\t\t\tfor j in range(len(schedule.w)):\n\t\t\t\t\t#print start,i,content[1]\n\t\t\t\t\tif schedule.w[j][start+i]!=None and \\\n\t\t\t\t\t\tschedule.w[j][start+i][1].id == cur_id:\n\t\t\t\t\t\tflag += 1\n\t\t\tif flag != content[1].length:\n\t\t\t\t#print \"col not valid\",flag,content[1].length,cur_id\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True", "def test_get_schedule_with_unbound_parameter(self):\n param1 = Parameter(\"param1\")\n param2 = Parameter(\"param2\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(param2, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n\n test_sched = inst_map.get(\"target_sched\", (0,), param2=1.23)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertAlmostEqual(test_inst[1], ref_inst[1])", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: 
only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def test_list_schedules(self):\n pass", "def final_check(self, schedule: Schedule) -> bool:\n for day in range(schedule.n_weekdays):\n for track in range(schedule.n_tracks):\n if schedule.count_courses_on_day(day, track) < 2 and schedule.count_courses_on_day(day, track) != 0: \n return False\n\n return True", "def _valid_day(self, date_find):\n try:\n datetime.strptime(date_find, settings.TIME_FORMAT)\n valid = True\n except ValueError:\n valid = False\n return valid", "def is_valid(self):\n if self.hour < 0 or self.minute < 0 or self.second < 0:\n return False\n if self.minute >= 60 or self.second >= 60:\n return False\n return True", "def time_conflict(self, schedule):\n for timerange in self._excluded_times:\n if timerange.conflicts_with(schedule):\n return False\n return True", "def test_check_args_submit_time(self):\n test_time = \"2021/06/18 11:00:00\"\n with self.assertRaises(TypeError) as context:\n self.duedate.check_args(test_time, self.test_turn_time)\n self.assertTrue(\"Invalid input format. 'submit_time' must be <datetime> format.\" in str(\n context.exception))", "def time_is_valid(request, day, time, name):\n\n\tif ((day != '0' and day != '6') and time.hour == 21) or time.minute != 0:\n\t\treturn False\n\n\t# George's time\n\tif name != \"George Yeh\" and day == '6' and time.hour >= 9 and time.hour < 12:\n\t\treturn False\n\n\treturn True", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n s3 = Schedule()\n s3.hour_from = 0\n s3.min_from = 0\n s3.hour_to = 21\n s3.min_to = 59\n s3.interval = 60*60*3 \n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def test_manage_report_schedule_enums(\n self, api_instance: Reports, report_type, schedule\n ):\n params = api_instance.manage_report_schedule(\n report_type=report_type,\n schedule=schedule,\n )\n self.assert_common_params(params, action=\"ManageReportSchedule\")\n assert params[\"ReportType\"] == \"_GET_STRANDED_INVENTORY_UI_DATA_\"\n assert params[\"Schedule\"] == \"_30_MINUTES_\"", "def is_task_in_schedule(self, tid: str) -> bool:\n return tid in self.__tasks", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 30\n s1.hour_to = 23\n s1.min_to = 30\n s1.interval = 60*30\n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 30\n s2.hour_to = 23\n s2.min_to = 30\n s2.interval = 60*60\n\n s3 = Schedule()\n s3.hour_from = 22\n s3.min_from = 0\n s3.hour_to = 23\n s3.min_to = 30\n s3.interval = 60*5\n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 25 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - 
timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 3\n s.min_from = 0\n s.hour_to = 3\n s.min_to = 59\n s.interval = 60*60*6 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 0 )", "def add_schedule(self):\r\n\r\n # Take the schedule entires from TOML file\r\n entries = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Parse schedule payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele = 'link')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/schedule.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_schl, self.schedules)\r\n if 'link' in payload.keys() and payload['link'] != [{}]:\r\n b2 = self.link(self.schedules[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b1 and b2\r\n else:\r\n return False", "def is_valid(self):\n return self.startTime <= ApiKey.get_now() < self.endTime" ]
[ "0.6923885", "0.67132664", "0.6674424", "0.6440875", "0.638306", "0.6347903", "0.63186556", "0.62030065", "0.6196701", "0.6159916", "0.6103682", "0.60652125", "0.60501474", "0.6044724", "0.60263616", "0.5999478", "0.5983755", "0.5954077", "0.5950224", "0.5926107", "0.5923733", "0.59184587", "0.5896596", "0.5885204", "0.5884958", "0.58778846", "0.5876381", "0.5855357", "0.5840659", "0.5824126" ]
0.7410286
0
Simply combine validRow and validCol
def validRowCol(content,start,row,schedule): if validRow(content,start,row) and \ validCol(content,start,schedule): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self, row):\n raise NotImplementedError", "def isValid(self):\n for ir in range(self.nRow): # Check rows for duplicates\n row = ir + 1\n vals = {}\n for ic in range(self.nCol):\n col = ic + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"doing row {row} at col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n \n for ic in range(self.nCol): # Check cols for duplicates\n col = ic + 1\n vals = {}\n for ir in range(self.nRow):\n row = ir + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"at row={row} doing col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n return True", "def valid_input(self, row, col):\n return ((row, col) not in self.marks and\n row <= WIDTH and row > 0 and\n col in COL_MAP)", "def cell_invalidation(_frame, _val, row, col, grid, _col_dets):\n if col == 0:\n return _invalid_fldname(row, grid)\n elif col == 1:\n return _invalid_fldtype(row, grid)\n else:\n raise Exception('Two many columns for default cell invalidation test')", "def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False", "def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col", "def _validateRowCol(self, rows, cols, numRow, numCol, dvName):\n if rows is not None:\n rowArr = np.array(rows)\n if np.max(rowArr) > numRow:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numRow)\n + \" rows and index up to \"\n + str(np.max(rowArr))\n + \" was specified: \"\n + str(rows)\n )\n if np.min(rowArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Row index less than 1 specified: \"\n + str(rows)\n )\n if len(rows) != len(set(rows)):\n # duplicates\n raise Error(\"Duplicate indices specified in the rows of design variable \" + dvName + \": \" + str(rows))\n\n if cols is not None:\n colArr = np.array(cols)\n if np.max(colArr) > numCol:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numCol)\n + \" cols and index up to \"\n + str(np.max(colArr))\n + \" was specified: \"\n + str(cols)\n )\n if np.min(colArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. 
\"\n + \"col index less than 1 specified: \"\n + str(cols)\n )\n if len(cols) != len(set(cols)):\n # duplicates\n raise Error(\"Duplicate indices specified in the cols of design variable \" + dvName + \": \" + str(cols))", "def is_valid_row_or_col(val: str):\n try:\n val = int(val)\n if 1 <= val <= 10:\n return True\n return False\n except (ValueError, TypeError):\n return False", "def validate_data(self, row, col, value):\n\n return True", "def check_row(row):\n \n if len(row) != _ncols:\n raise ValueError(\"Row contains {0} columns, expected {1}!\\n\\n{2}\\n\".format(len(row), _ncols, row))", "def checkRows( self ):\n\n for x in [0,3,6]:\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+1]\n thirdVal = self.__grid[x+2]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if 'xx' in compiledVal.lower():\n\n return ('X', compiledVal)\n\n elif 'oo' in compiledVal.lower():\n\n return ('O', compiledVal) \n\n elif compiledVal.lower() == 'x2x' or \\\n compiledVal.lower() == 'x5x' or \\\n compiledVal.lower() == 'x8x':\n\n return ('X', compiledVal)\n \n return None", "def validate(self, field, row):\n raise NotImplementedError", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def is_cols_valid(bd):\n for col in cols:\n seen = []\n for num in nums:\n if bd[col[num]] == \" \":\n continue\n elif bd[col[num]] not in seen:\n seen += [bd[col[num]]]\n else:\n return False\n else:\n continue\n return True", "def is_cell_valid(board, r, c):\n return is_cell_row_valid(board, r, c) or is_cell_col_valid(board, r, c)", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid", "def is_valid_row(self):\r\n return self.valid_row", "def is_board_valid(bd):\n return is_rows_valid(bd) and is_cols_valid(bd) and is_sqrs_valid(bd)", "def check_cols(self):\r\n for i in range(3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+3][-1] and self.grid[i+3][-1] == self.grid[i+6][-1]:\r\n return (i, (self.grid[i], self.grid[i+6]))\r\n return (-1, None)", "def check_meatadata_row(validated, input_validate_dict, row, idx):\n\n if row['RealCrystalName'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'RealCrystalName spaces or null', idx + 2)\n validated = False\n if row['crystal_name'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name spaces or null', idx + 2)\n validated = False\n if row['RealCrystalName'] not in row['crystal_name']:\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name does not contain RealCrystalName', idx + 2)\n validated = False\n if row['smiles'] == 
'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Smiles null', idx + 2)\n validated = False\n\n return validated, input_validate_dict", "def getValidRowsCols(self) :\n colns = number_of_good_cols(self.r_sheet)\n rowns = number_of_good_rows(self.r_sheet)\n \n # Check whether the number of good columns and rows are correct\n while self.isEmptyRow(rowns-1, colns) :\n rowns = rowns - 1 \n while self.isEmptyColumn(colns-1, rowns) :\n colns = colns - 1\n \n self.log.debug('Number of rows with content: {0}'.format(rowns))\n self.log.debug('Number of columns with content: {0}'.format(colns))\n return rowns, colns", "def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )", "def test_row_from_columns_no_errors(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def _valid_placement(self, i_row, i_col):\n if not self._empty_cell(i_row, i_col):\n return (False, [])\n adj_opp_cells = []\n\n if (i_row, i_col) == self._tl_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_corners, \"tl\")\n elif (i_row, i_col) == self._tr_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_rs_corners, \"tr\")\n elif (i_row, i_col) == self._bl_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_corners, \"bl\")\n elif (i_row, i_col) == self._br_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_rs_corners, \"br\")\n elif (i_row, i_col) in self._ls_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_and_rs, \"ls\")\n elif (i_row, i_col) in self._ts_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ts_and_bs, \"ts\")\n elif (i_row, i_col) in self._rs_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_and_rs, \"rs\")\n elif (i_row, i_col) in self._bs_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ts_and_bs, \"bs\")\n else:\n self._check_inner_dirs(i_row, i_col, adj_opp_cells)\n\n #print(\"\\nFOR TESTING. 
adj_opp_cells: \", adj_opp_cells)\n\n if adj_opp_cells == []:\n return (False, [])\n else:\n can_place, flip_lst = self._flip_dirs(adj_opp_cells)\n return (can_place, flip_lst)", "def checkColumns( self ):\n\n for x in list(range(0,3)):\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+3]\n thirdVal = self.__grid[x+6]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if 'xx' in compiledVal.lower():\n return ('X', compiledVal)\n\n elif 'oo' in compiledVal.lower():\n return ('O', compiledVal)\n\n elif compiledVal.lower() == 'x4x' or \\\n compiledVal.lower() == 'x5x' or \\\n compiledVal.lower() == 'x6x':\n\n return ('X', compiledVal) \n\n return None", "def validate(self):\r\n for name, col in self._columns.items():\r\n val = col.validate(getattr(self, name))\r\n setattr(self, name, val)", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def is_valid(gr, pos, num):\n \n row = pos[0]\n col = pos[1]\n \n for i in range(0, 9):\n # test row\n if(i != col and gr[row][i] == num):\n return False\n # test col\n if(i != row and gr[i][col] == num):\n return False\n\n # test 3x3 square\n small_row = floor(row / 3) * 3\n small_col = floor(col / 3) * 3\n\n for i in range(small_row, small_row + 3):\n for j in range(small_col, small_col + 3):\n if((i != row and j != col) and gr[i][j] == num):\n return False\n \n return True", "def _validate_indexes(self, row, col):\n if min(row, col) < 0 or max(row, col) >= self._n:\n raise IndexError(\n \"Incorrect position (%d, %d) in grid of size %d\" % (\n row, col, self._n\n )\n )" ]
[ "0.684255", "0.68137574", "0.6745641", "0.6707384", "0.66674656", "0.66391826", "0.65778613", "0.65593165", "0.650532", "0.64353895", "0.6420084", "0.63877106", "0.6331596", "0.63072574", "0.63056093", "0.6256621", "0.6225988", "0.62184995", "0.61916417", "0.6106615", "0.6101104", "0.60860676", "0.60755074", "0.60748", "0.6065741", "0.6044087", "0.6040643", "0.60336006", "0.6031315", "0.6029684" ]
0.70008266
0
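
The record above pairs the one-line combined check with its two halves. A minimal, self-contained sketch of that row-plus-column validation follows; the Content stand-in, the 3-row by 20-slot grid of (slot, content) tuples, and the check bodies are reconstructions for illustration, based on the validRow/validCol snippets that appear elsewhere in these records, not the original Area/Schedule classes.

    from collections import namedtuple

    # Minimal stand-in for the scheduler's content blocks: an id plus a length in slots.
    Content = namedtuple("Content", ["id", "length"])

    def valid_row(cell, start, row):
        # The slot immediately after this block must not hold a block with the same id.
        cur_id = cell[1].id
        try:
            nxt = row[start + cell[1].length]
        except IndexError:
            return True
        return nxt is None or nxt[1].id != cur_id

    def valid_col(cell, start, grid):
        # Every column the block spans must contain its id exactly once across all rows.
        cur_id = cell[1].id
        hits = 0
        for i in range(cell[1].length):
            for row in grid:
                slot = row[start + i]
                if slot is not None and slot[1].id == cur_id:
                    hits += 1
        return hits == cell[1].length

    def valid_row_col(cell, start, row, grid):
        # The combined check from the record above: valid only if both checks pass.
        return valid_row(cell, start, row) and valid_col(cell, start, grid)

    if __name__ == "__main__":
        grid = [[None] * 20 for _ in range(3)]
        c = Content(id=1, length=5)
        for i in range(c.length):
            grid[0][3 + i] = (3 + i, c)                       # one 5-slot block in row 0
        print(valid_row_col(grid[0][3], 3, grid[0], grid))    # -> True
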
transition method uses validSchedule to find the problematic content and switches it with another potentially valid content
def transition(self,schedule): c_p = self.validSchedule(schedule)[1] row = c_p[1] start = c_p[0][0] c = c_p[0][1] space = c.length start_new = start for i in range(19): try: if schedule.w[row][start-i-1] == None: space += 1 start_new -= 1 else: break except IndexError: break for i in range(19): try: if schedule.w[row][start+i+1] == None: space += 1 else: break except IndexError: break for i in range(len(schedule.w)): j = 0 while j < len(schedule.w[i]) and (j<start or j>start+c.length): if schedule.w[i][j] != None and schedule.w[i][j][1].length\ <=space and schedule.w[i][j][1].id!=c.id\ and c.length<=schedule.w[i][j][1].length: temp = i,j,schedule.w[i][j][1] schedule.delContent(temp[0],temp[1],temp[2]) #print "add,",c schedule.addContent(temp[0],temp[1],c) schedule.delContent(row,start,c) #print "add,",temp[2] schedule.addContent(row,start_new,temp[2]) elif schedule.w[i][j] == None: j += 1 else: j += schedule.w[i][j][1].length return schedule
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validSchedule(self,schedule):\n\t\tdef validRow(content,start,row):\n \"\"\"\n part of valid Schedule, only check whether a given\n row is valid\n @param start: the start position\n @param row: given waiting area\n @return: a boolean value\n \"\"\"\n\t\t\tcur_id = content[1].id\n\t\t\ttry:\n\t\t\t\tnext_c = row[start+content[1].length]\n\t\t\texcept IndexError:\n\t\t\t\treturn True\n\t\t\tif next_c != None:\n\t\t\t\tif cur_id != next_c[1].id:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\t#print \"row not valid\"\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\n\t\tdef validCol(content,start,schedule):\n \"\"\"\n Similar to validRow,but only check whether the given\n Column is valid\n @param start: the start position\n @param schedule: given schedule\n @return: a boolean value\n \"\"\"\n\t\t\tcur_id = content[1].id\n\t\t\t#print \"cur_id,length,start\",cur_id,content[1].length,start\n\t\t\tflag = 0\n\t\t\tfor i in range(content[1].length):\n\t\t\t\tfor j in range(len(schedule.w)):\n\t\t\t\t\t#print start,i,content[1]\n\t\t\t\t\tif schedule.w[j][start+i]!=None and \\\n\t\t\t\t\t\tschedule.w[j][start+i][1].id == cur_id:\n\t\t\t\t\t\tflag += 1\n\t\t\tif flag != content[1].length:\n\t\t\t\t#print \"col not valid\",flag,content[1].length,cur_id\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\t\tdef validRowCol(content,start,row,schedule):\n \"\"\"\n Simply combine validRow and validCol\n \"\"\"\n\t\t\tif validRow(content,start,row) and \\\n\t\t\t\tvalidCol(content,start,schedule):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\n\t\ti = 0\n\t\twhile i < len(schedule.w):\n\t\t\tj = 0\n\t\t\twhile j < len(schedule.w[i]):\n\t\t\t\tc = schedule.w[i][j]\n\t\t\t\tif c != None:\n\t\t\t\t\tif not validRowCol(c,j,schedule.w[i],schedule):\n\t\t\t\t\t\treturn False,(c,i)\n\t\t\t\t\telse:\n\t\t\t\t\t\tj += c[1].length\n\t\t\t\telse:\n\t\t\t\t\tj += 1\n\t\t\ti += 1\n\t\treturn True,None", "def test_validate_self_invalid_transition_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.dtm1.transitions['q5'] = self.dtm1.transitions['q0']\n self.dtm1.validate_self()", "def randomSchedule(self,contents):\n\t\timport random as ran\n import copy\n\t\tcontents_copy = copy.deepcopy(contents)\n\t\tsol = Area('sb',ran.random())\n\t\twhile contents_copy:\n\t\t\tcont = ran.choice(contents_copy)\n\t\t\ti = 0\n\t\t\twhile True:\n\t\t\t\tran_waiting = ran.randint(0,2)\n\t\t\t\tran_start = ran.randint(0,19)\n\t\t\t\tif sol.checkAddContent(ran_waiting,ran_start,cont):\n\t\t\t\t\tsol.addContent(ran_waiting,ran_start,cont)\n\t\t\t\t\tcontents_copy.remove(cont)\n\t\t\t\t\tbreak\n\t\t\t\ti += 1\n\t\t\t\tif i>150:\n\t\t\t\t\t#print \"cut\"\n\t\t\t\t\tsol = Area('sb',ran.random())\n\t\t\t\t\tcontents_copy = contents[:]\n\t\t\t\t\tbreak\n\t\t#print \"generate new schedule\\n\",sol.printSchedule()\n\t\treturn sol", "def test_validate_self_invalid_transition_result_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.dtm1.transitions['q0']['y'] = ('q5', 'y', 'R')\n self.dtm1.validate_self()", "def validCol(content,start,schedule):\n\t\t\tcur_id = content[1].id\n\t\t\t#print \"cur_id,length,start\",cur_id,content[1].length,start\n\t\t\tflag = 0\n\t\t\tfor i in range(content[1].length):\n\t\t\t\tfor j in range(len(schedule.w)):\n\t\t\t\t\t#print start,i,content[1]\n\t\t\t\t\tif schedule.w[j][start+i]!=None and \\\n\t\t\t\t\t\tschedule.w[j][start+i][1].id == cur_id:\n\t\t\t\t\t\tflag += 1\n\t\t\tif flag != content[1].length:\n\t\t\t\t#print \"col 
not valid\",flag,content[1].length,cur_id\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True", "def test_schedule_with_non_alphanumeric_ordering(self):\n theta = Parameter(\"theta\")\n phi = Parameter(\"phi\")\n lamb = Parameter(\"lam\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(theta, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(phi, DriveChannel(0)), inplace=True)\n target_sched.insert(20, ShiftPhase(lamb, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched, arguments=[\"theta\", \"phi\", \"lam\"])\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(0, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1, DriveChannel(0)), inplace=True)\n ref_sched.insert(20, ShiftPhase(2, DriveChannel(0)), inplace=True)\n\n # if parameter is alphanumerical ordering this maps to\n # theta -> 2\n # phi -> 1\n # lamb -> 0\n # however non alphanumerical ordering is specified in add method thus mapping should be\n # theta -> 0\n # phi -> 1\n # lamb -> 2\n test_sched = inst_map.get(\"target_sched\", (0,), 0, 1, 2)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertEqual(test_inst[1], ref_inst[1])", "def __fill_person (self, person):\n \n scheduled = True\n while person.get_monthly_hours_difference (self.date) > 0 and scheduled:\n scheduled = False\n #schedule person\n scheduling_units = self.__get_emptiest (person)\n dates = self.workers.get_dates ( )\n for date in dates:\n for scheduling_unit in scheduling_units:\n \n turnuses = list (self.mapper.get_turnuses (scheduling_unit, person))\n random.shuffle (turnuses)\n \n for turnus in turnuses:\n if self.__is_valid_move (scheduling_unit, turnus, date, person, True):\n person.schedule_turnus (date, turnus, scheduling_unit)\n \n #block the previous day, if it was the night turnus\n prev_date = date - datetime.timedelta(days=1)\n if turnus.code[0] == 'N' and not person.is_blocked(prev_date, turnus):\n person.add_invalid_turnus(prev_date, turnus)\n \n # the is valid move has taken care of any potential violations, so that you\n # can just schedule turnuses\n if person.packet_night_turnuses and turnus.code[0] == 'N':\n next_date = date + datetime.timedelta(days=1)\n person.schedule_turnus (next_date, turnus, scheduling_unit)\n #if it is Saturday, schedule one more\n if next_date.weekday() == 5:\n next_date += datetime.timedelta(days=1)\n #find the workfree night turnus\n night_turnus = None\n for temp_turnus in self.mapper.get_turnuses (scheduling_unit, person):\n if temp_turnus.holiday and temp_turnus.code[0] == 'N':\n night_turnus = temp_turnus\n break\n else:\n raise Exception ('Napaka pri dodajanju osebe z zdruzenimi nocnimi turnusi.')\n person.schedule_turnus(next_date, night_turnus, scheduling_unit)\n if turnus.code[0] == 'N' and not person.is_blocked(next_date + datetime.timedelta(days=1), turnus):\n person.add_invalid_turnus(next_date + datetime.timedelta(days=1), turnus)\n else:\n if turnus.code[0] == 'N' and not person.is_blocked(next_date + datetime.timedelta(days=1), turnus):\n person.add_invalid_turnus(next_date + datetime.timedelta(days=1), turnus) \n \n if holiday.is_workfree(date):\n schedule_utils.add_free_day (person, date)\n scheduled = True\n if person.get_monthly_hours_difference (self.date) <= 0:\n return\n else:\n scheduled = False\n if person.get_monthly_hours_difference (self.date) <= 0:\n return", "def 
mainSchedule():\n\timport time\n\tc1 = Content(1,5,20)\n\tc2 = Content(2,6,30)\n\tc3 = Content(3,5,25)\n\tc1_ = Content(1,1,20)\n\tc5 = Content(5,3,29)\n\tc6 = Content(6,11,50)\n\tc7 = Content(7,7,34)\n\tc1__ = Content(1,3,20)\n\tc8 = Content(8,6,10)\n\ta1 = Area('a1',1.0)\n\ta2 = Area('a2',0.5)\n\ta3 = Area('a3',0.8)\n\tcontents = [c1,c2,c3,c1_,c5,c6,c7,c1__,c8]\n\tareas = [a1,a2,a3]\n\tsol_schedule = Schedule_solution()\n\tprint \"random sampling schedule:\\n\"\n\ttime_r = time.time()\n\tschedule_sols = sol_schedule.schedule_randomSampling(contents,areas)\n\tprint \"running time,\",time.time()-time_r\n\tprint \"local search schedule:\"\n\ttime_l = time.time()\n\tschedule_sols_local = sol_schedule.schedule_localSearch(contents,areas)\n\tprint \"running time,\",time.time()-time_l\n\tsol_selection = Selection_solution()\n\tsol_selection.select_bruteforce(4,*schedule_sols) #argument unpacking", "def build_schedule(solution, new_examiners, new_students):\n examiners = deepcopy(new_examiners)\n students = deepcopy(new_students)\n\n def student_is_available(target_student, target_time, target_duration):\n \"\"\"\n Checks whether a student is available at a given time for a certain duration\n :param target_student: the student\n :param target_time: the time at which the student should be available\n :param target_duration: the duration during which the student should be available\n :return:\n \"\"\"\n for exam, exam_time in target_student.items():\n if exam_time == -1:\n continue\n\n if target_time <= exam_time < target_time + target_duration + delay:\n return False\n elif exam_time <= target_time < exam_time + durations[exam] + delay:\n return False\n\n return True\n\n def examiner_is_available(target_examiner, target_time):\n \"\"\"\n Checks whether an examiner is available at a given time for his exam's duration\n :param target_examiner: the examiner\n :param target_time: the duration during which the examiner should be available\n :return:\n \"\"\"\n examiner_number, examiner_exams = target_examiner[\"Number\"], target_examiner[\"Exams\"]\n\n for _, exam_time in examiner_exams.items():\n if exam_time == -1:\n continue\n\n if target_time <= exam_time < target_time + durations[examiner_number]:\n return False\n elif exam_time <= target_time < exam_time + durations[examiner_number]:\n return False\n\n return True\n\n examiners_order, *students_orders = solution\n\n for j in examiners_order:\n all_set = False\n t = 0\n while not all_set:\n all_set = [examiners[j][\"Exams\"][i] != -1 for i in range(student_count)] == [True] * student_count\n placed = False\n for student in students_orders[j]:\n if examiners[j][\"Exams\"][student] != -1:\n continue\n\n if student_is_available(students[student], t, durations[j]):\n if examiner_is_available(examiners[j], t):\n placed = True\n students[student][j] = t\n examiners[j][\"Exams\"][student] = t\n break\n\n if not placed:\n t += 1\n else:\n t += durations[j]\n\n return examiners, students", "def test_validate_self_final_state_transitions(self):\n with nose.assert_raises(exceptions.FinalStateError):\n self.dtm1.transitions['q4'] = {'0': ('q4', '0', 'L')}\n self.dtm1.validate_self()", "def reschedule():\n if not schedule.empty():\n purge_events() \n\n today_s = tuple_to_str(time.localtime()[:3])\n\n # first check if exception entry exist for today in datemap\n if today_s in datemap:\n \tschedule_day(datemap[today_s])\n else:\n # otherwise schedule it as normal weekday\n schedule_day(days[time.strftime(\"%A\")])", "def schedule_monitor(schedule):\n if 
schedule[\"state\"] == EC2State.STOPPED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= 7 - schedule[\n \"schedule\"\n ]:\n schedule[\"state\"] = EC2State.STARTED\n elif schedule[\"state\"] == EC2State.STARTED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= schedule:\n schedule[\"state\"] = EC2State.STOPPED\n else:\n return schedule, False\n\n return schedule, True", "def test_invalidate_code_change(self):\n workflow1 = self.get_workflow(\n \"\"\"file://result <- file://file1\n Initial code\n\"\"\")\n workflow2 = self.get_workflow(\n \"\"\"file://result <- file://file1\n Updated code\n\"\"\")\n invalid = workflow1.resources_not_created_the_same_way(workflow2)\n assert len(invalid) == 1, [(res.url, reason._reason) for (res, reason,) in invalid]\n (resource, invalidation_reason) = invalid[0]\n assert resource.url == \"file://result\"\n assert invalidation_reason == PROCESS_CHANGED, invalidation_reason", "def _match_movie_titles(self, cinema_schedule_data):\n matcher = MovieIDMatcher()\n invalid_titles = []\n for title, content in cinema_schedule_data.items():\n\n logging.warning(\"Matching movie: \" + title)\n\n imdb_id = matcher.match_imdb_id_from_title_recent(title)\n if imdb_id is None:\n logging.error(\"IMDb ID matched is invalid!\")\n invalid_titles.append(title)\n continue\n\n cinema_schedule_data[title] = {\n 'imdb_id': imdb_id,\n 'content': content\n }\n self.update_single_movie_data(imdb_id)\n logging.warning(\"matching successful!\")\n\n for invalid_title in invalid_titles:\n cinema_schedule_data.pop(invalid_title)", "def test_sequenced_parameterized_schedule(self):\n\n converter = QobjToInstructionConverter([], buffer=0)\n qobjs = [\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=10, phase=\"P1\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=20, phase=\"P2\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=30, phase=\"P3\"),\n ]\n converted_instruction = [converter(qobj) for qobj in qobjs]\n\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"inst_seq\", 0, Schedule(*converted_instruction, name=\"inst_seq\"))\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=3, P4=4, P5=5)\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, 1, 2, 3, 4, 5, 6, 7, 8)\n\n p3_expr = Parameter(\"p3\")\n p3_expr = p3_expr.bind({p3_expr: 3})\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)", "def test_invalid_sequence(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move sequence\n move_seq = [0, 5, 6, 6, 3, 2]\n # check sequence validity\n (cost, valid, endloc) = k1.validate_sequence(move_seq)\n self.assertFalse(valid)\n self.assertEqual(cost, 0)", "def schedule_paragraph():", "def __prepareTransitions(self):\n self.transition_matrix=self.markovModel._get_transmat()[:]\n for i in range(len(self.transition_matrix)):\n 
index=np.argmax(self.transition_matrix[i,:])\n if index==i:\n self.transition_matrix[i,index]=0", "def _ensure_times_are_as_expected(self, board_div: HtmlElement) -> MeetingTimes:\n # They put their meeting times in plain english inside paragraph tags\n # (p). Instead of trying to parse plain english I'm just going to\n # assume they don't change much.\n #\n # I expect the first p in the div to be the board and the second to be\n # the general membership p. If anything differs from whats expected an\n # error is thrown.\n #\n # This returns the times so that all the time stuff is handled in this\n # function. I'm assuming the first p is for the board and the second p\n # is general\n\n expected_board_p = \"ACLA Board meetings (6:30 pm unless otherwise noted)\"\n expected_general_p = \"General Membership meetings (7:00 pm)\"\n expected_advisory_p = \"(10:00 am)\"\n expected_lac_p = \"(10:00 am)\"\n\n ps = board_div.xpath(\"p\")\n if len(ps) != 3:\n raise PageChangedException()\n board_p, general_p, advisory_p = ps\n lac_ps = board_div.xpath(\"./div/p\")\n if len(lac_ps) < 2:\n raise PageChangedException()\n lac_p = lac_ps[0]\n\n if board_p.text_content().strip() != expected_board_p:\n raise PageChangedException()\n if general_p.text_content().strip() != expected_general_p:\n raise PageChangedException()\n if advisory_p.text_content().strip() != expected_advisory_p:\n raise PageChangedException()\n if lac_p.text_content().strip() != expected_lac_p:\n raise PageChangedException()\n\n return MeetingTimes(\n board=time(18, 30),\n general=time(19, 0),\n advisory=time(10, 0),\n lac=time(10, 0),\n )", "def schedule_localSearch(self,contents,areas):\n\t\tsolutions = []\n\t\tfor i in range(len(areas)):\n\t\t\tschedule = self.randomSchedule(contents)\n\t\t\tcount = 0\n\t\t\twhile True:\n\t\t\t\tif self.validSchedule(schedule)[0]:\n\t\t\t\t\tschedule.id = \"area\"+str(i+1)\n\t\t\t\t\tsolutions.append(schedule)\n\t\t\t\t\tcount = 0\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tschedule = self.transition(schedule)\n\t\t\t\t\tcount += 1\n\t\t\t\t\tif count > 10:\n\t\t\t\t\t\tcount = 0\n\t\t\t\t\t\tschedule = self.randomSchedule(contents)\n\t\tfor sol in solutions:\n\t\t\tsol.printSchedule()", "def __is_valid_move(self, scheduling_unit, turnus, date, person, overtime, depth=0, check_turnuses=[]):\n \n \n if not schedule_utils.is_valid_move (scheduling_unit, turnus, date, person, overtime):\n return False\n \n # if the workplace has the special rule: work in the afternoon, if the next\n # day is a work free day and you will work the next day, and you won't work\n # the next day, work in the morning or not at all\n if scheduling_unit.has_holiday_rule ( ):\n if holiday.is_workfree(date):\n prev_date = date - datetime.timedelta(days=1)\n prev_turnus = person.get_turnus(prev_date) \n if prev_turnus:\n # all afternoon codes start with P\n # all double shift codes start with C\n # TODO: document this\n if prev_turnus.code[0] != 'P' or prev_turnus.code[0] != 'C':\n return False\n else:\n return False\n else:\n next_date = date + datetime.timedelta(days=1)\n if holiday.is_workfree(next_date):\n # this bottom condition is enough, because the dates are added ascending\n if not person.is_free_day(next_date):\n return False\n \n # if the person schedules night turnuses in packages: \n # (Monday + Tuesday)\n # (Tuesday + Wednesday)\n # (Wednesday + Thursday)\n # (Friday + Saturday + Sunday)\n if person.packet_night_turnuses and turnus.code[0] == 'N':\n if depth == 0 and (date.weekday() == 0 or date.weekday() == 2 or 
date.weekday() == 4):\n return self.__is_valid_move(scheduling_unit, turnus, date + datetime.timedelta(days=1), person, overtime, depth + 1, check_turnuses + [turnus])\n #if this is the second day in the packet continue validation only if it is a Saturday\n elif depth == 1 and date.weekday() == 5:\n # TODO: allow only one holiday turnus per shift type (document this)\n sunday_night_turnus = None\n for alternative_turnus in self.mapper.get_turnuses (scheduling_unit, person):\n if alternative_turnus.holiday and alternative_turnus.code[0] == 'N':\n sunday_night_turnus = alternative_turnus\n break\n else:\n return False\n \n return self.__is_valid_move(scheduling_unit, sunday_night_turnus, date + datetime.timedelta(days=1), person, overtime, depth + 1, check_turnuses + [turnus])\n #Thursday to Friday combination does not exist\n elif depth == 1 and date.weekday() == 4:\n return False\n elif depth == 1:\n return True\n elif depth == 2:\n return True\n \n else:\n return False\n \n \n return True", "def not_valid_after(self):", "def test_validate_self_invalid_transition_symbol(self):\n with nose.assert_raises(exceptions.InvalidSymbolError):\n self.dtm1.transitions['q0']['2'] = ('q0', '0' 'R')\n self.dtm1.validate_self()", "async def _check_schedule(self, now, last):\n\n if self._schedule is None:\n return\n\n for event in self._schedule.events:\n if event.begin <= now:\n if event.begin > last:\n await self._announce_event(event)", "def check_transition_sanity(self):\n for trans in crest.get_all_transitions(self.model):\n assert trans._name is not None, f\"There is a transition in {trans._parent._name} ({trans._parent.__class__.__name__}) whose name is 'None'\"\n assert trans._name != \"\", f\"There is a transition in {trans._parent._name} ({trans._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(trans.source, crest.State), f\"Transition {trans._name}'s source is not a crest.State. It is: {trans.source} ({trans.source.__class__})\"\n assert trans.source in crest.get_states(trans._parent), f\"Transition source state {trans.source._name} ({trans.source}) is not in the states of entity {trans._parent._name} ({trans._parent})\"\n\n assert isinstance(trans.target, crest.State), f\"Transition {trans._name}'s target is not a crest.State. 
It is: {trans.target} ({trans.target.__class__})\"\n assert trans.target in crest.get_states(trans._parent), f\"Transition {trans._name}'s target state {trans.source._name} is not in the states of entity {trans._parent._name} ({trans._parent})\"\n\n assert isinstance(trans.guard, (crestml.LearnedFunction, types.FunctionType)), f\"Transition {influence._name}'s guard needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(trans.guard).parameters\n assert len(inspect.signature(trans.guard).parameters) == 1, \"A transition should not have arguments (except self)\"\n\n for port in SH.get_read_ports_from_update(trans.guard, trans):\n assert port in api.get_sources(trans._parent), f\"Transition {trans._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {trans._parent._name} ({trans._parent})\"", "def duplicate_transition_raise_error(old_transition, new_transition):\n raise ValueError(\"Attempting to re-insert transition %s\" % old_transition)", "def _validate_trajectory_transition(subgaits, from_subgait_names, to_subgait_names):\n for from_subgait_name, to_subgait_name in zip(from_subgait_names, to_subgait_names):\n\n if not all(name not in ('start', 'end', None) for name in (from_subgait_name, to_subgait_name)):\n continue # a start or end point can not be compared to a subgait\n\n from_subgait = next((subgait for subgait in subgaits if subgait.subgait_name == from_subgait_name), None)\n to_subgait = next((subgait for subgait in subgaits if subgait.subgait_name == to_subgait_name), None)\n\n if not from_subgait.validate_subgait_transition(to_subgait):\n raise NonValidGaitContent(msg='End setpoint of subgait {sn} to subgait {ns} does not match'\n .format(sn=from_subgait.subgait_name, ns=to_subgait.subgait_name))", "def test_valid_not_modified(self):\n coupon = COUPON_FACTORY.create_coupon()\n past_date = datetime.datetime.now() - datetime.timedelta(60)\n slot = Slot.objects.create(site_id=2,\n business_id=coupon.offer.business.id,\n start_date=past_date,\n end_date=past_date + datetime.timedelta(30))\n slot_time_frame_y = SlotTimeFrame.objects.create(slot=slot,\n coupon_id=coupon.id,\n start_datetime=past_date + datetime.timedelta(20))\n slot_time_frame_x = SlotTimeFrame.objects.create(slot=slot,\n coupon_id=coupon.id,\n start_datetime=past_date + datetime.timedelta(1),\n end_datetime=past_date + datetime.timedelta(2))\n try:\n slot_time_frame_x.justify_time_frames(slot_time_frame_y)\n except ValidationError as error:\n LOG.debug(error)\n self.fail('Valid slot time frames failed justification.')", "def test_invalidation_in_cascade(self):\n workflow1 = self.get_workflow(\n \"\"\"file://file2 <- file://file1\n Original code\n\nfile://file3 <- file://file2\"\"\")\n workflow2 = self.get_workflow(\n \"\"\"file://file2 <- file://file1\n Updated code\n\nfile://file3 <- file://file2\"\"\")\n invalid = workflow1.resources_not_created_the_same_way(workflow2)\n assert len(invalid) == 1, [(res.url, reason) for (res, reason,) in invalid]\n (resource, invalidation_reason) = invalid[0]\n assert resource.url == \"file://file2\"\n assert invalidation_reason == PROCESS_CHANGED\n resultant_from_dif = workflow1.dependant_resources([resource for (resource, _) in invalid])\n assert len(resultant_from_dif) == 1, resultant_from_dif\n (resource, invalidation_reason) = resultant_from_dif[0]\n assert resource.url == \"file://file3\"\n assert invalidation_reason.find(\"file://file2\") >= 0, invalidation_reason", "def 
test_update_instances_schedule_state(self):\n pass" ]
[ "0.64331365", "0.53829914", "0.53537375", "0.5132803", "0.5085885", "0.4992951", "0.49866506", "0.4924441", "0.4908995", "0.489999", "0.48967925", "0.4888517", "0.48863095", "0.4878813", "0.4878402", "0.48645383", "0.48517662", "0.48221093", "0.48145336", "0.4810384", "0.47993064", "0.4786985", "0.47869298", "0.47841388", "0.4780568", "0.47793436", "0.47626168", "0.47572303", "0.47518393", "0.4748189" ]
0.71358395
0
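
The transition described by this record is the repair move of the local search: find the placement that validSchedule flags, measure the contiguous free slots around it, and trade it with another placed content that is at least as long. The sketch below is a simplified, self-contained version of that swap; the grid representation and the helper names free_span, place, clear and swap_repair are assumptions made for illustration, not the original method's API.

    from collections import namedtuple

    Content = namedtuple("Content", ["id", "length"])

    def free_span(row, start, length):
        # Contiguous free slots around the placement at [start, start + length):
        # returns (leftmost free index, total number of slots available there).
        lo, hi = start, start + length
        while lo > 0 and row[lo - 1] is None:
            lo -= 1
        while hi < len(row) and row[hi] is None:
            hi += 1
        return lo, hi - lo

    def place(grid, row, start, content):
        for i in range(content.length):
            grid[row][start + i] = (start + i, content)

    def clear(grid, row, start, content):
        for i in range(content.length):
            grid[row][start + i] = None

    def swap_repair(grid, bad_row, bad_start, bad):
        # One repair move: trade the offending block with some other placed block that
        # is at least as long (so the offender fits in its slot) and that itself fits
        # into the free space surrounding the offender's current position.
        new_start, space = free_span(grid[bad_row], bad_start, bad.length)
        for r, row in enumerate(grid):
            c = 0
            while c < len(row):
                slot = row[c]
                if slot is None:
                    c += 1
                    continue
                other = slot[1]
                if other.id != bad.id and other.length <= space and bad.length <= other.length:
                    clear(grid, r, c, other)                # empty the partner's slot
                    place(grid, r, c, bad)                  # offender moves into it
                    clear(grid, bad_row, bad_start, bad)    # empty the offender's old slot
                    place(grid, bad_row, new_start, other)  # partner moves into the freed span
                    return True
                c += other.length
        return False

    if __name__ == "__main__":
        grid = [[None] * 20 for _ in range(3)]
        a, b = Content(id=1, length=3), Content(id=2, length=5)
        place(grid, 0, 0, a)
        place(grid, 1, 10, b)
        print(swap_repair(grid, 0, 0, a))              # True: a and b trade places
        print(grid[1][10][1].id, grid[0][0][1].id)     # -> 1 2

If no trade partner exists the move returns False; the surrounding search can then fall back to drawing a fresh random schedule, which is what the schedule_localSearch loop quoted in the negatives does after ten failed repairs.
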
Driver for scheduling problem
def mainSchedule(): import time c1 = Content(1,5,20) c2 = Content(2,6,30) c3 = Content(3,5,25) c1_ = Content(1,1,20) c5 = Content(5,3,29) c6 = Content(6,11,50) c7 = Content(7,7,34) c1__ = Content(1,3,20) c8 = Content(8,6,10) a1 = Area('a1',1.0) a2 = Area('a2',0.5) a3 = Area('a3',0.8) contents = [c1,c2,c3,c1_,c5,c6,c7,c1__,c8] areas = [a1,a2,a3] sol_schedule = Schedule_solution() print "random sampling schedule:\n" time_r = time.time() schedule_sols = sol_schedule.schedule_randomSampling(contents,areas) print "running time,",time.time()-time_r print "local search schedule:" time_l = time.time() schedule_sols_local = sol_schedule.schedule_localSearch(contents,areas) print "running time,",time.time()-time_l sol_selection = Selection_solution() sol_selection.select_bruteforce(4,*schedule_sols) #argument unpacking
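
The driver above builds the demo Content and Area objects, times the random-sampling and local-search schedulers, and hands the resulting schedules to the brute-force selection step. A small, runnable timing harness in the same spirit is sketched below; the dummy_solver, the tuple stand-ins for Content and Area, and the time_solver helper are hypothetical placeholders rather than part of the original module; in the real driver the callables would be Schedule_solution().schedule_randomSampling and schedule_localSearch.

    import time

    def time_solver(label, solver, *args):
        # Run one scheduler, print its wall-clock time, and return its solutions
        # for the selection step, mirroring the timing pattern of the driver above.
        t0 = time.time()
        solutions = solver(*args)
        print(label, "running time,", time.time() - t0)
        return solutions

    if __name__ == "__main__":
        # Hypothetical stand-in solver so the harness runs on its own.
        def dummy_solver(contents, areas):
            time.sleep(0.01)
            return ["schedule for %s" % name for name, _ in areas]

        contents = [(1, 5, 20), (2, 6, 30), (3, 5, 25)]   # (id, length, value) triples
        areas = [("a1", 1.0), ("a2", 0.5), ("a3", 0.8)]   # (name, weight) pairs
        sampled = time_solver("random sampling", dummy_solver, contents, areas)
        searched = time_solver("local search", dummy_solver, contents, areas)
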
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_schedules(self):\n\n ''''''", "def checkUpstreamScheduler():", "def transmission_scheduler(self, ap_index:int):\n # sched_load = False\n # next_transmission_time = 0\n # current_sq = self.rec_reg.read()[ap_index]\n \n # for i in range(len(self.sch.queue)):\n # if self.sch.queue[i][1] == 4:\n # sched_load = True\n # next_transmission_time = self.sch.queue[i][0]\n # break\n \n # sched_args = list()\n # time_deadlines = list()\n \n # if sched_load:\n # time_sum = next_transmission_time + self.transmit_freq/2 \n # else:\n # time_sum = time.time()\n \n # #prebuild a list of transmission events and times for efficient entry into the scheduler\n # while True:\n # # delay added at start to avoid race between transmit() trying to read from the queue \n # # and the scheduler filling the queue\n # time_sum += self.transmit_freq\n # try:\n # # session queue of type bitarray\n # sched_args.append(current_sq.next())\n # # delete session queue object when the full queue is added to the scheduler\n # except ValueError:\n # # disconnect signal for transmit\n # time_deadlines.append(time_sum)\n # sched_args.append(None)\n # break\n # time_deadlines.append(time_sum)\n \n # #enter transmission events into the scheduler\n # for i in enumerate(time_deadlines):\n # self.sch.enterabs(time_deadlines[i], 4, self.transmit, \n # argument=(ap_index,sched_args[i]), kwargs={})\n # #print_queue(self.s.queue)\n # self.log.info(\"Scheduled transmission events for AP: %s\", ap_index)\n # self.log.info(\"Estimated transmission duration (s): %s\", \n # self.sch.queue[len(self.sch.queue)-1][0]-self.sch.queue[0][0])", "def schedule_task(self, counter):\n task_to_schedule = []\n each_agent = self.agents[counter]\n task_found = False\n H1_score_list = []\n H2_score_list = []\n H3_score_list = []\n H1_dict = {}\n H2_dict = {}\n H3_dict = {}\n\n # Agent not idle case, exit immediately\n if self.is_agent_idle[counter][0] == 0:\n print(each_agent.getName(), 'is not Idle')\n print(each_agent.getName(), 'is scheduled for null task')\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n return task_to_schedule\n # if agent is busy, output null task\n\n for task_num, each_task in enumerate(self.tasks):\n if self.is_task_finished[0][task_num] == 1: # Can't schedule a task that has already been completed\n continue\n if self.is_task_alive[0][task_num] == 0:\n continue\n # if self.is_task_enabled[0][task_num] == 0:\n # continue\n # if self.travel_time_constraint_satisfied[0][task_num] == 0:\n # continue\n if self.is_task_in_progress[0][task_num] == 1: # can't schedule the same task twice\n continue\n\n # All constraints satisfied\n # normalize each score separately\n deadline_score = (self.heuristic(heuristic_num=1, task_num=task_num, agent_num=counter))\n occupacity_score = (self.heuristic(heuristic_num=2, task_num=task_num, agent_num=counter))\n distance_score = (self.heuristic(heuristic_num=3, task_num=task_num, agent_num=counter)) * 150 / np.sqrt(\n 32) # The 150/np.sqrt(32) puts it in the same range as deadline score\n\n H1_dict[task_num] = deadline_score\n H1_score_list.append(deadline_score)\n\n H2_dict[task_num] = occupacity_score\n H2_score_list.append(occupacity_score)\n\n H3_dict[task_num] = distance_score\n H3_score_list.append(distance_score)\n\n task_found = True\n\n if not task_found:\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[3])\n return task_to_schedule\n\n new_dict = 
{}\n for key in H1_dict:\n new_dict[key] = H1_dict[key] * self.w_EDR + H2_dict[key] * self.w_RESOURCE + H3_dict[key] * self.w_DISTANCE\n\n highest = max(new_dict.values()) # figure out the highest score\n tasks_with_best_scores = [k for k, v in new_dict.items() if v == highest] # find all keys associated with the highest value\n if len(tasks_with_best_scores) > 1:\n print(tasks_with_best_scores)\n\n if self.do_you_like_big_tasks:\n task_chosen = max(tasks_with_best_scores)\n else:\n task_chosen = min(tasks_with_best_scores)\n\n print('Expert: Task chosen for', each_agent.getName(), ' is ', task_chosen, ' enabled: ', self.is_task_enabled[0][task_chosen])\n self.teacher_actions[self.global_schedule_num].append(task_chosen)\n # neural net task\n self.converge_embedding_based_on_history(task_chosen, counter)\n neural_net_task = self.predict_task(task_chosen, counter)\n self.learner_actions[self.global_schedule_num].append(neural_net_task)\n\n print('Neural Net: Task chosen for', each_agent.getName(), ' is ', neural_net_task, ' enabled: ',\n self.is_task_enabled[0][neural_net_task])\n\n # all of this changed to represent neural net task\n\n # if self.is_task_enabled[0][neural_net_task] == 0:\n # print('Task was not enabled, but is alive')\n\n if neural_net_task == task_chosen:\n self.num_correct_predictions_total[self.global_schedule_num] += 1\n\n self.num_predictions_total[self.global_schedule_num] += 1\n\n # Only do it if all of the pre-conditions are met\n\n if self.global_schedule_num != 0:\n location_of_task = self.tasks[neural_net_task].getloc()\n vectorized_task_num = self.get_vectorized_location(location_of_task) # checks if current task is in a location that is occupied\n if self.is_task_alive[0][neural_net_task] == 0 or \\\n self.is_task_enabled[0][neural_net_task] == 0 or \\\n self.travel_time_constraint_satisfied[counter][neural_net_task] == 0 or \\\n self.agent_locations[0][vectorized_task_num] >= 1 or \\\n self.is_task_in_progress[0][neural_net_task]:\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n print('Task ', neural_net_task, ' did not meet criteria of being enabled, alive, travel satisfied, or not occupied')\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[1])\n return task_to_schedule\n\n else: # global task schedule num is zero\n if self.is_task_enabled[0][task_chosen] == 0:\n print('Task was not enabled, but is alive')\n # Only do it if all of the pre-conditions are met\n location_of_task = self.tasks[task_chosen].getloc()\n vectorized_task_num = self.get_vectorized_location(location_of_task) # checks if current task is in a location that is occupied\n if self.is_task_alive[0][task_chosen] == 0 or \\\n self.is_task_enabled[0][task_chosen] == 0 or \\\n self.travel_time_constraint_satisfied[counter][task_chosen] == 0 or \\\n self.agent_locations[0][vectorized_task_num] >= 1:\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n print('task ', task_chosen, ' did not meet criteria of being enabled, alive, travel satisfied, or not occupied')\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[1])\n return task_to_schedule\n\n if self.global_schedule_num != 0:\n # if self.t > self.task_deadlines[0][neural_net_task]:\n if self.has_any_deadlines_passed(neural_net_task):\n task_to_schedule.append(-1)\n print('Deadline is passed')\n # updated where this is changed\n self.did_schedule_fail = True\n 
self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[2])\n return task_to_schedule\n else:\n # if self.t > self.task_deadlines[0][neural_net_task]:\n if self.has_any_deadlines_passed(task_chosen):\n task_to_schedule.append(-1)\n print('Deadline is passed')\n # updated where this is changed\n self.did_schedule_fail = True\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[2])\n return task_to_schedule\n\n if self.global_schedule_num != 0:\n task_to_schedule.append(neural_net_task)\n self.agent_current_task[counter] = task_to_schedule[0] # changes agent current task\n self.task_to_schedule = task_to_schedule\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[4])\n self.number_of_decisions_before_terminal_state[self.global_schedule_num] += 1\n # maybe remove the return\n print('Task scheduled for', each_agent.getName(), 'at time ', self.t, 'is ', self.task_to_schedule)\n return task_to_schedule # ALL zero indexed\n else:\n task_to_schedule.append(task_chosen)\n self.agent_current_task[counter] = task_to_schedule[0] # changes agent current task\n self.task_to_schedule = task_to_schedule\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[4])\n self.number_of_decisions_before_terminal_state[self.global_schedule_num] += 1\n # maybe remove the return\n print('Task scheduled for', each_agent.getName(), 'at time ', self.t, 'is ', self.task_to_schedule)\n return task_to_schedule # ALL zero indexed", "def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)", "def Scheduler():\n courses = \"cs108 cs112 cs214 stat343 cs336 cs300\".split()\n profs = \"norman adams schuurman pruim vanderlinden\".split()\n slots = \"mwf900 mwf1130 tth1030 tth130\".split()\n rooms = \"sb354 nh064\".split()\n \n variables = courses\n assignments = {}\n assignments['cs108'] = \"norman\"\n assignments['cs112'] = \"adams\"\n assignments['cs214'] = \"adams\"\n assignments['stat343'] = \"pruim\"\n assignments['cs336'] = \"vanderlinden\"\n assignments['cs300'] = \"schuurman\"\n neighbors = parse_neighbors(\"\"\"\n cs108: norman; cs112: adams; \n cs214: adams; stat343: pruim; \n cs336: vanderlinden; cs300: schuurman\n \"\"\", variables)\n domains = {}\n for course in courses:\n domains[course] = []\n for course in courses:\n for prof in profs:\n for room in rooms:\n for slot in slots:\n domains[course].append(prof + \" \" + room + \" \" + slot)\n \n for type in [courses]:\n for A in type:\n for B in type:\n if A != B:\n if B not in neighbors[A]:\n neighbors[A].append(B)\n if A not in neighbors[B]:\n neighbors[B].append(A)\n\n def scheduler_constraints(A, a, B, b, recurse=0):\n ADomain = a.split()\n BDomain = b.split()\n A_Prof = ADomain[0]\n B_Prof = BDomain[0]\n A_Room = ADomain[1]\n B_Room = BDomain[1]\n A_Slot = ADomain[2]\n B_Slot = BDomain[2]\n A_Course = A\n B_Course = B\n \n if(A_Prof == B_Prof and A_Slot == B_Slot):\n return False\n if(A_Room == B_Room and A_Slot == B_Slot):\n return False\n\n if('norman' in a and A == 'cs108'):\n return True\n if('adams' in a and A == 'cs112'):\n return True\n if('adams' in a and A == 'cs214'):\n return True\n if('pruim' in a and A == 'stat343'):\n return True\n if('vanderlinden' in a and A == 'cs336'):\n return True\n if('schuurman' in a and A == 'cs300'):\n return True\n if(A in courses and B in courses):\n return False\n if(recurse == 0):\n return scheduler_constraints(B, b, A, a, 1)\n 
return True\n \n return CSP(variables, domains, neighbors, scheduler_constraints)", "def __run_schedules():\n while True:\n __scheduler.run()", "def _schedule(self, instruction_list, schedule_mode):\n if schedule_mode:\n scheduler = Scheduler(schedule_mode)\n scheduled_start_time = scheduler.schedule(instruction_list)\n time_ordered_pos = np.argsort(scheduled_start_time)\n instruction_list = [instruction_list[i] for i in time_ordered_pos]\n scheduled_start_time.sort()\n else: # no scheduling\n scheduled_start_time = [0.]\n for instruction in instruction_list[:-1]:\n scheduled_start_time.append(\n instruction.duration + scheduled_start_time[-1])\n return instruction_list, scheduled_start_time", "def _schedule(self, instruction_list, schedule_mode):\n if schedule_mode:\n scheduler = Scheduler(schedule_mode)\n scheduled_start_time = scheduler.schedule(instruction_list)\n time_ordered_pos = np.argsort(scheduled_start_time)\n instruction_list = [instruction_list[i] for i in time_ordered_pos]\n scheduled_start_time.sort()\n else: # no scheduling\n scheduled_start_time = [0.0]\n for instruction in instruction_list[:-1]:\n scheduled_start_time.append(\n instruction.duration + scheduled_start_time[-1]\n )\n return instruction_list, scheduled_start_time", "def schedule_task(self, name, date):\n pass", "def __init__(self, meta, pid):\r\n # Parse workers\r\n self.workers = []\r\n workers = meta.get(\"workers\", None)\r\n if workers is None:\r\n raise SchedulerError(f\"Requires 'workers' field\")\r\n \r\n if not isinstance(workers, list):\r\n raise SchedulerError(f\"Expected 'workers' as a list\")\r\n\r\n for worker in workers:\r\n name = worker.get('type', None)\r\n background = worker.get('async', False)\r\n args = worker.get('args', {})\r\n if name is None:\r\n raise SchedulerError(f\"Requires 'type' field\")\r\n\r\n if not isinstance(background, bool):\r\n raise SchedulerError(f\"Expected 'async' as a bool\")\r\n\r\n try:\r\n worker = WorkerFactory.build(name, args, pid, background)\r\n except FactoryError as e:\r\n raise SchedulerError(f\"Error building '{name}': {str(e)}\")\r\n\r\n self.workers.append(worker)\r\n\r\n # Parse mode\r\n self.mode = None\r\n self.time = None\r\n schedule = meta.get('schedule', None)\r\n if schedule is None:\r\n self.mode = self.MODE_INSTANT\r\n else:\r\n mode = schedule.get('mode', None)\r\n if mode is None:\r\n raise SchedulerError(f\"'schedule' requires 'mode' field\")\r\n \r\n self.mode = self.MODE_MAP.get(mode, None)\r\n if self.mode is None:\r\n raise SchedulerError(f\"Unrecognized value for 'mode': {mode}\")\r\n\r\n # Get the delay\r\n if self.mode == self.MODE_DELAY:\r\n delay = schedule.get('delay', None)\r\n if delay is None:\r\n raise SchedulerError(f\"'schedule' requires 'seconds' field when in the specified mode\")\r\n\r\n if not isinstance(delay, (int, float)):\r\n raise SchedulerError(f\"Expected 'seconds' as a float or int\")\r\n\r\n self.time = delay\r\n elif self.mode == self.MODE_ALARM:\r\n time = schedule.get('time', None)\r\n if time is None:\r\n raise SchedulerError(f\"'schedule' requires 'time' field when in the specified mode\")\r\n \r\n try:\r\n trigger_time = datetime.strptime(time, '%m/%d/%y %H:%M:%S')\r\n except Exception as e:\r\n print(e)\r\n raise SchedulerError(f\"Failed to parse '{time}' as a datetime object\")\r\n\r\n self.time = trigger_time", "def task():", "def scheduledscansobjects():\n pass", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler 
= Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def schedule(self, newTask, time):\r\n sys.exit(\"You should implement function schedule in subclass\")\r\n pass", "def _schedule(self,task_dict):\n times = [time(), None, None, None] # (schedule timestamp, execution timestamp, stop timestamp, get timestamp)\n result_id = self._extract_features.remote(self, times) # calculation is started in new remote task \n task_dict[result_id] = self._idx # add sample index ", "def task(self,target,name = \"\", prio = 10, period = 0, time2run = 0):\n newtask = Task(target,name,prio,period, time2run)\n self.taskmap[newtask.tid] = newtask\n self.schedule(newtask)\n\n\n return newtask.tid", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_execute_monitoring_schedule_vendor_v3(self):\n pass", "def schedule_task(self, task):\n if self.time_based:\n minimum_wait_server = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_wait_server > server.waiting_time:\n target_server = server\n minimum_wait_server = server.waiting_time\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")\n else:\n minimum_jobs = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_jobs > len(server.jobs):\n minimum_jobs = len(server.jobs)\n target_server = server\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")", "def test_update_instances_schedule_state(self):\n pass", "def schedule(self, pid, delta_t):\n\t\tif len(self._process_list) == 0:\n\t\t\treturn None\n\n\t\tif self._last_index >= len(self._process_list):\n\t\t\tself._last_index = 0\n\t\t\n\t\twhile True:\n\n\t\t\tpriority = sort_priority()\n\t\t\t# print \"SCHEDULING PRIORITY \", priority, \"\\n\"\n\t\t\tactual_index = self._last_index + 1\n\n\t\t\twhile True:\n\t\t\t\t\n\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\tactual_index = 0\n\t\t\t\t\n\t\t\t\tprocess = self._process_list[actual_index] \n\t\t\t\t\n\t\t\t\tif find_priority(process.get_priority()) == priority:\n\t\t\t\t\tself._last_index = self._process_list.index(process)\n\t\t\t\t\t# print \"FIND PROCESS \" , actual_index, \"\\n\"\n\t\t\t\t\treturn process\n\t\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tactual_index += 1\n\n\t\t\t\t\tif actual_index >= 
len(self._process_list):\n\t\t\t\t\t\tactual_index = 0\n\n\t\t\t\t\t# print \"LAST INDEX: \", self._last_index, \" ACTUAL INDEX: \", actual_index\t\t\t\t\t\n\n\t\t\t\t\tif actual_index == self._last_index: \n\t\t\t\t\t\tbreak", "def task(self):", "def task(self):", "def algo_schedule():\n\talgo(node.id, node)\n\treactor.callLater(STEP_TIME, algo_schedule)", "def schedule(self, pid, delta_t):\n\n\t\tif len(self._process_list) == 0: \n\t\t\treturn None\n\n\t\tif self._last_index >= len(self._process_list):\n\t\t\tself._last_index = 0\n\t\t\n\t\twhile True:\n\n\t\t\tpriority = sort_priority()\n\t\t\t# print \"SCHEDULING PRIORITY \", priority, \"\\n\"\n\t\t\tactual_index = self._last_index + 1\n\n\t\t\twhile True:\n\t\t\t\t# print \"FIND PROCESS \" , actual_index, \"\\n\"\n\t\t\t\t\n\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\tactual_index = 0\n\t\t\t\t\n\t\t\t\tprocess = self._process_list[actual_index] \n\t\t\t\t\n\t\t\t\tif find_priority(process.get_priority()) == priority:\n\t\t\t\t\tself._last_index = self._process_list.index(process)\n\t\t\t\t\treturn process\n\t\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tactual_index += 1\n\n\t\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\t\tactual_index = 0\n\t\t\t\t\t\tpriority = sort_priority()\n\n\t\t\t\t\t# print \"LAST INDEX: \", self._last_index, \" ACTUAL INDEX: \", actual_index\t\t\t\t\t\n\n\t\t\t\t\tif actual_index == self._last_index: \n\t\t\t\t\t\tbreak", "async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def _schedule(self, when):\n sched = IScheduler(self.store)\n for scheduledAt in sched.scheduledTimes(self):\n # https://github.com/twisted/epsilon/issues/38\n if when._time < scheduledAt._time:\n sched.reschedule(self, scheduledAt, when)\n break\n else:\n sched.schedule(self, when)", "def test_sequenced_parameterized_schedule(self):\n\n converter = QobjToInstructionConverter([], buffer=0)\n qobjs = [\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=10, phase=\"P1\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=20, phase=\"P2\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=30, phase=\"P3\"),\n ]\n converted_instruction = [converter(qobj) for qobj in qobjs]\n\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"inst_seq\", 0, Schedule(*converted_instruction, name=\"inst_seq\"))\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=3, P4=4, P5=5)\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, 1, 2, 3, 4, 5, 6, 7, 8)\n\n p3_expr = Parameter(\"p3\")\n p3_expr = p3_expr.bind({p3_expr: 3})\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = 
inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)", "def scheduler(self):\n while True:\n if self.sch.empty():\n self.log.info(\"No scheduled jobs detected. Entering idle state\")\n bits = bitarray()\n # generate random 7B bitarrays\n for _ in range(pow(self.cube_dim,3)):\n bits.append(bool(random.getrandbits(1)))\n self.sch.enter(self.transmit_freq, 4, self.transmit, argument=(0, bits), kwargs={})\n else:\n try:\n self.log.info(\"Scheduled jobs detected. Serving through scheduler runner\")\n self.sch.run()\n except IOError as exc:\n self.log.exception(\"\"\"Scheduler runner encountered an error while executing the \n top level event: %s\"\"\", exc)\n sys.exit(1) # exit with status code 1" ]
[ "0.6768109", "0.64659697", "0.64603806", "0.6372004", "0.63610935", "0.6340818", "0.63037664", "0.6301751", "0.62980455", "0.6271578", "0.62470627", "0.6239573", "0.6231668", "0.6222913", "0.6192812", "0.6166951", "0.6150469", "0.6150188", "0.61315256", "0.61208034", "0.60991937", "0.607249", "0.6066878", "0.6066878", "0.6047169", "0.60361683", "0.6034518", "0.6025605", "0.60165495", "0.59573466" ]
0.6536917
1
Returns the Minio client
def minio_client(self): return Minio( os.environ.get("MINIO_SERVER"), access_key=os.environ.get("MINIO_ACCESS_KEY"), secret_key=os.environ.get("MINIO_SECRET_KEY"), secure=False )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_minio_client():\n minio_client = Minio(\n Config.MINIO_ENDPOINT,\n access_key=Config.MINIO_ACCESS_KEY,\n secret_key=Config.MINIO_SECRET_KEY,\n secure=Config.MINIO_SECURE,\n )\n return minio_client", "def get_minio_client(\n host=\"minio\", \n port=\"9000\", \n access_key=\"minio\", \n secret_key=\"minio123\", \n secure=False\n ):\n return Minio(\n endpoint=f\"{host}:{port}\",\n access_key=access_key,\n secret_key=secret_key,\n secure=secure\n )", "def get_minio_client(access: str, secret: str) -> Minio:\n return Minio(\n config.minio_host + \":9000\",\n access_key=access,\n secret_key=secret,\n secure=False\n )", "def get_client():\n return Client(__address, authkey='strumamor')", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def get_client(self):\n return self.client", "def client():\n\n client = Client()\n return client", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def Client(self):\n return self._client", "def client(self):\n return self._client", "def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def get_client():\n\n return MongoClientManager().client", "def client(self):\n return self._thread._client", "def client(self):\n\n return self._client", "def base_client(self):\n return self._client", "def uclient():\n return IceCubedSyncClient()", "def get_contribs_client():\n from mpcontribs.client import Client\n\n headers = get_consumer()\n\n if is_localhost():\n return Client(apikey=get_user_api_key())\n else:\n return Client(headers=headers)", "def admin_client():\n host = '127.0.0.1'\n port = 8126\n return TcpClient(host, port)", "def get_conn(self) -> WebClient:\n return self.client", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def _http_client(self):\n\n self.__enforce_connected()\n return self.collection._http_client", "def client(self) -> mqtt.Client:\n return self._client", "def client():\n return IceCubedSyncClient(\"api_key\", \"secret\")", "def resource_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ResourceManagementClient)\n return self.client", "def client(self):\n response = requests.get(self._url(self._CLIENT_PATH), headers=self._headers)\n return response.json()", "def client(self) -> str:\n return pulumi.get(self, \"client\")", "def compute_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ComputeManagementClient)\n return self.client", "async def get_docker_client(self) -> \"DockerClient\":" ]
[ "0.8690317", "0.8170216", "0.7581131", "0.7048299", "0.6804743", "0.6720981", "0.6715593", "0.66408753", "0.65887773", "0.6549165", "0.65434384", "0.65317345", "0.64015293", "0.638567", "0.6359832", "0.63489944", "0.6335766", "0.6322186", "0.62669396", "0.62140334", "0.6203519", "0.62032354", "0.61555654", "0.6118968", "0.6086648", "0.6031395", "0.6005536", "0.60030663", "0.5943325", "0.5908213" ]
0.86054754
1
Returns the S3 resource
def s3_resource(self): return boto3.resource('s3', aws_access_key_id=os.environ.get("MINIO_ACCESS_KEY"), aws_secret_access_key=os.environ.get("MINIO_SECRET_KEY"), endpoint_url=f'http://{os.environ.get("MINIO_SERVER")}', config=Config(signature_version='s3v4') )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def s3resource(self):\n return self._s3resource", "def get_s3_client():\n return boto3.resource('s3')", "def _get_s3_object(self, s3_path):\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n return self.s3_resource.Object(bucket_name, key)", "def _get_resource(\n session: Optional[boto3.Session] = None, region: Optional[str] = None\n) -> S3ServiceResource:\n return (\n session.resource(\"s3\") if session else boto3.resource(\"s3\", region_name=region)\n )", "def s3(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"s3\")", "def get_s3_object(self, key):\n try:\n bucket_name = app.config['S3_BUCKET_NAME']\n s3_client = app.config['S3']\n response = s3_client.get_object(Bucket=bucket_name, Key=key)\n return response['Body'].read()\n except Exception:\n return None", "def s3(self) -> Optional['outputs.DataRepositoryAssociationS3']:\n return pulumi.get(self, \"s3\")", "def make_s3(sitename):\n return s3.S3(sitename)", "def get_s3_file(self, no_copy=False):\n return self.get_file(uri_type=URI_S3, no_copy=no_copy)", "def __get_s3_client(self):\n if self.AWS_ACCESS_KEY:\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=self.AWS_ACCESS_KEY,\n aws_secret_access_key=self.AWS_SECRET_ACCESS_KEY,\n )\n else:\n s3_client = boto3.client(\"s3\")\n return s3_client", "def s3client(self):\n return self._s3client", "def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise", "def get_s3_connection(self):\n return connection.S3Connection(\n config.get('nereid_s3', 'access_key'),\n config.get('nereid_s3', 'secret_key')\n )", "def _get_aws_s3_connection(cls, access_key, secret_access_key):\n return boto.connect_s3(access_key, secret_access_key)", "def s3_client(self):\n return boto3.client('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def _get_s3_bucket(bucket_name: str):\n s3 = getattr(_resources_for_thread, \"s3\", None)\n if s3 is None:\n # When multi-threaded, we can't use the default session. 
So keep one per thread.\n session = boto3.session.Session()\n s3 = session.resource(\"s3\")\n _resources_for_thread.s3 = s3\n return s3.Bucket(bucket_name)", "def question():\n\n return s3_rest_controller()", "def get_s3_client(profile_name):\n try:\n session = boto3.session.Session(profile_name=profile_name)\n except ProfileNotFound as e:\n print(e, file=sys.stderr)\n raise FailureException from e\n return session.resource('s3')", "def fetch(iid):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n obj = s3.Bucket(BUCKET_NAME).Object(iid).get()\n if obj:\n return obj.get('Body')\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # get locally from temp dir (tests, local development)\n return get_temp_file(iid)\n return None", "def boto_init_s3(bucket_name):\n c = boto.connect_s3(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)\n b = c.get_bucket(bucket_name)\n\n return b", "def getS3Object(self, bucket=None, s3Path=None, destPath=None):\n methodName = \"getS3Object\"\n \n if (not bucket):\n raise MissingArgumentException(\"An S3 bucket name (bucket) must be provided.\")\n #endIf\n \n if (not s3Path):\n raise MissingArgumentException(\"An S3 object key (s3Path) must be provided.\")\n #endIf\n \n if (not destPath):\n raise MissingArgumentException(\"A file destination path (destPath) must be provided.\")\n #endIf\n \n TR.info(methodName, \"STARTED download of object: %s from bucket: %s, to: %s\" % (s3Path,bucket,destPath))\n \n s3url = self.s3.generate_presigned_url(ClientMethod='get_object',Params={'Bucket': bucket, 'Key': s3Path},ExpiresIn=60)\n TR.fine(methodName,\"Getting S3 object with pre-signed URL: %s\" % s3url)\n #endIf\n \n destDir = os.path.dirname(destPath)\n if (not os.path.exists(destDir)):\n os.makedirs(destDir)\n TR.info(methodName,\"Created object destination directory: %s\" % destDir)\n #endIf\n \n r = requests.get(s3url, stream=True)\n with open(destPath, 'wb') as destFile:\n shutil.copyfileobj(r.raw, destFile)\n #endWith\n\n TR.info(methodName, \"COMPLETED download from bucket: %s, object: %s, to: %s\" % (bucket,s3Path,destPath))\n \n return destPath", "def get(self, key):\n return s3.Object(self.bucket.name, key).get()['Body'].read()", "def s3_data_conn ( self ) :\n if not self.s3_data :\n self.s3_data = boto.s3.connection.S3Connection( self.access_key, self.access_key_secret )\n return self.s3_data", "def retrieve_s3_contents ( s3_conn, bucket_name, key_name, stored_filename = None ) :\n bucket = s3_conn.get_bucket( bucket_name )\n key = boto.s3.key.Key( bucket )\n key.key = key_name\n if key.exists( ) :\n if stored_filename :\n key.get_contents_to_filename( stored_filename )\n return stored_filename\n\n return key.get_contents_as_string( )\n\n return None", "def __init__(self):\n self.s3_resource = boto3.resource('s3')\n self.s3_client = boto3.client('s3')", "def _aws_get_object(bucket, key, request_pays=True, client=None):\n if not client:\n session = boto3_session(region_name=REGION)\n client = session.client(\"s3\")\n\n params = {\"Bucket\": bucket, \"Key\": key}\n if request_pays:\n params[\"RequestPayer\"] = \"requester\"\n response = client.get_object(**params)\n return response[\"Body\"].read()", "def authorizedClientCall(self):\n\t\tclient \t\t= boto3.client(\"s3\")\n\t\treturn(client)", "def get_s3_object(bucket, key_name, local_file):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n 
s3_resource.Bucket(bucket).download_file(key_name, local_file)\n result = 'ok'\n tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')\n except botocore.exceptions.ClientError as e:\n tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')\n if e.response['Error']['Code'] == '404':\n result = f'Error: s3://{bucket}/{key_name} does not exist'\n else:\n result = f'Error: {str(e)}'\n\n return(result)", "def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def get_object(self, key):\n r = self.s3client.get_object(Bucket = self.s3_bucket, Key = key)\n data = r['Body'].read()\n return data" ]
[ "0.8834526", "0.8136219", "0.80481434", "0.77161956", "0.75156814", "0.7499978", "0.7383272", "0.73342884", "0.7209911", "0.71779615", "0.71755385", "0.7132572", "0.71012807", "0.7084859", "0.7083436", "0.70288396", "0.70011854", "0.6988842", "0.6984709", "0.69316506", "0.69097435", "0.69086164", "0.6895835", "0.6886167", "0.68837047", "0.68490475", "0.68071353", "0.68032044", "0.6799065", "0.67793936" ]
0.81758285
1
Moves a specified amount of messages to the specified channel.
async def move(self, ctx, limit: int, channel: TextChannel): # Clear the bot command message await ctx.message.delete() messages = [] async for message in ctx.message.channel.history(limit=limit): embed = Embed(description=message.content) embed.set_author(name=message.author.name, icon_url=message.author.avatar) embed.timestamp = message.created_at messages.append(embed) await message.delete() for i in messages[::-1]: await channel.send(embed=i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def move_messages(\n client,\n event,\n channel: ('channel_group_messageable', 'Where to move the message.'),\n):\n check_move_permissions(client, event, channel, False)\n \n if channel.is_in_group_thread():\n channel_id = channel.parent_id\n thread_id = channel.id\n else:\n channel_id = channel.id\n thread_id = 0\n \n context = MessageMoverContext(client, event, event.channel_id, channel_id, thread_id)\n await context.start()", "async def move_player(self, player : Player, channel):\r\n await player.move_to(channel)", "async def move(self, ctx, *, channel):\n author = ctx.message.author\n current_channel = author.voice.channel\n voice_channels = ctx.guild.voice_channels\n members = current_channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n done = False\n for vc in voice_channels:\n if vc.name == channel or str(vc.id) == str(channel):\n if channel.isdigit():\n channel = vc.name\n done = True\n await user.edit(voice_channel=vc)\n if not done:\n await ctx.send(\":x: Invalid voice channel name.\")\n return\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully moved the following users:\",\n title=f'{current_channel.name} ------> {channel}')\n await ctx.send(embed=embed)", "async def move_channel_loop(self):\n try:\n while self.state == CHANNEL_MOVE_STATE_NONE:\n message = await self.poll_message()\n if message is None:\n break\n \n try:\n await self.move_message(message)\n except DiscordException as err:\n if err.code == ERROR_CODES.unknown_webhook:\n self.set_status_update_waiter_webhook_deleted()\n return\n \n raise\n \n self.last_message_id = message.id\n self.total_moved_messages += 1\n \n except GeneratorExit:\n raise\n \n except CancelledError:\n raise\n \n except BaseException as err:\n self.set_status_update_waiter_error()\n await self.client.events.error(self.client, repr(self), err)\n return\n \n finally:\n self.discard()\n \n self.set_status_update_waiter_finished()", "async def move(self, ctx, *, channel_name: str):\r\n if ctx.voice_client:\r\n if discord.utils.get(ctx.guild.channels, name=channel_name):\r\n await ctx.voice_client.move_to(discord.utils.get(ctx.guild.channels, name=channel_name))\r\n print(\"[INFO] Bot moved to channel {0}\".format(channel_name))\r\n self.channel_name = channel_name\r\n else:\r\n await ctx.send(\"Channel {0} not found\".format(channel_name))\r\n else:\r\n await self.connect(ctx, channel_name=channel_name)\r\n await ctx.message.delete()", "async def channel(self, ctx, limit: int=100, channel: discord.TextChannel=None):\n\n if channel is None:\n channel = ctx.channel\n\n # noinspection PyUnresolvedReferences\n messages = await channel.purge(limit=limit)\n messages = len(messages)\n\n plural = '' if messages == 1 else 's'\n\n await ctx.send('Purged {} message{}.'.format(messages, plural), delete_after=10)", "async def move_to(self, channel: Optional[VocalGuildChannel], *, reason: Optional[str] = None) -> None:\n await self.edit(voice_channel=channel, reason=reason)", "async def send_wrapped_message(channel, message):\n for part in wrap(message, 2000):\n await channel.send(part)", "async def movePlayer(self, ctx, voiceChannel, reason):\n self.msgToDelete.append(await ctx.message.channel.send(\"Déplacement des joueurs ...\"))\n for member in ctx.author.voice.channel.members:\n await member.move_to(channel=voiceChannel, reason=reason)", "async def channel_(self, ctx, number=10):\n number = number if number <= 100 else 100\n question = await ctx.send(f\"this 
will delete the last {number} messages from ALL users. Continue?\")\n await question.add_reaction(self.reactions[0])\n await question.add_reaction(self.reactions[1])\n\n def check_is_author(reaction, user):\n return reaction.message.id == question.id and user.id == ctx.author.id and \\\n reaction.emoji in self.reactions\n try:\n reaction, user = await self.bot.wait_for(\"reaction_add\", check=check_is_author, timeout=20)\n if reaction.emoji == self.reactions[1]:\n await question.delete()\n return\n except asyncio.TimeoutError:\n await question.delete()\n return\n\n try:\n messages = await ctx.channel.purge(limit=number+1)\n await ctx.send(f\"deleted the last {len(messages)-1} messages from this channel\")\n except (discord.ClientException, discord.Forbidden, discord.HTTPException) as e:\n await ctx.send(str(e))\n except Exception as ex:\n import traceback\n owner = ctx.guild.get_member(self.bot.owner_id)\n if owner:\n await owner.send(traceback.print_exc())\n self.error_log.error(traceback.print_exc())", "async def move_message(self, message):\n target_channel = self.target_channel\n if target_channel.is_in_group_thread():\n thread_id = target_channel.id\n else:\n thread_id = 0\n \n files = await get_files(self.client, message)\n try:\n await create_webhook_message(self.client, self.webhook, message, thread_id, files)\n except:\n # Unallocate files if exception occurs\n files = None\n raise", "def drop_message(self, client, channel, i):\n del self.storage[client][channel][i]", "def move_cards(self, hand, num):\n for i in range(num):\n hand.add_card(self.pop_card())", "def modify_channel(self, channel):\n self._poller.modify(channel.fileno, channel._events)", "def move_cards(self, hand, num):\n \n # Check to see if the deck has enough cards\n if len(self.cards) < num:\n print(\"There aren't enough cards in the stack\")\n return\n \n for i in range(num):\n hand.cards.append(self.cards.pop())", "async def purge(self, ctx, amount: int = 1):\r\n if amount > 500:\r\n return await ctx.reply(\"Too many messages, limit is 500\", delete_after=3)\r\n await ctx.message.delete()\r\n await ctx.channel.purge(limit=amount)\r\n if amount >= 2:\r\n await ctx.send(f\"Purged {amount} mesages\", delete_after=3)\r\n else:\r\n await ctx.send(f\"Purged {amount} message\", delete_after=3)", "def _move_chunk(self, args: MigrationArgs) -> None:\n def move_command():\n self._mongo_client.admin.command(\"moveChunk\", args.collection, find={SHARD_KEY: args.shard_key},\n to=args.shard, _secondaryThrottle=False, _waitForDelete=True)\n self._try_until_done(move_command)\n self._chunks[args.collection][args.shard_key] = args.shard\n logging.info(f\"MongoAgent: Moved chunk {args.shard_key} of collection {args.collection} to {args.shard}\")", "async def clear(ctx, amount=10):\n\tawait ctx.channel.purge(limit=amount + 1)", "def msgSlowly(self, channel, lines, delay=700):\n send = lambda line: self.msg(channel, line)\n send(lines[0])\n for n, line in enumerate(lines[1:]):\n from . 
import irc as myself\n if getattr(myself, 'TESTING', False):\n send(line)\n else:\n reactor.callLater(n*(delay/1000.0), send, line)", "def move(self, delta):\n newPos = (self._pos + delta) % self._board.size\n # check for Pass GO condition\n if delta > 0 and newPos < self._pos:\n self._cash += 200\n self._board.acceptNotification(notification.PNPassGo(self))\n\n self._pos = newPos\n self._board.acceptNotification(notification.PNPlayerMove(self))", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def move_by(self, increment):\n return self.move_to(self.position + increment)", "def move_cards(self, hand, num=1):\n for i in range(num):\n hand.add_card(self.pop_card())", "def move(self,amount):\n self.positionx=self.positionx+self.amount\n return self.positionx", "def send_spam_msg(driver, name, message, n):\r\n\r\n for i in range(n):\r\n send_message(driver, name, message)", "def move(self, distance):\n self._go(distance)", "def channel_move_sort_key(channel_key):\n return channel_key[0]", "def move(self, action):\n pile, count = action\n\n # Check for errors\n if self.winner is not None:\n raise Exception(\"Game already won\")\n elif pile < 0 or pile >= len(self.piles):\n raise Exception(\"Invalid pile\")\n elif count < 1 or count > self.piles[pile]:\n raise Exception(\"Invalid number of objects\")\n\n # Update pile\n self.piles[pile] -= count\n self.switchPlayer()\n\n # Check for a winner\n if all(pile == 0 for pile in self.piles):\n self.winner = self.player", "def move(self, action):\n pile, count = action\n\n # Check for errors\n if self.winner is not None:\n raise Exception(\"Game already won\")\n elif pile < 0 or pile >= len(self.piles):\n raise Exception(\"Invalid pile\")\n elif count < 1 or count > self.piles[pile]:\n raise Exception(\"Invalid number of objects\")\n\n # Update pile\n self.piles[pile] -= count\n self.switch_player()\n\n # Check for a winner\n if all(pile == 0 for pile in self.piles):\n self.winner = self.player", "async def delete_bot_msg(self, channel):\n await channel.purge(limit=100, check=self.is_me)" ]
[ "0.7316035", "0.6188762", "0.61035264", "0.60280615", "0.5928516", "0.5692596", "0.56376415", "0.5602749", "0.5475421", "0.53118956", "0.5237113", "0.5188172", "0.51433134", "0.51310295", "0.5110141", "0.5107266", "0.5092806", "0.5050932", "0.5045758", "0.5038465", "0.5031702", "0.5030054", "0.49914506", "0.49893892", "0.49880433", "0.49749747", "0.4967866", "0.49274793", "0.492477", "0.49171913" ]
0.7020896
1
Check client version string, and save their reward address
def _cmd_version(self): version = self._recv().split('.') rewards = self._recv() if version[0] != MINER_VERSION_ROOT: self._send('notok') return self.close() is_hex = all(c in string.hexdigits for c in rewards) if len(rewards) == 56 and is_hex: self._reward_address = rewards LOG.info('Client connected: version="%r" address="%r"', version, self._reward_address) self._send('ok')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkVersion(self, clientName, edamVersionMajor, edamVersionMinor):\r\n pass", "async def get_version(self) -> str or bool:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # tell the user we are getting the version\n self.add_to_output(\"Getting version...\")\n retries = 0\n while True:\n # open a connection to the [cgminer, bmminer, bosminer] API port (4028)\n connection_fut = asyncio.open_connection(self.ip, 4028)\n try:\n # get reader and writer streams from connection\n reader, writer = await asyncio.wait_for(connection_fut, timeout=5)\n # send the standard version command (JSON)\n writer.write(b'{\"command\":\"version\"}')\n # wait until command is finished sending\n await writer.drain()\n # read the returned data\n data = await reader.read(4096)\n # let the user know we recieved data\n self.add_to_output(\"Recieved data...\")\n # close the writer\n writer.close()\n # make sure the writer is fully closed\n await writer.wait_closed()\n # load the returned data (JSON), and remove the null byte at the end\n data_dict = json.loads(data[:-1].decode('utf-8'))\n # tell the user the version of the miner\n self.add_to_output(f'Version is {data_dict[\"VERSION\"][0][list(data_dict[\"VERSION\"][0].keys())[1]]}...')\n if \"BOSminer+\" in data_dict[\"VERSION\"][0].keys() or \"BOSminer\" in data_dict[\"VERSION\"][0].keys():\n # get/create ssh connection to miner\n conn = await self.get_connection(\"root\", \"admin\")\n # send the command and store the result\n try:\n result = await conn.run(\"cat /etc/bos_version\")\n version_base = result.stdout\n version_base = version_base.strip()\n version_base = version_base.split(\"-\")\n version = version_base[-2]\n if version == NEWEST_VERSION:\n return \"New\"\n else:\n return \"BOS+\"\n except:\n return \"BOS+\"\n else:\n return \"Antminer\"\n except asyncio.exceptions.TimeoutError:\n # we have no version, the connection timed out\n self.add_to_output(\"Get version failed...\")\n return False\n except ConnectionRefusedError:\n # add to retry times\n retries += 1\n # connection was refused, tell the user\n self.add_to_output(\"Connection refused, retrying...\")\n # make sure it doesnt get stuck here\n if retries > 3:\n self.add_to_output('Connection refused, attempting install...')\n return \"Antminer\"\n await asyncio.sleep(3)\n except:\n self.add_to_output(\"Unknown error getting version, attempting install...\")\n return \"Antminer\"", "def test_fullVERSION(self):\n self.client.versionName = \"FrobozzIRC\"\n self.client.versionNum = \"1.2g\"\n self.client.versionEnv = \"ZorkOS\"\n self.client.ctcpQuery_VERSION(\"[email protected]\", \"#theChan\", None)\n versionReply = (\n \"NOTICE nick :%(X)cVERSION %(vname)s:%(vnum)s:%(venv)s\"\n \"%(X)c%(EOL)s\"\n % {\n \"X\": irc.X_DELIM,\n \"EOL\": irc.CR + irc.LF,\n \"vname\": self.client.versionName,\n \"vnum\": self.client.versionNum,\n \"venv\": self.client.versionEnv,\n }\n )\n reply = self.file.getvalue()\n self.assertEqualBufferValue(reply, versionReply)", "def test_new_client(self):\n version, file = self.get('', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_2", "def test_min_client(self):\n for version in Version.objects.filter(pk__gte=self.version_1_2_0):\n appversion = version.apps.all()[0]\n appversion.min = AppVersion.objects.get(pk=325) # 3.7a5\n appversion.save()\n\n version, file = self.get('', '3070000005000', # 3.7a5pre\n self.app, self.platform)\n assert version == self.version_1_1_3", 
"def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))", "def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)", "def reader_version(self):\n\n try:\n result, sw1, sw2 = self.apdu_plain(b\"\\xff\\x00\\x48\\x00\\x00\")\n if len(result) > 0:\n str_result = result + bytes([sw1]) + bytes([sw2])\n str_result = str_result.decode(\"utf-8\")\n return str_result\n except Exception as e:\n print(\"Get version error:\", e)\n return \"n/a\"", "def _get_version(self):", "def test_low_client(self):\n version, file = self.get('', '3000000001100',\n self.app, self.platform)\n assert version == self.version_1_0_2", "async def do_version():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n version = data['version']['name']\n await bot.send_message(c, f'Minecraft version {version}')", "def checkForUpdates(cversion):\r\n \r\n # set a list of constant versions\r\n \r\n if MpGlobal.SAVED_VERSION == \"0.0.0.0\" :\r\n return;\r\n \r\n v1 = \"0.4.2.0\" # update songs in library to contain index values.\r\n \r\n # if any version compares are less than 0 then updates are required.\r\n update = versionCompare(cversion,v1) < 0;\r\n \r\n \r\n \r\n if update:\r\n print \"updates are required\"\r\n runUpdater(cversion);", "def test_version(self):\n result = check_output([b\"flocker-changestate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))", "def test_get_version(self):\n pass", "def check_recommended_versions_result(context, version):\n json_data = context.response.json()\n result = json_data[\"recommended_versions\"]\n assert result == version, \"different version found {} != {}\".format(version, result)", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def test_version(self):\n result = check_output([b\"flocker-reportstate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))", "def test__get_program_version():\n version = util._get_program_version(\"midgard\")\n assert isinstance(version, str) and re.search(\"[0-9]\", version)", "def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version", "def test_get_version(self, mpd_client):\n mpd_client.return_value.mpd_version = \"0.19.0\"\n # when we call MPC.get_version()\n version = MPC.get_version()\n\n # then we get the version of the mpd server\n self.assertEqual(version, \"0.19.0\")", "def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()", "def test_old_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('gr-qc/9901123v3'))", "def test_new_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('1202.1234v1'))\n 
self.assertIsNotNone(parse_arxiv_id('1203.12345v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v12'))", "def test_client_address_update(self):\n pass", "def get_version(client):\n version = client.info()['version']['number']\n version = version.split('-')[0]\n if len(version.split('.')) > 3:\n version = version.split('.')[:-1]\n else:\n version = version.split('.')\n return tuple(map(int, version))", "def version(self):\n r = requests.get(\"http://%s/api/version\" %(self.url), headers=self.headers)\n if r.status_code == 200:\n return True, r.content\n else:\n return False, {}", "def is_valid_version(self):\n pass", "def test_check_version(mock_send_message):\n A1sim.check_version(BASE_URL)\n mock_send_message.assert_called_once_with('GET',\n 'Get ric version',\n (f\"{BASE_URL}/counter/interface\"))", "def version(self, app, args):\n app.put('\\n\\n%s\\n' % _version_str)", "def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + \"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"" ]
[ "0.6100829", "0.5877899", "0.57476854", "0.5690405", "0.56460065", "0.5602962", "0.55144006", "0.55133027", "0.5433361", "0.54241884", "0.5423058", "0.5406405", "0.5389954", "0.53869414", "0.5382821", "0.53797954", "0.5372991", "0.53600615", "0.533975", "0.5339371", "0.53272575", "0.53141135", "0.5299173", "0.529645", "0.5292416", "0.52850676", "0.5273725", "0.5258511", "0.5236654", "0.52354234" ]
0.6978058
0
Highest block consensus information for all peers
def consensus(self): total_peers = 0 blocks = list() for peer in self.peers.values(): assert len(peer.blocks) > 0 if not peer.synched: continue total_peers += 1 blocks.extend(peer.blocks) # blocks = [peer.blocks[-1] for peer in self.peers.values() # if len(peer.blocks) and peer.synched] counts = defaultdict(int) heights = dict() timestamps = dict() for block in blocks: heights[block[1]] = block[0] counts[block[1]] += 1 # Retrieve newest timestamp for block if block[2] is not None: stamp = max([float(X[0]) for X in block[2]]) assert stamp is not None timestamps[block[1]] = stamp else: timestamps[block[1]] = block[3] result = list() for block_hash, num in counts.items(): block_height = heights[block_hash] consensus_pct = (num / float(total_peers)) * 100.0 row = (ConsensusBlock(int(block_height), block_hash, timestamps[block_hash]), num, consensus_pct) result.append(row) results = sorted(result, lambda x, y: int(y[0].height - x[0].height)) half_hour_ago = time.time() - (60*30) if len(results): # If there isn't enough data to get an accurate Difficulty rating # (requires 30 mins of data), then fill out with stuff from Ledger DB oldest_result = results[-1] oldest_time = min([X[0].stamp for X in results]) if oldest_time < half_hour_ago: merge_rows = ResultsManager.history_fetch(half_hour_ago, oldest_result[0].height) for row in merge_rows: results.append((row, 0, 100)) else: results = [(row, 0, 100) for row in ResultsManager.history_fetch(half_hour_ago)] # Verify consensus is above 50%, and notify result manager if results[0][2] >= 50: # LOG.warning('XXX adding new consensus peers:%r %r', total_peers, results) ResultsManager.on_consensus(results[0][0]) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def consensus():\n global blockchain\n\n longest_chain = None\n current_len = len(blockchain.chain)\n\n for node in peers:\n response = requests.get('{}chain'.format(node))\n length = response.json()['length']\n chain = response.json()['chain']\n if length > current_len and blockchain.check_chain_validity(chain):\n current_len = length\n longest_chain = chain\n\n if longest_chain:\n blockchain = longest_chain\n return True\n\n return False", "def consensus():\n global blockchain\n\n longest_chain = None\n current_len = len(blockchain.chain)\n\n for node in peers:\n response = requests.get('{}chain'.format(node))\n length = response.json()['length']\n chain = response.json()['chain']\n if length > current_len and blockchain.check_chain_validity(chain):\n current_len = length\n longest_chain = chain\n\n if longest_chain:\n blockchain = longest_chain\n return True\n\n return False", "def quick_consensus_secondary_structure( self ):\n consensus = []\n for i in range(1, max(self[\"position\"].values) + 1):\n values = self[self[\"position\"] == i][\"sse\"].values\n qseq = sorted(Counter(values).most_common(), key=lambda x: (-x[1], x[0]))[0]\n consensus.append(qseq[0])\n return \"\".join(consensus)", "def get_majority_blocks(self):\n\n if not self.overlap:\n return self.b.copy()\n else:\n return self.total_state.get_majority_blocks()", "def mine_blocks(self, count):\n\n # Clear out block announcements from each p2p listener\n [x.clear_block_announcements() for x in self.nodes[0].p2ps]\n self.generatetoaddress(self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address)\n return int(self.nodes[0].getbestblockhash(), 16)", "def get_height_best_block(self) -> int:\n heads = [self.get_transaction(h) for h in self.get_best_block_tips()]\n highest_height = 0\n for head in heads:\n assert isinstance(head, Block)\n head_height = head.get_height()\n if head_height > highest_height:\n highest_height = head_height\n\n return highest_height", "def get_majority_blocks(self):\n\n bv = self.get_overlap_blocks()\n bv, bc = bv[0], bv[-1]\n b = self.base_g.new_vertex_property(\"int\")\n self._state.get_maj_overlap(self.base_g._Graph__graph,\n _prop(\"v\", self.base_g, bv),\n _prop(\"v\", self.base_g, bc),\n _prop(\"v\", self.base_g, b))\n return b", "def get_last_block_hash(self):\n cmd = \"\"\" SELECT %s FROM %s WHERE %s = (SELECT MAX(%s) FROM %s); \"\"\" %(\n COL_BLOCKCHAIN_BLOCK_HASH, TABLE_BLOCKCHAIN, COL_BLOCKCHAIN_BLOCKID,\n COL_BLOCKCHAIN_BLOCKID, TABLE_BLOCKCHAIN)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchone()", "def get_heaviest_chain_tip(self):\n\n block_hashes_to_total_weights = self.get_all_block_weights()\n heaviest_block = None\n for block_hash in block_hashes_to_total_weights:\n block = self.blocks[block_hash]\n weight_in_block = block_hashes_to_total_weights[block_hash]\n if heaviest_block == None or weight_in_block > heaviest_weight:\n heaviest_block = block\n heaviest_weight = weight_in_block\n\n return heaviest_block", "def get_highest_block(self):\n if self._highest_block is not None:\n return self._highest_block\n\n highest_in_cache = None\n if self.block_cache:\n main_chain = [block for block in self.block_cache.values() if block.chain == MAIN_CHAIN]\n if main_chain:\n highest_in_cache = max(main_chain, key=lambda b: b.height)\n\n highest_in_db = self.blocks.find_one({\"chain\": MAIN_CHAIN}, sort=[(\"height\", -1)])\n if highest_in_db:\n mongo_block_transactions = self.transactions.find({\"blockhash\": highest_in_db['hash']})\n highest_in_db = 
MongoBlockFactory.from_mongo(highest_in_db, mongo_block_transactions)\n\n highest_block = max([highest_in_cache, highest_in_db])\n self.set_highest_block(highest_block)\n return self._highest_block", "def get_last_block(self):\n cmd = \"\"\" SELECT * FROM %s WHERE %s = (SELECT MAX(%s) FROM %s); \"\"\" %(\n TABLE_BLOCKCHAIN, COL_BLOCKCHAIN_BLOCKID, COL_BLOCKCHAIN_BLOCKID,\n TABLE_BLOCKCHAIN)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchone()", "def get_best_block(self, node):\n block_height = node.getblockcount()\n blockhash = node.getblockhash(block_height)\n block = FromHex(CBlock(), node.getblock(blockhash, 0))\n block.calc_sha256()\n self.block_heights[block.sha256] = block_height\n return block", "def get_last_block():\n if namoto_length < 1:\n return None\n\n return namoto_blockchain[-1]", "def quick_consensus_sequence( self ):\n consensus = []\n for i in range(1, max(self[\"position\"].values) + 1):\n values = self[self[\"position\"] == i][\"aa\"].values\n qseq = sorted(Counter(values).most_common(), key=lambda x: (-x[1], x[0]))[0]\n consensus.append(qseq[0])\n return \"\".join(consensus)", "def get_last_block(self) -> Block:\n return self.blockchain[-1]", "def get_balances_blockchain(addresses):\r\n print(\"* blockchain.info not yet supported\")\r\n return None", "def test_single_chain(self):\n self.assertEqual(len(self.genesis_blocks), 1)\n manager = self.create_peer('testnet', tx_storage=self.tx_storage)\n\n # The initial score is the sum of the genesis\n score = self.genesis_blocks[0].weight\n for tx in self.genesis_txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 100 blocks in a row with no transaction but the genesis\n blocks = add_new_blocks(manager, 100, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata(force_reload=True)\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 30, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 50 more blocks in a row with no transactions between them\n blocks = add_new_blocks(manager, 50)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n # Mine 15 more blocks with 10 transactions between each block\n for _ in range(15):\n txs = add_new_transactions(manager, 10, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n blocks = add_new_blocks(manager, 1)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n self.assertConsensusValid(manager)", "def get_newest_blocks(self, count: int) -> tuple[list[Block], bool]:\n raise NotImplementedError", "def get_all_highest_piece_blocks(self):\n self.reset_state()\n self.update_state()\n\n active_piece_blocks = self.active_piece.get_block_positions()\n result_blocks = {}\n for i in range(self.MAX_X + 1):\n result_blocks[i] = (i, -1)\n for x in range(self.MAX_X):\n for y in range(self.MAX_Y, self.MIN_Y - 1, -1):\n if self.state[y][x] is not None and (x, 
y) not in active_piece_blocks:\n result_blocks[x] = (x, y)\n break\n return result_blocks", "def blocks_under_highest_justified(validator):\n res = 0\n for bhash, b in validator.processed.items():\n if isinstance(b, Block):\n if b.height <= validator.highest_justified_checkpoint.height:\n res += 1\n return res", "def compute_longest_chain(self):\n max_len = 0\n for m in self.miners:\n if m.chain.blocks[-1].height > max_len:\n max_len = m.chain.blocks[-1].height\n self.main_chain = m.chain.blocks", "def getBlocks(self):\n blocks = self.getBlocksMsg(b'\\x00')\n last_locator = self.largeMessageControl(blocks, 'inv', 0)\n\n while last_locator[1] < TARGET_BLOCK:\n blocks = self.getBlocksMsg(bytearray.fromhex(convertLittleBig(last_locator[0])))\n last_locator = self.largeMessageControl(blocks, 'inv', last_locator[1])\n\n print('\\nSuccessfully found the Block #{}: {}'.format(TARGET_BLOCK, last_locator[0]))\n return last_locator[0]", "def get_last_blockchain_value(self):\n # chekking if the blockchian is empty or not\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value():\n return blockchain[-1]", "def get_best_block(self) -> Block:\n assert self.indexes is not None\n block_hash = self.indexes.height.get_tip()\n block = self.get_transaction(block_hash)\n assert isinstance(block, Block)\n assert block.get_metadata().validation.is_fully_connected()\n return block", "def get_first_block(blockchain):\n response = requests.get('https://api.blockcypher.com/v1/%s/main' % blockchain)\n if response.status_code == 200:\n return int(json.loads(response.content.decode('latin1'))['height'])\n elif response.status_code == 429:\n print('Too many requests')\n return -1", "def last_block(self):\n return self.client.call('GET', self.name + 'last-block')", "def get_last_blockchain_value():\n if len(blockchain)< 1:\n return None\n return blockchain[-1]", "def get_consensus(self, get_iterator=False):\r\n\r\n nsData = self.sendAndRecv(\"GETINFO dir/status-vote/current/consensus\\r\\n\")[0][2]\r\n if get_iterator: return ns_body_iter(nsData)\r\n else: return parse_ns_body(nsData)", "def test_block_bad_consensus(self):\n pass" ]
[ "0.6532132", "0.6532132", "0.649755", "0.63097155", "0.62579066", "0.60755134", "0.6013463", "0.5995913", "0.5967726", "0.5833175", "0.5774598", "0.5754582", "0.5719536", "0.5695856", "0.5680934", "0.5660314", "0.5659518", "0.55405015", "0.553655", "0.5525904", "0.55225396", "0.55213594", "0.55171037", "0.55050415", "0.5496271", "0.54951566", "0.5491047", "0.546052", "0.54532623", "0.54498106" ]
0.7636462
0
Check warnings for a business.
def check_warnings(business: Business) -> list: result = [] # Currently only checks for missing business info warnings but in future other warning checks can be included # e.g. compliance checks - result.extend(check_compliance(business)) result.extend(check_business(business)) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warning_check(self, rule_to_check, valid):\r\n for warning in self.warning_functions:\r\n warning(rule_to_check, valid)", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def has_warnings(self) -> bool:", "def has_warnings_active(self) -> bool:", "def is_warning(self):\n\n return self.severity == AlertSeverity.TOLERABLE", "def log_check_warnings(self):\n self._log_check_warnings_object(self._info)\n self._log_check_warnings_object(self._tags)\n self._log_check_warnings_object(self._schemes)\n self._log_check_warnings_object(self._paths)\n self._log_check_warnings_object(self._securityDefinitions)\n self._log_check_warnings_object(self._definitions)\n pass", "def _check_business_unit(self, business_unit: str, allowed_business_units: list):\n if business_unit not in allowed_business_units:\n raise ValueError(\n f\"business_unit must be one of {', '.join(allowed_business_units)}\"\n )", "def eval_warnings(self):\n\n # Ensure the minimum number of warnings were raised.\n assert len(self.war) >= len(self.warn_msgs)\n\n # Test the warning messages, ensuring each attribute is present.\n testing.eval_warnings(self.war, self.warn_msgs)\n return", "def isWarning(self):\n return _libsbml.XMLError_isWarning(self)", "def has_warning(self) -> bool:\n return self._has_warning", "def has_warnings(self) -> bool:\n return len(self.warnings) > 0", "def warnings(self) -> List[Error]:", "def is_warning(self) -> bool:\n return not self.get_warning()", "def is_warning(self) -> bool:\n return not self.get_warning()", "def warnings_active(self) -> List[Error]:", "def warning(self, warning):\n pass", "def warning(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.WARNING))", "def test_email_warnings_only_sent_for_open_bookings(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, tzinfo=dt_timezone.utc\n )\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 13, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=datetime(2015, 2, 11, tzinfo=dt_timezone.utc),\n cancellation_period=1)\n baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False, status='OPEN',\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=3,\n )\n baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False, status='CANCELLED',\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=3,\n )\n _add_user_email_addresses(Booking)\n \n management.call_command('email_warnings')\n self.assertEqual(len(mail.outbox), 3)\n for booking in Booking.objects.filter(status='OPEN'):\n self.assertTrue(booking.warning_sent)\n for booking in Booking.objects.filter(status='CANCELLED'):\n self.assertFalse(booking.warning_sent)", "def check_journaled(con, warning, critical,perf_data):\n\n warning = warning or 20\n critical = critical or 40\n try:\n data=get_server_status(con)\n journaled = data['dur']['journaledMB'] \n message=\"Journaled : %.2f MB\" % journaled\n message+=performance_data(perf_data,[(\"%.2f\"%journaled,\"journaled\",warning, critical)])\n return check_levels(journaled,warning, critical, message)\n\n except Exception, e:\n 
return exit_with_general_critical(e)", "def eval_dep_warnings(warns, check_msgs):\n\n # Initialize the output\n found_msgs = [False for msg in check_msgs]\n\n # Test the warning messages, ensuring each attribute is present\n for iwar in warns:\n if iwar.category == DeprecationWarning:\n for i, msg in enumerate(check_msgs):\n if str(iwar.message).find(msg) >= 0:\n found_msgs[i] = True\n\n return found_msgs", "def warnings(self, d):\n\n if d['filter_nu'] == 220e9:\n if d['beam_shape'] == 'gaussian':\n warnings.warn('The nu dependency of the gausian beam FWHM '\n 'is not a good approximation in the 220 GHz band.')\n elif d['beam_shape'] == 'fitted_beam':\n warnings.warn('Beam and solid angle frequency dependence implementation '\n 'in the 220 GHz band for the fitted beam does not correctly describe '\n 'the true behavior')", "def _warn(self, warning=None):\r\n debug.err('Warning: %s' % warning)\r\n\r\n if core.FW_conf['settings'].TestRun.ExecutionMode == 'Leader' and warning != None:\r\n executeInFollower(\"self.warn('%s')\" % (warning,))\r\n\r\n if type(warning) != types.ListType:\r\n warning = [warning]\r\n\r\n self.result.addStepWarning(warning)", "async def warning(self, check, *, note=None):\n return await self.mark(check, \"warning\", note=note)" ]
[ "0.6800652", "0.6201962", "0.6201962", "0.6201962", "0.6201962", "0.6201962", "0.6201962", "0.6201962", "0.6201962", "0.61174935", "0.58892727", "0.5828802", "0.5805223", "0.57828254", "0.5765763", "0.57578164", "0.5753095", "0.5727858", "0.57266426", "0.56829894", "0.56829894", "0.56752604", "0.561107", "0.56107837", "0.55976564", "0.5504616", "0.54963243", "0.54847986", "0.54724765", "0.54626524" ]
0.7674091
0
initialize the two objects: reload if the database can be reloaded, else init a database; return two bills objects
def init(): database = "database.pkl" onsite_bills = BillID(database) online_bills = BillID(database) return onsite_bills, online_bills
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\r\n self.postgres = PostgreSQL()\r\n self.couch_query = Queries()\r\n super(Blockages, self).__init__()", "def load_database(self, main_class):\n main_class.database.delete_all(\"render\")\n main_class.database.delete_all(\"object\")\n #main_class.database.delete_all(\"output\")\n render_csv = os.path.join(self.filepath, \"Render_data.csv\")\n object_csv = os.path.join(self.filepath, \"Obj_data.csv\")\n main_class.database.import_excel(render_csv, \"render\")\n main_class.database.import_excel(object_csv, \"object\")\n\n render_dic=main_class.database.get_database_dict(\"render\")\n\n main_class.render_database = main_class.database.get_data_from_database(\"render\")\n main_class.object_database = main_class.database.get_data_from_database(\"object\")\n\n main_class.background_picture_list = main_class.database.get_background_pictures_names()\n main_class.packaging_picture_list = main_class.database.get_bubble_wrap_pictures_names()\n\n main_class.camera_settings.append([0, 0, 0, 0, 100])\n for obj in main_class.render_database:\n \"\"\"\n extracting Camerasetting from Database and set all important angles and distances\n \"\"\"\n if obj[render_dic[\"object_type\"]] == \"camera\":\n for i in range(0, int(obj[render_dic[\"polar_angle_segments\"]])):\n for j in range(0, int(obj[render_dic[\"azimuth_angle_segments\"]])):\n pol_min = obj[render_dic[\"polar_angle_min\"]]\n pol_max = obj[render_dic[\"polar_anglel_max\"]]\n pol_segments= obj[render_dic[\"polar_angle_segments\"]]\n pol_random=obj[render_dic[\"polar_angle_random_rad\"]]\n try:\n pol_min = float( pol_min.replace(',','.'))\n except:\n pass\n try:\n pol_max = float( pol_max.replace(',','.'))\n except:\n pass\n try:\n pol_segments = float( pol_segments.replace(',','.'))\n except:\n pass\n try:\n pol_random = float( pol_random.replace(',','.'))\n except:\n pass\n polar_angle = (pol_min + ((pol_max - pol_min)/(pol_segments))*i)\n\n azi_min = obj[render_dic[\"azimuth_angle_min\"]]\n azi_max = obj[render_dic[\"azimuth_angle_max\"]]\n azi_segments= obj[render_dic[\"azimuth_angle_segments\"]]\n azi_random= obj[render_dic[\"azimuth_angle_random_rad\"]]\n\n try:\n azi_min = float( azi_min.replace(',','.'))\n except:\n pass\n try:\n azi_max = float( azi_max.replace(',','.'))\n except:\n pass\n try:\n azi_segments = float( azi_segments.replace(',','.'))\n except:\n pass\n try:\n azi_random = float( azi_random.replace(',','.'))\n except:\n pass\n azimuth_angle = (azi_min + ((azi_max - azi_min)/(azi_segments))*j)\n\n position=[polar_angle, pol_random, azimuth_angle, azi_random, obj[render_dic[\"radius\"]] ]\n print(\"camera position added: \",position)\n main_class.camera_settings.append(position)\n \n if obj[render_dic[\"object_type\"]]==\"light\":\n\n if obj[render_dic[\"name\"]]==\"SUN\":\n radius= obj[render_dic[\"radius\"]]\n try:\n radius = float( radius.replace(',','.'))\n except:\n pass\n light_obj=[ obj[render_dic[\"name\"]] , [0,0, radius ] ]\n main_class.light_settings.append(light_obj)\n print(\"sun added to list\")\n\n if obj[render_dic[\"name\"]]==\"SPOT\":\n for i in range(0, int(obj[render_dic[\"polar_angle_segments\"]])):\n for j in range(0, int(obj[render_dic[\"azimuth_angle_segments\"]])):\n pol_min = obj[render_dic[\"polar_angle_min\"]]\n pol_max = obj[render_dic[\"polar_anglel_max\"]]\n pol_segments= obj[render_dic[\"polar_angle_segments\"]]\n pol_random=obj[render_dic[\"polar_angle_random_rad\"]]\n try:\n pol_min = float( pol_min.replace(',','.'))\n except:\n pass\n try:\n pol_max = float( 
pol_max.replace(',','.'))\n except:\n pass\n try:\n pol_segments = float( pol_segments.replace(',','.'))\n except:\n pass\n try:\n pol_random = float( pol_random.replace(',','.'))\n except:\n pass\n polar_angle = (pol_min + ((pol_max - pol_min)/(pol_segments))*i)\n\n azi_min = obj[render_dic[\"azimuth_angle_min\"]]\n azi_max = obj[render_dic[\"azimuth_angle_max\"]]\n azi_segments= obj[render_dic[\"azimuth_angle_segments\"]]\n azi_random= obj[render_dic[\"azimuth_angle_random_rad\"]]\n try:\n azi_min = float( azi_min.replace(',','.'))\n except:\n pass\n try:\n azi_max = float( azi_max.replace(',','.'))\n except:\n pass\n try:\n azi_segments = float( azi_segments.replace(',','.'))\n except:\n pass\n try:\n azi_random = float( azi_random.replace(',','.'))\n except:\n pass\n azimuth_angle = (azi_min + ((azi_max - azi_min)/(azi_segments))*j)\n position=[polar_angle, pol_random, azimuth_angle, azi_random, obj[render_dic[\"radius\"]] ]\n light_obj=[ obj[render_dic[\"name\"]] , position, obj[render_dic[\"tracking_obj\"]],1000 ]\n print(\"added light_obj: \", light_obj)\n main_class.light_settings.append(light_obj)\n main_class.max_loop_count=len(main_class.camera_settings)*len(main_class.light_settings)\n print(\"loop count is:\", main_class.max_loop_count)\n return", "def init_data():\n\n account1 = Account(name=\"Fermi Corp\", website=\"http://fermigier.com/\")\n db.session.add(account1)\n\n contact1 = Contact(first_name=\"Stefane\", last_name=\"Fermigier\", email=\"[email protected]\")\n contact1.account = account1\n contact2 = Contact(first_name=\"Paul\", last_name=\"Dupont\", email=\"[email protected]\")\n contact2.account = account1\n\n user1 = User(first_name=\"Stefane\", last_name=\"Fermigier\", email=\"[email protected]\", password=\"admin\")\n photo_path = join(dirname(__file__), \"..\", \"dummy_files\", \"mugshot.jpg\")\n user1.photo = open(photo_path).read()\n\n group1 = Group(name=\"Group 1\")\n group1.photo = open(photo_path).read()\n\n user1.join(group1)\n\n #db.session.add(contact1)\n #db.session.add(contact2)\n db.session.add(user1)\n\n db.session.commit()\n\n assert len(Contact.query.all()) == 2\n assert len(Account.query.all()) == 1\n assert len(User.query.all()) == 1\n assert len(Group.query.all()) == 1", "def bills():\n os_bills = Bill()\n os_vote_events = VoteEvent()\n os_bill_sponsors = BillSponsor()\n os_legislator_votes = LegislatorVote()\n\n os_bills.query()\n os_bills.parse()\n\n wiki_functions.write_to_csv_file_for_DataTransfer(os_bills,\n os_bills.bill_table)\n wiki_functions.write_to_csv_file_for_DataTransfer(os_vote_events,\n os_bills.vote_event_table)\n wiki_functions.write_to_csv_file_for_DataTransfer(os_legislator_votes,\n os_bills.legislator_vote_table)\n wiki_functions.write_to_csv_file_for_DataTransfer(os_bill_sponsors,\n os_bills.bill_sponsor_table)", "def load(self):\n db = CrawlDBI.DBI(dbtype='crawler')\n if self.rowid is not None:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"rowid = ?\",\n data=(self.rowid,))\n else:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"path = ?\",\n data=(self.path,))\n if 0 == len(rows):\n self.in_db = False\n elif 1 == len(rows):\n self.in_db = True\n rz = list(rows[0])\n self.rowid = rz.pop(0)\n self.path = rz.pop(0)\n self.type = rz.pop(0)\n self.cos = rz.pop(0)\n 
self.cart = rz.pop(0)\n self.ttypes = rz.pop(0)\n self.checksum = rz.pop(0)\n self.last_check = rz.pop(0)\n try:\n self.fails = rz.pop(0)\n except IndexError:\n self.fails = 0\n try:\n self.reported = rz.pop(0)\n except IndexError:\n self.reported = 0\n self.dirty = False\n else:\n raise StandardError(\"There appears to be more than one copy \" +\n \"of %s in the database\" % self)\n\n db.close()", "def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()", "def init_classes():\r\n\r\n\tglobal data\r\n\r\n\tif data is None:\r\n\t\twith app.app_context():\r\n\t\t\tprint \"initializing db\"\r\n\t\r\n\t\t\tdata = status.DataManager( db_path=dbPath, ext=pycklerext )\r\n\t\r\n\t\t\tprint \"db loaded\"\r\n\r\n\telse:\r\n\t\twith app.app_context():\r\n\t\t\tprint \"updating db\"\r\n\t\t\tdata.loadlast()\r\n\t\t\tprint \"db updated\"\r\n\r\n\twith app.app_context():\r\n\t\tg.modules = {\r\n\t\t\t'memall': get_mem_all,\r\n\t\t\t'memone': get_mem_one\r\n\t\t}\r\n\t\tg.glanularity = 60", "def at_object_creation(self):\n self.db.max_hp = 100 # Set maximum HP to 100\n self.db.hp = self.db.max_hp # Set current HP to maximum\n self.db.spells_known = [] # Set empty spells known list\n self.db.max_mp = 20 # Set maximum MP to 20\n self.db.mp = self.db.max_mp # Set current MP to maximum", "def init2(self):\n self.skill_points = self.count_skill_points()\n self.count_saves()\n self.lives = self.count_lives()\n self.base_attack = fetch_data.get_base_attack(self.BASE_ATTACK_LVLS, self.lvl)", "def _load(self, create):\n if not self.db.has_key('size'):\n if create:\n # It's a new database, initialize the special keys\n self.db['head'] = 0\n self.db['count'] = 0\n self.db['size'] = self.newSize\n\n # Cache the special keys\n self.head = self.db['head']\n self.count = self.db['count']\n self.size = self.db['size']\n self._loaded = True", "def load_database(self):\n # If there is already data, do not load\n if self:\n raise DatabaseError('Data already loaded!')\n\n # Gather all data from the table\n data = self.cursor.execute(\n 'SELECT unique_id, name, wins, time_stamp, '\n 'last_win FROM gungame_winners'\n )\n data = data.fetchall()\n\n # Are there no winners to add?\n if not data:\n return\n\n # Loop through all the past winners and their data\n for unique_id, name, wins, time_stamp, last_win in data:\n\n # Add the current winner to the database\n instance = self[unique_id]\n instance.name = name\n instance.wins = int(wins)\n instance.time_stamp = float(time_stamp)\n instance.last_win = float(last_win)", "def loaddata(self):\n # Connect to the db\n self.conn, self.c = self.connect_db(self.dbname)\n # create the bdefile table to \n self.c.execute(oeeutil.sql_create_bdefile_table)\n # Delete any previous records\n self.c.execute('DELETE FROM bdefile')\n # hold the content for analysis\n for item in self.content:\n self.c.execute('INSERT INTO bdefile VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', item)\n self.c.executescript(oeeutil.sql_create_bdefile_view)\n self.conn.commit()", "def reconstruct(self):\n if os.path.exists(self.dbname):\n with open(self.dbname, mode='rb') as db:\n self.cache = pickle.load(db)", "def populate_mobo():\n global MoboListing\n \n for item in moboList:\n mobo = \"%s\" % item[0]\n price = \"%s\" % item[1]\n \n try:\n MoboListing.objects.get(moboList = mobo, moboPrice = price)\n \n except MoboListing.DoesNotExist:\n print mobo\n mydb = MoboListing.objects.create(moboList = mobo, moboPrice = price)\n print mydb\n mydb.save()", "def 
sync_with_database(self):\n # learn from db\n lports = self.nb_api.get_all(l2.LogicalPort)\n for lport in lports:\n port_id = \"{}:{}\".format(lport.lswitch.id, lport.id)\n self.cache_logical_port_by_port_id[port_id] = lport\n lrouters = self.nb_api.get_all(l3.LogicalRouter)\n for lrouter in lrouters:\n self.cache_logical_router_by_dpid[lrouter.id] = lrouter", "def _load(self) -> None:\n self.record = self._saved_record\n self.counter = self._saved_counter\n self.current_objects = self._saved_objects", "def __init__(self,):\n self.logger = conf._init_logger(logger=conf.LOGGER_ORM)\n self.logger = logging.getLogger(conf.LOGGER_ORM)\n\n self.logger.info(\"[+] Initilizing Orm [+]\")\n\n\n self.engine = sqlalchemy.create_engine(\n f\"mysql+mysqldb://{conf.DB_USER}:{conf.DB_PASSWORD}@{conf.DB_ADRESS}/{conf.DB_NAME}\")\n self.metadata = sqlalchemy.MetaData(bind=self.engine)\n self.metadata.reflect(only=[\"examens\", \"sections\", \"patients\", \"medecins\", \"types_intervention\"])\n self.conn = self.engine.connect()\n \"\"\"\n Load the ORM of different table into the class\n \"\"\"\n self.check_table()\n self.hl7_connections = sqlalchemy.Table(\"hl7_connections\", self.metadata)\n self.examens = sqlalchemy.Table(\"examens\", self.metadata)\n self.sections = sqlalchemy.Table(\"sections\", self.metadata)\n self.patients = sqlalchemy.Table(\"patients\", self.metadata)\n self.medecins = sqlalchemy.Table(\"medecins\", self.metadata)\n self.types_interventions = sqlalchemy.Table(\"types_intervention\", self.metadata)\n self.logger.info(\"[+] Orm initialized [+]\")", "def api_db_load(vend, buy, db_compare=False, compressed=True):\r\n #находим последнюю сжатую базу\r\n if compressed:\r\n db_conv_time = last_db_time_get()\r\n if not db_compare:\r\n if convert_time(db_conv_time, 'm') > 9: #9\r\n print(\"Old database...\")\r\n #собираем ссылку\r\n link = 'https://api.originsro.org/api/v1/market/list?api_key='\r\n api_key = ''\r\n #делаем вызов и сохраняем получаемое в переменной\r\n response = rq.get('{}{}'.format(link, api_key))\r\n #создаем путь/имя для нашей новой базы\r\n latest_file = '{}\\\\jsons\\\\DB_{}.json.gz'.format(db_folder_path, now.strftime(\"%d-%m-%Y_%H-%M-%S\"))\r\n #сохраняем нашу новую базу\r\n compress_json.dump(response.text, latest_file)\r\n print('compressed db saved')\r\n #и сразу же загружаем\r\n #actual_db = compress_json.local_load(latest_file)\r\n #находим 2 по старости дб (то есть ту, которая была перед нашей только что выкачанной)\r\n previous_db = sorted(glob.iglob('{}\\\\jsons\\\\DB_*.json.gz'.format(db_folder_path)), key=os.path.getctime)[-2]\r\n #распаковываем его\r\n #previous_db_uncompressed = compress_json.local_load(previous_db)\r\n #создаем временные листы с данными\r\n #из прошлой бд\r\n pre_vendor_shops = []\r\n pre_buy_shops = []\r\n get_vend_data(previous_db, pre_vendor_shops, pre_buy_shops)\r\n #из новой бд\r\n print(latest_file)\r\n get_vend_data(latest_file, vend, buy)\r\n #fill demand file\r\n compare_data(pre_vendor_shops, vend)\r\n compare_data(pre_buy_shops, buy)\r\n print('demand refreshed')\r\n else:\r\n print('Database ok...')\r\n latest_file = max(glob.glob('{}\\\\jsons\\\\DB_*.json.gz'.format(db_folder_path)), key=os.path.getctime)\r\n get_vend_data(latest_file, vend, buy)\r\n else:\r\n compare_db()\r\n else:\r\n #находим самую свежую базу из имеющихся\r\n latest_file = max(glob.glob('{}\\\\jsons\\\\DB_*.json'.format(db_folder_path)), key=os.path.getctime)\r\n #Загрузка БД и ее обновление\r\n with open(latest_file, encoding=\"utf8\") as json_file:\r\n 
#делаем удобное обозначение той же базы\r\n api_db = json.load(json_file)['generation_timestamp']\r\n db_time = datetime.datetime.strptime(api_db, \"%Y-%m-%dT%H:%M:%S.%fZ\")\r\n if not db_compare:\r\n #если базе больше 10 минут, надо обновить\r\n if convert_time(db_time, 'm') > 10: #9\r\n print(\"Old database...\")\r\n link = 'https://api.originsro.org/api/v1/market/list?api_key='\r\n api_key = 'r3bkd0q8umxhuj75ahtvwpgkd3yzi3rm'\r\n response = rq.get('{}{}'.format(link, api_key))\r\n latest_file = '{}\\\\jsons\\\\DB_{}.json'.format(db_folder_path, now.strftime(\"%d-%m-%Y_%H-%M-%S\"))\r\n #создаем новую бд с названием из latest_file\r\n with open(latest_file, \"w+\", encoding=\"utf8\") as new_file:\r\n new_file.write(response.text)\r\n #создаем новую, сжатую, бд\r\n #compress_json.dump(response.text, latest_file)\r\n #находим второй по новости файл, то есть прошлую бд\r\n previous_db = sorted(glob.iglob('{}\\\\jsons\\\\DB_*.json'.format(db_folder_path)), key=os.path.getctime)[-2]\r\n #создаем временные листы с данными\r\n #из прошлой бд\r\n pre_vendor_shops = []\r\n pre_buy_shops = []\r\n get_vend_data(previous_db, pre_vendor_shops, pre_buy_shops)\r\n #из новой бд\r\n get_vend_data(latest_file, vend, buy)\r\n #fill demand file\r\n compare_data(pre_vendor_shops, vend)\r\n compare_data(pre_buy_shops, buy)\r\n else:\r\n print('Database ok...')\r\n get_vend_data(latest_file, vend, buy)\r\n else:\r\n compare_db()", "def load_data(db_handler):\n\n from random import seed\n from random import random\n \n seed(1)\n\n new_notes = []\n\n for i in range(1,10):\n\n new_notes.append({\n\n\n 'title': str(i) + str(random()),\n 'content': 'Lorem ipsum' + str(i),\n 'active': True,\n 'created_by':\"Cristhian\" + str(i),\n 'created_at': date.today(),\n 'edited_at':date.today(),\n \n })\n\n new_notes.append(\n {\n \"active\": False,\n \"content\": \"Jesenia\",\n \"edited_at\": \"2019-10-24\",\n \"title\": \"Jesenia La chica de al lado\",\n \"created_by\": \"Cristhian1\",\n \"created_at\": \"2019-10-24\"\n })\n\n new_notes.append(\n {\n \"active\": False,\n \"title\": \"La vida de los numeros\",\n \"content\": \"Lorem ipsum y los numeros de la muerte\",\n \"edited_at\": \"2019-10-25\",\n \"created_by\": \"Jesenia\",\n \"created_at\": \"2019-10-24\"\n })\n\n Note.insert_many(new_notes).execute()\n\n User(name=\"Cristhian\", email=\"[email protected]\",\n password=b'$2b$12$U/QjtHt/j0xRT4r8Hx3fOe93EssM6M0iiUaQJOrTd64RXbxvhw6Ii').save()", "def __init__(self):\n self.databases = []", "def init_dev_data():\n db.drop_all()\n db.create_all()\n print(\"Initialized Connect 4 Database.\")\n\n g = Game()\n db.session.add(g)\n\n g2 = Game()\n db.session.add(g2)\n\n p1 = Player(username=\"tow\", password = \"0000\", birthday=datetime.datetime.strptime('11/06/1991', '%m/%d/%Y').date())\n p2 = Player(username=\"twaits\", password = \"0000\", birthday=datetime.datetime.strptime('01/14/1987', '%m/%d/%Y').date())\n p3 = Player(username=\"pie\", password = \"0000\", birthday=datetime.datetime.strptime('01/15/1987', '%m/%d/%Y').date())\n\n\n db.session.add(p1)\n print(\"Created %s\" % p1.username)\n db.session.add(p2)\n print(\"Created %s\" % p2.username)\n db.session.add(p3)\n print(\"Created %s\" % p3.username)\n\n g.player_one = p1\n g.player_two = p2\n\n g.turn = 7\n g.winner_id = 1\n g.creator_id = 2\n\n g2.player_one = p1\n g2.player_two = p3\n\n g2.turn = 11\n g2.winner_id = 3\n g2.creator_id = 1\n \n db.session.commit()\n print(\"Added dummy data.\")", "def _populate():\n models.Base.metadata.create_all(engine)\n 
logger.info(\"Initalized database\")\n db = Session()\n\n hermann = models.Account(id=\"test\",\n name=\"Hermann Dörkschneider\",\n email=\"[email protected]\")\n db.add(hermann)\n\n journey = models.Journey(id=str(uuid.uuid4()),\n account_id=\"test\",\n visibility=\"PUBLIC\",\n start_time_utc=datetime.datetime.now(),\n stop_time_utc=datetime.datetime.now())\n db.add(journey)\n\n waypoint1 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=2.71,\n latitude=3.1416,\n longitude=1.618,\n height_m=10)\n db.add(waypoint1)\n\n waypoint2 = models.Waypoint(journey=journey,\n time_utc=datetime.datetime.now(),\n accuracy_m=5.1,\n latitude=3.1410,\n longitude=1.620,\n height_m=5)\n db.add(waypoint2)\n\n db.commit()\n logger.info(\"Created test account {}\".format(hermann))\n logger.info(\"Created test journey {}\".format(journey))", "def __init__(self, pk):\n try:\n haus = models.Haus.objects.get(pk=pk)\n except ObjectDoesNotExist:\n print(\"haus not found in db\") \n self.haus_nr = haus.haus_nr\n self.display_nr = haus.display_nr\n \n try:\n self.architekt_plan = models.Architekt_plan.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"architekt components not found in db\")\n self.architekt_plan = None\n try:\n self.erd_bau = models.Erdbau.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"erdbau components not found in db\")\n try:\n self.rohbau = models.Rohbau.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"rohbau not found in db\")\n try:\n self.dach = models.Dach.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"dach not found in db\")\n try:\n self.fenster = models.Fenster.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"fenster not found in db\")\n try:\n self.elektro = models.Elektro.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"fenster not found in db\")\n try:\n self.innenputz = models.Innenputz.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"innenputz not found in db\")\n try:\n self.estrich = models.Estrich.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"estrich not found in db\")\n try:\n self.trockenbau = models.Trockenbau.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"trokenbau not found in db\")\n try:\n self.aussenputz = models.Aussenputz.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"aussenputz not found in db\")\n try:\n self.fliesenleger = models.Fliesenleger.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"fliesenleger not found in db\")\n try:\n self.bodenbelaege = models.Bodenbelaege.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"bodenbelaege not found in db\")\n try:\n self.schlosser = models.Schlosser.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"schlosser not found in db\")\n try:\n self.schliessanlage = models.Schliessanlage.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"schliessanlage not found in db\")\n try:\n self.sicherheitstechnik = models.Sicherheitstechnik.objects.get(haus=haus)\n except ObjectDoesNotExist:\n print(\"sicherheitstechnik not found in db\")\n # self.aussenanlagern = models.Aussenanlagern.objects.get(haus=haus)\n try:\n self.tiefgaragenbeschichtung = haus.tiefgaragenbeschichtung\n except ObjectDoesNotExist:\n print(\"tiefgaragenbeschichtung components not found in db\")", "def __init__(self):\n self.database = Database()\n self.load_config()", "def load_book(self):\n book = self._get_book()\n\n start_time = time()\n\n 
self.sequence = book['sequence']\n load_time = str(dt.now(tz=self.db.tz))\n\n self.db.new_tick({'type': 'load_book',\n 'product_id': self.sym,\n 'sequence': self.sequence})\n\n for bid in book['bids']:\n msg = {\n 'price': float(bid[0]),\n 'size': float(bid[1]),\n 'order_id': bid[2],\n 'side': 'buy',\n 'product_id': self.sym,\n 'type': 'preload',\n 'sequence': self.sequence,\n 'time': load_time\n }\n self.db.new_tick(msg)\n self.bids.insert_order(msg)\n\n for ask in book['asks']:\n msg = {\n 'price': float(ask[0]),\n 'size': float(ask[1]),\n 'order_id': ask[2],\n 'side': 'sell',\n 'product_id': self.sym,\n 'type': 'preload',\n 'sequence': self.sequence,\n 'time': load_time\n }\n self.db.new_tick(msg)\n self.asks.insert_order(msg)\n\n self.db.new_tick({'type': 'book_loaded',\n 'product_id': self.sym,\n 'sequence': self.sequence})\n del book\n self.bids.warming_up = False\n self.asks.warming_up = False\n\n elapsed = time() - start_time\n print('%s: book loaded................in %f seconds' % (self.sym, elapsed))", "def init_database():\n exists = Agent.query.all()\n if exists is None or len(exists) == 0:\n # Setting up agent\n agent = Agent(name='OpenCampus',\n about=\"Este es el chabot de Open Campus capaz de resolver dudas sobre los diferentes cursos de la oferta actual de Open Campus\")\n\n db.session.add(agent)\n\n # Setting upd properties\n\n description_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/description\")\n begin_date_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/beginDate\")\n end_date_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/endDate\")\n requirement_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/requirement\")\n duration_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/duration\")\n cost_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/cost\")\n teacher_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/teacherName\")\n content_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/content\")\n course_name_prop = Property(name=\"http://127.0.0.1/ockb/course/ontology/courseName\")\n\n # db.session.add(description_prop)\n # db.session.add(begin_date_prop)\n # db.session.add(end_date_prop)\n # db.session.add(requirement_prop)\n # db.session.add(duration_prop)\n # db.session.add(cost_prop)\n # db.session.add(teacher_name_prop)\n # db.session.add(content_name_prop)\n # db.session.add(course_name_prop)\n\n # Setting up answers\n\n ObtenerInformacionAnswer = Answer(uri=\"ObtenerInformacionAnswer\", answer_template=\"{%description%}\",\n properties=[description_prop])\n\n # db.session.add(ObtenerInformacionAnswer)\n ObtenerFechasAnswer = Answer(uri=\"ObtenerFechasAnswer\",\n answer_template=\"Las fechas importantes del curso son {%beginDate%} y termina el dia {%endDate%}\",\n properties=[begin_date_prop, end_date_prop])\n\n # db.session.add(ObtenerFechasAnswer)\n ObtenerFechasInicioAnswer = Answer(uri=\"ObtenerFechasInicioAnswer\",\n answer_template=\"El curso inicia el dia {%beginDate%}\",\n properties=[begin_date_prop])\n # db.session.add(ObtenerFechasInicioAnswer)\n ObtenerFechasFinAnswer = Answer(uri=\"ObtenerFechasFinAnswer\",\n answer_template=\"El curso finaliza el dia {%endDate%}\",\n properties=[end_date_prop])\n # db.session.add(ObtenerFechasFinAnswer)\n ObtenerPrerequisitosAnswer = Answer(uri=\"ObtenerPrerequisitosAnswer\",\n answer_template=\"Los prerequisitos del curso son {%requirement%}\",\n properties=[requirement_prop])\n # 
db.session.add(ObtenerPrerequisitosAnswer)\n ObtenerDuracionAnswer = Answer(uri=\"ObtenerDuracionAnswer\",\n answer_template=\"El curso tiene una duracion de {%duration%}\",\n properties=[duration_prop])\n # db.session.add(ObtenerDuracionAnswer)\n ObtenerPrecioAnswer = Answer(uri=\"ObtenerPrecioAnswer\", answer_template=\"{%cost%}\", properties=[cost_prop])\n # db.session.add(ObtenerPrecioAnswer)\n ObtenerDocenteAnswer = Answer(uri=\"ObtenerDocenteAnswer\",\n answer_template=\"El docente encargado del curso es {%teacherName%}\",\n properties=[teacher_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasTeacher\")\n # db.session.add(ObtenerDocenteAnswer)\n ObtenerContenidosAnswer = Answer(uri=\"ObtenerContenidosAnswer\",\n answer_template=\"Los contenidos a tratar en el curso son {%content%}\",\n properties=[content_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasContenido\")\n # db.session.add(ObtenerContenidosAnswer)\n ListarCursosAnswer = Answer(uri=\"ListarCursosAnswer\",\n answer_template=\"Los cursos de la oferta actual son: {%courseName%}\",\n properties=[course_name_prop],\n refers_to=\"http://127.0.0.1/ockb/course/ontology/hasCourse\",\n answer_from=\"http://127.0.0.1/ockb/resources/OpenCampusFebrero-Julio\")\n\n # Setting up resolution\n ObtenerInformacionResolution = Resolution(uri=\"ObtenerInformacionResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasResolution = Resolution(uri=\"ObtenerFechasResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasInicioResolution = Resolution(uri=\"ObtenerFechasInicioResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerFechasFinResolution = Resolution(uri=\"ObtenerFechasFinResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerPrerequisitosResolution = Resolution(uri=\"ObtenerPrerequisitosResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerDuracionResolution = Resolution(uri=\"ObtenerDuracionResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerPrecioResolution = Resolution(uri=\"ObtenerPrecioResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerDocenteResolution = Resolution(uri=\"ObtenerDocenteResolution\", question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n ObtenerContenidosResolution = Resolution(uri=\"ObtenerContenidosResolution\",\n question=\"De que cursos deseas conocer\",\n resolves=\"http://127.0.0.1/ockb/course/resource/Course\")\n\n # Setting up Entity\n\n curso_entity = Entity(name=\"http://127.0.0.1/ockb/course/resource/Course\")\n\n # setting up synonyms:\n Synonym(name=\"Mooc\", entity=curso_entity)\n Synonym(name=\"Taller\", entity=curso_entity)\n Synonym(name=\"Curso\", entity=curso_entity)\n Synonym(name=\"Open Course\", entity=curso_entity)\n\n # Setting up intents\n\n ObtenerInformacion = Intent(name=\"ObtenerInformacion\", agent=agent,\n description=\"Obtener una breve descripcion del curso\",\n answer=ObtenerInformacionAnswer, resolution=ObtenerInformacionResolution,\n entities=[curso_entity])\n ObtenerFechas = 
Intent(name=\"ObtenerFechas\", agent=agent,\n description=\"Obtener las fechas importantes del curso\",\n answer=ObtenerFechasAnswer, resolution=ObtenerFechasResolution, entities=[curso_entity])\n ObtenerFechasInicio = Intent(name=\"ObtenerFechasInicio\", agent=agent,\n description=\"Obtener las fechas de inicio del curso\",\n answer=ObtenerFechasInicioAnswer, resolution=ObtenerFechasInicioResolution,\n entities=[curso_entity])\n ObtenerFechasFin = Intent(name=\"ObtenerFechasFin\", agent=agent,\n description=\"Obtener las fechas de finalizacion del curso\",\n answer=ObtenerFechasFinAnswer, resolution=ObtenerFechasFinResolution,\n entities=[curso_entity])\n ObtenerPrerequisitos = Intent(name=\"ObtenerPrerequisitos\", agent=agent,\n description=\"Obtener prerequisitos del curso\",\n answer=ObtenerPrerequisitosAnswer,\n resolution=ObtenerPrerequisitosResolution)\n ObtenerDuracion = Intent(name=\"ObtenerDuracion\", agent=agent,\n description=\"Obtener la duracion del curso\", answer=ObtenerDuracionAnswer,\n resolution=ObtenerDuracionResolution, entities=[curso_entity])\n ObtenerPrecio = Intent(name=\"ObtenerPrecio\", agent=agent, description=\"Obtener el precio del curso\",\n answer=ObtenerPrecioAnswer, resolution=ObtenerPrecioResolution, entities=[curso_entity])\n ObtenerDocente = Intent(name=\"ObtenerDocente\", agent=agent,\n description=\"Obtener los nombres de los docentes del curso\",\n answer=ObtenerDocenteAnswer, resolution=ObtenerDocenteResolution,\n entities=[curso_entity])\n ObtenerContenidos = Intent(name=\"ObtenerContenidos\", agent=agent,\n description=\"Obtener los contenidos del curso\",\n answer=ObtenerContenidosAnswer, resolution=ObtenerDocenteResolution,\n entities=[curso_entity])\n ListarCursos = Intent(name=\"ListarCursos\", agent=agent,\n description=\"Presentar la oferta actual de cursos\", answer=ListarCursosAnswer,\n resolution=ObtenerContenidosResolution)\n # Setting up sentences\n\n Sentence(intent=ObtenerInformacion, sentence=\"De que trata el mooc?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Quiero informacion del curso de emprendimiento\")\n Sentence(intent=ObtenerInformacion, sentence=\"Muestrame un resumen del mooc?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Breve introducción al curso\")\n Sentence(intent=ObtenerInformacion, sentence=\"que es emprendimiento\")\n Sentence(intent=ObtenerInformacion, sentence=\"De que se trata el curso?\")\n Sentence(intent=ObtenerInformacion, sentence=\"De qué va el curso?\")\n Sentence(intent=ObtenerInformacion, sentence=\"Me ayudas con información acerca del curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuáles son las fechas importantes del curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Fechas clave del curso\")\n Sentence(intent=ObtenerFechas, sentence=\"Que fechas debo tomar en cuenta\")\n Sentence(intent=ObtenerFechas, sentence=\"fechas de inicio y fin\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuándo comienza el curso?\")\n Sentence(intent=ObtenerFechas, sentence=\"Fechas importantes del curso de inteligencia artificial\")\n Sentence(intent=ObtenerFechas, sentence=\"Cuáles son las fechas importantes del curso de emprendimiento\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Cuándo inicia el curso de emprendimiento\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Cuándo empiezan los cursos ?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Fecha de inicio de los moocs?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"Día de inicio de los moocs ?\")\n 
Sentence(intent=ObtenerFechasInicio, sentence=\"En que fecha inician los moocs?\")\n Sentence(intent=ObtenerFechasInicio, sentence=\"A partir de que fecha empiezan los mooc?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"Cuando finaliza el curso?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"En que fecha termina el curso?\")\n Sentence(intent=ObtenerFechasFin, sentence=\"Cuando termina el curso?\")\n Sentence(intent=ObtenerPrerequisitos,\n sentence=\"Cuáles son los requisitos necesarios para el curso de emprendimiento\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Cuáles son los prerequisitos?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Requisitos previos de ingreso al curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Dame a conocer los prerequisitos\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Me puedes indicar los prerequistos necesarios?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Que necesito saber antes de iniciar el curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"que se necesita saber para este curso\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Los pre requisitos cuales son?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué se necesita?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué debería saber para tomar el curso?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué conocimientos previos debo tener?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Qué tengo que saber?\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Requisitos previos\")\n Sentence(intent=ObtenerPrerequisitos, sentence=\"Conocimientos previos\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuanto dura el curso de empendimiento\")\n Sentence(intent=ObtenerDuracion, sentence=\"Duración del curso\")\n Sentence(intent=ObtenerDuracion, sentence=\"Número de horas del mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"En cuántas semanas se realiza el curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuanto dura el curso\")\n Sentence(intent=ObtenerDuracion, sentence=\"Tiempo que dura un curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"cuanto puede durar un curso mooc\")\n Sentence(intent=ObtenerDuracion, sentence=\"cuanto dura el curso?\")\n Sentence(intent=ObtenerDuracion, sentence=\"cual es la duracion de psicologia social?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuánto tiempo dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"De cuántas semanas es el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuántas horas dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"Cuánto tiempo dura el mooc?\")\n Sentence(intent=ObtenerDuracion, sentence=\"De cuántas semanas es el mooc?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cual es el precio del curso de emprendimiento\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuál es el precio?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuánto vale?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Valor del curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Costo\")\n Sentence(intent=ObtenerPrecio, sentence=\"Inversión total del curso\")\n Sentence(intent=ObtenerPrecio, sentence=\"cual es el valor de los componentes?\")\n Sentence(intent=ObtenerPrecio, sentence=\"costo de los cursos?\")\n Sentence(intent=ObtenerPrecio, sentence=\"cuanto cuesta los cursos\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuál es precio del curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuánto cuesta el mooc de Administración 
Empresarial?\")\n Sentence(intent=ObtenerPrecio, sentence=\"tiene algun valor los cursos?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto cuesta el curso Método Toyota?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto cuesta un curso?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Que vale el curso ?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Es gratis?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Cuanto vale el mooc?\")\n Sentence(intent=ObtenerPrecio, sentence=\"Precio\")\n Sentence(intent=ObtenerDocente, sentence=\"Cual es el docente del curso de emprendimiento\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es mi profesor en el curso?\")\n Sentence(intent=ObtenerDocente, sentence=\"Docente del mooc?\")\n Sentence(intent=ObtenerDocente, sentence=\"Qué docente imparte el mooc?\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es el docente encargado de la materia?\")\n Sentence(intent=ObtenerDocente, sentence=\"Nombre del docente del mooc\")\n Sentence(intent=ObtenerDocente, sentence=\"Que profesor esta a cargo del curso\")\n Sentence(intent=ObtenerDocente, sentence=\"cual es mi docente del mooc\")\n Sentence(intent=ObtenerDocente, sentence=\"información del docente\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién es el docente encargado?\")\n Sentence(intent=ObtenerDocente, sentence=\"Quién va a dar el MOOC\")\n Sentence(intent=ObtenerDocente, sentence=\"Que docente acompaña al estudiante?\")\n Sentence(intent=ObtenerDocente, sentence=\"Cual es el profe de Salud Sexual y Reproductiva\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuáles son los contenidos a tratar en el curos de emprendimiento\")\n Sentence(intent=ObtenerContenidos, sentence=\"Contenido del curso\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuál es la temática de cada curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Qué temas se van a tratar en cada curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"De que se tratan los moocs\")\n Sentence(intent=ObtenerContenidos, sentence=\"Cuáles son las temas del curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Que se va a tratar en este curso?\")\n Sentence(intent=ObtenerContenidos, sentence=\"Qué se va a dar en el curso?\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos hay\")\n Sentence(intent=ListarCursos, sentence=\"Muestrame los cursos\")\n Sentence(intent=ListarCursos, sentence=\"Cual es la oferta actual\")\n Sentence(intent=ListarCursos, sentence=\"Cuentame que cursos tienes\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos me ofreces\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos estan disponibles\")\n Sentence(intent=ListarCursos, sentence=\"Listame los cursos\")\n Sentence(intent=ListarCursos, sentence=\"Que cursos tiene\")\n\n # db.session.add(intent_obtenerinformacion)\n # db.session.add(intent_obtenerfechas)\n # db.session.add(intent_obtenerfechasinicio)\n # db.session.add(intent_obtenerfechasfin)\n # db.session.add(intent_obtenerprerequisitos)\n # db.session.add(intent_obtenerduracion)\n # db.session.add(intent_obtenerprecio)\n # db.session.add(intent_obtenerdocente)\n # db.session.add(intent_obtenercontenidos)\n # db.session.add(intent_listarCursos)\n\n db.session.commit()", "def _init(self):\n if os.path.exists(self.fname):\n with open(self.fname, \"rb\") as fh:\n self.db = pickle.load(fh)\n else:\n self.db = {}\n print(\"DB loaded, len\", len(self.db))", "def init():\n global __dbChanged\n __dbChanged = False\n loadItemsFromFile()\n global __isInitialized\n if __isInitialized 
== True:\n return\n\n Report.trace(\"itemdatabase\", \"Initializing the item database.\")\n returnCode = FilterManager.executeFiltersForEvent(\"preInitializeItemDatabase\")\n if returnCode == FilterManager.FINISHED:\n Report.trace(\"itemdatabase\", \"Item database initialized.\")\n __isInitialized = True\n return\n\n for item in Items.items:\n addItem(item)\n\n FilterManager.executeFiltersForEvent(\"postInitializeItemDatabase\")\n __isInitialized = True\n Report.trace(\"itemdatabase\", \"Item database initialized.\")", "def __init__(self):\n self.user_dao = MongoUserDAO()\n self.meta_game_dao = MongoMetaGameDAO()", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')" ]
[ "0.5736022", "0.56434137", "0.564165", "0.55726427", "0.5544261", "0.5475631", "0.5469645", "0.54065984", "0.53900474", "0.538401", "0.53686905", "0.5361027", "0.5327386", "0.5313991", "0.5283589", "0.5282382", "0.52822435", "0.52694327", "0.52486396", "0.5237471", "0.5233377", "0.5233316", "0.5231493", "0.5223977", "0.521843", "0.5213118", "0.5193911", "0.5192112", "0.51892203", "0.517798" ]
0.6783798
0
move_class method uses MoveClassRefactoringListener to move the class to the target package
def move_class(token_stream, parse_tree, args): move_class_listener = MoveClassRefactoringListener( common_token_stream=token_stream, source_package=source_package, target_package=target_package, class_identifier=class_identifier, filename=args.file, dirname=directory ) walker = ParseTreeWalker() walker.walk(t=parse_tree, listener=move_class_listener) with open(args.file, mode='w', newline='') as f: f.write(move_class_listener.token_stream_rewriter.getDefaultText().replace("\r", ""))
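A minimal driver sketch for the function above, not part of the original snippet: it assumes the ANTLR-generated JavaLexer and JavaParserLabeled classes plus MoveClassRefactoringListener are importable from this project (the gen.* and listener import paths are guesses), and that the module-level globals used by move_class (source_package, target_package, class_identifier, directory) have already been set.

import argparse
from antlr4 import FileStream, CommonTokenStream
from gen.JavaLexer import JavaLexer                  # assumed path to the generated lexer
from gen.JavaParserLabeled import JavaParserLabeled  # assumed path to the generated parser
from listeners import MoveClassRefactoringListener  # assumed path to the project listener


def refactor_one_file(path):
    # Build the token stream and parse tree for one Java source file.
    stream = FileStream(path, encoding='utf8')
    token_stream = CommonTokenStream(JavaLexer(stream))
    parse_tree = JavaParserLabeled(token_stream).compilationUnit()
    # move_class rewrites the file in place, relocating the class to target_package.
    move_class(token_stream, parse_tree, argparse.Namespace(file=path))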
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_move_class_propagation(token_stream, parse_tree, args):\n has_import = False\n has_exact_import = False\n\n file_to_check = open(file=args.file, mode='r')\n for line in file_to_check.readlines():\n text_line = line.replace('\\n', '').replace('\\r', '').strip()\n if (text_line.startswith('import') and text_line.endswith(source_package + '.' + class_identifier + ';')) \\\n or (text_line.startswith('import') and text_line.endswith(source_package + '.*;')):\n has_import = True\n break\n if (text_line.startswith('import') and text_line.endswith(target_package + '.' + class_identifier + ';')) \\\n or (text_line.startswith('import') and text_line.endswith(target_package + '.*;')):\n has_exact_import = True\n break\n\n if not has_exact_import:\n print(f\"Start checking file \\\"{file_to_check.name}\\\" *** {file_counter}/100\")\n\n replace_dependent_object_listener = ReplaceDependentObjectsListener(\n common_token_stream=token_stream, source_package=source_package, target_package=target_package,\n class_identifier=class_identifier, filename=args.file, has_import=has_import\n )\n walker = ParseTreeWalker()\n walker.walk(t=parse_tree, listener=replace_dependent_object_listener)\n\n with open(args.file, mode='w', newline='') as f:\n f.write(replace_dependent_object_listener.token_stream_rewriter.getDefaultText().replace(\"\\r\", \"\"))\n\n print(f\"Finish checking file \\\"{file_to_check.name}\\\" *** {file_counter}/100\")", "def move_to_new_class(self, elements_to_move):\n for element in elements_to_move:\n place = self._place[element]\n place.delete()\n self.add_class(elements_to_move)", "def move_class_col(dfr, column_to_move):\n cols = list(dfr.columns.values)\n cols.pop(cols.index(column_to_move))\n dfr = dfr[cols + [column_to_move]]\n return dfr", "def recursive_walk(dir_path):\n global filename\n args = get_argument_parser(source_file_path)\n parse_tree, token_stream = get_parse_tree_token_stream(args)\n\n # check if the class has dependencies on other classes in the same class\n pre_condition_listener = MoveClassPreConditionListener()\n walker = ParseTreeWalker()\n walker.walk(t=parse_tree, listener=pre_condition_listener)\n\n filename_without_extension, extension = os.path.splitext(filename)\n if extension == '.java':\n move_class(token_stream, parse_tree, args)\n else:\n raise ValueError(f\"The filename format must be \\\".java\\\", but found {extension}!\")\n\n for dirname, dirs, files in os.walk(dir_path):\n for file in files:\n if file == filename or file == class_identifier + '.java':\n continue\n file_without_extension, extension = os.path.splitext(file)\n if extension == '.java':\n args = get_argument_parser(os.path.join(dirname, file))\n parse_tree, token_stream = get_parse_tree_token_stream(args)\n post_move_class_propagation(token_stream, parse_tree, args)", "def main(udb_path, source_package, source_class, method_name, target_classes: list, *args, **kwargs):\n\n db = und.open(udb_path)\n source_class_ents = db.lookup(f\"{source_package}.{source_class}\", \"Class\")\n target_class_ents = []\n source_class_ent = None\n\n if len(source_class_ents) == 0:\n config.logger.error(f\"Cannot find source class: {source_class}\")\n db.close()\n return False\n else:\n for ent in source_class_ents:\n if ent.simplename() == source_class:\n source_class_ent = ent\n break\n if source_class_ent is None:\n config.logger.error(f\"Cannot find source class: {source_class}\")\n db.close()\n return False\n\n method_ent = db.lookup(f\"{source_package}.{source_class}.{method_name}\", 
\"Method\")\n if len(method_ent) == 0:\n config.logger.error(f\"Cannot find method to pushdown: {method_name}\")\n db.close()\n return False\n else:\n method_ent = method_ent[0]\n\n for ref in source_class_ent.refs(\"extendBy\"):\n if ref.ent().simplename() not in target_classes:\n config.logger.error(\"Target classes are not children classes\")\n db.close()\n return False\n target_class_ents.append(ref.ent())\n\n for ref in method_ent.refs(\"callBy\"):\n if ref.file().simplename().split(\".\")[0] in target_classes:\n continue\n else:\n config.logger.error(\"Method has dependencies.\")\n db.close()\n return False\n\n # Remove field from source class\n listener = parse_and_walk(\n file_path=source_class_ent.parent().longname(),\n listener_class=CutMethodListener,\n has_write=True,\n source_class=source_class,\n method_name=method_name,\n debug=False\n )\n\n # Insert field in children classes\n for target_class in target_class_ents:\n parse_and_walk(\n file_path=target_class.parent().longname(),\n listener_class=PasteMethodListener,\n has_write=True,\n source_class=target_class.simplename(),\n method_content=listener.method_content,\n import_statements=listener.import_statements,\n debug=False\n )\n db.close()", "def move(self):\n pass", "def move(self):\n raise NotImplementedError", "def move(self, move):\n raise NotImplementedError()", "def _patch_remaining_classes(original_classes):\n # type: (Dict[str, type]) -> None\n # check which classes have actually been instrumented\n instrumented_classes = {}\n\n for package in list(original_classes.keys()):\n original_path = CLASSES_TO_INSTRUMENT[package]\n\n try:\n cls = _import_by_path(original_path)\n except (AttributeError, ImportError):\n logger.debug(\n \"[OTel] Failed to check if class has been instrumented: %s\",\n original_path,\n )\n del original_classes[package]\n continue\n\n if not cls.__module__.startswith(\"opentelemetry.\"):\n del original_classes[package]\n continue\n\n instrumented_classes[package] = cls\n\n if not instrumented_classes:\n return\n\n # replace occurrences of the original unpatched class in sys.modules\n for module_name, module in sys.modules.copy().items():\n if (\n module_name.startswith(\"sentry_sdk\")\n or module_name in sys.builtin_module_names\n ):\n continue\n\n for package, original_cls in original_classes.items():\n for var_name, var in vars(module).copy().items():\n if var == original_cls:\n logger.debug(\n \"[OTel] Additionally patching %s from %s\",\n original_cls,\n module_name,\n )\n\n setattr(module, var_name, instrumented_classes[package])", "def move_command():\n return Command().command(_move).require_migration().require_clean().with_database(write=True).with_commit()", "def pre_move_hook(self, from_module, to_module):\n raise NotImplementedError()", "def check_classes(class_name: str) -> str:\n classes_list = []\n class_directory = base_directory\n # Print out all classes in teh class_directory\n for i in os.listdir(class_directory):\n if i.startswith(\".\"):\n pass\n else:\n # Append name off classes to the list\n classes_list.append(i)\n # Check to see if the name of the class is in the list\n if class_name in classes_list:\n current_directory = os.path.join(base_directory, class_name, \"/\")\n return current_directory\n else:\n cprint(f\"{class_name} is not a class, creating new folder\", \"red\")\n new_directory = os.path.join(class_directory, class_name)\n os.mkdir(new_directory)\n cprint(f\"path {new_directory} created\", \"red\")\n return new_directory", "def move(): #py:move\n 
RUR._move_()", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def moveFile(sourceFullPath,targetDir):\n\n thisFunc = inspect.currentframe().f_code.co_name\n try:\n shutil.move(sourceFullPath,targetDir)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def process_class_list(self, module, classes):", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def setup_class(klass):", "def setup_class(klass):", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def reset_class(self, classes):\r\n self._clear_cached_op()\r\n self._classes = classes\r\n if self._pos_iou_thresh >= 1:\r\n self._target_generator = YOLOV3TargetMerger(len(classes), self._ignore_iou_thresh)\r\n for outputs in self.yolo_outputs:\r\n outputs.reset_class(classes)", "def update_fixture_class(self, class_id, template_id):\n fix_template = self.axops_client.get_fixture_template(template_id)\n if not fix_template:\n raise AXApiResourceNotFound(\"Fixture template with id {} not found\".format(template_id))\n with self._class_lock:\n old_class = self.get_fixture_class(id=class_id)\n new_class = self.template_to_class(fix_template)\n if old_class.name != new_class.name or old_class.repo != new_class.repo or old_class.branch != new_class.branch:\n logger.info(\"Rebasing existing class %s from repo: %s branch: %s to template %s name: %s, repo: %s, branch: %s\",\n old_class.name, old_class.repo, old_class.branch, template_id, new_class.name, new_class.repo, new_class.branch)\n if old_class.name != new_class.name:\n # If there is a class rename, check for duplicate before allowing the rename\n duplicate_class = self.get_fixture_class(name=new_class.name, verify_exists=False)\n if duplicate_class:\n raise AXIllegalOperationException(\"Fixture class '{}' already enabled from a different repo/branch: {}/{}\"\n .format(duplicate_class.name, duplicate_class.repo, duplicate_class.branch))\n return self.apply_class_changes(old_class, new_class)", "def setup_class(cls):", "def setup_class(cls):", "def move_file(self, ctx):\n pass", "def move_buildings(self):", "def import_class(self, class_name):\n internal_class_name = class_name.split(\".\")[-1][:-2]\n class_path = class_name.split()[-1].split(\".\")[:-1]\n class_path[0] = class_path[0][1:]\n class_module_path = \".\".join(class_path)\n if internal_class_name in self._project.job_type.job_class_dict:\n module_path = self._project.job_type.job_class_dict[internal_class_name]\n if class_module_path != module_path:\n state.logger.info(\n f'Using registered module \"{module_path}\" instead of custom/old module \"{class_module_path}\" to'\n f' import job type \"{internal_class_name}\"!'\n )\n else:\n module_path = class_module_path\n return getattr(\n importlib.import_module(module_path),\n internal_class_name,\n )", "def _move(self, id: str, parent_id: str) -> MoveFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.move\n request_obj: MoveFolderRequestModel = endpoint.load_request(parent_id=parent_id)\n response: MoveFolderResponseModel = endpoint.perform_request(\n http=self.auth.http,\n request_obj=request_obj,\n id=id,\n )\n return response" ]
[ "0.620782", "0.59406626", "0.57894206", "0.5473868", "0.53981787", "0.5352078", "0.52304465", "0.52080524", "0.51873934", "0.51725787", "0.51608354", "0.5137262", "0.5135531", "0.5134518", "0.5097473", "0.508304", "0.5065531", "0.5065531", "0.5053348", "0.5053348", "0.5044372", "0.50430644", "0.5024996", "0.49849114", "0.49636087", "0.49636087", "0.49621475", "0.49473655", "0.4942692", "0.49409252" ]
0.74356085
0
post_move_class_propagation method propagates the class move to dependent files, updating their imports and references after the class has been moved
def post_move_class_propagation(token_stream, parse_tree, args): has_import = False has_exact_import = False file_to_check = open(file=args.file, mode='r') for line in file_to_check.readlines(): text_line = line.replace('\n', '').replace('\r', '').strip() if (text_line.startswith('import') and text_line.endswith(source_package + '.' + class_identifier + ';')) \ or (text_line.startswith('import') and text_line.endswith(source_package + '.*;')): has_import = True break if (text_line.startswith('import') and text_line.endswith(target_package + '.' + class_identifier + ';')) \ or (text_line.startswith('import') and text_line.endswith(target_package + '.*;')): has_exact_import = True break if not has_exact_import: print(f"Start checking file \"{file_to_check.name}\" *** {file_counter}/100") replace_dependent_object_listener = ReplaceDependentObjectsListener( common_token_stream=token_stream, source_package=source_package, target_package=target_package, class_identifier=class_identifier, filename=args.file, has_import=has_import ) walker = ParseTreeWalker() walker.walk(t=parse_tree, listener=replace_dependent_object_listener) with open(args.file, mode='w', newline='') as f: f.write(replace_dependent_object_listener.token_stream_rewriter.getDefaultText().replace("\r", "")) print(f"Finish checking file \"{file_to_check.name}\" *** {file_counter}/100")
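A sketch (illustrative only) of how the propagation step above might be driven over a whole project, mirroring the recursive_walk helper listed among the negatives: every other .java file is re-parsed and rewritten so its imports and references point at the moved class. project_root and moved_filename are hypothetical names, and the module-level globals used by post_move_class_propagation (source_package, target_package, class_identifier, file_counter) are assumed to be set.

import os
import argparse


def propagate_move(project_root, moved_filename):
    for dirname, _, files in os.walk(project_root):
        for file in files:
            # Skip the file that was moved and anything that is not Java source.
            if file == moved_filename or not file.endswith('.java'):
                continue
            args = argparse.Namespace(file=os.path.join(dirname, file))
            parse_tree, token_stream = get_parse_tree_token_stream(args)
            post_move_class_propagation(token_stream, parse_tree, args)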
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_class(token_stream, parse_tree, args):\n move_class_listener = MoveClassRefactoringListener(\n common_token_stream=token_stream, source_package=source_package, target_package=target_package,\n class_identifier=class_identifier, filename=args.file, dirname=directory\n )\n walker = ParseTreeWalker()\n walker.walk(t=parse_tree, listener=move_class_listener)\n\n with open(args.file, mode='w', newline='') as f:\n f.write(move_class_listener.token_stream_rewriter.getDefaultText().replace(\"\\r\", \"\"))", "def post_process(self, relevant_targets):\r\n pass", "def move(self):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def _post_transform(self):\n # Reclassify strategy post __init__, if needed.\n for (reclassifier, args, kwargs) in self._reclassifiers:\n self.classifier = reclassifier(self.classifier, *args, **kwargs)", "def _post_forward(\n self,\n handles: List[FlatParamHandle],\n reshard_fn: Optional[Callable],\n module: nn.Module,\n input: Any,\n output: Any,\n ) -> Any:\n self._exec_order_data.record_post_forward(handles)\n if reshard_fn is not None:\n reshard_fn()\n # Register pre-backward hooks to unshard the flattened parameters\n # for the gradient computation (if needed)\n output = self._register_pre_backward_hooks(output, handles)\n self.training_state = TrainingState_.IDLE\n for handle in handles:\n handle._training_state = HandleTrainingState.IDLE\n return output", "def handleMove(self):\n pass", "def move(self):\n raise NotImplementedError", "def test_class_ended(self, cls):", "def move(self) -> bool:\n pass", "def _post_forward(\n state: _State,\n handles: List[FlatParamHandle],\n reshard_fn: Callable,\n module: nn.Module,\n input: Any,\n output: Any,\n) -> Any:\n state._exec_order_data.record_post_forward(handles)\n if reshard_fn is not None:\n reshard_fn()\n # Register pre-backward hooks to unshard the flattened parameters\n # for the gradient computation (if needed)\n output = _register_pre_backward_hooks(state, output, handles)\n state.training_state = TrainingState.IDLE\n for handle in handles:\n handle._training_state = HandleTrainingState.IDLE\n return output", "def after_class_creation(cls):\n pass", "def after_move(self):\n self.epsilon -= self.epsilon_decay_rate\n self.epsilon = max(self.epsilon, self.epsilon_min)", "def move_to_new_class(self, elements_to_move):\n for element in elements_to_move:\n place = self._place[element]\n place.delete()\n self.add_class(elements_to_move)", "def pre_move_hook(self, from_module, to_module):\n raise NotImplementedError()", "def move(self, move):\n raise NotImplementedError()", "def post_backward_discriminator(self):\n pass", "def _post_process(self, X, y, check_input=False):\n if check_input:\n X, y = check_X_y(X, y)\n\n n_samples = X.shape[0]\n y_matrix = np.empty((n_samples, self.n_classes_, self.n_estimators))\n original_y = y\n\n for i in xrange(self.n_estimators):\n y_matrix[:,:,i] = self._tree_predict_proba(i, X)\n\n self.coef_ = np.empty((self.n_estimators, self.n_classes_))\n self.intercept_ = np.empty((self.n_classes_,))\n \n self.post_model.set_params(alpha=self.alpha)\n for k in xrange(self.n_classes_):\n if self.n_classes_ > 2:\n y = np.array(original_y == k, dtype=np.float64)\n self.post_model.fit(y_matrix[:,k,:], y)\n self.coef_[:,k] = self.post_model.coef_\n self.intercept_[k] = self.post_model.intercept_\n\n self.coef_[np.abs(self.coef_) < 
EPS] = 0.0\n\n return self", "def _postprocess(self):", "def _move_cleanup(self, ok, elog, start_pos, goal_pos):\n if not ok:\n self.stop()\n if elog:\n self._record_elog_move(start_pos, goal_pos)\n return ok", "def make_move(self, **move_data: typing.Dict[str, typing.Any]) -> bool:\n raise NotImplementedError", "def _absorb_classes(self, ground_to='in'):\n # Find number of new class for inside and outside set\n currnt_classes = np.unique(self.classes)\n new_class_in = currnt_classes.max() + 1\n new_class_out = currnt_classes.max() + 2\n \n # Ground node to new_class\n if ground_to == 'in':\n self.classes[self.ground_node] = new_class_in\n elif ground_to == 'out':\n self.classes[self.ground_node] = new_class_out\n else:\n self.classes[self.ground_node] = -1\n \n cond_out = self.x[:self.ground_node] > self.threshold\n self.classes[:self.ground_node][cond_out] = new_class_out\n cond_out = self.x[self.ground_node:] > self.threshold\n self.classes[self.ground_node+1:][cond_out] = new_class_out\n \n cond_in = self.x[self.ground_node:] <= self.threshold\n self.classes[self.ground_node+1:][cond_in] = new_class_in\n cond_in = self.x[:self.ground_node] <= self.threshold\n self.classes[:self.ground_node][cond_in] = new_class_in", "def multi_label_cls_head__post_process(ctx, self, pred, **kwargs):\n return pred", "def propose_move(self):\n self.mc_move_fcn(self.chain)\n return self.chain.nextviable()", "def step_forward(self):", "def postprocessing(self, postprocessing):\n\n self._postprocessing = postprocessing", "def post_processor(self):" ]
[ "0.5597115", "0.55090684", "0.5470342", "0.5460749", "0.5460749", "0.5460749", "0.5460749", "0.5402134", "0.5387696", "0.5336143", "0.5325261", "0.5315068", "0.53143436", "0.5280691", "0.5276908", "0.52355665", "0.5204402", "0.51873684", "0.51575035", "0.5106735", "0.5092696", "0.50882494", "0.50834024", "0.507692", "0.5076053", "0.5066518", "0.50450945", "0.5037911", "0.50132895", "0.4978435" ]
0.6519298
0
returns the parse tree and token stream based on the file stream
def get_parse_tree_token_stream(args): # Step 1: Load input source into stream stream = FileStream(args.file, encoding='utf8') # Step 2: Create an instance of AssignmentStLexer lexer = JavaLexer(stream) # Step 3: Convert the input source into a list of tokens token_stream = CommonTokenStream(lexer) # Step 4: Create an instance of the AssignmentStParser parser = JavaParserLabeled(token_stream) parser.getTokenStream() # Step 5: Create parse tree parse_tree = parser.compilationUnit() return parse_tree, token_stream
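For reference, a minimal caller for the function above; the args object only needs a .file attribute, and Example.java is a hypothetical input path. The returned pair can then be walked with any of the listeners shown earlier.

import argparse

args = argparse.Namespace(file='Example.java')
parse_tree, token_stream = get_parse_tree_token_stream(args)

# Quick sanity check: print the LISP-style parse tree (rule indexes only).
print(parse_tree.toStringTree())

# The same pair feeds the refactoring listeners, e.g.:
# walker = ParseTreeWalker()
# walker.walk(t=parse_tree, listener=MoveClassRefactoringListener(...))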
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_file(self, path):\r\n return self._parse(antlr3.ANTLRFileStream(path))", "def parse(self):\n print_DBG(\"Parsing master file: \"+self.tokenizer.get_file_information()[0])\n for token_line in self.tokenizer.next_tokenized_line():\n if not token_line[0].isspace():\n if token_line[0] == pu.INCLUDE_FILE_SYM:\n self.tokenizer.open_file(token_line[1])\n print_DBG(\"Parsing file: \"+self.tokenizer.get_file_information()[0])\n self.stats[\"#files\"] += 1\n else:\n self._parse_declaration_initiator(token_line)\n self._expecting_rule = True\n self.stats[\"#declarations\"] += 1\n self._expected_indentation = None\n else:\n self._parse_rule(token_line)\n self._expecting_rule = False # Not expecting but still allowed\n self.stats[\"#rules\"] += 1\n self.tokenizer.close_files()\n print_DBG(\"Parsing finished!\")", "def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)", "def parse(self):\n node = ast.parse(self.__source, filename=self.__path)\n novermin = self.comments()\n return (node, novermin)", "def parse_file(\n self, filename: Path, encoding: Optional[str] = None, debug: bool = False\n ) -> NL:\n with open(filename, encoding=encoding) as stream:\n return self.parse_stream(stream, debug)", "def __parse(self):\n # raw/objects: detect name, type, use major tag for type as parent node\n # raw/graphics: as object raw, but add TILE_PAGE\n # init: usually flat file, except\n # embark_profiles.txt: [PROFILE] is parent\n # interface.txt: [BIND] is parent (legacy will be flat)\n # world_gen.txt: [WORLD_GEN] is parent\n # Non-raw files (unsupported): init/arena.txt, subdirs of raw/objects\n parse_raw(self, self.read(self.filename))", "def parse(self) -> node.KotlinFile:\n return self.parse_kotlin_file()", "def parse(self, stream, media_type=None, parser_context=None):\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n decoded_stream = codecs.getreader(encoding)(stream)\n raw_body = decoded_stream.read()\n request = parser_context.get('request')\n setattr(request, 'raw_body', raw_body)\n filename = self.get_filename(stream, media_type, parser_context)\n if filename and (not filename.endswith('.toml') and not filename.endswith('.tml')):\n filename = f'{filename}.toml'\n setattr(request, 'filename', filename)\n return toml.loads(raw_body)", "def tokenizer(text):\n for entry in text.split('$$$$\\n'):\n if entry.rstrip():\n lines_stream = deque(entry.split('\\n'))\n else:\n continue\n\n # yield from _molfile(stream=lines_stream)\n for token in _molfile(stream=lines_stream):\n yield token\n\n if len(lines_stream):\n # yield from _sdfile(stream=lines_stream)\n for token in _sdfile(stream=lines_stream):\n yield token\n\n yield EndOfFile()", "def make_tree_from_file_content(file_content):\r\n file_tokens = make_file_tokens(file_content)\r\n tree = movetree.MoveTree()\r\n root = movetree.Node()\r\n root.depth = -1\r\n tip = root\r\n branch_point_stack = []\r\n for token in file_tokens:\r\n if token == '(':\r\n branch_point_stack.append(tip)\r\n elif token == ')':\r\n tip = branch_point_stack.pop()\r\n else:\r\n new_move = move_from_token(token)\r\n tip.add_child(new_move)\r\n tip = 
new_move\r\n tree.info = make_info_node(root.children[0].properties)\r\n first_move = root.children[0].children[0]\r\n first_move.parent = tree.root_node\r\n tree.root_node.children.append(first_move)\r\n tree.current_move = tree.root_node\r\n return tree", "def parse_file(self, file):\n return self.parse(file.read())", "def parse(self, infile):\r\n raise NotImplementedError()", "def read(self, stream):\n root = []\n headings = []\n leading_sep = False\n trailing_sep = False\n for (pos, line) in enumerate(stream.read().splitlines()):\n tokens = args.regexp.split(line)\n log.debug('tokens: {tokens}'.format(**locals()))\n\n if pos == 0:\n \"\"\"\n Strip off empty beginning and trailing tokens in case the separator is used as a border\n \"\"\"\n leading_sep = not tokens[0]\n trailing_sep = (tokens[-1] == '') and (len(tokens) > 1)\n\n if leading_sep:\n if tokens[0]:\n # parser.error('Unexpected token under empty leading heading')\n pass\n # del tokens[0]\n pass\n\n if trailing_sep:\n if tokens[-1]:\n # parser.error('Unexpected token under empty trailing heading')\n pass\n # del tokens[-1]\n pass\n\n if args.headings and (pos == 0):\n if not tokens:\n parser.error('No headings')\n headings = tokens\n else:\n if headings:\n if len(tokens) > len(headings):\n parser.error('Column without heading: {tokens} > {headings}'.format(**locals()))\n root.append({heading: tokens[heading_pos] if heading_pos < len(tokens) else ''\n for (heading_pos, heading) in enumerate(headings)})\n else:\n root.append(tokens)\n\n return (root, headings)", "def parse_from_tree(self, parse):\n pass", "def read(self, f):\n return self.parse(f.read())", "def _readstream(self, nml_file, nml_patch_in=None):\n nml_patch = nml_patch_in if nml_patch_in is not None else Namelist()\n\n tokenizer = Tokenizer()\n tokenizer.comment_tokens = self.comment_tokens\n f90lex = []\n for line in nml_file:\n toks = tokenizer.parse(line)\n while tokenizer.prior_delim:\n new_toks = tokenizer.parse(next(nml_file))\n\n # Skip empty lines\n if not new_toks:\n continue\n\n # The tokenizer always pre-tokenizes the whitespace (leftover\n # behaviour from Fortran source parsing) so this must be added\n # manually.\n if new_toks[0].isspace():\n toks[-1] += new_toks.pop(0)\n\n # Append the rest of the string (if present)\n if new_toks:\n toks[-1] += new_toks[0]\n\n # Attach the rest of the tokens\n toks.extend(new_toks[1:])\n\n toks.append('\\n')\n f90lex.extend(toks)\n\n self.tokens = iter(f90lex)\n\n nmls = Namelist()\n\n # Attempt to get first token; abort on empty file\n try:\n self._update_tokens(write_token=False)\n except StopIteration:\n return nmls\n\n # TODO: Replace \"while True\" with an update_token() iterator\n while True:\n try:\n # Check for classic group terminator\n if self.token == 'end':\n self._update_tokens()\n\n # Ignore tokens outside of namelist groups\n while self.token not in ('&', '$'):\n self._update_tokens()\n\n except StopIteration:\n break\n\n # Create the next namelist\n try:\n self._update_tokens()\n except StopIteration:\n raise ValueError('End-of-file after namelist group token `&`.')\n g_name = self.token\n\n g_vars = Namelist()\n v_name = None\n\n # TODO: Edit `Namelist` to support case-insensitive `get` calls\n grp_patch = nml_patch.pop(g_name.lower(), Namelist())\n\n # Populate the namelist group\n while g_name:\n\n if self.token not in ('=', '%', '('):\n try:\n self._update_tokens()\n except StopIteration:\n raise ValueError(\n 'End-of-file before end of namelist group: \\'&{}\\''\n ''.format(g_name)\n )\n\n # Set 
the next active variable\n if self.token in ('=', '(', '%'):\n\n v_name, v_values = self._parse_variable(\n g_vars,\n patch_nml=grp_patch\n )\n\n if v_name in g_vars:\n v_prior_values = g_vars[v_name]\n v_values = merge_values(v_prior_values, v_values)\n\n g_vars[v_name] = v_values\n\n # Squeeze 1d list due to repeated variables\n for v_name, v_values in g_vars.items():\n if (\n isinstance(v_values, list)\n and len(v_values) == 1\n and v_name not in g_vars.start_index\n ):\n g_vars[v_name] = v_values[0]\n\n # Deselect variable\n v_name = None\n v_values = []\n\n # Finalise namelist group\n if self.token in ('/', '&', '$'):\n\n # Append any remaining patched variables\n for v_name, v_val in grp_patch.items():\n g_vars[v_name] = v_val\n v_strs = nmls._var_strings(v_name, v_val)\n for v_str in v_strs:\n self.pfile.write(v_str + '\\n')\n\n # Append the grouplist to the namelist\n if g_name in nmls:\n nmls.add_cogroup(g_name, g_vars)\n else:\n nmls[g_name] = g_vars\n\n # Reset state\n g_name, g_vars = None, None\n\n try:\n self._update_tokens()\n except StopIteration:\n break\n\n if nml_patch:\n # Append the contents to the namelist patch\n print(file=self.pfile)\n print(nml_patch, file=self.pfile)\n\n # Now append the values to the output namelist\n for grp in nml_patch:\n nmls[grp] = nml_patch[grp]\n\n return nmls", "def get_tree(source):\n return ast.parse(source)", "def _tokenize(\n self,\n source: str,\n name: t.Optional[str],\n filename: t.Optional[str] = None,\n state: t.Optional[str] = None,\n ) -> TokenStream:\n source = self.preprocess(source, name, filename)\n stream = self.lexer.tokenize(source, name, filename, state)\n\n for ext in self.iter_extensions():\n stream = ext.filter_stream(stream) # type: ignore\n\n if not isinstance(stream, TokenStream):\n stream = TokenStream(stream, name, filename) # type: ignore\n\n return stream", "def parse(self, file):\n # The root tree\n tree = Tree()\n # Dictionary of subtrees that are created\n # The key is the name and the value is the corresponding TreeElement\n subtrees = dict()\n\n current_subtree = tree\n current_tree_element = None\n next_is_start = False\n next_is_comment = False\n comment = False\n last_indent = 0\n lnr = 0\n with open(file, 'r') as bfile:\n for line in bfile:\n lnr += 1\n comment = next_is_comment\n\n line = re.sub(r'//\\*\\*.*?\\*\\*//', '', line) # Block comments starting and ending in the same line\n\n if '**//' in line:\n # Block comments ending in this line\n # This line as well as the following will contain valid code\n next_is_comment = False\n comment = False\n line = re.sub(r'.*\\*\\*//', '', line)\n if '//**' in line:\n # Block comments starting in this line\n # This line may contain valid code, the next ones won't\n next_is_comment = True\n line = re.sub(r'//\\*\\*.*', '', line)\n\n line = re.sub(r'//.*', '', line) # Line comments\n\n line = line.rstrip()\n if not line:\n continue\n\n if not comment:\n indent = len(line) - len(line.lstrip())\n if indent % 4 != 0:\n raise ParseError('Error parsing line {}: Indent is not a multiple of 4'.format(lnr))\n\n line_content = line.lstrip()\n\n if indent == 0 and line_content.startswith('-->'):\n # This is the declaration of the start. 
Next line contains root element\n next_is_start = True\n current_subtree = tree\n last_indent = indent\n continue\n\n if next_is_start:\n # This line contains the root element of the main tree\n next_is_start = False\n element = self.create_tree_element(line_content, current_tree_element)\n tree.set_root_element(element)\n current_tree_element = element\n\n if indent == 0 and line_content.startswith('#'):\n # This is the declaration of a new subtree\n current_subtree = Tree()\n subtrees[line_content[1:]] = current_subtree\n current_tree_element = None\n last_indent = indent\n continue\n\n if indent < last_indent:\n # Go layers up, depending on indent difference\n for _ in range(indent, last_indent, 4):\n current_tree_element = current_tree_element.parent\n\n if re.search(r'\\s*-?->\\s*', line_content):\n # Arrow in line, split in decision result and call\n result, call = re.split(r'\\s*-?->\\s*', line_content, 1)\n\n if call.startswith('#'):\n # A subtree is called here.\n subtree_name = call.strip('#')\n if subtree_name not in subtrees:\n raise AssertionError('Error parsing line {}: {} not defined'.format(lnr, call))\n # The root element of the subtree should be placed in this tree position\n if current_tree_element is None:\n # The current subtree is empty, set the subtree as its root element\n current_subtree.set_root_element(subtrees[subtree_name].root_element)\n else:\n # Append this subtree in the current position\n current_tree_element.add_child_element(copy.copy(subtrees[subtree_name].root_element), result)\n\n elif re.search(r'\\s*,\\s*', call):\n # A sequence element\n actions = re.split(r'\\s*,\\s*', call)\n element = self.create_sequence_element(actions, current_tree_element)\n current_tree_element.add_child_element(element, result)\n\n elif call.startswith('@'):\n # An action is called\n element = self.create_tree_element(call, current_tree_element)\n current_tree_element.add_child_element(element, result)\n\n elif call.startswith('$'):\n # A decision is called\n element = self.create_tree_element(call, current_tree_element)\n current_tree_element.add_child_element(element, result)\n current_tree_element = element\n\n else:\n raise ParseError('Error parsing line {}: Element {} is neither an action nor a decision'.format(lnr, call))\n\n else:\n # No arrow, must be the beginning of a new subtree\n element = self.create_tree_element(line_content, current_tree_element)\n current_subtree.set_root_element(element)\n current_tree_element = element\n\n last_indent = indent\n return tree", "def _read(self, file_path: str) -> Iterator[Instance]:\n with open(file_path) as f:\n for line in f:\n pairs = line.split()\n words, tags = zip(*(pair.split(\"###\") for pair in pairs))\n yield self.text_to_instance([Token(word) for word in words], tags)", "def parse(self):\n # type: () -> Tag\n logger.debug('%s- PARSING %r ...',\n self.context.level * LIQUID_LOG_INDENT,\n self.context.name)\n while True:\n scanned = self.nodescanner.consume(\n self.context.stream\n ) # type: Optional[bool, Node]\n\n if scanned is False:\n self.visitor.root.parse()\n logger.debug('%s END PARSING.',\n self.context.level * LIQUID_LOG_INDENT)\n break\n if scanned is True:\n continue\n # Node\n tag = scanned.tag\n\n if not tag.SECURE and self.config.strict:\n raise LiquidSyntaxError(\n f\"Tag not allowed in strict mode: {tag!r}\",\n tag.context, self\n )\n self.visitor.visit(tag)\n\n return self.visitor.root", "def __init__(self, file):\n self.__in_file = file\n self.__token = \"\" # will contain the current token\n 
self.__tokens = [] # will contain all tokens\n self.__lines = file.readlines()\n self.__lines = \"\".join(self.__lines) # all lines as one string, in order to make the iteration easier.\n self.__i = 0\n self.build_tokens()", "def parse(self, text, filename='', directory='', debuglevel=0):\n self._last_yielded_token = None\n self.declared_types = dict()\n self._namespace_stack = [ None ]\n\n self._enum_decls = []\n self._message_types = []\n self._union_decls = []\n self._all_members = []\n self._enum_concepts = []\n\n self._included_files = []\n self._lines_to_coords = []\n processed_text = ''.join(self.preprocess(text, filename, directory))\n self._last_yielded_token = None\n\n self._syntax_tree = self.parser.parse(\n input=processed_text,\n lexer=self.lexer,\n tracking=True,\n debug=debuglevel)\n\n self._postprocess_enums()\n self._postprocess_unions()\n self._postprocess_enum_concepts()\n self._postprocess_versioning_hashes(self._syntax_tree)\n self._note_ambiguity()\n\n return self._syntax_tree", "def parse_tree_file(filename):\n f = open(filename)\n data = f.read()\n f.close()\n return parser.parse(data, lexer=lexer)", "def node(self):\r\n node = ast.parse(self.source, self.filename)\r\n node = self.visit(node)\r\n ast.fix_missing_locations(node)\r\n return node", "def parse_file(file_path, encoding='utf-8', print_errors=False):\n with open(file_path, 'r', encoding=encoding) as f:\n return parse(\n f.read(),\n file_name=os.path.basename(file_path),\n print_errors=print_errors\n )", "def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')", "def __parse(self):\n\t\tparser=xml.sax.make_parser()\n\t\tparser.setContentHandler(OSMXMLFileParser(self))\n\t\tparser.parse(self.filename)\n\t\n\t\t# convert them back to lists\n\t\tself.nodes = self.nodes.values()\n\t\tself.ways = self.ways.values()\n\t\tself.relations = self.relations.values()", "def _parse(self, infile):\n raise NotImplementedError()", "def Parse(source, filename):\n lexer = Lexer(filename)\n parser = Parser(lexer, source, filename)\n\n lex.lex(object=lexer)\n yacc.yacc(module=parser, debug=0, write_tables=0)\n\n tree = yacc.parse(source)\n return tree" ]
[ "0.6474443", "0.62446785", "0.61728376", "0.6170358", "0.6150989", "0.60873765", "0.60797954", "0.60496897", "0.60075635", "0.59771377", "0.59643596", "0.59559005", "0.5909493", "0.58725095", "0.5828897", "0.58060056", "0.578644", "0.5780078", "0.57655746", "0.5764479", "0.575528", "0.5740365", "0.5738633", "0.57338095", "0.57129633", "0.5700453", "0.56849706", "0.56839097", "0.5668512", "0.56559575" ]
0.7330537
0
This function creates 1d polynomial toy data for linear regression y= w[0]+w[1]x+..w[d]x^d Input
def toyData(w,sigma,N):
    #Degree of polynomial
    degree=w.size;
    #generate x values
    x=np.linspace(0, 1,N);
    poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)
    PHI=poly.fit_transform(x.reshape(N,1))
    y=np.dot(PHI,w);
    target=y+np.random.normal(0, sigma, N);
    Out=[x,y,PHI, target]
    return Out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_polynomial_regression(self, x_train, y_train):\n x_poly = self.poly_reg.fit_transform(x_train)\n self.lin_reg.fit(x_poly, y_train)", "def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi", "def linearfit(x,y):\n fit = np.polyfit(x,y,1)\n fit_fn = np.poly1d(fit)\n yy = fit_fn(x) \n \n return yy", "def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError", "def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y", "def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p", "def construct_poly(data, power):\n return np.power(data, power)", "def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p", "def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y", "def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR 
CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly", "def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx", "def regression_fit(self, x, y, deg):\n return np.polyfit(x, y, deg=deg)", "def linear_regression(self, x_data, y_data, mask = None, ax = None):\n if mask is None:\n mask = full(len(y_data), True, dtype=bool)\n poly = poly1d(polyfit(x_data[mask], y_data[mask], 1))\n\n if ax is not None:\n ax.plot(x_data, polyval(poly, x_data), \"--r\",\\\n label = \"Slope: %.2f\" %(poly[1]))\n return poly", "def _fit_poly(y_data, deg=5):\n x = np.arange(1, len(y_data) + 1)\n coeffs = np.polynomial.polynomial.polyfit(\n x, y_data, deg=deg)\n y_pred = poly(x, coeffs)\n return coeffs, np.mean((y_data - y_pred) ** 2)", "def fit_poly(data, error_func, degree=4): \n\n # generate initial guss for polynomial model (all coeffs = 1)\n guess = np.poly1d(np.ones(degree + 1, dtype=np.float32))\n\n # plot intial guess\n x = np.linspace(-5, 5, 21)\n plt.plot(x, np.polyval(guess, x), 'm--', linewidth=2.0, label=\"Initial guess\")\n\n # call optimizer to minimize error function\n result = spo.minimize(error_poly, guess, args=(data,), method='SLSQP', options={'disp':True})\n \n # convert optimal result into a poly1d object\n return np.poly1d(result.x)", "def nnRegression(data):", "def predict_y(x, w):\n r = []\n n = len(w)\n for i in range(len(x)):\n temp = 0\n for j in range(n):\n temp = temp+w[n-j-1]*(x[i]**j)\n r = r+[temp]\n return r", "def linearReg(x,y):\n X=np.array(x).reshape(-1,1)\n Y=np.array(y).reshape(-1,1)\n x_shape = X.shape\n num_var = x_shape[1] \n yintercept = 0\n slope = 0\n progress = []\n #intialize the parameter\n weight_matrix = np.random.normal(-1,1,(num_var,1))\n yintercept = np.random.rand(1)\n #cost minmization\n for i in range(200):\n dcostdm = np.sum(np.multiply(((np.matmul(X,weight_matrix)+ yintercept)-Y),X))*2/x_shape[0] #w.r.t to the weight\n dcostdc = np.sum(((np.matmul(X,weight_matrix)+yintercept)-Y))*2/x_shape[0] #partial derivative of cost w.r.t the intercept\n weight_matrix -= 0.1*dcostdm \n #updating the weights with the calculated gradients\n yintercept -= 0.1*dcostdc #updating the weights with the calculated gradients\n progress.append(np.array((weight_matrix,yintercept)))\n slope = weight_matrix\n return (slope[-1],yintercept)", "def calc_poly_linear_regression(independent, dependent):\n # Reshape for sklearn\n independent = independent.values.reshape(-1,1)\n dependent = dependent.values.reshape(-1,1)\n # Make the whole thing poly\n poly = PolynomialFeatures(degree=2)\n independent_ = poly.fit_transform(independent)\n # Do the linear regression\n model = LinearRegression()\n model.fit(independent_, dependent)\n # Calculate R2\n return model.score(independent_, dependent)", "def polynomial_reg(self, independent, dependent):\n\n try:\n if isinstance(independent, str) and isinstance(dependent, str):\n x = self.df_input[independent]\n y = self.df_input[[dependent]]\n elif isinstance(independent, pd.DataFrame) and isinstance(dependent, pd.DataFrame):\n x = independent\n y = dependent\n\n x = x[:, np.newaxis]\n y = y[: np.newaxis]\n\n poly = PolynomialFeatures(degree = 2)\n x_poly = poly.fit_transform(x) \n\n model = LinearRegression()\n model.fit(x_poly, y)\n y_poly_pred = model.predict(x_poly)\n\n plt.scatter(x, y, color = 
'red')\n sort_axis = operator.itemgetter(0)\n sorted_zip = sorted(zip(x,y_poly_pred), key=sort_axis)\n x, y_poly_pred = zip(*sorted_zip)\n plt.plot(x, y_poly_pred, color='blue', label=self.poly_eq(independent, dependent))\n plt.legend(fontsize=9, loc=\"upper right\")\n\n plt.title(\"Polynomial Regression of \" + independent + \" and \" + dependent)\n plt.show()\n \n except Exception as e:\n print(e)", "def poly_regression(self,precision=8):\n # return empty lists if input is empty\n if self.training == []:\n return [],[]\n\n latitudes = []\n longitudes = []\n for point in self.training[:-1]:\n latitudes.append(point[0])\n longitudes.append(point[1]) \n # store everything in a dataframe\n latDf = pd.DataFrame(numpy.array(latitudes), columns=['latitudes'])\n longDf = pd.DataFrame(numpy.array(longitudes), columns=['longitudes'])\n\n # learn how to do regression\n reg = linear_model.LinearRegression()\n\n # pass the order of your polynomial here \n poly = PolynomialFeatures(precision)\n\n \n # regression with latitude as domain\n vertical_predicted_path = []\n transform = poly.fit_transform(longDf)\n\n reg.fit(transform,latDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n vertical_predicted_path.append([predictions[i][0],longDf[\"longitudes\"][i]])\n\n \n # regression with longitude domain\n horizontal_predicted_path = []\n transform = poly.fit_transform(latDf)\n\n reg.fit(transform,longDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n horizontal_predicted_path.append([latDf[\"latitudes\"][i], predictions[i][0]])\n\n self.horizontal = sorted(horizontal_predicted_path, key=lambda k: [k[1], k[0]])\n self.vertical = sorted(vertical_predicted_path, key=lambda k: [k[0], k[1]])\n \n # return sorted horizontal and vertical prediction\n return self.horizontal, self.vertical", "def fit_poly(x, y, n=5, log=False):\n \n x_g = x\n x = np.ma.array(x, mask=y.mask).compressed()\n y = y.compressed()\n if log:\n yl = np.log10(y)\n else:\n yl = y\n fit = np.polyfit(x, yl, n)\n p = np.poly1d(fit)\n \n if log:\n return 10**(p(x_g))\n else:\n return p(x_g)", "def polyfeatures(self, X, degree):\n #TODO\n \n for d in range(2,degree+1):\n X = np.append(X,X[:,[0]]**d,1)\n \n return X", "def get_poly_coeff(self, independent, dependent):\n\n try:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n poly = PolynomialFeatures(degree = 2)\n x_poly = poly.fit_transform(x) \n\n model = LinearRegression()\n model.fit(x_poly, y)\n return model.coef_\n except Exception as e:\n print(e)", "def construct_polynomial_approx(degree, weights):\n # here is a function that is created on the fly from the input feature\n # mapping and weights\n def prediction_function(xs):\n expanded_xs = np.matrix(expand_to_monomials(xs, degree))\n ys = expanded_xs*np.matrix(weights).reshape((len(weights),1))\n return np.array(ys).flatten()\n # we return the function reference (handle) itself. 
This can be used like\n # any other function\n return prediction_function", "def _lin_regress(self, y, x, bias=True):\n n_samples = x.shape[0]\n x = self.make_polynomial(x[:, np.newaxis], fit_intercept=bias)\n l = np.linalg.cholesky(x.T @ x)\n v = solve_triangular(l, x.T @ y, lower=True)\n coefs = solve_triangular(l.T, v)\n pred = x @ coefs\n resid = y - pred\n rss = resid @ resid\n ddof = n_samples - (1 + bias)\n return rss, ddof", "def generate_coefficients_data(poly_degree: int, performance_data: pd.DataFrame, param_columns: typing.List) -> pd.DataFrame:\n if poly_degree != 2:\n logging.warning('Not Implemented: polynomial degree of > 2. Will use degree 2 for meta-model')\n coef_names = get_coefficient_names()\n results = []\n for idx, task_id in enumerate(performance_data['task_id'].unique()):\n frame_task = performance_data.loc[performance_data['task_id'] == task_id]\n model = sklearn.linear_model.LinearRegression(fit_intercept=False)\n poly_feat = sklearn.preprocessing.PolynomialFeatures(2)\n X = poly_feat.fit_transform(frame_task[param_columns])\n y = frame_task['predictive_accuracy']\n model.fit(X, y)\n result = {\n 'task_id': task_id,\n coef_names[0]: model.coef_[0],\n coef_names[1]: model.coef_[1],\n coef_names[2]: model.coef_[2],\n coef_names[3]: model.coef_[3],\n coef_names[4]: model.coef_[4],\n coef_names[5]: model.coef_[5],\n }\n results.append(result)\n return pd.DataFrame(results).set_index('task_id')", "def add_polynomial_features(x, power):\n if type(power) is int and type(x) is np.ndarray:\n return np.concatenate([x**i for i in range(1, power+1)], axis=1)\n return None", "def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)", "def poly_eq(self, independent, dependent):\n\n try:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n poly = PolynomialFeatures(degree = 2)\n x_poly = poly.fit_transform(x) \n\n model = LinearRegression()\n model.fit(x_poly, y)\n coef_arr = model.coef_\n intercept_arr = model.intercept_\n \n poly_equation = \"y = \" + str(round(coef_arr[0][2], 4)) + \"x\\xB2\"\n \n if(coef_arr[0][1] < 0):\n poly_equation += \" + (\" + str(round(coef_arr[0][1], 4)) + \"x\" + \")\"\n else:\n poly_equation += \" + \" + str(round(coef_arr[0][1], 4)) + \"x\"\n \n if(intercept_arr[0] < 0):\n poly_equation += \" + (\" + str(round(intercept_arr[0], 4)) + \")\"\n else:\n poly_equation += \" + \" + str(round(intercept_arr[0], 4))\n \n return poly_equation\n except Exception as e:\n print(e)" ]
[ "0.6825394", "0.6576377", "0.65696436", "0.6495577", "0.64582753", "0.6423641", "0.6405825", "0.6357386", "0.6354499", "0.6313618", "0.6298149", "0.62897635", "0.6238766", "0.623336", "0.61870444", "0.6183656", "0.6180649", "0.61638725", "0.61339", "0.6129765", "0.61093587", "0.610793", "0.60940045", "0.60905856", "0.6082799", "0.6064008", "0.60237545", "0.60041624", "0.59601426", "0.59588295" ]
0.6982522
0
Encodes a string with the given encoder name.
def encode(data: str, encoder_name: str) -> str:
    func = globals()["_encode_" + encoder_name]
    data = func(data)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def registerEncoder (encoder):\n assert False, \"TODO:\"", "def register(self, coder_name, raw_coder):\n coder = coderize(raw_coder)\n self.coders[coder_name] = coder", "def encoder_from_string(encoder: str) -> json.JSONEncoder:\n return util.import_obj(encoder) if encoder else None", "def enc(text):\n if isinstance(text, str):\n return unicode(text, 'utf-8') # TODO: fix in Python 3\n elif isinstance(text, unicode):\n return text.encode('utf-8')\n else:\n raise Exception(\"Unsupported encode format.\")", "def encode_string(string, encoding=None):\n # type: (str, str) -> str\n if isinstance(string, bytes):\n return string\n if isinstance(string, str):\n if encoding is None:\n encoding = 'utf8'\n return string.encode(encoding)\n raise ValueError('invalid string type: {}'.format(type(string)))", "def set_encoder(attribute: str, encoder: typing.Callable) -> None:\n if encoder is not None:\n __attribute_decoders[attribute.lower()] = encoder", "def encodeString(*args, **kwargs)->AnyStr:\n pass", "def encode(text, orig_coding):\r\n if orig_coding == 'utf-8-bom':\r\n return BOM_UTF8 + text.encode(\"utf-8\"), 'utf-8-bom'\r\n \r\n # Try declared coding spec\r\n coding = get_coding(text)\r\n if coding:\r\n try:\r\n return text.encode(coding), coding\r\n except (UnicodeError, LookupError):\r\n raise RuntimeError(\"Incorrect encoding (%s)\" % coding)\r\n if orig_coding and orig_coding.endswith('-default'):\r\n coding = orig_coding.replace(\"-default\", \"\")\r\n try:\r\n return text.encode(coding), coding\r\n except (UnicodeError, LookupError):\r\n pass\r\n if orig_coding == 'utf-8-guessed':\r\n return text.encode('utf-8'), 'utf-8'\r\n \r\n # Try saving as ASCII\r\n try:\r\n return text.encode('ascii'), 'ascii'\r\n except UnicodeError:\r\n pass\r\n \r\n # Save as UTF-8 without BOM\r\n return text.encode('utf-8'), 'utf-8'", "def get_encode(encode_str, py_version=3):\n if ((py_version == 2 and isinstance(encode_str, str)) or (\n py_version == 3 and isinstance(encode_str, str))):\n encode = \"unicode\"\n else:\n for code in [\"utf-8\", sys.getfilesystemencoding(), \"gb18030\",\n \"ascii\", \"gbk\", \"gb2312\"]:\n try:\n encode_str.decode(code, 'ignore')\n return code\n except UnicodeDecodeError:\n pass\n encode = 'utf-8'\n return encode", "def encode(self, strs):", "def encode(self, strs):", "def encode_if_unicode(string, encoding):\n if isinstance(string, compat.unicode_type):\n return string.encode(encoding)\n else:\n return string", "def get_encoder(encoding):\n if encoding == Encoding.V1_THRIFT:\n return _V1ThriftEncoder()\n if encoding == Encoding.V1_JSON:\n return _V1JSONEncoder()\n if encoding == Encoding.V2_JSON:\n return _V2JSONEncoder()\n if encoding == Encoding.V2_PROTO3:\n return _V2ProtobufEncoder()\n raise ZipkinError(\"Unknown encoding: {}\".format(encoding))", "def apply_coder(text, coder):\n ## TODO.\n encoded_text = ''\n for letter in text:\n if letter in coder:\n encoded_text += coder[letter]\n else:\n encoded_text += letter\n return encoded_text", "def EncodeString(string, encoding=None):\r\n if not encoding:\r\n encoding = DEFAULT_ENCODING\r\n\r\n if txtutils.IsUnicode(string):\r\n try:\r\n rtxt = string.encode(encoding)\r\n except LookupError:\r\n rtxt = string\r\n return rtxt\r\n else:\r\n return string", "def encode(self, text):", "def encode(encoding_scheme: str, val: str) -> str:\n if encoding_scheme == \"url\":\n return url_quote(val)\n\n # base64 utf8\n if encoding_scheme == \"base64\":\n return base64.b64encode(val.encode(\"utf8\")).decode(\"utf-8\")\n\n # returns original 
val if encoding_scheme not recognized\n return val", "def encoding(text: str) -> str:\n text = [text[i:i + 3] for i in range(0, len(text), 3)]\n encoded_text = []\n for letter in text:\n completed = False\n for coding in Encoder.__ALPHABET:\n if coding.encode == letter:\n completed = True\n encoded_text.append(coding.code)\n if completed:\n break\n if not completed:\n encoded_text.append(letter)\n encoded_string = \"\".join(encoded_text)\n return encoded_string.lower()", "def get_encoder_name(self):\n return \"P\" + str(self.parameters_common_index) + \".\" + str(self.parameters_fs_index) + \"_E\" \\\n + str(self.get_encoder_number())", "def encode(inStr):\n if (isinstance(inStr, basestring)):\n return inStr.encode(sys.stdout.encoding, 'xmlcharrefreplace')\n return inStr", "def getEncoder (name):\n for m in _registeredEncoders:\n if m.name () == name:\n return m\n return None", "async def encode(ctx, text: Option(str, \"Text to encode in brainfuck\")):\n encoded = bot.brainfuck.encode(text)\n await send_code(ctx, encoded.code, lang=\"bf\")", "def encode_file_using_codes(file_name, letter_codes):\r\n contents = \"\"\r\n with open(file_name) as f:\r\n contents = f.read()\r\n file_name_encoded = file_name + \"_encoded\"\r\n with open(file_name_encoded, 'w') as fout:\r\n for c in contents:\r\n fout.write(letter_codes[c])\r\n print(\"Wrote encoded text to {}\".format(file_name_encoded))", "def encode(fmtstr, *stuff):\n return Wire(fmtstr).encode(*stuff)", "def encode_name(param):\n sname = param\n # replace all kind of unwanted chars in a python dictname.\n sname = sname.strip()\n for ch in ['/', ' + ', ' ', '#', '&', '-', ',', '+', ]:\n if ch in sname:\n sname = sname.replace(ch, \"_\")\n\n # replace brackets\n for ch in ['(', ')']:\n if ch in sname:\n sname = sname.replace(ch, \"\")\n\n # replace the numbers 2 and 10 with the text representation\n if '10' in sname:\n sname = sname.replace('10', 'TEN')\n\n if '2' in sname:\n sname = sname.replace('2', 'TWO')\n return sname.upper()", "def escapeEncode(s: unicode) -> unicode:\n ...", "def encode (self, strs):\n if strs == []: return \"null\"\n return chr(257).join(strs)", "def test_encode(self):\n assert url_encoder.encode(1) == 'TheStakeOut'\n assert url_encoder.encode(800) == 'TheStockTip-TheSeven'\n assert url_encoder.encode(99999) == 'MaleUnbonding-TheConversion-TheAndreaDoria'", "def encode(self, decoded):", "def encoding(self, enc):\n self._encoding = enc" ]
[ "0.64407706", "0.61046815", "0.6079837", "0.5847847", "0.5770759", "0.5728369", "0.56867754", "0.5659111", "0.56393677", "0.5635285", "0.5635285", "0.560628", "0.5529888", "0.5525563", "0.55111885", "0.5491491", "0.54693437", "0.5467637", "0.54634774", "0.54603595", "0.54328984", "0.5427237", "0.54148054", "0.5408877", "0.53986216", "0.5377089", "0.5352456", "0.53485996", "0.53123814", "0.5299699" ]
0.79553246
0
List available encoder for argument listencoders.
def argparse_encoder_list() -> None:
    print("Available encoders:\n")
    print("{:<12}| {}".format("NAME", "DESCRIPTION"))
    print("{}|{}".format("-" * 12, "-" * 12))
    for encoder in AVAILABLE_ENCODERS:
        print("{:<12}| {}".format(encoder, AVAILABLE_ENCODERS[encoder]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEncoders ():\n return _registeredEncoders", "def list():\n return [Drive.ENCODER_L,\n Drive.ENCODER_R]", "def get_encoder_names(cls) -> list[str]:\n return cls.backbone_names", "def read_encoders(self):\n enc_list = Drive.list()\n rtn = {}\n\n for enc in enc_list:\n rtn[enc] = self.read_encoder(enc)\n\n return rtn", "def get_encoders_values(self, delta: bool=False) -> list:\n encoders = self.robot.all_services.get('encoders')\n if encoders is not None:\n values_all = encoders.all_values\n if delta:\n return values_all\n else:\n return [values_all[0][1], values_all[1][1]]\n else:\n log.warning(\"Service encoders is not enabled!\")", "def get_coders(self):\n return self._coder", "def encoders(self):\n return self.rpc.call(MsfRpcMethod.ModuleEncoders)['modules']", "def show_encs():\n encs = g.encoders\n out = \"%sEncoding profiles:%s\\n\\n\" % (c.ul, c.w)\n\n for x, e in enumerate(encs):\n sel = \" (%sselected%s)\" % (c.y, c.w) if Config.ENCODER.get == x else \"\"\n out += \"%2d. %s%s\\n\" % (x, e['name'], sel)\n\n g.content = out\n message = \"Enter %sset encoder <num>%s to select an encoder\"\n g.message = message % (c.g, c.w)", "def get_codecs_list():\n for codec in CODECS_IN_FILE.iterkeys():\n print codec", "def encodings():\n from . import factory\n return factory.MAPPINGS.keys()", "def get_video_encoder_configurations(self):\n return self.camera_media.GetVideoEncoderConfigurations()", "def get_encoder_parameters(cls) -> list[dict]:\n parameter_list = []\n for backbone_name in cls.backbone_names:\n parameter_list.append(\n {\n \"model_name\": backbone_name,\n \"pretrained\": True,\n \"progress\": True,\n \"spatial_dims\": 2,\n \"in_channels\": 3,\n \"num_classes\": 1000,\n \"norm\": (\"batch\", {\"eps\": 1e-3, \"momentum\": 0.01}),\n \"adv_prop\": \"ap\" in backbone_name,\n }\n )\n return parameter_list", "def requestEncoders(self):\n self._protocol.write_line(CMD_MOTORS)", "def encoders_count(self):\r\n return self._get('encoders_count', {})", "def encodings(self) -> list[int]:\n pass", "def get_decoders_names(self):\n if self.replay_source is None:\n return [\"P\" + str(self.parameters_common_index) + \".\" + str(self.parameters_fs_index) + \"_E\" \\\n + str(self.get_encoder_number())]\n\n if self.helper_decoders_one_class:\n decoders_names = [\"P\" + str(self.parameters_common_index) + \".\" +\n str(self.parameters_fs_index) + \".\" +\n str(self.parameters_helper_index) + \".\" +\n str(self.parameters_incremental_index) +\n \"_T\" + str(self.test_index) + \"_S\" + str(i) + \"_\" +\n self.replay_source + \"_1\" for i in range(len(self.test_structure))]\n else:\n decoders_names = [\"P\" + str(self.parameters_common_index) + \".\" +\n str(self.parameters_fs_index) + \".\" +\n str(self.parameters_helper_index) + \".\" +\n str(self.parameters_incremental_index) +\n \"_T\" + str(self.test_index) + \"_S\" + str(i) + \"_\" +\n self.replay_source for i in range(len(self.test_structure))]\n\n decoders_names[0] = \"P\" + str(self.parameters_common_index) + \".\" + str(self.parameters_fs_index) + \"_E\" \\\n + str(self.get_encoder_number())\n\n return decoders_names", "def encoding_ids(self):\n # type: () -> list[string_types]\n return self._encoding_ids", "def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval", "def compute_encoding(self):\n for input_quantizer in self._input_quantizers.values():\n input_quantizer.compute_encoding()\n\n for quantizer in self.param_quantizers.values():\n # NOTE: If quantizer.enabled is True but 
quantizer.encoding is None,\n # quantizer.compute_encoding() will set quantizer.enabled to False.\n # Otherwise, quantizer.compute_encodings() is equivalent to no-op.\n quantizer.compute_encoding()\n\n for output_quantizer in self._output_quantizers.values():\n output_quantizer.compute_encoding()", "def get_right_encoder_values(self, delta: bool=False) -> list:\n encoders = self.robot.all_services.get('encoders')\n if encoders is not None:\n right_values_all = encoders.right_values\n if delta:\n return right_values_all\n else:\n return right_values_all[1]\n else:\n log.warning(\"Service encoders is not enabled!\")", "def EnumerateEncodings(*args, **kwargs):\n return _gdi_.FontEnumerator_EnumerateEncodings(*args, **kwargs)", "def get_encoder(*targets: 'layout.Encoding') -> 'layout.Encoder':\n for pattern in targets:\n for codec in ENCODERS:\n if pattern.match(codec.encoding):\n return codec\n raise Encoding.Unsupported(f'No encoder for any of {targets}')", "def items(self):\n return self.encoder.items()", "def registerEncoder (encoder):\n assert False, \"TODO:\"", "def get_encoders(schema):\n encoders = dict([(question.name, preprocessing.LabelEncoder()) for question in schema.questions])\n\n assert len(schema.get_question_names()) == len(set(schema.get_question_names())) # questions must be uniquely named\n for question in schema.questions:\n encoders[question.name] = encoders[question.name].fit(question.get_answer_names())\n\n return encoders", "def Initialize_Encoders(self):\n for NumTries in range(5):\n try:\n self.fd_channel = []\n for i in range(3):\n self.fd_channel.append(open(self.ENCODER_NAME[i], \"r+b\"))\n fcntl.ioctl(self.fd_channel[i], self.LOAD_CMD_REG, self.X4) \n fcntl.ioctl(self.fd_channel[i], self.LOAD_CMD_REG, self.IOR_DISABLE_INDEX) \n self.fd_channel[i].write(b'\\x00\\x00\\x00') \n # Write zeros to 24 bit PR register\n fcntl.ioctl(self.fd_channel[i], self.LOAD_CMD_REG, self.TRAN_PR_CNTR) \n # Transfers the PR register to the counter\n self.read_pos[i] = 0\n print \"Successfully initialized Encoders\\n\"\n return\n except Exception as e:\n print \"Failure initializing optical encoders. 
Exception of type %s and args = \\n\"%type(e).__name__, e.args \n time.sleep(0.1)\n continue\n\n print \"Failed to initialize Encoders\\n\"\n return", "def encoder(self, inputs):\n pass", "def get_left_encoder_values(self, delta: bool=False) -> list:\n encoders = self.robot.all_services.get('encoders')\n if encoders is not None:\n left_values_all = encoders.left_values\n if delta:\n return left_values_all\n else:\n return left_values_all[1]\n else:\n log.warning(\"Service encoders is not enabled!\")", "def _codec_names():\n import glob\n import os.path\n\n package_folder = os.path.dirname(__file__)\n for codec_path in glob.glob(os.path.join(package_folder, \"cp*.py\")):\n codec_name = os.path.splitext(os.path.basename(codec_path))[0]\n yield codec_name", "def get_encoding():\n return {\n \"resolution\": RESOLUTION,\n \"max_beat\": MAX_BEAT,\n \"max_duration\": MAX_DURATION,\n \"dimensions\": DIMENSIONS,\n \"n_tokens\": N_TOKENS,\n \"type_code_map\": TYPE_CODE_MAP,\n \"beat_code_map\": BEAT_CODE_MAP,\n \"position_code_map\": POSITION_CODE_MAP,\n \"pitch_code_map\": PITCH_CODE_MAP,\n \"duration_code_map\": DURATION_CODE_MAP,\n \"instrument_code_map\": INSTRUMENT_CODE_MAP,\n \"code_type_map\": CODE_TYPE_MAP,\n \"code_beat_map\": CODE_BEAT_MAP,\n \"code_position_map\": CODE_POSITION_MAP,\n \"code_pitch_map\": CODE_PITCH_MAP,\n \"code_duration_map\": CODE_DURATION_MAP,\n \"code_instrument_map\": CODE_INSTRUMENT_MAP,\n \"program_instrument_map\": PROGRAM_INSTRUMENT_MAP,\n \"instrument_program_map\": INSTRUMENT_PROGRAM_MAP,\n }" ]
[ "0.7441287", "0.6809621", "0.67048085", "0.6366758", "0.63201123", "0.6299589", "0.62778974", "0.6232592", "0.608029", "0.6028224", "0.597877", "0.59699005", "0.59497845", "0.5925195", "0.5882375", "0.5660411", "0.5649066", "0.56255937", "0.55458534", "0.55051184", "0.54601586", "0.5453892", "0.5405314", "0.53760827", "0.53645027", "0.5321351", "0.5243208", "0.5222475", "0.5204648", "0.51687884" ]
0.729824
1
URL encodes the string.
def _encode_url(data: str) -> str:
    return urllib.parse.quote(data, safe="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def b2_url_encode(s):\n return quote(s.encode('utf-8'))", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def _encode(self, url):\n\n\t\ttiny_url = ''\n\n\t\tstring_id = self.get_string_id(url)\n\n\t\twhile string_id > 0:\n\t\t\tstring_id, mod = divmod(string_id, len(ALPHABET))\n\t\t\ttiny_url = tiny_url + ALPHABET[mod]\n\n\t\treturn tiny_url", "def _url_encode(self, text):\n try:\n return (urllib.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))\n except:\n print('Using python3')\n return (urllib.parse.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))", "def urlEncode(s):\n\treturn string.join(map(lambda c: _urlEncode[c], list(s)), '')", "def encode(self, url):\n\n # Escape ()\n return url.replace(\"(\", \"%28\").replace(\")\", \"%29\") if url else url", "def encode_url(self, url):\n # turn string into unicode\n if not isinstance(url, unicode):\n url = url.decode('utf8')\n\n # parse it\n parsed = urlsplit(url)\n\n # divide the netloc further\n netloc_pattern = re.compile(r\"\"\"\n (?:(?P<user>[^:@]+)(?::(?P<password>[^:@]+))?@)?\n (?P<host>[^:]+)\n (?::(?P<port>[0-9]+))?\n \"\"\", re.X | re.U)\n netloc_parsed = netloc_pattern.match(parsed.netloc).groupdict()\n\n # encode each component\n scheme = parsed.scheme\n user = netloc_parsed['user'] and quote(netloc_parsed['user'])\n password = (netloc_parsed['password'] and\n quote(netloc_parsed['password']))\n host = netloc_parsed['host']\n port = netloc_parsed['port'] and netloc_parsed['port']\n path = '/'.join( # could be encoded slashes!\n quote(unquote(pce).encode('utf8'), '')\n for pce in parsed.path.split('/')\n )\n query = quote(unquote(parsed.query), '=&?/')\n fragment = quote(unquote(parsed.fragment))\n\n # put it back together\n netloc = ''\n if user:\n netloc += user\n if password:\n netloc += ':' + password\n netloc += '@'\n netloc += host\n if port:\n netloc += ':'+port\n return urlunsplit((scheme, netloc, path, query, fragment))", "def encode_url(url):\n\treturn url.replace(' ', '_')", "def encode(self, longUrl):\n pass", "def _encode_urlplus(data: str) -> str:\n return urllib.parse.quote_plus(data, safe=\"\")", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def encode(self, strs):", "def encode(self, strs):", "def _url_base64_encode(msg):\r\n msg_base64 = base64.b64encode(msg)\r\n msg_base64 = msg_base64.replace('+', '-')\r\n msg_base64 = msg_base64.replace('=', '_')\r\n msg_base64 = msg_base64.replace('/', '~')\r\n return msg_base64", "def do_urlencode(value):\n return urllib.quote(value.encode('utf8'))", "def test_encode(self):\n assert url_encoder.encode(1) == 'TheStakeOut'\n assert url_encoder.encode(800) == 'TheStockTip-TheSeven'\n assert url_encoder.encode(99999) == 'MaleUnbonding-TheConversion-TheAndreaDoria'", "def encode(self, longUrl):\n return str(base64.b64encode(longUrl.encode('utf-8')), 'utf-8')", "def toURLEncoded(self):\n args = sorted(self.toPostArgs().items())\n return urllib.parse.urlencode(args)", "def encode(self, longUrl):\n if not longUrl:\n return ''\n key = self.next()\n self.encodedToUrl[key] = longUrl\n return 'http://tinyurl.com/{}'.format(key)", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def urlencode(self, value):\n if isinstance(value, unicode):\n value = value.encode(\"utf-8\")\n elif value is None:\n return \"\"\n\n 
assert isinstance(value, str)\n\n return urllib.quote_plus(value)", "def encode(encoding_scheme: str, val: str) -> str:\n if encoding_scheme == \"url\":\n return url_quote(val)\n\n # base64 utf8\n if encoding_scheme == \"base64\":\n return base64.b64encode(val.encode(\"utf8\")).decode(\"utf-8\")\n\n # returns original val if encoding_scheme not recognized\n return val", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')", "def test_url_encoding_encodes_space(self):\n api = bandcamp.Api(api_key=None)\n\n url = 'http://api.bandcamp.com/api/band/3/search'\n parameters = {'name': 'mountain man'}\n encoded_url = 'http://api.bandcamp.com/api/band/3/search?name=mountain+man'\n\n self.assertEqual(encoded_url, api.get_encoded_url(url=url, parameters=parameters))", "def encodeUrl(self, id):\n characters = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n # base = 62\n base = len(characters)\n ret = []\n while id > 0:\n val = id % base\n ret.append(characters[val])\n id = id // base\n # reverse and return\n return \"\".join(ret[::-1])", "def test_url_encoding(self):\n api = bandcamp.Api(api_key=None)\n\n url = 'http://api.bandcamp.com/api/url/1/info'\n parameters = {'url': 'cults.bandcamp.com'}\n encoded_url = 'http://api.bandcamp.com/api/url/1/info?url=cults.bandcamp.com'\n\n self.assertEqual(encoded_url, api.get_encoded_url(url=url, parameters=parameters))", "def encode(self, text):", "def url_escape(value, plus=True):\r\n quote = urllib_parse.quote_plus if plus else urllib_parse.quote\r\n return quote(utf8(value))" ]
[ "0.73567134", "0.7325704", "0.72661626", "0.71775556", "0.71566695", "0.7070874", "0.7048081", "0.70204276", "0.7011386", "0.69716555", "0.68814105", "0.68672156", "0.68672156", "0.6819084", "0.67636806", "0.6643498", "0.6638649", "0.6609854", "0.6606962", "0.65300757", "0.65300757", "0.6511282", "0.6462202", "0.641601", "0.64125764", "0.6400137", "0.63955677", "0.63827723", "0.6361226", "0.6336255" ]
0.7664371
0
URL encodes the string and converts spaces to a '+' sign.
def _encode_urlplus(data: str) -> str:
    return urllib.parse.quote_plus(data, safe="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _url_encode(self, text):\n try:\n return (urllib.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))\n except:\n print('Using python3')\n return (urllib.parse.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))", "def encode_url(url):\n\treturn url.replace(' ', '_')", "def url_escape(value, plus=True):\r\n quote = urllib_parse.quote_plus if plus else urllib_parse.quote\r\n return quote(utf8(value))", "def _encode_url(data: str) -> str:\n return urllib.parse.quote(data, safe=\"\")", "def urlencode(txt):\n return urllib.quote_plus(txt)", "def urlEncode(s):\n\treturn string.join(map(lambda c: _urlEncode[c], list(s)), '')", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def urldecode_plus(s):\n s = s.replace('+', ' ')\n arr = s.split('%')\n res = arr[0]\n for it in arr[1:]:\n if len(it) >= 2:\n res += chr(int(it[:2], 16)) + it[2:]\n elif len(it) == 0:\n res += '%'\n else:\n res += it\n return res", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def urllib_quote_plus(inputstring, safestring=\"\"):\r\n\r\n if type(inputstring) is not str:\r\n raise TypeError(\"urllib_quote_plus' inputstring parameter must be a string, not '\"+str(type(inputstring))+\"'\")\r\n if type(safestring) is not str:\r\n raise TypeError(\"urllib_quote_plus' safestring parameter must be a string, not '\"+str(type(safestring))+\"'\")\r\n \r\n\r\n return urllib_quote(inputstring, safestring + \" \").replace(\" \", \"+\")", "def urlify_pythonic(text, length):\n return text.rstrip().replace(\" \", \"%20\")", "def URLify(s):\n\n # Without any additional libraries and by doing it in an array as intended by the writer.\n return ''.join('%20' if c == ' ' else c for c in s.strip())\n\n # Time Complexity: O(n)\n # Space Complexity: O(1)", "def urlencode(self, value):\n if isinstance(value, unicode):\n value = value.encode(\"utf-8\")\n elif value is None:\n return \"\"\n\n assert isinstance(value, str)\n\n return urllib.quote_plus(value)", "def urlify2(w, length):\n chars = []\n while i < len(w):\n c = w[i]\n if c == ' ':\n chars.append('%20') \n else:\n chars.append(c)\n i += 1\n url_w = build_string(chars)\n return url_w", "def do_urlencode(value):\n return urllib.quote(value.encode('utf8'))", "def urlify(board):\n return(board.replace(\" \",\"%20\"))", "def b2_url_encode(s):\n return quote(s.encode('utf-8'))", "def urlify(w, length):\n return w.strip().replace(' ', '%20')", "def urlencode(path):\n return urllib.quote_plus(path)", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def encode(self, url):\n\n # Escape ()\n return url.replace(\"(\", \"%28\").replace(\")\", \"%29\") if url else url", "def test_url_encoding_encodes_space(self):\n api = bandcamp.Api(api_key=None)\n\n url = 'http://api.bandcamp.com/api/band/3/search'\n parameters = {'name': 'mountain man'}\n encoded_url = 'http://api.bandcamp.com/api/band/3/search?name=mountain+man'\n\n self.assertEqual(encoded_url, api.get_encoded_url(url=url, parameters=parameters))", "def _encode(self, url):\n\n\t\ttiny_url = ''\n\n\t\tstring_id = self.get_string_id(url)\n\n\t\twhile string_id > 0:\n\t\t\tstring_id, mod = divmod(string_id, len(ALPHABET))\n\t\t\ttiny_url = tiny_url + ALPHABET[mod]\n\n\t\treturn tiny_url", "def urllib_unquote_plus(inputstring):\r\n if type(inputstring) is not str:\r\n raise TypeError(\"urllib_unquote_plus' inputstring parameter must be a string, not 
'\"+str(type(inputstring))+\"'\")\r\n\r\n return urllib_unquote(inputstring.replace(\"+\", \" \"))", "def encode_parameters(self, text):\n return quote_plus(text, safe='=:&\"')", "def _url_base64_encode(msg):\r\n msg_base64 = base64.b64encode(msg)\r\n msg_base64 = msg_base64.replace('+', '-')\r\n msg_base64 = msg_base64.replace('=', '_')\r\n msg_base64 = msg_base64.replace('/', '~')\r\n return msg_base64", "def urlencodeall(str):\n if not str:\n return \"\"\n\n return string.join(['%' + s.encode('hex') for s in str], '')", "def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')", "def toURLEncoded(self):\n args = sorted(self.toPostArgs().items())\n return urllib.parse.urlencode(args)", "def urlencode(query):\n\n if hasattr(query, 'items'):\n # mapping objects\n query = query.items()\n l = []\n for k, v in query:\n k = quote_plus(k)\n if isinstance(v, basestring):\n v = quote_plus(v)\n l.append(k + '=' + v)\n else:\n v = quote_plus(unicode(v))\n l.append(k + '=' + v)\n return '&'.join(l)" ]
[ "0.7584783", "0.74393106", "0.72772896", "0.72347313", "0.72131795", "0.7179806", "0.7172637", "0.7165301", "0.7084647", "0.6912815", "0.6858822", "0.6854565", "0.6832598", "0.6832171", "0.68085265", "0.678919", "0.677692", "0.67722255", "0.6757509", "0.6684567", "0.66667247", "0.66421825", "0.65827596", "0.651286", "0.6480765", "0.638556", "0.63771325", "0.63469625", "0.6346171", "0.62258744" ]
0.79795545
0
Html entity encodes the string.
def _encode_html(data: str) -> str:
    return html.escape(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, text):\n # taken from htmlcss1 writer\n # @@@ A codec to do these and all other HTML entities would be nice.\n text = text.replace(\"&\", \"&amp;\")\n text = text.replace(\"<\", \"&lt;\")\n text = text.replace('\"', \"&quot;\")\n text = text.replace(\">\", \"&gt;\")\n text = text.replace(\"@\", \"&#64;\") # may thwart some address harvesters\n return text", "def escape(cls, html):\n return (\"%s\" % (html)).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def htmlencode(s):\n \ts = s.replace(\"&\", \"&amp;\")\n\ts = s.replace(\"<\", \"&lt;\")\n\ts = s.replace(\">\", \"&gt;\")\n\ts = s.replace(\"\\\"\",\"&quot;\")\n\ts = s.replace(\"'\", \"&apos;\")\n\treturn s", "def encode(string):\n return string.translate(html_entities)", "def escape_html_entity(text):\n parser = HTMLParser.HTMLParser()\n return parser.unescape(text)", "def escape(html):\n if not isinstance(html, unicode):\n if not isinstance(html, str):\n html = unicode(html)\n else:\n html = unicode(html, 'utf-8')\n return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def _escape(html):\n return encoding.force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def render_string(_str):\n\t\treturn str.encode(_str)", "def encode_html(self, text, quotes=True):\n a = (\n ('&', '&amp;'),\n ('<', '&lt;'),\n ('>', '&gt;'))\n\n if quotes:\n a = a + ((\"'\", '&#39;'),\n ('\"', '&#34;'))\n\n for k, v in a:\n text = text.replace(k, v)\n return text", "def HtmlEscape(text):\n return escape(text, _HTML_ESCAPE_TABLE)", "def escape(txt):\n txt = sax_escape(txt, entities=ENTITIES)\n return mark_safe(txt)", "def escape(self, text):\n\t\tif not self.escape_html or text is None:\n\t\t\treturn text\n\n\t\treturn (\n\t\t\ttext.replace('&', '&amp;').replace('<', '&lt;')\n\t\t\t.replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')\n\t\t)", "def _html_esc(string):\n repls = {\n '<': 'lt',\n '>': 'gt',\n '&': 'amp',\n '\"': 'quot',\n }\n\n def repl(matchobj):\n return \"&%s;\" % repls[matchobj.group(0)]\n\n regex = \"([%s])\" % ''.join(repls.keys())\n return re.sub(regex, repl, string)", "def htmlstr(self, unsafe) :\n\t\tunsafe = string.replace(unsafe, '&', '&amp;')\n\t\tunsafe = string.replace(unsafe, '<', '&lt;')\n\t\treturn string.replace(unsafe, '>', '&gt;')", "def escape_html(s):\n\treturn s. \\\n\t\treplace(\"<\", \"&lt;\"). \\\n\t\treplace(\">\", \"&gt;\"). \\\n\t\treplace(\"&\", \"&amp;\"). \\\n\t\treplace(\" \", \"&nbsp;\"). 
\\\n\t\treplace(\"\\t\", \"&nbsp;&nbsp;&nbsp;&nbsp;\")", "def escape(text):\n if (isinstance(text, basestring)):\n try: text = encode(text)\n except: text = copy(text)\n text = text.replace(\"&\", \"&amp;\")\n text = text.replace(\"<\", \"&lt;\")\n text = text.replace(\">\", \"&gt;\")\n return text", "def escape(s):\r\n return str(s).replace('<', '&lt;').replace('>', '&gt;')", "def html_escape(text): \n html_escape_table = {\n \"&\": \"&amp;\",\n '\"': \"&quot;\",\n \"'\": \"&apos;\",\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n }\n return \"\".join(html_escape_table.get(c,c) for c in text)", "def htmlspecialchars(val, flags = None):\n out = \"\"\n for i in range(0, len(val)):\n num = ord(unicode(val[i]))\n if htmlentitydefs.codepoint2name.has_key(num):\n out += \"&%s;\" % htmlentitydefs.codepoint2name[num]\n else:\n out += val[i]\n return out", "def html_escape(s):\n s = html.escape(s, False)\n s = s.replace('\"', \"&quot;\")\n return s", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escapeEncode(s: unicode) -> unicode:\n ...", "def escape_html(self, text):\n return cgi.escape(text, quote=True). \\\n replace(u'\\n', u'<br />'). \\\n replace(u'\\t', u'&emsp;'). \\\n replace(u' ', u' &nbsp;')", "def myescape(str):\n\tif str is not None:\n\t\treturn str.replace('&', '&amp;').replace('<', '&lt;')\n\telse:\n\t\treturn \"\"", "def htmlquote(text):\r\n text = text.replace(\"&\", \"&amp;\") # Must be done first!\r\n text = text.replace(\"<\", \"&lt;\")\r\n text = text.replace(\">\", \"&gt;\")\r\n text = text.replace(\"'\", \"&#39;\")\r\n text = text.replace('\"', \"&quot;\")\r\n return text", "def escape(text):\n return text_type(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')", "def as_html(self):\r\n return mark_safe(' '.join(['%s=\"%s\"' % (k, escape(v if not callable(v) else v()))\r\n for k, v in six.iteritems(self)]))", "def html_escape(u):\n u = _DEFAULT_TAG_ESCAPE(u)\n return u.replace(\"'\", '&#x27;')", "def encode(self, text):", "def escape(self,s):\n\t\ts = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\n\t\ts = s.replace('\"','').replace(\"'\",\"\")\n\t\treturn ''.join([c for c in s if ord(c) > 0x1F])" ]
[ "0.76823986", "0.7436511", "0.7403639", "0.73916006", "0.732849", "0.71537197", "0.70061815", "0.69845724", "0.6976353", "0.6944052", "0.68957525", "0.68801415", "0.6877136", "0.6862977", "0.66983545", "0.6685174", "0.6656498", "0.6608553", "0.65870816", "0.65712136", "0.6549749", "0.6549498", "0.65421396", "0.6540415", "0.6527743", "0.6496181", "0.64838094", "0.6474622", "0.6474312", "0.64735305" ]
0.766779
1
Base64 encodes with space padding to ensure output only contains [azAZ09+].
def _encode_base64pad(data: str) -> str: pattern = r"[^a-zA-Z0-9\+]" regex = re.compile(pattern) while True: ebytes = base64.b64encode(data.encode("utf-8")) estring = str(ebytes, "utf-8") if not regex.findall(estring): break # Pad with trailing space and try again to eliminate base64 pad chars data = data + " " return estring
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _encode_2xbase64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9]\"\n regex = re.compile(pattern)\n while True:\n # First run\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n # Second run\n ebytes = base64.b64encode(estring.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad/special chars\n data = data + \" \"\n\n return estring", "def base64(s):\n return b64encode(s,'[]').replace('=','_')", "def pad_base64_str(str):\n missing_padding = len(str) % 4\n if missing_padding != 0:\n str += '=' * (4 - missing_padding)\n return str", "def base64Encode(input, addNewlines = False):\n base64Str = base64.b64encode(input)\n if not type(base64Str) is str:\n base64Str = \"\".join(map(chr, base64Str))\n \n if not addNewlines:\n return base64Str\n\n result = \"\"\n i = 0\n while i < len(base64Str):\n result += base64Str[i:i + 64] + \"\\n\"\n i += 64\n return result", "def base64_string(self) -> global___Expression:", "def _encode_base64(data: str) -> str:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n return estring", "def my_base64encode(s):\n return base64.b64encode(s).decode(\"utf-8\")", "def base64_filter(val, indent=2):\n if isinstance(val, Undefined):\n return \"\"\n s = json.dumps(val).encode(\"utf-8\")\n return b64encode(s).decode(\"utf-8\")", "def b64enc(data: bytes) -> str:\n\n return base64.standard_b64encode(data).decode(\"utf-8\")", "def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, '=')", "def b64encode(s: str) -> str:\n return base64.b64encode(s.encode()).decode()", "def b64_string(input_string):\n return b64encode(input_string.encode(\"utf-8\")).decode(\"utf-8\")", "def base64_encode(data):\n return base64.encodestring(data);", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index += 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def _add_padding(input_str):\r\n padding_len = AES.block_size - len(input_str) % AES.block_size\r\n return input_str + padding_len * chr(padding_len)", "def base64encode(self, value):\n\n return value.encode(\"base64\")[:-1].replace(\"\\n\", \"\")", "def _b64(b):\n return base64.urlsafe_b64encode(b).decode('utf8').replace(\"=\", \"\")", "def encode_base64(self, i):\n return base64.b64encode(struct.pack('!L', self.transcode(i)), self.extra_chars)[:6]", "def base64_with_linebreaks(der):\n\n b = base64.b64encode(der)\n n = len(b)\n return \"\\n\" + \"\\n\".join(b[i : min(i + 64, n)] for i in xrange(0, n, 64)) + \"\\n\"", "def test_pad_b64(self):\n test1 = {\"value\": b\"any carnal pleasure.\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4=\"}\n test2 = {\"value\": b\"any carnal pleasure\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ\",\n \"padded\": 
\"YW55IGNhcm5hbCBwbGVhc3VyZQ==\"}\n test3 = {\"value\": b\"any carnal pleasur\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\"}\n\n for test in [test1, test2, test3]:\n padded = oidc._pad_b64(test[\"unpadded\"])\n self.assertEqual(test[\"padded\"], padded)\n value = base64.b64decode(padded)\n self.assertEqual(test[\"value\"], value)", "def b64_encode(value: bytes) -> bytes:\n return base64.urlsafe_b64encode(value).strip(b\"=\")", "def _encode_partitial_parameter(data):\n return base64.b64encode(data.encode(\"utf-8\")).decode()", "def jws_b64encode(source):\n return urlsafe_b64encode(source).decode('ascii').rstrip('=')", "def base64_encode(text):\n if not isinstance(text, (bytes, bytearray)):\n text = bytes(text.encode())\n encode = base64.b64encode(text)\n return encode.decode('ascii')", "def encode(uuid_):\n return base64.urlsafe_b64encode(uuid_.bytes)[:-2] # Drop '==' padding", "def _encode(dictionary):\n # Strip ugly base64 padding.\n byteStr = bytearray(json.dumps(dictionary).encode())\n encodedStr = base64.urlsafe_b64encode(byteStr)\n return encodedStr.rstrip('='.encode())", "def _string_to_encoded_string(deck_string):\n return deck_string.encode(\"base64\")", "def _hash_encoder(data: bytes) -> str:\n return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode('ascii')" ]
[ "0.77566636", "0.7302048", "0.71138585", "0.70349246", "0.7016238", "0.6873493", "0.6863052", "0.685882", "0.6854486", "0.6766383", "0.66630834", "0.6654437", "0.66174877", "0.66125387", "0.65900755", "0.65900755", "0.6526679", "0.6524847", "0.6427443", "0.6425861", "0.6408763", "0.64030224", "0.63737214", "0.633421", "0.631343", "0.628697", "0.62837464", "0.62819916", "0.62815565", "0.62684983" ]
0.7900289
0
Base64 encodes twice with space padding to ensure output only contains [azAZ09].
def _encode_2xbase64pad(data: str) -> str: pattern = r"[^a-zA-Z0-9]" regex = re.compile(pattern) while True: # First run ebytes = base64.b64encode(data.encode("utf-8")) estring = str(ebytes, "utf-8") # Second run ebytes = base64.b64encode(estring.encode("utf-8")) estring = str(ebytes, "utf-8") if not regex.findall(estring): break # Pad with trailing space and try again to eliminate base64 pad/special chars data = data + " " return estring
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _encode_base64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9\\+]\"\n regex = re.compile(pattern)\n while True:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad chars\n data = data + \" \"\n\n return estring", "def base64(s):\n return b64encode(s,'[]').replace('=','_')", "def pad_base64_str(str):\n missing_padding = len(str) % 4\n if missing_padding != 0:\n str += '=' * (4 - missing_padding)\n return str", "def base64_string(self) -> global___Expression:", "def base64_filter(val, indent=2):\n if isinstance(val, Undefined):\n return \"\"\n s = json.dumps(val).encode(\"utf-8\")\n return b64encode(s).decode(\"utf-8\")", "def base64Encode(input, addNewlines = False):\n base64Str = base64.b64encode(input)\n if not type(base64Str) is str:\n base64Str = \"\".join(map(chr, base64Str))\n \n if not addNewlines:\n return base64Str\n\n result = \"\"\n i = 0\n while i < len(base64Str):\n result += base64Str[i:i + 64] + \"\\n\"\n i += 64\n return result", "def my_base64encode(s):\n return base64.b64encode(s).decode(\"utf-8\")", "def b64enc(data: bytes) -> str:\n\n return base64.standard_b64encode(data).decode(\"utf-8\")", "def test_pad_b64(self):\n test1 = {\"value\": b\"any carnal pleasure.\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4=\"}\n test2 = {\"value\": b\"any carnal pleasure\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ==\"}\n test3 = {\"value\": b\"any carnal pleasur\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\"}\n\n for test in [test1, test2, test3]:\n padded = oidc._pad_b64(test[\"unpadded\"])\n self.assertEqual(test[\"padded\"], padded)\n value = base64.b64decode(padded)\n self.assertEqual(test[\"value\"], value)", "def _encode_base64(data: str) -> str:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n return estring", "def b64_string(input_string):\n return b64encode(input_string.encode(\"utf-8\")).decode(\"utf-8\")", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index += 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, '=')", "def _add_padding(input_str):\r\n padding_len = AES.block_size - len(input_str) % AES.block_size\r\n return input_str + padding_len * chr(padding_len)", "def base64_with_linebreaks(der):\n\n b = base64.b64encode(der)\n n = len(b)\n return \"\\n\" + \"\\n\".join(b[i : min(i + 64, n)] for i in xrange(0, n, 64)) + \"\\n\"", "def b64encode(s: str) -> str:\n return base64.b64encode(s.encode()).decode()", "def _b64(b):\n return base64.urlsafe_b64encode(b).decode('utf8').replace(\"=\", \"\")", "def base64_encode(data):\n return base64.encodestring(data);", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def urlsafe_base64_encode(s):\n return 
base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def encode_base64(self, i):\n return base64.b64encode(struct.pack('!L', self.transcode(i)), self.extra_chars)[:6]", "def base64encode(self, value):\n\n return value.encode(\"base64\")[:-1].replace(\"\\n\", \"\")", "def zbase32_encode(data: bytes) -> str:\n result = \"\"\n for idx in range(0, len(data), 5):\n result += ZBASE32_ALPHABET[(data[idx] & 0xF8) >> 3]\n if idx + 1 == len(data):\n result += ZBASE32_ALPHABET[(data[idx] & 0x07) << 2]\n break\n result += ZBASE32_ALPHABET[((data[idx] & 0x07) << 2) | ((data[idx + 1] & 0xC0) >> 6)]\n result += ZBASE32_ALPHABET[(data[idx + 1] & 0x3E) >> 1]\n if idx + 2 == len(data):\n result += ZBASE32_ALPHABET[(data[idx + 1] & 0x01) << 4]\n break\n result += ZBASE32_ALPHABET[((data[idx + 1] & 0x01) << 4) | ((data[idx + 2] & 0xF0) >> 4)]\n if idx + 3 == len(data):\n result += ZBASE32_ALPHABET[(data[idx + 2] & 0x0F) << 1]\n break\n result += ZBASE32_ALPHABET[((data[idx + 2] & 0x0F) << 1) | ((data[idx + 3] & 0x80) >> 7)]\n result += ZBASE32_ALPHABET[(data[idx + 3] & 0x7C) >> 2]\n if idx + 4 == len(data):\n result += ZBASE32_ALPHABET[(data[idx + 3] & 0x03) << 3]\n break\n result += ZBASE32_ALPHABET[((data[idx + 3] & 0x03) << 3) | ((data[idx + 4] & 0xE0) >> 5)]\n result += ZBASE32_ALPHABET[data[idx + 4] & 0x1F]\n assert len(result) == (len(data) * 8 + 4) // 5\n return result", "def encode(uuid_):\n return base64.urlsafe_b64encode(uuid_.bytes)[:-2] # Drop '==' padding", "def itob64(n):\n c = hex(n)\n c = c[2:-1] if c[-1] == 'L' else c[2:]\n if len(c)%2:\n c = '0'+c\n x = base64.urlsafe_b64encode(c.decode('hex'))\n return re.sub(r'=*$','',x)", "def padding(string):\r\n\tbinary = ascii_to_binary(string)\r\n\tl = len(binary)\r\n\tif l >= 448:\r\n\t\treturn \"STRING IS TOO LONG\"\r\n\telse:\r\n\t\tbinary += \"1\"\r\n\t\t\t\r\n\t\tfor i in range(448-len(binary)):\r\n\t\t\tbinary += \"0\"\r\n\r\n\t\tbinary = binary + conversions.decimal_to_binary(l, 64)\r\n\r\n\t\treturn binary", "def _hash_encoder(data: bytes) -> str:\n return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode('ascii')", "def b64_encode(value: bytes) -> bytes:\n return base64.urlsafe_b64encode(value).strip(b\"=\")", "def _string_to_encoded_string(deck_string):\n return deck_string.encode(\"base64\")", "def _encode_partitial_parameter(data):\n return base64.b64encode(data.encode(\"utf-8\")).decode()" ]
[ "0.7583209", "0.7163318", "0.7001192", "0.69024086", "0.68460166", "0.68253416", "0.6691286", "0.6659967", "0.66558814", "0.6631023", "0.65908706", "0.6589733", "0.6513986", "0.64372027", "0.6431788", "0.64234906", "0.629073", "0.62886935", "0.6261819", "0.6261819", "0.6244567", "0.62413245", "0.6235836", "0.62288797", "0.6190174", "0.6170892", "0.61455834", "0.6140938", "0.6106813", "0.6101222" ]
0.7789818
0
Retrieve topic schema from the Schema Registry. Returns
async def get_schema(self) -> AvroSchemaT: schema = None try: schema = await self._client.schema_by_topic(self._subject) except Exception: msg = f"Could not retrieve schema for subject {self._subject}." raise SchemaException(msg) return schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))", "def get_schema(self):\r\n return self.__schema", "def _get_schema(self):\n self._pick()\n return Schema()", "def get_schema(cls):\n return cls.schema()", "def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)", "def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA", "def schema(self):\n return self._schema", "def namespace_schema(self, namespace):\n try:\n return self._namespace_schemas[namespace]\n except KeyError:\n raise Error(\"undefined namespace: \\\"%s\\\"; defined namespaces: %s\" % (namespace, util.quoted_list(self._namespace_schemas.keys())))", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def subject_schema(self):\n return self.schemas.get(self.subject_property_name, None)", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def schema(self):\n # type: () -> object\n return self._schema", "def get_schemas(self):\n query = mssqlqueries.get_schemas()\n logger.info(u'Schemas query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def get_meta_schema(self):\n return self._tc_meta_schema", "def get_schema(self, get_stats=False):\n query = \"schema {}\"\n\n results = self.run_dgraph_query_raw(query)\n\n schema = {}\n\n for row in results[\"schema\"]:\n table_name = row[\"predicate\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n return list(schema.values())", "def schema(self):\n schema_el = self.root.xpath(\n '/wsdl:definitions/wsdl:types/xsd:schema', namespaces=NS_MAP,\n )[0]\n return element_as_tree(schema_el)", "def schema(self) -> Schema:\n return next(schema for schema in self.metadata.schemas if schema.schema_id == self.metadata.current_schema_id)", "def kafka_topic(self):\n from corehq.apps.change_feed.topics import get_topic_for_doc_type\n return get_topic_for_doc_type(self.document_class().to_json()['doc_type'])", "def sample_schema(self):\n if 'sample' not in self._schemas:\n logging.debug(f\"{self.id} - no schema? 
{self._schemas}\")\n return None\n return self._schemas['sample']", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def get_schema(self, name, namespace=None):\n avro_name = self.get_name(name=name, namespace=namespace)\n return self._names.get(avro_name.fullname, None)", "def get_schema(self) -> dict:", "def schema(cls):\n return Schema.get_instance(cls)", "def schemas(self):\n return self.get_schemas()", "def get_schemas(self, conn):\n return conn.get_schemas()['table_schema']", "def list_schemas(jwt_payload: dict):\n DJConnector.set_datajoint_config(jwt_payload)\n\n # Attempt to connect return true if successful, false is failed\n return [row[0] for row in dj.conn().query(\"\"\"\n SELECT SCHEMA_NAME FROM information_schema.schemata\n WHERE SCHEMA_NAME != \"information_schema\"\n ORDER BY SCHEMA_NAME\n \"\"\")]", "def __getitem__(self, name) -> 'StarSchema':\n return self.schemas[name]", "def get_schema(schema): # noqa: E501\n return 'do some magic!'" ]
[ "0.69318485", "0.6648944", "0.66151816", "0.65597457", "0.64833015", "0.64112085", "0.6292522", "0.6284893", "0.6240288", "0.61408865", "0.61262184", "0.6110277", "0.60988575", "0.6020327", "0.6005132", "0.60021394", "0.5995657", "0.5993152", "0.5985525", "0.59844315", "0.59834844", "0.5973729", "0.59599215", "0.59503305", "0.5949964", "0.5869613", "0.58631885", "0.5862957", "0.5860451", "0.5829626" ]
0.7383239
0
Get topic fields. Parses the topic Avro schema and returns a list of fields with Python types. Returns
async def get_fields(self) -> List[Field]: schema = await self.get_schema() fields = [] if schema: # The faust-avro parser expects a json-parsed avro schema # https://github.com/masterysystems/faust-avro/blob/master/faust_avro/parsers/avro.py#L20 parsed_schema = self._parse(json.loads(schema)) for field in parsed_schema.fields: fields.append(Field(field.name, field.type.python_type)) return fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields(\n schema: Union[Config, Schema], types: Union[Type, Tuple[Type]] = None\n) -> List[Tuple[str, BaseField]]:\n\n fields = list(schema._fields.items())\n if isinstance(schema, Config):\n fields += list(schema._schema._fields.items())\n\n if types:\n fields = [item for item in fields if isinstance(item[1], types)]\n return fields", "def fields(proto):\n return [x[0].name for x in proto.ListFields()]", "def get_all_fields(\n schema: Union[Schema, Config]\n) -> List[Tuple[str, Schema, BaseField]]:\n if isinstance(schema, Config):\n schema = schema._schema\n\n ret = []\n prefix = schema._key + \".\" if schema._key else \"\"\n for key, field in schema._fields.items():\n ret.append((prefix + key, schema, field))\n if isinstance(field, Schema):\n ret.extend(\n [\n (prefix + subkey, schema, subfield)\n for subkey, schema, subfield in get_all_fields(field)\n ]\n )\n return ret", "def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)", "def parse(self):\n result = []\n for field in self.get_fields():\n result.append(field.get_field())\n return result", "def get_fields(self):\n return list(self.metadata.keys())", "def get_fields(self):\n \n return self.metadata.keys()", "async def get_schema_info(self, collection):\n await self.ensure_collection(collection)\n try:\n # Luke handler is not supported in API v2 yet.\n # /v2/collections/<COLLECTION>/schema/fields doesn't show dynamically\n # created fields.\n # So using old API (/solr/...).\n response = await self.get(\n '/solr/{}/admin/luke?numTerms=0'.format(collection)\n )\n return json.loads(response.body.decode('utf-8'))\n except SolrError:\n logger.warning('Failed to fetch fields list for collection {}'\n .format(collection))\n raise", "def print_topic_fields(field_name, msg, depth):\n if hasattr(msg, '__slots__'):\n \"\"\" This level of the message has more fields within it. Display the current\n level, and continue descending through the structure.\n \"\"\"\n print(' ' * (depth * 2) + field_name)\n for slot in msg.__slots__:\n print_topic_fields(slot, getattr(msg, slot), depth + 1)\n elif isinstance(msg, list):\n \"\"\" We found a vector of field names. 
Display the information on the current\n level, and use the first element of the vector to display information\n about its content\n \"\"\"\n if (len(msg) > 0) and hasattr(msg[0], '__slots__'):\n print(' ' * (depth * 2) + field_name + '[]')\n for slot in msg[0].__slots__:\n print_topic_fields(slot, getattr(msg[0], slot), depth + 1)\n else:\n \"\"\" We have reached a terminal leaf, i.e., and field with an actual value attached.\n Just print the name at this point.\n \"\"\"\n print(' ' * (depth * 2) + field_name)", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def _get_fields(self):\n return self._fields", "def read_fields(self, limit = 0, collapse = False):\r\n\r\n keys = []\r\n probes = {}\r\n\r\n def probe_record(record, parent = None):\r\n for key, value in record.items():\r\n full_key = parent + \".\" + key if parent else key\r\n\r\n if self.expand and type(value) == dict:\r\n probe_record(value, full_key)\r\n continue\r\n\r\n if not full_key in probes:\r\n probe = brewery.dq.FieldTypeProbe(full_key)\r\n probes[full_key] = probe\r\n keys.append(full_key)\r\n else:\r\n probe = probes[full_key]\r\n probe.probe(value)\r\n\r\n count = 0\r\n for record in self.records():\r\n if collapse:\r\n record = collapse_record(record)\r\n\r\n probe_record(record)\r\n if limit and count >= limit:\r\n break\r\n count += 1\r\n\r\n fields = []\r\n\r\n for key in keys:\r\n probe = probes[key]\r\n field = Field(probe.field)\r\n\r\n storage_type = probe.unique_storage_type\r\n if not storage_type:\r\n field.storage_type = \"unknown\"\r\n elif storage_type == \"unicode\":\r\n field.storage_type = \"string\"\r\n else:\r\n field.storage_type = \"unknown\"\r\n field.concrete_storage_type = storage_type\r\n\r\n # FIXME: Set analytical type\r\n\r\n fields.append(field)\r\n\r\n self.fields = list(fields)\r\n return self.fields", "def get_data(self, topic, datetime_from=None, datetime_to=None):\n try:\n with self.__db_lock:\n time_column = \"timestamp\"\n sql = \"SELECT `{}`, `data`, `format_string` FROM `events` WHERE `topic` == ? 
ORDER BY `{}` ASC\".format(\n time_column, time_column)\n self.__cursor.execute(sql, (topic,))\n data = self.__cursor.fetchall()\n if data is None or len(data) == 0:\n return []\n\n # first column holds the datetime, second is the data (bytes), third is the format string, fourth is the timestamp\n data_decoded = []\n for d in data:\n timestamp = d[0]\n if d[2] == Database.__BYTES_DB_FORMAT_STRING:\n data = d[1]\n elif d[2] == Database.__UTF8_DB_FORMAT_STRING:\n data = d[1].decode('utf-8')\n else:\n data = struct.unpack(d[2], d[1])[0]\n data_decoded.append([timestamp, data])\n return data_decoded\n except Exception as e:\n logging.error(\n \"Exception when trying to get topics list: {}\".format(e))\n return []", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def get_avro_translated_schema(self):\n type_conversions = {\n 'STRING': 'string',\n 'NUMERIC': {\n 'type': 'bytes',\n 'logicalType': 'decimal',\n 'precision': 38,\n 'scale': 9,\n }\n }\n\n fields = []\n # TODO([email protected]): add support for nested fields\n for bq_field in self.bq_schema:\n field_type = type_conversions[bq_field.field_type]\n\n field = {\n 'name': bq_field.name,\n 'type': field_type,\n }\n\n fields.append(field)\n\n schema_dict = {\n 'type': 'record',\n 'name': self.schema_name,\n 'fields': fields,\n }\n avro_schema = avro.schema.Parse(json.dumps(schema_dict))\n\n return avro_schema", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def listFields(self):\n return self.get_json('/field')", "def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields", "def schema_as_fieldlist(content_schema: Dict[str, Any], path: str = \"\") -> List[Any]:\n fields = []\n\n if \"properties\" in content_schema:\n required_fields = content_schema.get(\"required\", ())\n\n for prop, options in content_schema[\"properties\"].items():\n new_path = path + \".\" + prop if path else prop\n required = (\n options[\"required\"]\n if \"required\" in options\n else prop in required_fields\n )\n\n if \"type\" not in options:\n fields.append(FieldDescription.load(options, new_path, required))\n elif options[\"type\"] == \"object\":\n fields.append(FieldDescription.load(options, new_path, required))\n fields.extend(schema_as_fieldlist(options, path=new_path))\n elif options[\"type\"] == \"array\":\n fields.append(FieldDescription.load(options, new_path, required))\n fields.extend(\n schema_as_fieldlist(options[\"items\"], path=new_path + \".[]\")\n )\n else:\n fields.append(FieldDescription.load(options, new_path, required))\n\n if \"items\" in content_schema:\n new_path = path + \".\" + \"[]\" if path else \"[]\"\n content_schema[\"type\"] = \"array of {}s\".format(\n deduce_type(content_schema[\"items\"])\n )\n fields.append(FieldDescription.load(content_schema, new_path))\n fields.extend(schema_as_fieldlist(content_schema[\"items\"], path=new_path))\n\n return fields", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def get_proto_fields():\n raise NotImplementedError()", "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def get_fields(cls):\n return cls.fields.values()", "def get_fields(self):\n\n\t\treturn self.__fields", "def get_fields(self):\n\n fields = {}\n LOGGER.debug('Treating all columns as string types')\n if os.path.exists(self.data):\n with 
open(self.data) as src:\n data = json.loads(src.read())\n for key, value in data['features'][0]['properties'].items():\n if isinstance(value, float):\n type_ = 'number'\n elif isinstance(value, int):\n type_ = 'integer'\n else:\n type_ = 'string'\n\n fields[key] = {'type': type_}\n else:\n LOGGER.warning(f'File {self.data} does not exist.')\n return fields", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def get_fields(self):\n field_list = []\n for field in self._meta.local_fields:\n if not field.primary_key:\n field_list.append([field.verbose_name.title(),\n self.__getattribute__(field.name),\n field.get_internal_type()])\n return field_list" ]
[ "0.6299269", "0.60301435", "0.5931615", "0.5922192", "0.57615733", "0.57452226", "0.56565475", "0.5619404", "0.55596054", "0.5541145", "0.5519022", "0.5483845", "0.54358023", "0.54198164", "0.53955734", "0.5376999", "0.5376526", "0.537617", "0.53621113", "0.53554416", "0.5350862", "0.5350658", "0.53304034", "0.5315039", "0.52774143", "0.5243031", "0.5237889", "0.5225857", "0.5225422", "0.521796" ]
0.72090507
0
Register an Avro schema with the Schema Registry. If the schema is already register for this subject it does nothing.
async def register(self, schema: AvroSchemaT) -> Union[int, None]: logger.info(f"Register schema for subject {self._subject}.") is_registered = False try: is_registered = await self._client.is_registered( self._subject, schema ) except Exception: msg = "Could not connect to Schema Registry." raise SchemaException(msg) schema_id = None if not is_registered: try: schema_id = await self._client.register(self._subject, schema) except Exception: msg = f"Could not register schema for subject {self._subject}." raise SchemaException(msg) return schema_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self, schema):\n if schema.fullname in VALID_TYPES:\n raise SchemaParseException(\n '%s is a reserved type name.' % schema.fullname)\n if schema.fullname in self.names:\n raise SchemaParseException(\n 'Avro name %r already exists.' % schema.fullname)\n\n logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)\n self._names[schema.fullname] = schema", "def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return None", "def schema(self, schema):\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def set_schema(self, schema):\r\n self.__schema = schema", "def create_schema(self, schema: str):\n return", "async def register_schemas(\n self, *, compatibility: str | None = None\n ) -> None:\n for subject_name, schema in self.schemas.items():\n await self._registry.register_schema(\n schema=schema,\n subject=subject_name,\n compatibility=compatibility,\n )", "def registration_schema(self, ctx):\n schema = RegistrationSchema()\n schema.context['ctx'] = ctx\n return schema", "def schema(self, schema):\n # type: (object) -> None\n\n if schema is not None:\n if not isinstance(schema, object):\n raise TypeError(\"Invalid type for `schema`, type has to be `object`\")\n\n self._schema = schema", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def registerDefault(registry=None):\n if registry is None:\n registry = implicit.registry\n\n registry.register(\n (schema.interfaces.IField,),\n interfaces.IFieldFactory, u'', SchemaFieldFactory)\n\n registry.register(\n (IInterface,),\n interfaces.IFieldFactory, u'', InterfaceSchemaFieldFactory)\n\n registerSchemaField(\n SchemaField, schema_interfaces.IField, registry=registry)", "def insert(self, dev_schema):\n headers = self.get_headers()\n rv = self.api_request('POST', self.url + '/device', dev_schema, headers)\n if rv is None:\n raise Exception('Failed to store device schema')\n return rv", "def update_schema(self, engine_name, schema):\n endpoint = \"engines/{}/schema\".format(engine_name)\n data = json.dumps(schema)\n return self.swiftype_session.request('post', endpoint, data=data)", "def schema_helper(self, name, _, schema=None, **kwargs):\n if schema is None:\n return None\n\n schema_instance = resolve_schema_instance(schema)\n\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n\n return json_schema", "def _load_schemas(self) -> None:\n schema_paths = self._root.rglob(\"*.json\")\n for schema_path in schema_paths:\n schema = json.loads(schema_path.read_text())\n\n if self._suffix:\n schema[\"name\"] = f'{schema[\"name\"]}{self._suffix}'\n\n fqn = get_avro_fqn(schema)\n self.schemas[fqn] = schema", "def addSchemaFile(self, newSchemaFile):\n\t\tself.schemaFile.append(newSchemaFile)", "def extend_schema(schema, documentAST=None):\n\n assert isinstance(schema, GraphQLSchema), \"Must provide valid GraphQLSchema\"\n assert documentAST and isinstance(\n documentAST, ast.Document\n ), \"Must provide valid Document AST\"\n\n # Collect the type definitions and extensions found in the document.\n type_definition_map = {}\n type_extensions_map = 
defaultdict(list)\n\n for _def in documentAST.definitions:\n if isinstance(\n _def,\n (\n ast.ObjectTypeDefinition,\n ast.InterfaceTypeDefinition,\n ast.EnumTypeDefinition,\n ast.UnionTypeDefinition,\n ast.ScalarTypeDefinition,\n ast.InputObjectTypeDefinition,\n ),\n ):\n # Sanity check that none of the defined types conflict with the\n # schema's existing types.\n type_name = _def.name.value\n if schema.get_type(type_name):\n raise GraphQLError(\n (\n 'Type \"{}\" already exists in the schema. It cannot also '\n + \"be defined in this type definition.\"\n ).format(type_name),\n [_def],\n )\n\n type_definition_map[type_name] = _def\n elif isinstance(_def, ast.TypeExtensionDefinition):\n # Sanity check that this type extension exists within the\n # schema's existing types.\n extended_type_name = _def.definition.name.value\n existing_type = schema.get_type(extended_type_name)\n if not existing_type:\n raise GraphQLError(\n (\n 'Cannot extend type \"{}\" because it does not '\n + \"exist in the existing schema.\"\n ).format(extended_type_name),\n [_def.definition],\n )\n if not isinstance(existing_type, GraphQLObjectType):\n raise GraphQLError(\n 'Cannot extend non-object type \"{}\".'.format(extended_type_name),\n [_def.definition],\n )\n\n type_extensions_map[extended_type_name].append(_def)\n\n # Below are functions used for producing this schema that have closed over\n # this scope and have access to the schema, cache, and newly defined types.\n\n def get_type_from_def(type_def):\n type = _get_named_type(type_def.name)\n assert type, \"Invalid schema\"\n return type\n\n def get_type_from_AST(astNode):\n type = _get_named_type(astNode.name.value)\n if not type:\n raise GraphQLError(\n (\n 'Unknown type: \"{}\". Ensure that this type exists '\n + \"either in the original schema, or is added in a type definition.\"\n ).format(astNode.name.value),\n [astNode],\n )\n return type\n\n # Given a name, returns a type from either the existing schema or an\n # added type.\n def _get_named_type(typeName):\n cached_type_def = type_def_cache.get(typeName)\n if cached_type_def:\n return cached_type_def\n\n existing_type = schema.get_type(typeName)\n if existing_type:\n type_def = extend_type(existing_type)\n type_def_cache[typeName] = type_def\n return type_def\n\n type_ast = type_definition_map.get(typeName)\n if type_ast:\n type_def = build_type(type_ast)\n type_def_cache[typeName] = type_def\n return type_def\n\n # Given a type's introspection result, construct the correct\n # GraphQLType instance.\n def extend_type(type):\n if isinstance(type, GraphQLObjectType):\n return extend_object_type(type)\n if isinstance(type, GraphQLInterfaceType):\n return extend_interface_type(type)\n if isinstance(type, GraphQLUnionType):\n return extend_union_type(type)\n return type\n\n def extend_object_type(type):\n return GraphQLObjectType(\n name=type.name,\n description=type.description,\n interfaces=lambda: extend_implemented_interfaces(type),\n fields=lambda: extend_field_map(type),\n )\n\n def extend_interface_type(type):\n return GraphQLInterfaceType(\n name=type.name,\n description=type.description,\n fields=lambda: extend_field_map(type),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_union_type(type):\n return GraphQLUnionType(\n name=type.name,\n description=type.description,\n types=list(map(get_type_from_def, type.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def extend_implemented_interfaces(type):\n interfaces = list(map(get_type_from_def, type.interfaces))\n\n # If 
there are any extensions to the interfaces, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for namedType in extension.definition.interfaces:\n interface_name = namedType.name.value\n if any([_def.name == interface_name for _def in interfaces]):\n raise GraphQLError(\n (\n 'Type \"{}\" already implements \"{}\". '\n + \"It cannot also be implemented in this type extension.\"\n ).format(type.name, interface_name),\n [namedType],\n )\n interfaces.append(get_type_from_AST(namedType))\n\n return interfaces\n\n def extend_field_map(type):\n new_field_map = OrderedDict()\n old_field_map = type.fields\n for field_name, field in old_field_map.items():\n new_field_map[field_name] = GraphQLField(\n extend_field_type(field.type),\n description=field.description,\n deprecation_reason=field.deprecation_reason,\n args=field.args,\n resolver=cannot_execute_client_schema,\n )\n\n # If there are any extensions to the fields, apply those here.\n extensions = type_extensions_map[type.name]\n for extension in extensions:\n for field in extension.definition.fields:\n field_name = field.name.value\n if field_name in old_field_map:\n raise GraphQLError(\n (\n 'Field \"{}.{}\" already exists in the '\n + \"schema. It cannot also be defined in this type extension.\"\n ).format(type.name, field_name),\n [field],\n )\n new_field_map[field_name] = GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n\n return new_field_map\n\n def extend_field_type(type):\n if isinstance(type, GraphQLList):\n return GraphQLList(extend_field_type(type.of_type))\n if isinstance(type, GraphQLNonNull):\n return GraphQLNonNull(extend_field_type(type.of_type))\n return get_type_from_def(type)\n\n def build_type(type_ast):\n _type_build = {\n ast.ObjectTypeDefinition: build_object_type,\n ast.InterfaceTypeDefinition: build_interface_type,\n ast.UnionTypeDefinition: build_union_type,\n ast.ScalarTypeDefinition: build_scalar_type,\n ast.EnumTypeDefinition: build_enum_type,\n ast.InputObjectTypeDefinition: build_input_object_type,\n }\n func = _type_build.get(type(type_ast))\n if func:\n return func(type_ast)\n\n def build_object_type(type_ast):\n return GraphQLObjectType(\n type_ast.name.value,\n interfaces=lambda: build_implemented_interfaces(type_ast),\n fields=lambda: build_field_map(type_ast),\n )\n\n def build_interface_type(type_ast):\n return GraphQLInterfaceType(\n type_ast.name.value,\n fields=lambda: build_field_map(type_ast),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_union_type(type_ast):\n return GraphQLUnionType(\n type_ast.name.value,\n types=list(map(get_type_from_AST, type_ast.types)),\n resolve_type=cannot_execute_client_schema,\n )\n\n def build_scalar_type(type_ast):\n return GraphQLScalarType(\n type_ast.name.value,\n serialize=lambda *args, **kwargs: None,\n # Note: validation calls the parse functions to determine if a\n # literal value is correct. Returning null would cause use of custom\n # scalars to always fail validation. 
Returning false causes them to\n # always pass validation.\n parse_value=lambda *args, **kwargs: False,\n parse_literal=lambda *args, **kwargs: False,\n )\n\n def build_enum_type(type_ast):\n return GraphQLEnumType(\n type_ast.name.value,\n values={v.name.value: GraphQLEnumValue() for v in type_ast.values},\n )\n\n def build_input_object_type(type_ast):\n return GraphQLInputObjectType(\n type_ast.name.value,\n fields=lambda: build_input_values(type_ast.fields, GraphQLInputObjectField),\n )\n\n def build_implemented_interfaces(type_ast):\n return list(map(get_type_from_AST, type_ast.interfaces))\n\n def build_field_map(type_ast):\n return {\n field.name.value: GraphQLField(\n build_field_type(field.type),\n args=build_input_values(field.arguments),\n resolver=cannot_execute_client_schema,\n )\n for field in type_ast.fields\n }\n\n def build_input_values(values, input_type=GraphQLArgument):\n input_values = OrderedDict()\n for value in values:\n type = build_field_type(value.type)\n input_values[value.name.value] = input_type(\n type, default_value=value_from_ast(value.default_value, type)\n )\n return input_values\n\n def build_field_type(type_ast):\n if isinstance(type_ast, ast.ListType):\n return GraphQLList(build_field_type(type_ast.type))\n if isinstance(type_ast, ast.NonNullType):\n return GraphQLNonNull(build_field_type(type_ast.type))\n return get_type_from_AST(type_ast)\n\n # If this document contains no new types, then return the same unmodified\n # GraphQLSchema instance.\n if not type_extensions_map and not type_definition_map:\n return schema\n\n # A cache to use to store the actual GraphQLType definition objects by name.\n # Initialize to the GraphQL built in scalars and introspection types. All\n # functions below are inline so that this type def cache is within the scope\n # of the closure.\n\n type_def_cache = {\n \"String\": GraphQLString,\n \"Int\": GraphQLInt,\n \"Float\": GraphQLFloat,\n \"Boolean\": GraphQLBoolean,\n \"ID\": GraphQLID,\n \"__Schema\": __Schema,\n \"__Directive\": __Directive,\n \"__DirectiveLocation\": __DirectiveLocation,\n \"__Type\": __Type,\n \"__Field\": __Field,\n \"__InputValue\": __InputValue,\n \"__EnumValue\": __EnumValue,\n \"__TypeKind\": __TypeKind,\n }\n\n # Get the root Query, Mutation, and Subscription types.\n query_type = get_type_from_def(schema.get_query_type())\n\n existing_mutation_type = schema.get_mutation_type()\n mutationType = (\n existing_mutation_type and get_type_from_def(existing_mutation_type) or None\n )\n\n existing_subscription_type = schema.get_subscription_type()\n subscription_type = (\n existing_subscription_type\n and get_type_from_def(existing_subscription_type)\n or None\n )\n\n # Iterate through all types, getting the type definition for each, ensuring\n # that any type not directly referenced by a field will get created.\n types = [get_type_from_def(_def) for _def in schema.get_type_map().values()]\n\n # Do the same with new types, appending to the list of defined types.\n types += [get_type_from_AST(_def) for _def in type_definition_map.values()]\n\n # Then produce and return a Schema with these types.\n return GraphQLSchema(\n query=query_type,\n mutation=mutationType,\n subscription=subscription_type,\n # Copy directives.\n directives=schema.get_directives(),\n types=types,\n )", "def _set_schema(self, schema_value):\n self._id = schema_value.id\n\n if type(self).__name__ != schema_value.type:\n # Make sure this object is the correct type.\n raise ValueError('Cannot convert a {} protocol to a {}.'\n 
.format(str(type(self)), schema_value.type))\n\n for input_full_path in schema_value.inputs:\n\n value = copy.deepcopy(schema_value.inputs[input_full_path])\n\n input_path = ProtocolPath.from_string(input_full_path)\n self.set_value(input_path, value)", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def create_schema(self, schema):\n base = '/api/storage/v1/schema'\n svc = \"%(base)s/%(prop)s\" % {'base': base, 'prop': schema['property']}\n ret = self.rclient.get(svc)\n if ret.status == restclient.Status.OK:\n LOG.warning('Property %s already exists.', schema['property'])\n return\n ret = self.rclient.post(base, schema)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error Creating '\n 'Property: %(property)s '\n 'Type: %(type)s '\n 'Description: %(description)s '\n 'Return code: %(ret.status)d '\n 'Message: %(ret.data)s.')\n % {'property': schema['property'],\n 'type': schema['type'],\n 'description': schema['description'],\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)\n raise exception.ShareBackendException(msg=exception_msg)", "def schema_registry(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"schema_registry\")", "def merge_attribute_schema(self, attr_schema):\n name = attr_schema.name\n if name not in self.schema:\n self.schema[name] = attr_schema\n else:\n self.schema[name].merge_schema(attr_schema)", "def check_for_schema(cls):\n if not hasattr(cls, \"Schema\") or cls.Schema is None:\n raise PillowtalkError(\"Schema not found. @add_schema may not have been added to class definition.\")", "def register(self):\n assert not self.is_registered\n with transaction.atomic():\n cursor = connection.cursor()\n cursor.execute(\"SELECT audit.audit_table(%s)\", [self.model_content_type.model_class()._meta.db_table])\n self.is_registered = True\n self.save()", "def load_dde_schemas(self, schema):\n if self.verbose:\n print(f'Loading registered DDE schema \"{schema}\"')\n schema_source = schemas.get(schema)\n schema_source.pop(\"_id\")\n return schema_source", "def validate_schema(self, schema):\n if type(schema) is not type(self):\n raise AttributeSchemaError(\n \"Expected schema to have type '%s'; found '%s'\"\n % (type(self), type(schema))\n )\n\n if schema.name != self.name:\n raise AttributeSchemaError(\n \"Expected schema to have name '%s'; found '%s'\"\n % (self.name, schema.name)\n )", "def serializeSchema(schema):\n\n # determine portal_type\n try:\n prefix, portal_type, schemaName = splitSchemaName(schema.__name__)\n except ValueError:\n # not a dexterity schema\n return\n\n # find the FTI and model\n fti = queryUtility(IDexterityFTI, name=portal_type)\n if fti.model_source:\n model = fti.lookupModel()\n\n # synchronize changes to the model\n syncSchema(schema, model.schemata[schemaName], overwrite=True)\n fti.model_source = serializeModel(model)\n else:\n raise TypeError(\"Changes to non-dynamic schemata not yet supported.\")", "def __init__(self, writer_schema, output_file=None, skip_validate=True):\n self._writer_schema = writer_schema\n self._writer_schema_json = json.loads(str(self._writer_schema))\n self._output_file = output_file\n self._set_avro_writers()" ]
[ "0.75095624", "0.636198", "0.6255365", "0.62535286", "0.62535286", "0.62535286", "0.60564584", "0.6006052", "0.59905624", "0.5787812", "0.5713144", "0.5643266", "0.5495948", "0.5483089", "0.54456383", "0.53514594", "0.5350759", "0.51839006", "0.5180588", "0.51743895", "0.5140542", "0.5093603", "0.50920594", "0.507335", "0.5058553", "0.5058542", "0.50542456", "0.50287694", "0.5023872", "0.5012338" ]
0.7447215
1
This function obtains Excel file path using tkinter module.
def get_file_path(): root = tk.Tk() root.withdraw() file_path = filedialog.askopenfilename(filetypes=[("Excel file", "*.xlsx")]) return file_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_file():\n \n from tkinter import Tk\n from tkinter.filedialog import askopenfilename\n \n # Make a top-level instance and hide from user.\n root = Tk()\n root.withdraw()\n\n # Make it almost invisible - no decorations, 0 size, top left corner.\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n\n # Show window again and lift it to top so it can get focus, otherwise dialogs will end up behind the terminal.\n root.deiconify()\n root.lift()\n root.focus_force()\n\n # Show an \"Open\" dialog box and return the path to the selected file\n file_path = askopenfilename(initialdir='./IR_Datasets/',\n title='Excel to Read',\n filetypes=(('New Excel', '*xlsx'), ('Old Excel', '*.xls')),\n parent=root)\n\n # Get rid of the top-level instance once to make it actually invisible.\n root.destroy()\n \n return file_path", "def filepicker():\n import tkinter as tk\n from tkinter import filedialog\n\n root = tk.Tk()\n root.withdraw()\n\n file_path = filedialog.askopenfilename()\n return file_path", "def input_file(self):\r\n try:\r\n f = tkFileDialog.askopenfilename(parent=self.top, initialdir=\"/home/marcin/pulpit/Py/\",\r\n title=\"Wybór pliku excel z danymi\",\r\n filetypes=[(\"Excel file\", \".xlsx\")])\r\n self.filepath_input.set(os.path.realpath(f))\r\n self.excel_input_file = os.path.realpath(f)\r\n except ValueError:\r\n tkMessageBox.showerror(\"Error\", \"Wystąpił problem z załadowaniem pliku excel z danymi.\")", "def get_path_via_file_ui():\n\n import Tkinter as tk\n import tkFileDialog as filedialog\n root = tk.Tk()\n root.withdraw()\n return filedialog.askopenfilename()", "def get_workbook_path():\n working_dir = get_working_dir()\n if 'config.txt' not in os.listdir(working_dir):\n create_config_file()\n create_progress_workbook()\n\n with open(working_dir + os.sep + 'config.txt') as config_file:\n workbook_path = config_file.read().strip()\n return workbook_path", "def filePath(self):\n # get filename\n filename = tkFileDialog.askopenfilename()\n return str(filename)", "def getExcelApp(self):\r\n return self.excelapp", "def get_input_name():\n xlsTypes = [(\"Файлы Excel или csv\", \".xls .xlsx\")]\n return askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=xlsTypes, title=\"Выберите файлы Excel или CSV\")", "def entry_set_excel(self, entry):\r\n global exceldokument\r\n exceldokument = filedialog.askopenfilename(filetypes=[(\"Excel file\",\"*.xlsx\"),(\"Excel file\", \"*.xlsm\")])\r\n entry.delete(0, 'end')\r\n entry.insert(tk.END, exceldokument)", "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def choose_file():\r\n import tkinter\r\n from tkinter import filedialog\r\n\r\n root_window = tkinter.Tk()\r\n root_window.withdraw()\r\n\r\n return filedialog.askopenfilename()", "def lectxl(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".xlsx\")\n workbook = xlrd.open_workbook(NOM)\n SheetNameList = workbook.sheet_names()\n worksheet = workbook.sheet_by_name(SheetNameList[0])\n num_rows = worksheet.nrows \n f=[NOM]\n for curr_row in range(0,num_rows):\n row = worksheet.row(curr_row)\n f.append(row)\n return f", "def getFile():\n from tkinter import Tk, filedialog\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n return(filedialog.askopenfilenames())", "def 
get_excel(exceldocument):\r\n\r\n sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)\r\n return sheet", "def OpenFileExcel(self, *args, **kwargs):\n directory = None\n if kwargs is not None:\n for key, value in kwargs.items():\n if key == 'directory':\n directory = value\n\n\n\n with wx.FileDialog(self, \"Open report file\", directory,\n wildcard=\"excel files (*.xlsx)|*.xlsx|(*.xls)|*.xlsx|(*.csv)|*.csv\",\n style=wx.FD_OPEN) as fileDialog:\n \n if fileDialog.ShowModal() == wx.ID_CANCEL:\n return \n\n\n else:\n\n pathname = fileDialog.GetPath()\n print('the file to be opened is :'+ pathname)\n\n def openWorkbook(xlapp, xlfile):\n try:\n xlwb = xlapp.Workbooks(xlfile)\n except Exception as e:\n try:\n xlwb = xlapp.Workbooks.Open(xlfile)\n except Exception as e:\n print(e)\n xlwb = None\n return (xlwb)\n\n pathname = os.path.normcase(pathname)\n\n\n try:\n excel = win32.gencache.EnsureDispatch('Excel.Application')\n wb = openWorkbook(excel, pathname)\n #ws = wb.Worksheets('Sheet1')\n excel.Visible = True\n except Exception as e:\n print(e)\n\n finally:\n # RELEASES RESOURCES\n ws = None\n wb = None\n excel = None", "def getFilePathToEntry(self):\n\n\t\tpath = askopenfilename(filetypes = (\"All files\", \"*.*\"), parent = self.parent)\n\n\t\t# Once self.filePath gets a filepath, delete what's in the entry and put self.filePath into the entry\n\t\tif path != '':\n\t\t\tself.filePathEntry.delete(0, 'end')\n\t\t\tself.filePathEntry.insert(0, path)", "def callDialog(self):\n self.pathTuple = filedialog.askopenfilenames(filetypes=[(\"Excel files\", \".xlsx .xls .xlsm .xlsb\")])\n self.fileNames = [basename(path.abspath(name)) for name in self.pathTuple]", "def load_file(self):\n return tkinter.filedialog.askopenfilename(defaultextension=\".txt\")", "def get_waiter_excel_financial_report_path(self, staff_id, period):\n try:\n headers = ('№п/п', 'Назва події', 'Check-in', 'Check-out', 'Час на зміні', 'Рейтинг', 'Нараховано')\n path = f'{config.WORKING_DIR}/user_reports/{staff_id}_{period}.xlsx'\n\n if os.path.isfile(path):\n os.remove(path)\n\n staff = self.get_staff_by_id(staff_id)\n report = self.get_staff_financial_report_for_month(staff_id, period)\n report_with_headers = [headers]\n sum_for_month = sum([x[6] for x in report if x[6] is not None])\n\n for rep in report:\n report_with_headers.append(rep)\n\n report_with_headers.append(('','','','','','',sum_for_month))\n\n self.generate_waiter_financial_report_excel_file(staff, period, report_with_headers, path)\n self.logger.write_to_log('excel file with month report generated', 'model')\n\n return path\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def askOpenFile(dirname=\".\"):\n\n import Tkinter,tkFileDialog\n root = Tkinter.Tk()\n file = tkFileDialog.askopenfile(parent=root,mode='rb',title='Choose a file',initialdir=dirname)\n return file", "def load_youd_bartlett_demo():\n return read_excel(join(dirname(__file__),'YoudHansenBartlett2002_demo.xls'))", "def OpenTOLNFile():\n global TOLNpath\n TOLNpath = tkinter.filedialog.askopenfilename(filetypes = [('TXT', '.txt')])\n if TOLNpath != '':\n label3.config(text = \"您选择的文件是:\" + TOLNpath)\n else:\n label3.config(text = \"您没有选择任何文件\")\n #TOLN_file = open(TOLNfilename,'r',errors = \"ignore\")\n return", "def getFolder():\n from tkinter import Tk, filedialog\n Tk().withdraw() # we don't want a full GUI, so keep the root 
window from appearing\n return(filedialog.askdirectory()+'/')", "def _get_file_path(self):\n self.select_pdf()\n self.file_path_label.configure(\n text=self._shorten_file_name())\n self.file_path_label.grid(row=0, column=1)", "def excel(df_ccl, df_arg_stocks, df_bonds, df_arg_stocks_ccl):\n if os.path.exists('CCL.xlsx'):\n wb = xw.Book('CCL.xlsx')\n # SHEET CEDEARS\n ws = wb.sheets('CCL CEDEARs')\n ws.range('A1').expand().value = df_ccl\n # SHEET MERVAL\n ws_merval = wb.sheets('Merval')\n ws_merval.range('A1').expand().value = df_arg_stocks\n # SHEET BONOS\n ws_bonds = wb.sheets('Bonos')\n ws_bonds.range('A1').expand().value = df_bonds\n # SHEET CCL MERVAL\n ws_ccl = wb.sheets('CCL ADRs')\n ws_ccl.range('A1').expand().value = df_arg_stocks_ccl\n\n tiempo = time.asctime()\n print('Carga exitosa de datos. Ultima ejecución: ',tiempo)", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def save_xls(self,basepath=''): \n self.generate_xls()\n self.wb.save(basepath+self.filename+'.xls')", "def selected_filepath(self):\n return self.__make_path(self.selected_filename)", "def save_file_cegly(self):\r\n try:\r\n save = tkFileDialog.asksaveasfilename(parent=self.top, initialdir=\"/home/marcin/pulpit/\",\r\n title=\"Wybór pliku do zapisu danych z Cegieł\",\r\n filetypes=[(\"Excel file\", \".xlsx\")])\r\n self.filepath_CEGLY.set((os.path.realpath(save)))\r\n self.CEGLY_file = os.path.realpath(save)\r\n except ValueError:\r\n tkMessageBox.showerror(\"Error\", \" Wystąpił problem z plikiem do zapisu danych z Cegieł.\")", "def get_excel(self, file_name):\n global download_component\n\n download_soup = BeautifulSoup(self.res.text, 'lxml')\n download_component = get_download_component(download_soup)\n\n #Start excel session\n xsess = requests.Session()\n xsess.headers = EXCEL_HEADERS\n \n #prepare excel session\n self.data['SAPEVENTQUEUE'] = \"Button_Press~E002Id~E004\" + \\\n download_component + \"~E003~E002ResponseData~E004delta~E005ClientAction~E004submit~E003~E002~E003\"\n self.res = self.sess.post(self.url, data=self.data)\n\n #parse data from prepared excel session\n fileid, action = get_excel_url(BeautifulSoup(self.res.text,'lxml-xml')) \n \n #replace\n xurl = HOST_URL + action\n xurl = xurl.replace(\"\\\\x2f\",\"/\")\n xurl = xurl.replace(\"\\\\x7e\",\"~\")\n xurl = xurl.replace(\"\\\\x3f\", \"?\")\n xurl = xurl.replace(\"\\\\x2d\",\"-\")\n xurl = xurl.replace(\"\\\\x3d\",\"=\")\n xurl = xurl.replace(\"\\\\x253a\",\":\")\n xurl = xurl.replace(\"\\\\x26\",\"&\")\n xres = xsess.post(xurl)\n \n #write file\n with open(file_name,'wb') as f:\n f.write(xres.content)" ]
[ "0.67193115", "0.65942097", "0.6497085", "0.6414859", "0.6397723", "0.6352703", "0.6035921", "0.59851617", "0.58859783", "0.5827919", "0.5658049", "0.5592338", "0.55894524", "0.55575365", "0.5459018", "0.54376125", "0.5339908", "0.5337097", "0.5326472", "0.5319257", "0.52957076", "0.52586627", "0.5246408", "0.5244873", "0.524108", "0.52035165", "0.51998866", "0.51902074", "0.51831853", "0.51642007" ]
0.81460154
0
Loads scores from the given handle.
def load_scores(handle): logging.info('Loading scores') result = pd.read_csv(handle, index_col=0) logging.info('Loaded a table with shape {}'.format(result.shape)) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_scores(self, score_file_name):\n try:\n with open(score_file_name, 'rb') as score_file:\n self.scores = pickle.load(score_file)\n except FileNotFoundError:\n pass", "def load_score(fhandle: TextIO) -> annotations.NoteData:\n\n #### read start, end times\n intervals = np.loadtxt(fhandle, delimiter=\",\", usecols=[0, 1], dtype=np.float_)\n\n #### read notes as string\n fhandle.seek(0)\n content = fhandle.readlines()\n values = np.array(\n [librosa.note_to_hz(line.split(\",\")[2].strip(\"\\n\")) for line in content]\n )\n\n return annotations.NoteData(intervals, \"s\", values, \"hz\")", "def _read_scores(self,path):\n scores = dict()\n fin = open(path,'r')\n for line in fin:\n k, v = line.split()\n scores[k.strip()] = float(v.strip())\n return scores", "def _load_high_score(self):\n try:\n with open(\"scores.json\") as file:\n return int(json.load(file))\n except:\n print(\"Failed to read high score from scores.json!\")\n return 0", "def load_steady_scores(self):\n\n data=np.load(self.data_path,allow_pickle=True)\n scores_attrs=['re_scores','ri_scores','he_scores']\n \n if 're_scores' not in data.keys():\n self.compute_steady_scores()\n self.save_steady_scores()\n data=np.load(self.data_path,allow_pickle=True) \n \n for scores_attr in scores_attrs:\n \n assert(scores_attr in data.keys())\n setattr(self,scores_attr,data[scores_attr])", "def load_scores(score_dir):\n score_files = fi.find_files(score_dir, 'sc')\n scores = {\n get_target_name(f):\n pd.read_csv(f, delimiter='\\s*', index_col='description',\n engine='python')\n for f in score_files\n }\n # If duplicate structures present, remove all but first.\n for x, y in scores.items():\n scores[x] = y.loc[~y.index.duplicated(keep='first')]\n return scores", "def load(self):\n file_name = common.RANK_FILE % (self.week.season.name, self.week.num)\n with open(file_name, 'r') as rank_file:\n for record in rank_file:\n team, score = common.parse(record)\n self.score[team] = score", "def load_scores():\n from copy import copy\n from string import atof\n aas = open('aas.scr')\n pro = open('pro.scr')\n gly = open('gly.scr')\n\n aasline = aas.readline().split()\n proline = pro.readline().split()\n glyline = gly.readline().split()\n \n probx = [0 for i in xrange(36)] #this will be x index\n proby = [0 for i in xrange(36)] #this will be y index \n\n for row_counter in range(36):\n for column_counter in range(36):\n probx[column_counter] = atof(aasline[column_counter])\n aasline = aas.readline().split()\n proby[row_counter] = copy(probx)\n aas = copy(proby)\n\n probx = [0 for i in xrange(36)]\n proby = [0 for i in xrange(36)]\n for row_counter in range(36):\n for column_counter in range(36):\n probx[column_counter] = atof(proline[column_counter])\n proline = pro.readline().split()\n proby[row_counter] = copy(probx)\n pro = copy(proby)\n\n probx = [0 for i in xrange(36)]\n proby = [0 for i in xrange(36)]\n for row_counter in range(36):\n for column_counter in range(36):\n probx[column_counter] = atof(glyline[column_counter])\n glyline = gly.readline().split()\n proby[row_counter] = copy(probx)\n gly = copy(proby) \n return (aas, gly, pro)", "def _scan_scores(self,handle, consumer):\n read_and_call(handle, consumer.scores, start=\"Smith-Waterman\")", "def readFile(self, fname):\r\n self.scores = []\r\n self.fname = fname\r\n try:\r\n with open(fname, 'r') as f:\r\n for line in f:\r\n self.appendScore(line.split(' '))\r\n except:\r\n pass", "def get_highscores(self):\n\t\ttry:\n\t\t\twith open(self.filename) as f_obj:\n\t\t\t\tcontents = 
f_obj.read()\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 733 7043 for assistance.')\n\t\telse:\n\t\t\tjson_contents = json.loads(contents)\n\t\t\treturn sorted(json_contents, key=self.extract_score)", "def load_parameters(handle):\n\n logging.info('Loading scaling parameters from {}'.format(handle.name))\n\n result = pd.read_csv(handle, index_col=0)\n\n result.index = result.index.astype(str)\n\n logging.info('Result is a table with shape {}'.format(result.shape))\n\n return result", "def parse_scores_file(filename):\n scores = []\n with open(filename, \"r\") as scores_file:\n for line in scores_file:\n line = line.rstrip()\n info = line.split(\":\")\n scores.append(Score(info[0], int(info[1]), float(info[2])))\n return scores", "def load_scores_wiggle( fname, chrom_buffer_size=3 ):\n scores_by_chrom = dict()\n try:\n for chrom, pos, val in bx.wiggle.Reader( UCSCOutWrapper( open( fname ) ) ):\n if chrom not in scores_by_chrom:\n if chrom_buffer_size:\n scores_by_chrom[chrom] = BinnedArray()\n chrom_buffer_size -= 1\n else:\n scores_by_chrom[chrom] = PositionalScoresOnDisk()\n scores_by_chrom[chrom][pos] = val\n except UCSCLimitException:\n # Wiggle data was truncated, at the very least need to warn the user.\n print('Encountered message from UCSC: \"Reached output limit of 100000 data values\", so be aware your data was truncated.')\n except IndexError:\n stop_err('Data error: one or more column data values is missing in \"%s\"' % fname)\n except ValueError:\n stop_err('Data error: invalid data type for one or more values in \"%s\".' % fname)\n return scores_by_chrom", "def load_all_scores(path=None, use_cache=True):\n cache_path = SAVE_ROOT / 'feature' / 'dataframe_map.pkl'\n make_parent_dirs(cache_path)\n if cache_path.is_file() and use_cache:\n logging.info('loading score_data from cache file: {}'.format(cache_path))\n return pickle.load(cache_path.open('rb'))\n score_db_index = load_score_db_index(path)\n score_data = make_all_score_matrices(score_db_index)\n pickle.dump(score_data, cache_path.open('wb'))\n return score_data", "def _load_model_from_file(path, handle):\n logger.debug('Reading file from %s assuming pickled model.' % path)\n try:\n model = pickle.load(handle)\n except (TypeError, pickle.UnpicklingError):\n logger.debug('Cannot unpickle %s. Assuming json model next.' % path)\n try:\n model = load_json_model(path)\n except ValueError:\n logger.debug(\"Cannot import %s as json model. 
Assuming sbml model next.\" % path)\n try:\n model = read_sbml_model(path)\n except AttributeError as e:\n logger.error(\"cobrapy doesn't raise a proper exception if a file does not contain an SBML model\")\n raise e\n except Exception as e:\n logger.error(\n \"Looks like something blow up while trying to import {} as a SBML model.\"\n \"Try validating the model at http://sbml.org/Facilities/Validator/ to get more information.\".format(\n path))\n raise e\n return model", "def get_score(cfg):\n key = (cfg.mut, cfg.pH)\n return lazy_load(SCORE_MAP, key, read_score, get_score_path, cfg)", "def read_score(self):\n file_path = 'score.txt'\n \n with open(file_path, 'r') as f:\n score = f.read()\n\n if score == '':\n return 0\n else:\n return int(score)", "def _LoadScores(self, scoresFile):\n scores = []\n with open(scoresFile, \"r\") as file:\n for line in file:\n # Make a string with the score in front of the name so it can be sorted.\n temp = line.split(\",\")[1].rstrip() + \",\" + line.split(\",\")[0]\n scores.append(temp)\n return scores", "def del_highscores(self):\n\t\ttry:\n\t\t\twith open(self.filename) as f_obj:\n\t\t\t\tcontents = f_obj.read()\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 733 7043 for assistance.')\n\t\telse:\n\t\t\tjson_contents = json.loads(contents)\n\t\t\tfor item in json_contents:\n\t\t\t\titem['player_name'] = 'EMPTY'\n\t\t\t\titem['player_score'] = 0\n\t\t\tself.save_highscores(json_contents)", "def load_lsh(filename):\n with open(filename, 'rb') as handle:\n return pickle.load(handle)", "def load_score_dict(score_dict='sentiment.txt'):\n with open(score_dict, 'r') as f:\n dictionary = {}\n\n # Remove empty lines from file.\n lines = (line.strip() for line in f)\n lines = (line for line in lines if line)\n\n for line in lines:\n if line.startswith('#'):\n pass\n else:\n (word, score) = line.split()\n dictionary[str(word)] = float(score)\n return dictionary", "def get_highscore():\n highscore = 0\n try:\n with open('highscore', 'r') as f:\n highscore = f.readline()\n except IOError:\n pass\n return highscore", "async def load_state(self):\n\n\t\twith open(os.path.join(\"config\", \"leaderboards.json\"), \"r+\") as leaderboards:\n\t\t\tself.leaderboards = json.loads(leaderboards.read())", "def deserialize(self, fh):\n description, rows = pickle.load(fh)\n return CachedCursor(description, rows)", "def load_data(original_input_handle, cluster_input_handle):\n\n info('Loading original data from {}'.format(original_input_handle.name))\n\n original_data = pd.read_csv(original_input_handle, index_col=0)\n\n info('Loaded a table with shape {}'.format(original_data.shape))\n\n clusters = None\n\n if cluster_input_handle is not None:\n\n info('Loading cluster assignments from {}'.format(\n cluster_input_handle.name))\n\n clusters = pd.read_csv(cluster_input_handle, index_col=0, squeeze=True)\n\n info('Loaded {} entries'.format(clusters.size))\n\n return original_data, clusters", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n try:\n serial_string = load_dict[\"data%s\" % SparseParameter.IDENTIFIER]\n self._data = self._reconstruct_matrix(serial_string)\n\n if \"explored_data\" + SparseParameter.IDENTIFIER in load_dict:\n explore_table = load_dict[\"explored_data\" + SparseParameter.IDENTIFIER]\n idx_col = explore_table[\"idx\"]\n explore_list = []\n for irun, name_idx in enumerate(idx_col):\n serial_string = load_dict[\n \"xspm%s%08d\" % 
(SparseParameter.IDENTIFIER, name_idx)\n ]\n matrix = self._reconstruct_matrix(serial_string)\n explore_list.append(matrix)\n\n self._explored_range = explore_list\n self._explored = True\n\n except KeyError as e:\n super(SparseParameter, self)._load(load_dict)\n\n self._default = self._data\n self._locked = True", "def load_checkpoint(model, scoresfile):\n # load data from scores file\n X = np.loadtxt(scoresfile, delimiter=',')\n\n # separate into points and scores\n scores = X[:,-1]\n points = X[:,:-1]\n\n # set best hyperparameters based on best scores\n ind = np.argmin(scores)\n best_overall_point = points[ind]\n model.decode(best_overall_point)\n\n return model, points, scores", "def HighScore_r():\r\n fichier=open(\"save/HGSC/HIGHSCORE.txt\",\"r\")\r\n l=1\r\n for ligne in fichier:\r\n if l==1:\r\n highscore=int(ligne)\r\n l=0\r\n fichier.close()\r\n return highscore", "def ParseFile(self, handle, name):\n return cPickle.load(handle)" ]
[ "0.6391052", "0.60156506", "0.6003782", "0.58916295", "0.5874204", "0.5735446", "0.5705583", "0.5670833", "0.5634658", "0.5388726", "0.5343005", "0.5258118", "0.5206841", "0.5193093", "0.5190318", "0.5149233", "0.51236445", "0.5112225", "0.5052212", "0.50291836", "0.50266147", "0.5005707", "0.4991792", "0.49722037", "0.4939837", "0.4924886", "0.48799175", "0.48731208", "0.48456818", "0.48372543" ]
0.79614854
0
Obtains the MLE variance given a series of values and a calculated mean.
def get_mle_variance(series, mean=None): if mean is None: mean = series.mean() return 1 / series.size * ((series - mean)**2).sum()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variance(numbers, mean):\n variance = 0 # We will add to this value in a loop\n N = len(numbers)\n \n for i in numbers:\n\n # Operations follow typical BEDMAS\n variance += ((i - mean) * (i - mean))/N\n \n return variance", "def variance( values, sample=False ):\n mean_val = mean_value( values )\n n_val = len( values ) -1 if sample else len( values )\n return sum( [ j**2 for j in [ i - mean_val for i in values ] ] ) / n_val", "def variance(l):\n m = mean(l)\n dif = 0\n for x in l:\n dif += (m-x)**2\n return dif/len(l)", "def variance(self, mean=None):\n raise NotImplementedError", "def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)", "def _variance(mean_variance, samples):\n mean = mean_variance[0] / samples\n variance = mean_variance[1]\n variance /= samples\n variance -= mean * mean\n return variance", "def variance(values, weights=None, axis=0):\n \n average = np.average(values, weights=weights, axis=axis)\n variance = np.average((values-average)**2, weights=weights, axis=axis)\n return variance", "def variance(L, is_sample=0):\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def variance(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: (x - _mean)**2, self.sample))/(len(self.sample) - 1)", "def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))", "def sample_mean_var_ml(x):\n n = len(x)\n assert(n > 0)\n if n == 1:\n return x[0], 0\n s = 0.0\n ss = 0.0\n for i in x:\n s += i\n ss += i*i\n mu = s/n\n var = (ss/n) - mu*mu\n return mu, var", "def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) * (data - avg)\n v = v / len(dataset)\n return v", "def variance(xs: List[float]) -> float:\n assert len(xs) >= 2, \"variance requires at least two elements\"\n\n n = len(xs)\n deviations = de_mean(xs)\n return sum_of_squares(deviations) / (n - 1)", "def variance(xs: List[float]) -> float:\n assert len(xs) >= 2, \"variance requires at least two elements\"\n\n n = len(xs)\n deviations = de_mean(xs)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n \"\"\" note - why n-1?: since we are likely looking at a sample, x_bar is only an\n estimate of the actual mean, which means that on average (x_i - x_bar) ** 2\n is an underestimate of x_i's squared deviation from the mean, which is why\n we divide by n-1 instead of n (see bit.ly/lL2EapI)\"\"\"\n n = len(x)\n deviations = deviations_from_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = 
stddev ** 2\n return var", "def variance(x):\r\n n = len(x)\r\n deviations = dev_mean(x)\r\n return sum_of_squares(deviations) / (n-1)", "def var_calc(data, col, mean):\n\tv = sum([(mean - row[col])**2 for row in data])\n\treturn v", "def calc_variance(a, b, c, d, e):\n mean_of_num = (a + b + c + d + e) / 5\n return ( (a - mean_of_num)**2 + (b - mean_of_num)**2 + (c - mean_of_num)**2\n + (d - mean_of_num)**2 + (e - mean_of_num)**2) / 5", "def variance(self):\n return (math.exp(self.sigma ** 2) - 1.0) \\\n * math.exp(2.0 * self.mu + self.sigma ** 2)", "def _calculate_excess_variance(self, lc):\n std = self._calculate_std(lc)\n return np.var(lc) - std**2", "def explained_variance(returns, values):\n exp_var = 1 - torch.var(returns - values) / torch.var(returns)\n return exp_var.item()", "def variance(data, m=None):\n n, ss = _SS(data, m)\n if n < 2:\n raise ValueError('sample variance or standard deviation'\n ' requires at least two data points')\n return ss/(n-1)", "def weighted_var(values, weights):\n average = np.average(values, weights=weights)\n # Fast and numerically precise:\n variance = np.average((values-average)**2, weights=weights)\n return variance", "def test_mean_variance():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n R = common_metrics.mean_variance(f, maximise=True)\n expected = np.asarray(\n [1.42320289996384, 1.54948632859709])\n assert np.allclose(R, expected)\n R = common_metrics.mean_variance(f, maximise=False)\n expected = np.asarray(\n [0.132210105461122, 0.351723890540445])\n assert np.allclose(R, expected)" ]
[ "0.7475806", "0.71996796", "0.7120669", "0.69633657", "0.69283533", "0.6919777", "0.690976", "0.67857486", "0.6750658", "0.6750658", "0.66914403", "0.6673788", "0.6655616", "0.6629216", "0.6552021", "0.6552021", "0.65044594", "0.65044594", "0.65044594", "0.6442417", "0.64366317", "0.64280236", "0.63769686", "0.63396454", "0.6320044", "0.63125134", "0.6281299", "0.6278863", "0.6278572", "0.626125" ]
0.8110193
0
Obtains cluster assignments from the given scores.
def get_clusters(df, allow_unassigned, variance_coefficient, letters): logging.info('Calculating cluster assignments') # Calculate minimum thresholds to call cluster assignments. min_thresholds = pd.Series(np.tile(1e-6, df.shape[1]), index=df.columns) if allow_unassigned: # Estimate the variance for each factor by fixing the MLE estimate of # the mean to 0. min_thresholds = df.apply( get_mle_variance, mean=0).apply(np.sqrt) * variance_coefficient unassigned_mask = (df >= min_thresholds).sum(axis=1) < 1 result = df.apply(pd.Series.argmax, axis=1) # Apply letters if required. if letters: result = ( result.astype(int) - 1).apply(string.ascii_uppercase.__getitem__) # Set unassigned patients. result.loc[unassigned_mask] = 0 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clustering(self, pred_boxes, pred_scores):\n pred_boxes_cat = torch.cat(pred_boxes, dim=0)\n\n pred_boxes_cat[:, -1] = limit_period(pred_boxes_cat[:, -1])\n pred_scores_cat = torch.cat(pred_scores, dim=0)\n ious = boxes_iou3d_gpu(pred_boxes_cat, pred_boxes_cat)\n cluster_indices = torch.zeros(len(ious)).int() # gt assignments of preds\n cur_cluster_id = 1\n while torch.any(cluster_indices == 0):\n cur_idx = torch.where(cluster_indices == 0)[0][0] # find the idx of the first pred which is not assigned yet\n cluster_indices[torch.where(ious[cur_idx] > 0.3)[0]] = cur_cluster_id\n cur_cluster_id += 1\n clusters = []\n scores = []\n for i in range(1, cluster_indices.max().item() + 1):\n clusters.append(pred_boxes_cat[cluster_indices==i])\n scores.append(pred_scores_cat[cluster_indices==i])\n if len(scores)==0:\n print('debug')\n\n return clusters, scores", "def get_cluster_assignments(self, data, n_clusters=10):\n kmeans = KMeans(n_clusters=n_clusters)\n kmeans.fit(data)\n pred = kmeans.predict(data)\n return pd.DataFrame(pred)", "def cell_assignments(cluster_solution_id):\n query = db.session.query(Cluster, CellAssignment)\\\n .filter(Cluster.cluster_solution_id == cluster_solution_id)\\\n .filter(Cluster.id == CellAssignment.cluster_id)\n\n result = [dict(name=q[1].name, cluster_name=q[0].name) for q in query]\n\n return result", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def score_of_nodes(self, score):\n for hypervisor_id in self.model.get_all_hypervisors():\n hypervisor = self.model. \\\n get_hypervisor_from_id(hypervisor_id)\n count = self.model.get_mapping(). 
\\\n get_node_vms_from_id(hypervisor_id)\n if len(count) > 0:\n result = self.calculate_score_node(hypervisor)\n else:\n # The hypervisor has not VMs\n result = 0\n if len(count) > 0:\n score.append((hypervisor_id, result))\n return score", "def assignment(self, addresses, centroids, k):\n newClusters = {}\n print centroids\n for (lat, long) in addresses:\n minDistance = float('Inf')\n minIndex = 0\n for i in range(k):\n if pow(self.euclideanDistance((lat, long), centroids[i]),2) < minDistance:\n minDistance = pow(self.euclideanDistance((lat, long), centroids[i]),2)\n minIndex = i\n if minIndex in newClusters:\n newClusters[minIndex].append((lat, long))\n else:\n newClusters[minIndex] = [(lat, long)]\n return newClusters", "def compute_scores(self, *scorers):\n if self.nodes[0]:\n list_ = self.nodes\n else:\n list_ = self.reaction_trees\n\n for idx, item in enumerate(list_):\n scores = {repr(scorer): scorer(item) for scorer in scorers}\n self.all_scores[idx].update(scores)\n self._update_route_dict(self.all_scores, \"all_score\")", "def eval_cluster_contingency(clustering_alg: List, labels_true, sdist):\n for (alg_name, alg_dict) in clustering_alg:\n if \"alg\" in alg_dict:\n clustering = alg_dict[\"alg\"].fit(sdist)\n labels_pred = clustering.labels_\n alg_dict[\"labels\"] = labels_pred\n else:\n labels_pred = alg_dict[\"labels\"]\n\n pred_label_dict, new_labels = normalize_labels(labels_pred)\n\n alg_dict[\"cm\"] = contingency_matrix(labels_true, new_labels)", "def cluster_fusion(self, clusters, scores):\n boxes_fused = []\n for c, s in zip(clusters, scores):\n # reverse direction for non-dominant direction of boxes\n dirs = c[:, -1]\n max_score_idx = torch.argmax(s)\n dirs_diff = torch.abs(dirs - dirs[max_score_idx].item())\n lt_pi = (dirs_diff > pi).int()\n dirs_diff = dirs_diff * (1 - lt_pi) + (2 * pi - dirs_diff) * lt_pi\n score_lt_half_pi = s[dirs_diff > pi / 2].sum() # larger than\n score_set_half_pi = s[dirs_diff <= pi / 2].sum() # small equal than\n # select larger scored direction as final direction\n if score_lt_half_pi <= score_set_half_pi:\n dirs[dirs_diff > pi / 2] += pi\n else:\n dirs[dirs_diff <= pi / 2] += pi\n dirs = limit_period(dirs)\n s_normalized = s / s.sum()\n sint = torch.sin(dirs) * s_normalized\n cost = torch.cos(dirs) * s_normalized\n theta = torch.atan2(sint.sum(), cost.sum()).view(1,)\n center_dim = c[:, :-1] * s_normalized[:, None]\n boxes_fused.append(torch.cat([center_dim.sum(dim=0), theta]))\n if len(boxes_fused) > 0:\n boxes_fused = torch.stack(boxes_fused, dim=0)\n else:\n boxes_fused = None\n print('debug')\n return boxes_fused", "def plot_sim_matrix(\n clusterings: list, scoring: Callable[[object, object], object]\n) -> object:\n forDF = []\n for c in clusterings:\n cID = c.get_description()\n for c2 in clusterings:\n c2ID = c2.get_description()\n forDF.append([cID, c2ID, scoring(c, c2).score])\n df = pd.DataFrame(columns=[\"com1\", \"com2\", \"score\"], data=forDF)\n df = df.pivot(index=\"com1\", columns=\"com2\", values=\"score\")\n return sns.clustermap(df)", "def set_ability_scores(self, scores: List[int]):\n for s in range(6):\n self.dna[s] = scores[s]", "def clusterByLocation(sc,records,scoredRecords,fClusterSearchEpsilon,nMinClusterPoints,nMinClusterUnique,fileName,outdir='scoreFiles'):\n\n # assign clusters and filter out non clustered records\n recordList = map(lambda term: ScoreRecord(term[0][1],term[1]),scoredRecords)\n assignToCluster(recordList, fClusterSearchEpsilon, nMinClusterPoints)\n recordList = filter(lambda x: x.cluster != -1, 
recordList)\n\n # collect records per cluster and filter out records that don't meet\n # min user threshold\n clustDict = {}\n for record in recordList:\n key = str(record.cluster)\n if key not in clustDict:\n clustDict[key] = ScoreBin(record)\n else:\n clustDict[key].addRecord(record)\n bins = clustDict.values()\n bins = filter(lambda x: len(x.users)>=nMinClusterUnique, bins)\n if len(bins) == 0:\n sys.exit(\"No clusters found, you need to relax cluster parameters\")\n\n\n lClustPoly = []\n for bin in bins:\n createHull(bin, False)\n if bin.objPoly is not None:\n lClustPoly.append(bin.objPoly)\n\n bc_lClustPoly = sc.broadcast(lClustPoly)\n lBackground = records.map(lambda x: makeTotalsArray(x, bc_lClustPoly, False)).reduce(lambda x, y: map(add, x, y))\n\n for i in range(len(bins)):\n bins[i].postsInHull = lBackground[i]\n\n bins = map(lambda x: x.toDict(), bins)\n writeDict = {\"type\":\"place\", \"clusters\":bins}\n with codecs.open(outdir+\"/\"+fileName, encoding=\"utf-8\",mode=\"wb\") as fOut:\n json.dump(writeDict, fOut)\n return writeDict", "def csls(scores, knn = 5):\n def mean_similarity(scores, knn, axis = 1):\n nghbs = np.argpartition(scores, -knn, axis = axis) # for rows #[-k:] # argpartition returns top k not in order but it's efficient (doesnt sort all rows)\n # TODO: There must be a faster way to do this slicing\n if axis == 1:\n nghbs = nghbs[:,-knn:]\n nghbs_score = np.concatenate([row[indices] for row, indices in zip(scores, nghbs)]).reshape(nghbs.shape)\n else:\n nghbs = nghbs[-knn:,:].T\n nghbs_score = np.concatenate([col[indices] for col, indices in zip(scores.T, nghbs)]).reshape(nghbs.shape)\n\n return nghbs_score.mean(axis = 1)\n # 1. Compute mean similarity return_scores\n src_ms = mean_similarity(scores, knn, axis = 1)\n trg_ms = mean_similarity(scores, knn, axis = 0)\n # 2. 
Compute updated scores\n normalized_scores = ((2*scores - trg_ms).T - src_ms).T\n return normalized_scores", "def node_assignment_score(edge_index: nb.int64[:,:],\n edge_scores: nb.float32[:,:],\n n: nb.int64) -> nb.int64[:]:\n return edge_assignment_score(edge_index, edge_scores, n)[1]", "def node_and_vm_score(self, sorted_score, score):\n node_to_release = sorted_score[len(score) - 1][0]\n vms_to_mig = self.model.get_mapping().get_node_vms_from_id(\n node_to_release)\n\n vm_score = []\n for vm_id in vms_to_mig:\n vm = self.model.get_vm_from_id(vm_id)\n if vm.state == vm_state.VMState.ACTIVE.value:\n vm_score.append(\n (vm_id, self.calculate_score_vm(vm)))\n\n return node_to_release, vm_score", "def cost_vs_clusters(filename):\n graph,source,homes,indexToLoc = graph_file_io.graph_from_input(filename)\n\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = range(1,number_of_homes+1)\n cost_list = []\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n cost_list.append(cost)\n return cluster_list, cost_list", "def _estimate_assignments(self, graph: GraphRepresentation) -> None:\n embed_graph = augment_diagonal(graph)\n latent = AdjacencySpectralEmbed(\n n_components=self.n_components, **self.embed_kws\n ).fit_transform(embed_graph)\n if isinstance(latent, tuple):\n latent = np.concatenate(latent, axis=1)\n gc = GaussianCluster(\n min_components=self.min_comm,\n max_components=self.max_comm,\n **self.cluster_kws\n )\n vertex_assignments = gc.fit_predict(latent) # type: ignore\n self.vertex_assignments_ = vertex_assignments", "def get_label_scores_mapping(labels, scores):\n return {label: scores[i] for i, label in enumerate(labels)}", "def get_clusters(cluster_path): #{{{\n print 'loading cluster info'\n indicesToParticle = pickle.load(open(cluster_path+\"/verticesToParticle.p\",\"rb\"))\n indicesOnCluster = pickle.load(open(cluster_path+\"/verticesOnCell.p\",\"rb\"))\n maxIndices = pickle.load(open(cluster_path+\"/maxVertices.p\",\"rb\"))\n print 'done'\n\n return indicesToParticle, indicesOnCluster, maxIndices #}}}", "def cluster_assign(images_lists, dataset):\n assert images_lists is not None\n pseudolabels = []\n image_indexes = []\n for cluster, images in enumerate(images_lists):\n image_indexes.extend(images)\n pseudolabels.extend([cluster] * len(images))\n print(image_indexes)\n print(pseudolabels)\n \n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\n\n return ReassignedDataset(image_indexes, pseudolabels, dataset, t)", "def cluster(eps, linkage='average'):\n # TODO: your code here\n # Start by creating leaves for all the profiles and computing Euclidean distances between each pair.\n nodes = [ExpressionHierarchicalClusterLeaf(ep) for ep in eps]\n distances = {}\n\n for i in range(len(nodes)):\n for j in range(i + 1, len(nodes)):\n dis = sum(k * k for k in nodes[i].ep.values - nodes[j].ep.values) ** 
0.5\n distances[(nodes[i], nodes[j])] = dis\n\n # repeatedly find the closest pair of clusters and merge them into a new inner node that can be used in subsequent iterations.\n # meanwhile Compute cluster-profile and cluster-cluster distances, allowing the choice of average linkage\n while len(nodes) > 1:\n # find the closest pair\n min_dis = min(distances.values())\n node1 = None\n node2 = None\n for k, v in distances.items():\n if v == min_dis:\n node1 = k[0]\n node2 = k[1]\n\n # merge\n merged_node = ExpressionHierarchicalClusterInner(node1, node2)\n nodes.remove(node1)\n nodes.remove(node2)\n\n # https://stackoverflow.com/questions/11941817/how-to-avoid-runtimeerror-dictionary-changed-size-during-iteration-error\n for k1, k2 in list(distances.keys()):\n if k1 == node1 or k2 == node1 or k1 == node2 or k2 == node2:\n del distances[(k1, k2)]\n\n # calculate the new distance form other nodes to the merged node\n for other_node in nodes:\n pair_dis = []\n for ep1 in other_node.ordered_profiles():\n for ep2 in merged_node.ordered_profiles():\n pair_dis.append(sum(k * k for k in ep1.values - ep2.values) ** 0.5)\n if linkage == \"average\":\n cluster_dis = sum(pair_dis) / len(pair_dis)\n elif linkage == \"min\":\n cluster_dis = min(pair_dis)\n else:\n cluster_dis = max(pair_dis)\n\n # update the distance dict\n distances[(other_node, merged_node)] = cluster_dis\n\n # add merged_node\n nodes.append(merged_node)\n\n return nodes[0]", "def _single_pass_optimize(self, best_centre_inds, best_score, min_to_cluster, nbrs):\n def score_inds(vals):\n inds, ind = vals\n other_best_inds.append(ind)\n score = np.sum(np.min(self.orig_dists[np.ix_(inds,other_best_inds)], axis=1))\n other_best_inds.pop()\n return (score, ind)\n #dists = self.orig_dists.copy() If I zero out rows I don't need, I don't have to use ix_() which is 2x as fast. 
Probably doesn't matter, fast enough.\n best_centre_inds = best_centre_inds[::]\n inds_to_try = list(nbrs)\n for i in range(len(best_centre_inds)):\n other_best_inds = best_centre_inds[:i] + best_centre_inds[i+1:]\n cur_covered_set = set().union(*(nbrs[ind] for ind in other_best_inds))\n cvrd_inds = [list(cur_covered_set | nbrs[ind]) for ind in inds_to_try]\n valid_cvrd_inds = [(inds, ind) for inds, ind in zip(cvrd_inds, inds_to_try) if len(inds) >= min_to_cluster]\n score_vals = map(score_inds, valid_cvrd_inds)\n best_score, best_ind = min(score_vals)\n best_centre_inds[i] = best_ind\n return best_centre_inds", "def calc_csls_score(self, sess, batch_size=512):\n score_val = []\n eval_size = len(self.src_ind)\n # Calculate scores\n for i in range(0, eval_size, batch_size):\n score_src_ids = [self.src_ind[x] for x in range(i, min(i + batch_size, eval_size))]\n eval_dict = {self.src_ph: score_src_ids, self.tgt_ph: self.tgt_ids}\n score_val.append(sess.run(self.csls_subgraphs[\"ScoreGraph\"], feed_dict=eval_dict))\n score_val = np.concatenate(score_val)\n return score_val", "def match_cluster_sets(cs1, cs2):\n\n matr = [[len(cl1.bibs & cl2.bibs) for cl2 in cs2.clusters] for cl1 in cs1.clusters]\n mapping = maximized_mapping(matr)\n return dict((cs1.clusters[mappy[0]], cs2.clusters[mappy[1]]) for mappy in mapping)", "def cluster(X=None, datalabels=None, nc=2):\n from sklearn.cluster import KMeans\n from sklearn.cluster import AffinityPropagation\n\n C = KMeans(n_clusters=nc,n_init=10,init='random')\n C.fit(X[:,:1])\n\n #C = AffinityPropagation(preference=-80,damping=0.5).fit(X)\n #cluster_centers_indices = C.cluster_centers_indices_\n\n clust = {}\n for (i, label) in enumerate(C.labels_):\n key = C.cluster_centers_[label][0]\n #print label,key, datalabels[i],X[i][1]\n if not clust.has_key(key):\n clust[key]=[]\n clust[key].append(datalabels[i])\n #print clust\n return C, clust", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def update_centers(data_set, assignments):\n new_means = defaultdict(list)\n for assignment, point in zip(assignments, data_set):\n new_means[assignment].append(point) \n centers = [point_avg(points) for points in new_means.values()]\n return centers", "def get_candidates(self, sess, avg1, avg2, batch_size=512, swap_score=False):\n all_scores = []\n all_targets = []\n for i in range(0, self.max_dict_size, batch_size):\n src_ids = [x for x in range(i, min(i + batch_size, self.max_dict_size))]\n dict_dict = {self.src_ph: src_ids, self.tgt_ph: self.tgt_ids}\n if swap_score:\n temp_score = sess.run(self.csls_subgraphs[\"ScoreG_T2S\"], feed_dict=dict_dict)\n else:\n temp_score = sess.run(self.csls_subgraphs[\"ScoreGraph\"], feed_dict=dict_dict)\n batch_score = 2 * temp_score - (avg1[src_ids][:, None] + avg2[None, :])\n top_matches = sess.run(\n self.csls_subgraphs[\"Top2\"], feed_dict={self.score_ph: batch_score}\n )\n all_scores.append(top_matches[0])\n all_targets.append(top_matches[1])\n all_scores = np.concatenate(all_scores)\n all_targets = np.concatenate(all_targets)\n all_pairs = np.concatenate(\n [np.arange(0, self.max_dict_size, dtype=np.int64)[:, None], all_targets[:, 0][:, None]],\n 1,\n )\n\n # Scores with high confidence will have large difference between first two guesses\n diff = all_scores[:, 0] - all_scores[:, 1]\n reordered = np.argsort(diff, axis=0)\n reordered = reordered[::-1]\n 
all_pairs = all_pairs[reordered]\n\n # Select words which are in top max_dict\n selected = np.max(all_pairs, axis=1) <= self.max_dict_size\n all_pairs = all_pairs[selected]\n\n # Make sure size is less than max_dict\n all_pairs = all_pairs[: self.max_dict_size]\n return all_pairs", "def get_node_scores(scores_file,G):\n scores = {}\n with open(scores_file) as f:\n for line in f:\n split = line.strip().split()\n scores[split[0]] = float(split[1])\n for nd in G.nodes():\n G.add_node(nd, score=scores[nd])", "def _relocate_clusters(self, cluster_labels):\n for cluster_label in range(self.k):\n if cluster_labels[cluster_label] is not None:\n # mean of the pixels assigned to cluster\n p_sum, p_count = np.asarray(\n cluster_labels[\n cluster_label\n ]).sum(axis=0), len(cluster_labels[cluster_label])\n self._clusters[cluster_label] = p_sum / p_count" ]
[ "0.6563418", "0.6028029", "0.6001135", "0.5851254", "0.5810269", "0.57647604", "0.5749857", "0.56806594", "0.5671507", "0.56269217", "0.56164557", "0.5607218", "0.5606947", "0.5574019", "0.5556459", "0.55542016", "0.55088186", "0.5474318", "0.5459507", "0.54418623", "0.5432693", "0.5419214", "0.5416019", "0.5406387", "0.5386218", "0.5385902", "0.5385664", "0.5358912", "0.53519386", "0.5342772" ]
0.6130404
1
Return the set of all inlined instructions in `root_insn` (included).
def get_inlined_insns(root_insn): result = set() def helper(insn): result.add(insn) for input in insn.inputs: if ( isinstance(input.value, ir.ComputingInstruction) and input.value.inline ): helper(input.value) helper(root_insn) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instructions(self):\n tmp_ins = []\n idx = 0\n for i in self.method.get_instructions():\n if idx >= self.start and idx < self.end:\n tmp_ins.append(i)\n\n idx += i.get_length()\n return tmp_ins", "def getIncludes(self):\n return self.includes[:]", "def get_imports(root_node):\n imports = {'NotImportFrom': []}\n for node in ast.walk(root_node):\n if isinstance(node, ast.Import):\n import_visitor = ImportVisitor()\n t = import_visitor.visit_Import(node)\n imports['NotImportFrom'].extend(t)\n\n elif isinstance(node, ast.ImportFrom):\n import_from_visitor = ImportFromVisitor()\n r = import_from_visitor.visit_ImportFrom(node)\n imports.update(r)\n return imports", "def get_roots(self):\n roots = []\n for symbol in self.GlobalSymbolDict.values():\n if symbol.isRoot():\n roots += [symbol]\n return roots", "def contains_insn(self, *args):\n return _ida_hexrays.cinsn_t_contains_insn(self, *args)", "def instructions(self):\n for inst in self.global_insts[:]:\n yield inst\n for function in self.functions[:]:\n for inst in function.instructions():\n yield inst", "def scan_addresses(root_dir, base_path=None):\r\n\r\n addresses = OrderedSet()\r\n for buildfile in BuildFile.scan_buildfiles(root_dir, base_path):\r\n addresses.update(Target.get_all_addresses(buildfile))\r\n return addresses", "def get_opcode_set(path):\n opcode_set = set()\n input_files = glob.glob(path + \"\\\\**\\\\*.txt\", recursive=True)\n for input_file in input_files:\n with open(input_file) as file_handler:\n for opcode in [line.rstrip('\\n') for line in file_handler.readlines()]:\n opcode_set.add(opcode)\n return opcode_set", "def getRegisters(self):\n if( self.cond == CT.NOT ):\n return self.right.getRegisters()\n elif( not isLogicalConst(self.cond) ):\n return list(set( self.left.getRegisters() + self.right.getRegisters()))\n else:\n return []", "def roots(self):\n if not self.__roots:\n self.__roots = set()\n for n in self.__nodes:\n if n not in self.__reverse_map:\n self.__roots.add(n)\n return self.__roots", "def in_order_traversal(self, root):\n\n def in_order_traversal_helper(root):\n if root:\n in_order_traversal_helper(root.left)\n result.append(root.data)\n in_order_traversal_helper(root.right)\n\n result = []\n in_order_traversal_helper(root)\n return result", "def get_includes(self):\r\n def visitor(fobj, lptr, depth, includes):\r\n if depth > 0:\r\n loc = lptr.contents\r\n includes.append(FileInclusion(loc.file, File(fobj), loc, depth))\r\n\r\n # Automatically adapt CIndex/ctype pointers to python objects\r\n includes = []\r\n conf.lib.clang_getInclusions(self,\r\n callbacks['translation_unit_includes'](visitor), includes)\r\n\r\n return iter(includes)", "def inorder(self,root)->list:\n\t\tres=[]\n\t\tif root:\n\t\t\tres=self.inorder(root.left)\n\t\t\tres.append(root.data)\n\t\t\tres=res+self.inorder(root.right)\n\t\treturn res", "def root_clauses(self):\n return self.clauses.filter( parent=None )", "def getRootIsolatedObjects():\n return frozenset([id for id, obj in getSite().aq_parent.objectItems() if IObjectToIsolate.providedBy(obj)])", "def contained(self):\n seen = set()\n return [l.to_segment for l in self.edges_to_contained \\\n if id(l) not in seen and not seen.add(id(l))]", "def get_seen_statements_from(path: str) -> set:\n return set([int(s) for s in replace_multiple_chars(path, ['/', '-', '?'], ' ').split() if s.isdigit()])", "def contains_insn(self, *args):\n return _ida_hexrays.cexpr_t_contains_insn(self, *args)", "def get_external_imports(tree: dict,\n only_top_level: bool = True) -> set:\n 
external_imports = set()\n modules = find_tree(tree, lambda x: x[\"type\"] == \"module\", how=\"all\")\n for module in modules:\n for import_item in module[\"imports\"].values():\n if import_item[\"lookup\"] is None:\n if import_item[\"type\"] == \"import\":\n external_imports.add(import_item[\"name\"])\n elif import_item[\"type\"] == \"from-import\":\n if import_item[\"module\"] is not None:\n external_imports.add(import_item[\"module\"])\n if only_top_level:\n external_imports = {i.partition(\".\")[0] for i in external_imports}\n return external_imports", "def GetInstructionList():\n return [i[0] for i in ida_idp.ph_get_instruc() if i[0]]", "def _extract_instructions(self, xmltree):\r\n return get_instructions(xmltree)", "def _extract_instructions(self, xmltree):\r\n return get_instructions(xmltree)", "def _extract_instructions(self, xmltree):\r\n return get_instructions(xmltree)", "def inorder(root: Node):\n return inorder(root.left) + [root.data] + inorder(root.right) if root else []", "def entry_nodes(self):\n return list(itertools.chain(*[arg.entry_nodes() for arg in self.args]))", "def includes(self) -> Set:\n if self._includes is None:\n manifest = self._get_manifest()\n self._includes = manifest[\"files\"][\"includes\"]\n\n return self._includes", "def get_all(self):\n return objects.registry.AssemblyList.get_all(self.context)", "def roots(self):\n return {\n nodeObj for nodeId, nodeObj\n in self.nodes.items()\n if nodeId not in self.childNodes}", "def in_order_traversal(self):\n elements = []\n\n #visit left tree\n if self.left:\n elements += self.left.in_order_traversal()\n\n #visit base node\n elements.append(self.data)\n\n #visit right tree\n if self.right:\n elements += self.right.in_order_traversal()\n\n return elements", "def AllSubElements(self):\n return (set(self._groups_to_load.keys()) |\n set(self._commands_to_load.keys()))" ]
[ "0.5307971", "0.500042", "0.49988607", "0.4996068", "0.49914584", "0.49559152", "0.49267817", "0.49252596", "0.48164862", "0.4806877", "0.47919488", "0.4760398", "0.47565317", "0.4750424", "0.47436726", "0.47354415", "0.4718832", "0.46956724", "0.46533853", "0.46467805", "0.46460897", "0.46460897", "0.46460897", "0.46453723", "0.46440366", "0.46425295", "0.46362725", "0.46010137", "0.4588879", "0.45797017" ]
0.8623057
0
Tries to select single node from nested dict and write it to the list at the index.
def _try_set(set_list, index, nested_dict, dict_keys=[]): try: for dict_key in dict_keys: nested_dict = nested_dict.__getitem__(dict_key) set_list[index] = str(nested_dict) return nested_dict except: return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSubkeyByIndex(self, index):", "def _dictitem_gen(self, index):\n # first call can be assumed to work on structure dict\n if index in self.struct['dict']: # \"dict\" is a standard dictionary, thus iterating over it is the same as iterating over the keys\n for idx in self.struct['dict'][index]: # it is always a list\n if idx == 'lifted':\n # recursive case\n for s in self.iter_withpseudo():\n if isinstance(s, Structure) and s.struct['liftedkeys']:\n for elem in s._dictitem_gen(index): # yield from in python 3.x:\n yield elem\n else:\n # base case\n elem = self.struct['list'][idx]\n previous = self.struct['list'][:idx]\n cur_leaf = sum(1 if s is None else s['n'] for s in previous)\n\n if elem is None: # leaf\n yield self.leaves[cur_leaf]\n else:\n yield Structure(struct=elem, leaves=self.leaves[cur_leaf : cur_leaf+elem['n']])", "def nested_access(nested_list, indices):\n if len(indices) > 0:\n return nested_access(nested_list[indices[0]], indices[1:])\n else:\n return nested_list", "def __getitem__(self, index: int) -> T:\n node_at_index = self.__get_node_at_index(index)\n return node_at_index.item", "def _try_append(set_list, index, nested_dict, dict_keys=[]):\n try:\n for dict_key in dict_keys:\n nested_dict = nested_dict.__getitem__(dict_key)\n if set_list:\n set_list[index] += str(nested_dict)\n return nested_dict\n except:\n return ''", "def _get_node(self, index):\r\n\t\tself._validate_index(index)\r\n\t\treturn self._traverse(lambda i, list: i < index)[\"node\"]", "def __getitem__(self, i: int) -> 'Tree':\n ...", "def _walk(self):\n while self._slice:\n new_slice = []\n for element in self._slice:\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n new_slice.extend(sublist)\n self._slice = new_slice", "def __getitem__(self, index):\n node = self.head\n index += 1\n for level in reversed(range(self.max_levels)):\n while node.width[level] <= index:\n index -= node.width[level]\n node = node.next[level]\n return node.value", "def _get_nested(nested_dict, field):\n print(nested_dict, field)\n keys = field.split('.')\n current = nested_dict\n for k in keys:\n print('key', k, 'current', current)\n # return None for nested fields without a value in this doc\n if isinstance(current, list):\n # this list could contain anything. 
skip objects not containing `k`.\n return [x[k] for x in current if x.get(k) is not None]\n if not k in current:\n current = None\n break\n current = current[k]\n return current", "def _walk(self, element):\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n for sublist_element in sublist:\n for recursive_elem in self._walk(sublist_element):\n yield recursive_elem", "def dot_index(index, data):\n if index:\n for key in index.split('.'):\n if isinstance(data, list):\n data = [x[key] for x in data]\n else:\n data = data[key]\n if isinstance(data, list):\n return data\n if isinstance(data, dict):\n return data.items()\n else:\n return [data]", "def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids", "def get_entry(obj, *path):\n\n try:\n for elem in path:\n is_index = isinstance(elem, int)\n is_list = isinstance(obj, list)\n if is_index != is_list:\n raise UpdateException('index given for non-list or vice versa')\n obj = obj[elem]\n return obj\n except Exception as ex:\n path_str = '/'.join(map(str, path))\n msg = f'unable to access object path \"/{path_str}\"'\n raise UpdateException(msg) from ex", "def get_doc(ds, idx):\n rtn = ds[idx]\n if isinstance(rtn, dict):\n rtn = rtn['text']\n return rtn", "def __getitem__(self, key):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__getitem__')(key)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n return plist([self[k] for k in key]) # Don't pass root -- we are uprooting\n elif isinstance(key, slice):\n if self is self.__root__:\n return plist(list.__getitem__(self, key))\n return plist(list.__getitem__(self, key), root=plist(list.__getitem__(self.__root__, key)))\n else:\n return list.__getitem__(self, key)\n except TypeError as first_exception:\n try:\n if isinstance(key, list):\n return plist([self[i][k] for i, k in enumerate(key)]) # Don't pass root -- we are uprooting\n if isinstance(key, tuple):\n try:\n return plist([x[key] for x in self], root=self.__root__)\n except Exception:\n return plist([tuple(x[k] for k in key) for x in self], root=self.__root__)\n return plist([x[key] for x in self], root=self.__root__)\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))", "def getattr_nested(obj, idxs):\n if len(idxs) == 0:\n return obj\n\n idx = idxs.pop(0)\n\n if isinstance(obj, dict):\n if idx in obj:\n return getattr_nested(obj[idx], idxs)\n elif isinstance(obj, (list, tuple)) and isinstance(idx, int):\n if idx < len(obj):\n return getattr_nested(obj[idx], idxs)\n else:\n return getattr_nested(getattr(obj, idx))", "def py__simple_getitem__(self, index):\n compiled_value_index = compiled.create_simple_object(self.inference_state, index)\n for key, value in self.get_tree_entries():\n for k in self._defining_context.infer_node(key):\n for key_v in k.execute_operation(compiled_value_index, u'=='):\n if key_v.get_safe_value():\n return self._defining_context.infer_node(value)\n raise SimpleGetItemNotFound('No key found in dictionary %s.' 
% self)", "def py__simple_getitem__(self, index):\n if isinstance(index, slice):\n return ValueSet([self])\n else:\n with reraise_getitem_errors(TypeError, KeyError, IndexError):\n node = self.get_tree_entries()[index]\n return self._defining_context.infer_node(node)", "def get(self, index):\n return self._get_node(index)", "def _find_one_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping,\n ) -> Union[dict, None]:\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n return item\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def tree_access_data(data, index, step):\n return tree_multimap(partial(access_data, step), data, index)", "def __getitem__(self, key):\n result = self.tree[key]\n if result is not None:\n \"\"\"This needs to be deep-copied in order not to change the elements in the map via the reference, but\n return the value as in SetlX.\n The index 2 from key implies stands for the value as key-value-pairs are represented as lists of length 2\"\"\"\n return copy.deepcopy(result.key[2])", "def __getitem__(self, key):\n if isinstance(key, list):\n return plist([self[k] for k in key], root=plist([KeyValue(k, self[k]) for k in key]))\n else:\n return dict.__getitem__(self, key)", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def _traverse_1_0_0(item):\n if 'child_nodes' in item.keys():\n for child_node in item['child_nodes']:\n _traverse_1_0_0(child_node)\n item['content'] = item['child_nodes']\n del item['child_nodes']", "def select_object_at_index(self, index):\n\t\treturn self.object_list[index]", "def set_nested_item(data_dict: dict, key_list: tuple or list, value):\r\n reduce(getitem, key_list[:-1], data_dict)[key_list[-1]] = value\r\n return data_dict", "def get_node(self, selector, index):\n\n self.arbor._setup_tree(self)\n self.arbor._grow_tree(self)\n indices = getattr(self, f\"_{selector}_field_indices\", None)\n if indices is None:\n raise RuntimeError(\"Bad selector.\")\n\n my_link = self.root._links[indices][index]\n return self.arbor._generate_tree_node(self.root, my_link)", "def __getitem__(self, index: int) -> Any:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Set the _first item\n elif index == 0:\n return self._first\n # Recurse on _rest\n else:\n return self._rest[index - 1]" ]
[ "0.5381771", "0.5373577", "0.5308455", "0.52927774", "0.52425617", "0.523762", "0.5215978", "0.5201783", "0.5195301", "0.5146156", "0.5132212", "0.5052306", "0.5043918", "0.503015", "0.50289166", "0.50024855", "0.49894303", "0.4979314", "0.4975182", "0.49483192", "0.49455285", "0.4943872", "0.4933517", "0.49026987", "0.48963678", "0.48946047", "0.48861688", "0.48664665", "0.48565778", "0.48545215" ]
0.55979466
0
Appends selected single node from nested dict to the list at the index. Append instead of rewriting as in case of __try_set.
def _try_append(set_list, index, nested_dict, dict_keys=[]): try: for dict_key in dict_keys: nested_dict = nested_dict.__getitem__(dict_key) if set_list: set_list[index] += str(nested_dict) return nested_dict except: return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2", "def __setitem__(self, i: int, o: 'Tree') -> None:\n ...", "def Insert(self, index, rdfpathspec=None, **kwarg):\n if rdfpathspec is None:\n rdfpathspec = self.__class__(**kwarg)\n\n if index == 0:\n # Copy ourselves to a temp copy.\n nested_proto = self.__class__()\n nested_proto.SetRawData(self.GetRawData())\n\n # Replace ourselves with the new object.\n self.SetRawData(rdfpathspec.GetRawData())\n\n # Append the temp copy to the end.\n self.last.nested_path = nested_proto\n else:\n previous = self[index - 1]\n rdfpathspec.last.nested_path = previous.nested_path\n previous.nested_path = rdfpathspec", "def _try_set(set_list, index, nested_dict, dict_keys=[]):\n try:\n for dict_key in dict_keys:\n nested_dict = nested_dict.__getitem__(dict_key)\n set_list[index] = str(nested_dict)\n return nested_dict\n except:\n return ''", "def add_flat(dict_, key, elem):\r\n\r\n if isinstance(elem, dict):\r\n dict_.update(elem)\r\n else:\r\n dict_[key] = elem", "def dubk(d, x, y, z):\n try:\n d[x][y]['list'].append(z)\n except KeyError as e:\n d[x][y] = {'list': [z]}", "def add_version_to_leafjson(self, fpath, dist, leafname,\n indexjson=\"index.json\"):\n leafdir = self.path / leafname\n leafjson = leafdir / indexjson\n with self.lock_leaf_json(leafname, leafjson) as leafdata:\n if fpath.name in set([x['filename'] for x in leafdata]):\n self.log.warning(\"%s - Attempt to add duplicate to %s\", fpath, leafjson)\n return leafdata\n leafdata.append(self.leafdata(fpath, dist))\n return leafdata", "def append(self, subnodes):\n if not hasattr(subnodes, \"__iter__\"):\n subnodes = [subnodes]\n\n for subnode in subnodes:\n try:\n if not issubclass(type(subnode), pyfdt.FdtNop):\n index = self.index(subnode.name)\n item = self.pop(index)\n else:\n item = None\n except ValueError:\n item = None\n\n if isinstance(item, pyfdt.FdtNode) and isinstance(\n subnode, pyfdt.FdtNode\n ):\n item.merge(subnode)\n subnode = item\n\n super().append(subnode)", "def set_nested_item(data_dict: dict, key_list: tuple or list, value):\r\n reduce(getitem, key_list[:-1], data_dict)[key_list[-1]] = value\r\n return data_dict", "def __setitem__(self, index, item):\n # type: (int, Any) -> None\n items = self._refs(item) if isinstance(index, slice) else self.ref(item)\n return list.__setitem__(self, index, items)", "def __getitem__(self, object):\n # check for previously unknown object\n if object not in self.parents:\n self.parents[object] = object\n self.weights[object] = 1\n return object\n\n # find path of objects leading to the root\n path = [object]\n root = self.parents[object]\n while root != path[-1]:\n path.append(root)\n root = self.parents[root]\n\n # compress the path and return\n for ancestor in path:\n self.parents[ancestor] = root\n return root", "def append(self, key, value):\n dkey = digest(key)\n node = Node(dkey)\n\n def append_(nodes):\n # if this node is close too, then store here as well\n if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):\n try:\n pvalue = json.loads(value)\n self.set_keys.add(dkey)\n if dkey not in self.storage:\n _log.debug(\"%s local append key: %s not in storage set value: %s\" % (base64.b64encode(node.id), base64.b64encode(dkey), pvalue))\n self.storage[dkey] = value\n else:\n old_value_ = self.storage[dkey]\n try:\n old_value = 
json.loads(old_value_)\n new_value = list(set(old_value + pvalue))\n except:\n # When the key have been used for single values it does not contain a list\n # When have been deleted contains None\n # Just replace old value\n new_value = pvalue\n old_value = old_value_\n _log.debug(\"%s local append key: %s old: %s add: %s new: %s\" % (base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value))\n self.storage[dkey] = json.dumps(new_value)\n except:\n _log.debug(\"Trying to append something not a JSON coded list %s\" % value, exc_info=True)\n ds = [self.protocol.callAppend(n, dkey, value) for n in nodes]\n return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to set key %s\" % key)\n _log.debug(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find().addCallback(append_)", "def __getitem__(self, item):\n index = self.reindex(item)\n return self.parent[index]", "def add_to_index(index, keyword, url):\n for entry in index:\n if entry[0] == keyword:\n for link in entry[1]:\n if link[0] == url:\n return\n entry[1].append([url, 0])\n return\n # not found, add new keyword to index\n index.append([keyword, [[url, 0]]])", "def add(self, item):\r\n self.root = self.recurse_add(self.root, item)", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def insert(self, index: int, tree: 'Tree') -> None:\n ...", "def find(found_item, hash_table_cell):\n if found_item:\n found_item[1] = obj\n else:\n hash_table_cell.append([key, obj])\n self.size += 1\n self._keys.append(key)", "def append( self, obj ):\n self[obj.getType()] = obj\n obj.setParent( self.parent )\n return obj", "def _append_nested(\n collection: Any, data: Union[np.ndarray, List[Any], Tuple[Any, ...],\n Dict[str, Any]]) -> Any:\n if isinstance(data, dict):\n for k in collection:\n _append_nested(collection[k], data[k])\n return collection\n elif isinstance(data, tuple):\n return tuple(\n _append_nested(collection[index], value)\n for index, value in enumerate(data))\n else:\n collection.append(data)\n return collection", "def __call__(self, item):\n if 'search' not in item:\n return\n if item.get('complete'):\n return\n item.apply('complete', True)\n i_ = item.parent.index(item)\n for j, data in self._search(item['search']):\n item.parent.insert(i_ + 2 + j, sdict(data.items()))", "def insert(self, pathlist):\n node = self.root\n for letter in pathlist:\n child = node.get(letter)\n if not child:\n node[letter] = {}\n node = node[letter]", "def save(self, nodedict, root=''):\n setitem = super().__setitem__\n getitem = super().__getitem__\n tag = nodedict['tag']\n text = nodedict.get('text', None)\n if hasattr(text, 'strip'):\n text = text.strip()\n attrib = nodedict.get('attrib', {})\n path = '/'.join([root, tag])\n nodes = nodedict.get('nodes', [])\n if text not in ('', None): # text=0 is stored\n try:\n setitem(path, text)\n except Exception as exc:\n sys.stderr.write('%s: %s\\n' % (path, exc))\n raise\n elif attrib and not nodes:\n setitem(path, numpy.nan)\n for subdict in _resolve_duplicates(nodes):\n self.save(subdict, path)\n if attrib:\n dset = getitem(path)\n for k, v in attrib.items():\n dset.attrs[k] = v", "def __setitem__(self, index: int, item: Any) -> None:\n # If empty raise 
indexerror\n if self.is_empty():\n raise IndexError\n # Set the first item\n elif index == 0:\n self._first = item\n # Recurse on the _rest\n else:\n if not self._rest:\n raise IndexError\n self._rest.__setitem__(index - 1, item)", "def __getitem__(self, i: int) -> 'Tree':\n ...", "def append(self, tree):\n self.insert(len(self), tree)", "def _walk(self):\n while self._slice:\n new_slice = []\n for element in self._slice:\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n new_slice.extend(sublist)\n self._slice = new_slice", "def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value", "def insert(self, index: int, item: Any) -> None:\n if self.is_empty() and index != 0:\n raise IndexError\n # Insert at the beginning.\n elif index == 0:\n to_push = self._first\n # modify self._first\n self._first = item\n # Call insert on to_push onto _rest\n if not self._rest and to_push:\n self._rest = RecursiveList([to_push])\n else:\n self._rest.insert(0, to_push)\n # Append case, add at the end when _rest is None\n elif index == 1 and not self._rest:\n self._rest = RecursiveList([item])\n # Recurse on the rest of the list.\n else:\n if not self._rest:\n raise IndexError\n else:\n self._rest.insert(index - 1, item)", "def add_item_to_cache(self) -> None:\n item = self.get_selected_item(self.tree_db)\n if item is None:\n return\n\n data_node = item.data()\n json_cache = self._data_encoder.encode(data_node.get_instance())\n self.send_data_to_cache(json_cache)" ]
[ "0.57342213", "0.56596655", "0.56033224", "0.5559192", "0.54839617", "0.54343253", "0.5398545", "0.53620857", "0.535225", "0.53277856", "0.53153884", "0.53132755", "0.5304931", "0.52992874", "0.5288722", "0.52861303", "0.5253632", "0.52518916", "0.5250139", "0.523779", "0.52345145", "0.5212172", "0.5193763", "0.5185104", "0.5166666", "0.5159624", "0.51497984", "0.5133238", "0.5130228", "0.51230943" ]
0.6493033
0
Uses try_func and converts its string result to a date string.
def _try_date(set_list, index, nested_dict, dict_keys=[], try_func=_try_set): import datetime try: dt = try_func(None, None, nested_dict, dict_keys) # 2012-07-05T00:00:00+04:00 dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S%z") try_func(set_list, index, str(dt.date())) print(str(dt.date())+" sdfsdfsdf") return dt.date() # Дата присвоения кадастрового номера except: return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def try_func(func):\n try:\n return func()\n except Exception as e:\n return e", "def _get_normal_date(self, args):\n\n func1, func2, func3 = args\n self.assertIsNotNone(func1(20130201, \"20190120\"))\n self.assertIsNotNone(func2(\"2013/02/01\", \"2019-01-20\"))\n self.assertIsNotNone(func3(r\"2013-/\\-02~@-\\/-@~01\",\n pd.to_datetime('2019-01-20')))", "def convert_date_run(datestring):\n if isinstance(datestring, datetime.date):\n return datestring\n\n try:\n return datetime.datetime.strptime(datestring, \"%Y-%m-%d\").date()\n except ValueError:\n try:\n return datetime.datetime.strptime(datestring, \"%m/%d/%Y\").date()\n except ValueError:\n # ISO 8601 without timezone\n return datetime.datetime.strptime(datestring, \"%Y-%m-%dT%H:%M:%S\").date()", "def _get_date(string):\n try:\n return _date.fromordinal(_dateparse(string).toordinal())\n except ValueError:\n print(string)\n raise", "def convert(v):\n\n if type(v) is str and rexp.match(v):\n return as_date(v)\n return v", "def convert_date(datestring):\n datestring = datestring.rstrip('†')\n if datestring not in ('NA', 'None specified', 'TBA', 'None', 'N/A', ''):\n try:\n return dateutil_parser.parse(datestring).date()\n except ValueError: # dateutil's error messages aren't friendly\n raise ValueError(\"Not a date: {0}\".format(datestring))", "def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue", "def make_date_extractor(md_field: str) -> Callable:\n\n def extract(props: Dict[str, str]) -> str:\n ds = ''\n v = props.get(md_field, '')\n try:\n d = datetime.strptime(v, '%Y/%m/%d')\n ds = d.strftime('%Y%m%d')\n except Exception:\n pass\n return ds\n\n return extract", "def str_2_date( sdate ):\r\n if isinstance( sdate, str ):\r\n for fmt in ( \"%Y-%m-%d\", \"%m/%d/%Y\" ):\r\n try:\r\n return datetime.strptime( sdate, fmt ).date()\r\n except ValueError:\r\n pass\r\n else:\r\n return sdate", "def date_or_none(date_str: str | None | dt.date | dt.datetime) -> dt.date | None:\n\n if not date_str:\n return None\n\n if isinstance(date_str, dt.datetime):\n return date_str.date()\n\n if isinstance(date_str, dt.date):\n return date_str\n\n if \" \" in date_str and len(date_str) > 10:\n return dt.datetime.strptime(date_str, \"%d %B %Y\").date()\n\n p_date_str = date_str.replace(\"/\", \"-\").replace(\".\", \"-\")\n date_split = p_date_str.split(\"-\")\n\n if len(date_split) > 3 or len(date_split[-1]) > 4:\n raise ValidationError(f\"Date {date_str} not in parsable format\")\n\n if len(date_split[0]) == 4:\n date_format = \"%Y-%m-%d\"\n elif len(date_split[-1]) == 4:\n date_format = \"%d-%m-%Y\"\n else:\n date_format = \"%d-%m-%y\"\n\n return dt.datetime.strptime(p_date_str, date_format).date()", "def str_to_date(date_str: str) -> Optional[datetime.date]:\n if not date_str:\n # If the type is falsy, return None.\n return\n try:\n # Most dates in the API are in this format...\n return datetime.strptime(date_str, \"%m/%d/%Y\").date()\n except ValueError:\n # Please forgive me for this nested try-except block.\n # This API is _whack_.\n try:\n # But some are in this format...\n return datetime.strptime(date_str, \"%Y-%m-%d\").date()\n except ValueError:\n # And sometimes you get random crap like '0000-00-00'...\n return\n except TypeError:\n # If the type is truthy, but can't be cast to a date, return None.\n return", "def get_date_or_none(date_str, date_format='%Y-%m-%d'):\n try:\n 
return datetime.strptime(date_str, date_format).date()\n except (ValueError, TypeError):\n return None", "def str2date(string):\n if string is 'None':\n return None\n return datetime.strptime(string + '000', '%Y-%m-%dT%H:%M:%S.%f')", "def get_date(date):\n return date", "def interpret_date( text ):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def _convert_date(date_string, s_format='%Y-%m-%d'):\r\n if isinstance(date_string, str):\r\n return datetime.strptime(date_string, s_format)\r\n elif isinstance(date_string, datetime):\r\n return date_string\r\n else:\r\n raise TypeError(date_string, 'is not a string or datetime object')", "def convert_str_to_date(date: str):\n try:\n return datetime.strptime(date, \"%Y/%m/%d\").date()\n except ValueError:\n try:\n return datetime.strptime(date, \"%Y-%m-%d\").date()\n except ValueError as error:\n raise error", "def format_date(data, format_string='%Y-%m-%d'):\n if (data == '') or 'BC' in data:\n return None\n return datetime.strptime(data, format_string)", "def format_date(val, fmt='%m-%d-%Y'):\n date_obj = None\n\n try:\n date_obj = datetime.strptime(val, fmt)\n except Exception as exc:\n log.warning(\"Problem formatting date: {} - {} due: {}\"\n .format(val, fmt, exc))\n\n return date_obj", "def str_to_date(str):\n if not str:\n return str\n return datetime.datetime.strptime(str, DEFAULT_SERVER_DATE_FORMAT).date()", "def auto_converter(cell, book=None, date_format='%Y/%m/%d', *args, **kwds):\n if cell.ctype == 2:\n __v = cell.value\n return str(int(__v) if int(__v) == __v else __v)\n elif cell.ctype == 3:\n import datetime\n date_tuple = xlrd.xldate_as_tuple(cell.value, book.datemode)\n __d = datetime.datetime(*date_tuple)\n # Shortcut days to just print the way we expect\n if __d.hour == __d.minute == __d.second == 0:\n return __d.date().strftime(date_format)\n else:\n return __d.isoformat()\n else:\n return str(cell.value).strip()", "def interpret_date(text):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def date_from_string(my_string):\n if my_string:\n return datetime.strptime(my_string, DATE_FORMAT).date()\n return None", "def convert_to_date(excel_sec, book, missing):\n if excel_sec is not '':\n date = xlrd.xldate_as_tuple(excel_sec, book.datemode)\n return str(datetime.datetime(*date))\n else:\n return missing", "def filter_formatdate(val, format_str):\n if not isinstance(val, (datetime, date, time)):\n return val\n return val.strftime(format_str)", "def get_date_or_none(obj, key):\n try:\n return datetime.strptime(obj[key], '%Y-%m-%d')\n except (KeyError, ValueError):\n return None", "def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')", "def get_date(text=\"\"):\n clear()\n date = input(\"Enter {}date (Format:YYYY-MM-DD): \".format(text))\n try:\n datetime.datetime.strptime(date, \"%Y-%m-%d\")\n except ValueError:\n input(\"Please enter date in this format: YYYY-MM-DD.\"\n \" Press enter to continue.\")\n return get_date()\n else:\n return date", "def _str_to_date(self, date):\n return datetools.date_parser(date)", "def try_strptime(s: str, fmt: str) -> datetime.datetime:\n try:\n return datetime.datetime.strptime(s, fmt)\n except ValueError:\n return None" ]
[ "0.6210365", "0.5784953", "0.56346416", "0.55400366", "0.5485929", "0.54631007", "0.5443882", "0.5425232", "0.54151005", "0.5411978", "0.53939974", "0.5378411", "0.5376887", "0.5360426", "0.5290894", "0.5288425", "0.52860683", "0.5281345", "0.52689224", "0.52656925", "0.5246117", "0.5242045", "0.5211766", "0.5210925", "0.52001536", "0.51797867", "0.51641035", "0.5134775", "0.5133669", "0.51317763" ]
0.6782496
0
Selects features filterwise per filterband, starting with no features, then selecting the best filter pair from the best filterband (measured on an internal train/test split)
def collect_best_features(self): bincsp = self.binary_csp # just to make code shorter n_folds = len(self.binary_csp.folds) n_class_pairs = len(self.binary_csp.class_pairs) result_shape = (n_folds, n_class_pairs) self.train_feature = np.empty(result_shape, dtype=object) self.train_feature_full_fold = np.empty(result_shape, dtype=object) self.test_feature = np.empty(result_shape, dtype=object) self.test_feature_full_fold = np.empty(result_shape, dtype=object) self.selected_filters_per_filterband = np.empty(result_shape, dtype=object) for fold_i in range(n_folds): for class_pair_i in range(n_class_pairs): bin_csp_train_features = deepcopy(bincsp.train_feature[ self.selected_filter_inds, fold_i, class_pair_i]) bin_csp_train_features_full_fold = deepcopy( bincsp.train_feature_full_fold[ self.selected_filter_inds, fold_i, class_pair_i]) bin_csp_test_features = deepcopy(bincsp.test_feature[ self.selected_filter_inds, fold_i, class_pair_i]) bin_csp_test_features_full_fold = deepcopy( bincsp.test_feature_full_fold[ self.selected_filter_inds,fold_i, class_pair_i]) selected_filters_per_filt = self.select_best_filters_best_filterbands( bin_csp_train_features, max_features=self.n_features, forward_steps=self.forward_steps, backward_steps=self.backward_steps, stop_when_no_improvement=self.stop_when_no_improvement) self.train_feature[fold_i, class_pair_i] = \ self.collect_features_for_filter_selection( bin_csp_train_features, selected_filters_per_filt) self.train_feature_full_fold[fold_i, class_pair_i] = \ self.collect_features_for_filter_selection( bin_csp_train_features_full_fold, selected_filters_per_filt) self.test_feature[fold_i, class_pair_i] = \ self.collect_features_for_filter_selection( bin_csp_test_features, selected_filters_per_filt) self.test_feature_full_fold[fold_i, class_pair_i] = \ self.collect_features_for_filter_selection( bin_csp_test_features_full_fold, selected_filters_per_filt) self.selected_filters_per_filterband[fold_i, class_pair_i] = \ selected_filters_per_filt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tmodel_based_score = 0\n\tscaling_factors = [\"0.25*mean\", \"0.5*mean\", \"median\", \"1.25*mean\", \"1.5*mean\"]\n\t# scaling_factors = [\"0.5*mean\", \"median\"]\n\tmodel_based_selector = None\n\tmodel_based_train_features_selected = None\n\tmodel_based_test_features_selected = None\n\n\tfor factor in scaling_factors:\n\t\tprint(factor)\n\t\ttemp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), 
threshold=factor)\n\t\ttemp_model_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)\n\t\ttemp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_model_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Model Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > model_based_score:\n\t\t\tmodel_based_score = temp_score\n\t\t\tmodel_based_selector = temp_model_based_selector\n\t\t\tmodel_based_train_features_selected = temp_model_based_train_features_selected\n\t\t\tmodel_based_test_features_selected = temp_model_based_test_features_selected\n\n\tmodel_based_mask = model_based_selector.get_support()\n\tprint(\"This is the model based mask: \")\n\tprint(model_based_mask)\n\n\treturn model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask", "def select_best_chanels():\r\n \r\n \r\n all_paths = [['data_bci\\\\row_data\\\\subject1\\\\'], ['data_bci\\\\row_data\\\\subject2\\\\'],['data_bci\\\\row_data\\\\subject3\\\\']]\r\n\r\n train_subjects = ['01']\r\n test_subject = '02'\r\n freq = 512\r\n\r\n cutoff_beggining = 0\r\n columns_to_read = ['Fp1', 'AF3' ,'F7', 'F3', 'FC1', 'FC5', 'T7', 'C3', 'CP1', 'CP5',\r\n 'P7', 'P3', 'Pz', 'PO3', 'O1', 'Oz', 'O2', 'PO4', 'P4', 'P8', 'CP6',\r\n 'CP2', 'C4', 'T8', 'FC6', 'FC2', 'F4', 'F8', 'AF4', 'Fp2', 'Fz', 'Cz','class']\r\n seq_len = 0\r\n cut_step = 0\r\n num_perseg = freq\r\n num_overlap = int(num_perseg/2)\r\n min_freq=8\r\n max_freq=45\r\n \r\n chanels_rank = rank_chanels()\r\n \r\n result = []\r\n for i in range(1, len(chanels_rank)):\r\n intermidiate_result = []\r\n for path in all_paths:\r\n train_full_data, train_full_data_filtered, train_full_anots, test_full_data, test_full_filtered, test_full_annoations = read_filter(path, train_subjects,test_subject, columns_to_read, cutoff_beggining, seq_len, cut_step)\r\n\r\n train_psd_signals = eval_psd_not_modulated(train_full_data, num_perseg, num_overlap, freq, min_freq, max_freq)\r\n test_psd_signals = eval_psd_not_modulated(test_full_data, num_perseg, num_overlap, freq, min_freq, max_freq) \r\n\r\n train_psd_signals = flatten_data(train_psd_signals[:,:,chanels_rank[:i]])\r\n test_psd_signals = flatten_data(test_psd_signals[:,:,chanels_rank[:i]])\r\n \r\n acc = evalute_subset(train_psd_signals, test_psd_signals, train_full_anots, test_full_annoations)\r\n intermidiate_result.append(acc)\r\n \r\n result.append(intermidiate_result)\r\n #mean_subject_acc = np.array([sum(humans_acc)/len(humans_acc) for humans_acc in result])\r\n #best_idx = np.argmax(mean_subject_acc)\r\n\r\n return result, chanels_rank", "def broadbandfilters(self):\n all = self.allbroadbandfilters\n return [all[layer-1] for layer in self.__layers]", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, 
percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def fit(self, x_train_fb, y_train):\n labels = np.sort(np.unique(y_train))\n n_classes = len(labels)\n self.csp = CSP(self.n_filters)\n fbcsp_filters_multi = []\n for i in range(n_classes):\n cls_of_interest = labels[i]\n select_class_labels = lambda cls, y_labels: [0 if y == cls else 1 for y in y_labels]\n y_train_cls = np.asarray(select_class_labels(cls_of_interest, y_train))\n fbcsp_filters = self.fit_ovr(x_train_fb, y_train_cls)\n fbcsp_filters_multi.append(fbcsp_filters)\n return np.asarray(fbcsp_filters_multi)", "def selectFeatures(k_features=5, *args):\n X, y = args\n skb = SelectKBest(k=k_features)\n return skb.fit_transform(X, y)", "def sub_select_features(features, strategy):\n\n def extract_one_index(y_val):\n index_ones = []\n y_prev = 0\n start_stop = []\n if y_val[-1] == 1:\n y_val = y_val.tolist() + [0]\n for i, y in enumerate(y_val):\n if y_prev == 0 and y == 1:\n start_stop = [i]\n if y_prev == 1 and y == 0:\n start_stop.append(i)\n index_ones.append(start_stop)\n y_prev = y\n return index_ones\n\n def wrapper(start_stop, maxi):\n size = start_stop[1] - start_stop[0]\n bound = (size+1)//2\n return [max(0, start_stop[0]-bound), min(maxi, start_stop[1]+bound)]\n\n def deduce_index_to_keep(one_index, maxi):\n wrapped = [wrapper(start_stop, maxi) for start_stop in one_index]\n to_keep = [idx for idx in range(wrapped[0][0], wrapped[0][1])]\n for start_stop in wrapped[1:]:\n to_keep += [idx for idx in range(start_stop[0], start_stop[1]) if idx > to_keep[-1]]\n return to_keep\n\n if strategy == 0:\n new_features = features # We do nothing\n\n else:\n new_features = dict()\n for which in ['train', 'test']:\n one_id = extract_one_index(features['y_'+which])\n true_idx = deduce_index_to_keep(one_id, len(features['y_'+which]))\n try:\n new_features['x_'+which] = features['x_'+which][true_idx]\n new_features['y_'+which] = features['y_'+which][true_idx]\n except IndexError as e:\n print(which)\n print(features['x_'+which].shape)\n print(features['y_'+which].shape)\n print(one_id)\n raise e\n\n return new_features", "def _forward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = 
add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = ()\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f in k:\n continue\n candidate_features = tuple(sorted([*k, f]))\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def feature_selection(x_train, y_train, nb_feats=150):\n cs = np.zeros(x_train.shape[1])\n for f in range(x_train.shape[1]):\n if np.isclose(np.sum(x_train[:, f]), 0):\n cs[f] = 0\n continue\n\n cs[f], p = spearmanr(x_train[:, f], np.mean(y_train, axis=1))\n select = np.argsort(np.abs(cs))[np.max([-nb_feats, -len(cs)]):]\n return select", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask", "def feature_selection(cls, tbl, thresh=-1):\n\n numerical_columns = [col for col in tbl.columns if col not in [\"F21\", \"F20\", \"F54\", \"Name\"]]\n X = tbl[numerical_columns[:-1]].values\n y = tbl[numerical_columns[-1]].values\n\n n = X.shape[1]\n slist = np.zeros((n, 3))\n slist[:, -1] = 1\n\n # identify relevant 
features\n slist[:, 0] = cls._c_correlation(X, y) # compute 'C-correlation'\n idx = slist[:, 0].argsort()[::-1]\n slist = slist[idx,]\n slist[:, 1] = idx\n if thresh < 0:\n thresh = np.median(slist[-1, 0])\n\n slist = slist[slist[:, 0] > thresh, :] # desc. ordered per SU[i,c]\n\n \"Identify redundant features among the relevant ones\"\n cache = {}\n m = len(slist)\n p_su, p, p_idx = cls._get_first_element(slist)\n for i in xrange(m):\n q_su, q, q_idx = cls._get_next_element(slist, p_idx)\n if q:\n # p, q = int(p), int(q)\n while q:\n if (p, q) in cache:\n pq_su = cache[(p, q)]\n else:\n pq_su = cls._symmetrical_uncertainty(X[:, int(p)], X[:, int(q)])\n cache[(p, q)] = pq_su\n\n if pq_su >= q_su:\n slist = cls._remove_element(slist, q_idx)\n q_su, q, q_idx = cls._get_next_element(slist, q_idx)\n\n p_su, p, p_idx = cls._get_next_element(slist, p_idx)\n if not p_idx:\n break\n\n sbest = slist[slist[:, 2] > 0, :2]\n selected_features = [int(ff) for ff in sbest[:, 1]]\n selected_features = [numerical_columns[i] for i in selected_features]\n selected_features.insert(0, \"Name\")\n selected_features.append(\"category\")\n new_tbl = tbl[selected_features]\n\n return new_tbl", "def obtain_filters_mask(model, threshold, cba_index, prune_index):\n\n num_pruned_bn = 0\n num_total_bn = 0\n num_remain_filters = []\n mask_remain_filters = []\n\n # The number of filters reserved must be a multiple of 8\n int_multiple = 8\n filter_switch = list(range(0, 1024, int_multiple))\n\n # cba_index stores all convolution layers with BN layer (the previous layer of YOLO layer is without BN layer)\n for index in cba_index:\n bn_module = model.module_list[index][1]\n if index in prune_index:\n mask = obtain_bn_mask(bn_module, threshold).cpu().numpy()\n num_layer_remain_bn = int(mask.sum())\n if num_layer_remain_bn < 8:\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-8]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n else:\n for i, _ in enumerate(filter_switch):\n if num_layer_remain_bn < filter_switch[i]:\n num_layer_remain_bn = filter_switch[i - 1]\n break\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-num_layer_remain_bn]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n\n num_remain_bn = int(mask.sum())\n num_pruned_bn = num_pruned_bn + mask.shape[0] - num_remain_bn\n\n if num_remain_bn == 0:\n print(\"Channels would be all pruned!\")\n raise Exception\n\n logger.info('layer index: %d \\t total channel: %d \\t remaining channel: %d',\n index, mask.shape[0], num_remain_bn)\n else:\n mask = np.ones(bn_module.weight.data.shape)\n num_remain_bn = mask.shape[0]\n num_total_bn += mask.shape[0]\n num_remain_filters.append(num_remain_bn)\n mask_remain_filters.append(mask.copy())\n\n prune_ratio = num_pruned_bn / num_total_bn\n logger.info('Prune channels: %d \\t Prune ratio: %.3f', num_pruned_bn, prune_ratio)\n\n return num_remain_filters, mask_remain_filters", "def _backward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else 
get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = tuple(features)\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f not in k:\n continue\n candidate_features = tuple([x for x in k if x != f])\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def extract_cochlear_subbands(nets, SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, pad_factor, debug, subbands_ifft, return_subbands_only, rectify_and_lowpass_subbands, rFFT, custom_filts, erb_filter_kwargs, include_all_keys, compression_function, include_subbands_noise, subbands_noise_mean, subbands_noise_stddev):\n\n # make the erb filters tensor\n nets['filts_tensor'] = make_filts_tensor(SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, use_rFFT=rFFT, pad_factor=pad_factor, custom_filts=custom_filts, erb_filter_kwargs=erb_filter_kwargs)\n\n # make subbands by multiplying filts with fft of input\n nets['subbands'] = tf.multiply(nets['filts_tensor'],nets['fft_input'],name='mul_subbands')\n if debug: # return the real and imaginary parts of the subbands separately -- use if matching to their output\n nets['subbands_r'] = tf.real(nets['subbands'])\n nets['subbands_i'] = tf.imag(nets['subbands'])\n\n # TODO: with using subbands_ifft is 
redundant. \n # make the time subband operations if we are returning the subbands or if we want to include all of the keys in the graph\n if subbands_ifft or return_subbands_only or include_all_keys:\n if not rFFT:\n nets['subbands_ifft'] = tf.real(tf.ifft(nets['subbands'],name='ifft_subbands'),name='ifft_subbands_r')\n else:\n nets['subbands_ifft'] = tf.spectral.irfft(nets['subbands'],name='ifft_subbands')\n if return_subbands_only or include_all_keys:\n nets['subbands_time'] = nets['subbands_ifft']\n if rectify_and_lowpass_subbands: # TODO: the subband operations are hard coded in?\n nets['subbands_time_relu'] = tf.nn.relu(nets['subbands_time'], name='rectified_subbands')\n nets['subbands_time_lowpassed'] = hanning_pooling_1d_no_depthwise(nets['subbands_time_relu'], downsample=2, length_of_window=2*4, make_plots=False, data_format='NCW', normalize=True, sqrt_window=False)\n\n # TODO: noise is only added in the case when we are calcalculating the time subbands, but we might want something similar for the cochleagram\n if return_subbands_only or include_all_keys:\n # Compress subbands if specified and add noise. \n nets = compression_function(nets, input_node_name='subbands_time_lowpassed', output_node_name='subbands_time_lowpassed_compressed')\n if include_subbands_noise:\n nets = add_neural_noise(nets, subbands_noise_mean, subbands_noise_stddev, input_node_name='subbands_time_lowpassed_compressed', output_node_name='subbands_time_lowpassed_compressed_with_noise')\n nets['subbands_time_lowpassed_compressed_with_noise'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed_with_noise'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed_with_noise']\n else:\n nets['subbands_time_lowpassed_compressed'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed']\n\n return nets", "def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\titerative_based_score = 0\n\t# given that all pairs use the same amount of features, the position 0 was arbitrarily selected to compute the number of features being used\n\tmin_number_features = int(0.15*len(train_features[0]))\n\tmax_number_features = int(0.85*len(train_features[0]))\n\n\t# min_number_features = 19\n\t# max_number_features = 20\n\n\titerative_based_selector = None\n\titerative_based_train_features_selected = None\n\titerative_based_test_features_selected = None\n\n\tfor i in range(min_number_features, max_number_features):\n\t\tprint(i)\n\t\ttemp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)\n\t\ttemp_iterative_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)\n\t\ttemp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Iterative Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > iterative_based_score:\n\t\t\titerative_based_score = temp_score\n\t\t\titerative_based_selector = temp_iterative_based_selector\n\t\t\titerative_based_train_features_selected = 
temp_iterative_based_train_features_selected\n\t\t\titerative_based_test_features_selected = temp_iterative_based_test_features_selected\n\n\titerative_based_mask = iterative_based_selector.get_support()\n\tprint(\"This is the iterative based mask: \")\n\tprint(iterative_based_mask)\n\n\treturn iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask", "def select_best(self,dataframe: pd.DataFrame):\n \n # create a Dataframe only for categorical variables\n # categorical_df = pd.get_dummies(dataframe[self.cat_feats])\n categorical_df = dataframe[self.cat_feats]\n \n for feats in self.cat_feats:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(dataframe[feats].values)\n categorical_df.loc[:,feats] = lbl.transform(dataframe[feats].values)\n \n # select only Top 5 variables \n selector = SelectKBest(chi2,k=5)\n # give the targetcolumn and the rest of the data to the scalar to fit\n selector.fit(categorical_df,dataframe[self.target_cols])\n # get the indicies of the selected columns\n cols = selector.get_support(indices=True)\n\n # For display purpose Only\n dfscores = pd.DataFrame(selector.scores_)\n dfcolumns = pd.DataFrame(categorical_df.columns)\n\n #concat two dataframes for better visualization \n featureScores = pd.concat([dfcolumns,dfscores],axis=1)\n featureScores.columns = ['Features','Score'] #naming the dataframe columns\n featureScores = featureScores.sort_values(by='Score', ascending=False)\n \n utils.bar_plot(\n x_data= featureScores['Features'],\n y_data=featureScores['Score'],\n title=\"Select_K_Best using CHI2 For Categorical Features\",\n x_title=\"Features\",\n y_title=\"CHI2 Score\",\n output_path= os.path.join(self.output_path,\"select_k_best_chi2.html\")\n )\n \n self.cat_feats = featureScores['Features'].values.tolist()[:self.num_best]\n # drop the columns which did not qualify\n for feats in self.dataframe_d_copy.columns:\n if feats not in self.cat_feats:\n self.dataframe_d_copy = self.dataframe_d_copy.drop(feats,axis=1)\n return self.cat_feats", "def kbest(X, y, select_method, pipeline):\n\n # Fitting the tuned pipeline to the whole dataset and extracting the\n # selected features\n pipe = pipeline.fit(X=X, y=y)\n if select_method is 'enet':\n coefs = (pipe\n .best_estimator_\n .named_steps['selector']\n .estimator_\n .coef_[pipe\n .best_estimator_\n .named_steps['selector']\n .get_support()])\n elif select_method is 'f-test':\n coefs = (pipe\n .best_estimator_\n .named_steps['selector']\n .scores_[pipe\n .named_steps['selector']\n .get_support()])\n else:\n raise ValueError(\"\"\"Must specify feature selection technique \n in select method\"\"\")\n \n # Getting feature names\n names = (X\n .columns\n .values[pipe\n .best_estimator_\n .named_steps['selector']\n .get_support()])\n names_scores = list(zip(names, coefs))\n kbest_df = (pd\n .DataFrame(data=names_scores,\n columns=['Features',\n 'Coefs'])\n .sort_values(by='Coefs',\n ascending=False))\n\n # Filtering out zeroed coefficients from the elastic net that were not\n # removed in SelectFromModel\n if select_method is 'enet':\n kbest_df = kbest_df.loc[(kbest_df['Coefs'] != 0.000000)\n | kbest_df['Coefs'] != -0.000000]\n else:\n pass\n\n # Getting the tuned parameters\n optimal_params = pipeline.best_params_\n params_df = pd.DataFrame.from_dict(data=optimal_params,\n orient='index',\n columns=['Parameters'])\n best_inner_cv_test_score = pipeline.best_score_\n\n return kbest_df, params_df, best_inner_cv_test_score", "def 
bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def bessel_bandpass_filter(data, lowcut, highcut, fs, order=2):\n\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n # bessel() and lfilter() are from scipy.signal\n\n b, a = bessel(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def select_features(self):\r\n \r\n features_list = list(self.feed_data.columns.values)\r\n features_list.remove(\"min_time\")\r\n thisrace = self.config.race_to_predict\r\n\r\n #if never ran race before, don't include these variables in feature\r\n #selection, they're just 0's anyway\r\n if self.config.first_time_running_race == True:\r\n unuseable_columns = [('min_time', thisrace),('std', thisrace),('num_races', thisrace),\r\n ('rainfall', thisrace),\r\n ('temp', thisrace),\r\n ('wind', thisrace),\r\n ('metersup', thisrace), \r\n 'sex_W']\r\n else:\r\n #drop this column...probs should have removed it earlier. \r\n unuseable_columns = ['sex_W']\r\n #print(features_list)\r\n for element in unuseable_columns:\r\n features_list.remove(element)\r\n data_with_all_feats = self.feed_data.drop(unuseable_columns,axis=1)\r\n colstodrop = features_list\r\n thiscols = []\r\n data_with_current_feats = data_with_all_feats.drop(features_list,axis=1)\r\n checkfit=100.0\r\n scores = []\r\n dropped_cols = []\r\n loopgain =True\r\n #mymod = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=10,\r\n # min_samples_split = 25, criterion='mse')\r\n thisloopfeatures_list = features_list\r\n curcols = data_with_current_feats.columns\r\n countgain=0\r\n #print(\"cc\",curcols)\r\n while loopgain == True:\r\n thisloopscore=100.0\r\n for fet in thisloopfeatures_list:\r\n data_with_current_feats[fet] = data_with_all_feats[fet]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=15,\r\n min_samples_split = 12, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n if ((thisloopscore - oobs) > 0.0):\r\n thisloopscore = oobs\r\n fetwinner = fet\r\n data_with_current_feats.drop(fet,axis=1,inplace=True)\r\n etrain.drop(fet,axis=1,inplace=True)\r\n\r\n data_with_current_feats[fetwinner] = data_with_all_feats[fetwinner]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n #print(fetwinner,predscore)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n #print(fetwinner,\"~\",oobs)\r\n thisloopfeatures_list.remove(fetwinner)\r\n if ((checkfit-oobs)>0.0001):\r\n checkfit = oobs\r\n curcols = 
data_with_current_feats.columns\r\n #print(curcols)\r\n else:\r\n break\r\n\r\n\r\n self.final_df = self.feed_data[data_with_current_feats.columns]\r\n self.Xtrain=self.final_df.sample(frac=0.8,random_state=200)\r\n self.Xtest=self.final_df.drop(self.Xtrain.index)#\r\n self.ytrain = self.Xtrain.pop('min_time')\r\n self.ytest = self.Xtest.pop('min_time')\r\n self.model= RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n self.model.fit(self.Xtrain,self.ytrain)\r\n #print(y)\r\n return", "def feature_selection_rf(df, threshold, cols_to_filter, label_col = 'label', pcg = 1.0):\n print(\"[Info] Feature selection by Random Forest may take a long time\")\n\n df = df.select(cols_to_filter + [label_col]).sample(withReplacement=False, fraction=pcg)\n\n df = only_numeric_columns(df, label_col = label_col)\n\n df.cache()\n\n print \"[Info] Number of rows in the DF: \" + str(df.count())\n\n input_cols = list(set(df.columns) - set([label_col]))\n\n assembler = VectorAssembler(inputCols=input_cols, outputCol='features')\n\n numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter = param_selection(df)\n\n rf_model = RandomForestClassifier(numTrees=numTrees, maxDepth=maxDepth,\n minInstancesPerNode=minInstancesPerNode,\n maxBins=maxBins, featureSubsetStrategy='auto', minInfoGain=0.0,\n impurity='gini', subsamplingRate=subsamplingRate, labelCol = label_col)\\\n\n pipeline = Pipeline(stages=[assembler, rf_model])\n\n pipeline_model = pipeline.fit(df)\n\n from churn_nrt.src.projects_utils.models.modeler import getOrderedRelevantFeats\n\n feat_imp_nrt = getOrderedRelevantFeats(pipeline_model, input_cols, \"f\")\n\n n = threshold if(threshold >=1) else round(threshold*len(feat_imp_nrt))\n\n num_cols = [f[0] for f in feat_imp_nrt][0:n]\n\n return num_cols", "def test_filterestimator():\n raw = io.read_raw_fif(raw_fname)\n events = read_events(event_name)\n picks = pick_types(raw.info, meg=True, stim=False, ecg=False,\n eog=False, exclude='bads')\n picks = picks[1:13:3]\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), preload=True)\n epochs_data = epochs.get_data()\n\n # Add tests for different combinations of l_freq and h_freq\n filt = FilterEstimator(epochs.info, l_freq=40, h_freq=80,\n filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto')\n y = epochs.events[:, -1]\n with warnings.catch_warnings(record=True): # stop freq attenuation warning\n X = filt.fit_transform(epochs_data, y)\n assert_true(X.shape == epochs_data.shape)\n assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)\n\n filt = FilterEstimator(epochs.info, l_freq=None, h_freq=40,\n filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto')\n y = epochs.events[:, -1]\n with warnings.catch_warnings(record=True): # stop freq attenuation warning\n X = filt.fit_transform(epochs_data, y)\n\n filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)\n y = epochs.events[:, -1]\n with warnings.catch_warnings(record=True): # stop freq attenuation warning\n assert_raises(ValueError, filt.fit_transform, epochs_data, y)\n\n filt = FilterEstimator(epochs.info, l_freq=40, h_freq=None,\n filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto')\n with warnings.catch_warnings(record=True): # stop freq attenuation warning\n X = filt.fit_transform(epochs_data, y)\n\n # Test init exception\n assert_raises(ValueError, filt.fit, epochs, y)\n 
assert_raises(ValueError, filt.transform, epochs, y)", "def broadbandfilter(self):\n _, = self.broadbandfilters\n return _", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=1):\n b, a = butter_bandpass(lowcut, highcut, fs, order=order)\n y = filtfilt(b, a, data)\n return y", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def _choose_best_feature(self, X, y, label, sample_weights=None):\n best_feature_idx = 0\n # YOUR CODE HERE\n # Note that you need to implement the sampling feature part here for random forest!\n # Hint: You may find `np.random.choice` is useful for sampling.\n # begin answer\n n_features = X.shape[1]\n if self.sample_feature:\n max_features=max(1, min(n_features, int(np.round(np.sqrt(n_features)))))\n new_features=np.random.choice(n_features, max_features, replace=False)\n new_X=X[:, new_features]\n else:\n new_X=X\n n_new_features=new_X.shape[1]\n #new_features=np.random.choice(n_features, n_features, replace=False)\n #old_cost=self.entropy(y, sample_weights)\n #use C4.5 algorirhm\n best_impurity=None\n best_feature_idx=0\n best_feature_val=X[0, 0]\n for i in range(n_new_features):\n unique_vals=np.unique(X[:,i])\n for value in unique_vals:\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights=self._split_dataset(X, y, label, i, value, sample_weights)\n if len(sub1_y)>0 and len(sub2_y)>0:\n new_impurity=self._impurity(y, sub1_y, sub2_y)\n if best_impurity is None or new_impurity > best_impurity:\n best_impurity=new_impurity\n best_feature_idx=i\n best_feature_val=value \n # end answer\n return best_feature_idx, best_feature_val" ]
[ "0.5992918", "0.59564954", "0.59535414", "0.5932676", "0.5923692", "0.58042324", "0.5780261", "0.57645375", "0.5748206", "0.57386553", "0.56789696", "0.5660202", "0.56304276", "0.55929834", "0.55776244", "0.5565502", "0.5558058", "0.5543337", "0.5526073", "0.55201733", "0.5511409", "0.55038625", "0.54986125", "0.54629266", "0.5454232", "0.54539317", "0.54517263", "0.5421027", "0.5419613", "0.5412689" ]
0.6909208
0
Demonstrates the syntax necessary for basic usage of the FoldTree object; performs these changes with a demonstrative pose example and writes structures to PDB files if PDB_out is True
def fold_tree(PDB_out = False): ########## # FoldTree # a FoldTree encodes the internal coordinate dependencies of a Pose # a Pose object MUST have a FoldTree # the FoldTree allows regions of a pose to become independent, # it is used in many important applications, particularly: # loop modeling: where changes to the conformation of a loop region # should NOT alter the conformation of the entire protein # rigid-body docking: where changes in the position of one docking # partner should not alter the position of the other docking partner # a FoldTree is effectively a list of Edge objects, you can view the Edges # by printing the FoldTree ("print FoldTree") # the length of a FoldTree (FoldTree.size) MUST match the length of its # corresponding Pose (Pose.total_residue) # it is possible to create an improper FoldTree, the method # FoldTree.check_fold_tree returns True if the FoldTree is complete and # usable and False otherwise # some Edge objects are Jumps, indicating a "jump" in the internal # coordinate dependency # when a FoldTree is created, it can accept an optional integer argument # setting the FoldTree to contain a single Edge with a length equal to # the input integer value, the same result is attained by creating an # empty FoldTree (no input arguments) and using the method # FoldTree.simple_tree with an input integer equal to the size of the # FoldTree # 1. create the example pose test_pose = pose_from_sequence('ACDEFGHIKLMNPQRSTVWY'*3) # 2. setup the jump points, where a jump is anchored, and the cutpoint cutpoint = int( test_pose.total_residue() / 2 ) # integer division, no decimal low_jump_point = cutpoint - 10 high_jump_point = cutpoint + 10 # the easiest way to create a new complete FoldTree is to use the method # FoldTree.simple_tree to create and empty FoldTree and assign jumps to # it using the method FoldTree.new_jump # the FoldTree constructor is overloaded to accept an input integer # indicating how large to make the FoldTree # 3. create a simple, one jump FoldTree for the pose # a. using FoldTree.new_jump #pose_fold_tree = FoldTree(test_pose.total_residue()) #### these two lines produce the same FoldTree as the one above pose_fold_tree = FoldTree() pose_fold_tree.simple_tree(test_pose.total_residue()) pose_fold_tree.new_jump(low_jump_point, high_jump_point, cutpoint) print( '\nThe first FoldTree is proper:', pose_fold_tree.check_fold_tree() ) # b. using FoldTree.add_edge # a more difficult method for creating a FoldTree is simply to create it # empty and use the method FoldTree.add_edge to fill the FoldTree with # new Edge data pose_fold_tree = FoldTree() pose_fold_tree.add_edge(1, low_jump_point, -1) pose_fold_tree.add_edge(low_jump_point, cutpoint, -1) pose_fold_tree.add_edge(low_jump_point, high_jump_point, 1) pose_fold_tree.add_edge(high_jump_point, test_pose.total_residue(), -1) pose_fold_tree.add_edge(high_jump_point, cutpoint + 1, -1) print( 'The second FoldTree is proper:', pose_fold_tree.check_fold_tree() ) # demonstrate FoldTree's effect on structure # 4. 
linearize it for i in range(1, test_pose.total_residue() + 1): test_pose.set_phi(i, -180) test_pose.set_psi(i, 180) test_pose.set_omega(i, 180) # the Pose.fold_tree method is an overloaded getter/setter, # providing it with no input returns the Pose's FoldTree object # providing a FoldTree object as input overwrites the Pose's current # FoldTree with the new one # the FoldTree is set here to prevent problems when "linearizing" test_pose.fold_tree(pose_fold_tree) # this object is contained in PyRosetta v2.0 and above (optional) pymover = PyMOLMover() # 5. change and display the new structures # a. export "linearized" structure test_pose.pdb_info().name('linearized') # for PyMOLMover pymover.apply(test_pose) if PDB_out: test_pose.dump_pdb('linearized.pdb') print( '\nlinearized structure output' ) # b. make an early change test_pose.set_phi(low_jump_point - 10, 50) test_pose.pdb_info().name('pre_jump') # for PyMOLMover pymover.apply(test_pose) # all downstream residues move if PDB_out: test_pose.dump_pdb('pre_jump.pdb') print( 'pre jump perturbed structure output' ) # c. make a change in the first edge created by the jump test_pose.set_phi(low_jump_point + 5, 50) test_pose.pdb_info().name('early_in_jump') # for PyMOLMover pymover.apply(test_pose) # residues up to the cutpoint change if PDB_out: test_pose.dump_pdb('early_in_jump.pdb') print( 'first internal jump edge perturbed structure output' ) # d. make a change in the second edge created by the jump test_pose.set_phi(high_jump_point - 5, 50) test_pose.pdb_info().name('late_in_jump') # for PyMOLMover pymover.apply(test_pose) # residues down to the cutpoint change if PDB_out: test_pose.dump_pdb('late_in_jump.pdb') print( 'second internal jump edge perturbed structure output' ) # e. make a late change test_pose.set_phi(high_jump_point + 10, 50) test_pose.pdb_info().name('post_jump') # for PyMOLMover pymover.apply(test_pose) # all residues downstream move if PDB_out: test_pose.dump_pdb('post_jump.pdb') print( 'post jump perturbed structure output' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tree():\n nobv.visual_tree()", "def main():\n \n # 1. Learn a decision tree from the data in training.txt\n print \"--Building trees--\"\n train_examples = read_file('training.txt')\n print(train_examples)\n attrs = range(len(train_examples[0])-1)\n rand_tree = decision_tree_learning(train_examples, attrs, use_gain=False)\n gain_tree = decision_tree_learning(train_examples, attrs, use_gain=True)\n print \"--Done building--\"\n print\n\n # 2. Document the tree you got\n print \"--Random tree--\"\n print_tree(rand_tree)\n print\n print \"--Learn tree--\"\n print_tree(gain_tree)\n print\n\n # 3. Classify all examples in the test-set\n test_examples = read_file('test.txt')\n print \"--Testing random tree--\"\n test(rand_tree, test_examples, attrs)\n print\n print \"--Testing information gain tree--\"\n test(gain_tree, test_examples, attrs)\n print \"--Done testings--\"", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file 
'''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-s\", dest=\"statefile\", help=\"statefile\")\n\tparser.add_option(\"-o\", dest=\"outname\", help=\"outname\")\n\tparser.add_option(\"-l\", dest=\"ligcutoff\", help=\"gridlig cutoff\", default=2.5)\n\tparser.add_option(\"-b\", dest=\"bbcutoff\", help=\"gridbb cutoff\", default=2.0)\n\tparser.set_description(main.__doc__)\n\t(options, args) = parser.parse_args()\n\n\tif not options.pdbfile or not options.statefile or not options.outname:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t# get output filename\n\tcols = options.outname.split(\".\")\n\toutgridlig = cols[0] + \".gridlig\"\n\toutgridbb = cols[0] + \".gridbb\"\n\n\t# get backbone from protein\n\tprotein = Molecule()\n\tprotein.readPDB(options.pdbfile)\n\t\n\tsele = Selection()\n\tsele.makeSelection(\"BB\")\n\tbb = sele.apply_selection(protein).atomList()\n\n\t# read in previous statefile information\n\ttry:\n\t\tSTATEFILE = open(options.statefile)\n\texcept:\n\t\tprint \"unable to open statefile\"\n\t\tsys.exit()\n\n\tgridlig_file = \"\"\n\tgridbb_file = \"\"\n\tfor line in STATEFILE.readlines():\n\t\tcols = line.split()\n\t\tif cols[0] == \"gridlig:\":\n\t\t\tgridlig_file = cols[1]\n\t\tif cols[0] == \"gridbb:\":\n\t\t\tgridbb_file = cols[1]\n\n\tgridlig = grid()\n\tgridbb = grid()\n\n\tgridlig.read(gridlig_file)\n\tgridbb.read(gridbb_file)\n\n\tgridlig.setFullOccupied()\n\tgridbb.setFullOccupied()\n\n\tligcutoff = float(options.ligcutoff)\n\tbbcutoff = float(options.bbcutoff)\n\tgridTrimInclude(gridbb, bb, bbcutoff)\n\tgridTrimExclude(gridlig, bb, ligcutoff)\n\n\tgridlig.write(outgridlig)\n\tgridbb.write(outgridbb)", "def log_tree(self, description, tree):\n if self.log:\n print(description)\n print_content(astor.to_source(tree), '.py')\n print()\n print()", "def make_tree(fname: str):\n\n tree = PDDL_Tree.create(fname)\n tree.print_tree()", "def _initialize_trees(self):", "def climb_tree():\n global UP_TREE\n westdesc = \"\"\n eastdesc = \"\"\n northdesc = \"\"\n southdesc = \"\"\n UP_TREE = True\n westinvalid = False\n eastinvalid = False\n northinvalid = False\n southinvalid = False\n\n\n printmessage(\"You climb the large tree to get a look at your surroundings.\", 5, MAGENTA, 2)\n\n if ZERO_BASE_PLYR_POS in range(0, 10):\n northinvalid = True\n if ZERO_BASE_PLYR_POS in range(90, 100):\n southinvalid = True\n if ZERO_BASE_PLYR_POS in range(0, 91, 10):\n eastinvalid = True\n if ZERO_BASE_PLYR_POS in range(9, 100, 10):\n westinvalid = True\n \n if not westinvalid: \n westpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 1]\n if HAS_COMPASS: \n DISCOVERED[ZERO_BASE_PLYR_POS + 1] = \"Y\"\n if westpos == 10: # Water\n westdesc = TREE_VIEWS[2]\n else:\n westdesc = TREE_VIEWS[1]\n\n westpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 1]\n if westpos == 1:\n westdesc = TREE_VIEWS[3]\n elif westpos == 2:\n westdesc = TREE_VIEWS[4]\n else:\n westdesc = TREE_VIEWS[5]\n\n if not eastinvalid:\n eastpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 1]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 1] = \"Y\"\n if eastpos == 10: # Water\n eastdesc = TREE_VIEWS[2]\n else:\n eastdesc = TREE_VIEWS[1]\n\n eastpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 1]\n if eastpos == 1:\n eastdesc = 
TREE_VIEWS[3]\n elif eastpos == 2:\n eastdesc = TREE_VIEWS[4]\n else:\n eastdesc = TREE_VIEWS[6]\n\n\n if not northinvalid:\n northpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 10] = \"Y\"\n if northpos == 10: # Water\n northdesc = TREE_VIEWS[2]\n else:\n northdesc = TREE_VIEWS[1]\n\n northpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 10]\n if northpos == 1: # bear\n northdesc = TREE_VIEWS[3]\n elif northpos == 2: # grizzly\n northdesc = TREE_VIEWS[4]\n else:\n northdesc = TREE_VIEWS[7]\n\n\n if not southinvalid:\n southpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS + 10] = \"Y\"\n if southpos == 10: # Water\n southdesc = TREE_VIEWS[2]\n else:\n southdesc = TREE_VIEWS[1]\n\n southpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 10]\n if southpos == 1: # bear\n southdesc = TREE_VIEWS[3]\n elif southpos == 2: # grizzly\n southdesc = TREE_VIEWS[4]\n else:\n southdesc = TREE_VIEWS[8]\n\n clear_messages(0)\n printmessage(\"West: \" + westdesc, 2, GREEN, 0)\n printmessage(\"East: \" + eastdesc, 3, YELLOW, 0)\n printmessage(\"North: \" + northdesc, 4, CYAN, 0)\n printmessage(\"South: \" + southdesc, 5, MAGENTA, 0)\n #show_movement(True, 10)\n update_player_on_map()\n pause_for_keypress()\n clear_messages(0)", "def decode_structure(root):\r\n # decode = model.sampleDecoder(root_code)\r\n syms = [torch.ones(8).mul(10)]\r\n stack = [root]\r\n boxes = []\r\n objs = []\r\n copyboxs = []\r\n while len(stack) > 0:\r\n node = stack.pop()\r\n # label_prob = model.nodeClassifier(f)\r\n # _, label = torch.max(label_prob, 1)\r\n label = node.label\r\n node_type = torch.LongTensor([node.node_type.value]).item()\r\n if node_type == 1: # ADJ\r\n # left, right = model.adjDecoder(f)\r\n stack.append(node.left)\r\n stack.append(node.right)\r\n s = syms.pop()\r\n syms.append(s)\r\n syms.append(s)\r\n if node_type == 2: # SYM\r\n # left, s = model.symDecoder(f)\r\n # s = s.squeeze(0)\r\n stack.append(node.left)\r\n syms.pop()\r\n syms.append(node.sym.squeeze(0))\r\n if node_type == 0: # BOX\r\n reBox = node.box\r\n reBoxes = [reBox]\r\n recopyBoxes = [1]\r\n reObj = node.objname\r\n reObjs = [reObj]\r\n s = syms.pop()\r\n l1 = abs(s[0] + 1)\r\n l2 = abs(s[0])\r\n l3 = abs(s[0] - 1)\r\n if l1 < 0.15:\r\n sList = torch.split(s, 1, 0)\r\n bList = torch.split(reBox.data.squeeze(0), 1, 0)\r\n f1 = torch.cat([sList[1], sList[2], sList[3]])\r\n f1 = f1 / torch.norm(f1)\r\n f2 = torch.cat([sList[4], sList[5], sList[6]])\r\n folds = round(1 / s[7].item())\r\n for i in range(folds - 1):\r\n rotvector = torch.cat([f1, sList[7].mul(2 * 3.1415).mul(i + 1)])\r\n rotm = vrrotvec2mat(rotvector)\r\n center = torch.cat([bList[0], bList[1], bList[2]])\r\n dir0 = torch.cat([bList[3], bList[4], bList[5]])\r\n dir1 = torch.cat([bList[6], bList[7], bList[8]])\r\n dir2 = torch.cat([bList[9], bList[10], bList[11]])\r\n newcenter = rotm.matmul(center.add(-f2)).add(f2)\r\n newdir1 = rotm.matmul(dir1)\r\n newdir2 = rotm.matmul(dir2)\r\n newbox = torch.cat([newcenter, dir0, newdir1, newdir2])\r\n reBoxes.append(newbox)\r\n recopyBoxes.append(reBox)\r\n reObjs.append(reObj)\r\n if l3 < 0.15:\r\n sList = torch.split(s, 1, 0)\r\n bList = torch.split(reBox.data.squeeze(0), 1, 0)\r\n trans = torch.cat([sList[1], sList[2], sList[3]])\r\n trans_end = torch.cat([sList[4], sList[5], sList[6]])\r\n center = torch.cat([bList[0], bList[1], bList[2]])\r\n trans_length = math.sqrt(torch.sum(trans ** 2))\r\n trans_total = math.sqrt(torch.sum(trans_end.add(-center) ** 
2))\r\n folds = round(trans_total / trans_length)\r\n for i in range(folds):\r\n center = torch.cat([bList[0], bList[1], bList[2]])\r\n dir0 = torch.cat([bList[3], bList[4], bList[5]])\r\n dir1 = torch.cat([bList[6], bList[7], bList[8]])\r\n dir2 = torch.cat([bList[9], bList[10], bList[11]])\r\n newcenter = center.add(trans.mul(i + 1))\r\n newbox = torch.cat([newcenter, dir0, dir1, dir2])\r\n reBoxes.append(newbox)\r\n recopyBoxes.append(reBox)\r\n reObjs.append(reObj)\r\n if l2 < 0.15:\r\n sList = torch.split(s, 1, 0)\r\n bList = torch.split(reBox.data.squeeze(0), 1, 0)\r\n ref_normal = torch.cat([sList[1], sList[2], sList[3]])\r\n ref_normal = ref_normal / torch.norm(ref_normal)\r\n ref_point = torch.cat([sList[4], sList[5], sList[6]])\r\n center = torch.cat([bList[0], bList[1], bList[2]])\r\n dir0 = torch.cat([bList[3], bList[4], bList[5]])\r\n dir1 = torch.cat([bList[6], bList[7], bList[8]])\r\n dir2 = torch.cat([bList[9], bList[10], bList[11]])\r\n if ref_normal.matmul(ref_point.add(-center)) < 0:\r\n ref_normal = -ref_normal\r\n newcenter = ref_normal.mul(2 * abs(torch.sum(ref_point.add(-center).mul(ref_normal)))).add(center)\r\n if ref_normal.matmul(dir1) < 0:\r\n ref_normal = -ref_normal\r\n new_dir1 = dir1.add(ref_normal.mul(-2 * ref_normal.matmul(dir1)))\r\n if ref_normal.matmul(dir2) < 0:\r\n ref_normal = -ref_normal\r\n new_dir2 = dir2.add(ref_normal.mul(-2 * ref_normal.matmul(dir2)))\r\n newbox = torch.cat([newcenter, dir0, new_dir1, new_dir2])\r\n reBoxes.append(newbox)\r\n recopyBoxes.append(reBox)\r\n reObjs.append(reObj)\r\n\r\n boxes.extend(reBoxes)\r\n objs.extend(reObjs)\r\n copyboxs.extend(recopyBoxes)\r\n return boxes, copyboxs, objs", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def debug_with_new(shell, workflow):\n projFilePath = \"/magnetic/test_project.ilp\"\n\n # New project\n shell.createAndLoadNewProject(projFilePath)\n\n # Add a file\n from ilastik.applets.dataSelection.opDataSelection import DatasetInfo\n info = DatasetInfo()\n #info.filePath = '/magnetic/gigacube.h5'\n info.filePath = '/magnetic/synapse_small.npy'\n #info.filePath = '/magnetic/singleslice.h5'\n opDataSelection = workflow.dataSelectionApplet.topLevelOperator\n opDataSelection.Dataset.resize(1)\n opDataSelection.Dataset[0].setValue(info)\n \n # Set some features\n import numpy\n featApplet = workflow.applets[2]\n featureGui = featApplet.gui\n opFeatures = featApplet.topLevelOperator\n # sigma: 0.3 0.7 1.0 1.6 3.5 5.0 10.0\n selections = numpy.array( [[True, False, False, False, False, False, False],\n [False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False], # ST EVs\n [False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False], # GGM\n [False, False, False, False, False, False, False]] )\n opFeatures.SelectionMatrix.setValue(selections)\n opFeatures.Scales.setValue( featureGui.ScalesList )\n opFeatures.FeatureIds.setValue( featureGui.FeatureIds )\n\n # Select the labeling drawer\n shell.setSelectedAppletDrawer(3)\n\n # Save the project\n shell.onSaveProjectActionTriggered()", "def main(df, filename, depth = -1, ensemble = None):\r\n \r\n decision_tree_dict = {}\r\n # empty dictionary on which the decision tree will be built\r\n decision_tree(df, decision_tree_dict, {}, depth = depth, ensemble = ensemble)\r\n # function call to build the decision tree\r\n #display_tree(decision_tree_dict, 
filename, header=True)\r\n #print(decision_tree_dict)\r\n return decision_tree_dict", "def segmental_context(save_path=conv07_outpath, data=conv07_data):\n\n save_path = os.path.join(save_path, \"segmental_context\")\n for phone in [\"phone1\", \"phone2\"]:\n attributes_names = filter(lambda x: x.startswith(phone), all_attributes) + [\"outcome\"]\n tree_file_name = phone + \"-all.dot\" #will also save a .png with the same name\n make_tree_from_attributes(save_path, tree_file_name, attributes_names, data=data)", "def main():\n\n\t\"\"\"\n\ttree = BST(treetype=int)\n\tsubtree=BST(treetype=int)\n\n\ttree.make_minheight_tree(range(15))\n\n\tsubtree.make_minheight_tree(range(7))\n\n\tprint tree \n\tprint subtree\n\n\tprint tree.subtree_matching(subtree)\n\t\"\"\"", "def prep_decisiontree(tp_vcf, fp_vcf, name, metrics, format_metrics):\n out_decision = \"%s-decisiontree-%s.graphviz\"\n #metrics = ['FS', 'MFE', 'NBQ', 'ReadPosEndDist']\n #format_metrics = [\"AD\", \"PL\", \"QUAL\"]\n extras = []\n depth = 2\n with open(tp_vcf) as in_handle:\n df_tp = read_vcf_metrics(in_handle, metrics, format_metrics, 1)\n with open(fp_vcf) as in_handle:\n df_fp = read_vcf_metrics(in_handle, metrics, format_metrics, -1)\n df = pandas.concat([df_tp, df_fp])\n for val, vartype in [(0, \"snp\"), (1, \"indel\"), (None, \"all\")]:\n if val is None:\n cur_df = df\n else:\n cur_df = df[df[\"indel\"] == val]\n explore_ml_decisiontree(cur_df,\n metrics + format_metrics + extras, depth,\n out_decision % (name, vartype))\n #print df_tp.describe()\n #print df_fp.describe()", "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n growForest(config)", "def get_code(tree, feature_names, target_names, spacer_base=\" \"):\n left = tree.tree_.children_left\n right = tree.tree_.children_right\n threshold = tree.tree_.threshold\n# print(tree.tree_.feature)\n# print(feature_names)\n features = [feature_names[i] for i in tree.tree_.feature]\n value = tree.tree_.value\n \n def recurse(left, right, threshold, features, node, depth):\n spacer = spacer_base * depth\n if (threshold[node] != -2):\n print(spacer + \"if ( 2*\" + features[node] + \" <= \" + str(int(2*threshold[node])) + \" ) {\")\n if left[node] != -1:\n recurse(left, right, threshold, features, left[node], depth+1)\n print(spacer + \"}\\n\" + spacer +\"else {\")\n if right[node] != -1:\n recurse(left, right, threshold, features, right[node], depth+1)\n print(spacer + \"}\")\n else:\n target = value[node]\n target_count_vec = []\n target_name_vec = []\n for i, v in zip(np.nonzero(target)[1], target[np.nonzero(target)]):\n target_name = target_names[i]\n target_count = int(v)\n target_count_vec.append(target_count)\n target_name_vec.append(target_name)\n\n index = target_count_vec.index(min(target_count_vec))\n target_name = target_name_vec[index]\n print(spacer + \"return \" + str(int(target_name)) + \";\" )\n\n 
recurse(left, right, threshold, features, 0, 0)", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-s\", dest=\"sele\", help=\"sele\")\n\tparser.add_option(\"-g\", dest=\"grid\", help=\"grid\")\n\tparser.add_option(\"-o\", dest=\"outfile\", help=\"outfile\")\n\tparser.add_option(\"-r\", dest=\"replace\", help=\"replace\", action=\"store_true\")\n\tparser.add_option(\"-c\", dest=\"cutoff\", help=\"cutoff\", default=3.0)\n\t(options, args) = parser.parse_args()\n\n\tif not options.pdblist or not options.grid:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif options.outfile:\n\t\toutgrid = options.outfile\n\telif options.replace:\n\t\toutgrid = options.grid\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif options.pdblist:\n\t\ttry:\n\t\t\tPDBLIST = open(options.pdblist, 'r')\n\t\texcept:\n\t\t\tprint \"unable to open pdblist\"\n\t\t\tsys.exit()\n\n\t\tpdbfiles = []\n\t\tfor line in PDBLIST.readlines():\n\t\t\tline = string.rstrip(line)\n\t\t\tpdbfiles.append(line)\n\n\tif options.sele:\n\t\tselection = Selection()\n\t\tselection.makeSelection(options.sele)\n\n\tmygrid = grid()\t\t\n\tmygrid.read(options.grid)\n\tcloneGrid = grid()\n\tmygrid.clone(cloneGrid)\n\tmygrid.setUnoccupied()\n\n\tprotein = Molecule()\t\t\n\tfor pdbfile in pdbfiles:\n\t\tprint pdbfile\n\t\tprotein.readPDB(pdbfile)\n\n\n\t\tif options.sele:\n\t\t\tnewmol = selection.apply_selection(protein)\n\t\telse:\n\t\t\tnewmol = protein.clone()\n\n\t\tatomlist = newmol.atomList()\n\n\t\tcloneGrid.setFullOccupied()\n\t\tgridTrimInclude(cloneGrid,atomlist,float(options.cutoff))\n\t\tmygrid.addGrid(cloneGrid)\n\n\n\t\tnewmol.clear()\n\t\tprotein.clear()\n\n\tnpdb = len(pdbfiles)\n\tmygrid.intify(npdb)\n\tmygrid.write(outgrid)", "def vs_create_tree(event):\n get_vs(event['c']).create_tree()", "def TreeInit(tree):\n \"\"\" Settings/NI_6133 \"\"\"\n tree.addNode('.SETTINGS')\n tree.addNode('.SETTINGS.EXPERIMENT')\n tree.addNode('.SETTINGS.NI')\n tree.addNode('.SETTINGS.NI.NI_6602_TIME')\n tree.addNode('.SETTINGS.NI.NI_6133')\n tree.addNode('.NI_6133')\n tree.addNode('.NI_FPGA')\n tree.addNode('.SETTINGS.NI.NI_6133_DIO')\n tree.addNode('.TEK_2024B')\n tree.addNode('.TEK_2024B.TEK')\n tree.addNode('.TEK_2024B.TEK1')\n tree.addNode('.PIMAX3')\n tree.addNode('.PIMAX3.RAW')\n tree.addNode('.PIMAX3.CAM_SETTING')\n \"\"\" Single-valued member nodes \"\"\"\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_DATE','TEXT',\n 'SHOTDATEANDTIME')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NOTES','TEXT','SHOTNOTES')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SYS_MESSAGE','TEXT','SYSMESSAGE')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_QUALITY','TEXT',\n 'SHOTQUALITY')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NUMBER','TEXT',\n 'SHOTNUMBER')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:PROG_VERSION','TEXT',\n 'PROGRAM_VERSION')\n AddNodeWithTag(tree, '.TEK_2024B.TEK:RAW', 'TEXT', 'RAWTEKSCOPE')\n AddNodeWithTag(tree, '.TEK_2024B.TEK1:RAW', 'TEXT', 'RAWTEKSCOPE1')", "def main():\n\n parser = argparse.ArgumentParser(description=\"Prints the contents of a NiPoPoW\")\n parser.add_argument(\"--blocks\", required=True, type=int, help=\"Number of blocks\")\n parser.add_argument(\n \"--output\", default=\"proof.pkl\", type=str, help=\"Name of exported proof\"\n )\n args = parser.parse_args()\n blocks = args.blocks\n output = args.output\n if output.find(\".pkl\") == -1:\n output += \".pkl\"\n\n # Create blockchain\n header, headers_map, interlink_map = 
create_blockchain(blocks=blocks)\n print_headers(headers_map)\n print_interlinks(headers_map, interlink_map)\n\n # Create proof\n proof = make_proof(header, headers_map, interlink_map)\n print_proof(proof, headers_map)\n\n ### Start spoiling proof\n\n # remove_genesis(proof)\n # proof = change_interlink_hash(proof, 0)\n # proof = skip_blocks(proof, -2)\n # proof = replace_block(proof, headers_map, interlink_map, int(len(proof)/2))\n # print_proof(proof, headers_map)\n # verify_proof(Hash(proof[0][0]), proof)\n\n ### Stop spoiling proof\n\n proof_tool = ProofTool(\"../../data/proofs/\")\n p, f, lca = proof_tool.create_proof_and_forkproof(blocks, forkindex, forkblocks)\n print(p, f, lca)\n\n fixed_fork_proof = proof_tool.fetch_proof(f)\n verify_proof(Hash(fixed_fork_proof[0][0]), fixed_fork_proof)\n\n # proof_tool = ProofTool(\"../../data/proofs/\")\n # proof_tool.export_proof(fixed_fork_proof, f)", "def tree_model(feature_train, help_rank_train, model_name):\n decision_tree = DecisionTreeClassifier()\n decision_tree = decision_tree.fit(feature_train, help_rank_train)\n tree_model = open(model_name,'wb')\n dump(decision_tree, tree_model, -1)\n return", "def test1():\n inp = iotbx.pdb.input(lines=mmcif_str.split(\"\\n\"), source_info=None)\n h = inp.construct_hierarchy()\n text = h.as_mmcif_string()\n for a,b in zip(mmcif_str.splitlines(),text.splitlines()):\n if a != b:\n print(\"from mmCIF text: %s\" %(a))\n print(\"From hierarchy : %s\" %(b))\n assert a==b\n assert text == mmcif_str\n chains = list(h.overall_counts().chain_ids.keys())\n chains.sort()\n answer = ['A']\n assert (chains == answer), '%s %s' % (chains, answer)\n resnames = sorted(h.overall_counts().resnames.keys())\n assert resnames == [' CA', '7ZT', 'HOH', 'LYS'], resnames\n\n # write as mmCIF string\n mmcif_text = h.as_mmcif_string()\n\n new_h = iotbx.pdb.input(lines=mmcif_text.split(\"\\n\"), source_info=None).construct_hierarchy()\n # as PDB:\n for a,b in zip(h.as_pdb_string().splitlines(),new_h.as_pdb_string().splitlines()):\n if a != b:\n print(\"Original as PDB: %s\" %(a))\n print(\"From CIF as PDB: %s\" %(b))\n assert a==b\n # As mmCIF\n for a,b in zip(h.as_mmcif_string().splitlines(),new_h.as_mmcif_string().splitlines()):\n if a != b:\n print(\"Original as CIF: %s\" %(a))\n print(\"From CIF as CIF: %s\" %(b))\n assert a==b\n\n assert h.is_similar_hierarchy(new_h)\n for a,b in zip(new_h.as_mmcif_string().splitlines(),mmcif_str.splitlines()):\n if a != b:\n print(\"From hierarchy: %s\" %(a))\n print(\"Original CIF : %s\" %(b))\n assert a == b\n assert new_h.as_mmcif_string() == mmcif_str", "def modTree(\n tree\n ):\n\n # create file name \n filename=tree+\".internodeLabels.tree\"\n\n # internode label starter\n label=1\n\n # read in tree and create tags\n tree = Phylo.read(tree, 'nexus')\n # loop through internal nodes\n for i in tree.get_nonterminals():\n # create temp array to hold 'addtag, nodexyz, and children of node names'\n temp=[]\n temp.append(\"addtag\")\n nodeID=\"node\"+str(label)\n temp.append(nodeID)\n # for each internal node, get the children tips in the tree and append them to the temp list\n for ii in i.get_terminals():\n temp.append(ii.name)\n # prints lines for bayesTraits\n print(*temp, sep=' ')\n print(\"addMRCA\", nodeID, nodeID, sep=' ')\n # replace the confidence value with nodeID in the phylogeny.\n # This is for the additional newick tree that gets written out\n i.confidence=nodeID\n # add one for the next internode label\n label+=1\n #Phylo.write(tree, filename, 'nexus')", "def 
adjust_tree(tree, args_dict = {}):\n if ((tree.category() == 'VBar') and (len(tree.children) == 2) and (tree.children[1].label.has_key('SUBCAT')) and (tree.children[1].label['SUBCAT'] == 'copula')):\n if (tree.children[0].label[feature_type] == 'DP'):\n DP = tree.children[0].label\n tree.children[0].label = FeatStructNonterminal(dict([item for item in DP.items() if (item[0] != 'PARTICLE')] + [('PARTICLE', 'pred')])) # give the DP a dummy particle\n if ((tree.category() == 'TP') and (len(tree.children) == 1)): # insert vacuous subject node\n tree.children = [SynTree(Trace(tree.children[0].ID, False), [], tree.QR_level, tree.language), tree.children[0]]\n if ((tree.category() == 'DBar') and (len(tree.children) == 1) and (tree.children[0].category() == 'NP')): # insert ambiguous determiner\n tree.children = [SynTree(FeatStructNonterminal([('PropN', False), (feature_type, 'D'), ('TRACE', False)]), [SynTree('*det*', [], tree.QR_level, tree.language)], tree.QR_level, tree.language), tree.children[0]]\n return args_dict", "def test_Tree():", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-g\", dest=\"grid\", help=\"grid\")\n\tparser.add_option(\"-o\", dest=\"outfile\", help=\"outfile\")\n\tparser.add_option(\"-s\", dest=\"selection\", help=\"selection\")\n\tparser.set_description(main.__doc__)\n\t(options, args) = parser.parse_args()\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\ttry:\n\t\t\tLIST = open(options.pdblist, 'r')\n\t\texcept:\n\t\t\tprint \"unable to open pdblist\"\n\t\t\tsys.exit()\n\n\t\tfor line in LIST.readlines():\n\t\t\tline = string.rstrip(line)\n\t\t\tpdbfiles.append(line)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif not options.grid or not options.outfile:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\ttry:\n\t\tOUTPUT = open(options.outfile, 'w')\n\texcept:\n\t\tprint \"unable to create outfile\"\n\t\tsys.exit()\n\n\n\tif options.selection:\n\t\tselection = Selection()\n\t\tselection.makeSelection(options.selection)\n\n\tprotein = Molecule()\t\t\n\n\tmygrid = grid()\t\t\n\tmygrid.read(options.grid)\n\n\tfor pdbfile in pdbfiles:\n\t\tprotein.readPDB(pdbfile)\n\t\tif options.selection:\n\t\t\tnewmol = selection.apply_selection(protein)\n\t\telse:\n\t\t\tnewmol = protein.clone()\n\n\n\t\tatomlist = atomsInGrid(mygrid, newmol)\n\t\tOUTPUT.write(pdbfile + \": \" + str(len(atomlist)) + \"\\n\")\n\t\tprint pdbfile,len(atomlist)\n\n\t\tprotein.clear()\n\t\tnewmol.clear()\n\n\tOUTPUT.close()", "def demo(draw_parses=None, print_parses=None):\n demos = ['aandeelhoudersvergadering', 'hardloopwedstrijd']\n trees = []\n with MBMA() as program:\n for word in demos:\n print 'Parsing: %s' % word\n results = program.classify(word)\n trees.extend(program.trees(results))\n if draw_parses is None:\n print\n print 'Draw parses (y/n)?',\n draw_parses = sys.stdin.readline().strip().lower().startswith('y')\n if draw_parses:\n from nltk.draw.tree import draw_trees\n print ' please wait...'\n draw_trees(*trees)\n\n if print_parses is None:\n print\n print 'Print parses (y/n)?',\n print_parses = sys.stdin.readline().strip().lower().startswith('y')\n if print_parses:\n for parse in trees:\n print parse", "def parse(tree: Union[Keyvalues, str], preserve_ids: bool = False) -> 'VMF':\n if not isinstance(tree, Keyvalues):\n # if not a tree, try to read the file\n with open(tree) as 
file:\n tree = Keyvalues.parse(file)\n\n map_info = {}\n ver_info = tree.find_block('versioninfo', or_blank=True)\n for key in ('editorversion',\n 'mapversion',\n 'editorbuild',\n 'prefab'):\n map_info[key] = ver_info[key, '']\n\n map_info['formatversion'] = ver_info['formatversion', '100']\n if map_info['formatversion'] != '100':\n # If the version is different, we're probably about to fail horribly\n raise Exception(\n 'Unknown VMF format version \" ' +\n map_info['formatversion'] + '\"!'\n )\n\n view_opt = tree.find_block('viewsettings', or_blank=True)\n view_dict = {\n 'bSnapToGrid': 'snaptogrid',\n 'bShowGrid': 'showgrid',\n 'bShow3DGrid': 'show3dgrid',\n 'bShowLogicalGrid': 'showlogicalgrid',\n 'nGridSpacing': 'gridspacing'\n }\n for key in view_dict:\n map_info[view_dict[key]] = view_opt[key, '']\n\n cordons = tree.find_block('cordons', or_blank=True)\n map_info['cordons_on'] = cordons['active', '0']\n\n cam_props = tree.find_block('cameras', or_blank=True)\n map_info['active_cam'] = cam_props['activecamera', '-1']\n map_info['quickhide'] = tree.find_block('quickhide', or_blank=True)['count', '']\n\n # We have to create an incomplete map before parsing any data.\n # This ensures the ID manager objects have been created, so we can\n # ensure unique IDs in brushes, entities and faces.\n map_obj = VMF(map_info=map_info, preserve_ids=preserve_ids)\n\n for vis in tree.find_all('visgroups', 'visgroup'):\n map_obj.vis_tree.append(VisGroup.parse(map_obj, vis))\n\n for c in cam_props:\n if c.name != 'activecamera':\n Camera.parse(map_obj, c)\n\n for ent in cordons.find_all('cordon'):\n Cordon.parse(map_obj, ent)\n\n map_spawn = tree.find_block('world', or_blank=True)\n map_obj.spawn = worldspawn = Entity.parse(map_obj, map_spawn, _worldspawn=True)\n # Ensure the correct classname, which adds to by_class as a side effect. It is possible\n # to name worldspawn, kinda pointless though.\n worldspawn['classname'] = 'worldspawn'\n map_obj.by_target[worldspawn['targetname'].casefold() or None].add(worldspawn)\n # Always a brush entity.\n if worldspawn.solids is None:\n worldspawn.solids = []\n map_obj.brushes = worldspawn.solids\n\n for ent in tree.find_all('Entity'):\n map_obj.add_ent(\n Entity.parse(map_obj, ent, False) # hidden=False\n )\n\n # find hidden entities\n for hidden_ent in tree.find_all('hidden'):\n for ent in hidden_ent:\n map_obj.add_ent(\n Entity.parse(map_obj, ent, True) # hidden=True\n )\n\n return map_obj", "def traverseTree(mdsnode,dead_branches=False,depth=float('Nan'),current_depth=0,noisy=False,strict=False,tags=False):\n tagdict={}\n if isinstance(mdsnode,mds.tree.Tree): \n mdsnode=mdsnode.getNode(\"\\\\TOP\")\n \n name = get_mds_shortname(mdsnode) \n me = Branch(mdsnode)#put node information here if you like\n if noisy: print (\" \"*current_depth + name)\n\n #Members are data/signals, put them directly the current Node object\n #if they are arrays\n if mdsnode.getNumMembers()>0:\n leaves=mdsnode.getMembers()\n for leaf in leaves:\n leafname=get_mds_shortname(leaf)\n leafshape=get_mds_shape(leaf)\n if dead_branches or not len(leafshape) ==0:\n if noisy: print (\" \"*(current_depth+1) + leafname +\": array%s\"%str(leafshape))\n setattr(me,leafname,Leaf(leaf,strict))\n tagdict[leafname]=getattr(me,leafname)\n else:\n if noisy: print(\" \"*(current_depth+1) + leafname)\n #Children contain no immediate data, just links to more nodes. 
If depth is\n #not beyond limit, go down these 'branches' and add contents to the current\n #Node object\n if not depth <= current_depth and mdsnode.getNumChildren()>0:\n branches = mdsnode.getChildren()\n for b in branches:\n subname,subnode,subtags=traverseTree(b, dead_branches,depth,current_depth+1,noisy,strict)\n if len(subnode.__getDescendants__())>0:\n setattr(me,subname,subnode)\n tagdict[subname]=getattr(me,subname)\n for k,v in subtags.items(): #merge tags in\n tagdict[k]=v\n \n if current_depth==0:#we are done, returning to user\n if tags: \n for tag,obj in tagdict.items():\n setattr(me,tag,obj)\n else:\n tagbranch=Branch(mdsnode)\n for tag,obj in tagdict.items():\n setattr(tagbranch,tag,obj)\n setattr(me,'tags',tagbranch) \n return me\n return (name, me,tagdict) #else, we are still recursing back down the tree" ]
[ "0.5582495", "0.55227566", "0.55077416", "0.5327612", "0.53146017", "0.52920765", "0.52888846", "0.5259966", "0.5250141", "0.52456695", "0.52336556", "0.5221779", "0.5210651", "0.5195875", "0.5167817", "0.5102092", "0.5098444", "0.50898075", "0.5076373", "0.50581986", "0.5050455", "0.5036229", "0.50343573", "0.501388", "0.49846515", "0.49721983", "0.496856", "0.49630058", "0.49564037", "0.49483633" ]
0.78134197
0
Download instancedata.csv from MINLPLib, which can be used to get statistics on the problems in MINLPLib.
def get_minlplib_instancedata(target_filename=None): if target_filename is None: target_filename = os.path.join(os.getcwd(), 'minlplib', 'instancedata.csv') download_dir = os.path.dirname(target_filename) if os.path.exists(target_filename): raise ValueError('A file named {filename} already exists.'.format(filename=target_filename)) if not os.path.exists(download_dir): os.makedirs(download_dir) downloader = download.FileDownloader() downloader.set_destination_filename(target_filename) downloader.get_text_file('http://www.minlplib.org/instancedata.csv')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def download_report():\n entities = get_names()\n save_csv(entities)", "def extract_data():\n logging.info(f'Reading data from {impftermine.agg_export_file_name()}...')\n df_wl = pd.read_csv(impftermine.agg_export_file_name())\n vacc_report_file = os.path.join(credentials.vmdl_path, 'vaccination_report_bs_age_group_long.csv')\n logging.info(f'Reading data from {vacc_report_file}...')\n df_impf = pd.read_csv(vacc_report_file)\n return df_wl, df_impf", "def _get_data(self):\n try:\n \n with open('auto-mpg.data.txt', 'w') as data_file:\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n logger.debug(f'response code from url: 200')\n self.response_code = 200\n for line in r.iter_lines():\n data_file.write(line.decode() + '\\n')\n else:\n self.response_code = r.status_code\n logger.info(f'{url} returned status code {r.status_code}')\n except Exception as e:\n 
logger.info(f'Unexpected error writing to file {str(e)}. Exiting.')\n sys.exit()", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)", "def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )", "def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with 
open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def _downloadDataFile(self):\n config = SiteConfiguration.objects.get()\n\n with requests.Session() as s:\n # Authentication\n data = {\n 'identificationBean.identifiant': '{}'.format(config.login),\n 'identificationBean.mdp': '{}'.format(config.password),\n 'userName': '{}'.format(config.username)\n }\n url = 'http://extranet.ffbb.com/fbi/identification.do'\n s.post(url, data=data)\n\n # Create filters\n params = (\n ('action', 'executeCsv'),\n ('rechercherRencontreSaisieResultatBean.idDivision', ''),\n ('rechercherRencontreSaisieResultatBean.rechercherEquipe2', 'O'),\n ('rechercherRencontreSaisieResultatBean.dateDebutRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.dateFinRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.idPoule', ''),\n ('rechercherRencontreSaisieResultatBean.numeroEquipe', ''),\n )\n\n # Get Csv file\n url = 'http://extranet.ffbb.com/fbi/rechercherCompetitionRencontre.do'\n response = s.get(url, params=params)\n\n if(response.headers['content-type'] != 'application/ms-excel;charset=UTF-8'):\n return False\n\n # Create the file\n if response.status_code == 200:\n os.makedirs(os.path.dirname(settings.DATA_PATH), exist_ok=True)\n with open(settings.DATA_PATH, 'wb') as f:\n for chunk in response:\n f.write(chunk)\n\n return True", "def download_potholes():\n\n\tlink = \"https://data.cityofchicago.org/api/views/7as2-ds3y/rows.csv?accessType=DOWNLOAD\"\n\tdf = pd.read_csv(link)\n\tdf = df[(df.STATUS == \"Open\") | (df.STATUS == \"Open - Dup\")]\n\tdf = df[[\"LATITUDE\", \"LONGITUDE\"]]\n\tdf = df.dropna(axis =0, subset=[\"LATITUDE\", \"LONGITUDE\"])\n\treturn df", "def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)", "def _download_mnist_realval(dataset):\n origin = (\n 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'\n )\n print 'Downloading data from %s' % origin\n urllib.urlretrieve(origin, dataset)", "def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)", "def load_instances():\n with open(\"instances.json\", \"r\", encoding=\"utf-8\") as list_file:\n instance_urls = json.load(list_file)\n\n print(\" Fetching instance statistics:\")\n instance_data = []\n release = requests.get(settings.RELEASE_API, timeout=settings.TIMEOUT)\n current_version = version.parse(release.json().get(\"tag_name\", None))\n for instance_url in instance_urls:\n print(f\" - Fetching: {instance_url}\")\n try:\n response = requests.get(\n f\"{instance_url}api/v1/instance\", timeout=settings.TIMEOUT\n )\n data = response.json()\n version_number = data[\"version\"]\n parsed_version = version.parse(version_number)\n if parsed_version < 
version.parse(settings.MINIMUM_VERSION):\n raise Exception(\"Instance is out of date with version:\", version_number)\n\n # pylint: disable=consider-using-f-string\n instance = {\"path\": instance_url}\n instance[\"users\"] = \"{:,}\".format(data[\"stats\"][\"user_count\"])\n instance[\"users_int\"] = int(data[\"stats\"][\"user_count\"])\n instance[\"registration\"] = (\n \"open\"\n if data[\"registrations\"]\n else \"invite\"\n if data[\"approval_required\"]\n else \"closed\"\n )\n description_text = data[\"short_description\"] or \"\"\n if not description_text:\n description = data[\"description\"]\n # pylint: disable=invalid-name\n for p in str(html.fromstring(description).text_content()).split(\"\\n\"):\n description_text += f\"<p>{p}</p>\" if p else \"\"\n if len(description_text) > 80:\n break\n instance[\"description\"] = description_text\n instance[\"logo\"] = data[\"thumbnail\"]\n instance[\"name\"] = data[\"title\"]\n instance[\"version\"] = data[\"version\"]\n\n # Determine a sort order\n rank = 0\n # reduce weight of user count\n rank += instance[\"users_int\"] ** 0.5\n # distance between this version and current version (how up to date is it?)\n rank -= (\n (current_version.major - parsed_version.major) * 100\n + (current_version.minor - parsed_version.minor) * 10\n + (current_version.micro - parsed_version.micro)\n )\n # prioritize open instance\n if instance[\"registration\"] == \"open\":\n rank += 10\n elif instance[\"registration\"] == \"invite\":\n rank += 5\n instance[\"rank\"] = -1 * rank\n\n except Exception as err: # pylint: disable=broad-except\n print(f\" ! {err}\")\n print(\" - Site could possibly be down. Please check it manually:\")\n print(f\" - Site url: {instance_url}\")\n instance = {\"path\": instance_url, \"skip\": True}\n instance_data.append(instance)\n\n instance_data = [i for i in instance_data if not i.get(\"skip\")]\n instance_data = sorted(instance_data, key=lambda i: i[\"rank\"])\n return instance_data", "def load_data():\n df = pd.read_csv(\"https://raw.githubusercontent.com/Andrea-Giuliani/Python-Project/master/data/final_dataset.csv\",sep=',') \n return df", "def download_data_and_save():\n url = 'https://github.com/djay/covidthailand/wiki/combined.csv'\n s=requests.get(url).content\n global df\n global last_updated\n df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n df.to_parquet(file_name, compression='UNCOMPRESSED')\n df.to_csv('jaydata.csv')\n last_updated = df['Date'][df.index[-1]].strftime(\"%d %B %Y\")\n\n url = 'https://raw.githubusercontent.com/wiki/djay/covidthailand/vaccinations.csv'\n s=requests.get(url).content\n global vac_df\n vac_df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n vac_df.to_parquet('vaccination.parquet', compression='UNCOMPRESSED')\n\n print(\"Data downloaded and saved successfully. 
Data up to \" + last_updated)", "def downloadData(url):\n response = urllib2.urlopen(url)\n html = response.read()\n localfile = open('hitdata.csv', 'wb')\n localfile.write(html)\n localfile.close()", "def downloadData(url):\r\n\r\n data = urllib2.urlopen(url)\r\n csvdata = data.read()", "def main():\n \n logging.info('Loading classification dataset from db')\n # start_time = dt.datetime.strptime(options.start_time, \"%Y%m%d%H%M\")\n # end_time = dt.datetime.strptime(options.end_time, \"%Y%m%d%H%M\")\n\n a = mlfb.mlfb(1, logging_level=options.logging_level)\n metadata, header, data = a.get_rows(options.dataset, rowtype=options.type)\n\n logging.debug('Length of metadata: {}'.format(len(metadata)))\n logging.debug('Shape of data {}'.format(data.shape))\n logging.debug('Header is: {}'.format(','.join(header)))\n\n print(header)\n print(np.array(metadata))\n print(data)\n \n # TODO saving as csv\n \n # Serialize model to disc\n # logging.info('Serializing dataset to disc: {}'.format(options.save_path))\n \n # csv = dataset.as_csv()\n # with open(options.save_path, \"w\") as f: \n # f.write(csv)", "def download_data(dev_mode: str, model: word2vec.Word2Vec) -> (np.ndarray, np.ndarray):\n assert dev_mode.lower() == 'false' or dev_mode.lower() == 'true'\n \n if dev_mode.lower() == 'false':\n print('Using Actual Data...')\n data_path = os.path.join(args.data_dir, 'HIV.csv')\n df = pd.read_csv(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(Chem.MolFromSmiles(x['smiles']), 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['HIV_active'].astype(int))\n else:\n # use example data set\n data_path = os.path.join(args.data_dir, 'ames.sdf')\n df = PandasTools.LoadSDF(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(x['ROMol'], 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['class'].astype(int))\n \n return X,y", "def dwn_rel_sup_csv(request):\n i = int(request.GET.get('i'))\n \n return FileResponse(open('temp/relation_support_datasets/relation_support_dataset_{}_{}.csv'.format(i, request.user.username),'rb'))", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def get_csv(args):\n\n connection = httplib.HTTPSConnection(args.host, args.port, args.key,\n args.cert)\n 
connection.request('GET', args.url + ';csv')\n response = connection.getresponse()\n if response.status != 200:\n print 'UNKNOWN: unable to poll stats api'\n sys.exit(NAGIOS_UNKNOWN)\n\n return response.read().rstrip().split(\"\\n\")", "def Dump():\n with open(path.join(MAIN_PATH, INST), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in instances:\n writer.writerow(inst)\n \n with open(path.join(MAIN_PATH, \"test_instances.csv\"), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in test_instances:\n writer.writerow(inst)", "def _get_data(self):\n\n # Grab the data. Note, the separator is actually ', ', not just a\n # comma, so specify. Also, recognize the \"?\" as an NA value\n # (I think it is easier to have pandas catch the NA values instead\n # of manually searching for and parsing these in the future).\n # Finally, set the engine to python, since having a separator greater\n # than one character automatically does this, and prints a warning\n # message. By explicitly telling it to use python, we suppress the\n # warning.\n self.train_df = pd.read_csv(self.train_url, sep=', ', header=None,\n na_values='?', engine='python')\n\n # For the training data, have one comment row, so need to ignore\n self.test_df = pd.read_csv(self.test_url, sep=', ', header=None,\n skiprows=1, na_values='?', engine='python')\n\n # Get the header data\n response = requests.get(self.head_url)\n header = response.text.split('\\n')\n\n # Now, filter to grab the header lines:\n # First, make sure there is at least one character for the line, and\n # ignore lines that start with the comment character for the file \"|\"\n header = [row for row in header if len(row) > 0 and row[0] != '|']\n\n # Ignore the first row, since it is just identifying the classifier\n # task and, get just the header values\n header = [head.split(':')[0] for head in header[1:]]\n\n # Finally, we need to add a header name for the last column (if <= or >\n # income of 50k)\n header.append('income')\n\n # Now, set the header for the data sets\n self.train_df.columns = header\n self.test_df.columns = header", "def download_dataset(self):\n raise NotImplementedError", "def get_df(config_summary_url):\n return pd.read_csv(urlretrieve(config_summary_url)[0])" ]
[ "0.5701007", "0.5519135", "0.5494347", "0.54325104", "0.5428697", "0.5407511", "0.5373714", "0.5348428", "0.5331514", "0.53146833", "0.5308775", "0.5301517", "0.52482224", "0.5236613", "0.5229138", "0.5218212", "0.52168965", "0.5206146", "0.5202182", "0.5193772", "0.5179708", "0.5111847", "0.51050425", "0.51041275", "0.5100293", "0.50949895", "0.50915945", "0.5083629", "0.5075713", "0.5072237" ]
0.7105052
0
This function filters problems from MINLPLib based on instancedata.csv and the conditions specified through the function arguments. The function argument names correspond to column headings from instancedata.csv. The arguments starting with min or max require int or float inputs. The arguments starting with acceptable require either a string or an iterable of strings. See the MINLPLib documentation for acceptable values.
def filter_minlplib_instances(instancedata_filename=None, min_nvars=0, max_nvars=math.inf, min_nbinvars=0, max_nbinvars=math.inf, min_nintvars=0, max_nintvars=math.inf, min_nnlvars=0, max_nnlvars=math.inf, min_nnlbinvars=0, max_nnlbinvars=math.inf, min_nnlintvars=0, max_nnlintvars=math.inf, min_nobjnz=0, max_nobjnz=math.inf, min_nobjnlnz=0, max_nobjnlnz=math.inf, min_ncons=0, max_ncons=math.inf, min_nlincons=0, max_nlincons=math.inf, min_nquadcons=0, max_nquadcons=math.inf, min_npolynomcons=0, max_npolynomcons=math.inf, min_nsignomcons=0, max_nsignomcons=math.inf, min_ngennlcons=0, max_ngennlcons=math.inf, min_njacobiannz=0, max_njacobiannz=math.inf, min_njacobiannlnz=0, max_njacobiannlnz=math.inf, min_nlaghessiannz=0, max_nlaghessiannz=math.inf, min_nlaghessiandiagnz=0, max_nlaghessiandiagnz=math.inf, min_nsemi=0, max_nsemi=math.inf, min_nnlsemi=0, max_nnlsemi=math.inf, min_nsos1=0, max_nsos1=math.inf, min_nsos2=0, max_nsos2=math.inf, acceptable_formats=None, acceptable_probtype=None, acceptable_objtype=None, acceptable_objcurvature=None, acceptable_conscurvature=None, acceptable_convex=None): if instancedata_filename is None: instancedata_filename = os.path.join(os.getcwd(), 'minlplib', 'instancedata.csv') if not os.path.exists(instancedata_filename): raise RuntimeError('{filename} does not exist. Please use get_minlplib_instancedata() first or specify the location of the MINLPLib instancedata.csv with the instancedata_filename argument.'.format(filename=instancedata_filename)) acceptable_formats = _process_acceptable_arg( 'acceptable_formats', acceptable_formats, set(['ams', 'gms', 'lp', 'mod', 'nl', 'osil', 'pip']) ) default_acceptable_probtype = set() for pre in ['B', 'I', 'MI', 'MB', 'S', '']: for post in ['NLP', 'QCQP', 'QP', 'QCP', 'P']: default_acceptable_probtype.add(pre + post) acceptable_probtype = _process_acceptable_arg( 'acceptable_probtype', acceptable_probtype, default_acceptable_probtype ) acceptable_objtype = _process_acceptable_arg( 'acceptable_objtype', acceptable_objtype, set(['constant', 'linear', 'quadratic', 'polynomial', 'signomial', 'nonlinear']) ) acceptable_objcurvature = _process_acceptable_arg( 'acceptable_objcurvature', acceptable_objcurvature, set(['linear', 'convex', 'concave', 'indefinite', 'nonconvex', 'nonconcave', 'unknown']) ) acceptable_conscurvature = _process_acceptable_arg( 'acceptable_conscurvature', acceptable_conscurvature, set(['linear', 'convex', 'concave', 'indefinite', 'nonconvex', 'nonconcave', 'unknown']) ) acceptable_convex = _process_acceptable_arg( 'acceptable_convex', acceptable_convex, set(['True', 'False', '']) ) int_arg_name_list = [ 'nvars', 'nbinvars', 'nintvars', 'nnlvars', 'nnlbinvars', 'nnlintvars', 'nobjnz', 'nobjnlnz', 'ncons', 'nlincons', 'nquadcons', 'npolynomcons', 'nsignomcons', 'ngennlcons', 'njacobiannz', 'njacobiannlnz', 'nlaghessiannz', 'nlaghessiandiagnz', 'nsemi', 'nnlsemi', 'nsos1', 'nsos2', ] min_list = [ min_nvars, min_nbinvars, min_nintvars, min_nnlvars, min_nnlbinvars, min_nnlintvars, min_nobjnz, min_nobjnlnz, min_ncons, min_nlincons, min_nquadcons, min_npolynomcons, min_nsignomcons, min_ngennlcons, min_njacobiannz, min_njacobiannlnz, min_nlaghessiannz, min_nlaghessiandiagnz, min_nsemi, min_nnlsemi, min_nsos1, min_nsos2, ] max_list = [ max_nvars, max_nbinvars, max_nintvars, max_nnlvars, max_nnlbinvars, max_nnlintvars, max_nobjnz, max_nobjnlnz, max_ncons, max_nlincons, max_nquadcons, max_npolynomcons, max_nsignomcons, max_ngennlcons, max_njacobiannz, max_njacobiannlnz, max_nlaghessiannz, max_nlaghessiandiagnz, 
max_nsemi, max_nnlsemi, max_nsos1, max_nsos2, ] acceptable_arg_name_list = [ 'probtype', 'objtype', 'objcurvature', 'conscurvature', 'convex' ] acceptable_set_list = [ acceptable_probtype, acceptable_objtype, acceptable_objcurvature, acceptable_conscurvature, acceptable_convex ] with open(instancedata_filename, 'r') as csv_file: reader = csv.reader(csv_file, delimiter=';') headings = {column: ndx for ndx, column in enumerate(next(reader))} rows = [row for row in reader] cases = list() for ndx, row in enumerate(rows): if len(row) == 0: continue case_name = row[headings['name']] available_formats = row[headings['formats']] available_formats = available_formats.replace('set([', '') available_formats = available_formats.replace('])', '') available_formats = available_formats.replace('{', '') available_formats = available_formats.replace('}', '') available_formats = available_formats.replace(' ', '') available_formats = available_formats.replace("'", '') available_formats = available_formats.split(',') available_formats = set(available_formats) should_continue = False if len(acceptable_formats.intersection(available_formats)) == 0: logger.debug('excluding {case} due to available_formats'.format(case=case_name)) should_continue = True for ndx, acceptable_arg_name in enumerate(acceptable_arg_name_list): acceptable_set = acceptable_set_list[ndx] arg = row[headings[acceptable_arg_name]] if _check_acceptable(arg=arg, acceptable_set=acceptable_set, arg_name=acceptable_arg_name, case_name=case_name): should_continue = True for ndx, arg_name in enumerate(int_arg_name_list): _min = min_list[ndx] _max = max_list[ndx] arg = int(row[headings[arg_name]]) if _check_int_arg(arg=arg, _min=_min, _max=_max, arg_name=arg_name, case_name=case_name): should_continue = True if should_continue: continue cases.append(case_name) return cases
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_max_outliers(res, min=None, max=None):\n min_max_list = []\n if isinstance(min, (int, float)):\n data1 = res[res < min].reset_index()\n data1['limit type'] = 'minimum'\n data1['limit'] = min\n min_max_list.append(data1)\n if isinstance(max, (int, float)):\n data1 = res[res > max].reset_index()\n data1['limit type'] = 'maximum'\n data1['limit'] = max\n min_max_list.append(data1)\n\n min_max1 = pd.concat(min_max_list)\n\n return min_max1", "def column_range_validation_factory(minim=None, maxim=None, ignore_missing_vals=False):\n if minim is None:\n if isinstance(maxim, datetime):\n minim = datetime.min\n else:\n minim = -1 * (sys.maxsize - 1)\n if maxim is None:\n if isinstance(minim, datetime):\n maxim = datetime.max\n else:\n maxim = sys.maxsize\n\n def in_range_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return (isinstance(x, (type(minim), type(maxim)))) and (x <= maxim) and (x >= minim), {}\n\n in_range_validation_fn.__doc__ = \"checks whether values are between {} and {}\".format(\n minim, maxim\n )\n if ignore_missing_vals:\n in_range_validation_fn.__doc__ += \", ignoring nulls\"\n\n return in_range_validation_fn", "def process(self, inputs):\n\n input_df = inputs[0]\n str_list = []\n for column_item in self.conf:\n column_name = column_item['column']\n if 'min' in column_item:\n minValue = column_item['min']\n str_item = '%s >= %f' % (column_name, minValue)\n str_list.append(str_item)\n if 'max' in column_item:\n maxValue = column_item['max']\n str_item = '%s <= %f' % (column_name, maxValue)\n str_list.append(str_item)\n input_df = input_df.query(\" and \".join(str_list))\n return input_df", "def my_min(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[0]\n\n if not args:\n raise ValueError(\"Can't find min, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find min, wrong data format\")\n return sorter(args)", "def min_(*args, **kwargs):\n ...", "def test_check_min(self):\n\t\tself.filter.set_operator(\".min\")\n\t\tself.filter.set_limit(12)\n\t\tself.assertTrue(self.filter.check(Object(field=12)))\n\t\tself.assertTrue(self.filter.check(Object(field=15)))\n\t\tself.assertFalse(self.filter.check(Object(field=9)))", "def filter_criteria_met(current_gre, current_gpa, current_toefl):\r\n\r\n if int(current_gre) < global_constants['MINIMUM_GRE']:\r\n return False\r\n if float(current_gpa) < global_constants['MINIMUM_GPA']:\r\n return False\r\n if int(current_toefl) < global_constants['MINIMUM_TOEFL']:\r\n return False\r\n return True", "def valfilter(ls,minmax):\n # Find how often each values occur in ls\n count = Counter(ls)\n # Remove keys that occur only once\n keys = count.keys()\n for key in keys:\n if count[key] == 1:\n del count[key]\n keys = count.keys()\n # Return min or max as specified\n if minmax == 'min':\n return min(keys)\n if minmax == 'max':\n return max(keys)", "def _find_verify_arguments(filters):\n if (\"minsize\" in filters and \"maxsize\" in filters 
and\n filters[\"maxsize\"] < filters[\"minsize\"]):\n exit_with_error(\"Maximum size cannot be less than minimum size.\")\n if (\"size\" in filters and \"maxsize\" in filters and\n filters[\"maxsize\"] < filters[\"size\"]):\n exit_with_error(\"Maximum size cannot be less than (exact) size.\")\n if (\"size\" in filters and \"minsize\" in filters and\n filters[\"minsize\"] > filters[\"size\"]):\n exit_with_error(\"Minimum size cannot be more than (exact) size.\")", "def checkmaxmin(droi):\n \n # Loads data.\n fd = load_frames('gofparams')\n rawdata = loadresultsfile('results1.txt', 'Mean1')\n \n pardir = cmn.makepardir_data()\n movie = os.path.basename(os.path.abspath('.'))\n \n d = {}\n rois = droi.keys()\n \n # Finds the max and min points using the parameters specified in the list droi.\n for roi in rois:\n\n maxsurr, maxwinlen, maxtrshift, minsurr, minwinlen, mintrshift = droi[roi]\n \n dmax, dmin = peaklib.maxminanalysis(rawdata, maxsurr, maxwinlen, maxtrshift, minsurr,\n minwinlen, mintrshift)\n \n d[roi+'_dmax'] = dmax\n d[roi+'_dmin'] = dmin\n\n # Plots the raw traces with the max and min points indicated.\n for roi in rois:\n plt.figure(figsize=(14,10))\n peaklib.plotminmax(d[roi+'_dmax'], d[roi+'_dmin'], 'b', 1, 0)\n \n figname = movie+'_'+roi\n plt.title('{0} \\n {1} \\n frames = {2}-{3} ({4} total)'.format(figname, fd['condition'], \n fd['f1'], fd['f_end'], fd['f_end']-fd['f1']))\n \n figpath = peaklib.makefilepath(pardir, ANALYSISFOLDPNG, figname)\n plt.savefig(figpath)\n plt.close()\n \n # Writes the min/max data into a file with the function writei.\n \n ifilefold = ANALYSISFOLDTXT + '/' + movie + '/'\n ipath = peaklib.makesubdir(pardir, ifilefold)\n peaklib.writei(d, fd, ipath)", "def filterIQM(apidf, modality, filter_list):\n cols = apidf.columns\n cols = cols.map(lambda x: x.replace(\".\", \"_\").lower())\n apidf.columns = cols\n\n query = []\n mod = modality.lower()\n\n if mod == 'bold':\n expected_filters = {'snr':'snr','tsnr':'tsnr',\n 'dvar':'dvars_nstd','fd':'fd_mean',\n 'fwhm':'fwhm_avg','tesla':'bids_meta_magneticfieldstrength',\n 'gsr_x':'gsr_x','gsr_y':'gsr_y',\n 'te':'bids_meta_echotime','tr':'bids_meta_repetitiontime'}\n elif mod == 't1w':\n expected_filters = {'snr':'snr_total', 'snrg':'snr_gm', 'srnw':'snr_wm',\n 'snrc':'snr_csf', 'cnr':'cnr', 'efc':'efc',\n 'fwhm':'fwhm_avg','tesla':'bids_meta_magneticfieldstrength',\n 'te':'bids_meta_echotime','tr':'bids_meta_repetitiontime'}\n elif mod == 't2w':\n expected_filters = {'snr':'snr_total', 'snrg':'snr_gm', 'srnw':'snr_wm',\n 'snrc':'snr_csf', 'cnr':'cnr', 'efc':'efc',\n 'fwhm':'fwhm_avg','tesla':'bids_meta_magneticfieldstrength',\n 'te':'bids_meta_echotime','tr':'bids_meta_repetitiontime'}\n\n if all(isinstance(x,str) for x in filter_list):\n filter_list = list(map(str.lower, filter_list))\n else:\n raise ValueError('filter_list contains items other than strings')\n\n filter_check = list(expected_filters.keys())\n\n for filt in filter_list:\n var = filt.split(' ')[0]\n op = filt.split(' ')[1]\n val = filt.split(' ')[2]\n if var in filter_check:\n filt_str = expected_filters[var] + op + val\n query.append(filt_str)\n else:\n raise Exception(var + 'is not found in the available IQMs')\n\n filtered_df = apidf.query(' & '.join(query))\n\n return filtered_df", "def validate_input(function):\n\n def validator(min_factor, max_factor):\n if max_factor < min_factor:\n raise ValueError(\"Min factor must be smaller or equal as max factor\")\n if min_factor == max_factor:\n return None, []\n else:\n return 
function(min_factor, max_factor)\n\n return validator", "def findmin(f, ranges, args=(), Ns=None, full_output=False, method='brute',\n finish=False):\n if method == 'brute':\n Ns = Ns or 3\n x0, J0, xs, Jout = brute(f, ranges, args=args, Ns=Ns, full_output=True)\n elif method == 'monte carlos':\n Ns = Ns or 1000\n x0, J0, xs, Jout = monte_carlos(f, ranges, args=args, Ns=Ns, full_output=True)\n else:\n valid_methods = ('brute', 'monte carlos')\n raise ValueError('optimization method must be one of {0!r}'.format(\n ', '.join(valid_methods)))\n\n # Mask any values that are not finite\n mask = np.isfinite(Jout)\n xs = xs[mask]\n Jout = Jout[mask]\n if not len(xs):\n raise RuntimeError('Failed to find optimized parameters')\n\n if finish:\n import scipy.optimize\n res = scipy.optimize.fmin(f, x0, args=args, full_output=True)\n x0, J0 = res[0:2]\n\n if not full_output:\n return x0\n return x0, J0, xs, Jout", "def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []", "def test_by_source_mininimal_data(minimal_mockdata, qfilter):\n res = qfilter.filter(minimal_mockdata, s='s1')\n assert len(res) == 1\n res = qfilter.filter(minimal_mockdata, s='xxxxxx')\n assert not res", "def filter_by_grade(minInRequest: bool, maxInRequest: bool, request: HttpRequest, sql_request: Q):\n if maxInRequest or minInRequest:\n if not maxInRequest:\n if minInRequest:\n minGrade = int(request.GET[\"min\"])\n sql_request &= Q(grade__gte=minGrade)\n elif not minInRequest:\n if maxInRequest:\n maxGrade = int(request.GET[\"max\"])\n sql_request &= Q(grade__lte=maxGrade)\n else:\n minGrade, maxGrade = int(request.GET[\"min\"]), int(request.GET[\"max\"])\n sql_request &= Q(grade__range=(minGrade, maxGrade))\n \n return sql_request", "def lin_cutoff_calc_from_perc(image, minimum, maximum):\n if minimum < 0 or maximum < 0 or minimum > 100 or maximum > 100:\n raise Exception(\"rvt.blend_funct.lin_cutoff_calc_from_perc: minimum, maximum are percent and have to be in \"\n \"range 0-100!\")\n if minimum + maximum > 100:\n raise Exception(\"rvt.blend_funct.lin_cutoff_calc_from_perc: if minimum + maximum > 100% then there are no\"\n \" values left! 
You can't cutoff whole image!\")\n distribution = np.nanpercentile(a=image, q=np.array([minimum, 100 - maximum]))\n min_lin = distribution[0]\n max_lin = distribution[1]\n if min_lin == max_lin:\n min_lin = np.nanmin(image)\n max_lin = np.nanmax(image)\n return {\"min_lin\": min_lin, \"max_lin\": max_lin}", "def find_minmax_criteria(self, data):\r\n found = {}\r\n data = dict(data)\r\n for k in data.keys():\r\n m = re.match(r'(?P<minmax>min|max)\\[(?P<property_id>\\d+)\\]', k)\r\n if m is not None:\r\n minmax = m.group('minmax')\r\n property_id = int(m.group('property_id'))\r\n if not found.has_key(property_id):\r\n found[property_id] = MinMaxCriteria(property_id)\r\n if minmax == 'min':\r\n found[property_id].min_value = self.__to_value(data[k])\r\n elif minmax == 'max':\r\n found[property_id].max_value = self.__to_value(data[k])\r\n self.minmax_criteria = found", "def customMin(x,mergedSegments, minValidData = 0.8):\n if mergedSegments.loc[x].nonNullProp >= minValidData : \n return np.inf\n\n idx = min(criteriaMatrix.get(x),\n key=lambda y : np.inf if y not in inversedIndex.values\n else criteriaMatrix.get(x).get(y)\n )\n return np.inf if idx not in inversedIndex.values else criteriaMatrix.get(x).get(idx)", "def get_filters():\n \n \"\"\"\"\"\"\"\"\n \n \"\"\"Messeges to genrate filters\"\"\"\n\tnote_messege = 'In this project, we make use of Python to explore data related to bike share systems for three major cities in the United States\\n'\n welcome_messege = 'Hello! Let\\'s explore some US bikeshare data!\\n'\n enter_city_name_messege = 'Which city would you like to filter by? Chicago, New York City or Washington? '\n filter_definition_messege = '\\nWould you like to filter the data by - \\n1. Month\\n2. Day\\n3. Both\\n4. No Filter\\n\\nPlease choose the appropriate filter name.\\nNote: Incorrect filter name will result as \\'no filter selected\\' by the user.\\n'\n enter_filter_messege = 'Desired filter (e.g: Month, Day, Both or No Filter): '\n enter_month_name_messege = 'Enter month name (e.g: january, february, march, april, may or june): '\n enter_day_name_messege = 'Enter day of the week (e.g: monday, tuesday, wednesday, thursday, friday, saturday, sunday): '\n exception_messege = '\\nWarning! That is not a valid input.\\n'\n warning_city_name_messege = '\\nWarning! Invalid city name. Select city name from the following cities only - Chicago, New York City or Washington.' \n warning_month_name_messege = '\\nWarning! Invalid month name. Select month name from the following months only - january, february, march, april, may or june'\n warning_day_name_messege = '\\nWarning! Invalid day name. Select day name from the following days only - monday, tuesday, wednesday, thursday, friday, saturday, sunday'\n \"\"\"\"\"\"\"\"\n \n \"\"\"City, Month and Day List\"\"\"\n city_list = ['chicago', 'new york city', 'washington']\n month_list = ['january', 'february', 'march', 'april', 'may', 'june']\n day_list = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n \"\"\"\"\"\"\"\"\n \n\tprint(note_messege)\n print(welcome_messege)\n \n # TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs \n while True:\n try:\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n while city.lower() not in city_list:\n while True:\n try: \n print(warning_city_name_messege)\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n print(filter_definition_messege)\n while True:\n try:\n filter_choice = input(enter_filter_messege)\n break\n except:\n print(exception_messege)\n while True: \n if filter_choice.lower() == 'month':\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n day = 'all'\n break\n \n elif filter_choice.lower() == 'day':\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday) \n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n month = 'all'\n break\n \n elif filter_choice.lower() == 'both':\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input(enter_month_name_messege)\n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n break\n \n else:\n month = 'all'\n day = 'all'\n break\n \n\n print('-'*40)\n return city.lower(), month.lower(), day.lower()", "def clean_data(cube, max_value, min_value):\n\n data_clean = numpy.where(cube.data < min_value, min_value, cube.data) \n data_clean = numpy.where(data_clean > max_value, max_value, data_clean)\n \n return data_clean", "def filter_design(l_max, N_scales, design_type='default', lp_factor=20,\r\n a=2, b=2, t1=1, t2=2):\r\n g = []\r\n gp = []\r\n l_min = l_max / lp_factor\r\n t = set_scales(l_min, l_max, N_scales)\r\n if design_type == 'default':\r\n # Find maximum of gs. 
Could get this analytically, but this also works\r\n f = lambda x: -kernel(x, a=a, b=b, t1=t1, t2=t2)\r\n x_star = fminbound(f, 1, 2)\r\n gamma_l = -f(x_star)\r\n l_min_fac = 0.6 * l_min\r\n g.append(lambda x: gamma_l * np.exp(-(x / l_min_fac)**4))\r\n gp.append(lambda x: -4 * gamma_l * (x/l_min_fac)**3 *\r\n np.exp(-(x / l_min_fac)**4) / l_min_fac)\r\n for scale in t:\r\n g.append(lambda x,s=scale: kernel(s * x, a=a, b=b, t1=t1,t2=t2))\r\n gp.append(lambda x,s=scale: kernel_derivative(scale * x) * s)\r\n elif design_type == 'mh':\r\n l_min_fac = 0.4 * l_min\r\n g.append(lambda x: 1.2 * np.exp(-1) * np.exp(-(x/l_min_fac)**4))\r\n for scale in t:\r\n g.append(lambda x,s=scale: kernel(s * x, g_type='mh'))\r\n else:\r\n print(\"Unknown design type\")\r\n # TODO: Raise exception\r\n \r\n return (g, gp, t)", "def getInputDeviceRange(*args, maxValue: bool=True, minValue: bool=True,\n **kwargs)->List[float]:\n pass", "def MIN(*args):\n return _group_function(min, *args)", "def get_min_max_x(self, min_x = 1e9, max_x = -1e9, exclude = []): \n \n if self.verbose > 1:\n print(\"MultiLinearSpectra.get_min_max_x()\") \n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n min_x, max_x = self.mess[m][\"object\"].get_min_max_x(min_x, max_x)\n \n return min_x, max_x", "def __init__(self, allowable_min, allowable_max):\n self.allowable_min = allowable_min\n self.allowable_max = allowable_max\n self.value = None\n # Do it this way because we'll override in reset\n self.min_found = None\n self.max_found = None\n self.avg_found = None\n self.count = 0\n\n # override reset for the different data types\n self.reset()", "def get_minmax(self, stmt, slist):\n minel = maxel = None\n for s in slist:\n if s.keyword == \"min-elements\":\n minel = s.arg\n elif s.keyword == \"max-elements\":\n maxel = s.arg\n if minel is None:\n minst = stmt.search_one(\"min_elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max_elements\")\n if maxst:\n maxel = maxst.arg\n return (minel, maxel)", "def check_overlaps(self, filter_objects, verbose = False):\n if isinstance(FilterClass, type(filter_objects)):\n ## if only one filter is given\n filter_objects = [filter_objects, ]\n\n\n for i, filter_name in enumerate(filter_objects):\n if isinstance(FilterClass, type(filter_name)):\n filter_obj = filter_name\n elif isinstance(filter_objects, dict):\n filter_obj = filter_objects[filter_name]\n else:\n filter_obj = filter_objects[i]\n\n if verbose:print(i, filter_obj)\n\n if hasattr(filter_obj, \"_lower_edge\") and \\\n hasattr(filter_obj, \"_upper_edge\") and \\\n hasattr(self, \"data\"):\n blue_bool = filter_obj._lower_edge > self.min_wavelength\n red_bool = filter_obj._upper_edge < self.max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n if verbose: print(within)\n if within:\n self._add_to_overlapping_filters(filter_name, verbose=verbose)\n else:\n warnings.warn(\"SpectrumClass.check_overlaps - something went wrong... 
no overlaps or data?\")\n if self._n_overlapping_filters == 1:\n self._overlapping_filter_list = [self._overlapping_filter_list,] ## added to fix issue #27\n pass", "def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv", "def galaxy_selection_func(table, min_mass=10**8.0, max_mass=np.inf, prim_gal_prop='stellar_mass'):\n\n mask = (table[prim_gal_prop] >= min_mass) & (table[prim_gal_prop] < max_mass)\n return mask" ]
[ "0.5590699", "0.54717195", "0.546341", "0.5421935", "0.5389123", "0.53543043", "0.5188106", "0.5153531", "0.51267356", "0.5076835", "0.50677097", "0.50538373", "0.50466853", "0.5036201", "0.5033214", "0.5024426", "0.49925968", "0.49806297", "0.4965975", "0.49592558", "0.495787", "0.49574614", "0.4956987", "0.4941772", "0.49360722", "0.49323037", "0.49282217", "0.4912162", "0.49075302", "0.49060047" ]
0.6130292
0
Return a steem-style amount string given a (numeric, asset-str).
def _amount(amount, asset='HBD'):
    assert asset == 'HBD', 'unhandled asset %s' % asset
    return "%.3f HBD" % amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cp_asset_sale(self, amt: float) -> str:\n raise NotImplementedError", "def obtain_amount(cls, amount_string):\n return float(string.replace(amount_string, ',', '.'))", "def make_quantity(string):\n pass", "def format_amount(self) -> str:\n if self.amount_debit != '':\n return self.amount_debit.replace('-', '')\n return self.amount_credit.replace('-', '')", "def get_cost_string(es, t):\n strg = ''\n for item in es:\n strg += '{:011.7f}'.format(item / t) + ' '\n return strg", "def format_amount(amount: int | str | float) -> str:\r\n # Remove .0 by casting to int\r\n if float(amount) % 1 == 0:\r\n amount = int(float(amount))\r\n\r\n # Adding prefix + for positive number and 0\r\n if not str(amount).startswith('+') and float(amount) >= 0:\r\n amount = str(f'+{amount}')\r\n\r\n # return as string\r\n return str(amount)", "def credits_to_string(amount: int, significant_numbers: int = 3) -> str:\n letter = ''\n divider = 1\n absAmount = abs(amount)\n\n if absAmount >= 10**15:\n letter = 'Q'\n divider = 10**15\n elif absAmount >= 10**12:\n letter = 'T'\n divider = 10**12\n elif absAmount >= 10**9:\n letter = 'B'\n divider = 10**9\n elif absAmount >= 10**6:\n letter = 'M'\n divider = 10**6\n \n if divider == 1:\n return '{:,} C'.format(int(amount))\n if amount >= 10**18:\n return '{:,} {}C'.format(int(amount / divider), letter)\n else:\n power_of_10 = max(0,int(math.floor(math.log10(absAmount))))\n precision = significant_numbers - 1 - (power_of_10 % 3)\n return '{1:.{0}f} {2}C'.format(precision,\n math.floor(amount / 10**(power_of_10 - significant_numbers + 1)) / 10**precision, \n letter)", "def money_format(ammount):\n\td = Decimal(ammount) / Decimal(\"100\")\n\treturn u'£%s' % d.quantize(Decimal(\"0.01\"))", "def render_money(amount: Money, message: str = \"\") -> str:\n\n return f\"{message} {amount.amount} {amount.currency}\"", "def local_price(amount, currency):\n amt = convert(amount, currency)\n sym = symbol(currency)\n return f'{sym}{amt}'", "def _convert_charge(val: str) -> str:\n try:\n int_val = int(val)\n # negative value will take care of itself\n sign = \"+\" if int_val > 0 else \"\"\n return f\"{sign}{int_val}\"\n except:\n return val", "def int2str(value_int, currency):\r\n if currency in \"BTC LTC NMC\":\r\n return (\"%16.8f\" % (value_int / 100000000.0))\r\n elif currency in \"JPY SEK\":\r\n return (\"%12.3f\" % (value_int / 1000.0))\r\n else:\r\n return (\"%12.5f\" % (value_int / 100000.0))", "def bought_to_str(shares, ticker, price):\n return (' - You bought ' + shares + ' shares of ' +\n ticker + ' at a price of ' + price + ' per share\\n')", "def credits_to_string_with_exact_value(amount: int, separator: str = ' ', significant_numbers: int = 3) -> str:\n if amount >= 10**6:\n return '{}{}({:,} C)'.format(credits_to_string(amount), separator, amount)\n else:\n return '{:,} C'.format(amount)", "def gold(value):\n\tvalue = str(value)\n\t\n\tgold = value[:-4]\n\n\tsilver = value[-4:-2]\n\n\tcopper = value[-2:]\n\n\t\n\tif gold == '': gold = '0'\n\tif silver == '': silver = '0'\n\tif copper == '': copper = '0'\n\n\treturn \"%sg %ss %sc\" % (gold, silver, copper)", "def format_usd(my_price):\n return f\"${my_price:,.2f}\"", "def safe_format_amount(commodity, amount):\n if commodity is None:\n return str(amount)\n return commodity.format_amount(amount)", "def format_amount(amount):\n if not amount:\n return ''\n return \"{} {}\".format(format_currency(amount.number, amount.currency),\n amount.currency)", "def _get_asset(self, symbol):\n if symbol == \"BEX\":\n return 
{\"symbol\": \"BEX\",\n \"precision\": 3\n }\n elif symbol == \"BBD\":\n return {\"symbol\": \"BBD\",\n \"precision\": 3\n }\n elif symbol == \"VESTS\":\n return {\"symbol\": \"VESTS\",\n \"precision\": 6\n }\n else:\n return None", "def text_transform(val):\n if CURRENCY == \"USD\":\n return \"$%d\" % val\n if CURRENCY == \"EUR\":\n return \"‎€%d\" % val\n if CURRENCY == \"GBP\":\n return \"£%d\" % val\n return \"%d\" % val", "def __str__(self) -> str:\n return f'{self.amount}{self.currency}'", "def _format_instructed_amount_33B(self, val):\n instructed_amount = val.get('instructed_amount')\n currency = val.get('currency')\n if instructed_amount and currency:\n instructed_amount = apply_currency_precision(currency, abs(float(instructed_amount)))\n val = str(currency) + str(FSwiftMLUtils.float_to_swiftmt(str(instructed_amount)))\n return val", "def parse_amount(amount: str) -> int:\n amountLowerCase = amount.lower().replace('c', '')\n\n exp = 0\n if amountLowerCase.endswith('k'):\n exp = 3\n elif amountLowerCase.endswith('m'):\n exp = 6\n elif amountLowerCase.endswith('b'):\n exp = 9\n elif amountLowerCase.endswith('t'):\n exp = 12\n elif amountLowerCase.endswith('q'):\n exp = 15\n\n if exp == 0:\n return int(amountLowerCase)\n else:\n return int(float(amountLowerCase[:len(amountLowerCase)-1])*10**exp)", "def money_parse(ammount):\n\treturn int(Decimal(ammount) * 100)", "def deltastr(num, include_sign=True, currency=False):\n if num == 0:\n return ''\n elif num > 0:\n b4 = Fore.GREEN\n elif num < 0:\n b4 = Fore.RED\n signage = '+' if include_sign else ''\n b4 += '$' if currency else ''\n numfmt = ',.0f' if currency else ''\n return f'{b4}{num:{signage}{numfmt}}{Style.RESET_ALL}'", "def getTransactionAmount(self,message):\n amount = re.findall(Analyzer.rgxAmount,message.lower())\n return amount[0].capitalize()", "def dividend_to_str(stock, dividend, shares):\n return (' - ' + stock + ' paid out ' + '${:.2f}'.format(float(dividend))\n + ' dividend per share, and you have ' + str(shares) + ' shares\\n')", "def sold_to_str(shares, ticker, price, profit):\n return (' - You sold ' + shares + ' shares of ' + ticker\n + ' at a price of ' + price + ' per share for a '\n + ('profit' if float(profit[1:]) >= 0 else 'loss') + ' of '\n + profit + '\\n')", "def to_usd(my_price):\n return f\"${my_price:,.2f}\"", "def contract(s):\n if not s: return s\n\n tokens = s.split()\n old = tokens[0]\n count = [[1, old]]\n\n for t in tokens[1:]:\n if t == old:\n count[-1][0] += 1\n else:\n old = t\n count.append([1, t])\n\n return \" \".join(\"%d*%s\" % (c, t) for c, t in count)" ]
[ "0.6219759", "0.5915044", "0.5800827", "0.5674921", "0.56510484", "0.56374365", "0.56039447", "0.55580014", "0.54944694", "0.5424905", "0.53800106", "0.5352545", "0.5346404", "0.5282491", "0.5272913", "0.526662", "0.526149", "0.52452654", "0.52430576", "0.5226036", "0.5224858", "0.5194791", "0.5181311", "0.51724255", "0.5170263", "0.5164188", "0.5153423", "0.51267153", "0.51203406", "0.51134264" ]
0.73780584
0
Given a hive_posts row, create a legacy-style post object.
def database_post_object(row, truncate_body=0):
    paid = row['is_paidout']
    post = {}
    post['active'] = json_date(row['active'])
    post['author_rewards'] = row['author_rewards']
    post['id'] = row['id']
    post['author'] = row['author']
    post['permlink'] = row['permlink']
    post['category'] = row['category'] if 'category' in row else 'undefined'
    post['title'] = row['title']
    post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']
    post['json_metadata'] = row['json']
    post['created'] = json_date(row['created_at'])
    post['last_update'] = json_date(row['updated_at'])
    post['depth'] = row['depth']
    post['children'] = row['children']
    post['last_payout'] = json_date(row['last_payout_at'])
    post['cashout_time'] = json_date(row['cashout_time'])
    post['max_cashout_time'] = json_date(None)  # ABW: only relevant up to HF17, timestamp::max for all posts later (and also all paid)
    curator_payout = sbd_amount(row['curator_payout_value'])
    post['curator_payout_value'] = to_nai(_amount(curator_payout))
    post['total_payout_value'] = to_nai(_amount(row['payout'] - curator_payout))
    post['reward_weight'] = 10000  # ABW: only relevant between HF12 and HF17 and we don't have access to correct value
    post['root_author'] = row['root_author']
    post['root_permlink'] = row['root_permlink']
    post['allow_replies'] = row['allow_replies']
    post['allow_votes'] = row['allow_votes']
    post['allow_curation_rewards'] = row['allow_curation_rewards']
    post['parent_author'] = row['parent_author']
    post['parent_permlink'] = row['parent_permlink_or_category']
    post['beneficiaries'] = row['beneficiaries']
    post['max_accepted_payout'] = to_nai(row['max_accepted_payout'])
    post['percent_hbd'] = row['percent_hbd']
    post['net_votes'] = row['net_votes']
    if paid:
        post['total_vote_weight'] = 0
        post['vote_rshares'] = 0
        post['net_rshares'] = 0  # if row['rshares'] > 0 else row['rshares'] ABW: used to be like this but after HF19 cashouts disappear and all give 0
        post['abs_rshares'] = 0
        post['children_abs_rshares'] = 0
    else:
        post['total_vote_weight'] = row['total_vote_weight']
        post['vote_rshares'] = ( row['rshares'] + row['abs_rshares'] ) // 2  # effectively sum of all positive rshares
        post['net_rshares'] = row['rshares']
        post['abs_rshares'] = row['abs_rshares']
        post['children_abs_rshares'] = 0  # TODO - ABW: I'm not sure about that, it is costly and useless (used to be part of mechanism to determine cashout time)
    return post
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_blog_post(user_id):\n \n data = request.get_json()\n\n # Check if the user is in the database\n user = User.query.filter_by(id=user_id).first()\n if not user:\n return jsonify({\"message\": \"user does not exist!\"}), 400\n\n # Create an instance of a HashTable\n ht = hash_table.HashTable(10)\n\n # Create a blog post\n ht.add_key_value(\"title\", data[\"title\"])\n ht.add_key_value(\"body\", data[\"body\"])\n ht.add_key_value(\"date\", now)\n ht.add_key_value(\"user_id\", user_id)\n\n # Add a blog post to the database\n new_blog_post = BlogPost(\n title=ht.get_value(\"title\"),\n body=ht.get_value(\"body\"),\n date=ht.get_value(\"date\"),\n user_id=ht.get_value(\"user_id\"),\n )\n db.session.add(new_blog_post)\n db.session.commit()\n return jsonify({\"message\": \"new blog post created\"}), 200", "def post(self, post_id=None):\n\n if post_id:\n abort(400)\n else:\n args = parsers.post_post_parser.parse_args(strict=True)\n\n new_post = Post(args['title'])\n new_post.text = args['text']\n # new_post.user = user\n\n if args['tags']:\n for item in args['tags']:\n tag = Tag.query.filter_by(name=item).first()\n # If the tag already exist, append.\n if tag:\n new_post.tags.append(tag)\n # If the tag not exist, create the new one.\n # Will be write into DB with session do.\n else:\n new_tag = Tag(item)\n new_post.tags.append(new_tag)\n db.session.add(new_post)\n db.session.commit()\n return (new_post.id, 201)", "def create(cls, headline, text, blog):\n post = cls()\n post.headline = headline\n post.text = text\n post.blog = blog\n post.posted_date = timezone.now()\n try:\n post.save()\n return post\n except(ValueError, IntegrityError, OperationalError):\n return None", "def add_post_to_es(post, db_conn):\n\n from database.topic import get_topic, deliver_topic\n from database.user import get_user, deliver_user\n\n data = json_prep(deliver_post(post))\n topic = get_topic({'id': post['topic_id']}, db_conn)\n if topic:\n data['topic'] = json_prep(deliver_topic(topic))\n user = get_user({'id': post['user_id']}, db_conn)\n if user:\n data['user'] = json_prep(deliver_user(user))\n\n return es.index(\n index='entity',\n doc_type='post',\n body=data,\n id=post['id'],\n )", "def _create_row_description(self, hue, cmap):\n # HTML needs to be prepared so that description is hidden/hoverable\n desc = self.feature_descriptions[hue]\n\n parsed_html = BeautifulSoup(self._row_description_html.format(hue=hue), \"html.parser\")\n parsed_html.string.wrap(parsed_html.new_tag(\"p\"))\n parsed_html.p.append(append_description(desc, parsed_html))\n parsed_html.p[\"class\"] = self.feature_description_class\n\n feature_with_description = str(parsed_html)\n\n # Color Legend\n legend = self._create_legend(hue, cmap)\n\n # Feature Title Div\n kwargs = {\n \"text\": feature_with_description,\n \"css_classes\": [self._hue_title]\n }\n d = Div(**kwargs)\n\n # output Column\n c = column(\n d,\n legend,\n width=200,\n height=195,\n width_policy=\"fixed\",\n css_classes=[self._row_description]\n )\n return c", "def add_post(content):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n cleaned = bleach.clean(content, strip = True)\n c.execute(\"insert into posts values(%s)\", (cleaned,))\n db.commit()\n db.close()", "def insert_new_post(post_arg_set):\n api, post_data, acct_data, page_id, config = post_arg_set\n\n try:\n post_id = post_data['id'] if post_data.has_key('id') else None\n\n except Exception as e:\n log.error( e )\n\n else:\n\n # parse date\n if post_data.has_key('created_time') and 
post_data['created_time'] is not None: \n dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)\n date_time = tz_adj(dt, config)\n time_bucket = round_datetime(date_time, config)\n raw_timestamp = int(date_time.strftime(\"%s\"))\n \n else:\n time_bucket = None\n raw_timestamp = None\n \n # extract message so we can find links within the msg if not in url\n article_urls = [get_fb_link(post_data, config, unshorten=True)]\n message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None\n message_urls = get_message_urls(article_urls, message, config)\n\n # detect article links, unshorten and parse\n article_urls = [\n parse_url(unshorten_link(url, config)) \\\n for url in article_urls + message_urls\n if url is not None\n ]\n\n article_urls = [url for url in article_urls if is_article(url, config)]\n\n if article_urls:\n for article_url in set(article_urls):\n\n # sluggify url\n article_slug = sluggify(article_url)\n\n # format data\n post_value = {\n 'article_slug': article_slug,\n 'article_url': article_url,\n 'time_bucket': time_bucket,\n 'fb_post_created': raw_timestamp,\n 'raw_timestamp': raw_timestamp,\n 'fb_raw_link' : get_fb_link(post_data, config=config),\n 'fb_page_id': page_id,\n 'fb_post_id': post_id,\n 'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,\n 'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,\n 'fb_type': post_data['type'] if post_data.has_key('type') else None,\n 'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,\n 'fb_message': message\n }\n \n # always insert insights data\n if is_insights(page_id, config):\n \n log.info( \"INSIGHTS\\tAdding data from %s re: %s\" % (page_id, article_slug) )\n\n # fetch data\n insights_value = get_insights_data(api, page_id, post_id)\n\n # create datasource name\n data_source = \"facebook_insights_%s\" % page_id \n \n # upsert url\n upsert_url(article_url, article_slug, data_source, config)\n\n # insert id\n db.sadd('facebook_post_ids', post_id)\n\n # format time bucket\n current_time_bucket = gen_time_bucket(config)\n insights_value['time_bucket'] = current_time_bucket\n post_value.pop('time_bucket', None)\n \n value = json.dumps({\n data_source : dict(post_value.items() + insights_value.items())\n })\n\n # upload data to redis\n db.zadd(article_slug, current_time_bucket, value) \n \n # only insert new posts\n if not db.sismember('facebook_post_ids', post_id):\n \n log.info( \"FACEBOOK\\tNew post %s\\t%s\" % (post_id, article_url) )\n \n # insert id\n db.sadd('facebook_post_ids', post_id) \n \n # upsert url\n data_source = \"facebook_%s\" % page_id\n upsert_url(article_url, article_slug, data_source, config)\n\n value = json.dumps( {data_source : post_value} )\n\n\n # upload data to redis\n db.zadd(article_slug, time_bucket, value)", "def newPost(self, postLink, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createNewBlogEntry()\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # publish entry\r\n atomRespEntry = self.createAtomEntry(postLink, atomEntry)\r\n return atomRespEntry", "def create_post(user_id):\n\n user = User.query.get_or_404(user_id)\n title = request.form['title']\n content = request.form['content']\n tag_ids = [int(num) for num in request.form.getlist(\"tags\")]\n tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n \n new_post = Post(title=title, content=content, user=user, tags=tags)\n db.session.add(new_post)\n db.session.commit()\n\n 
return redirect(f\"/users/{user_id}\")", "def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)", "def create(thing):\n fields = {}\n errors = []\n\n for col in thing.cols:\n new[col.field_name] = request.form.get(col.field_name)\n if col.required and not new[col.field_name]:\n errors.append('%s cannot be empty' % col.human_name)\n\n if errors:\n for e in errors:\n flash(e)\n add_template_variable('thing', thing)\n add_template_variable('fields', fields)\n return my_render_template('generic/create_post.html')\n\n # insert into database\n\n db = get_db()\n cursor = db.cursor()\n\n # create the two strings we use in the query\n field_names = \"'\" + \"', '\".join(thing.field_names) + \"'\"\n question_marks = \", \".join(map(lambda x: '?', thing.field_names.count() ))\n\n cursor.execute(\"insert into posts (%s) values (%s)\" % (field_names, question_marks), (title, body))\n db.commit()\n new_id = cursor.lastrowid\n\n # show new post to the user\n flash(\"You made a new %s\" % thing.human_name)\n return redirect(url_for('show_one', id_=new_id))", "def _transform_single_row(self, hashtag_agg: Dict, row: pd.Series):\n user_name = row[\"username\"] + \"_user\"\n tweet_id = str(row[\"id\"]) + \"_tweet\"\n tags = row[\"hashtags\"]\n\n self._users_labels.add(user_name)\n self._tweet_labels.add(tweet_id)\n\n if not self.graph.has_node(user_name):\n self.graph.add_node(user_name, type=\"username\")\n\n if not self.graph.has_node(tweet_id):\n self.graph.add_node(tweet_id, type=\"tweet_id\")\n\n for hashtag_index in tags:\n tag = hashtag_index[\"text\"] + \"_tag\"\n hashtag_agg[tag] += row[\"lemmas\"]\n\n if not self.graph.has_node(tag):\n self.graph.add_node(tag, type=\"hashtag\")\n\n if not self.graph.has_edge(tag, user_name):\n self.graph.add_edge(tag, user_name)\n\n if not self.graph.has_edge(tag, tweet_id):\n self.graph.add_edge(tag, tweet_id)\n\n self._hashtag_labels.add(tag)", "def _post_model_to_entry(self, redditpost):\n entry = Post()\n entry.post_id = redditpost.id\n entry.author = redditpost.author\n entry.author_premium = redditpost.author_premium\n entry.subreddit_subscribers = redditpost.subreddit_subscribers\n entry.title = redditpost.title\n entry.downs = redditpost.downs\n entry.ups = redditpost.ups\n entry.selftext = redditpost.selftext\n entry.num_comments = redditpost.num_comments\n entry.total_awards_received = redditpost.total_awards_received\n entry.view_count = redditpost.view_count\n entry.permalink = redditpost.permalink\n entry.url = redditpost.url\n entry.created = redditpost.created\n entry.created_utc = redditpost.created_utc\n\n return entry", "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def create_db_post(entry, keys, like):\n h = get_hash(entry['link'])\n collection = pos if like else neg\n return collection.update(\n {'hash': h},\n {\n 'link': entry['link'],\n 'title': entry['title'],\n 'published': '',\n 'content': \" \".join(keys),\n 'hash': h,\n 'read': False\n }, 
upsert=True\n )", "def add_post(content):\n db = psycopg2.connect(\"dbname=forum\")\n c = db.cursor()\n content = bleach.clean(content)\n c.execute(\"insert into posts values (%s)\", (content,))\n db.commit()\n db.close()\n # POSTS.append((content, datetime.datetime.now()))", "def convertResults(row):\n try:\n date = (row[2]).strftime(\"%Y-%m-%d %H:%M\")\n except AttributeError as err:\n logger.exception(err)\n date = \"0000-00-00 00:00\"\n post = {'post_id': row[0], 'user_id': row[1], 'post_date': date, 'post_content': row[3],\n 'post_photo_location': row[4], 'establishment_id': row[5], 'post_rating': row[6],\n 'post_subject': row[7], 'upvote': row[8], 'downvote': row[9]}\n return post", "def new_post(self, content):\n return self.proxy.wp.newPost(self.blog_id, self.username, self.password,\n content)", "def create_post(category, author, name, content, status):\n return Post.objects.create(category=category, author=author, name=name, content=content, status=status)", "def create_table():\n conn, curs = conn_curs()\n create = \"\"\"\n CREATE TABLE posts(\n id SERIAL PRIMARY KEY,\n title_selftext TEXT NOT NULL,\n subreddit VARCHAR(20) NOT NULL,\n subreddit_id VARCHAR(15) NOT NULL,\n num_comments INT NOT NULL,\n upvotes INT NOT NULL,\n downvotes INT NOT NULL,\n flair VARCHAR(20) NOT NULL,\n has_vid bool NOT NULL,\n num_awards INT NOT NULL)\n \"\"\"\n curs.execute(create)\n conn.commit()\n return", "def InsertRow(self, row_data, key, wksht_id='default'):\n new_entry = gdata.spreadsheet.SpreadsheetsList()\n for k, v in row_data.items():\n new_custom = gdata.spreadsheet.Custom()\n new_custom.column = k\n new_custom.text = v\n new_entry.custom[new_custom.column] = new_custom\n # Generate the post URL for the worksheet which will receive the new entry.\n post_url = 'https://spreadsheets.google.com/feeds/list/%s/%s/private/full'%(\n key, wksht_id) \n return self.Post(new_entry, post_url, \n converter=gdata.spreadsheet.SpreadsheetsListFromString)", "def place_types_map_row_to_rdf(self, row):\n row_rdf = Graph()\n hipla_place_class = HIPLA_SCHEMA_NS['Place']\n row_rdf.add((hipla_place_class, RDF.type, self.instance_class))\n row_rdf.add((hipla_place_class, SKOS.prefLabel, Literal(\"Paikka\", lang='fi')))\n row_rdf.add((hipla_place_class, SKOS.prefLabel, Literal(\"Paikka\", lang='en')))\n super_class = None\n desc = None\n\n if row['Paikanlajiteema_id']:\n pnr_id = str(int(row['Paikanlajiteema_id']))\n entity_uri = PNR_SCHEMA_NS['place_type_' + pnr_id]\n label = row['Paikanlajiteema']\n self.latest_theme = entity_uri\n row_rdf.add((entity_uri, RDFS['subClassOf'], HIPLA_SCHEMA_NS['Place']))\n elif row['Paikanlajiryhmä_id']:\n pnr_id = str(int(row['Paikanlajiryhmä_id']))\n entity_uri = PNR_SCHEMA_NS['place_type_' + pnr_id]\n label = row['Paikanlajiryhmä']\n self.latest_group = entity_uri\n super_class = self.latest_theme\n elif row['Paikanlajialaryhmä_id']:\n pnr_id = str(int(row['Paikanlajialaryhmä_id']))\n entity_uri = PNR_SCHEMA_NS['place_type_' + pnr_id]\n label = row['Paikanlajialaryhmä']\n self.latest_subgroup = entity_uri\n super_class = self.latest_group\n elif row['Paikanlaji_id']:\n pnr_id = str(int(row['Paikanlaji_id']))\n entity_uri = PNR_SCHEMA_NS['place_type_' + pnr_id]\n label = row['Paikanlaji']\n desc = row['Paikanlajin_kuvaus']\n super_class = self.latest_subgroup\n self.create_kotus_classes(row, entity_uri)\n else:\n return None\n\n row_rdf.add((entity_uri, RDF.type, self.instance_class))\n row_rdf.add((entity_uri, SKOS['prefLabel'], Literal(label, lang='fi')))\n\n if 
(super_class):\n row_rdf.add((entity_uri, RDFS['subClassOf'], super_class))\n if (desc):\n row_rdf.add((entity_uri, DCTERMS['description'], Literal(desc, lang='fi')))\n\n return row_rdf", "def post_div(row):\r\n timestr = datetime.datetime.fromtimestamp(row[\"ts\"]).strftime(\"%Y-%m-%d %H:%M\")\r\n\r\n return \"\"\"\r\n<div class=\"post\">\r\n <div class=\"post-main\">\r\n <span class=\"post-score\">{0}</span><a class=\"post-plus-button\" href=\"{4}\">+</a><a class=\"post-minus-button\" href=\"{5}\">-</a><span class=\"post-content\">{1}</span>\r\n </div>\r\n <div class=\"post-metadata\">\r\n Submitted by <span class=\"post-author\">{2}</span> at <span class=\"post-timestamp\">{3}</span>\r\n </div>\r\n</div>\"\"\".format(\r\n row[\"score\"],\r\n row[\"content\"],\r\n row[\"submitter\"],\r\n timestr,\r\n url_for(\"score_plus_one\",postid=row[\"postid\"]),\r\n url_for(\"score_minus_one\",postid=row[\"postid\"])\r\n)", "def create(self, **kwargs):\n\n for k, v in kwargs.items():\n kwargs[k] = unicode(v)\n\n self.row = self.client.ssclient.InsertRow(kwargs, self.table.db.key, wksht_id=self.worksheet_id)\n\n self.data = kwargs", "def insert_post(shard, **kwargs):\n # Create the posting and insert it.\n post_id = kwargs.pop('post_id', None)\n if not post_id:\n post_id = models.human_uuid()\n\n new_topic = kwargs.get('new_topic', None)\n\n kwargs['post_time'] = datetime.datetime.now()\n\n post_key = ndb.Key(models.Post._get_kind(), post_id)\n post = models.Post(\n key=post_key,\n **kwargs)\n\n @ndb.tasklet\n def txn():\n if (yield post_key.get_async(use_memcache=False, use_cache=False)):\n logging.warning('Post already exists for shard=%r, post_id=%r',\n shard, post_id)\n raise ndb.Rollback()\n\n yield post.put_async(use_memcache=False, use_cache=False)\n\n # Pull task that indicates the post to apply. This must encode the\n # new_topic data for this post so the apply_posts() function doesn't\n # need the models.Post entity in order to make progress.\n enqueue_post_task(shard, [post_id], new_topic=new_topic)\n\n # Notify all users of the post.\n futures = []\n futures.append(ndb.transaction_async(txn))\n futures.append(notify_posts(shard, [post]))\n\n # Set the dirty bit for this shard. 
This causes apply_posts to run a\n # second time if the Post transaction above completed while apply_posts\n # was already in flight.\n dirty_bit(shard, set=True)\n\n # Enqueue an apply task to sequence and notify the new post.\n futures.append(enqueue_apply_task(shard, post_id=post_id))\n\n # Wait on futures in case they raise errors.\n ndb.Future.wait_all(futures)\n\n return post_key", "def generate_post(self):\n post = {'title': self.generate_title(), 'draft': False}\n for k in ('blog', 'id', 'labels', 'categories', 'draft'):\n if k not in self.header:\n continue\n if k == 'blog':\n post[k] = {'id': self.header[k]}\n else:\n post[k] = self.header[k]\n return post", "def creat_posting_list_obj(posting_list_line):\n if posting_list_line == \"\":\n return []\n \n qt = posting_list_line[0]\n tail = posting_list_line[1:]\n ordered_list = []\n access_dict = {}\n \n for i in range(0,len(tail)-1,2):\n doc_id = tail[i]\n score = tail[i+1]\n ordered_list.append((float(score),int(doc_id)))\n access_dict[int(doc_id)] = float(score)\n \n return PostingList(qt,ordered_list,access_dict)", "def make_new_post(user_id):\n user = User.query.get_or_404(user_id)\n tags = Tag.query.all()\n return render_template('posts/new_post.html', user=user, tags=tags)", "def create(data):\n \n # create theme\n return Theme(\n theme_id = data['id'],\n parent_id = data.get('parent_id', None),\n name = data['name'])", "def create_entry(entry):\n Entry.create(**entry)\n return entry" ]
[ "0.5269831", "0.5175269", "0.51678485", "0.5167002", "0.51123905", "0.5089774", "0.50829285", "0.50513357", "0.5044976", "0.50155133", "0.49868736", "0.49727294", "0.49441043", "0.4942868", "0.4867174", "0.48466963", "0.4832807", "0.48096168", "0.4804877", "0.47906214", "0.47831824", "0.4762386", "0.47590637", "0.4747746", "0.47203118", "0.4717608", "0.4716912", "0.4704833", "0.46912962", "0.4686569" ]
0.56856656
0
Computes the number of effective training iterations. An effective iteration is defined as the aggregation of iterations. For example, if accumulate_size = 4, then 4 iterations are considered as one effective iteration.
def compute_effective_steps_per_epoch(dataloader: Iterable, accumulate_size: int) -> int: return len(dataloader) // accumulate_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations", "def get_iterations(train_length: int, batch_size: int, epochs: int) -> int:\n return train_length // batch_size * epochs", "def number_of_iterations(self) -> int:\n return self._stats[\"iter_count\"]", "def number_of_iterations(self) -> int:\n pass", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def number_of_iterations(self):\n return self._solution[\"iterations\"]", "def number_of_iterations(self):\n return self._solution.nit", "def getNIterations(self):\n return self.getOrDefault(self.nIterations)", "def getNIterations(self):\n return self.getOrDefault(self.nIterations)", "def __len__(self):\n return self.nb_iterations", "def n_elements(self) -> int:\n n_elem = np.prod(self.shape)\n if self.n_timesteps > 1:\n n_elem = int(n_elem / self.n_timesteps)\n return n_elem", "def total_steps(self) -> int:\n if self.hparams.max_steps:\n return self.hparams.max_steps\n else:\n assert self.hparams.max_epochs is not None\n num_devices = max(1, self.hparams.gpus * self.hparams.num_nodes) # TODO: consider num_tpu_cores\n effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices\n dataset_size = len(self.train_loader.dataset)\n return (dataset_size / effective_batch_size) * self.hparams.max_epochs", "def total_steps(self) -> int:\n if self.hparams.max_steps:\n return self.hparams.max_steps\n else:\n assert self.hparams.max_epochs is not None\n num_devices = max(1, self.hparams.gpus * self.hparams.num_nodes) # TODO: consider num_tpu_cores\n effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices\n dataset_size = len(self.train_loader.dataset)\n return (dataset_size / effective_batch_size) * self.hparams.max_epochs", "def num_training_steps(self) -> int:\n if self.trainer.max_steps:\n return self.trainer.max_steps\n\n limit_batches = self.trainer.limit_train_batches\n batches = len(self.train_dataloader())\n batches = (\n min(batches, limit_batches)\n if isinstance(limit_batches, int)\n else int(limit_batches * batches)\n )\n\n num_devices = max(1, self.trainer.num_gpus, self.trainer.num_processes)\n if self.trainer.tpu_cores:\n num_devices = max(num_devices, self.trainer.tpu_cores)\n\n effective_accum = self.trainer.accumulate_grad_batches * num_devices\n return (batches // effective_accum) * self.trainer.max_epochs", "def learned_step_size(self):\n return np.array(self._cov_learn).T", "def get_max_iters():\n return 2000", "def num_training_steps(self, num_batches, gradient_accumulation):\n return len(\n [i for i in range(self.num_mini_batches + 1, self.num_mini_batches + num_batches + 1) if\n i % gradient_accumulation == 0])", "def overall_reduction(self):\n return 84", "def num_eval_instances(self):\n return self.num_train_instances // 4", "def total_predict_batches(self) -> int:\n return sum(self.trainer.num_predict_batches)", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def _compute_num_inserts_per_actor_step(samples_per_insert: float,\n batch_size: int,\n sequence_period: int = 1) -> float:\n return sequence_period * batch_size 
/ samples_per_insert", "def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)", "def get_step_size(total_items, batch_size):\n return np.ceil(total_items / batch_size)", "def n_per_item(self):\n return self.lam().sum(axis=0)", "def _iterate_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if mixture_size is None:\n return 2 ** self.Ns\n else:\n return scipy.special.comb(self.Ns, mixture_size, exact=True)", "def get_length_itertools(iter_type, iter_obj, iter_size):\n\n candidates = len(iter_obj)\n if 'permutation' in iter_type:\n total = 1\n for i in range(iter_size):\n total *= (candidates - i)\n elif 'product' in iter_type:\n total = candidates ** iter_size\n elif 'combination' in iter_type:\n total = binomail(candidates, iter_size)\n return total", "def get_evaluation_batch_size():\n return 1", "def IterationCount(self):\r\n\t\treturn self._get_attribute('iterationCount')" ]
[ "0.68272024", "0.6722169", "0.6686256", "0.661356", "0.6491655", "0.6375589", "0.6240505", "0.61844486", "0.61844486", "0.6106719", "0.60720783", "0.6068031", "0.6068031", "0.6049805", "0.5977738", "0.5970602", "0.59378844", "0.5913421", "0.5890675", "0.5831778", "0.57741433", "0.5756328", "0.57427484", "0.57426333", "0.57270426", "0.5692735", "0.5677338", "0.56633544", "0.5646763", "0.5640188" ]
0.7493815
0
Returns the states of the lr scheduler as dictionary.
def state_dict(self) -> dict: return self.lr_scheduler.state_dict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def _get_state_dict(optimizer) -> dict:\n if is_scheduler(optimizer):\n state = {\n \"scheduler\": optimizer.state_dict(),\n \"optimizer\": optimizer.optimizer.state_dict(),\n }\n else:\n state = optimizer.state_dict()\n return state", "def state_dict(self):\n return self._prbm.state_dict", "def get_state(self) -> Dict:\n state_dict = {}\n for param in self.optim_objs:\n param_name = pyro.get_param_store().param_name(param)\n state_dict[param_name] = _get_state_dict(self.optim_objs[param])\n return state_dict", "def get_state(self) -> Dict:\n return {\n \"patience\": self.patience,\n \"cooldown\": self.cooldown,\n \"cooldown_counter\": self.cooldown_counter,\n \"mode\": self.mode,\n \"threshold\": self.threshold,\n \"threshold_mode\": self.threshold_mode,\n \"best\": self.best,\n \"num_bad_epochs\": self.num_bad_epochs,\n \"mode_worse\": self.mode_worse,\n \"last_epoch\": self.last_epoch,\n }", "def get_state(self):\n xml = self.env.sim.model.get_xml() # model xml file\n state = np.array(self.env.sim.get_state().flatten()) # simulator state\n return dict(model=xml, states=state)", "def getstate(self):\n return {}", "def _stateDict(self):\n\n data = {}\n # if self.currentState[4]:\n # data['action'] = 'BRAK'\n # else:\n data['action'] = 'MCTL'\n data['speed'] = float(self.speed)\n data['steerAngle'] = float(self.steering_angle)\n\n return data", "def state_dict(self):\n if ADVERSARIAL_FLAG:\n return {'Net': self.net.state_dict(),\n 'AdvNet': self.adv_net.state_dict(),\n 'Optimizer': self.optimizer.state_dict(),\n 'AdvOptimizer': self.adv_optimizer.state_dict(),\n 'History': self.history,\n 'Stats': self.stats}\n return {'Net': self.net.state_dict(),\n 'Optimizer': self.optimizer.state_dict(),\n 'History': self.history,\n 'Stats': self.stats}", "def device_state_attributes(self):\n attr = {}\n attr[\"enabled\"] = self._zone.enabled and self._controller.enabled\n attr[\"status\"] = self._zone.status\n attr[\"schedule_count\"] = len(self._zone.schedules)\n attr[\"schedules\"] = \"\"\n attr[\"adjustment\"] = self._zone.adjustment.as_string\n current = self._zone.runs.current_run\n if current is not None:\n if current.schedule is not None:\n attr[\"current_schedule\"] = current.schedule.schedule_index + 1\n attr[\"current_name\"] = current.schedule.name\n else:\n attr[\"current_schedule\"] = RES_MANUAL\n attr[\"current_name\"] = RES_MANUAL\n attr[\"current_start\"] = dt.as_local(current.start_time)\n attr[\"current_duration\"] = str(current.duration)\n attr[\"time_remaining\"] = str(current.time_remaining)\n attr[\"percent_complete\"] = current.percent_complete\n else:\n attr[\"current_schedule\"] = RES_NOT_RUNNING\n attr[\"percent_complete\"] = 0\n\n next = self._zone.runs.next_run\n if next is not None:\n if next.schedule is not None:\n attr[\"next_schedule\"] = next.schedule.schedule_index + 1\n attr[\"next_name\"] = next.schedule.name\n else:\n attr[\"next_schedule\"] = RES_MANUAL\n attr[\"next_name\"] = RES_MANUAL\n attr[\"next_start\"] = dt.as_local(next.start_time)\n attr[\"next_duration\"] = str(next.duration)\n else:\n attr[\"next_schedule\"] = RES_NONE\n\n return attr", "def __getstate__(self):\n state = {\n 'connector_keys' : 
self.connector_keys,\n 'metric_key' : self.metric_key,\n 'location_key' : self.location_key,\n 'parameters' : self.parameters,\n 'mrsm_instance' : self.instance_keys,\n }\n return state", "def __getstate__(self):\n\n state = {}\n for key in self.__slots__:\n state[key] = getattr(self, key)\n\n return state", "def get_state(self):\n\n # TODO: Assemble a dictionary containing the tracker state\n\n return {}", "def state_dict(self):\n return {\n 'epoch': self.epoch,\n 'iterations_in_epoch': self.iterations_in_epoch,\n }", "def get_states(self):\n states = {}\n if hasattr(self, 'random_mask_state'):\n states['random_mask_state'] = self.random_mask_state.get_state()\n if hasattr(self, 'deformrandomstate'):\n states['deformrandomstate'] = self.deformrandomstate.get_state()\n states['randomstate'] = self.randomstate.get_state()\n return states", "def get_state(self) -> Dict[str, Any]:\n return {\"aq_potential_num\": self.aq_potential_num, \"wq_potential_num\": self.wq_potential_num}", "def get_finished_states_dict():\n\tfinished_states_dict = {}\n\tget_red_rcp_primary_result_data(finished_states_dict)\n\tget_blue_rcp_primary_result_data(finished_states_dict)\n\treturn finished_states_dict", "def state_dict(self):\n return (\n {\n \"scale\": self.get_scale(),\n \"growth_factor\": self._growth_factor,\n \"backoff_factor\": self._backoff_factor,\n \"growth_interval\": self._growth_interval,\n \"_growth_tracker\": self._get_growth_tracker(),\n \"_hysteresis_tracker\": self._hysteresis_tracker,\n }\n if self._enabled\n else {}\n )", "def get_state(self):\n return {\n \"epoch\": self.epoch,\n \"weights\": self.model.get_weights(),\n \"optimizer_weights\": self.model.optimizer.get_weights()\n }", "def state_dict(self):\n return {\n 'XY_net': self.XY_net.state_dict(),\n 'XY_optimizer_minee': self.XY_optimizer_minee.state_dict(),\n 'X_net': self.X_net.state_dict(),\n 'X_optimizer_minee': self.X_optimizer_minee.state_dict(),\n 'Y_net': self.Y_net.state_dict(),\n 'Y_optimizer_minee': self.Y_optimizer_minee.state_dict(),\n 'X': self.X,\n 'Y': self.Y,\n 'lr': self.lr,\n 'batch_size': self.batch_size,\n 'ref_batch_factor': self.ref_batch_factor\n }", "def device_state_attributes(self):\n attr = {}\n attr[\"enabled\"] = self._controller.enabled\n attr[\"zone_count\"] = len(self._controller._zones)\n attr[\"zones\"] = \"\"\n current = self._controller.runs.current_run\n if current is not None:\n attr[\"current_zone\"] = current.index + 1\n attr[\"current_name\"] = current.zone.name\n attr[\"current_start\"] = dt.as_local(current.start_time)\n attr[\"current_duration\"] = str(current.duration)\n attr[\"time_remaining\"] = str(current.time_remaining)\n attr[\"percent_complete\"] = current.percent_complete\n else:\n attr[\"current_schedule\"] = RES_NOT_RUNNING\n attr[\"percent_complete\"] = 0\n\n next = self._controller.runs.next_run\n if next is not None:\n attr[\"next_zone\"] = next.index + 1\n attr[\"next_name\"] = next.zone.name\n attr[\"next_start\"] = dt.as_local(next.start_time)\n attr[\"next_duration\"] = str(next.duration)\n else:\n attr[\"next_schedule\"] = RES_NONE\n\n return attr", "def state(self):\n return {\n 'network': self._network,\n 'target_network': self._target_network,\n 'optimizer': self._optimizer,\n 'num_steps': self._num_steps\n }", "def get_state(self):\n return self.agents, self.foods, self.viruses, self.masses, self.time", "def state_dict(self) -> dict:\n _state_dict: dict[str, Any] = super().state_dict\n _state_dict[\"rng_state\"] = self.rng.get_state()\n _state_dict[\"seed\"] = 
self.seed\n _state_dict[\"strategy\"] = self.strategy.state_dict\n return _state_dict", "def device_state_attributes(self):\n return {\n \"next_load_shedding_slot\": self.coordinator.data.get(\"next_load_shedding_slot\"),\n }", "def create_state_dict(self):\n return {\n 'resting': self.resting,\n 'moving': self.moving,\n 'animated resting': self.animated_resting,\n 'autoresting': self.auto_resting,\n 'automoving': self.auto_moving,\n 'battle resting': self.battle_resting,\n 'attack': self.attack,\n 'enemy attack': self.enemy_attack,\n c.RUN_AWAY: self.run_away,\n c.VICTORY_DANCE: self.victory_dance,\n c.KNOCK_BACK: self.knock_back,\n c.FADE_DEATH: self.fade_death\n }", "def schedd_states(schedd_classad):\n return {'Running': schedd_classad['TotalRunningJobs'],\n 'Idle': schedd_classad['TotalIdleJobs'],\n 'Held': schedd_classad['TotalHeldJobs'],\n 'Removed': schedd_classad['TotalRemovedJobs']}", "def get_state_for_lcp(self):\r\n return {\r\n 'done': self.done,\r\n 'correct_map': self.correct_map,\r\n 'student_answers': self.student_answers,\r\n 'input_state': self.input_state,\r\n 'seed': self.seed,\r\n }", "def get_state_dict(self):\n raise NotImplemented()" ]
[ "0.7723254", "0.7723254", "0.7137493", "0.6843552", "0.67712843", "0.6729426", "0.6640449", "0.6620688", "0.6570829", "0.6513248", "0.6504064", "0.6473677", "0.64505357", "0.64180076", "0.6406267", "0.640395", "0.6358328", "0.6326163", "0.6309934", "0.628206", "0.6259487", "0.62573975", "0.62429917", "0.6217021", "0.62120277", "0.61902213", "0.61835307", "0.6172479", "0.6164862", "0.6143587" ]
0.8734462
0
Load the states of the lr scheduler from a dictionary object.
def load_state_dict(self, state_dict: dict) -> None:
    self.lr_scheduler.load_state_dict(state_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_state_dict(self, state_dict):\n if self._lr_scheduler is not None:\n self._lr_scheduler.load_state_dict(state_dict)\n else: # here we store the state_dict until we instantiate the optimizer\n self._state_dict = state_dict", "def _load_state_dict(optimizer, state: dict) -> None:\n if is_scheduler(optimizer):\n optimizer.load_state_dict(state[\"scheduler\"])\n optimizer.optimizer.load_state_dict(state[\"optimizer\"])\n else:\n optimizer.load_state_dict(state)", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def load_state(self, dictionary):\n self.log_formatstr = dictionary['log_formatstr']\n self.backend_interval = dictionary['backend_interval']", "def load_state_dict(self, state, normalize_dataparallel_keys=False):\n try:\n loaded_masks = state['masks_dict']\n except KeyError as exception:\n #print('could not load the CompressionScheduler state.'' masks_dict is missing from state')\n msglogger.error('Could not load the CompressionScheduler state.'' masks_dict is missing from state')\n with contextlib.suppress(TypeError):\n #print('Scheduler state keys are: {}'.format(', '.join(state)))\n msglogger.debug('Scheduler state keys are: {}'.format(', '.join(state)))\n raise\n\n if normalize_dataparallel_keys:\n loaded_masks = {normalize_module_name(k): v for k, v in loaded_masks.items()}\n device = model_device(self.model)\n for name, mask in self.zeros_mask_dict.items():\n masker = self.zeros_mask_dict[name]\n masker.mask = loaded_masks[name]\n if masker.mask is not None:\n masker.mask = masker.mask.to(device)", "def load_from_state_dict(self, state_dict):\n raise NotImplementedError", "def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass", "def load_state_dict(self, state_dict):\n self.XY_net.load_state_dict(state_dict['XY_net'])\n self.XY_optimizer_minee.load_state_dict(\n state_dict['XY_optimizer_minee'])\n self.X_net.load_state_dict(state_dict['X_net'])\n self.X_optimizer_minee.load_state_dict(state_dict['X_optimizer_minee'])\n self.Y_net.load_state_dict(state_dict['Y_net'])\n self.Y_optimizer_minee.load_state_dict(state_dict['Y_optimizer_minee'])\n self.X = state_dict['X']\n self.Y = state_dict['Y']\n if 'lr' in state_dict:\n self.lr = state_dict['lr']\n if 'batch_size' in state_dict:\n self.batch_size = state_dict['batch_size']\n if 'ref_batch_factor' in state_dict:\n self.ref_batch_factor = state_dict['ref_batch_factor']", "def load_state_dict(self, state_dict):\n self.epoch = state_dict['epoch']\n itr_pos = state_dict.get('iterations_in_epoch', 0)\n if itr_pos > 0:\n # fast-forward epoch iterator\n itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))\n if itr_pos < len(itr):\n self._next_epoch_itr = itr.skip(itr_pos)", "def load_state_dict(self, arg):\n self.TrajectoryAutoencoder.load_state_dict(torch.load(arg))", "def load_state_dict(self, state_dict):\n if not self._enabled:\n return\n\n if len(state_dict) == 0:\n raise RuntimeError(\n \"The source state dict is empty, possibly because it was saved \"\n \"from a disabled instance of GradScaler.\"\n )\n\n self._init_scale = state_dict[\"scale\"]\n if self._scale is not None:\n 
self._scale.fill_(state_dict[\"scale\"])\n self._growth_factor = state_dict[\"growth_factor\"]\n self._backoff_factor = state_dict[\"backoff_factor\"]\n self._growth_interval = state_dict[\"growth_interval\"]\n self._init_growth_tracker = state_dict[\"_growth_tracker\"]\n if self._growth_tracker is not None:\n self._growth_tracker.fill_(state_dict[\"_growth_tracker\"])\n if \"_hysterisis_tracker\" in state_dict:\n self._hysteresis_tracker = state_dict[\"_hysterisis_tracker\"]\n else:\n self._hysteresis_tracker = 1", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", "def load_state_from_dict(self, dictionary):\n # _Context object should be empty\n if self.__dict__:\n log.warning(\"useful.logs.context should be empty before loading a \"\n \"new state into it\",\n extra={\"current_state\": self.__dict__})\n for key, value in dictionary.items():\n self.__setattr__(key, value)", "def from_dict(cls, dikt) -> \"Scheduler\":\n return util.deserialize_model(dikt, cls)", "def state_dict(self) -> dict:\n return self.lr_scheduler.state_dict()", "def load_snapshot(self,state_path):\n \n running_states=th.load(state_path)\n for key,val in running_states.items():\n setattr(self,key,val)", "def set_states(self, state_dict):\n self.trainer.get_model().load_state_dict(state_dict)", "def forgiving_state_restore(net, loaded_dict):\n net_state_dict = net.state_dict()\n new_loaded_dict = {}\n for k in net_state_dict:\n if k in loaded_dict and net_state_dict[k].size() == loaded_dict[k].size():\n new_loaded_dict[k] = loaded_dict[k]\n else:\n print(\"Skipped loading parameter\", k)\n # logging.info(\"Skipped loading parameter %s\", k)\n net_state_dict.update(new_loaded_dict)\n net.load_state_dict(net_state_dict)\n return net", "def load_dict(self, dct):\n pass", "def load_schedule(self, schedule):\n for sched in schedule:\n assert type(sched[\"num_batches\"]) == int\n if sched[\"weights\"] is not None: # schedule specificies specific variables for trainable vars\n assert type(sched[\"weights\"]) == list\n else: # scalar is used\n sched[\"weights\"] = self.get_trainable_variable_names()\n target_len = len(sched[\"weights\"])\n sched[\"weight_lr\"] = self.check_schedule_type(sched[\"weight_lr\"], float, target_len)\n sched[\"decay_steps\"] = self.check_schedule_type(sched[\"decay_steps\"], int, target_len)\n sched[\"decay_rate\"] = self.check_schedule_type(sched[\"decay_rate\"], float, target_len)\n sched[\"staircase\"] = self.check_schedule_type(sched[\"staircase\"], bool, target_len)", "def loadState(self):\n\t\tif not path.exists(STATEFILE):\n\t\t\tprint \"No previous statefile, assuming first run\"\n\t\t\tself.state['lastrun'] = datetime.datetime.now()-datetime.timedelta(days=365)\n\t\telse:\n\t\t\tsfile = open(STATEFILE,'r')\n\t\t\tself.state = cPickle.load(sfile)\n\t\tself.lastrun = self.state['lastrun']", "def load_params(self):\n\n self.curr_ts_state = None\n\n # Get TS from param\n self.transition_system = import_ts_from_file(rospy.get_param('transition_system_textfile'))\n\n # Get monitored TS state model\n self.state_dimension_name = rospy.get_param(\"~state_dimension_name\", \"load\")\n\n # Get monitored action\n self.monitored_action = rospy.get_param(\"~monitored_action\", \"pick\")\n \n # Create dict to retrieve next state given current state and next action\n self.action_to_state = dict()\n for state in 
self.transition_system['state_models'][self.state_dimension_name]['nodes']:\n temp_dict = dict()\n for connected_state in self.transition_system['state_models'][self.state_dimension_name]['nodes'][state]['connected_to']:\n temp_dict.update({self.transition_system['state_models'][self.state_dimension_name]['nodes'][state]['connected_to'][connected_state]: connected_state})\n self.action_to_state.update({state: temp_dict})", "def load(self, file_name_with_path: str):\n\n if self.state._models is None:\n self.register_models()\n logger.info(\"Agent State loaded successfully\")\n for k, model in self.state._models.items():\n model.load(file_name_with_path=os.path.join(f'{file_name_with_path}_{model.name}.th'))\n logger.info(f'{file_name_with_path}_{model.name}.th loaded')\n logger.info(f\"{model.name} model loaded successfully\")\n self.state = Munch(json.load(open(file_name_with_path + \".meta\")))", "def load(self, from_path):\n with open(from_path, 'rb') as f:\n self.load_state_dict(torch.load(f))", "def load(self, from_path):\n with open(from_path, 'rb') as f:\n self.load_state_dict(torch.load(f))", "def load_state_dict(self, state_dict, strict=True, args=None):\n\n \"\"\"Overrides fairseq_model.py\n\n \"\"\"\n if getattr(args, \"load_to_teacher\", False):\n logger.warning(\"Will load checkpoint weights to teacher!\")\n cur = self.state_dict()\n for k, v in state_dict.items():\n cur[\"teacher.\" + k] = v\n state_dict = cur\n\n return super().load_state_dict(state_dict, strict=strict, args=args)", "def load(self, path):\n self.load_state_dict(torch.load(path))" ]
[ "0.78618276", "0.74135333", "0.7223805", "0.7223805", "0.6929675", "0.6774868", "0.67248166", "0.671686", "0.6587653", "0.65414304", "0.65217894", "0.65166456", "0.63904953", "0.63904953", "0.63904953", "0.63538444", "0.6286555", "0.62593186", "0.6239442", "0.6191234", "0.6061811", "0.6054185", "0.6043089", "0.59806526", "0.5953405", "0.59406793", "0.591543", "0.591543", "0.5890713", "0.58854026" ]
0.8198997
0
Handle gradients reduction only in the last gradient accumulation step.
def handle_gradient(self) -> None:
    self.accumulate_step += 1
    if self.accumulate_step < self.accumulate_size:
        pass
    else:
        self.accumulate_step = 0
        self.grad_handler.handle_gradient()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_gradient(self):\n self._optimizer.sync_grad()", "def on_batch_end(self, state: _State):\n if not state.need_backward_pass:\n return\n\n loss = state.batch_metrics[self.loss_key]\n optimizer = self._optimizer\n\n self._accumulation_counter += 1\n need_gradient_step = \\\n (self._accumulation_counter + 1) % self.accumulation_steps == 0\n\n # This is very hacky check whether we have AMP optimizer and this may\n # change in future.\n # But alternative solution is to have AmpOptimizerCallback.\n # or expose another c'tor argument.\n if hasattr(optimizer, \"_amp_stash\"):\n from apex import amp\n # Need to set ``delay_unscale``\n # according to\n # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n delay_unscale = not need_gradient_step\n with amp.scale_loss(\n loss, optimizer, delay_unscale=delay_unscale\n ) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if need_gradient_step:\n self.grad_step(\n optimizer=optimizer,\n optimizer_wds=self._optimizer_wd,\n grad_clip_fn=self.grad_clip_fn\n )\n\n # if self.save_model_grads:\n # for tag, value in model.named_parameters():\n # tag = tag.replace(\".\", \"/\")\n # state.model_grads[tag] = value.grad.cpu().numpy()\n\n utils.maybe_recursive_call(optimizer, \"zero_grad\")\n\n self._accumulation_counter = 0", "def allreduce_hook(state: AllReduceState, grad: torch.Tensor):\n if state.gradient_predivide_factor > 1:\n grad.div_(state.gradient_predivide_factor)\n dist.all_reduce(grad, group=state.process_group)\n if state.gradient_postdivide_factor > 1:\n grad.div_(state.gradient_postdivide_factor)", "def pass_gradients(self):\n return self.last_grads", "def _register_grad_hook(self):\n\n def reduction_fn():\n # This function only needs to be called once\n if not self.need_reduction:\n return\n\n self.need_reduction = False\n all_grads = []\n\n # Bucketing all the gradients\n for param in self.module.parameters():\n if not param.requires_grad:\n continue\n if param.grad is not None and param.grad.requires_grad:\n raise RuntimeError(\"DistributedDataParallel only works \"\n \"with gradients that don't require \"\n \"grad\")\n if param.grad is not None:\n # Adding the gradients for reduction\n all_grads.append(param.grad.data)\n else:\n all_grads.append(torch.zeros_like(param))\n\n # Now bucketing the parameters\n dev_grads_buckets = _take_tensors(all_grads,\n self.reduce_bucket_size)\n\n # Now reduce each bucket one after another\n for grads_batch in dev_grads_buckets:\n grads_batch_coalesced = _flatten_dense_tensors(grads_batch)\n\n grads_batch_coalesced /= self.world_size\n\n distributed_utils.all_reduce(grads_batch_coalesced, self.process_group)\n\n grads_batch_reduced = _unflatten_dense_tensors(grads_batch_coalesced, grads_batch)\n for grad, reduced in zip(grads_batch, grads_batch_reduced):\n grad.copy_(reduced)\n\n # Now register the reduction hook on the parameters\n for p in self.module.parameters():\n if not p.requires_grad:\n continue\n\n def allreduce_hook(*unused):\n Variable._execution_engine.queue_callback(reduction_fn)\n\n p.register_hook(allreduce_hook)", "def post_gradient_application(self, sess: tf.Session) -> None:\n pass", "def process_gradient(engine, lr, parameters, grad_clip, accumulator):\n with tf.GradientTape() as tape:\n engine.optical_system.update()\n engine.clear_ray_history()\n engine.ray_trace(2)\n output = tf.stack(\n [engine.finished_rays[\"y_end\"], engine.finished_rays[\"z_end\"]],\n axis=1\n )\n goal = tf.constant([(0, -.25)], dtype=tf.float64)\n error = 
tf.math.squared_difference(output, goal)\n grad = tape.gradient(error, parameters)\n \n print(f\"raw gradient: {grad}\")\n \n try:\n grad = tf.where(tf.math.is_finite(grad), grad, tf.zeros_like(grad))\n except(ValueError):\n grad = tf.zeros_like(parameters, dtype=tf.float64)\n grad *= lr\n grad = tf.clip_by_value(grad, -grad_clip, grad_clip)\n print(f\"gradient after scaling: {grad}\")\n grad = tf.reshape(grad, (-1, 1))\n grad = tf.matmul(accumulator, grad)\n grad = tf.reshape(grad, (-1,))\n print(f\"gradient after accumulation: {grad}\")\n \n return grad, tf.reduce_sum(error)", "def compute_gradients(self):\n raise NotImplementedError()", "def accumulate_grad(target: Tensor, grad: np.ndarray):\n # if this is a const, just return\n if target.is_const:\n return\n\n if isinstance(target.operation, SumOp):\n assert target.shape == grad.shape, \\\n 'Cannot take derivative of a sum up tensor explicitly.\\n' \\\n 'Please avoid broadcast a sum up tensor and choose another way to calculate.\\n' \\\n 'See why softmax as a basic operation'\n\n assert target.shape == grad.shape, \\\n 'tensor and gradient shape not compatible. Tensor: {}, Gradient: {}'.format(target.shape, grad.shape)\n\n target.grad += grad", "def increment_grad(self, curr_loss, curr_state, hook=None):\n prev_loss = self._prev_loss\n prev_state = self._prev_state\n d_loss = None\n norm = None\n\n if self.loss:\n d_loss = curr_loss - prev_loss\n if d_loss > 0 and self.stabilizer:\n d_loss = -d_loss\n\n if self.norm:\n norm = compute_global_norm(curr_state, prev_state, d_loss)\n\n for n, p in self.state.items():\n if not p.requires_grad:\n continue\n\n curr_param = curr_state[n].detach()\n prev_param = prev_state[n].detach()\n prev_param_grad = prev_state[n].grad.detach()\n\n add = prev_param - curr_param\n if self.loss:\n add += -d_loss * prev_param_grad\n\n if self.norm:\n add.data.div_(norm)\n\n if hook is not None:\n hook(add)\n\n p.grad.add_(add)", "def _calc_gradients_chunked(self, batch_data, training_settings=None):\n\n if not is_chunkable(batch_data):\n raise BatchNotChunkableException()\n\n auxiliary_results = BatchChunkingResults()\n\n loss = 0\n # Will be set when we have the trainable variables\n accumulated_grads = None\n\n batch_size = len(batch_data)\n num_chunks = math.ceil(batch_size / self.batch_chunk_size)\n for chunk_idx in range(num_chunks):\n chunk_start = chunk_idx*self.batch_chunk_size\n chunk_end = min((chunk_idx+1)*self.batch_chunk_size, batch_size)\n\n chunk_len = chunk_end-chunk_start\n\n chunk = batch_data[chunk_start:chunk_end]\n\n with tf.GradientTape() as tape:\n results = self.evaluate_loss(chunk,\n inference_mode=False,\n evaluate_settings=training_settings)\n\n if 'loss' not in results:\n raise LossNotAvailableException()\n\n if self.trainable_variables is None:\n # We now have evaluated the model and the trainable variables should be available\n self._retrieve_trainable_variables()\n\n if accumulated_grads is None:\n if self.trainable_variables is None:\n raise MLPugException(\"Unexpected state : trainable variables not found. 
Please file an issue.\")\n\n accumulated_grads = {}\n for optimizer_name, tvs in self.trainable_variables.item():\n accumulated_grads[optimizer_name] = [tf.zeros_like(tv) for tv in tvs]\n\n loss = results['loss']\n aux_results = get_value_at('auxiliary_results', results, warn_on_failure=False)\n\n # loss is assumed to be the average over the sample loss for the chunk\n # Divide through batch size to factor in that this loss is part of a larger batch.\n last_chunk = chunk_idx == (num_chunks-1)\n chunk_loss = chunk_len*loss/batch_size\n chunk_gradients = self._back_propagate_from(chunk_loss, tape, last_chunk=last_chunk)\n\n loss += chunk_loss\n\n for optimizer_name, chunk_grads in chunk_gradients.items():\n accu_grads = accumulated_grads[optimizer_name]\n accumulated_grads[optimizer_name] = [(accu_grad+chunk_grad)\n for accu_grad, chunk_grad in zip(accu_grads, chunk_grads)]\n\n auxiliary_results += [{\n \"results\": aux_results,\n \"num_samples\": chunk_len\n }]\n\n return loss, auxiliary_results, accumulated_grads", "def backward_gradient(\n self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]\n ) -> np.ndarray:\n raise NotImplementedError", "def apply_gradients(self, grads, x, state):\n fold_state = state[-1]\n state = state[:-1]\n x, state = super(FoldingInnerOptimizer, self).apply_gradients(\n grads, x, state)\n fold_state = self._reduce_fn(fold_state, x)\n state = state + (fold_state,)\n return x, state", "def backward(self, gradient):\n raise NotImplementedError()", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def _register_post_backward_hooks(self) -> None:\n if not torch.is_grad_enabled():\n return # don't register grad hooks if grad isn't enabled\n for p in self.full_params:\n if p.requires_grad:\n if hasattr(p, \"_shard_bwd_hook\"):\n continue\n # Register a hook on the first call, empirically, autograd\n # fires it at the end for this param, which makes sense.\n p_tmp = p.expand_as(p) # Get a grad_fn on p_tmp.\n assert p_tmp.grad_fn is not None\n grad_acc = p_tmp.grad_fn.next_functions[0][\n 0] # Gets its GradAccumulation object.\n handle = grad_acc.register_hook(\n functools.partial(self._post_backward_hook, p))\n p._shard_bwd_hook = (grad_acc, handle)", "def GradientAdjuster(self):\n pass", "def backward(self, grad_out):\n\n # *********************************************\n # check this with torch.autograd.gradcheck !!!!\n # *********************************************\n\n k, a, m, y, targets = self.saved_tensors\n b = 1.0 - a\n\n features = y.numpy()\n labels = targets.numpy()\n\n loss, counts, centers, l_intra, inter_indices, l_inter, d = self.compute_loss(features, labels, k.numpy(), a.numpy(), b.numpy(), m.numpy())\n\n grad_inter = torch.FloatTensor(y.size())\n grad_intra = torch.FloatTensor(y.size())\n\n idx1 = inter_indices[0]\n idx2 = inter_indices[1]\n grad_inter[idx1] = torch.from_numpy(0.5 / (counts[idx1]) * np.abs(centers[idx1] - centers[idx2]))\n grad_inter[idx2] = torch.from_numpy(0.5 / (counts[idx2]) * np.abs(centers[idx2] - centers[idx1]))\n\n # compute intra class gradients with respect to xi, xj\n # only nonzero for these two values\n\n # *********************************************************\n # HOW TO COMPUTE GRADIENTS WITH RESPECT TO MULTIPLE SAMPLES\n # WHEN LOSS IS JUST COMPUTED OVERALL????\n # *********************************************************\n\n for idx in range(y.size()[1]):\n denom = np.array([np.power(d[idx,0]*np.sum(d[idx,:]),2)])\n grad = 2*k.double() / 
torch.from_numpy(denom)\n for entry in range(y.size()[0]):\n grad_intra[entry, idx] = grad[0]\n\n # compute inter class gradients with respect to xq, xr\n # only nonzero for these two values\n\n # ****************************************\n # SOMEHOW THE GRADIENT IS WAY TOO BIG ****\n # ****************************************\n grad_in = a*grad_intra + b*grad_inter\n print(grad_in)\n return grad_in, torch.DoubleTensor([0]), torch.DoubleTensor([0]), torch.DoubleTensor([0]), torch.DoubleTensor([0])", "def gradient(self, node, output_grad):\r\n return [output_grad]\r\n \"\"\"higher accuracy notice notice here\"\"\"", "def compute_gradient(self, function, arguments):", "def compute_gradient(self, input, error):\n raise NotImplementedError()", "def _UpdateGradient(self):\n self.mol.GetGradient('analytic')", "def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h", "def gradient(self, node, output_grad):\r\n return [auto_sum_op(output_grad, get_shape_op(node.inputs[0]) ), 0-auto_sum_op(output_grad, get_shape_op(node.inputs[1]) )]\r\n #return [auto_sum_op(output_grad, ), 0-output_grad]\r", "def internal_grad_fn(unused_op, *result_grads): # pylint: disable=unused-variable\n return tape_grad_fn(*result_grads)", "def apply_gradient(self, learning_rate):\n raise NotImplementedError()", "def optimize(self,gradients):\n\n for k in range(self.size):\n delta_weight = self.learningRate * gradients[k]\n full_change = delta_weight + self.momentum*self.last_change[k]\n self.weights[k] -= full_change\n self.last_change[k] = 1*gradients[k] #copy gradient mat", "def check_gradient(f, x, delta=1e-5, tol=1e-4):\n\n assert isinstance(x, np.ndarray)\n assert x.dtype == np.float\n \n orig_x = x.copy()\n #print('check_g, orig_x befor',orig_x)\n #print('check_g, x befor',x)\n #print('befor first pass in grad check')\n fx, analytic_grad = f(x)\n #print('after first pass in grad check')\n #print('check_g, orig_x after',orig_x)\n #print('check_g, x.shape',x.shape)\n #print('func',f(x)[0])\n 
#print('fx=',fx,'analityc_grad=',analytic_grad)\n \n assert np.all(np.isclose(orig_x, x, tol)), \"Functions shouldn't modify input variables\"\n\n assert analytic_grad.shape == x.shape\n #print('analitical grad.shape',analytic_grad.shape)\n analytic_grad = analytic_grad.copy()\n\n # We will go through every dimension of x and compute numeric\n # derivative for it\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n #print('it.shape=',it.shape)\n while not it.finished:\n ix = it.multi_index\n #print('ix',ix)\n #print('x[ix]',x[ix])\n analytic_grad_at_ix = analytic_grad[ix]\n #print('analitical_grad-at_ix',analytic_grad_at_ix)\n orig_x = x.copy()\n #print('orig_x',orig_x)\n #print('x.shape befor delta',x.shape)\n orig_x[ix]+=delta\n #print('x.shape after delta',x.shape)\n #print('orig_x[ix] delta +',orig_x[ix])\n fx_plus=f(orig_x)[0]\n #fx_plus=fx_plus_full[ix[0]]\n #print('fx__plus',fx_plus)\n orig_x = x.copy()\n orig_x[ix]-=delta\n #print('orig_x[ix] delta -',orig_x[ix])\n fx_minus=f(orig_x)[0]\n #print('fx_minus',fx_minus)\n \n divider=2*delta\n #print('divider',divider)\n #numeric_grad_at_ix = np.divide((fx_plus-fx_minus),divider)\n numeric_grad_at_ix = (fx_plus-fx_minus)/divider\n #print('numeric_grad_at_ix',numeric_grad_at_ix)\n #print('fx(ix)', fx[ix])\n\n # TODO compute value of numeric gradient of f to idx\n \n if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):\n print(\"Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f\" % (ix, analytic_grad_at_ix, numeric_grad_at_ix))\n return False\n\n it.iternext()\n\n print(\"Gradient check passed!\")\n return True", "def on_backward_end(self, batch):\n if self.updater == \"backward\":\n grads = OrderedDict((name, param.grad.data.cpu(\n )) for name, param in self.model.model.named_parameters() if param.grad is not None)\n try:\n self.update(grads)\n except KeyboardInterrupt:\n raise\n except:\n pass" ]
[ "0.7104751", "0.670676", "0.6691065", "0.6685171", "0.64572406", "0.6428213", "0.6427255", "0.6425612", "0.6366954", "0.63388497", "0.63243264", "0.6315425", "0.62372506", "0.61818844", "0.61289996", "0.61289996", "0.6127821", "0.611862", "0.60883456", "0.6080549", "0.607218", "0.6044202", "0.60335076", "0.60193187", "0.6009682", "0.6001412", "0.600027", "0.5981998", "0.598175", "0.59811616" ]
0.8042873
0
Set the loaded background image as background pixmap for the scene.
def _setup_background(self):
    self.background_image = QtGui.QImage()
    data = self.model.get_background_image_data()
    self.background_image.loadFromData(data, 'PNG')
    self.scene().addPixmap(QtGui.QPixmap.fromImage(self.background_image))
    self.fitInView(QtCore.QRectF(self.background_image.rect()), QtCore.Qt.KeepAspectRatio)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_background(self, image):\n self.bg = pygame.image.load(image).convert()", "def set_background_image(self, imagename):\n self.background.image = ui.get_image(imagename, '/home/pi/music/images/')", "def display_background(self, imagepath):\n background_image = Image.open(imagepath)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')", "def display_background(self, imagepath):\n background_image = Image.open(imagepath)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')", "def SetBackgroundImage(self, image):\r\n\r\n self._backgroundImage = image\r\n self.Refresh()", "def display_background(self, image_path):\n # draws and paints the background with image of given path\n background_image = Image.open(image_path)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')", "def load_map(self, background_image=None):\n\n self.background_layer = BackgroundLayer(background_image)\n self.add(self.background_layer)", "def draw(self, background, image_path):\n self.image = pygame.image.load(image_path).convert_alpha()\n background.blit(self.image, (self.x_pos, self.y_pos))", "def load_background(self, filename):\n img = pygame.image.load(filename)\n return self.fit_image(img, self.width, self.height)", "def bg_set():\r\n global WIDTH, HEIGHT\r\n background = pygame.transform.scale(pygame.image.load_extended(\"images/bg.png\").convert_alpha(), (WIDTH, HEIGHT))\r\n pygame.display.update()\r\n return background", "def setBackground(self, background, dest=None, empty=0):\n if not dest:\n dest = self.screen\n self.base = pygame.display.set_mode(dest.get_size())\n backgroundGrid = g.Grid(self.rows, self.columns)\n self.fillEmptyCells(background, gameGrid=backgroundGrid)\n \n # blit tiled background image on background surface\n for r, c in backgroundGrid:\n x = r * self.imageWidth\n y = c * self.imageHeight\n currentItem = backgroundGrid.getItem(r, c)\n if currentItem != empty:\n self.base.blit(currentItem, (x, y))\n self.base = self.base.copy()", "def paint(self) -> None:\n pix = QPixmap(0, 0)\n pix.convertFromImage(self.image)\n self.scene.addPixmap(pix)", "def _open_background_image_dialog(self):\r\n new_background_image_file = QtGui.QFileDialog.getOpenFileName(parent=None, caption=\"Open background image file\",\r\n directory=\"resources\",\r\n filter=\"Image files (*.png *.jpg *.bmp)\")\r\n\r\n\r\n if new_background_image_file:\r\n # im = Image.open(new_background_image_file)\r\n # bytes = Image.toBytes()\r\n self.model.set_background_image(new_background_image_file)\r\n img = QtGui.QImage()\r\n img.load(new_background_image_file)\r\n data = QtCore.QByteArray()\r\n buf = QtCore.QBuffer(data)\r\n img.save(buf, 'PNG')\r\n self.model.set_background_image_data(data)\r\n self.updateScene_()", "def background(self, background):\n\n self._background = background", "def _build_background(self, name=None):\n if self._background is None:\n self._background = pg.Surface((self.tile_width * TILESIZE_SCREEN[0],\n self.tile_height * TILESIZE_SCREEN[1]))\n self._background.fill(BGCOLOR)\n self._create_background()\n\n if name is not None:\n pg.image.save(self._background, path.dirname(__file__) + '/' + name)", "def __create_background(self, filename):\n if self._bgcolor:\n self.c[\"bg\"] = self._bgcolor\n im = Image.open(filename).convert(\"RGBA\")\n self._imwidth, 
self._imheight = im.size\n self._cw = self.c.winfo_width()\n self._ch = self.c.winfo_height()\n if self._bgscale and (self._imwidth > self._cw or self._imheight > self._ch):\n # need increasing of image\n im = im.resize((min(self._imwidth, self._cw), min(self._imheight, self._ch)))\n self._im = ImageTk.PhotoImage(im)\n self._im.im = im\n x, y = tkutils.anchor_coords(0, 0, self._cw, self._ch, self._bganchor)\n self.tag = self.c.create_image(x, y, image=self._im, anchor=self._bganchor)\n self.c.tag_lower(self.tag, ALL) # or some symbol tag instead of ALL???\n # size of scheme\n self.width, self.height = im.size", "def draw(self, background, image_path):\n self.itemname = pygame.image.load(image_path).convert_alpha()\n background.blit(self.itemname, (self.x_pos, self.y_pos))", "def draw_background(self):\n backgrounds = {\n \"forest\": (38, 106, 46),\n \"desert\": (194, 178, 128)\n }\n self.background_surface.fill(backgrounds[self.geography])", "def main_background(self):\n self.screen.blit(self.background, (0, 0))", "def set_background(self, bg: Union[Surface, Color4]):\n if isinstance(bg, Surface):\n self.props.bg = bg\n elif isinstance(bg, Color4):\n self.props.bg_color = bg\n # set this to none; the next time we render,\n # the component will regenerate the background.\n # See _create_bg_surf() and to_surf()\n self.cached_background = None", "def background(self, Background):\n SCREEN.blit(Background, (0, 0))\n #SCREEN.fill((0,0,0))", "def paintScreen(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BACKGROUND_LEFT)\n self.imgBackgroundLeft = guiobjects.OcempImageMapTransparent(imgPath)\n self.window.add_child(self.imgBackgroundLeft)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BACKGROUND_RIGHT)\n imgBackgroundRight = guiobjects.OcempImageMapTransparent(imgPath)\n imgBackgroundRight.topleft = 297, 0\n self.window.add_child(imgBackgroundRight)", "def set_background(self, color_or_path):\n if isinstance(color_or_path, (tuple, list)):\n assert len(color_or_path) == 3, \"Length of 3 is required for RGB tuple\"\n self._background_color = color_or_path\n else:\n if not osp.isfile(color_or_path):\n raise ValueError(\"Invalid background image '{}'\".format(color_or_path))\n self._background_image = color_or_path\n self._final = None # Force rebuild", "def make_background(self):\n for x in range(self.env_list[0].size):\n for y in range(self.env_list[0].size):\n img = load_image(\"dirt.png\")[0]\n self.background.blit(img, (x*50, y*50))", "def SetBackground(*args, **kwargs):\n return _gdi_.DC_SetBackground(*args, **kwargs)", "def initImg(self):\n self.img = Image.new('RGBA',(self.width,self.height),color='#' + getConfigPart(self.theme,\"bg\"))\n self.draw = ImageDraw.Draw(self.img)", "def blit_image_as_background(file_name, m):\n image_module = m.image\n cfg = m.cfg\n pgsurface = cfg.background\n\n pgsurface.blit(image_module.load(file_name), (0, 0))", "def set_image(self, image_URL, bkg = None):\r\n\r\n self.image = self.image = pygame.image.load(image_URL).convert()\r\n if not bkg == None:\r\n # Set our transparent color\r\n self.image.set_colorkey(white)\r\n self.rect = self.image.get_rect()\r\n if self.drawable:\r\n self.set_drawable()", "def SetBackground(*args, **kwargs):\n return _gdi_.PseudoDC_SetBackground(*args, **kwargs)", "def draw_background(self):\n back = pygame.Surface(self.size)\n width, height = self.size\n self.shapes['gradient'] = shapes.gen_gradient(\n (width, height / 2),\n self.colors[3],\n self.colors[4]\n )\n 
back.blit(self.shapes['gradient'], (0, height - self.sh('gradient')))\n\n # TODO: Don't use static path/icon\n image = '/usr/share/icons/Tango/scalable/mimetypes/audio-x-generic.svg'\n self.shapes['musicimg'] = load_svg(image, [height/2]*2)\n back.blit(\n self.shapes['musicimg'],\n (width / 10, (height - self.sh('musicimg')) / 2)\n )\n return back" ]
[ "0.774536", "0.7230186", "0.68150884", "0.68150884", "0.6737236", "0.67317253", "0.65337086", "0.65113914", "0.64595455", "0.6448829", "0.6414376", "0.63675696", "0.6260566", "0.6160613", "0.6140638", "0.6133443", "0.60689527", "0.59405315", "0.5938807", "0.591215", "0.5864799", "0.5826557", "0.5824913", "0.5818605", "0.5808503", "0.57849586", "0.5757961", "0.5757167", "0.57362133", "0.5711557" ]
0.8143723
0
Open a file dialog for letting the user choose a background image
def _open_background_image_dialog(self):
    new_background_image_file = QtGui.QFileDialog.getOpenFileName(parent=None, caption="Open background image file",
                                                                  directory="resources",
                                                                  filter="Image files (*.png *.jpg *.bmp)")
    if new_background_image_file:
        # im = Image.open(new_background_image_file)
        # bytes = Image.toBytes()
        self.model.set_background_image(new_background_image_file)
        img = QtGui.QImage()
        img.load(new_background_image_file)
        data = QtCore.QByteArray()
        buf = QtCore.QBuffer(data)
        img.save(buf, 'PNG')
        self.model.set_background_image_data(data)
        self.updateScene_()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def selectImageFile(self):\n fileName = QFileDialog.getOpenFileName()\n if fileName:\n self.set_image(fileName[0])", "def showOpenImageDialog(self, event):\r\n openImageDialog = wx.FileDialog(self, \"Open\",\r\n style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\r\n if openImageDialog.ShowModal() == wx.ID_CANCEL:\r\n return\r\n self.setImage(openImageDialog.GetPath())", "def selectFile(title=\"Select image\", initialdir=None, multiple=False):\r\n file = filedialog.askopenfilename(\r\n initialdir=initialdir,\r\n multiple=multiple,\r\n title=title\r\n )\r\n return file", "def file(self, win):\n name = QtWidgets.QFileDialog.getOpenFileName(win, 'Open file')\n self.file_name = name[0]\n self.setImage(name[0])", "def image_file_dialog(self):\r\n extensionFilter=(\"TIFF files (*.tiff *.TIFF *.tif *.TIF)\"\r\n \"Image files (*.tiff *.TIFF *.tif *.jpg *.jpeg *.png *.bmp *.gif);;\"\r\n \"All files (*)\")\r\n \r\n filePath =QFileDialog.getOpenFileName(self,\r\n caption = 'select a scanned image',\r\n directory = self.ui.imagePath.text(),\r\n filter = extensionFilter)\r\n #in pyqt5 a tuple is returned, unpack it\r\n if os.environ['QT_API'] == 'pyqt5':\r\n filePath, _ = filePath\r\n \r\n if filePath != '':\r\n self.ui.imagePath.setText(filePath)\r\n self.image_path_changed()\r\n else:\r\n logging.info('file selection canceled')", "def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n self.cnvImgTest.displayImage(filename)", "def openFileDialog(self): \n self.dialog = ocempgui.widgets.Box(373, 372)\n self.dialog.topleft = 528, 205\n\n background = guiobjects.OcempImageMapTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(WINDOW_UPLOAD))\n self.dialog.add_child(background)\n \n self.listDir = guiobjects.OcempImageFileList(310, 239)\n self.listDir.topleft = 31, 60\n self.dialog.add_child(self.listDir)\n\n buttonOK = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK), self.buttonTooltips[\"ok\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonOK.topleft = [233, 308]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"OK\")\n self.dialog.add_child(buttonOK)\n \n buttonCancel = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL), self.buttonTooltips[\"cancel\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonCancel.topleft = [122, 308]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"KO\")\n self.dialog.add_child(buttonCancel)\n\n self.window.add_child (self.dialog)", "def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()", "def onclick_open_image(self):\n filename = select_file(\n \"Select Image\",\n \"../\",\n \"Image Files (*.jpeg *.jpg *.png *.gif *.bmg)\")\n if filename:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.image = read_image(filename)\n self.h, self.w = 
self.image.shape[:2]\n self.show_to_window()", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def _launch_file_b(self):\n types = [\n (\"JPG\", \"*.jpg\"),\n (\"Bitmap\", \"*.bmp\"),\n (\"PNG\", \"*.png\"),\n (\"GIF\", \"*.gif\"),\n (\"All files\", \"*\")]\n dialog = tkFileDialog.Open(self, filetypes = types)\n self._file_path = dialog.show()\n\n self._file_name = self._scrub_name(self._file_path)\n self._move_img()\n return self._file_name", "def askopenfilename(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def open_file(self, event=None):\n file = fd.askopenfile(title=\"Choose file to open\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n if imghdr.what(\n file.name): # if file is image return image type otherwise return None if file is not an image type\n from project_explorer import ProjectExplorer\n ProjectExplorer().open_image(file.name)\n else:\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()", "def showFileDialog():\n\ttry:\n\t\timage_file_name = tkinter.filedialog.askopenfilename(filetypes=[(\"Image File\",['.jpg','.jpeg','.png'])])\n\t\tif image_file_name:\n\t\t\timage = Image.open(image_file_name)\n\t\t\twidth, height = image.size\n\t\t\t# let us check the image size meets criteria of application.if not let us prompt user to choose different file\n\t\t\tif width <= 500 or height <= 500 or width >= 700 or height >= 700:\n\t\t\t\tmbox.showinfo(\"Image Size\", \"Image width and height size cannot be less than 500 pixel or more than 700 pixel\")\t\n\t\t\t\tshowFileDialog()\n\t\t\telse:\t\n\t\t\t\tprint(image_file_name)\n\texcept:\n\t\tpass", "def __create_background(self, filename):\n if self._bgcolor:\n self.c[\"bg\"] = self._bgcolor\n im = Image.open(filename).convert(\"RGBA\")\n self._imwidth, self._imheight = im.size\n self._cw = self.c.winfo_width()\n self._ch = self.c.winfo_height()\n if self._bgscale and (self._imwidth > self._cw or self._imheight > self._ch):\n # need increasing of image\n im = im.resize((min(self._imwidth, self._cw), min(self._imheight, self._ch)))\n self._im = ImageTk.PhotoImage(im)\n self._im.im = im\n x, y = tkutils.anchor_coords(0, 0, self._cw, self._ch, self._bganchor)\n self.tag = self.c.create_image(x, y, image=self._im, anchor=self._bganchor)\n self.c.tag_lower(self.tag, ALL) # or some symbol tag instead of ALL???\n # size of scheme\n self.width, self.height = im.size", "def file_popup(file) -> str:\n layout = [\n [sg.Text(f\"Select the action to perform on\\n\\n{file}\")],\n [sg.Button(\"Open File\", key=\"-APP-\"),\n sg.Button(\"Open in File Explorer\", key=\"-EXPLORER-\"),\n sg.Button(\"Delete File\", 
key=\"-DEl-\",\n button_color=(\"Black\", \"OrangeRed\"))]\n ]\n window = sg.Window(\"Open selected file.\", layout, finalize=True)\n button, value = window.read()\n window.close()\n del window\n return button", "def choose_file():\r\n import tkinter\r\n from tkinter import filedialog\r\n\r\n root_window = tkinter.Tk()\r\n root_window.withdraw()\r\n\r\n return filedialog.askopenfilename()", "def ask_file(message=\"Select file for open.\", title=None):\n return dialog(\"ask_file\", message=message, title=title)", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def get_path_to_image():\n file_types = [\n (\"JPEG Image\", '*.jpeg; *jpg'),\n (\"PNG Image\", '*.png'),\n (\"BPM Image\", '*.bmp'),\n (\"Netpbm Image\", '*.ppm; *.pgm; *.pbm; *pnm')\n ]\n\n GlobalVar.file_path = filedialog.askopenfilename(filetypes=file_types)\n GlobalVar.name_original = GlobalVar.file_path.split('/')[-1]\n GlobalVar.is_open_image = True\n\n read_image(GlobalVar.file_path)", "def open_file(self):\n try:\n filename = tkFileDialog.askopenfilename()\n file = open(filename)\n self.image_window.status.config(text='Opened: ' + filename)\n return file\n except:\n self.status.config(text='You fool!')\n tkMessageBox.showwarning(\"Open file\",\n \"Cannot open file \" + filename)\n return None", "def choose_file(self):\n pass", "def showImage(self, filePath): \n size = 244, 244 \n try:\n guiobjects.generateImageSize(filePath, [244, 244], IMG_UPLOAD)\n except:\n return \n imgPath = IMG_UPLOAD\n img = ocempgui.draw.Image.load_image(imgPath)\n self.imgOptionsTab.picture = img\n self.generateMask(\"imgUpload.png\")", "def open(self, filepath=None):\n if filepath is None:\n filepath, dummy = QFileDialog.getOpenFileName(self, \"Open image file.\")\n if len(filepath) and os.path.isfile(filepath):\n image = QImage(filepath)\n self.setImage(image)", "def import_file(self):\n from tkinter import filedialog\n self.filepath = filedialog.askopenfilenames(\n initialdir=\"/\", title=\"Select file\",\n filetypes=((\"PNG files\", \"*.png\"),\n (\"JPEG files\", \"*.jpeg\"),\n (\"TIFF files\", \"*.tiff\"),\n (\"ZIP files\", \"*.zip\"),\n (\"all files\", \"*.*\")))", "def on_open_button(self, event):\n wildcard = \"All files (*.*)|*.*|\"\\\n \"Preprocessed _iso_res.csv file (*_iso_res.csv)|*_iso_res.csv|\"\\\n \"Massacre iso_csv file (*_iso.csv)|*_iso.csv|\"\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n \n if dlg.ShowModal() == wx.ID_OK:\n fullname = dlg.GetPaths()[0].split('/')\n dpa = '/'.join(fullname[:-1]) + '/'\n self.currentDirectory = dpa\n fna = fullname[-1]\n [dfr, pul, vlab] = openFile(dpa+fna)\n startApp(dfr, dpa, fna, pul, vlab, fsize=self.fsize, size=self.size)\n\n dlg.Destroy()", "def importImg(self):\n logger.info(\"import image \"+ str(self))\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Image',\n BASE_DIR,\"Image files (*.jpg *.gif *.png)\")\n logger.debug(file)\n self.imageFile = file\n self.image.setPixmap(QtGui.QPixmap(file))\n self.image.adjustSize()", "def on_source_img_browse_btn_click(self):\r\n\t\tdlg = QFileDialog()\r\n\t\toptions = dlg.Options()\r\n\t\toptions |= QFileDialog.DontUseNativeDialog\r\n\t\tsource_img_filename, _ = dlg.getOpenFileName(\r\n\t\t\tself,\r\n\t\t\t\"Select Input Numpy Array\",\r\n\t\t\t\".\",\r\n\t\t\t\"NumPy Files 
(*.npy)\",\r\n\t\t\toptions=options)\r\n\t\tif source_img_filename:\r\n\t\t\tself.filestate.set_source_img_filename(source_img_filename)\r\n\t\t\tself.check_line_edits_and_refresh_filestate()\r\n\t\t\tself.refresh_UI()", "def FileOpenDialog( message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile='' ):\n style = style | wx.OPEN | wx.CHANGE_DIR\n return FileDialog( message, wildcard, style, defaultDir, defaultFile )" ]
[ "0.69813025", "0.69487387", "0.6716531", "0.6712335", "0.65396273", "0.64668244", "0.6403048", "0.63787997", "0.6329275", "0.63117164", "0.62948525", "0.6234983", "0.6209773", "0.6207649", "0.6191353", "0.60875744", "0.60574764", "0.60410905", "0.6035234", "0.60351324", "0.60278684", "0.60261726", "0.6008692", "0.5946059", "0.59133905", "0.58581007", "0.58426625", "0.5830996", "0.5830937", "0.58194196" ]
0.8290579
0
Update the scene by first clearing it, and then add everything it should contain.
def updateScene_(self):
    self.scene().clear()
    self._setup_background()
    self._add_sockets()
    self._add_rooms()
    self._add_fuses()
    self._add_switchs()
    self._add_lamp_outlets()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_scene(self, event):\n self.shapes = []\n self.redraw()", "def refresh(self):\n\n # Set Graphics scene\n self.setScene(QtGui.QGraphicsScene())\n self._connections = set()\n self._nodes = {}\n self._selection = set()\n self._manipulation_mode = 0\n self._selection_rect = None", "def clean_all(self):\n self.scene.clear()\n self.image.fill(Qt.color0)", "def redraw(self):\n bpy.context.scene.objects.active = bpy.context.scene.objects.active", "def removeAllItems(self):\n\n if self.sceneItems:\n # clear all the marks in the scene\n self.scene.clear()\n\n # add our background pixmap back to the scene\n self.imgPixmapItem = self.scene.addPixmap(self.imgPixmap)\n\n # clear the scene items list\n self.sceneItems = []\n\n # update the viewport\n self.viewport().update()", "def redraw(self):\n self.scene.redraw()\n self.SwapBuffers()", "def clearViewer(self):\n\n self.removeScene()\n self.createScene()", "def clear_scene(self):\n # Save grid visibility\n restore = self.__grid_visibility\n\n # Set invis\n if restore:\n self.__graphics_grid.set_visibility(False)\n\n # Set all objects invis\n for obj in self.scene.objects:\n obj.visible = False\n\n # Restore grid (if needed)\n if restore:\n self.__graphics_grid.set_visibility(True)", "def clear_scene(self):\n # Set all robots variables as invisible\n for robot in self.__robots:\n robot.set_reference_visibility(False)\n robot.set_robot_visibility(False)\n\n self.scene.waitfor(\"draw_complete\")\n\n new_list = []\n for name in self.__ui_controls.get('menu_robots').choices:\n new_list.append(name)\n\n self.__selected_robot = 0\n self.__reload_caption(new_list)", "def updateWorld(self):\n\t self.screen.clear()\n self.update()\n self.screen.refresh()", "def update_scenes(self) -> None:\n self.scenes.update(\n {\n f\"{group.id}_{scene.id}\": scene\n for group in self.groups.values() # type: ignore\n for scene in group.scenes.values()\n if f\"{group.id}_{scene.id}\" not in self.scenes\n }\n )", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")", "def rebuild( self, scene = None ):\n rect = QRectF( 0, 0, self.minimumWidth(), self.minimumHeight() )\n self.setRect( rect )\n return True", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\r\n slicer.mrmlScene.Clear(0)", "def setUp(self):\r\n slicer.mrmlScene.Clear(0)", "def 
setUp(self):\r\n slicer.mrmlScene.Clear(0)", "def _update(self):\n if self._need_display_update:\n self._need_display_update = False\n\n self._set_view_slice(self.viewer.dims.indices)\n\n if self._need_visual_update:\n self._need_visual_update = False\n self._node.update()", "def setUp(self):\r\n slicer.mrmlScene.Clear()", "def refresh(self):\n\n assets_model = self.data[\"model\"][\"assets\"]\n assets_model.clear()\n\n has = {\"children\": False}\n\n project = io.ObjectId(os.environ[\"MINDBENDER__PROJECT\"])\n assets = io.find({\"type\": \"asset\", \"parent\": project})\n for asset in sorted(assets, key=lambda i: i[\"name\"]):\n item = QtWidgets.QListWidgetItem(asset[\"name\"])\n item.setData(QtCore.Qt.ItemIsEnabled, True)\n item.setData(DocumentRole, asset)\n assets_model.addItem(item)\n has[\"children\"] = True\n\n if not has[\"children\"]:\n item = QtWidgets.QListWidgetItem(\"No assets found\")\n item.setData(QtCore.Qt.ItemIsEnabled, False)\n assets_model.addItem(item)\n\n assets_model.setFocus()\n assets_model.setCurrentRow(0)\n self.data[\"button\"][\"load\"].hide()\n self.data[\"button\"][\"stop\"].hide()", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)", "def setUp(self):\n slicer.mrmlScene.Clear(0)" ]
[ "0.7382362", "0.72062266", "0.7178352", "0.70843875", "0.70334196", "0.68328506", "0.68294317", "0.6741447", "0.6685606", "0.656615", "0.64471614", "0.64011264", "0.637065", "0.63602686", "0.63602686", "0.62540436", "0.6220356", "0.6220356", "0.6220356", "0.61881584", "0.6180982", "0.6156833", "0.61334336", "0.61334336", "0.61334336", "0.61334336", "0.61334336", "0.61334336", "0.61334336", "0.61334336" ]
0.844773
0
Create a new TargetItem and TargetInfoWidget and add them to the scene
def _add_room(self, room_model):
    # Create a new TargetItem and set the model
    room_item = RoomItem()
    room_item.setModel(room_model)
    # Create a new TargetInfoWidget and set the model
    room_widget = RoomInfoWidget()
    room_widget.setModel(room_model)
    # Connect signals emitted by TargetItem
    # room_item.moved.connect(room_model.set_pos)
    room_item.double_clicked.connect(room_widget.show)
    room_item.deleteRoomAction.connect(room_model.prepare_for_deletion)
    # Connect signals emitted by TargetInfoWidget
    room_widget.finished.connect(room_widget.hide)
    self.scene().addItem(room_item)
    room_item.update()
    proxy = self.scene().addWidget(room_widget)
    room_widget.setProxy(proxy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_target(self, widget):\n\t\tself.main.window.set_sensitive(False)\n\t\tself.add_window = Targetadd(self.engine.database)\n\t\tself.add_window.cancel_button.connect(\"clicked\", self._sensitive_true, False)\n\t\tself.add_window.add_button.connect(\"clicked\", self._sensitive_true, True)\n\t\tself.add_window.window.connect(\"close\", self._sensitive_true, False)", "def create_target(self):\n\n # I used a random number variable (rand_target) in order to randomize the target created each time this function\n # is called.\n stand = StandardTarget()\n strong = StrongTarget()\n safe = SafeTarget()\n bird = Bird()\n\n rand_target = random.randint(1, 4)\n if rand_target == 1:\n self.targets.append(stand)\n elif rand_target == 2:\n self.targets.append(strong)\n elif rand_target == 3:\n self.targets.append(safe)\n elif rand_target == 4:\n self.targets.append(bird)", "def add_target(self, p):\n\n name, ok = QtWidgets.QInputDialog.getText(None, \"Target name\", \"Target name\", text=p.h)\n if not ok:\n return\n\n g.app.db['_quickmove']['global_targets'].append({\n 'name': name,\n 'unl': p.get_UNL(),\n })\n\n # make sure g.app.db knows it's been changed\n g.app.db['_quickmove'] = g.app.db['_quickmove']", "def add_target(self, target_obj):\n self.loaded_targets[target_obj.model_name] = target_obj", "def builder_will_create_target_image(self, builder, target, image_id, template, parameters):", "def create_item(window, text, x, y, parent, color=(100,193,212), command=None, bevel=True):\r\n font = pygame.font.Font(None, 20)\r\n text_width, text_height = font.size(text)\r\n text_surface = font.render(text,True,(0,0,0))\r\n rect = pygame.Rect(x-int(text_width/2.0),\r\n y-int(text_height/2.0),\r\n text_width,\r\n text_height)\r\n if command:\r\n return Button(text_surface=text_surface,\r\n color=color,\r\n bounds_relative_to_parent=rect,\r\n parent_bounds=parent.bounds,\r\n command=command,\r\n window=window,\r\n bevel=bevel)\r\n else:\r\n return Label(text_surface=text_surface,\r\n color=color,\r\n bounds_relative_to_parent=rect,\r\n parent_bounds=parent.bounds,\r\n window=window)", "def builder_did_create_target_image(self, builder, target, image_id, template, parameters):", "def CopyTarget(self, target):\r\n\r\n drop = AuiPaneInfo()\r\n drop.name = target.name\r\n drop.caption = target.caption\r\n drop.window = target.window\r\n drop.frame = target.frame\r\n drop.state = target.state\r\n drop.dock_direction = target.dock_direction\r\n drop.dock_layer = target.dock_layer\r\n drop.dock_row = target.dock_row\r\n drop.dock_pos = target.dock_pos\r\n drop.best_size = wx.Size(*target.best_size)\r\n drop.min_size = wx.Size(*target.min_size)\r\n drop.max_size = wx.Size(*target.max_size)\r\n drop.floating_pos = wx.Point(*target.floating_pos)\r\n drop.floating_size = wx.Size(*target.floating_size)\r\n drop.dock_proportion = target.dock_proportion\r\n drop.buttons = target.buttons\r\n drop.rect = wx.Rect(*target.rect)\r\n drop.icon = target.icon\r\n drop.notebook_id = target.notebook_id\r\n drop.transparent = target.transparent\r\n drop.snapped = target.snapped\r\n drop.minimize_mode = target.minimize_mode\r\n\r\n return drop", "def createtarget(self, lang, gentarget, dependees):\r\n raise NotImplementedError", "def _createPlace3dTextureMenuItems(node):\n pass", "def create_target(self):\r\n if random.randint(1, 4) == 1:\r\n target = StandardTarget()\r\n self.targets.append(target)\r\n \r\n elif random.randint(1, 4) == 2:\r\n target = StrongTarget()\r\n self.targets.append(target)\r\n \r\n elif 
random.randint(1, 4) == 3:\r\n target = SafeTarget()\r\n self.targets.append(target)\r\n \r\n elif random.randint(1, 4) == 4:\r\n target = BonusTarget()\r\n self.targets.append(target)\r\n # TODO: Decide what type of target to create and append it to the list\r", "def _createTextureMenuItems(ned, node):\n pass", "def create(self):\n self.add_handlers({\"^T\": self.change_forms,\"^Q\": self.exit})\n self.add(npyscreen.TitleFixedText, name='Inventory items:', value='')\n self.inventory_mle = self.add(npyscreen.Pager,\n values=['Checking for plugins in the inventory, please wait...'])", "def _addTextureMenuItems(ned, node):\n pass", "def target_create(obj, product_name, slo_id, sli_name, target_from, target_to, target_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slo = client.slo_list(product, id=slo_id)\n if not slo:\n fatal_error('SLO {} does not exist'.format(slo_id))\n\n slo = slo[0]\n\n product = client.product_list(name=slo['product_name'])[0]\n\n sli = client.sli_list(product=product, name=sli_name)\n if not sli or not sli_name:\n fatal_error('SLI {} does not exist'.format(sli_name))\n sli = sli[0]\n\n with Action(\n 'Creating Targets for SLO: {} for product: {}'.format(slo['title'], slo['product_name']), nl=True) as act:\n if target_file:\n target = json.load(target_file)\n else:\n target = {'sli_uri': sli['uri'], 'from': target_from, 'to': target_to}\n\n validate_target(target, act)\n\n if not act.errors:\n t = client.target_create(slo, target['sli_uri'], target_from=target.get('from'), target_to=target.get('to'))\n\n print(json.dumps(t, indent=4))", "def create_widgets(self):", "def _addPlace3dTextureMenuItems(ned, node):\n pass", "def scene_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n thumbnails_path = get_directory('icons')\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if AM.scene_name not in thumb_list or AM.scene_name in thumb_list and AM.replace_rename == 'replace':\r\n if AM.scene_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(AM.scene_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n box.prop(AM, \"scene_name\", text=\"\")\r\n \r\n row = box.row(align = True)\r\n row.label(\"Scene name:\")\r\n row.prop(AM, \"scene_name\", text = \"\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n\r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n 
row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n \r\n if AM.scene_name and ((AM.scene_name not in thumb_list or AM.replace_rename == 'replace') and AM.render_type == 'opengl' or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_scene_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(AM.scene_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n box.prop(AM, \"scene_name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def create_widgets( self ):", "def _createItem(self, rpcObject):\n item = ShowWidgetItem(rpcObject, self)\n return item", "def addTargetShip(self, targetID):\n self.targets.append(targetID)", "def materials_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n view = context.space_data\r\n thumbnails_path = get_directory('icons')\r\n library_path = get_library_path()\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n if AM.as_mat_scene:\r\n thumb_list = thumb + [\"AM_Cloth\", \"AM_Sphere\"]\r\n else: \r\n thumb_list = thumb\r\n\r\n cam_is_valid = False\r\n obj_is_valid = False\r\n \r\n \r\n if not AM.as_mat_scene and not bpy.context.object:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"No active_object in the scene\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n elif not AM.as_mat_scene and not bpy.context.active_object.active_material:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"The object have no material\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", 
text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.as_mat_scene and not isdir(join(library_path, 'materials', \"Render Scenes\")):\r\n box.operator(\"object.create_rder_scn_lib\", text = \"Create render scene library\", icon = 'FILESEL')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n \r\n if AM.as_mat_scene:\r\n asset_name = AM.scene_name\r\n else:\r\n active_mat = context.active_object.active_material\r\n asset_name = active_mat.name\r\n \r\n if len(bpy.context.active_object.material_slots) == 1:\r\n AM.multi_materials = False\r\n \r\n if AM.as_mat_scene and (not asset_name in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace') or\\\r\n not AM.as_mat_scene and (AM.multi_materials and get_valid_materials() or not AM.multi_materials and asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace'): \r\n if not AM.multi_materials:\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2 and AM.replace_rename == 'rename':\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n if not AM.as_mat_scene and len(bpy.context.active_object.material_slots) >= 2:\r\n if len(get_valid_materials()) != len(bpy.context.active_object.material_slots) and AM.multi_materials:\r\n box.label(\"Some materials wont be added\", icon = 'ERROR')\r\n box.label(\" because there already exist\")\r\n row = box.row()\r\n row.prop(AM, \"multi_materials\", text = \"All materials\")\r\n if AM.as_mat_scene:\r\n row = box.row(align = True)\r\n row.label(\"Scene name:\")\r\n row.prop(AM, \"scene_name\", text = \"\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n if AM.as_mat_scene:\r\n for obj in context.scene.objects:\r\n if obj.type == 'CAMERA':\r\n cam_is_valid = True\r\n \r\n if len([obj for obj in context.selected_objects if obj.type != 'CAMERA' and bpy.context.active_object == obj]) == 1:\r\n obj_is_valid = True\r\n \r\n row = box.row()\r\n row.label(\"Selected object rendering\", icon = 'FILE_TICK' if obj_is_valid else 'CANCEL')\r\n row = box.row()\r\n row.label(\"Camera in the scene\", icon = 'FILE_TICK' if cam_is_valid else 'CANCEL')\r\n if not cam_is_valid:\r\n row = box.row()\r\n row.operator(\"object.camera_add\", text = \"Add camera\", icon = 'OUTLINER_OB_CAMERA')\r\n \r\n if not AM.as_mat_scene:\r\n # --------------------- # \r\n # RENDER THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'render':\r\n row = box.row(align = True)\r\n row.label(\"Thumbnail:\")\r\n row.prop(AM, \"mat_thumb_type\", text = \"\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in 
context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n\r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if (AM.as_mat_scene and AM.scene_name and cam_is_valid and obj_is_valid or not AM.as_mat_scene) and (AM.render_type == 'render' or (asset_name not in thumb_list or AM.replace_rename == 'replace') and AM.render_type == 'opengl' or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n if AM.as_mat_scene:\r\n row.operator(\"object.add_scene_in_library\", text=\"OK\", icon='FILE_TICK')\r\n else:\r\n row.operator(\"object.add_material_in_library\", text=\"OK\", icon='FILE_TICK')\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.multi_materials and not get_valid_materials():\r\n box.label(\"All materials already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n else:\r\n AM.multi_materials = False\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def create_widget(self):\n item = QNodeItem(self)\n self.widget = item", "def targets_placeholder(self):", "def _create_viewer_item(viewer, name=None, reference=None):\n tools = viewer.toolbar_selection_tools\n tools.borderless = True\n tools.tile = True\n\n return {\n 'id': str(uuid.uuid4()),\n 'name': name or \"Unnamed Viewer\",\n 'widget': \"IPY_MODEL_\" + viewer.figure_widget.model_id,\n 'tools': \"IPY_MODEL_\" + viewer.toolbar_selection_tools.model_id,\n 'layer_options': \"IPY_MODEL_\" + viewer.layer_options.model_id,\n 'viewer_options': \"IPY_MODEL_\" + viewer.viewer_options.model_id,\n 'selected_data_items': [],\n 'collapse': True,\n 'reference': reference}", "def create_on_scene(self, name, **kwargs):\n monster = self.create(name, **kwargs)\n yield monster, self._level\n self._level.add(monster)", "def tb_add_tool(tb, id, name, img, description):\n tb.AddTool(id, name, wx.ArtProvider.GetBitmap(img), description)", "def create(self, tree):\n super(WxTraitsItem, self).create(tree)\n self._model = tree['model']\n self._view = tree['view']\n self._handler = tree['handler']", "def _add_lamp_outlet(self, model):\r\n\r\n # Create a new CameraItem and 
set the model\r\n item = LampOutletItem()\r\n item.setModel(model)\r\n\r\n # Create a new CameraInfoWidget and set the model\r\n widget = LampOutletInfoWidget()\r\n widget.setModel(model)\r\n\r\n item.double_clicked.connect(widget.show)\r\n item.deleteSocketAction.connect(model.prepare_for_deletion)\r\n\r\n self.scene().addItem(item)\r\n proxy = self.scene().addWidget(widget)\r\n widget.setProxy(proxy)", "def _createTextureUtilityMenuItems(ned, node):\n pass" ]
[ "0.59375006", "0.57411295", "0.57406944", "0.5679419", "0.5618925", "0.55654866", "0.5560117", "0.5528846", "0.55212057", "0.5515769", "0.5513372", "0.5508875", "0.55074173", "0.5503268", "0.5492187", "0.54622674", "0.54380214", "0.54370105", "0.5423204", "0.54188454", "0.53897196", "0.5383142", "0.5375035", "0.5333821", "0.5324112", "0.5304577", "0.5304306", "0.5258635", "0.5243104", "0.52383363" ]
0.6025717
0
Raises a ValidationError if the value does not have length 32
def validate_authkey(value):
    if not len(value) == 32:
        raise ValidationError(
            'Value must be a string containing 32 alphanumeric characters')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def longer_than_9(value):\n if len(value) < 10:\n raise forms.ValidationError('must be 10 characters or longer')", "def validate_length(string):\n if len(string) > 110:\n raise ValidationError('Tweet must be less than 110 characters')", "def clean(self, value):\r\n if value and (len(value) < 13 or len(value) > 16):\r\n raise forms.ValidationError(\"Please enter in a valid credit card number.\")\r\n return super(CreditCardField, self).clean(value)", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def validate_min_length(cls, value: str, field: ModelField) -> str:\n if cls.min_length is not None and len(value) < cls.min_length:\n raise InvalidLengthValue(\n field_name=field.name, constraint=cls.min_length, operation='min'\n )\n return value", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, code=self.code)", "def test_max_length_validation(self):", "def test_max_length_validation(self):", "def validate(cls, **kwargs: Any) -> None:\n max_length = kwargs.get(\"max_length\", None)\n if max_length <= 0:\n raise ModelDefinitionError(\n \"Parameter max_length is required for field String\"\n )", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def validate_password_length(value):\r\n message = _(\"Invalid Length ({0})\")\r\n code = \"length\"\r\n\r\n min_length = getattr(settings, 'PASSWORD_MIN_LENGTH', None)\r\n max_length = getattr(settings, 'PASSWORD_MAX_LENGTH', None)\r\n\r\n if min_length and len(value) < min_length:\r\n raise ValidationError(message.format(_(\"must be {0} characters or more\").format(min_length)), code=code)\r\n elif max_length and len(value) > max_length:\r\n raise ValidationError(message.format(_(\"must be {0} characters or less\").format(max_length)), code=code)", "def _validate_string_min_length(self, value):\n if self.min_length is not None:\n return len(str(value)) >= self.min_length\n else:\n return True", "def test_more_than_max_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']) + 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def __call__(self, value):\n if value is None:\n return value\n\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if not value.isdigit():\n raise ValidationError(_(\"AHV must contain numbers only\"))\n if len(value) != 13:\n raise ValidationError(_(\"AHV must be 13 numbers long.\"))\n\n if self.ahv_checksum(value[:-1]) != value[-1]:\n raise ValidationError(_(\"Not a valid AHV number.\"))", "def validate_max_length(cls, value: str, field: ModelField) -> str:\n if cls.max_length is not None and len(value) > cls.max_length:\n raise InvalidLengthValue(\n field_name=field.name, constraint=cls.max_length, operation='max'\n )\n return value", "def test_validate_min_length(self):\n\n test_strings = [\n 'oa',\n 'al',\n 'v',\n ]\n\n testrow = TestSchema()\n\n for test_string in test_strings:\n testrow.string_min_field = test_string\n self.assertRaises(Exception, testrow.save)", "def validate(self, value):\n if super().validate(value):\n return (value 
is None) or (isinstance(value, str) and self._validate_length(value))\n else:\n return False", "def validate(self, value):\n super(MACAddressField, self).validate(value)\n if value:\n try:\n value = EUI(str(value), dialect=mac_bare)\n return\n except (ValueError, TypeError, ValidationError):\n raise ValidationError(self.error_messages[\"invalid\"] % {\"value\": value})", "def check_length(length):\n if length > lengthLimit:\n err_str = \"The length value (%s) is higher than the \" % (length)\n err_str += \"limit length (%s)\" % (lengthLimit)\n raise ValueError(err_str)", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def ci_val(value):\n min_length = 5\n max_length = 8\n\n ci_dupli = User.objects.filter(ci=value)\n\n if len(str(value)) < min_length:\n raise ValidationError(_('La cédula debe tener al menos {0} caracteres').format(min_length))\n \n if len(str(value)) > max_length:\n raise ValidationError(_('La cédula debe tener maximo {0} caracteres').format(max_length))\n \n if ci_dupli.exists():\n raise ValidationError(_('Cédula ya esta registrada'))\n\n if any(char.isalpha() for char in str(value)):\n raise ValidationError(_('No puede contener letras'))", "def test_less_than_min_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MIN_PWD_LEN']) - 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def validate_others(val, min_range, max_range, field):\n if len(val) < min_range:\n raise ValidationError(\"{field} should contain at least {min} character\".format(field=field, min=min_range))\n if len(val) > max_range:\n raise ValidationError(\"{field} cannot contain more than {max} characters\".format(field=field, max=max_range))", "def _validate_string_max_length(self, value):\n if self.max_length is not None:\n return len(str(value)) <= self.max_length\n else:\n return True", "def test_validation_fails_with_invalid_field_length(self):\n\n result = LandCompensationLandSoldValidator.validate(INVALID_FIELD_LENGTH, INVALID_FIELD_LENGTH)\n self.assertEqual(2, len(result.errors))\n self.assertEqual('Answer too long', result.errors['land-sold-description'].summary_message)\n self.assertEqual('Answer too long', result.errors['land-works-particulars'].summary_message)", "def validate_index(value: str):\n if len(value) != 6 or not value.isdigit():\n raise ValidationError(\n _(f'%(value)s is not a proper index number, len={len(value)}, isdigit={value.isdigit()}'),\n params={'value': value},\n )", "def test_validate_max_length(self):\n test_strings = [\n 'thisloooooooooooooooongstring',\n True,\n 45,\n ]\n\n testrow = TestSchema()\n\n for test_string in test_strings:\n testrow.string_max_field = test_string\n self.assertRaises(Exception, testrow.save)" ]
[ "0.72884405", "0.6955354", "0.6885741", "0.68818116", "0.6824072", "0.66354096", "0.6615882", "0.6587561", "0.6587561", "0.65413004", "0.6537108", "0.65263134", "0.65212256", "0.6502707", "0.6483852", "0.6479096", "0.6475491", "0.6464795", "0.6441646", "0.6430137", "0.6428241", "0.6397657", "0.63946486", "0.63633776", "0.63574886", "0.635393", "0.634495", "0.63146013", "0.63027966", "0.62930113" ]
0.76096874
0
Get the body of the bind c function definition. Get the body of the bind c function definition by inserting if blocks to check the presence of optional variables. Once we have ascertained the presence of the variables, the original function is called. This code slices array variables to ensure the correct step.
def _get_function_def_body(self, func, func_def_args, func_arg_to_call_arg, results, handled = ()):
    optional = next((a for a in func_def_args if a.original_function_argument_variable.is_optional and a not in handled), None)
    if optional:
        args = func_def_args.copy()
        optional_var = optional.var
        handled += (optional, )
        true_section = IfSection(PyccelIsNot(optional_var, Nil()),
                                 self._get_function_def_body(func, args, func_arg_to_call_arg, results, handled))
        args.remove(optional)
        false_section = IfSection(LiteralTrue(),
                                  self._get_function_def_body(func, args, func_arg_to_call_arg, results, handled))
        return [If(true_section, false_section)]
    else:
        args = [FunctionCallArgument(func_arg_to_call_arg[fa], keyword = fa.original_function_argument_variable.name)
                for fa in func_def_args]
        size = [fa.shape[::-1] if fa.original_function_argument_variable.order == 'C' else fa.shape
                for fa in func_def_args]
        stride = [fa.strides[::-1] if fa.original_function_argument_variable.order == 'C' else fa.strides
                  for fa in func_def_args]
        orig_size = [[PyccelMul(l,s) for l,s in zip(sz, st)] for sz,st in zip(size,stride)]
        body = [C_F_Pointer(fa.var, func_arg_to_call_arg[fa].base, s)
                for fa,s in zip(func_def_args, orig_size)
                if isinstance(func_arg_to_call_arg[fa], IndexedElement)]
        body += [C_F_Pointer(fa.var, func_arg_to_call_arg[fa])
                 for fa in func_def_args
                 if not isinstance(func_arg_to_call_arg[fa], IndexedElement)
                 and fa.original_function_argument_variable.is_optional]
        # If the function is inlined and takes an array argument create a pointer to ensure that the bounds
        # are respected
        if func.is_inline and any(isinstance(a.value, IndexedElement) for a in args):
            array_args = {a: self.scope.get_temporary_variable(a.value.base, a.keyword, memory_handling = 'alias')
                          for a in args if isinstance(a.value, IndexedElement)}
            body += [AliasAssign(v, k.value) for k,v in array_args.items()]
            args = [FunctionCallArgument(array_args[a], keyword=a.keyword) if a in array_args else a for a in args]
        func_call = Assign(results[0], FunctionCall(func, args)) if len(results) == 1 else \
                    Assign(results, FunctionCall(func, args))
        return body + [func_call]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __compile_subroutine_body(self):\r\n self.compile_statements()", "def _get_call_argument(self, bind_c_arg):\n original_arg = bind_c_arg.original_function_argument_variable\n arg_var = self.scope.find(original_arg.name, category='variables')\n if original_arg.is_ndarray:\n start = LiteralInteger(1) # C_F_Pointer leads to default Fortran lbound\n stop = None\n indexes = [Slice(start, stop, step) for step in bind_c_arg.strides]\n return IndexedElement(arg_var, *indexes)\n else:\n return arg_var", "def get_helper_c_code_args(self):\r\n return {'c_prefix': 'CudaNdarray',\r\n 'strides_mul': 4\r\n }", "def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. 
requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)", "def _wrap_Variable(self, expr):\n if expr.rank == 0 and expr.dtype in NativeNumeric:\n return expr.clone(expr.name, new_class = BindCVariable)\n else:\n scope = self.scope\n func_name = scope.get_new_name('bind_c_'+expr.name.lower())\n func_scope = scope.new_child_scope(func_name)\n mod = expr.get_user_nodes(Module)[0]\n import_mod = Import(mod.name, AsName(expr,expr.name), mod=mod)\n func_scope.imports['variables'][expr.name] = expr\n\n # Create the data pointer\n bind_var = Variable(dtype=BindCPointer(),\n name=scope.get_new_name('bound_'+expr.name),\n is_const=True, memory_handling='alias')\n func_scope.insert_variable(bind_var)\n\n result = BindCFunctionDefResult(bind_var, expr, func_scope)\n if expr.rank == 0:\n #assigns = []\n #c_loc = CLocFunc(expr, bind_var)\n raise NotImplementedError(\"Classes cannot be wrapped\")\n else:\n assigns = [Assign(result.shape[i], expr.shape[i]) for i in range(expr.rank)]\n c_loc = CLocFunc(expr, bind_var)\n body = [*assigns, c_loc]\n func = BindCFunctionDef(name = func_name,\n body = body,\n arguments = [],\n results = [result],\n imports = [import_mod],\n scope = func_scope,\n original_function = expr)\n return expr.clone(expr.name, new_class = BindCArrayVariable, wrapper_function = func,\n original_variable = expr)", "def eval_let(env, bindings, body):\n new_env = Env(env)\n for ll in value(bindings):\n assert typeof(ll) == 'list', 'bindings must be a list'\n pair = value(ll)\n assert len(pair) == 2, 'bindings must be pairs'\n nam, arg = pair\n assert typeof(nam) == 'atom', 'binding LHS must be atom'\n intern(new_env, value(nam), evalu(arg, env))\n args_evaled = [evalu(x, new_env) for x in body]\n return args_evaled[-1]", "def c_code_helper(self, bottom, weights, top, direction, sub, height=None, width=None):\n dH, dW = self.subsample\n if self.border_mode == \"half\":\n padH = padW = -1\n elif self.border_mode == \"full\":\n padH = padW = -2\n elif isinstance(self.border_mode, tuple):\n padH, padW 
= self.border_mode\n else:\n assert self.border_mode == \"valid\"\n padH = padW = 0\n if direction == \"forward\":\n direction = 0\n out = top\n elif direction == \"backprop weights\":\n direction = 1\n out = weights\n elif direction == \"backprop inputs\":\n direction = 2\n out = bottom\n else:\n raise ValueError(\"direction must be one of 'forward', \"\n \"'backprop weights', 'backprop inputs'\")\n # When subsampling, we cannot unambiguously infer the height and width\n # of bottom and weights from top, so we require them to be given.\n # Similarly, when pad=\"half\", we cannot infer the weight size.\n if ((direction != 0) and (dH != 1)) or ((direction == 1) and (padH == -1)):\n if not height:\n raise ValueError(\"height must be given for backprop with vertical sampling or pad='half'\")\n height = '(*(npy_int*)(PyArray_DATA(%s)))' % height\n else:\n height = 'NULL'\n if ((direction != 0) and (dW != 1)) or ((direction == 1) and (padW == -1)):\n if not width:\n raise ValueError(\"width must be given for backprop with horizontal sampling or pad='half'\")\n width = '(*(npy_int*)(PyArray_DATA(%s)))' % width\n else:\n width = 'NULL'\n sub = sub.copy()\n sub.update(locals())\n\n return \"\"\"\n // Mandatory args\n int direction = %(direction)s; // forward, bprop weights, bprop inputs\n\n // Optional args\n int dH = %(dH)s;\n int dW = %(dW)s;\n int padH = %(padH)s;\n int padW = %(padW)s;\n\n CudaNdarray * bottom = %(bottom)s;\n CudaNdarray * weights = %(weights)s;\n CudaNdarray * top = %(top)s;\n CudaNdarray * out2 = NULL;\n\n // Obtain or infer kernel width and height\n // (we need to know it early to be able to handle auto-padding)\n int kH, kW;\n if (direction != 1) {\n // weight is an input variable, we can just read its shape\n kH = CudaNdarray_HOST_DIMS(weights)[2];\n kW = CudaNdarray_HOST_DIMS(weights)[3];\n }\n else {\n if ((dH != 1) || (padH == -1)) {\n // vertical subsampling or half padding, kernel height is specified\n kH = %(height)s;\n }\n else if (padH == -2) {\n // vertical full padding, we can infer the kernel height\n kH = 2 - CudaNdarray_HOST_DIMS(bottom)[2] + (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH;\n }\n else {\n // explicit padding, we can infer the kernel height\n kH = CudaNdarray_HOST_DIMS(bottom)[2] + 2*padH - (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH;\n }\n if ((dW != 1) || (padW == -1)) {\n kW = %(width)s;\n }\n else if (padW == -2) {\n kW = 2 - CudaNdarray_HOST_DIMS(bottom)[3] + (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW;\n }\n else {\n kW = CudaNdarray_HOST_DIMS(bottom)[3] + 2*padW - (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW;\n }\n }\n\n // Auto-padding if requested\n if (padH == -1) { // vertical half padding\n padH = kH / 2;\n }\n else if (padH == -2) { // vertical full padding\n padH = kH - 1;\n }\n else if (padH < 0) {\n PyErr_SetString(PyExc_ValueError, \"BaseGpuCorrMM: padH must be >= -2\");\n %(fail)s\n }\n if (padW == -1) { // horizontal half padding\n padW = kW / 2;\n }\n else if (padW == -2) { // horizontal full padding\n padW = kW - 1;\n }\n else if (padW < 0) {\n PyErr_SetString(PyExc_ValueError, \"BaseGpuCorrMM: padW must be >= -2\");\n %(fail)s\n }\n\n // Infer output shape\n int out_dim[4];\n switch(direction) {\n case 0: // forward pass\n // output is top: (batchsize, num_filters, height, width)\n // height and width: top = (bottom + 2*pad - weight) / sample + 1\n out_dim[0] = CudaNdarray_HOST_DIMS(bottom)[0];\n out_dim[1] = CudaNdarray_HOST_DIMS(weights)[0];\n out_dim[2] = (CudaNdarray_HOST_DIMS(bottom)[2] + 2*padH - CudaNdarray_HOST_DIMS(weights)[2]) / 
dH + 1;\n out_dim[3] = (CudaNdarray_HOST_DIMS(bottom)[3] + 2*padW - CudaNdarray_HOST_DIMS(weights)[3]) / dW + 1;\n break;\n case 1: // backprop wrt. weights\n // output is weights: (num_filters, num_channels, height, width)\n // height and width: weights = bottom + 2*pad - (top - 1) * sample\n out_dim[0] = CudaNdarray_HOST_DIMS(top)[1];\n out_dim[1] = CudaNdarray_HOST_DIMS(bottom)[1];\n out_dim[2] = kH; // already inferred further above\n out_dim[3] = kW; // how convenient\n break;\n case 2: // backprop wrt. inputs\n // output is bottom: (batchsize, num_channels, height, width)\n // height and width: bottom = (top - 1) * sample + weights - 2*pad\n out_dim[0] = CudaNdarray_HOST_DIMS(top)[0];\n out_dim[1] = CudaNdarray_HOST_DIMS(weights)[1];\n out_dim[2] = (dH != 1) ? %(height)s : (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH + CudaNdarray_HOST_DIMS(weights)[2] - 2*padH;\n out_dim[3] = (dW != 1) ? %(width)s : (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW + CudaNdarray_HOST_DIMS(weights)[3] - 2*padW;\n break;\n default:\n PyErr_SetString(PyExc_ValueError, \"BaseGpuCorrMM: direction must be 0, 1, or 2\\\\n\");\n %(fail)s\n }\n\n // Prepare output array\n if ( !(%(out)s\n && %(out)s->nd==4\n && CudaNdarray_is_c_contiguous(%(out)s)\n && CudaNdarray_HOST_DIMS(%(out)s)[0]==out_dim[0]\n && CudaNdarray_HOST_DIMS(%(out)s)[1]==out_dim[1]\n && CudaNdarray_HOST_DIMS(%(out)s)[2]==out_dim[2]\n && CudaNdarray_HOST_DIMS(%(out)s)[3]==out_dim[3]))\n {\n Py_XDECREF(%(out)s);\n %(out)s = (CudaNdarray*)CudaNdarray_NewDims(4,out_dim);\n if (NULL == %(out)s)\n {\n PyErr_Format(PyExc_RuntimeError,\n \"BaseGpuCorrMM: Failed to allocate output of %%d x %%d x %%d x %%d\",\n out_dim[0], out_dim[1], out_dim[2], out_dim[3]);\n %(fail)s\n }\n }\n\n // Call CUDA code\n out2 = corrMM(%(bottom)s, %(weights)s, %(top)s, direction, dH, dW, padH, padW);\n if (out2==NULL){\n %(fail)s\n }\n assert (out2 == %(out)s);\n\n\"\"\" % sub", "def make_cpp_func_bodies(self):\n\t\tfor name, body in self.func_bodies.iteritems():\n\t\t\tt = Lexer(body).get_tokens()\t\t\t\n\t\t\tS = [] #Stack\n\t\t\tx = 0\n\t\t\twhile x < len(t):\n\t\t\t\tif t[x] == '(': #function call begins\n\t\t\t\t\tx += 1\n\t\t\t\t\tS.append(self.FUNCS_DICT.get(t[x], t[x]) + '(')\n\t\t\t\telif t[x] == ')': #function call ends\n\t\t\t\t\tacc = ''\n\t\t\t\t\twhile S[-1][-1] != '(':\n\t\t\t\t\t\t#pop off params until function call is reached\n\t\t\t\t\t\tacc = S.pop() + ',' + acc\n\t\t\t\t\t# [:-1] to strip off comma at the end\n\t\t\t\t\tS.append(S.pop() + acc[:-1] + ')') #S.pop() gives function\n\t\t\t\telse:\n\t\t\t\t\tS.append(self.convert_atom(t[x]))\n\t\t\t\tx += 1\n\t\t\tself.cpp_func_bodies[name] = S[0]", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.udp_debug_sptr_pc_input_buffers_full_var(self, *args)", "def get_helper_c_code_args(self):\r\n return {'c_prefix': 'PyGpuArray',\r\n 'strides_mul': 1\r\n }", "def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for 
fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates", "def _bind_helper(self, conn, sql, bind):\n\n return sql, bind", "def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir", "def pc_input_buffers_full_var(self, *args):\n return _TestA_swig.my_qpsk_demod_cb_sptr_pc_input_buffers_full_var(self, *args)", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.general_burster_2_sptr_pc_input_buffers_full_var(self, *args)", "def default_helper_c_code_args():\r\n\r\n return {\r\n \"c_prefix\": \"PyArray\",\r\n \"strides_mul\": 1,\r\n }", "def sf_bound(l0, l2, lmbdas, bidual, r, rank=False):\n\n xvals, bnd = [], []\n if l0 == \"constr\":\n offset = 2\n if rank == False:\n bnd_label = r\"$p^{\\ast \\ast}(k + r + 2)$\"\n else:\n bnd_label = r\"$p^{\\ast \\ast}(k + r + 2) - \\zeta_r$\"\n\n for l 
in lmbdas:\n # if l - r - offset > 0:\n # xvals.append(l)\n # bnd.append(bidual[l-r-offset])\n if l + r + offset < lmbdas[-1]:\n bnd.append(bidual[l + r + offset])\n else:\n bnd.append(bidual[-1])\n\n xvals = lmbdas\n\n elif l0 == \"pen\":\n offset = 1\n if rank == False:\n bnd_label = r\"$p^{\\ast \\ast}(\\lambda) + \\lambda(r + 1)$\"\n else:\n bnd_label = r\"$p^{\\ast \\ast}(\\lambda) + \\lambda(r + 1) + \\zeta$\"\n\n if rank == False:\n bnd = [bd + l * (r + offset) for bd, l in zip(bidual, lmbdas)]\n else:\n # this is very confusing, but the code is set up such that the 3rd\n # argument (lmbdas) is the one that varies, in this case we are\n # letting the rank vary from 1,m so we artifically let lmbdas\n # bet this and set r to be the value of l0 penalty\n\n bnd = [bd + r * (l + offset) for bd, l in zip(bidual, lmbdas)]\n\n xvals = lmbdas\n\n return xvals, bnd, bnd_label", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_pc_input_buffers_full_var(self, *args)", "def bcp(self):\r\n logger.info('--\\tbcp')\r\n global gen_debug_info\r\n gen_debug_info.cnt_bcp += 1\r\n logger.info('\\t\\tcnt_bcp: %d' % gen_debug_info.cnt_bcp)\r\n\r\n conflict, ccindex = self.c_array.init_state(self.local_vars.vs)\r\n if conflict is True:\r\n logger.info('\\t\\tfind conflict in c_array.init_state()')\r\n return True, ccindex, -1\r\n\r\n self.need_bcp = True\r\n while self.need_bcp:\r\n self.need_bcp = False\r\n c_array = self.c_array\r\n unitc_i = self.c_array.find_unitc(self.local_vars.vs)\r\n for i, j in unitc_i:\r\n c = self.c_array.clauses[i] # unit clause\r\n vs = self.local_vars.vs[j]\r\n mindex = c_array.c_max_lvl_i[i]\r\n mvs = self.local_vars.vs[mindex]\r\n\r\n # 当出现一个文字出现多个推理时可以有几种不同的实现方式\r\n if vs.value != 0: # 选择第一个推理的\r\n continue\r\n # 选择最小层级推理的\r\n # if vs.value != 0 and self.local_vars.vs[mindex] > vs.level:\r\n # continue\r\n\r\n vs.value = c[j] # the free lit\r\n vs.level = mvs.level\r\n vs.implied = True\r\n self.local_vars.reason[j] = i + 1\r\n c_array.c_isreason[i] = True\r\n\r\n str1 = '\\t\\tc%d ' % (i + 1)\r\n str1 += 'var %d gvar %d '\\\r\n % (j + 1, self.local_vars.global_var[j] + 1)\r\n str1 += 'value %d level %d' % (c[j], vs.level)\r\n logger.info(str1)\r\n logger.debug(gen_debug_info.one_clause(self.c_array.clauses[i],\r\n self.local_vars,\r\n '\\t\\t'))\r\n conflict, ccindex = \\\r\n c_array.update_state(j, self.local_vars.vs)\r\n\r\n self.need_bcp = True\r\n if conflict is True:\r\n # find conflict\r\n return True, ccindex, j\r\n return False, 0, 0", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_pc_input_buffers_full_var(self, *args)", "def __smartdebug__(co,func_globals):\n\n from byteplay import Code,SetLineno,Label,LOAD_GLOBAL,POP_JUMP_IF_FALSE,POP_JUMP_IF_TRUE,JUMP_FORWARD\n code = Code.from_code(co)\n instructions = code.code\n\n # First, find all the \"if DEBUG:\" and \"if not DEBUG\"\n # We collect in reverse order so that we can update\n # in place more easily\n debugs = []\n for offset,op_arg in enumerate(instructions):\n if op_arg == (LOAD_GLOBAL,'DEBUG') and instructions[offset+1][0] in (POP_JUMP_IF_FALSE,POP_JUMP_IF_TRUE):\n debugs.insert(0,offset)\n\n # We want the bounds of the DEBUG true part and DEBUG false part for each\n # most ifs look like\n # LOAD_GLOBAL DEBUG\n # POP_JUMP_IF_FALSE L1 (sense may be reversed with _TRUE)\n # ...\n # JUMP_FORWARD L2\n # L1:\n # ...\n # L2:\n # They look different at the ends of loops, but I'm skipping those\n def back_one(x):\n while 
x > 0:\n opcode = instructions[x][0]\n if opcode != SetLineno and not isinstance(opcode,Label):\n break\n x -= 1\n return x\n def offset_of(L):\n for off,(op,_) in enumerate(instructions):\n if op is L: return off\n return None\n def true_false(x):\n pop_jump,L1 = instructions[x+1]\n O1 = offset_of(L1)\n if O1 < x: return None # Jumping backward, Loop if\n OJF = back_one(O1)\n jf,L2 = instructions[OJF]\n if jf != JUMP_FORWARD: return None # Not my pattern\n O2 = offset_of(L2)\n if pop_jump == POP_JUMP_IF_FALSE:\n return ((x+2,OJF),(OJF+1,O2),(x,O2))\n return ((OJF+1,O2),(x+2,OJF),(x,O2))\n \n\n while debugs:\n x = debugs[0]\n del debugs[0]\n bounds = true_false(x)\n if not bounds: continue\n (t0,t1),(f0,f1),(a,b) = bounds\n if func_globals.get('DEBUG',False):\n using = instructions[t0:t1]\n else:\n using = instructions[f0:f1]\n instructions[a:b] = using\n\n return code.to_code()", "def bind(self, arg_names, **bound_params):\n bound_params=bound_params.copy()\n covered_args=set(bound_params)\n covered_args.update(arg_names)\n uncovered_mand_args=self.get_mandatory_args().difference(covered_args)\n if len(uncovered_mand_args)>0:\n raise TypeError(\"mandatory parameters not supplied: {0}\".format(list(uncovered_mand_args)))\n def bound_call(*args, **call_params):\n params=bound_params.copy()\n params.update(call_params)\n params.update(zip(arg_names,args))\n return self(**params)\n return bound_call\n #sig=FunctionSignature(arg_names=arg_names,kwarg_name=\"kwargs\")\n #return sig.wrap_function(bound_call)", "def _wrap_FunctionDefResult(self, expr):\n var = expr.var\n name = var.name\n scope = self.scope\n # Make name available for later\n scope.insert_symbol(name)\n local_var = var.clone(scope.get_expected_name(name))\n\n if local_var.rank:\n # Allocatable is not returned so it must appear in local scope\n scope.insert_variable(local_var, name)\n\n # Create the C-compatible data pointer\n bind_var = Variable(dtype=BindCPointer(),\n name=scope.get_new_name('bound_'+name),\n is_const=False, memory_handling='alias')\n scope.insert_variable(bind_var)\n\n result = BindCFunctionDefResult(bind_var, var, scope)\n\n # Save the shapes of the array\n self._additional_exprs.extend([Assign(result.shape[i], var.shape[i]) for i in range(var.rank)])\n\n # Create an array variable which can be passed to CLocFunc\n ptr_var = var.clone(scope.get_new_name(name+'_ptr'),\n memory_handling='alias')\n scope.insert_variable(ptr_var)\n\n # Define the additional steps necessary to define and fill ptr_var\n alloc = Allocate(ptr_var, shape=result.shape,\n order=var.order, status='unallocated')\n copy = Assign(ptr_var, var)\n c_loc = CLocFunc(ptr_var, bind_var)\n self._additional_exprs.extend([alloc, copy, c_loc])\n\n return result\n else:\n return BindCFunctionDefResult(local_var, var, scope)", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_pc_input_buffers_full_var(self, *args)", "def eval_python_blocks(req, body):\n localsdict = {\"request\": req}\n globalsdict = {}\n\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n\n try:\n start = 0\n while body.find(\"<%\", start) != -1:\n start = body.find(\"<%\")\n end = body.find(\"%>\", start) \n\n if start != -1 and end != -1:\n codeblock = body[start+2:end].lstrip()\n\n sys.stdout = StringIO.StringIO()\n sys.stderr = StringIO.StringIO()\n\n try:\n exec codeblock in localsdict, globalsdict\n\n except Exception, e:\n print \"ERROR in processing: %s\" % e\n\n output = sys.stdout.getvalue() + sys.stderr.getvalue()\n body = 
body[:start] + output + body[end+2:]\n\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n\n return body", "def pc_input_buffers_full_var(self, *args):\n return _TestA_swig.cleanslate_sptr_pc_input_buffers_full_var(self, *args)", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.ax25_pdu_packer_sptr_pc_input_buffers_full_var(self, *args)", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.hdlc_deframer_sptr_pc_input_buffers_full_var(self, *args)", "def _wrap_FunctionDef(self, expr):\n if expr.is_private:\n return EmptyNode()\n\n name = self.scope.get_new_name(f'bind_c_{expr.name.lower()}')\n self._wrapper_names_dict[expr.name] = name\n\n # Create the scope\n func_scope = self.scope.new_child_scope(name)\n self.scope = func_scope\n\n self._additional_exprs = []\n\n if any(isinstance(a.var, FunctionAddress) for a in expr.arguments):\n warnings.warn(\"Functions with functions as arguments cannot be wrapped by pyccel\")\n return EmptyNode()\n\n # Wrap the arguments and collect the expressions passed as the call argument.\n func_arguments = [self._wrap(a) for a in expr.arguments]\n call_arguments = [self._get_call_argument(fa) for fa in func_arguments]\n func_to_call = {fa : ca for ca, fa in zip(call_arguments, func_arguments)}\n\n func_results = [self._wrap_FunctionDefResult(r) for r in expr.results]\n\n func_call_results = [r.var.clone(self.scope.get_expected_name(r.var.name)) for r in expr.results]\n\n body = self._get_function_def_body(expr, func_arguments, func_to_call, func_call_results)\n\n body.extend(self._additional_exprs)\n self._additional_exprs.clear()\n\n self.exit_scope()\n\n func = BindCFunctionDef(name, func_arguments, func_results, body, scope=func_scope, original_function = expr,\n doc_string = expr.doc_string)\n\n self.scope.functions[name] = func\n\n return func", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.message_debug_sptr_pc_input_buffers_full_var(self, *args)" ]
[ "0.5091313", "0.50208336", "0.48778033", "0.485153", "0.48233292", "0.48153418", "0.48068014", "0.48034468", "0.47375363", "0.47292742", "0.4714741", "0.4679764", "0.46484953", "0.46120793", "0.4569959", "0.45631406", "0.45606202", "0.45545164", "0.45443997", "0.45401987", "0.45309585", "0.4521664", "0.4521506", "0.4518399", "0.45029083", "0.448736", "0.44831663", "0.448189", "0.44703177", "0.4457786" ]
0.57275337
0
Get the argument which should be passed to the function call. The FunctionDefArgument passed to the function may contain additional information which should not be passed to the function being wrapped (e.g. an array with strides should not pass the strides explicitly to the function call, nor should it pass the entire contiguous array). This function extracts the necessary information and returns the object which can be passed to the function call.
def _get_call_argument(self, bind_c_arg):
    original_arg = bind_c_arg.original_function_argument_variable
    arg_var = self.scope.find(original_arg.name, category='variables')
    if original_arg.is_ndarray:
        start = LiteralInteger(1)  # C_F_Pointer leads to default Fortran lbound
        stop = None
        indexes = [Slice(start, stop, step) for step in bind_c_arg.strides]
        return IndexedElement(arg_var, *indexes)
    else:
        return arg_var
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getArgument(self, *args):\n return _libsbml.FunctionDefinition_getArgument(self, *args)", "def _wrap_FunctionDefArgument(self, expr):\n var = expr.var\n name = var.name\n self.scope.insert_symbol(name)\n collisionless_name = self.scope.get_expected_name(var.name)\n if var.is_ndarray or var.is_optional:\n new_var = Variable(BindCPointer(), self.scope.get_new_name(f'bound_{name}'),\n is_argument = True, is_optional = False, memory_handling='alias')\n arg_var = var.clone(collisionless_name, is_argument = False, is_optional = False,\n memory_handling = 'alias', allows_negative_indexes=False)\n self.scope.insert_variable(arg_var)\n else:\n new_var = var.clone(collisionless_name)\n self.scope.insert_variable(new_var)\n\n return BindCFunctionDefArgument(new_var, value = expr.value, original_arg_var = expr.var,\n kwonly = expr.is_kwonly, annotation = expr.annotation, scope=self.scope)", "def fetch_argument(op_def, arg, ws):\n desc = arg if isinstance(arg, bytes) else arg.s\n if sys.version_info >= (3, 0):\n desc = desc.decode('utf-8')\n desc = desc.replace('$HANDLE', op_def.name)\n value = ws.get_tensor(desc).ToNumpy()\n if value.size == 1:\n return value.flatten()[0]\n return value", "def get_argument(self, name):\n val = self.arguments.get(name)\n if val:\n return val[0]\n return None", "def _get_argument(self, name: str) -> GraphQLArgument:\n arg = self.field.args.get(name)\n\n if arg is None:\n raise KeyError(f\"Argument {name} does not exist in {self.field}.\")\n\n return arg", "def get_argument(cfg, abb, call_path, num, raw=False, raw_value=False, can_fail=True, ty=None):\n # constraints\n if ty is not None:\n raw=False\n raw_value=True\n can_fail=False\n\n arg = cfg.vp.arguments[abb][num]\n\n if raw:\n return arg\n\n if len(arg) == 0:\n if can_fail:\n return None\n else:\n raise EmptyArgumentException(\"Argument is empty.\")\n\n value = arg.get_value(key=call_path, raw=raw_value)\n if ty is not None and not isinstance(value, ty):\n raise UnsuitableArgumentException(f\"Expecting {ty} but got {type(value)}\")\n\n return value", "def argument(self, name_argument):\n answer = self._call('argument', argument=name_argument)\n return answer.name, answer.value", "def getArgument(self, *args):\n return _libsbml.SBMLExternalValidator_getArgument(self, *args)", "def argument_value(self, idx: int):\n return self._values[idx][0]", "def inspect_arg(node):\n return inspect_ann(node)", "def _handle_arg(obj, arg):\n if isinstance(arg, PythonTensor):\n if arg.has_init:\n arg.init_data()\n if not arg.const_arg:\n return arg\n elif isinstance(arg, (Tensor, CSRTensor, COOTensor)):\n return arg\n elif hasattr(arg, \"__ms_mutable__\") and getattr(arg, \"__ms_mutable__\"):\n # mutable([]) will be eliminated by FuncGraphSpecializer, and empty list is not supported by backend.\n if isinstance(arg, list) and not arg:\n return None\n return arg\n elif context.get_context(\"grad_for_scalar\") and isinstance(arg, (int, float)):\n return arg\n elif hasattr(obj, \"enable_tuple_broaden\") and obj.enable_tuple_broaden and isinstance(arg, tuple) and \\\n _check_all_tensor(arg):\n return arg\n return None", "def _argument_adapter(callback):\n def wrapper(*args, **kwargs):\n if kwargs or len(args) > 1:\n callback(Arguments(args, kwargs))\n elif args:\n callback(args[0])\n else:\n callback(None)\n return wrapper", "def arg(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arg\")", "def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]", "def variable_argument(self):\n if 
self.is_variadic():\n if self.args[-1] == '...':\n # An unnamed variable argument replaces __VA_ARGS__\n return \"__VA_ARGS__\"\n else:\n # Strip '...' from argument name\n return self.args[-1][:-3]\n else:\n return None", "def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")", "def __call__(self, details: CallableDetails) -> CallableArg:\n if not details.args:\n raise IncompatibleHandlerFactoryError(\n f\"Callable {details.obj!r} has no explicit argument\"\n )\n arg = self._find(details)\n self._check_type(details, arg)\n return arg", "def _get_arg_name(self, arg, variable_name):", "def get_arg_for_TypeVar(typevar, generic, arg_holder=None):\n if not is_Generic(generic):\n generic = get_Generic_type(generic)\n return _get_arg_for_TypeVar(typevar, generic,\n generic if arg_holder is None else arg_holder)", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def get_subscription_argument(self, register_subscription_call):\n return register_subscription_call[0][0]", "def pyarg(self):\n return self._pyarg", "def extract_captured_arguments(func):\n captured_arguments = getattr(func, ATTR_NAME)\n if type(captured_arguments) is not _CapturedArguments: # pylint: disable=unidiomatic-typecheck\n # The attribute was not set by tcm, so effectively it does not exist.\n raise AttributeError\n delattr(func, ATTR_NAME)\n return captured_arguments", "def func(arg1, arg2):\n\n return arg", "def derive_args(func):\n args = inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args", "def arguments_from_call_funccode(f):\n fc = fc_or_c(f.__call__)\n argcount = fc.co_argcount\n args = list(fc.co_varnames[1:argcount])\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return args", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def get_python_function_arguments(f):\n # Note that we only return non-optional arguments (we assume that any optional args are not specified).\n # This allows to, e.g., accept max(a, b, *more, name='') as a binary function\n param_specs = inspect.getfullargspec(f)\n annotations = param_specs.annotations\n arg_names = param_specs.args\n defaults = param_specs.defaults # \"if this tuple has n elements, they correspond to the last n elements listed\n # in args\"\n if defaults:\n arg_names = arg_names[:-len(defaults)]\n return (arg_names, annotations)", "def argument(*name_or_flags, **kwargs):\n\n return (list(name_or_flags), kwargs)", "def get_global_arg(self, key):\n return self.args[key]" ]
[ "0.68676627", "0.63390875", "0.6240098", "0.60637504", "0.59205437", "0.5899934", "0.57536966", "0.5694595", "0.55618656", "0.55556196", "0.5553342", "0.5541516", "0.5448954", "0.5444387", "0.5425658", "0.53730434", "0.5355409", "0.52947515", "0.5220035", "0.51893306", "0.515469", "0.5148944", "0.5141562", "0.51363707", "0.5132754", "0.51290256", "0.5111374", "0.5087154", "0.5073672", "0.5061762" ]
0.7051167
0
Create a Module which is compatible with C. Create a Module which provides an interface between C and the Module described by expr. This includes wrapping functions, interfaces, classes and module variables.
def _wrap_Module(self, expr): # Define scope scope = expr.scope mod_scope = Scope(used_symbols = scope.local_used_symbols.copy(), original_symbols = scope.python_names.copy()) self.scope = mod_scope # Wrap contents funcs_to_wrap = expr.funcs funcs = [self._wrap(f) for f in funcs_to_wrap] if expr.init_func: init_func = funcs[next(i for i,f in enumerate(funcs_to_wrap) if f == expr.init_func)] else: init_func = None if expr.free_func: free_func = funcs[next(i for i,f in enumerate(funcs_to_wrap) if f == expr.free_func)] else: free_func = None removed_functions = [f for f,w in zip(funcs_to_wrap, funcs) if isinstance(w, EmptyNode)] funcs = [f for f in funcs if not isinstance(f, EmptyNode)] interfaces = [self._wrap(f) for f in expr.interfaces] classes = [self._wrap(f) for f in expr.classes] variables = [self._wrap(v) for v in expr.variables if not v.is_private] variable_getters = [v for v in variables if isinstance(v, BindCArrayVariable)] imports = [Import(expr.name, target = expr, mod=expr)] name = mod_scope.get_new_name(f'bind_c_{expr.name.target}') self._wrapper_names_dict[expr.name.target] = name self.exit_scope() return BindCModule(name, variables, funcs, variable_wrappers = variable_getters, init_func = init_func, free_func = free_func, interfaces = interfaces, classes = classes, imports = imports, original_module = expr, scope = mod_scope, removed_functions = removed_functions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_module(\n submodule_name: str, cython_src_files: list,\n c_src_paths: list = None, c_include_paths: list = None,\n c_libraries: list = None\n):\n\n if c_include_paths is None: c_include_paths = []\n if c_libraries is None: c_libraries = []\n if c_src_paths is None: c_src_paths = []\n\n return Extension(\n name=\".\".join([module_name, submodule_name]),\n sources=[normpath(x) for x in cython_src_files],\n library_dirs=[normpath(x) for x in c_src_paths],\n include_dirs=[normpath(x) for x in c_include_paths],\n libraries=c_libraries,\n )", "def build_dynamic_module(self):\r\n self.code_gen()\r\n\r\n mod = cmodule.DynamicModule()\r\n\r\n # The code of instantiate\r\n # the 1 is for error_storage\r\n code = self.instantiate_code(1 + len(self.args))\r\n instantiate = cmodule.ExtFunction('instantiate', code,\r\n method=cmodule.METH_VARARGS)\r\n #['error_storage'] + argnames,\r\n #local_dict = d,\r\n #global_dict = {})\r\n\r\n # Static methods that can run and destroy the struct built by\r\n # instantiate.\r\n if PY3:\r\n static = \"\"\"\r\n static int {struct_name}_executor({struct_name} *self) {{\r\n return self->run();\r\n }}\r\n\r\n static void {struct_name}_destructor(PyObject *capsule) {{\r\n {struct_name} *self = ({struct_name} *)PyCapsule_GetContext(capsule);\r\n delete self;\r\n }}\r\n \"\"\".format(struct_name=self.struct_name)\r\n else:\r\n static = \"\"\"\r\n static int %(struct_name)s_executor(%(struct_name)s* self) {\r\n return self->run();\r\n }\r\n\r\n static void %(struct_name)s_destructor(void* executor, void* self) {\r\n delete ((%(struct_name)s*)self);\r\n }\r\n \"\"\" % dict(struct_name=self.struct_name)\r\n\r\n # We add all the support code, compile args, headers and libs we need.\r\n for support_code in self.support_code() + self.c_support_code_apply:\r\n mod.add_support_code(support_code)\r\n mod.add_support_code(self.struct_code)\r\n mod.add_support_code(static)\r\n mod.add_function(instantiate)\r\n for header in self.headers():\r\n mod.add_include(header)\r\n for init_code_block in self.init_code() + self.c_init_code_apply:\r\n mod.add_init_code(init_code_block)\r\n\r\n return mod", "def _wrap_Variable(self, expr):\n if expr.rank == 0 and expr.dtype in NativeNumeric:\n return expr.clone(expr.name, new_class = BindCVariable)\n else:\n scope = self.scope\n func_name = scope.get_new_name('bind_c_'+expr.name.lower())\n func_scope = scope.new_child_scope(func_name)\n mod = expr.get_user_nodes(Module)[0]\n import_mod = Import(mod.name, AsName(expr,expr.name), mod=mod)\n func_scope.imports['variables'][expr.name] = expr\n\n # Create the data pointer\n bind_var = Variable(dtype=BindCPointer(),\n name=scope.get_new_name('bound_'+expr.name),\n is_const=True, memory_handling='alias')\n func_scope.insert_variable(bind_var)\n\n result = BindCFunctionDefResult(bind_var, expr, func_scope)\n if expr.rank == 0:\n #assigns = []\n #c_loc = CLocFunc(expr, bind_var)\n raise NotImplementedError(\"Classes cannot be wrapped\")\n else:\n assigns = [Assign(result.shape[i], expr.shape[i]) for i in range(expr.rank)]\n c_loc = CLocFunc(expr, bind_var)\n body = [*assigns, c_loc]\n func = BindCFunctionDef(name = func_name,\n body = body,\n arguments = [],\n results = [result],\n imports = [import_mod],\n scope = func_scope,\n original_function = expr)\n return expr.clone(expr.name, new_class = BindCArrayVariable, wrapper_function = func,\n original_variable = expr)", "def make_module_from_function(funcobj):\n module = imp.new_module(funcobj.__name__)\n scope = marks.get(funcobj, 'scope')\n 
funclocals = trace_function(funcobj, scope)\n module.__dict__.update(funclocals)\n return module", "def create_module(self, body: list, **kwargs):\n return ast.Module(body=body)", "def _wrap_FunctionDef(self, expr):\n if expr.is_private:\n return EmptyNode()\n\n name = self.scope.get_new_name(f'bind_c_{expr.name.lower()}')\n self._wrapper_names_dict[expr.name] = name\n\n # Create the scope\n func_scope = self.scope.new_child_scope(name)\n self.scope = func_scope\n\n self._additional_exprs = []\n\n if any(isinstance(a.var, FunctionAddress) for a in expr.arguments):\n warnings.warn(\"Functions with functions as arguments cannot be wrapped by pyccel\")\n return EmptyNode()\n\n # Wrap the arguments and collect the expressions passed as the call argument.\n func_arguments = [self._wrap(a) for a in expr.arguments]\n call_arguments = [self._get_call_argument(fa) for fa in func_arguments]\n func_to_call = {fa : ca for ca, fa in zip(call_arguments, func_arguments)}\n\n func_results = [self._wrap_FunctionDefResult(r) for r in expr.results]\n\n func_call_results = [r.var.clone(self.scope.get_expected_name(r.var.name)) for r in expr.results]\n\n body = self._get_function_def_body(expr, func_arguments, func_to_call, func_call_results)\n\n body.extend(self._additional_exprs)\n self._additional_exprs.clear()\n\n self.exit_scope()\n\n func = BindCFunctionDef(name, func_arguments, func_results, body, scope=func_scope, original_function = expr,\n doc_string = expr.doc_string)\n\n self.scope.functions[name] = func\n\n return func", "def compileModule(self, code):\n r = ast.Module(None, self.compileSuite(code))\n #print r\n return r", "def buildModule(name):\n m = imp.new_module(name)\n # function from another module\n m.foreignFunction = aFunction\n # function of the anonymous module\n exec \"\"\"\ndef isOk():\n return foreignFunction()\n\"\"\" in m.__dict__\n return m", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir", "def new_module(name, doc=None):\n m = ModuleType(name, doc)\n m.__file__ = name + \".py\"\n sys.modules[name] = m\n return m", "def compile(cls, module_ast, filename):\n\n # Protect against unicode filenames, which are incompatible\n # with code objects created via types.CodeType\n if isinstance(filename, unicode):\n filename = 
filename.encode(sys.getfilesystemencoding())\n\n # Generate the startup code for the module\n module_ops = [(SetLineno, 1)]\n for start in STARTUP:\n start_code = compile(start, filename, mode='exec')\n bp_code = Code.from_code(start_code)\n # Skip the SetLineo and ReturnValue codes\n module_ops.extend(bp_code.code[1:-2])\n\n # Add in the code ops for the module\n compiler = cls(filename)\n compiler.visit(module_ast)\n module_ops.extend(compiler.code_ops)\n\n # Generate the cleanup code for the module\n for end in CLEANUP:\n end_code = compile(end, filename, mode='exec')\n bp_code = Code.from_code(end_code)\n # Skip the SetLineo and ReturnValue codes\n module_ops.extend(bp_code.code[1:-2])\n\n # Add in the final return value ops\n module_ops.extend([\n (LOAD_CONST, None),\n (RETURN_VALUE, None),\n ])\n\n # Generate and return the module code object.\n mod_code = Code(\n module_ops, [], [], False, False, False, '', filename, 0, '',\n )\n return mod_code.to_code()", "def compile_extension_module(\n name, builddir, include_dirs,\n source_string, libraries=[], library_dirs=[]):\n modname = name.split('.')[-1]\n dirname = builddir / name\n dirname.mkdir(exist_ok=True)\n cfile = _convert_str_to_file(source_string, dirname)\n include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')]\n\n return _c_compile(\n cfile, outputfilename=dirname / modname,\n include_dirs=include_dirs, libraries=[], library_dirs=[],\n )", "def compile(cls, module_ast, filename):\n compiler = cls(filename)\n compiler.visit(module_ast)\n\n module_ops = [(SetLineno, 1)]\n extend_ops = module_ops.extend\n\n # Generate the startup code for the module\n for start in STARTUP:\n start_code = compile(start, filename, mode='exec')\n # Skip the SetLineo and ReturnValue codes\n extend_ops(Code.from_code(start_code).code[1:-2])\n\n # Add in the code ops for the module\n extend_ops(compiler.code_ops)\n\n # Generate the cleanup code for the module\n for end in CLEANUP:\n end_code = compile(end, filename, mode='exec')\n # Skip the SetLineo and ReturnValue codes\n extend_ops(Code.from_code(end_code).code[1:-2])\n \n # Add in the final return value ops\n extend_ops([\n (LOAD_CONST, None),\n (RETURN_VALUE, None),\n ])\n\n # Generate and return the module code object.\n mod_code = Code(\n module_ops, [], [], False, False, False, '', filename, 0, '',\n )\n return mod_code.to_code()", "def _create_module(name):\n module = new.module(name)\n sys.modules[name] = module\n return module", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n\n if self.signature.ret_arg:\n output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],\n ll.PointerType(self.fp_type))\n for i, val in enumerate(ret):\n index = ll.Constant(ll.IntType(32), i)\n output_array_ptr = builder.gep(output_fp_ptr, [index])\n builder.store(val, output_array_ptr)\n builder.ret(ll.Constant(ll.IntType(32), 0)) # return success\n else:\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def create_module(module_dict: Dict[str, Any], nets: List[Net]) -> Module:\n m_data = module_dict['module']\n footprint = m_data[0].replace('\"', \"\")\n layer = convert_to_layers(get_dict_by_key(m_data, 'layer')['layer'])[0]\n coords = get_dict_by_key(m_data, 'at')['at']\n if len(coords) == 3 and \"B.\" in layer.name:\n coords[2] = 
(float(coords[2]) + 180) % 360\n coords[1] = str(-1*float(coords[1]))\n attr = get_dict_by_key(m_data, 'attr')\n smd: bool = True if (attr and attr['attr'] == 'smd') else False\n module_texts: List[FpText] = get_texts(m_data, 'fp_text')\n figures: List[Union[FpPoly, FpCircle, FpArc, FpLine]] = get_lines(m_data, 'fp_line')\n figures.extend(get_circles(m_data, 'fp_circle'))\n pads = get_pads(m_data, nets)\n ref = [text.text for text in module_texts if text.text_type ==TextType.reference][0]\n update_nets_with_pads(pads, nets, ref)\n figures.extend(get_polys(m_data, 'fp_poly'))\n figures.extend(get_arcs(m_data, 'fp_arc'))\n return Module(footprint=footprint, layer=layer, coords=coords, smd=smd,\n texts=module_texts, pads=pads, figures=figures, extrapads=list())", "def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func", "def _cmplx_factory_ ( cmplxt , re , im ) :\n return cmplxt ( re , im )", "def generate_mutant_module(self, mutated_ast, module_shortname=\"\"):\n mutant_module_shortname = module_shortname\n mutant_code = compile(mutated_ast, mutant_module_shortname, \"exec\")\n mutant_module = imp.new_module(mutant_module_shortname)\n try:\n exec mutant_code in mutant_module.__dict__\n except TypeError:\n print 'checkpoint'\n return mutant_module", "def import_c_extension(mod_globals):\n c_module = None\n module_name = mod_globals['__name__']\n assert module_name.startswith('BTrees.')\n module_name = module_name.split('.')[1]\n if _should_attempt_c_optimizations():\n c_module = _c_optimizations_available(module_name)\n\n if c_module:\n new_values = dict(c_module.__dict__)\n new_values.pop(\"__name__\", None)\n new_values.pop('__file__', None)\n new_values.pop('__doc__', None)\n mod_globals.update(new_values)\n else:\n # No C extension, make the Py versions available without that\n # extension. 
The list comprehension both filters and prevents\n # concurrent modification errors.\n for py in [k for k in mod_globals if k.endswith('Py')]:\n mod_globals[py[:-2]] = mod_globals[py]\n\n # Assign the global aliases\n prefix = module_name[:2]\n for name in ('Bucket', 'Set', 'BTree', 'TreeSet'):\n mod_globals[name] = mod_globals[prefix + name]\n\n # Cleanup\n mod_globals.pop('import_c_extension', None)", "def preprocess_for_clml(mod):\n\n for _var in mod.get_global_vars():\n if _var.name_hint == \"main\":\n continue\n fn = mod[_var.name_hint]\n if \"Compiler\" in fn.attrs.keys() and fn.attrs[\"Compiler\"] == \"clml\":\n new_fn = fn.body\n clml_mod = tvm.IRModule.from_expr(new_fn)\n with tvm.transform.PassContext(opt_level=3):\n clml_mod = preprocess_module(clml_mod)\n new_body = clml_mod[\"main\"].body\n mod[_var.name_hint] = _function.Function(\n fn.params, new_body, fn.ret_type, fn.type_params, fn.attrs\n )\n return mod", "def build_cffi():\r\n print_banner(\"Building CFFI Module\")\r\n ffi = cffi.FFI()\r\n\r\n this_dir = pathlib.Path().resolve()\r\n h_file_name = this_dir / \"cmult.h\"\r\n with open(h_file_name) as h_file:\r\n # cffi does not like our preprocessor directives, so we remove them\r\n lns = h_file.read().splitlines()\r\n flt = filter(lambda ln: not re.match(r\" *#\", ln), lns)\r\n flt = map(lambda ln: ln.replace(\"EXPORT_SYMBOL \", \"\"), flt)\r\n ffi.cdef(str(\"\\n\").join(flt))\r\n\r\n ffi.set_source(\r\n \"cffi_example\",\r\n # Since we are calling a fully built library directly no custom source\r\n # is necessary. We need to include the .h files, though, because behind\r\n # the scenes cffi generates a .c file which contains a Python-friendly\r\n # wrapper around each of the functions.\r\n '#include \"cmult.h\"',\r\n # The important thing is to include the pre-built lib in the list of\r\n # libraries we are linking against:\r\n libraries=[\"cmult\"],\r\n library_dirs=[this_dir.as_posix()],\r\n extra_link_args=[\"-Wl,-rpath,.\"],\r\n )\r\n\r\n ffi.compile()\r\n print(\"* Complete\")", "def create_modulestore_instance(engine, doc_store_config, options, i18n_service=None):\r\n class_ = load_function(engine)\r\n\r\n _options = {}\r\n _options.update(options)\r\n\r\n for key in FUNCTION_KEYS:\r\n if key in _options and isinstance(_options[key], basestring):\r\n _options[key] = load_function(_options[key])\r\n\r\n if HAS_REQUEST_CACHE:\r\n request_cache = RequestCache.get_request_cache()\r\n else:\r\n request_cache = None\r\n\r\n try:\r\n metadata_inheritance_cache = get_cache('mongo_metadata_inheritance')\r\n except InvalidCacheBackendError:\r\n metadata_inheritance_cache = get_cache('default')\r\n\r\n return class_(\r\n metadata_inheritance_cache_subsystem=metadata_inheritance_cache,\r\n request_cache=request_cache,\r\n xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()),\r\n xblock_select=getattr(settings, 'XBLOCK_SELECT_FUNCTION', None),\r\n doc_store_config=doc_store_config,\r\n i18n_service=i18n_service or ModuleI18nService(),\r\n **_options\r\n )", "def module_constructor(loader, node):\n new_module = Module.__new__(Module)\n yield new_module\n values = loader.construct_mapping(node, deep=True)\n values[\"constraint\"] = ec2rlcore.constraint.Constraint(values[\"constraint\"])\n values[\"path\"] = Module.temp_path\n # Strip trailing newlines from string values where yaml added them (e.g. 
title, helptext)\n for key in values.keys():\n if isinstance(values[key], str):\n values[key] = values[key].rstrip()\n new_module.__init__(**values)", "def _createModuleObj(self):\n # Create the SWIG module object to provide access to the C++ object.\n ModuleUniformVelModel.__init__(self)\n return", "def compile(self, expr):\r\n if expr not in self._compile_cache:\r\n c = compile(expr, \"\", \"eval\")\r\n for i in c.co_names: #prevent breakout via new-style-classes\r\n if i[0] == '_':\r\n raise NameError(\"Name '%s' is not allowed.\" %(i))\r\n self._compile_cache[expr] = c\r\n return self._compile_cache[expr]", "def onnx_compiler(func):\n\n assert isinstance(func, tvm.relay.function.Function)\n name = str(func.attrs.global_symbol)\n model = to_onnx(func, {}, name)\n const_vars = [const.name for const in model.graph.initializer]\n name_bytes = bytes(name, \"utf-8\")\n name_size = struct.pack(\"I\", len(name_bytes))\n model_serialized = model.SerializeToString()\n model_size = struct.pack(\"I\", model.ByteSize())\n data = b\"\" + name_size + name_bytes + model_size + model_serialized\n\n runtime_func = \"runtime.ONNXModuleCreate\"\n fcreate = tvm._ffi.get_global_func(runtime_func)\n return fcreate(data.hex(), name, const_vars)", "def make_module_instance(self, *args, **kwargs):\r\n\r\n # Function to go through member lists and dictionaries recursively,\r\n # to look for submodules on which make_module_instance needs to be called\r\n def recurse(v):\r\n if isinstance(v,list):\r\n iterv = enumerate(v)\r\n else:\r\n iterv = v.iteritems()\r\n #backport\r\n #iter = enumerate(v) if isinstance(v,list) else v.iteritems()\r\n for sk,sv in iterv:\r\n if isinstance(sv,(list,dict)):\r\n sv = recurse(sv)\r\n elif isinstance(sv,Module):\r\n sv = sv.make_module_instance(args,kwargs)\r\n v[sk] = sv\r\n return v\r\n\r\n for k,v in self.local_attr.iteritems():\r\n if isinstance(v,Module):\r\n v = v.make_module_instance(args,kwargs)\r\n self[k] = self.__wrapper__(v)\r\n elif isinstance(v,Method):\r\n self.__setitem__(k,v)\r\n else:\r\n # iterate through lists and dictionaries to wrap submodules\r\n if isinstance(v,(list,dict)):\r\n self[k] = self.__wrapper__(recurse(v))\r\n try:\r\n self[k] = self.__wrapper__(v)\r\n except Exception:\r\n if isinstance(v, Component):\r\n raise\r\n else:\r\n self.__dict__[k] = v\r\n return self", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.58194566", "0.5692998", "0.56872624", "0.56411546", "0.5595038", "0.5511902", "0.54851526", "0.54670924", "0.5420265", "0.5383697", "0.5296799", "0.5263366", "0.52061045", "0.51810807", "0.5178292", "0.5156475", "0.5152217", "0.51299", "0.5128299", "0.51144826", "0.50959384", "0.5068269", "0.5063956", "0.50597686", "0.5050482", "0.50491136", "0.5045991", "0.5018804", "0.5009883", "0.5008997" ]
0.747593
0
Create a C-compatible function which executes the original function. Create a function which can be called from C which internally calls the original function. It does this by wrapping the arguments and the results and unrolling the body using self._get_function_def_body to ensure optional arguments are present before accessing them. With all this information a BindCFunctionDef is created which is C-compatible. Functions which cannot be wrapped raise a warning and return an EmptyNode. This is the case for functions with functions as arguments.
def _wrap_FunctionDef(self, expr): if expr.is_private: return EmptyNode() name = self.scope.get_new_name(f'bind_c_{expr.name.lower()}') self._wrapper_names_dict[expr.name] = name # Create the scope func_scope = self.scope.new_child_scope(name) self.scope = func_scope self._additional_exprs = [] if any(isinstance(a.var, FunctionAddress) for a in expr.arguments): warnings.warn("Functions with functions as arguments cannot be wrapped by pyccel") return EmptyNode() # Wrap the arguments and collect the expressions passed as the call argument. func_arguments = [self._wrap(a) for a in expr.arguments] call_arguments = [self._get_call_argument(fa) for fa in func_arguments] func_to_call = {fa : ca for ca, fa in zip(call_arguments, func_arguments)} func_results = [self._wrap_FunctionDefResult(r) for r in expr.results] func_call_results = [r.var.clone(self.scope.get_expected_name(r.var.name)) for r in expr.results] body = self._get_function_def_body(expr, func_arguments, func_to_call, func_call_results) body.extend(self._additional_exprs) self._additional_exprs.clear() self.exit_scope() func = BindCFunctionDef(name, func_arguments, func_results, body, scope=func_scope, original_function = expr, doc_string = expr.doc_string) self.scope.functions[name] = func return func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def result_as_arg(self, node, C_new):\n F_new = C_new.clone()\n\n # Fortran function should wrap the new C function\n F_new._PTR_F_C_index = C_new._function_index\n F_new.wrap.assign(fortran=True)\n # Do not add '_bufferify'\n F_new.fmtdict.function_suffix = node.fmtdict.function_suffix\n\n # Do not wrap original function (does not have result argument)\n node.wrap.fortran = False\n return F_new", "def replace(\n self,\n tree: Optional[ast.FunctionDef] = None,\n globals_: Optional[ConstsDictT] = None,\n ) -> \"Function\":\n if tree is None:\n tree = self.tree\n if globals_ is None:\n globals_ = self.globals\n\n if len(self.closure_vals) > 0:\n func_fake_closure = eval_function_def_as_closure(\n tree, list(self.closure_vals), globals_=globals_, flags=self._compiler_flags\n )\n\n new_closure_vals = get_closure(func_fake_closure)\n for name in new_closure_vals:\n new_closure_vals[name] = self.closure_vals[name]\n else:\n new_closure_vals = self.closure_vals\n\n return Function(tree, globals_, new_closure_vals, self._compiler_flags)", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result 
or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def eval(self) -> Callable:\n if len(self.closure_vals) > 0:\n func_fake_closure = eval_function_def_as_closure(\n self.tree,\n list(self.closure_vals),\n globals_=self.globals,\n flags=self._compiler_flags,\n )\n\n func = FunctionType(\n func_fake_closure.__code__,\n self.globals,\n func_fake_closure.__name__,\n func_fake_closure.__defaults__,\n tuple(self.closure_vals.values()),\n )\n\n for attr in (\"__kwdefaults__\", \"__annotations__\"):\n if hasattr(func_fake_closure, attr):\n setattr(func, attr, getattr(func_fake_closure, attr))\n else:\n func = eval_function_def(self.tree, globals_=self.globals, flags=self._compiler_flags)\n\n # A regular function contains a file name and a line number\n # pointing to the location of its source.\n # I we wanted to trick ``inspect.getsource()`` into working with\n # this newly generated function, we could create a temporary file and write it there.\n # But it leads to other complications, and is unnecessary at this stage.\n # So we just save the source into an attribute for ``Function.from_object()``\n # to discover if we ever want to create a new ``Function`` object\n # out of this function.\n vars(func)[SOURCE_ATTRIBUTE] = self.get_source()\n\n return func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, 
proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def create_function(config, base_module=None, args=None, kwargs=None):\n try:\n fun, args, kwargs = __clsfn_args_kwargs(config, 'function', base_module,\n args, kwargs)\n return partial(fun, *args, **kwargs)\n except Exception as e:\n raise Exception(\n 'Could not create function\\n{}'.format(json.dumps(config, indent=4)),\n e\n )", "def fake_as_funcnode(alt_func, name, rename_attributes=None):\n\n def _wrapper(*args, **kwargs):\n inputs = []\n attributes = {}\n rename_attr_dict = {}\n if rename_attributes is not None:\n rename_attr_dict = {attr[0]: attr[1] for attr in rename_attributes}\n\n # resolve default value for kwargs\n arg_spec = inspect.signature(alt_func)\n bound = arg_spec.bind(*args, **kwargs)\n bound.apply_defaults()\n # default values are set on `bound.arguments`, but cannot get them\n # from `bound.kwargs`\n for i, (k, v) in enumerate(bound.arguments.items()):\n if i < len(args):\n continue\n kwargs[k] = v\n\n def set_attr(key, value):\n default_name = key if isinstance(key, str) else 'arg{}'.format(key)\n attributes[rename_attr_dict.get(key, default_name)] = value\n\n def expand_args(args_iter):\n for i, a in args_iter:\n if _is_var(a):\n inputs.append(a)\n elif isinstance(a, (tuple, list)):\n # all elements are variable -> add flatten them to inputs\n # all elements are not variable -> add them to attributes\n # mixed variable and other type value -> error\n flatten_arg = _flatten(a)\n var_or_not = map(_is_var, flatten_arg)\n if all(var_or_not):\n inputs.extend(flatten_arg)\n elif not any(var_or_not):\n set_attr(i, a)\n else:\n raise ValueError(\n 'arguments mixed variable and other type are not '\n 'supported')\n else:\n set_attr(i, a)\n\n expand_args(enumerate(args))\n expand_args(kwargs.items())\n if not inputs:\n raise ValueError(\n 'arguments of the function wrapped by \\'as_funcnode\\' '\n 'must include at least one chainer.Variable, 
function name: '\n '{}'.format(name))\n\n wrapped = WrappedFunctionNode(\n name, alt_func, args, kwargs, inputs, attributes=attributes)\n ret = wrapped.apply(inputs)\n if len(ret) > 1:\n return ret\n return ret[0]\n\n chainer.utils.experimental('as_funcnode')\n return _wrapper", "def _maybe_define_function(self, args, kwargs):\n args, kwargs, filtered_flat_args = (\n self._function_spec.canonicalize_function_inputs(args, kwargs))\n\n if self.input_signature is not None:\n args = self.input_signature\n kwargs = {}\n\n # Get runtime values of captures\n captures = self._captures_container.get_snapshot()\n\n # cache_key_deletion_observer is useless here. It's based on all captures.\n # A new cache key will be built later when saving ConcreteFunction because\n # only active captures should be saved.\n lookup_func_context, lookup_func_type, _ = function_context.make_cache_key(\n (args, kwargs), captures)\n concrete_function = self._function_cache.lookup(lookup_func_context,\n lookup_func_type)\n if concrete_function is not None:\n return concrete_function, filtered_flat_args\n\n with monitoring.MonitoredTimer(_graph_building_time_counter.get_cell()):\n with trace.Trace(\"tf.function-graph_building\"):\n logging.vlog(\n 1, \"Creating new FuncGraph for Python function %r (key: %r, %r)\",\n self._python_function, lookup_func_context,\n lookup_func_type)\n logging.vlog(2, \"Python function signature [args: %s] [kwargs: %s]\",\n args, kwargs)\n ag_status = (\n ag_ctx.Status.ENABLED\n if self._autograph else ag_ctx.Status.DISABLED)\n with ag_ctx.ControlStatusCtx(\n status=ag_status, options=self._autograph_options):\n if self.input_signature is None and self._reduce_retracing:\n general_func_type = self._function_cache.generalize(\n lookup_func_context, lookup_func_type)\n placeholder_bound_args = general_func_type.placeholder_arguments()\n args, kwargs = placeholder_bound_args.args[0]\n\n concrete_function = self._create_concrete_function(args, kwargs)\n\n graph_capture_container = concrete_function.graph._capture_func_lib # pylint: disable=protected-access\n # Maintain the list of all captures\n self._captures_container.update(graph_capture_container)\n # Get current active captures snapshot\n captures = graph_capture_container.get_snapshot()\n\n # Create a cache_key with args and captures\n traced_func_context, traced_func_type, traced_func_deletion_observer = (\n function_context.make_cache_key((args, kwargs), captures))\n\n self._function_cache.add(traced_func_context, traced_func_type,\n traced_func_deletion_observer,\n concrete_function)\n\n return concrete_function, filtered_flat_args", "def compile_function(self, function, arguments):", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def bind_partial(self, *args, **kwds) -> \"Function\":\n\n # We only need the signature, so clean the function body before eval'ing.\n empty_func = self.replace(tree=replace_fields(self.tree, body=[ast.Pass()]))\n signature = inspect.signature(empty_func.eval())\n 
bargs = signature.bind_partial(*args, **kwds)\n\n # Remove the bound arguments from the function AST\n bound_argnames = set(bargs.arguments.keys())\n new_tree = filter_function_def(self.tree, bound_argnames)\n\n # Add assignments for bound parameters\n assignments = []\n gen_sym = GenSym.for_tree(new_tree)\n new_bindings = {}\n for name, value in bargs.arguments.items():\n node, gen_sym, binding = reify_unwrapped(value, gen_sym)\n new_bindings.update(binding)\n assignments.append(ast.Assign(targets=[ast.Name(id=name, ctx=ast.Store())], value=node))\n\n new_globals = dict(self.globals)\n new_globals.update(new_bindings)\n\n new_tree = replace_fields(new_tree, body=assignments + new_tree.body)\n\n return Function(new_tree, new_globals, self.closure_vals, self._compiler_flags)", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def filter_function_def(function_def: ast.FunctionDef, bound_argnames: Set[str]) -> ast.FunctionDef:\n def_type = type(function_def)\n assert def_type in (ast.FunctionDef, ast.AsyncFunctionDef)\n\n new_args = filter_arguments(function_def.args, bound_argnames)\n\n return def_type(\n name=function_def.name,\n args=new_args,\n body=function_def.body,\n decorator_list=function_def.decorator_list,\n returns=function_def.returns,\n )", "def _wrap_FunctionDefArgument(self, expr):\n var = expr.var\n name = var.name\n 
self.scope.insert_symbol(name)\n collisionless_name = self.scope.get_expected_name(var.name)\n if var.is_ndarray or var.is_optional:\n new_var = Variable(BindCPointer(), self.scope.get_new_name(f'bound_{name}'),\n is_argument = True, is_optional = False, memory_handling='alias')\n arg_var = var.clone(collisionless_name, is_argument = False, is_optional = False,\n memory_handling = 'alias', allows_negative_indexes=False)\n self.scope.insert_variable(arg_var)\n else:\n new_var = var.clone(collisionless_name)\n self.scope.insert_variable(new_var)\n\n return BindCFunctionDefArgument(new_var, value = expr.value, original_arg_var = expr.var,\n kwonly = expr.is_kwonly, annotation = expr.annotation, scope=self.scope)", "def eval_function_def(\n function_def: Union[ast.AsyncFunctionDef, ast.FunctionDef],\n globals_=None,\n flags: Optional[int] = None,\n) -> Callable:\n\n assert type(function_def) in (ast.FunctionDef, ast.AsyncFunctionDef)\n\n # Making a copy before mutating\n module = ast.Module(body=[copy.deepcopy(function_def)], type_ignores=[])\n\n ast.fix_missing_locations(module)\n\n if flags is not None:\n kwds = dict(dont_inherit=True, flags=flags)\n else:\n kwds = {}\n code_object = compile(module, \"<nofile>\", \"exec\", **kwds)\n\n locals_ = {}\n eval(code_object, globals_, locals_)\n return locals_[function_def.name]", "def _create_concrete_function(self, args, kwargs):\n self.tracing_count += 1\n\n arglen = len(args)\n base_arg_names = self._function_spec.arg_names[:arglen]\n num_missing_args = arglen - len(self._function_spec.arg_names)\n missing_arg_names = [self._function_spec.vararg_name] * num_missing_args\n # Produce a list of missing args of the form [\"arg_0\", \"arg_1\", ...],\n # where arg is based on the self._function_spec.vararg_name.\n missing_arg_names = [\n \"%s_%d\" % (arg, i) for i, arg in enumerate(missing_arg_names)\n ]\n arg_names = base_arg_names + missing_arg_names\n concrete_function = monomorphic_function.ConcreteFunction(\n func_graph_module.func_graph_from_py_func(\n self._name,\n self._python_function,\n args,\n kwargs,\n None,\n autograph=self._autograph,\n autograph_options=self._autograph_options,\n arg_names=arg_names,\n capture_by_value=self._capture_by_value),\n self._function_attributes,\n spec=self.function_spec,\n # Tell the ConcreteFunction to clean up its graph once it goes out of\n # scope. 
This is not the default behavior since it gets used in some\n # places (like Keras) where the FuncGraph lives longer than the\n # ConcreteFunction.\n shared_func_graph=False)\n return concrete_function", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def eval_function_def_as_closure(\n function_def: ast.FunctionDef,\n closure_names: List[str],\n globals_: Optional[ConstsDictT] = None,\n flags: Optional[int] = None,\n) -> Callable:\n def_type = type(function_def)\n assert def_type in (ast.FunctionDef, ast.AsyncFunctionDef)\n\n none = ast.NameConstant(value=None)\n\n # We can't possibly recreate ASTs of existing closure variables\n # (because all we have are their values).\n # So we create fake closure variables for the function to attach to,\n # and then substitute the closure cells with the ones obtained from\n # the \"prototype\" of this function (a ``types.FunctionType`` object\n # from which this tree was extracted).\n fake_closure_vars = [\n ast.Assign(targets=[ast.Name(id=name, ctx=ast.Store())], value=none)\n for name in closure_names\n ]\n\n empty_args = ast.arguments(\n posonlyargs=[], args=[], vararg=None, kwonlyargs=[], kwarg=None, defaults=[], kw_defaults=[]\n )\n\n wrapper_def = def_type(\n name=\"__peval_wrapper\",\n args=empty_args,\n decorator_list=[],\n body=(\n fake_closure_vars\n + [function_def]\n + [ast.Return(value=ast.Name(id=function_def.name, ctx=ast.Load()))]\n ),\n )\n\n wrapper = eval_function_def(wrapper_def, globals_=globals_, flags=flags)\n return wrapper()", "def _get_function_wrapper(\n self, func: typing.Callable[..., typing.Union[\"typing.Awaitable[typing.Any]\", typing.Any]]\n ) -> typing.Callable[..., \"typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]\"]:\n prepared = self._await_if_required(func)\n\n # noinspection PyMissingOrEmptyDocstring\n @functools.wraps(prepared)\n def wrapper(\n *args: typing.Any, **kwargs: typing.Any\n ) -> typing.Union[\"concurrent.futures.Future[typing.Any]\", \"typing.Awaitable[typing.Any]\"]:\n \"\"\"Main function wrapper.\n\n :return: coroutine or function\n :rtype: Union[Awaitable, concurrent.futures.Future]\n \"\"\"\n loop: typing.Optional[asyncio.AbstractEventLoop] = self._get_loop(*args, **kwargs)\n\n if loop is None:\n return self.executor.submit(prepared, *args, **kwargs)\n\n return loop.run_in_executor(self.executor, functools.partial(prepared, *args, **kwargs))\n\n return wrapper", "def 
wrapper(*args, **kwargs):\n print('S: Function {} args: {} kwargs: {}'.format(\n function.__name__, str(args), str(kwargs)))\n return function(*args, **kwargs)", "def _wrap_FunctionDefResult(self, expr):\n var = expr.var\n name = var.name\n scope = self.scope\n # Make name available for later\n scope.insert_symbol(name)\n local_var = var.clone(scope.get_expected_name(name))\n\n if local_var.rank:\n # Allocatable is not returned so it must appear in local scope\n scope.insert_variable(local_var, name)\n\n # Create the C-compatible data pointer\n bind_var = Variable(dtype=BindCPointer(),\n name=scope.get_new_name('bound_'+name),\n is_const=False, memory_handling='alias')\n scope.insert_variable(bind_var)\n\n result = BindCFunctionDefResult(bind_var, var, scope)\n\n # Save the shapes of the array\n self._additional_exprs.extend([Assign(result.shape[i], var.shape[i]) for i in range(var.rank)])\n\n # Create an array variable which can be passed to CLocFunc\n ptr_var = var.clone(scope.get_new_name(name+'_ptr'),\n memory_handling='alias')\n scope.insert_variable(ptr_var)\n\n # Define the additional steps necessary to define and fill ptr_var\n alloc = Allocate(ptr_var, shape=result.shape,\n order=var.order, status='unallocated')\n copy = Assign(ptr_var, var)\n c_loc = CLocFunc(ptr_var, bind_var)\n self._additional_exprs.extend([alloc, copy, c_loc])\n\n return result\n else:\n return BindCFunctionDefResult(local_var, var, scope)", "def wrap(original_function: typing.Callable) -> typing.Callable:\n\n def decorator(new_function):\n if not isinstance(original_function, typing.Callable):\n raise TypeError(\"Argument passed to @wrap decorator must be a Callable\")\n\n # record function replacement for inspection purposes\n REPLACED_FUNCTIONS[f'{sys.modules[original_function.__module__].__name__}.{original_function.__name__}'] \\\n = f'{sys.modules[new_function.__module__].__name__}.{new_function.__name__}'\n\n # supply orginal_function as first argument to new_function\n def wrapper(*args, **kwargs):\n return new_function(original_function, *args, **kwargs)\n\n # copy properies such as __doc__, __module__ from original_function to wrapper\n functools.update_wrapper(wrapper, original_function)\n\n # replace function\n setattr(sys.modules[original_function.__module__], original_function.__name__, wrapper)\n return wrapper\n\n return decorator", "def make_function_asp_callable(*args: Any) -> _AnyCallable:\n if not args:\n raise ValueError(\"Invalid call to decorator\")\n\n # If the last element is not a function to be wrapped then a signature has\n # been specified.\n if TypeCastSignature.is_return_element(args[-1]):\n sigs = args\n fn = None\n else:\n # Last element needs to be a function\n fn = args[-1]\n if not callable(fn):\n raise ValueError(\"Invalid call to decorator\")\n\n # if exactly one element then use function annonations\n if len(args) == 1:\n sigs = _get_annotations(fn)\n else:\n sigs = args[:-1]\n\n # A decorator function that adjusts for the given signature\n def _sig_decorate(func):\n s = TypeCastSignature(*sigs)\n return s.wrap_function(func)\n\n # If no function and sig then called as a decorator with arguments\n if not fn and sigs:\n return _sig_decorate\n\n return _sig_decorate(fn)", "def WrapFunction(lib, funcname, restype, argtypes):\n func = lib.__getattr__(funcname)\n func.restype = restype\n func.argtypes = argtypes\n return func", "def __call__(self, function: Callable) -> Callable:\n\n @functools.wraps(function)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n with self:\n 
return function(*args, **kwargs)\n\n return wrapper", "def __call__(self, function: Callable) -> Callable:\n\n @functools.wraps(function)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n with self:\n return function(*args, **kwargs)\n\n return wrapper", "def proxify_routine(routine, impl=None):\n\n # init impl\n impl = routine if impl is None else impl\n\n is_method = ismethod(routine)\n if is_method:\n function = get_method_function(routine)\n else:\n function = routine\n\n # flag which indicates that the function is not a pure python function\n # and has to be wrapped\n wrap_function = not hasattr(function, '__code__')\n\n try:\n # get params from routine\n args, varargs, kwargs, _ = getargspec(function)\n except TypeError:\n # in case of error, wrap the function\n wrap_function = True\n\n if wrap_function:\n # if function is not pure python, create a generic one\n # with assignments\n assigned = []\n for wrapper_assignment in WRAPPER_ASSIGNMENTS:\n if hasattr(function, wrapper_assignment):\n assigned.append(wrapper_assignment)\n # and updates\n updated = []\n for wrapper_update in WRAPPER_UPDATES:\n if hasattr(function, wrapper_update):\n updated.append(wrapper_update)\n\n @wraps(function, assigned=assigned, updated=updated)\n def wrappedfunction(*args, **kwargs):\n \"\"\"Default wrap function.\"\"\"\n\n function = wrappedfunction\n # get params from function\n args, varargs, kwargs, _ = getargspec(function)\n\n name = function.__name__\n\n result = _compilecode(\n function=function, name=name, impl=impl,\n args=args, varargs=varargs, kwargs=kwargs\n )\n\n # set wrapping assignments\n for wrapper_assignment in WRAPPER_ASSIGNMENTS:\n try:\n value = getattr(function, wrapper_assignment)\n except AttributeError:\n pass\n else:\n setattr(result, wrapper_assignment, value)\n\n # set proxy module\n result.__module__ = proxify_routine.__module__\n\n # update wrapping updating\n for wrapper_update in WRAPPER_UPDATES:\n try:\n value = getattr(function, wrapper_update)\n except AttributeError:\n pass\n else:\n getattr(result, wrapper_update).update(value)\n\n # set proxyfied element on proxy\n setattr(result, __PROXIFIED__, routine)\n\n if is_method: # create a new method\n args = [result, get_method_self(routine)]\n if PY2:\n args.append(routine.im_class)\n result = MethodType(*args)\n\n return result", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = 
{};\\n'.format(code)\n catch += ' static std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def bound_method_wrapper(*args, **kwargs):\n # __wrapped__ allows AutoGraph to swap in a converted function.\n strong_bound_method_wrapper = weak_bound_method_wrapper()\n wrapped_fn = strong_bound_method_wrapper.__wrapped__\n\n if wrapped_fn is strong_bound_method_wrapper.__original_wrapped__:\n # If __wrapped__ was not replaced, then call original_function.\n # TODO(mdan): For better consistency, use the wrapper's call().\n wrapped_fn = original_function.python_function\n return wrapped_fn(weak_instance(), *args, **kwargs)\n\n # If __wrapped__ was replaced, then it is always an unbound function.\n # However, the replacer is still responsible for attaching self properly.\n # TODO(mdan): Is it possible to do it here instead?\n return wrapped_fn(*args, **kwargs)", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", 
\"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. 
{ \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index" ]
[ "0.65536374", "0.62138677", "0.6150388", "0.6098803", "0.6089656", "0.60039127", "0.59987605", "0.59216076", "0.59151495", "0.58322114", "0.5787681", "0.57753617", "0.5758665", "0.5709189", "0.5702252", "0.564342", "0.5591137", "0.55847555", "0.5573809", "0.55646837", "0.55634725", "0.552368", "0.5514771", "0.54932797", "0.54886836", "0.54886836", "0.5464162", "0.5456604", "0.5435418", "0.54282355" ]
0.75368756
0
Create the equivalent BindCFunctionDefArgument for a C-compatible function. Take a FunctionDefArgument and create a BindCFunctionDefArgument describing all the information that should be passed to the C-compatible function in order to be able to create the argument described by `expr`. In the case of a scalar numerical argument, the function simply creates a copy of the variable described by the function argument in the local scope. In the case of an array, C cannot represent the array natively; rather, it is stored in a pointer. This function therefore creates a variable to represent that pointer. Additionally, information about the shape and strides of the array is necessary; however, these objects are created by the `BindCFunctionDefArgument` class. The objects which describe the argument passed to the `expr` argument of the original function are also created here. However, the expressions necessary to collect the information from the BindCFunctionDefArgument in order to create these objects are left for later. This is done to ensure that optionals are handled locally to the function call, so that `if` conditions are not duplicated.
def _wrap_FunctionDefArgument(self, expr): var = expr.var name = var.name self.scope.insert_symbol(name) collisionless_name = self.scope.get_expected_name(var.name) if var.is_ndarray or var.is_optional: new_var = Variable(BindCPointer(), self.scope.get_new_name(f'bound_{name}'), is_argument = True, is_optional = False, memory_handling='alias') arg_var = var.clone(collisionless_name, is_argument = False, is_optional = False, memory_handling = 'alias', allows_negative_indexes=False) self.scope.insert_variable(arg_var) else: new_var = var.clone(collisionless_name) self.scope.insert_variable(new_var) return BindCFunctionDefArgument(new_var, value = expr.value, original_arg_var = expr.var, kwonly = expr.is_kwonly, annotation = expr.annotation, scope=self.scope)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _wrap_FunctionDef(self, expr):\n if expr.is_private:\n return EmptyNode()\n\n name = self.scope.get_new_name(f'bind_c_{expr.name.lower()}')\n self._wrapper_names_dict[expr.name] = name\n\n # Create the scope\n func_scope = self.scope.new_child_scope(name)\n self.scope = func_scope\n\n self._additional_exprs = []\n\n if any(isinstance(a.var, FunctionAddress) for a in expr.arguments):\n warnings.warn(\"Functions with functions as arguments cannot be wrapped by pyccel\")\n return EmptyNode()\n\n # Wrap the arguments and collect the expressions passed as the call argument.\n func_arguments = [self._wrap(a) for a in expr.arguments]\n call_arguments = [self._get_call_argument(fa) for fa in func_arguments]\n func_to_call = {fa : ca for ca, fa in zip(call_arguments, func_arguments)}\n\n func_results = [self._wrap_FunctionDefResult(r) for r in expr.results]\n\n func_call_results = [r.var.clone(self.scope.get_expected_name(r.var.name)) for r in expr.results]\n\n body = self._get_function_def_body(expr, func_arguments, func_to_call, func_call_results)\n\n body.extend(self._additional_exprs)\n self._additional_exprs.clear()\n\n self.exit_scope()\n\n func = BindCFunctionDef(name, func_arguments, func_results, body, scope=func_scope, original_function = expr,\n doc_string = expr.doc_string)\n\n self.scope.functions[name] = func\n\n return func", "def _wrap_FunctionDefResult(self, expr):\n var = expr.var\n name = var.name\n scope = self.scope\n # Make name available for later\n scope.insert_symbol(name)\n local_var = var.clone(scope.get_expected_name(name))\n\n if local_var.rank:\n # Allocatable is not returned so it must appear in local scope\n scope.insert_variable(local_var, name)\n\n # Create the C-compatible data pointer\n bind_var = Variable(dtype=BindCPointer(),\n name=scope.get_new_name('bound_'+name),\n is_const=False, memory_handling='alias')\n scope.insert_variable(bind_var)\n\n result = BindCFunctionDefResult(bind_var, var, scope)\n\n # Save the shapes of the array\n self._additional_exprs.extend([Assign(result.shape[i], var.shape[i]) for i in range(var.rank)])\n\n # Create an array variable which can be passed to CLocFunc\n ptr_var = var.clone(scope.get_new_name(name+'_ptr'),\n memory_handling='alias')\n scope.insert_variable(ptr_var)\n\n # Define the additional steps necessary to define and fill ptr_var\n alloc = Allocate(ptr_var, shape=result.shape,\n order=var.order, status='unallocated')\n copy = Assign(ptr_var, var)\n c_loc = CLocFunc(ptr_var, bind_var)\n self._additional_exprs.extend([alloc, copy, c_loc])\n\n return result\n else:\n return BindCFunctionDefResult(local_var, var, scope)", "def _get_call_argument(self, bind_c_arg):\n original_arg = bind_c_arg.original_function_argument_variable\n arg_var = self.scope.find(original_arg.name, category='variables')\n if original_arg.is_ndarray:\n start = LiteralInteger(1) # C_F_Pointer leads to default Fortran lbound\n stop = None\n indexes = [Slice(start, stop, step) for step in bind_c_arg.strides]\n return IndexedElement(arg_var, *indexes)\n else:\n return arg_var", "def _wrap_Variable(self, expr):\n if expr.rank == 0 and expr.dtype in NativeNumeric:\n return expr.clone(expr.name, new_class = BindCVariable)\n else:\n scope = self.scope\n func_name = scope.get_new_name('bind_c_'+expr.name.lower())\n func_scope = scope.new_child_scope(func_name)\n mod = expr.get_user_nodes(Module)[0]\n import_mod = Import(mod.name, AsName(expr,expr.name), mod=mod)\n func_scope.imports['variables'][expr.name] = expr\n\n # Create the data pointer\n 
bind_var = Variable(dtype=BindCPointer(),\n name=scope.get_new_name('bound_'+expr.name),\n is_const=True, memory_handling='alias')\n func_scope.insert_variable(bind_var)\n\n result = BindCFunctionDefResult(bind_var, expr, func_scope)\n if expr.rank == 0:\n #assigns = []\n #c_loc = CLocFunc(expr, bind_var)\n raise NotImplementedError(\"Classes cannot be wrapped\")\n else:\n assigns = [Assign(result.shape[i], expr.shape[i]) for i in range(expr.rank)]\n c_loc = CLocFunc(expr, bind_var)\n body = [*assigns, c_loc]\n func = BindCFunctionDef(name = func_name,\n body = body,\n arguments = [],\n results = [result],\n imports = [import_mod],\n scope = func_scope,\n original_function = expr)\n return expr.clone(expr.name, new_class = BindCArrayVariable, wrapper_function = func,\n original_variable = expr)", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n 
generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def _convert_args(self, expr, args, kwargs):\n assert expr is not None\n\n if not kwargs:\n return args\n\n if kwargs and not isinstance(expr, Function):\n raise Exception(\"can only supply keyword parameters for a \"\n \"relay.Function, found {0}\".format(expr))\n\n params = expr.params\n param_names = [p.name_hint for p in params]\n num_of_args = len(args)\n\n cargs = list(args)[:]\n for i, name in enumerate(param_names):\n if i < num_of_args:\n if kwargs.get(name):\n raise Exception(\n \"duplicate argument supplied in \"\n \"both positional args (at position: {0}), \"\n \"and keyword argument (with name: {1})\".format(i, name))\n else:\n cargs.append(kwargs[name])\n\n if len(cargs) != len(params):\n raise Exception(\n \"insufficient arguments, expected \"\n \"{0}, provided {1}\".format(len(cargs), len(params)))\n\n return tuple(cargs)", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in 
[\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def _get_function_def_body(self, func, func_def_args, func_arg_to_call_arg, results, handled = ()):\n optional = next((a for a in func_def_args if a.original_function_argument_variable.is_optional and a not in handled), None)\n if optional:\n args = func_def_args.copy()\n optional_var = optional.var\n handled += (optional, )\n true_section = IfSection(PyccelIsNot(optional_var, Nil()),\n self._get_function_def_body(func, args, func_arg_to_call_arg, results, handled))\n args.remove(optional)\n false_section = IfSection(LiteralTrue(),\n self._get_function_def_body(func, args, func_arg_to_call_arg, results, handled))\n return [If(true_section, false_section)]\n else:\n args = [FunctionCallArgument(func_arg_to_call_arg[fa],\n keyword = fa.original_function_argument_variable.name)\n for fa in func_def_args]\n size = [fa.shape[::-1] if fa.original_function_argument_variable.order == 'C' else\n fa.shape for fa in func_def_args]\n stride = [fa.strides[::-1] if fa.original_function_argument_variable.order == 'C' else\n fa.strides for fa in func_def_args]\n orig_size = [[PyccelMul(l,s) for l,s in zip(sz, st)] for sz,st in zip(size,stride)]\n body = [C_F_Pointer(fa.var, func_arg_to_call_arg[fa].base, s)\n for fa,s in zip(func_def_args, orig_size)\n if isinstance(func_arg_to_call_arg[fa], IndexedElement)]\n body += [C_F_Pointer(fa.var, func_arg_to_call_arg[fa])\n for fa in func_def_args\n if not isinstance(func_arg_to_call_arg[fa], IndexedElement) \\\n and fa.original_function_argument_variable.is_optional]\n\n # If the function is inlined and takes an array argument create a pointer to ensure that the bounds\n # are respected\n if func.is_inline and any(isinstance(a.value, IndexedElement) for a in args):\n array_args = {a: self.scope.get_temporary_variable(a.value.base, a.keyword, memory_handling = 'alias') for a in args if isinstance(a.value, IndexedElement)}\n body += [AliasAssign(v, k.value) for k,v in array_args.items()]\n args = [FunctionCallArgument(array_args[a], keyword=a.keyword) if a in array_args else a for a in args]\n\n func_call = Assign(results[0], FunctionCall(func, args)) if len(results) == 1 
else \\\n Assign(results, FunctionCall(func, args))\n return body + [func_call]", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", 
\"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. 
{ \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname", "def prepare_arguments(self, ftyp, args):\n # Determine fixed and variable arguments:\n if ftyp.is_vararg:\n fixed_amount = len(ftyp.arguments)\n fixed_args = args[:fixed_amount]\n var_args = args[fixed_amount:]\n else:\n fixed_args = args\n var_args = []\n\n # Evaluate arguments:\n ir_arguments = []\n\n # If return value is complex, reserve room for it an pass pointer\n if ftyp.return_type.is_struct:\n size, alignment = self.data_layout(ftyp.return_type)\n rval_alloc = self.emit(ir.Alloc(\"rval_alloc\", size, alignment))\n rval_ptr = self.emit(ir.AddressOf(rval_alloc, \"rval_ptr\"))\n ir_arguments.append(rval_ptr)\n else:\n rval_alloc = None\n\n # Place other arguments:\n for argument in fixed_args:\n value = self.gen_expr(argument, rvalue=True)\n ir_arguments.append(value)\n\n # Handle variable arguments:\n if ftyp.is_vararg:\n vararg_ptr = 
self.gen_fill_varargs(var_args)\n ir_arguments.append(vararg_ptr)\n else:\n assert not var_args\n\n return ir_arguments, rval_alloc", "def result_as_arg(self, node, C_new):\n F_new = C_new.clone()\n\n # Fortran function should wrap the new C function\n F_new._PTR_F_C_index = C_new._function_index\n F_new.wrap.assign(fortran=True)\n # Do not add '_bufferify'\n F_new.fmtdict.function_suffix = node.fmtdict.function_suffix\n\n # Do not wrap original function (does not have result argument)\n node.wrap.fortran = False\n return F_new", "def getAstNode_newTypeInstance(funcEnv, objType, argAst=None, argType=None):\n interpreter = funcEnv.interpreter\n origObjType = objType\n while isinstance(objType, CTypedef):\n objType = objType.type\n while isinstance(argType, CTypedef):\n argType = argType.type\n\n if isinstance(objType, CBuiltinType) and objType.builtinType == (\"void\",):\n # It's like a void cast. Return None.\n if argAst is None:\n return NoneAstNode\n tup = ast.Tuple(elts=(argAst, NoneAstNode), ctx=ast.Load())\n return getAstNodeArrayIndex(tup, 1)\n\n arrayLen = None\n if isinstance(objType, CArrayType):\n arrayOf = getAstNodeForVarType(funcEnv, objType.arrayOf)\n if objType.arrayLen:\n arrayLen = getConstValue(interpreter.globalScope.stateStruct, objType.arrayLen)\n assert arrayLen is not None\n if isinstance(argType, (tuple, list)):\n assert arrayLen == len(argType)\n else:\n # Handle array type extra here for the case when array-len is not specified.\n assert argType is not None\n if isinstance(argType, (tuple, list)):\n arrayLen = len(argType)\n else:\n assert isinstance(argType, CArrayType)\n arrayLen = getConstValue(interpreter.globalScope.stateStruct, argType.arrayLen)\n assert arrayLen is not None\n # Write back to type so that future getCType calls will succeed.\n objType.arrayLen = CNumber(arrayLen)\n\n typeAst = ast.BinOp(left=arrayOf, op=ast.Mult(), right=ast.Num(n=arrayLen))\n else:\n typeAst = getAstNodeForVarType(funcEnv, origObjType)\n\n if isinstance(argType, (tuple, list)): # CCurlyArrayArgs\n assert isinstance(argAst, ast.Tuple)\n assert len(argAst.elts) == len(argType)\n # There is a bit of inconsistency between basic types init\n # (like c_int), which must get a value (int),\n # and ctypes.Structure/ctypes.ARRAY, which for some field can either\n # get a value (int) or a c_int. 
For pointers, it must get\n # the var, not the value.\n # This is mostly the same as for calling functions.\n f_args = []\n while isinstance(objType, CTypedef):\n objType = objType.type\n if isinstance(objType, CStruct):\n for c in objType.body.contentlist:\n if not isinstance(c, CVarDecl): continue\n f_args += [c.type]\n elif isinstance(objType, CArrayType):\n f_args += [objType.arrayOf] * arrayLen\n else:\n assert False, \"did not expect type %r\" % objType\n assert len(argType) <= len(f_args)\n # Somewhat like autoCastArgs():\n s_args = []\n for f_arg_type, s_arg_ast, s_arg_type in zip(f_args, argAst.elts, argType):\n s_arg_ast = _makeVal(funcEnv, f_arg_type, s_arg_ast, s_arg_type)\n s_args += [s_arg_ast]\n return makeAstNodeCall(typeAst, *s_args)\n\n if isinstance(objType, CArrayType) and isinstance(argType, CArrayType):\n return ast.Call(func=typeAst, args=[], keywords=[], starargs=argAst, kwargs=None)\n\n if isinstance(argType, CWrapFuncType):\n if isVoidPtrType(objType):\n vAst = getAstNode_newTypeInstance(\n funcEnv, CFuncPointerDecl(type=argType.func.type, args=argType.func.args),\n argAst=argAst, argType=argType)\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, vAst, typeAst)\n if isinstance(objType, CWrapFuncType):\n return argAst\n assert isinstance(objType, CFuncPointerDecl) # what other case could there be?\n return makeAstNodeCall(getAstNodeAttrib(\"helpers\", \"makeFuncPtr\"), typeAst, argAst)\n\n if isinstance(objType, CPointerType) and usePyRefForType(objType.pointerOf):\n # We expect a PyRef.\n return makeAstNodeCall(getAstNodeAttrib(\"helpers\", \"PyRef\"),\n *([getAstNodeAttrib(argAst, \"ref\")] if argAst else []))\n\n if isPointerType(objType, checkWrapValue=True) and isPointerType(argType, checkWrapValue=True):\n # We can have it simpler. 
This is even important in some cases\n # were the pointer instance is temporary and the object\n # would get freed otherwise!\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, argAst, typeAst)\n\n if isSameType(interpreter.globalScope.stateStruct, objType, ctypes.c_void_p) and \\\n isinstance(argType, CFuncPointerDecl):\n # We treat CFuncPointerDecl not as a normal pointer.\n # However, we allow casts to c_void_p.\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, argAst, typeAst)\n\n if isinstance(objType, CFuncPointerDecl) and isinstance(argType, CFuncPointerDecl):\n # We did not allow a pointer-to-func-ptr cast above.\n # But we allow func-ptr-to-func-ptr.\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, argAst, typeAst)\n\n args = []\n if argAst is not None:\n if isinstance(argAst, (ast.Str, ast.Num)):\n args += [argAst]\n elif argType is not None:\n args += [getAstNode_valueFromObj(interpreter._cStateWrapper, argAst, argType)]\n else:\n # expect that it is the AST for the value.\n # there is no really way to 'assert' this.\n args += [argAst]\n\n if isPointerType(objType, checkWrapValue=True) and argAst is not None:\n # Note that we already covered the case where both objType and argType\n # are pointer types, and we get a ctypes pointer object.\n # In that case, we can use ctypes.cast, which is more or less safe.\n # Note what this case here means:\n # We get an integer from somewhere, and interpret is as a pointer.\n # So, if there is a bug in how we got this integer, this can\n # potentially lead to an invalid pointer and hard to find bug.\n # Also, if the memory was allocated before by Python,\n # normally the ctypes pointer handling would keep a reference\n # to the underlying Python object.\n # When we however just get the raw pointer address as an integer\n # and then convert that back to a pointer at this place,\n # it doesn't know about the underlying Python objects.\n # When the underlying Python objects will get out-of-scope\n # at some later point, which we cannot control here,\n # this again would lead to hard to find bugs.\n assert len(args) == 1\n return makeAstNodeCall(getAstNodeAttrib(\"intp\", \"_getPtr\"), args[0], typeAst)\n #astVoidPT = getAstNodeAttrib(\"ctypes\", \"c_void_p\")\n #astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n #astVoidP = makeAstNodeCall(astVoidPT, *args)\n #return makeAstNodeCall(astCast, astVoidP, typeAst)\n\n if isIntType(objType) and args:\n # Introduce a Python int-cast, because ctypes will fail if it is a float or so.\n assert len(args) == 1\n args = [makeAstNodeCall(ast.Name(id=\"int\", ctx=ast.Load()), *args)]\n if isinstance(objType, (CStruct, CUnion)) and argAst:\n # We get the object itself. 
We expect that this is supposed to be a copy.\n # However, there is no such thing as a copy constructor.\n assert len(args) == 1\n return makeAstNodeCall(Helpers.assign, makeAstNodeCall(typeAst), *args)\n if isinstance(objType, CVariadicArgsType):\n if argAst:\n return makeAstNodeCall(Helpers.VarArgs, argAst)\n assert isinstance(funcEnv.astNode, ast.FunctionDef)\n # TODO: Normally, we would assign the var via va_start().\n # However, we just always initialize with the varargs tuple also already here\n # because we have the ref to the real varargs here.\n # See globalincludewrappers.\n return makeAstNodeCall(\n Helpers.VarArgs,\n ast.Name(id=funcEnv.astNode.args.vararg or \"None\", ctx=ast.Load()),\n ast.Name(id=\"intp\", ctx=ast.Load()))\n return makeAstNodeCall(typeAst, *args)", "def _build_comute_argtype(num_nd, num_nd_write):\n ret = [_xc_func_p, ctypes.c_size_t]\n ret += [_ndptr] * num_nd\n ret += [_ndptr_w] * num_nd_write\n return tuple(ret)", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def argument(*args, **kwargs):\n def deco(fct):\n if isinstance(fct, Command):\n cmd = fct\n cmd.add_argument(*args, **kwargs)\n else:\n if not hasattr(fct, '_acmdlib_arguments'):\n fct._acmdlib_arguments = []\n fct._acmdlib_arguments.append((args, kwargs))\n #print \"===\",args,kwargs,type(fct),fct\n return fct\n return deco", "def gen_va_arg(self, expr: expressions.BuiltInVaArg):\n # TODO: how to deal with proper alignment?\n valist_ptrptr = self.gen_expr(expr.arg_pointer, rvalue=False)\n va_ptr = self.emit(ir.Load(valist_ptrptr, \"va_ptr\", ir.ptr))\n ir_typ = self.get_ir_type(expr.typ)\n # Load the variable argument:\n value = self.emit(ir.Load(va_ptr, \"va_arg\", ir_typ))\n size = 
self.emit(ir.Const(self.sizeof(expr.typ), \"size\", ir.ptr))\n va_ptr = self.emit(ir.add(va_ptr, size, \"incptr\", ir.ptr))\n self.emit(ir.Store(va_ptr, valist_ptrptr))\n return value", "def bind(self, function, execOnUpdate=True, arguments=[]):\n if isinstance(function, types.FunctionType):\n self.functions.append(function)\n self.functionArguments.append(arguments)\n self.functionUpdate.append(execOnUpdate)\n else:\n raise Exception(\"el elemento a agregar debe ser una funcion\")", "def eval_function_def_as_closure(\n function_def: ast.FunctionDef,\n closure_names: List[str],\n globals_: Optional[ConstsDictT] = None,\n flags: Optional[int] = None,\n) -> Callable:\n def_type = type(function_def)\n assert def_type in (ast.FunctionDef, ast.AsyncFunctionDef)\n\n none = ast.NameConstant(value=None)\n\n # We can't possibly recreate ASTs of existing closure variables\n # (because all we have are their values).\n # So we create fake closure variables for the function to attach to,\n # and then substitute the closure cells with the ones obtained from\n # the \"prototype\" of this function (a ``types.FunctionType`` object\n # from which this tree was extracted).\n fake_closure_vars = [\n ast.Assign(targets=[ast.Name(id=name, ctx=ast.Store())], value=none)\n for name in closure_names\n ]\n\n empty_args = ast.arguments(\n posonlyargs=[], args=[], vararg=None, kwonlyargs=[], kwarg=None, defaults=[], kw_defaults=[]\n )\n\n wrapper_def = def_type(\n name=\"__peval_wrapper\",\n args=empty_args,\n decorator_list=[],\n body=(\n fake_closure_vars\n + [function_def]\n + [ast.Return(value=ast.Name(id=function_def.name, ctx=ast.Load()))]\n ),\n )\n\n wrapper = eval_function_def(wrapper_def, globals_=globals_, flags=flags)\n return wrapper()", "def column_bind(arguments):\n return Component(\n \"ColumnBind\",\n arguments=arguments,\n options={\n \n },\n constraints=None)", "def _make_function_def_arglist(endpoint_info):\n required_ordered = [(pa['argument'], \"required\") for pa in endpoint_info.get('positional', []) if pa['required']]\n required_options = [(oa, \"required\") for oa, info in endpoint_info['options'].items() if info['required']]\n\n non_required_ordered = [pa for pa in endpoint_info.get('positional', []) if not pa['required']]\n non_required_ordered = map(lambda pa: (pa['argument'], pa.get('default', None)), non_required_ordered)\n\n non_required_options = [(oa, info) for (oa, info) in endpoint_info['options'].items() if not info['required']]\n non_required_options = map(lambda el: (el[0], el[1].get('default', None)), non_required_options)\n\n function_def_arglist = []\n for arglist in (required_ordered, required_options, non_required_ordered, non_required_options):\n function_def_arglist.extend(arglist)\n\n return function_def_arglist", "def add_c_arg(self, c_arg):\n self._c_args.append(c_arg)\n self.add_decompostion(c_arg)", "def gen_call(self, expr: expressions.FunctionCall):\n assert expr.callee.typ.is_pointer\n ftyp = expr.callee.typ.element_type\n assert isinstance(ftyp, types.FunctionType)\n\n ir_arguments, rval_alloc = self.prepare_arguments(ftyp, expr.args)\n\n callee = self.gen_expr(expr.callee, rvalue=True)\n\n # Use function or procedure call depending on return type:\n if ftyp.return_type.is_void:\n self.emit(ir.ProcedureCall(callee, ir_arguments))\n value = None\n elif ftyp.return_type.is_struct:\n self.emit(ir.ProcedureCall(callee, ir_arguments))\n value = rval_alloc\n else:\n ir_typ = self.get_ir_type(expr.typ)\n value = self.emit(\n ir.FunctionCall(callee, ir_arguments, 
\"result\", ir_typ)\n )\n\n return value", "def _gen_closure(fnc,arg):\n return lambda s: fnc(arg)", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n\n if self.signature.ret_arg:\n output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],\n ll.PointerType(self.fp_type))\n for i, val in enumerate(ret):\n index = ll.Constant(ll.IntType(32), i)\n output_array_ptr = builder.gep(output_fp_ptr, [index])\n builder.store(val, output_array_ptr)\n builder.ret(ll.Constant(ll.IntType(32), 0)) # return success\n else:\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def fsig(\n arg_types: ArgTypes, name: Text, span: Span, ctx: DeduceCtx,\n parametric_bindings: Optional[ParametricBindings]\n) -> Tuple[ConcreteType, SymbolicBindings]:\n logging.vlog(5, 'Instantiating for builtin %r @ %s', name, span)\n _Checker(arg_types, name, span).len(2).is_array(0).is_fn(1, argc=1)\n t = arg_types[0].get_element_type() # pytype: disable=attribute-error\n u, symbolic_bindings = parametric_instantiator.instantiate_function(\n span, arg_types[1], (t,), ctx, parametric_bindings, {})\n return_type = ArrayType(u, arg_types[0].size) # pytype: disable=attribute-error\n return FunctionType(arg_types, return_type), symbolic_bindings", "def compile_function(self, function, arguments):", "def _create_args(self, func_args):\n self.llvm_ret_type = self._from_ctype(self.signature.ret_type)\n self.llvm_arg_types = \\\n [self._from_ctype(a) for a in self.signature.arg_ctypes]", "def call_top_interface_args_with_func_def(self, node: AnnCastCall):\n # call container is used to scope parameters\n call_con_name = call_container_name(node)\n\n # create argument and parameter variables\n # argument variables are inputs to the top interface\n # paramter variables are outputs of the top interface\n for i, n in enumerate(node.arguments):\n # argument name and scope str\n arg_name = call_argument_name(node, i)\n arg_con_scopestr = con_scope_to_str(node.func.con_scope)\n\n # parameter name and scopestr\n func_def = self.pipeline_state.func_def_node_from_id(node.func.id)\n param = func_def.func_args[i]\n assert(isinstance(param, AnnCastVar))\n param_name = param.val.name\n param_con_scopestr = con_scope_to_str(node.func.con_scope + [call_con_name])\n\n # argument and parameter share id, and start with initial version\n id = self.pipeline_state.next_collapsed_id()\n version = VAR_INIT_VERSION\n\n # build and store GrFN variables for argument and parameter\n arg_grfn_var = create_grfn_var(arg_name, id, version, arg_con_scopestr)\n arg_fullid = build_fullid(arg_name, id, version, arg_con_scopestr)\n self.pipeline_state.store_grfn_var(arg_fullid, arg_grfn_var)\n # store arg_fullid\n node.arg_index_to_fullid[i] = arg_fullid\n # create From Source metadata for the GrFN var\n from_source = False\n from_source_mdata = generate_from_source_metadata(from_source, VariableCreationReason.FUNC_ARG)\n add_metadata_to_grfn_var(arg_grfn_var, from_source_mdata)\n\n param_grfn_var = create_grfn_var(param_name, id, version, param_con_scopestr)\n param_fullid = build_fullid(param_name, id, version, param_con_scopestr)\n self.pipeline_state.store_grfn_var(param_fullid, param_grfn_var)\n # store param_fullid\n node.param_index_to_fullid[i] = param_fullid\n # create From Source metadata for the GrFN var\n 
add_metadata_from_name_node(param_grfn_var, param.val)\n\n # link argument and parameter through top interface\n node.top_interface_in[id] = arg_fullid\n node.top_interface_out[id] = param_fullid\n\n # DEBUG printing\n if self.pipeline_state.PRINT_DEBUGGING_INFO:\n print(\"After create_call_args_and_params():\")\n print(f\"\\ttop_interface_in = {node.top_interface_in}\")\n print(f\"\\ttop_interface_out = {node.top_interface_out}\")" ]
[ "0.6402835", "0.59598666", "0.58069235", "0.5668584", "0.54364705", "0.5237507", "0.5124962", "0.49953586", "0.48212668", "0.47471574", "0.47356513", "0.46868324", "0.46776998", "0.46632668", "0.46530426", "0.46255657", "0.45983043", "0.45781806", "0.45673853", "0.45637605", "0.44775516", "0.44728318", "0.44317135", "0.4389808", "0.4371178", "0.43647647", "0.43571058", "0.4315944", "0.43054858", "0.4303913" ]
0.76825356
0
Create the equivalent BindCFunctionDefResult for a C-compatible function. Take a FunctionDefResult and create a BindCFunctionDefResult describing all the information that should be returned from the C-compatible function in order to fully describe the result `expr`. This function also adds any expressions necessary to build the C-compatible return value to `self._additional_exprs`. In the case of a scalar numerical result, the function simply creates a local version of the variable described by the function result and returns the BindCFunctionDefResult. In the case of an array, C cannot represent the array natively; rather, it is stored in a pointer. This function therefore creates a variable to represent that pointer. Additionally, information about the shape and strides of the array is necessary. These objects are created by the `BindCFunctionDefResult` class. The assignment expressions which define the shapes and strides are then stored in `self._additional_exprs` along with the allocation of the pointer.
def _wrap_FunctionDefResult(self, expr): var = expr.var name = var.name scope = self.scope # Make name available for later scope.insert_symbol(name) local_var = var.clone(scope.get_expected_name(name)) if local_var.rank: # Allocatable is not returned so it must appear in local scope scope.insert_variable(local_var, name) # Create the C-compatible data pointer bind_var = Variable(dtype=BindCPointer(), name=scope.get_new_name('bound_'+name), is_const=False, memory_handling='alias') scope.insert_variable(bind_var) result = BindCFunctionDefResult(bind_var, var, scope) # Save the shapes of the array self._additional_exprs.extend([Assign(result.shape[i], var.shape[i]) for i in range(var.rank)]) # Create an array variable which can be passed to CLocFunc ptr_var = var.clone(scope.get_new_name(name+'_ptr'), memory_handling='alias') scope.insert_variable(ptr_var) # Define the additional steps necessary to define and fill ptr_var alloc = Allocate(ptr_var, shape=result.shape, order=var.order, status='unallocated') copy = Assign(ptr_var, var) c_loc = CLocFunc(ptr_var, bind_var) self._additional_exprs.extend([alloc, copy, c_loc]) return result else: return BindCFunctionDefResult(local_var, var, scope)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _wrap_FunctionDef(self, expr):\n if expr.is_private:\n return EmptyNode()\n\n name = self.scope.get_new_name(f'bind_c_{expr.name.lower()}')\n self._wrapper_names_dict[expr.name] = name\n\n # Create the scope\n func_scope = self.scope.new_child_scope(name)\n self.scope = func_scope\n\n self._additional_exprs = []\n\n if any(isinstance(a.var, FunctionAddress) for a in expr.arguments):\n warnings.warn(\"Functions with functions as arguments cannot be wrapped by pyccel\")\n return EmptyNode()\n\n # Wrap the arguments and collect the expressions passed as the call argument.\n func_arguments = [self._wrap(a) for a in expr.arguments]\n call_arguments = [self._get_call_argument(fa) for fa in func_arguments]\n func_to_call = {fa : ca for ca, fa in zip(call_arguments, func_arguments)}\n\n func_results = [self._wrap_FunctionDefResult(r) for r in expr.results]\n\n func_call_results = [r.var.clone(self.scope.get_expected_name(r.var.name)) for r in expr.results]\n\n body = self._get_function_def_body(expr, func_arguments, func_to_call, func_call_results)\n\n body.extend(self._additional_exprs)\n self._additional_exprs.clear()\n\n self.exit_scope()\n\n func = BindCFunctionDef(name, func_arguments, func_results, body, scope=func_scope, original_function = expr,\n doc_string = expr.doc_string)\n\n self.scope.functions[name] = func\n\n return func", "def result_as_arg(self, node, C_new):\n F_new = C_new.clone()\n\n # Fortran function should wrap the new C function\n F_new._PTR_F_C_index = C_new._function_index\n F_new.wrap.assign(fortran=True)\n # Do not add '_bufferify'\n F_new.fmtdict.function_suffix = node.fmtdict.function_suffix\n\n # Do not wrap original function (does not have result argument)\n node.wrap.fortran = False\n return F_new", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override 
\"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def _wrap_Variable(self, expr):\n if expr.rank == 0 and expr.dtype in NativeNumeric:\n return expr.clone(expr.name, new_class = BindCVariable)\n else:\n scope = self.scope\n func_name = scope.get_new_name('bind_c_'+expr.name.lower())\n func_scope = scope.new_child_scope(func_name)\n mod = expr.get_user_nodes(Module)[0]\n import_mod = Import(mod.name, AsName(expr,expr.name), mod=mod)\n func_scope.imports['variables'][expr.name] = expr\n\n # Create the data pointer\n bind_var = Variable(dtype=BindCPointer(),\n name=scope.get_new_name('bound_'+expr.name),\n is_const=True, memory_handling='alias')\n func_scope.insert_variable(bind_var)\n\n result = BindCFunctionDefResult(bind_var, expr, func_scope)\n if expr.rank == 0:\n #assigns = []\n #c_loc = CLocFunc(expr, bind_var)\n raise NotImplementedError(\"Classes cannot be wrapped\")\n else:\n assigns = [Assign(result.shape[i], expr.shape[i]) for i in range(expr.rank)]\n c_loc = CLocFunc(expr, bind_var)\n body = [*assigns, c_loc]\n func = BindCFunctionDef(name = func_name,\n body = body,\n arguments = [],\n results = [result],\n imports = [import_mod],\n scope = func_scope,\n original_function = expr)\n return expr.clone(expr.name, new_class = BindCArrayVariable, wrapper_function = func,\n original_variable = expr)", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to 
be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def _wrap_FunctionDefArgument(self, expr):\n var = expr.var\n name = var.name\n self.scope.insert_symbol(name)\n collisionless_name = self.scope.get_expected_name(var.name)\n if var.is_ndarray or var.is_optional:\n new_var = Variable(BindCPointer(), self.scope.get_new_name(f'bound_{name}'),\n is_argument = True, is_optional = False, memory_handling='alias')\n arg_var = var.clone(collisionless_name, is_argument = False, is_optional = False,\n memory_handling = 'alias', allows_negative_indexes=False)\n self.scope.insert_variable(arg_var)\n else:\n new_var = var.clone(collisionless_name)\n self.scope.insert_variable(new_var)\n\n return BindCFunctionDefArgument(new_var, value = expr.value, original_arg_var = expr.var,\n kwonly = expr.is_kwonly, 
annotation = expr.annotation, scope=self.scope)", "def get_C_code(self, C_function_name):\n from cascada.bitvector.printing import BvCCodePrinter\n\n width2type = BvCCodePrinter._width2C_type\n\n # in C, * binds to the declarator, not the type specifier\n input_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.input_vars])\n output_vars_c = ', '.join([\"{} *{}\".format(width2type(v.width), v.name) for v in self.output_vars])\n if self.external_vars:\n external_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.external_vars])\n external_vars_c = external_vars_c + \", \"\n else:\n external_vars_c = \"\"\n\n aux = f\"void {C_function_name}({input_vars_c}, {external_vars_c}{output_vars_c})\"\n header = f\"{aux};\"\n body = f\"#include <stdint.h>\\n{aux}{{\" # stdint for uint_*\n\n outvar2outvar_c = {v: core.Variable(\"*\" + v.name, v.width, allowed_symbols=\"*\") for v in self.output_vars}\n\n def primary_assignment2C_code(my_var, my_expr):\n assert isinstance(my_expr, (core.Constant, core.Variable, operation.PrimaryOperation))\n if my_var in self.output_vars:\n return f\"*{my_var} = {my_expr.crepr()};\"\n else:\n return f\"{width2type(my_var.width)} {my_var} = {my_expr.crepr()};\"\n\n for var, expr in self.assignments.items():\n expr = expr.xreplace(outvar2outvar_c)\n if isinstance(expr, operation.SecondaryOperation):\n expr = expr.doit(eval_sec_ops=True)\n body += f\"\\n\\t{primary_assignment2C_code(var, expr)}\"\n body += \"\\n};\"\n\n return header, body", "def bind_result(\n function: Callable[[_FirstType], Result[_UpdatedType, _SecondType]],\n) -> Kinded[Callable[\n [KindN[_ResultLikeKind, _FirstType, _SecondType, _ThirdType]],\n KindN[_ResultLikeKind, _UpdatedType, _SecondType, _ThirdType],\n]]:\n @kinded\n def factory(\n container: KindN[_ResultLikeKind, _FirstType, _SecondType, _ThirdType],\n ) -> KindN[_ResultLikeKind, _UpdatedType, _SecondType, _ThirdType]:\n return container.bind_result(function)\n return factory", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n\n if self.signature.ret_arg:\n output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],\n ll.PointerType(self.fp_type))\n for i, val in enumerate(ret):\n index = ll.Constant(ll.IntType(32), i)\n output_array_ptr = builder.gep(output_fp_ptr, [index])\n builder.store(val, output_array_ptr)\n builder.ret(ll.Constant(ll.IntType(32), 0)) # return success\n else:\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def gen_expr(self, expr, rvalue=False):\n assert isinstance(expr, expressions.CExpression), str(expr)\n\n with self.builder.use_location(expr.location):\n if isinstance(expr, expressions.UnaryOperator):\n value = self.gen_unop(expr)\n elif isinstance(expr, expressions.BinaryOperator):\n value = self.gen_binop(expr)\n elif isinstance(expr, expressions.TernaryOperator):\n value = self.gen_ternop(expr)\n elif isinstance(expr, expressions.VariableAccess):\n value = self.gen_variable_access(expr)\n elif isinstance(expr, expressions.FunctionCall):\n value = self.gen_call(expr)\n elif isinstance(expr, expressions.StringLiteral):\n value = self.gen_string_literal(expr)\n elif isinstance(expr, expressions.CharLiteral):\n value = self.gen_char_literal(expr)\n elif isinstance(expr, expressions.NumericLiteral):\n 
value = self.gen_numeric_literal(expr)\n elif isinstance(expr, expressions.CompoundLiteral):\n value = self.gen_compound_literal(expr)\n elif isinstance(expr, expressions.InitializerList):\n self.error(\"Illegal initializer list\", expr.location)\n elif isinstance(expr, expressions.Cast):\n value = self.gen_cast(expr)\n elif isinstance(expr, expressions.Sizeof):\n value = self.gen_sizeof(expr)\n elif isinstance(expr, expressions.FieldSelect):\n value = self.gen_field_select(expr)\n elif isinstance(expr, expressions.ArrayIndex):\n value = self.gen_array_index(expr)\n elif isinstance(expr, expressions.BuiltIn):\n value = self.gen_builtin(expr)\n else: # pragma: no cover\n raise NotImplementedError(str(expr))\n\n # Check for given attributes:\n assert isinstance(expr.typ, types.CType)\n assert isinstance(expr.lvalue, bool) # C semantics lvalue\n\n # If we need an rvalue, load it!\n if rvalue and expr.lvalue:\n if not expr.typ.is_function:\n value = self._load_value(value, expr.typ)\n\n elif not rvalue:\n assert expr.lvalue\n return value", "def gen_call(self, expr: expressions.FunctionCall):\n assert expr.callee.typ.is_pointer\n ftyp = expr.callee.typ.element_type\n assert isinstance(ftyp, types.FunctionType)\n\n ir_arguments, rval_alloc = self.prepare_arguments(ftyp, expr.args)\n\n callee = self.gen_expr(expr.callee, rvalue=True)\n\n # Use function or procedure call depending on return type:\n if ftyp.return_type.is_void:\n self.emit(ir.ProcedureCall(callee, ir_arguments))\n value = None\n elif ftyp.return_type.is_struct:\n self.emit(ir.ProcedureCall(callee, ir_arguments))\n value = rval_alloc\n else:\n ir_typ = self.get_ir_type(expr.typ)\n value = self.emit(\n ir.FunctionCall(callee, ir_arguments, \"result\", ir_typ)\n )\n\n return value", "def call_ccall(x):\n ret = c_call(x)\n return ret, cython.typeof(ret)", "def bind_context_result(\n function: Callable[\n [_FirstType],\n ReaderResult[_UpdatedType, _SecondType, _ThirdType],\n ],\n) -> Kinded[Callable[\n [KindN[_ReaderResultLikeKind, _FirstType, _SecondType, _ThirdType]],\n KindN[_ReaderResultLikeKind, _UpdatedType, _SecondType, _ThirdType],\n]]:\n @kinded\n def factory(\n container: KindN[\n _ReaderResultLikeKind,\n _FirstType,\n _SecondType,\n _ThirdType,\n ],\n ) -> KindN[_ReaderResultLikeKind, _UpdatedType, _SecondType, _ThirdType]:\n return container.bind_context_result(function)\n return factory", "def _get_function_def_body(self, func, func_def_args, func_arg_to_call_arg, results, handled = ()):\n optional = next((a for a in func_def_args if a.original_function_argument_variable.is_optional and a not in handled), None)\n if optional:\n args = func_def_args.copy()\n optional_var = optional.var\n handled += (optional, )\n true_section = IfSection(PyccelIsNot(optional_var, Nil()),\n self._get_function_def_body(func, args, func_arg_to_call_arg, results, handled))\n args.remove(optional)\n false_section = IfSection(LiteralTrue(),\n self._get_function_def_body(func, args, func_arg_to_call_arg, results, handled))\n return [If(true_section, false_section)]\n else:\n args = [FunctionCallArgument(func_arg_to_call_arg[fa],\n keyword = fa.original_function_argument_variable.name)\n for fa in func_def_args]\n size = [fa.shape[::-1] if fa.original_function_argument_variable.order == 'C' else\n fa.shape for fa in func_def_args]\n stride = [fa.strides[::-1] if fa.original_function_argument_variable.order == 'C' else\n fa.strides for fa in func_def_args]\n orig_size = [[PyccelMul(l,s) for l,s in zip(sz, st)] for sz,st in zip(size,stride)]\n body = 
[C_F_Pointer(fa.var, func_arg_to_call_arg[fa].base, s)\n for fa,s in zip(func_def_args, orig_size)\n if isinstance(func_arg_to_call_arg[fa], IndexedElement)]\n body += [C_F_Pointer(fa.var, func_arg_to_call_arg[fa])\n for fa in func_def_args\n if not isinstance(func_arg_to_call_arg[fa], IndexedElement) \\\n and fa.original_function_argument_variable.is_optional]\n\n # If the function is inlined and takes an array argument create a pointer to ensure that the bounds\n # are respected\n if func.is_inline and any(isinstance(a.value, IndexedElement) for a in args):\n array_args = {a: self.scope.get_temporary_variable(a.value.base, a.keyword, memory_handling = 'alias') for a in args if isinstance(a.value, IndexedElement)}\n body += [AliasAssign(v, k.value) for k,v in array_args.items()]\n args = [FunctionCallArgument(array_args[a], keyword=a.keyword) if a in array_args else a for a in args]\n\n func_call = Assign(results[0], FunctionCall(func, args)) if len(results) == 1 else \\\n Assign(results, FunctionCall(func, args))\n return body + [func_call]", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if 
meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. 
{ \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n 
cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def build(self, cres):\n _launch_threads()\n # Build wrapper for ufunc entry point\n ctx = cres.target_context\n library = cres.library\n signature = cres.signature\n llvm_func = library.get_function(cres.fndesc.llvm_func_name)\n wrapper, env = build_gufunc_wrapper(library, ctx, llvm_func,\n signature, self.sin, self.sout,\n fndesc=cres.fndesc,\n env=cres.environment)\n\n ptr = library.get_pointer_to_function(wrapper.name)\n\n # Get dtypes\n dtypenums = []\n for a in signature.args:\n if isinstance(a, types.Array):\n ty = a.dtype\n else:\n ty = a\n dtypenums.append(as_dtype(ty).num)\n\n return dtypenums, ptr, env", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def make_wrapper(fname, atypes, rtype, cres):\n fndesc = cres.fndesc\n module = cres.library.create_ir_module(fndesc.unique_name)\n context = cres.target_context\n ll_argtypes = [context.get_value_type(ty) for ty in atypes]\n ll_return_type = context.get_value_type(rtype)\n\n # TODO: design a API for custom wrapping\n if type(rtype).__name__ == 'ArrayPointer':\n wrapty = ir.FunctionType(ir.VoidType(),\n [ll_return_type] + ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args[1:])\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret_void()\n builder.store(builder.load(out), wrapfn.args[0])\n builder.ret_void()\n else:\n wrapty = ir.FunctionType(ll_return_type, ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args)\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret(out)\n\n cres.library.add_ir_module(module)", "def actual_grad_fn(*result_grad_components):\n result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients(\n nest.flatten(result), result_grad_components)\n if not isinstance(result_grads, (list, tuple)):\n result_grads = [result_grads]\n\n if variables:\n input_grads, variable_grads = 
grad_fn(*result_grads, variables=variables)\n if len(variable_grads) != len(variables):\n raise ValueError(\"Must return gradient for each variable from \"\n \"@custom_gradient grad_fn.\")\n else:\n input_grads = grad_fn(*result_grads)\n variable_grads = []\n flat_grads = composite_tensor_gradient.get_flat_tensors_for_gradients(\n nest.flatten(input_grads))\n if len(flat_grads) != arg_count:\n raise ValueError(\n f\"custom_gradient function expected to return {arg_count} \"\n f\"gradients, but returned {len(flat_grads)} instead.\")\n return flat_grads + variable_grads", "def function_eval(\n self,\n name_token: lark.Token,\n exprlist: Optional[Iterable[Result]] = None) -> Result:\n function: CELFunction\n try:\n # TODO: Transitive Lookup of function in all parent activation contexts.\n function = self.functions[name_token.value]\n except KeyError as ex:\n err = (\n f\"undeclared reference to '{name_token}' \"\n f\"(in activation '{self.activation}')\"\n )\n value = CELEvalError(err, ex.__class__, ex.args, token=name_token)\n value.__cause__ = ex\n return value\n\n if isinstance(exprlist, CELEvalError):\n return exprlist\n\n try:\n list_exprlist = cast(List[Result], exprlist or [])\n return function(*list_exprlist)\n except ValueError as ex:\n value = CELEvalError(\n \"return error for overflow\", ex.__class__, ex.args, token=name_token)\n value.__cause__ = ex\n return value\n except (TypeError, AttributeError) as ex:\n self.logger.debug(f\"function_eval({name_token!r}, {exprlist}) --> {ex}\")\n value = CELEvalError(\n \"no such overload\", ex.__class__, ex.args, token=name_token)\n value.__cause__ = ex\n return value", "def test_fun_result(self):\n x = CArray([3, 5])\n correct_result = x[0] ** 2 + x[1] ** 2\n self._test_fun_result(self.fun, x, correct_result.item())", "def _cor_compile(rule, var, val, result_class, key, compilation_list):\n compilation = compilation_list.get(key, None)\n if compilation:\n if isinstance(val, ListRule):\n result = []\n for itemv in val.value:\n result.append(compilation['callback'](itemv))\n\n val = compilation['listclass'](result)\n else:\n val = compilation['callback'](val)\n return result_class(rule.operation, var, val)", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", 
self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def _make_array(self, c):\n return (c * ctypes.py_object)()", "def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. 
requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)", "def make_function(self, unpack_single=True, **kwargs):\r\n thunk, inputs, outputs = self.make_thunk(**kwargs)\r\n\r\n def execute(*args):\r\n def e_arity(takes, got):\r\n return 'Function call takes exactly %i %s (%i given)' \\\r\n % (takes, ['argument','arguments'][takes>1], got)\r\n if (len(args) != len(inputs)):\r\n raise TypeError(e_arity(len(inputs), len(args)))\r\n for arg, variable in zip(args, inputs):\r\n variable.data = arg\r\n thunk()\r\n if unpack_single:\r\n return utils.to_return_values([variable.data\r\n for variable in outputs])\r\n else:\r\n return [variable.data for variable in outputs]\r\n execute.thunk = thunk\r\n execute.inputs = inputs\r\n execute.outputs = outputs\r\n\r\n return execute", "def call_cdef_inline(x):\n ret = cdef_inline(x)\n return ret, cython.typeof(ret)" ]
[ "0.6197848", "0.5908766", "0.5702783", "0.5584586", "0.5551152", "0.5510475", "0.53075147", "0.5056599", "0.5053299", "0.5033014", "0.50257194", "0.50233", "0.49712402", "0.4809258", "0.47916153", "0.4786185", "0.47819734", "0.477247", "0.4735379", "0.46769652", "0.46176627", "0.46065253", "0.4598054", "0.45853952", "0.45786664", "0.45786363", "0.45736584", "0.45310295", "0.45278624", "0.4521833" ]
0.8009484
0
classifies the label for the queried case by finding the highest-probability item; P(B|A)P(A) is used to compare the probabilities
def classify(self, toBeClassified, laPlace = 0): #counting P(B|A) probability probabilitiesDictionary = {} for label in self.trainingY: probability = 1.0 #for every part of toBeClassified count the probability of occuring if the given test was correct choice and multiply all of them for case in toBeClassified: if case in self.instanceCounterDict[label].keys(): probability *= self.instanceCounterDict[label][case] + laPlace / self.totalInstanceCounter[label] + laPlace else: probability *= laPlace / self.totalInstanceCounter[label] + laPlace #P(B|A)P(A) probabilitiesDictionary[probability * self.prioProbability[label]] = label #find the highest probability case and return it highestProbability = max(probabilitiesDictionary.keys()) #return class name of highest probability class return probabilitiesDictionary[highestProbability]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def get_label(prob_label, target):\n return target if random.random() <= prob_label else 1 - target", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def most_probable_class(text, weights):\n\n pos_weights = weights['positive']\n neg_weights = weights['negative']\n neu_weights = weights['neutral']\n features = calculate_features(text)\n pos_numerator = 0.0\n neg_numerator = 0.0\n neu_numerator = 0.0\n denominator = 0.0\n for f in features:\n if f in pos_weights and f in neg_weights and f in neu_weights:\n pos_numerator += pos_weights[f] * features[f]\n neg_numerator += neg_weights[f] * features[f]\n neu_numerator += neu_weights[f] * features[f]\n denominator += pos_numerator + neg_numerator + neu_numerator\n else:\n pos_numerator += 0\n neg_numerator += 0\n neu_numerator += 0\n denominator += pos_numerator + neg_numerator + neu_numerator\n\n pos_prob = (\"positive\", exp(pos_numerator))# /exp(denominator))\n neg_prob = (\"negative\", exp(neg_numerator))# /exp(denominator))\n neu_prob = (\"neutral\", exp(neu_numerator))# /exp(denominator))\n return max(neu_prob, neg_prob, pos_prob, key=lambda x: x[1])", "def propagate_labels_majority(image,labels):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels)\n outputs = zeros(amax(rlabels)+1,'i')\n counts = zeros(amax(rlabels)+1,'i')\n for rlabel, label_, count in cors.T:\n if not rlabel or not label_:\n # ignore background correspondences\n continue\n if counts[rlabel] < count:\n outputs[rlabel] = label_\n counts[rlabel] = count\n outputs[0] = 0\n return outputs[rlabels]", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def get_proba_by_label(self, label=None):\n if self.get_count_by_label(label) == 0:\n if label == 0:\n # REMEMBER: this is a display only, not a math model, in display we sub neg from 1, so return 1 to get zero\n return 1\n else:\n return 0\n elif len(self.data) - self.get_count_by_label(-1) == 0:\n # they're all unpredictable\n return 0\n elif label is None:\n # weird case, change neg's to 1-proba, which is different than rest of display\n pos_proba = sum(d.proba for d in self.data if d.pred == 1)\n neg_proba = sum(1 - d.proba for d in self.data if d.pred == 0)\n return (pos_proba + neg_proba) / (len(self.data) - self.get_count_by_label(-1))\n else:\n return sum(d.proba for d in self.data if d.pred == label) / self.get_count_by_label(label)", "def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n #print vec2Classify\n # [0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]\n \n #print p0Vec\n \n #print p1Vec\n \"\"\"[-3.04452244 -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244 -3.04452244\n -3.04452244 -2.35137526 -2.35137526 -2.35137526 -2.35137526 -2.35137526\n -3.04452244 -1.94591015 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n 
-1.94591015 -3.04452244 -1.65822808 -3.04452244 -2.35137526 -3.04452244\n -3.04452244 -3.04452244]\"\"\" \n \n #print vec2Classify * p1Vec\n \"\"\"\n [-0. -3.04452244 -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -3.04452244\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. -3.04452244]\n \"\"\"\n \n #print sum(vec2Classify * p1Vec)\n # -9.13356731317\n \n p1 = sum(vec2Classify * p1Vec) + log(pClass1) #element-wise mult\n p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)\n \n if p1 > p0:\n return 1\n else: \n return 0", "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def PredictLabel(sentence, model_main, word2vec, boundary=0.5):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_main.predict_proba(features.reshape(1,-1))[0]\n if model_main.classes_[prediction.argmax()]!=\"clerical\":\n return model_main.classes_[prediction.argmax()]\n else:\n if np.max(prediction)>boundary:\n return \"clerical\"\n else:\n ranger = range(len(prediction))\n del ranger[prediction.argmax()]\n return model_main.classes_[ranger][prediction[ranger].argmax()]", "def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict = self.model[\"words\"].get(word, None)\n if word_dict:\n total_grade += math.log(word_dict[cur_label], math.e)\n chance.append((total_grade, cur_label))\n _, prediction = max(chance)\n return prediction", "def naive_bayes_classify(df: pd.DataFrame, vect, names):\n features = vect\n target = df.success_lvl\n\n X_train, X_test, y_train, y_test = \\\n train_test_split(features, target, test_size=0.2, random_state=42)\n\n nb_clf = MultinomialNB()\n nb_clf.fit(X_train, y_train)\n nb_predictions = nb_clf.predict(X_test)\n print('Accuracy score for Naive Bayes:', accuracy_score(y_test, nb_predictions))\n\n\n # Find Top/Bottom num of terms used to describe the classes.\n num = 10\n low_class_prob_sorted = nb_clf.feature_log_prob_[0, :].argsort()[::-1]\n hi_class_prob_sorted = nb_clf.feature_log_prob_[1, :].argsort()[::-1]\n print('\\n', f'Low score Top{num} phrases:', np.take(names, low_class_prob_sorted[:num]))\n print('\\n', f'Low score Bot{num} phrases:', np.take(names, low_class_prob_sorted[-num:]))\n print('\\n', f'High score Top{num} phrases:', np.take(names, hi_class_prob_sorted[:num]))\n print('\\n', f'High score Bot{num} phrases:', np.take(names, hi_class_prob_sorted[-num:]))", "def classify(some_string, trained_pos, trained_neg):\n pos_probability = get_probability(trained_pos, some_string)\n neg_probability = get_probability(trained_neg, some_string)\n if (pos_probability >= neg_probability):\n return \"positive\"\n elif pos_probability < neg_probability: \n return \"negative\"", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def getPredictions(self):\n\t\tself.bestLabel = 
self.testingProbs.apply(lambda x: x.argmax(),1)", "def classify_image(img_pil):\n results = tpu.ClassifyWithImage(img_pil, top_k=1)\n if len(results) == 0:\n return None, None\n i, score = results[0]\n label = labels[i]\n # print(label + \": \" + str(score))\n return label, score", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def predict(self, row):\n label_vote = dict()\n for i in range(len(self.forest)):\n result = self.forest[i].predict(row)\n label = max(result, key=result.get)\n \n if label_vote.get(label, None) is None:\n label_vote[label] = 0\n\n label_vote[label] += 1\n \n return max(label_vote, key=result.get)", "def get_classLabel(self, dataset, class_label): \n\t\tnode = self.root\n\t\tbroken=0\n\t\t\n\t\t#print(\"BEBE:\" + str(node.get_bebe( dataset)))\n\t\t\n\t\tif (node.get_bebe( dataset) == class_label ):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\t\t\tdef junk(data, class_label, seed, ratio):", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [[self.classes[np.argmax(subrow)] for subrow in row] for row in self.data]\n return tags", "def predLabel(self, DataMatrix):\n self.predict(DataMatrix)\n # Calculamos el valor mas alto, y a partir de este obtenemos el nombre de la etiqueta\n tags = [self.classes[np.argmax(elem)] for elem in self.data]\n return tags", "def decode_prediction(self, prediction):\n index = np.argmax(prediction)\n\n inv_map = {v: k for k, v in self.class_index.items()}\n label = inv_map[index]\n return label, np.amax(prediction)", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n 
num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def comp_probs_and_labels(self, x_feats):\n scores = sparse.dot(x_feats, self.W) + self.Wb # [l, d] x [d, m] + [m] => [l, m]\n relation_probs = T.nnet.softmax(scores)\n labels = T.argmax(scores, axis=1) # [l, m] => [l]\n return labels, relation_probs", "def predict(self, datum):\r\n probs = {}\r\n for class_ in set(self.train_classes):\r\n probs[class_] = self.distribution.class_prob[class_] * reduce(lambda x,y:x*y, [self.distribution.prob(feat_ind_feat[0],feat_ind_feat[1],class_) for feat_ind_feat in enumerate(datum)])\r\n return max(probs, key=lambda x:probs[x])", "def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best", "def classify(indicator):\n # this function prints the spam classification\n if indicator > SPAMTHRESHOLD:\n return \"SPAM\"\n else:\n return \"HAM\"", "def convertclasstoemotion(pred):\n \n label_conversion = {'0': 'neutral',\n '1': 'calm',\n '2': 'happy',\n '3': 'sad',\n '4': 'angry',\n '5': 'fearful',\n '6': 'disgust',\n '7': 'surprised'}\n\n for key, value in label_conversion.items():\n if int(key) == pred:\n label = value\n return label", "def _classify_from_probs(predicts_proba):\n def find_majority(dict_probs):\n \"\"\"Find the majority class\"\"\"\n # if there is no majority class, pick the first from the sorted\n max_val = max(dict_probs.values())\n max_keys = [key for key in dict_probs.keys()\n if dict_probs[key] == max_val]\n return sorted(max_keys)[0]\n\n predicts = [find_majority(dict_probs) for dict_probs in predicts_proba]\n return predicts", "def predict(probs):\n # Your code here.\n return np.argmax(probs, axis=1)" ]
[ "0.6739165", "0.6672079", "0.6487649", "0.6459216", "0.64419186", "0.6410345", "0.64069885", "0.6355552", "0.63476574", "0.6324833", "0.631852", "0.6314344", "0.631261", "0.6312608", "0.62935036", "0.62587637", "0.6247219", "0.62198925", "0.6213259", "0.6187837", "0.6165054", "0.6161483", "0.6161333", "0.6157968", "0.6141791", "0.6114986", "0.6113736", "0.61012614", "0.60997635", "0.6099075" ]
0.71006674
0
A decorator which wraps a function's return value in ``list(...)``. Useful when an algorithm can be expressed more cleanly as a generator but the function should return a list.
def listify(fn=None, wrapper=list): def listify_return(fn): @functools.wraps(fn) def listify_helper(*args, **kw): return wrapper(fn(*args, **kw)) return listify_helper if fn is None: return listify_return return listify_return(fn)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def decorator(arg):\n return lambda: list(arg)", "def wrapped_func(ret_val, *args, **kwargs):\n val = func(*args, **kwargs)\n ret_val.append(val)", "def listify(gen: Callable[..., Union[Generator[T, None, None], AsyncGenerator[T, None]]]) -> Callable[..., List[T]]:\n if inspect.isasyncgenfunction(gen):\n\n @wraps(gen)\n async def list_func(*args, **kwargs) -> List[Any]:\n return [v async for v in gen(*args, **kwargs)]\n\n elif inspect.isgeneratorfunction(gen):\n\n @wraps(gen)\n def list_func(*args, **kwargs) -> List[Any]:\n return list(gen(*args, **kwargs))\n\n else:\n raise TypeError(f'{gen} is not a generator or async-generator')\n return list_func", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_returns_list(self):\n metrics = ('input', 'output')\n\n @callback_return(*metrics)\n def returns_list():\n return [2, 1, 3]\n\n r = returns_list()\n self.assertEqual(len(metrics), len(r.keys()), 'Extra return values should be dropped.')\n self.assertEqual(2, r['input'])\n self.assertEqual(1, r['output'])\n self.assertNotIn('extra', r)", "def give_me_a_list():\n my_list=[1,2,3,4]\n return my_list\n pass", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def list() -> List:\n pass", "def as_list(gen):\n return list(gen())", "def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def memoize(func):\n result: List[Any] = []\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func", "def cachedList(\n *,\n cached_method_name: str,\n list_name: str,\n num_args: Optional[int] = None,\n name: Optional[str] = None,\n) -> Callable[[F], CachedFunction[F]]:\n func = lambda orig: DeferredCacheListDescriptor(\n orig,\n cached_method_name=cached_method_name,\n list_name=list_name,\n num_args=num_args,\n name=name,\n )\n\n return cast(Callable[[F], CachedFunction[F]], func)", "def ListMonad(*elements: List[T]) -> _List[T]: # pylint: 
disable=invalid-name\n\n return _List(list(elements), None)", "def cast_to_list(position):\n\n\[email protected]\n\tdef wrapper(function, instance, args, kwargs):\n\t\tif not isinstance(args[position], list):\n\t\t\targs = list(args)\n\t\t\targs[position] = [args[position]]\n\t\t\targs = tuple(args)\n\n\t\treturn function(*args, **kwargs)\n\n\treturn wrapper", "def sequence(f, lst: list) -> list:\n ret = []\n for ele in lst:\n ret.append(f(ele))\n return ret", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def __call__(self, X, Y=None, eval_gradient=False):\n return [f(X, Y=Y, eval_gradient=eval_gradient) for f in self.list_func]", "def get_list_dep() -> Callable:\n args = []\n body = [\" r = {}\"]\n # Apply list ops as annotations\n for list_op in self.list_ops:\n args += [f\"{list_op.name}: Optional[List[str]] = Query(None)\"]\n body += [\n f\" if {list_op.name} is not None:\",\n f' r[\"{list_op.name}\"] = {list_op.name}',\n ]\n code = [f\"def inner({', '.join(args)}) -> dict:\"] + body + [\" return r\"]\n r = {\"Optional\": typing.Optional, \"List\": typing.List, \"Query\": Query}\n exec(\"\\n\".join(code), {}, r)\n return r[\"inner\"]", "def to_list():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n l = []\n try:\n while True:\n l.append((yield))\n except GeneratorExit:\n target.send(l) \n target.close()\n\n return _dagpype_internal_fn_act", "def wrapper(*args, **kwargs):\n print('Before function')\n value = decorated_function(*args, **kwargs)\n print('After function')\n return value", "def get_value_list():\n return [some_random_number() for _ in range(some_random_number())]", "def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))", "def aslist(something):\n return something if isinstance(something, list) else [something]", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])" ]
[ "0.78704184", "0.74657214", "0.72008944", "0.6643089", "0.66166073", "0.660685", "0.65524614", "0.6306997", "0.6270299", "0.62679815", "0.61245453", "0.6116196", "0.59481525", "0.5898619", "0.5858969", "0.58522457", "0.5849124", "0.58287376", "0.578699", "0.5776788", "0.5776716", "0.5735341", "0.57122487", "0.57062185", "0.5679125", "0.5663164", "0.55918306", "0.5574334", "0.5544414", "0.55405974" ]
0.75534225
1
Given the flow cell configuration, return the RTA version tuple.
def config_to_rta_version(config): input_dir = config["input_dir"] path_run_info = glob.glob(os.path.join(input_dir, "?un?arameters.xml"))[0] run_parameters = load_run_parameters(path_run_info) rta_version = run_parameters["rta_version"] return rta_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_version(config):\n return 2 if is_v2_config(config) else 1", "def version(self):\n return self._call_txtrader_api('version', {})", "def test_get_cons3rt_version(self):\n pass", "def _get_revision(config):\n if config.revision:\n tokens = config.revision.split(\":\")\n svn_branch = tokens[0]\n if len(tokens) > 1:\n revision = config.revision.split(\":\")[1]\n else:\n revision = \"HEAD\"\n return (svn_branch, revision)\n else:\n return (\"trunk\", \"HEAD\")", "def to_interpreter_version(self) -> None | tuple[int, int]:", "def get_version_info() -> Tuple[Text, Text]:", "def get_reversion():\n return to_str(backend.get().af_get_revision())", "def armory_version_tbi():\n try:\n expected_armory_version = subprocess.check_output(\n \"python setup.py --version\".split(\" \")\n )\n except subprocess.CalledProcessError:\n print(\"armory .git not avaiable...trying armory\")\n expected_armory_version = subprocess.check_output(\"armory version\".split(\" \"))\n expected_armory_version = expected_armory_version.decode(\"utf-8\")\n expected_armory_version = expected_armory_version.replace(\"\\n\", \"\").strip()\n return expected_armory_version", "def _get_cfg_v(self):\n if CONFIG_VERSION_KEY in self[CONFIG_KEY]:\n v_str = self[CONFIG_KEY][CONFIG_VERSION_KEY]\n if not isinstance(v_str, str):\n raise InvalidConfigFileException(\"{} must be a string\".\n format(CONFIG_VERSION_KEY))\n v_bundle = v_str.split(\".\")\n assert len(v_bundle) == 3, \\\n InvalidConfigFileException(\"Version string is not tripartite\")\n try:\n v_bundle = list(map(int, v_bundle))\n except ValueError:\n raise InvalidConfigFileException(\"Version string elements are \"\n \"not coercible to integers\")\n if v_bundle[0] < 2:\n if SAMPLE_MODS_KEY in self[CONFIG_KEY]:\n raise InvalidConfigFileException(\n \"Project configuration file ({p}) subscribes to {c} \"\n \">= 2.0.0, since '{m}' section is defined. 
Set {c} to \"\n \"2.0.0 in your config\".format(p=self[CONFIG_FILE_KEY],\n c=CONFIG_VERSION_KEY,\n m=SAMPLE_MODS_KEY))\n else:\n self._format_cfg()\n return [\"2\", \"0\", \"0\"]\n return list(map(str, v_bundle))\n else:\n self._format_cfg()\n return [\"2\", \"0\", \"0\"]", "def get_version():\n import RadioAbsTools\n return RadioAbsTools.__version__", "def get_tgis_version():\n global tgis_version\n return tgis_version", "def get_teds_version( channel ):\n version = uInt32(0)\n CALL('GetPhysicalChanTEDSVersionNum', channel, byref(version))\n return version.value", "def get_version(ft_handle: FtHandle) -> SwChipVersion:\n version_struct = _RawVersion()\n result: Ft4222Status = _get_version(\n ft_handle, byref(version_struct))\n\n if result != Ft4222Status.OK:\n raise Ft4222Exception(result)\n\n return SwChipVersion.from_raw(version_struct)", "def bcl2fastq_wrapper(config):\n rta_version = config_to_rta_version(config)\n if rta_version >= RTA_MIN_BCL2FASTQ2:\n return \"bcl2fastq2\"\n else:\n return \"bcl2fastq\"", "def get_comp_versions (component):\n vprint (\"Detecting current version for \" + component)\n\n regex = re.compile (r\"version \" + version_restr)\n major = component + \"_major\"\n minor = component + \"_minor\"\n micro = component + \"_micro\"\n\n\n version = (None, None, None)\n with open (doc_root + \"/ACE_TAO/\" + component + \"/VERSION.txt\") as version_file:\n for line in version_file:\n match = regex.search (line)\n if match is not None:\n version = match.groups(default=0)\n\n vprint (\"Detected version %s.%s.%s\" % version)\n\n comp_versions[major] = int (version[0])\n comp_versions[minor] = int (version[1])\n comp_versions[micro] = int (version[2])\n\n break\n\n print (\"FATAL ERROR: Unable to locate current version for \" + component)\n raise Exception\n\n # Also store the current release (old from now)\n old_comp_versions[major] = comp_versions[major]\n old_comp_versions[minor] = comp_versions[minor]\n old_comp_versions[micro] = comp_versions[micro]\n\n if opts.update:\n if opts.release_type == ReleaseType.major:\n comp_versions[major] += 1\n comp_versions[minor] = 0\n comp_versions[micro] = 0\n elif opts.release_type == ReleaseType.minor:\n comp_versions[minor] += 1\n comp_versions[micro] = 0\n elif opts.release_type == ReleaseType.micro:\n comp_versions[micro] += 1\n\n def make_version (versions, joiner):\n return joiner.join ([\n str (versions[component + '_' + x]) for x in ReleaseType.__members__.keys ()\n ])\n\n comp_versions [component + \"_version\"] = make_version (comp_versions, '.')\n comp_versions [component + \"_version_\"] = make_version (comp_versions, '_')\n\n comp_versions [component + \"_code\"] = \\\n (comp_versions[major] << 16) + \\\n (comp_versions[minor] << 8) + \\\n comp_versions[micro]\n\n old_comp_versions [component + \"_version\"] = make_version (old_comp_versions, '.')\n old_comp_versions [component + \"_version_\"] = make_version (old_comp_versions, '_')\n\n if opts.update:\n vprint (\"Updating from version %s to version %s\" %\n (old_comp_versions [component + \"_version\"], comp_versions [component + \"_version\"]))\n else:\n vprint (\"Found version %s\" %\n (comp_versions [component + \"_version\"]))\n\n # else:\n # comp_versions [component + \"_version\"] = \\\n # str (comp_versions[major]) + '.' 
+ \\\n # str (comp_versions[minor])", "def version(self):\n return tuple(int(x) for x in self.tag.split('.'))", "def model_version(self) -> Tuple[int, int, int]:\n return (\n ctypes.c_int.in_dll(self.stanlib, \"bs_major_version\").value,\n ctypes.c_int.in_dll(self.stanlib, \"bs_minor_version\").value,\n ctypes.c_int.in_dll(self.stanlib, \"bs_patch_version\").value,\n )", "def version(self):\n data = self._ftdi.spi_read(self.VERSION_ADDR, len=1, burst='fixed')\n return data[0] & self.VERSION_MASK", "def read_version(ctl):\n\tr = ctl.bus_read_struct_coherent(tm.status_addr, 'BB')\n\treturn r", "def get_version(self):\n return arbwave_version()", "def get_tasmota_version():\n with open(os.path.join(tasmota_dir, \"tasmota\",\n \"tasmota_version.h\"), \"r\") as f:\n for line in f:\n match = re.match('.* VERSION = (0x[0-9A-Fa-f]+);', line)\n if match:\n return match.groups()[0];\n raise Exception('No tasmota version found.')", "def get_revision(self):\n vers = self.send(\"?R\", recv=True)\n # Verify its a valid version\n # ? why was this commented out\n float(vers)\n # But return as string to avoid precision issues\n return vers", "def _get_version(self):", "def version(self):\n return tango_pb_device_version", "def ata_version(self) -> SmartSsdAtaVersion:\n return self._ata_version", "def get_read_version(self):\n return FutureInt64(self.capi.fdb_transaction_get_read_version(self.tpointer))", "def get_tgpio_version(self):\r\n return self._arm.get_tgpio_version()", "def get_version(self):\r\n return self._arm.get_version()", "def oracle(self, config):\n # print config.getBufferStackPair()\n # print GS_arcs\n if config.getBufferStackPair('left') in self.GS_arcs:\n return 'LeftArc'\n elif config.getBufferStackPair('right') in self.GS_arcs:\n return 'RightArc'\n else:\n return 'Shift'", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version" ]
[ "0.5258042", "0.51598346", "0.51081574", "0.5086691", "0.50494105", "0.49595442", "0.48709843", "0.48570353", "0.48323405", "0.48223144", "0.48180148", "0.4808632", "0.47978392", "0.4786692", "0.47680828", "0.47165644", "0.4712013", "0.4689256", "0.46807617", "0.46767527", "0.46665236", "0.46397838", "0.4638268", "0.46369436", "0.46238", "0.46170416", "0.4615514", "0.4598516", "0.45922238", "0.4559173" ]
0.6845571
0
Return name of bcl2fastq wrapper to use.
def bcl2fastq_wrapper(config): rta_version = config_to_rta_version(config) if rta_version >= RTA_MIN_BCL2FASTQ2: return "bcl2fastq2" else: return "bcl2fastq"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fastq_id(fastq_name):\n return fastq_name.split(' ')[0]", "def fastq_filename(fastq_base):\n return fastq_base+\"_1.fastq\", fastq_base+\"_2.fastq\"", "def genSamName(fastq):\n return os.path.join(samFolder, os.path.splitext(fastq)[0] + \".sam\")\n # return os.path.join(samFolder, ntpath.split(fastq)[1].replace(\".fastq\", \".sam\"))", "def bcl_to_fastq_info(path=None):\n # Initialise\n bcl2fastq_path = ''\n package_name = ''\n package_version = ''\n # Locate the core script\n if not path:\n exes = available_bcl2fastq_versions()\n if exes:\n bcl2fastq_path = exes[0]\n else:\n bcl2fastq_path = os.path.abspath(path)\n # Identify the version\n if os.path.basename(bcl2fastq_path) == 'configureBclToFastq.pl':\n # Found CASAVA or bcl2fastq 1.8.* version\n # Look for the top-level directory\n path = os.path.dirname(bcl2fastq_path)\n # Look for etc directory\n etc_dir = os.path.join(os.path.dirname(path),'etc')\n if os.path.isdir(etc_dir):\n for d in bcf_utils.list_dirs(etc_dir):\n m = re.match(r'^(bcl2fastq|CASAVA)-([0-9.]+)$',d)\n if m:\n package_name = m.group(1)\n package_version = m.group(2)\n break\n elif os.path.basename(bcl2fastq_path) == 'bcl2fastq':\n # Found bcl2fastq v2.*\n # Run the program to get the version\n version_cmd = applications.Command(bcl2fastq_path,'--version')\n output = version_cmd.subprocess_check_output()[1]\n for line in output.split('\\n'):\n if line.startswith('bcl2fastq'):\n # Extract version from line of the form\n # bcl2fastq v2.17.1.14\n package_name = 'bcl2fastq'\n try:\n package_version = line.split()[1][1:]\n except ex:\n logging.warning(\"Unable to get version from '%s': %s\" %\n (line,ex))\n else:\n # No package supplied or located\n logging.warning(\"Unable to identify bcl-to-fastq conversion package \"\n \"from '%s'\" % bcl2fastq_path)\n # Return what we found\n return (bcl2fastq_path,package_name,package_version)", "def run_bcl2fastq_2_17(*args,**kws):\n return run_bcl2fastq_2(*args,**kws)", "def fq_of_arrowed_bin(self, first, last):\n return self._arrowed_bin_prefix(first, last) + \".arrowed.fastq\"", "def get_tool_marker(config):\n if len(config[\"flowcell\"][\"demux_reads_override\"]) > 1:\n if config[\"demux_tool\"] == \"bcl2fastq2\":\n return \"bcl2fastq2.done\"\n else:\n raise InvalidConfiguration(\n \"Only bcl2fastq2 supports more than one bases mask at once, but you have {}\".format(\n \" and \".join(config[\"flowcell\"][\"demux_reads_override\"])\n )\n )\n elif \"M\" in config[\"flowcell\"][\"demux_reads\"]:\n if config[\"demux_tool\"] == \"picard\":\n return \"picard.done\"\n else:\n raise InvalidConfiguration(\n \"Only picard can be used to write UMIs to separate FASTQ file. 
There is an 'M' \"\n \"in your bases mask, but you wanted to run bcl2fastq(2).\"\n )\n elif config[\"demux_tool\"] == \"bcl2fastq1\":\n return \"bcl2fastq1.done\"\n elif config[\"demux_tool\"] == \"picard\":\n return \"picard.done\"\n else:\n return \"bcl2fastq2.done\"", "def get_fastq(wildcards):\n if sample_is_single_end(wildcards.sample):\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\"]].dropna()\n else:\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\", \"fq2\"]].dropna()", "def get_name(self):\n bcname = _pychidg.f90wrap_get_name(self=self._handle)\n return bcname", "def getBuildName(self):\n\n return super(SrtBuffer, self).getBuildName() + '_srtBuffer'", "def get_name(cls):\n\t\treturn '' if cls is SAM3X else cls.__name__", "def queue_name(is_parallel):\n return QUEUE_NAMES[int(bool(is_parallel))]", "def get_name():\n\n return 'nettools'", "def _create_name(self) -> str:\n return self.stream.__class__.__name__", "def fqpn(thing):\n return \".\".join([thing.__module__, thing.__name__])", "def name(self):\n return self._get_device_class_name()", "def get_name():\n return __name__", "def get_make_seq_name(iseq, *, mangle=False):\n\n name = _translate_function_name(interrogate_make_seq_seq_name(iseq), mangle)\n return name", "def convert_barcode_id_to_name(multiplex, fc_name, fq):\n fqout = list([None, None])\n if multiplex is None:\n fqout[0] = fq[0]\n if not fq[1] == None:\n fqout[1] = fq[1]\n else:\n bcid2name = dict([(mp['barcode_id'], mp['name']) for mp in multiplex])\n for bcid in bcid2name.keys():\n mstr = \"%s_%s_\" % (fc_name, bcid) \n if fq[0].find(mstr) != -1:\n from_str = \"%s_%s_\" %(fc_name, bcid)\n to_str = \"%s_%s_\" %(fc_name, bcid2name[bcid])\n fqout[0] = fq[0].replace(from_str, to_str)\n if not fq[1] == None:\n fqout[1] = fq[1].replace(from_str, to_str)\n fqout[0] = fqout[0].replace(\"_fastq.txt\", \".fastq\")\n if not fqout[1] == None:\n fqout[1] = fqout[1].replace(\"_fastq.txt\", \".fastq\")\n return os.path.basename(fqout[0]), (os.path.basename(fqout[1]) if len(fqout) > 1 else None)", "def call_name(self):\n return str(self.executable.name)", "def get_name(self):\n \n return 'Loop-Back'", "def name(self):\n module_filepath = inspect.getfile(type(self))\n module_filename = os.path.basename(module_filepath)\n command_name, _ = os.path.splitext(module_filename)\n return command_name", "def use_name(self):\n if self.is_strobe():\n return 'intsigr_%s' % self.name\n return 'intsig_%s' % self.name", "def get_name(self):\n name = getname(self)\n return TPM2B_NAME(name)", "def get_name(self):\n name = getname(self)\n return TPM2B_NAME(name)", "def getClassName(self):\n n = type(self).__name__\n return n", "def _get_classname(cls):\n return cls.__name__", "def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()", "def __GetWrapperFileName(cls, src):\n return FileUtils.GetBinPathForFile(src).replace('.i', '.swig.cc')", "def test_bcl2fastq(self):\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv').command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','Unaligned',\n '--sample-sheet','SampleSheet.csv'])\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n output_dir='run/bcl2fastq').command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','run/bcl2fastq',\n '--sample-sheet','SampleSheet.csv'])\n 
self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n output_dir='run/bcl2fastq',\n ignore_missing_bcl=True).command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','run/bcl2fastq',\n '--sample-sheet','SampleSheet.csv',\n '--ignore-missing-bcls'])\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n output_dir='run/bcl2fastq',\n mismatches=1,\n no_lane_splitting=True).command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','run/bcl2fastq',\n '--sample-sheet','SampleSheet.csv',\n '--barcode-mismatches','1',\n '--no-lane-splitting'])\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n bcl2fastq_exe='/opt/bin/bcl2fastq').command_line,\n ['/opt/bin/bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','Unaligned',\n '--sample-sheet','SampleSheet.csv'])" ]
[ "0.6006824", "0.5955702", "0.5946697", "0.59362376", "0.5814921", "0.56512016", "0.56245565", "0.55948746", "0.5536678", "0.54754657", "0.54747206", "0.54504925", "0.54346836", "0.5389236", "0.5371127", "0.5359996", "0.53590393", "0.5320611", "0.5313797", "0.52888805", "0.5284386", "0.5274762", "0.52658397", "0.5244484", "0.5244484", "0.5244184", "0.5207498", "0.5193418", "0.51922244", "0.51790553" ]
0.72031164
0
Generate path to wrapper
def wrapper_path(path): return "file://" + os.path.abspath(os.path.join(os.path.dirname(__file__), "wrappers", path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __GetWrapperFileName(cls, src):\n return FileUtils.GetBinPathForFile(src).replace('.i', '.swig.cc')", "def get_wrapper_js_path(cls):\n return os.path.join(os.path.dirname(__file__), \"wrap_crowd_source.js\")", "def _make_path(self) -> str:\r\n path_ = Path(path.join(conf.instance.output_path, self.path_prefix, self.name))\r\n if self.is_identifier_in_paths:\r\n path_ = path_ / self.identifier\r\n return path_", "def get_wrapper_name(cls, dut_name: str, wrapper_name: str) -> str:\n return f'{dut_name}_WRAPPER_{wrapper_name}'", "def BuildPath(DSLModel, table):\n folders = [\n DSLModel['GENERAL']['target_folder'],\n \"%s%s%s\" % (DSLModel['GENERAL']['target_folder'], os.sep, table['name'])\n ]\n for folder in folders:\n if not os.path.exists(folder):\n os.mkdir(folder)\n init_path = folder + os.sep + '__init__.py'\n if not os.path.exists(init_path):\n fh = open(init_path, 'w')\n fh.write('# Module Initialiation File')\n fh.close()\n TemplatesPath = \"%s%s%s%stemplates\" % (DSLModel['GENERAL']['target_folder'], os.sep, table['name'],\n os.sep)\n if not os.path.exists(TemplatesPath):\n os.mkdir(TemplatesPath)", "def built_file_path(self, name, type=None, **kw):\n raise NotImplementedError", "def GetUtilWrapper(self):\n return ''", "def path_name(self):", "def __call__(self, components: Sequence[Text]) -> Text:\n return os.path.join(self._dirpath, *components)", "def path(self):\n ...", "def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path", "def build_path(self, *args):\n components = self.build_config + args\n return PATH.join(\n self.name,\n *components\n )", "def __make_path(self, filename):\n return self.__path() + os.sep + filename", "def outpath(self):\n return None", "def make_path(self, filename):\n return os.path.join(self.root_path, filename)", "def path(self) -> str:\n return self.src + \"/\"", "def GetSwigOutDir(cls):\n return os.path.join(FileUtils.GetEDir(), 'swig')", "def __fspath__(self):\n return str(self)", "def dependency_dir(self) -> Path:", "def __fspath__(self):\n raise NotImplementedError", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def path_creator(rel_path=''):\n if platform.system() != 'Windows':\n if rel_path == '':\n path_list=sys.argv[0].split('/')[:-1]\n return '/'.join(path_list)\n else:\n path_list = sys.argv[0].split('/')[:-1]\n return '/'.join(path_list) + '/' + rel_path\n else:\n if rel_path == '':\n path_list=sys.argv[0].split('\\\\')[:-1]\n path_res='\\\\'.join(path_list)\n return path_res\n else:\n path_list = sys.argv[0].split('\\\\')[:-1]\n rel_path=rel_path.split('/')\n path_res='\\\\'.join(path_list) + '\\\\' + '\\\\'.join(rel_path)\n return path_res", "def get_helper_out_paths(self):\n helper_folder_path = self.base_folder_path + \"/checkpoints/helper_decoders/\"\n\n return [helper_folder_path + n for n in self.get_helper_names()]", "def _get_as_path(self):\n return self.__as_path", "def get_wrapper_template(self, declaration):\n pass", "def get_base_path(self) -> str:\n raise NotImplementedError()", "def get_helper_path(tool):\n return os.path.join(TOOLS_DIR, tool)", "def get_example():\n ex_dir = os.path.join(app.config['TEMPLATE_DIR'], \"inline_code\")\n app.logger.debug(\"Example directory is {}\".format(ex_dir))\n return ex_dir", 
"def generate_utils(self):\n # type: (Generator) -> str\n return render_to_string(\n self.backend,\n \"utils.py\",\n {\n \"security_defs\": self.security_defs\n },\n )", "def get_template_path(self):\n raise NotImplementedError()" ]
[ "0.6649867", "0.6346353", "0.6323831", "0.5990675", "0.5835186", "0.57515067", "0.56931067", "0.56383884", "0.5632164", "0.5619172", "0.5572063", "0.556947", "0.55476326", "0.5536915", "0.5534219", "0.55296135", "0.55116963", "0.5505502", "0.54755765", "0.54253846", "0.5424099", "0.5419037", "0.54091465", "0.5406143", "0.5386784", "0.5383371", "0.53789216", "0.5376579", "0.53754383", "0.53657424" ]
0.7444612
0
Return library dicts for undetermined libraries in ``flowcell``.
def undetermined_libraries(flowcell, rta_version): lanes = set() for library in flowcell["libraries"]: lanes |= set(library["lanes"]) result = [] for lane in lanes: result.append( { "name": "lane{}".format(lane) if rta_version < RTA_MIN_BCL2FASTQ2 else "Undetermined", "reference": library["reference"], "barcode": "Undetermined", "barcode2": "Undetermined", "lanes": [lane], } ) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ExtractLibraryLoadAddressesFromLogcat(logs):\n browser_libs = LibraryLoadMap()\n renderer_libs = LibraryLoadMap()\n for m in re_library_address.finditer(logs):\n process_type, lib_name, lib_address = m.groups()\n lib_address = int(lib_address, 16)\n if process_type == 'BROWSER':\n browser_libs[lib_name] = lib_address\n elif process_type == 'RENDERER':\n renderer_libs[lib_name] = lib_address\n else:\n assert False, 'Invalid process type'\n\n return browser_libs, renderer_libs", "def get_libraries(self, archs: list[str]):\n libraries = self.ScopedLibraryDict.get(\"common\", []).copy()\n\n for arch in archs:\n libraries + self.ScopedLibraryDict.get(arch, []).copy()\n return list(set(libraries))", "def libraries(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_libraries()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def getLibs(env, categories=\"main\"):\n libs = []\n removeSelf = False\n for category in categories.split():\n if category == \"self\":\n category = \"main\"\n removeSelf = True\n for lib in env.libs[category]:\n if lib not in libs:\n libs.append(lib)\n if removeSelf:\n try:\n libs.remove(env[\"packageName\"])\n except ValueError:\n pass\n return libs", "def libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-l'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-l for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def libs(self):\n return self['libs']", "def get_imported_packages(self):\n package_versions_dict = {'python': sys.version, 'SasView': sas.system.version.__version__}\n err_version_dict = {}\n no_version_list = []\n # Generate a list of standard modules by looking at the local python library\n try:\n standard_lib = [path.stem.split('.')[0] for path in pathlib.Path(pathlib.__file__)\n .parent.absolute().glob('*')]\n except Exception:\n standard_lib = ['abc', 'aifc', 'antigravity', 'argparse', 'ast', 'asynchat', 'asyncio', 'asyncore',\n 'base64', 'bdb', 'binhex', 'bisect', 'bz2', 'calendar', 'cgi', 'cgitb', 'chunk', 'cmd',\n 'code', 'codecs', 'codeop', 'collections', 'colorsys', 'compileall', 'concurrent',\n 'configparser', 'contextlib', 'contextvars', 'copy', 'copyreg', 'cProfile', 'crypt',\n 'csv', 'ctypes', 'curses', 'dataclasses', 'datetime', 'dbm', 'decimal', 'difflib',\n 'dis', 'distutils', 'doctest', 'email', 'encodings', 'ensurepip', 'enum', 'filecmp',\n 'fileinput', 'fnmatch', 'formatter', 'fractions', 'ftplib', 'functools', 'genericpath',\n 'getopt', 'getpass', 'gettext', 'glob', 'graphlib', 'gzip', 'hashlib', 'heapq', 'hmac',\n 'html', 'http', 'idlelib', 'imaplib', 'imghdr', 'imp', 'importlib', 'inspect', 'io',\n 'ipaddress', 'json', 'keyword', 'lib2to3', 'linecache', 'locale', 'logging', 'lzma',\n 'mailbox', 'mailcap', 'mimetypes', 'modulefinder', 'msilib', 'multiprocessing', 'netrc',\n 'nntplib', 'ntpath', 'nturl2path', 'numbers', 'opcode', 'operator', 'optparse', 'os',\n 'pathlib', 'pdb', 'pickle', 'pickletools', 'pipes', 'pkgutil', 'platform', 'plistlib',\n 'poplib', 'posixpath', 'pprint', 'profile', 'pstats', 'pty', 'pyclbr', 'pydoc',\n 'pydoc_data', 'py_compile', 'queue', 'quopri', 'random', 're', 'reprlib', 'rlcompleter',\n 'runpy', 'sched', 'secrets', 'selectors', 'shelve', 'shlex', 'shutil', 'signal',\n 'site-packages', 'site', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'socketserver', 'sqlite3',\n 
'sre_compile', 'sre_constants', 'sre_parse', 'ssl', 'stat', 'statistics', 'string',\n 'stringprep', 'struct', 'subprocess', 'sunau', 'symbol', 'symtable', 'sysconfig',\n 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'test', 'textwrap', 'this', 'threading',\n 'timeit', 'tkinter', 'token', 'tokenize', 'trace', 'traceback', 'tracemalloc', 'tty',\n 'turtle', 'turtledemo', 'types', 'typing', 'unittest', 'urllib', 'uu', 'uuid', 'venv',\n 'warnings', 'wave', 'weakref', 'webbrowser', 'wsgiref', 'xdrlib', 'xml', 'xmlrpc',\n 'zipapp', 'zipfile', 'zipimport', 'zoneinfo', '_aix_support', '_bootlocale',\n '_bootsubprocess', '_collections_abc', '_compat_pickle', '_compression', '_markupbase',\n '_osx_support', '_pydecimal', '_pyio', '_py_abc', '_sitebuiltins', '_strptime',\n '_threading_local', '_weakrefset', '__future__', '__phello__', '__pycache__']\n standard_lib.extend(sys.builtin_module_names)\n standard_lib.append(\"sas\")\n\n for module_name in sys.modules.keys():\n\n package_name = module_name.split('.')[0]\n\n # A built in python module or a local file, which have no version, only the python/SasView version\n if package_name in standard_lib or package_name in package_versions_dict:\n continue\n\n # Import module\n try:\n package = __import__(package_name)\n except Exception as e:\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to import module\"\n continue\n\n # Retrieving the modules version using the __version__ attribute\n if hasattr(package, '__version__'):\n # Module has __version__ attribute\n try:\n package_versions_dict[package_name] = package.__version__\n continue\n except Exception as e:\n # Unable to access module\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n f\"version using .__version__\"\n pass\n\n # Retrieving the modules version using the pkg_resources package\n # Unreliable, so second option\n try:\n package_versions_dict[package_name] = pkg_resources.get_distribution(package_name).version\n except Exception:\n # Modules that cannot be found by pkg_resources\n pass\n else:\n continue\n\n # Modules version number could not be attained by any of the previous methods\n\n no_version_list.append(package_name)\n\n # Currently not required for any packages used by SasView\n # Retrieving the modules version using the version attribute\n # if hasattr(package, 'version'):\n # # Module has version attribute\n # try:\n # if isinstance(package.version, str):\n # print(package)\n # package_versions_dict[package_name] = package.version\n # continue\n # except Exception as e:\n # # Unable to access module\n # err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n # f\"version using .version\"\n # pass\n\n # Clean up\n package_versions_dict = self.remove_duplicate_modules(package_versions_dict)\n no_version_dict = self.format_no_version_list(package_versions_dict, no_version_list)\n\n return {\"results\": package_versions_dict, \"no_results\": no_version_dict, \"errors\": err_version_dict}", "def get_libraries(self, branch=\"library\", **kwargs):\n return self._get_structures_for_branch_and_locator(branch, self._create_library_locator, **kwargs)", "def consider_env(self): \n for spec in self._envlist(\"PYLIB\"):\n self.import_module(spec)", "def get_library_keys(self):\n return list({\n self._create_library_locator(library_index, branch=None)\n for library_index\n in self.find_matching_course_indexes(branch=\"library\")\n })", "def get_library_config(self):\r\n\r\n 
num_library_terms = LibraryTerms().get_library_len()\r\n\r\n library_config = {'total_terms': num_library_terms, 'deriv_order': 3, 'poly_order': 2}\r\n #library_config = {'total_terms': num_library_terms, 'deriv_order': 2, 'poly_order': 1}\r\n\r\n return library_config", "def build_sample_map(flowcell):\n result = {}\n rows = [(lane, lib[\"name\"]) for lib in flowcell[\"libraries\"] for lane in lib[\"lanes\"]]\n i = 1\n for _, name in sorted(set(rows)):\n if name not in result:\n result[name] = \"S{}\".format(i)\n i += 1\n return result", "def find_thirdparty_marshaller_plugins() -> Dict[\n str,\n Dict[str, pkg_resources.EntryPoint],\n]:\n all_plugins = tuple(\n pkg_resources.iter_entry_points(\"hdf5storage.marshallers.plugins\"),\n )\n return {\n ver: {p.module_name: p for p in all_plugins if p.name == ver}\n for ver in supported_marshaller_api_versions()\n }", "def load_dependencies() -> dict:\n global CARRIERS_DICT\n with open('mms_gateways.json') as mms:\n CARRIERS_DICT = json.loads(mms.read())\n with open('config.json', 'r') as cfig:\n cfig_dict = json.loads(cfig.read())\n return cfig_dict", "def _get_external_libraries(self, dom, type='CLASSES'):\n module = dom.getElementsByTagName('module')[0]\n components = module.getElementsByTagName('component')\n for component in components:\n if component.getAttribute('name') == 'NewModuleRootManager':\n for orderEntry in component.getElementsByTagName('orderEntry'):\n for library in orderEntry.getElementsByTagName('library'):\n if library.getAttribute('name') == 'external':\n for library_type in library.getElementsByTagName(type):\n return library_type.getElementsByTagName('root')\n return None", "def modules():\n cmd = \"{} -M\".format(_detect_os())\n ret = {}\n ret[\"static\"] = []\n ret[\"shared\"] = []\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n for line in out:\n comps = line.split()\n if not comps:\n continue\n if \"(static)\" in line:\n ret[\"static\"].append(comps[0])\n if \"(shared)\" in line:\n ret[\"shared\"].append(comps[0])\n return ret", "def initialize_archi():\n \n for ttype in [archi]:\n prefix = ttype.__name__ + \".\"\n type_name = ttype.__name__.split(\".\")[-1]\n impls_list[ttype] = {}\n for importer, impl_name, ispkg in pkgutil.iter_modules(ttype.__path__, prefix):\n try:\n impl = sys.modules[impl_name]\n impl_name = impl_name.split(\".\")[-1]\n log.info(\"Found target %s.%s\", type_name, impl_name)\n \n impl.init(my_gdb)\n impls_list[ttype][impl_name] = impl\n except KeyError as ke:\n pass\n except Exception as e:\n log.warn(\"Couldn't load target %s (%s)\", impl_name, e)\n \n current_impls[ttype] = ttype.prefered", "def get_required_mods(self):\r\n mods = []\r\n unknowntags = []\r\n for key, value in self.dependencies.items():\r\n if value.required_by:\r\n if value.provided_by:\r\n mods.append(list(value.provided_by)[0]) #Pick random'ish if more than one.\r\n else:\r\n unknowntags.append((key, value))\r\n return {\"mods\":sorted(mods, key= lambda x: x.mod.name), \"unknown\": unknowntags}", "def get_library_summaries(self, **kwargs):\n branch = 'library'\n libraries_summaries = []\n for entry, structure_info in self._get_courselike_blocks_for_branch(branch, **kwargs):\n library_locator = self._create_library_locator(structure_info, branch=None)\n library_block = [\n block_data\n for block_key, block_data in entry['blocks'].items()\n if block_key.type == \"library\"\n ]\n if not library_block:\n raise ItemNotFoundError\n\n if len(library_block) > 1:\n raise MultipleLibraryBlocksFound(\n \"Expected 1 library block, 
but found {}\".format(len(library_block))\n )\n\n library_block_fields = library_block[0].fields\n display_name = ''\n\n if 'display_name' in library_block_fields:\n display_name = library_block_fields['display_name']\n\n libraries_summaries.append(\n LibrarySummary(library_locator, display_name)\n )\n\n return libraries_summaries", "def get_library_list(self):\n ret = []\n prefix = \"-l\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/l\"\n for ii in self.__libraries:\n ret += [prefix + ii]\n return ret", "def list_libraries(self, trans, **kwargs):\n return self._libraries_grid(trans, **kwargs)", "def selected_libs(args: Namespace) -> List[str]:\n return args.lib or [\"python\", \"lkt\"]", "def compute_dependency_specs(cls, kwargs=None, payload=None):\n for spec in super(ImportJarsMixin, cls).compute_dependency_specs(kwargs, payload):\n yield spec\n\n imported_jar_library_specs = cls.imported_jar_library_specs(kwargs=kwargs, payload=payload)\n for spec in imported_jar_library_specs:\n yield spec", "def gather() -> None:\n # pylint: disable=too-many-locals\n\n # First off, clear out any existing output.\n existing_dirs = [\n os.path.join('src/external', d) for d in os.listdir('src/external')\n if d.startswith('python-') and d != 'python-notes.txt'\n ]\n existing_dirs += [\n os.path.join('assets/src', d) for d in os.listdir('assets/src')\n if d.startswith('pylib-')\n ]\n for existing_dir in existing_dirs:\n efrotools.run('rm -rf \"' + existing_dir + '\"')\n\n for buildtype in ['debug', 'release']:\n debug = buildtype == 'debug'\n bsuffix = '_debug' if buildtype == 'debug' else ''\n bsuffix2 = '-debug' if buildtype == 'debug' else ''\n\n libname = 'python' + PYTHON_VERSION_MAJOR + ('dm' if debug else 'm')\n\n bases = {\n 'mac':\n f'build/python_apple_mac{bsuffix}/build/macOS',\n 'ios':\n f'build/python_apple_ios{bsuffix}/build/iOS',\n 'tvos':\n f'build/python_apple_tvos{bsuffix}/build/tvOS',\n 'android_arm':\n f'build/python_android_arm{bsuffix}/build/sysroot',\n 'android_arm64':\n f'build/python_android_arm64{bsuffix}/build/sysroot',\n 'android_x86':\n f'build/python_android_x86{bsuffix}/build/sysroot',\n 'android_x86_64':\n f'build/python_android_x86_64{bsuffix}/build/sysroot'\n }\n\n # Note: only need pylib for the first in each group.\n builds: List[Dict[str, Any]] = [{\n 'name':\n 'macos',\n 'group':\n 'apple',\n 'headers':\n bases['mac'] + '/Support/Python/Headers',\n 'libs': [\n bases['mac'] + '/Support/Python/libPython.a',\n bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['mac'] + '/Support/XZ/libxz.a'\n ],\n 'pylib':\n (bases['mac'] + '/python/lib/python' + PYTHON_VERSION_MAJOR),\n }, {\n 'name':\n 'ios',\n 'group':\n 'apple',\n 'headers':\n bases['ios'] + '/Support/Python/Headers',\n 'libs': [\n bases['ios'] + '/Support/Python/libPython.a',\n bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['ios'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'tvos',\n 'group':\n 'apple',\n 'headers':\n bases['tvos'] + '/Support/Python/Headers',\n 'libs': [\n bases['tvos'] + '/Support/Python/libPython.a',\n bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['tvos'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'android_arm',\n 'group':\n 'android',\n 'headers':\n bases['android_arm'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm'] + '/usr/lib/libssl.a',\n bases['android_arm'] + '/usr/lib/libcrypto.a',\n bases['android_arm'] + '/usr/lib/liblzma.a',\n bases['android_arm'] + 
'/usr/lib/libsqlite3.a'\n ],\n 'libinst':\n 'android_armeabi-v7a',\n 'pylib': (bases['android_arm'] + '/usr/lib/python' +\n PYTHON_VERSION_MAJOR),\n }, {\n 'name': 'android_arm64',\n 'group': 'android',\n 'headers': bases['android_arm64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm64'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm64'] + '/usr/lib/libssl.a',\n bases['android_arm64'] + '/usr/lib/libcrypto.a',\n bases['android_arm64'] + '/usr/lib/liblzma.a',\n bases['android_arm64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_arm64-v8a',\n }, {\n 'name': 'android_x86',\n 'group': 'android',\n 'headers': bases['android_x86'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86'] + '/usr/lib/libssl.a',\n bases['android_x86'] + '/usr/lib/libcrypto.a',\n bases['android_x86'] + '/usr/lib/liblzma.a',\n bases['android_x86'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86',\n }, {\n 'name': 'android_x86_64',\n 'group': 'android',\n 'headers': bases['android_x86_64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86_64'] + '/usr/lib/libssl.a',\n bases['android_x86_64'] + '/usr/lib/libcrypto.a',\n bases['android_x86_64'] + '/usr/lib/liblzma.a',\n bases['android_x86_64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86_64',\n }]\n\n for build in builds:\n\n grp = build['group']\n builddir = f'src/external/python-{grp}{bsuffix2}'\n header_dst = os.path.join(builddir, 'include')\n lib_dst = os.path.join(builddir, 'lib')\n assets_src_dst = f'assets/src/pylib-{grp}'\n\n # Do some setup only once per group.\n if not os.path.exists(builddir):\n efrotools.run('mkdir -p \"' + builddir + '\"')\n efrotools.run('mkdir -p \"' + lib_dst + '\"')\n\n # Only pull modules into game assets on release pass.\n if not debug:\n # Copy system modules into the src assets\n # dir for this group.\n efrotools.run('mkdir -p \"' + assets_src_dst + '\"')\n efrotools.run(\n 'rsync --recursive --include \"*.py\"'\n ' --exclude __pycache__ --include \"*/\" --exclude \"*\" \"'\n + build['pylib'] + '/\" \"' + assets_src_dst + '\"')\n\n # Prune a bunch of modules we don't need to cut\n # down on size.\n prune = [\n 'config-*', 'idlelib', 'lib-dynload', 'lib2to3',\n 'multiprocessing', 'pydoc_data', 'site-packages',\n 'ensurepip', 'tkinter', 'wsgiref', 'distutils',\n 'turtle.py', 'turtledemo', 'test', 'sqlite3/test',\n 'unittest', 'dbm', 'venv', 'ctypes/test', 'imaplib.py',\n '_sysconfigdata_*'\n ]\n efrotools.run('cd \"' + assets_src_dst + '\" && rm -rf ' +\n ' '.join(prune))\n\n # Some minor filtering to system scripts:\n # on iOS/tvOS, addusersitepackages() leads to a crash\n # due to _sysconfigdata_dm_ios_darwin module not existing,\n # so let's skip that.\n fname = f'{assets_src_dst}/site.py'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt,\n ' known_paths = addusersitepackages(known_paths)',\n ' # efro tweak: this craps out on ios/tvos.\\n'\n ' # (and we don\\'t use it anyway)\\n'\n ' # known_paths = addusersitepackages(known_paths)')\n efrotools.writefile(fname, txt)\n\n # Copy in a base set of headers (everything in a group should\n # be using the same headers)\n efrotools.run(f'cp -r \"{build[\"headers\"]}\" \"{header_dst}\"')\n\n # Clear whatever pyconfigs came across; we'll build our own\n # universal one below.\n efrotools.run('rm ' + header_dst + '/pyconfig*')\n\n # Write a master pyconfig header that reroutes to each\n # 
platform's actual header.\n with open(header_dst + '/pyconfig.h', 'w') as hfile:\n hfile.write(\n '#if BA_OSTYPE_MACOS\\n'\n '#include \"pyconfig-macos.h\"\\n\\n'\n '#elif BA_OSTYPE_IOS\\n'\n '#include \"pyconfig-ios.h\"\\n\\n'\n '#elif BA_OSTYPE_TVOS\\n'\n '#include \"pyconfig-tvos.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__arm__)\\n'\n '#include \"pyconfig-android_arm.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\\n'\n '#include \"pyconfig-android_arm64.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__i386__)\\n'\n '#include \"pyconfig-android_x86.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\\n'\n '#include \"pyconfig-android_x86_64.h\"\\n\\n'\n '#else\\n'\n '#error unknown platform\\n\\n'\n '#endif\\n')\n\n # Now copy each build's config headers in with unique names.\n cfgs = [\n f for f in os.listdir(build['headers'])\n if f.startswith('pyconfig')\n ]\n\n # Copy config headers to their filtered names.\n for cfg in cfgs:\n out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])\n if cfg == 'pyconfig.h':\n\n # For platform's root pyconfig.h we need to filter\n # contents too (those headers can themselves include\n # others; ios for instance points to a arm64 and a\n # x86_64 variant).\n contents = efrotools.readfile(build['headers'] + '/' + cfg)\n contents = contents.replace('pyconfig',\n 'pyconfig-' + build['name'])\n efrotools.writefile(header_dst + '/' + out, contents)\n else:\n # other configs we just rename\n efrotools.run('cp \"' + build['headers'] + '/' + cfg +\n '\" \"' + header_dst + '/' + out + '\"')\n\n # Copy in libs. If the lib gave a specific install name,\n # use that; otherwise use name.\n targetdir = lib_dst + '/' + build.get('libinst', build['name'])\n efrotools.run('rm -rf \"' + targetdir + '\"')\n efrotools.run('mkdir -p \"' + targetdir + '\"')\n for lib in build['libs']:\n efrotools.run('cp \"' + lib + '\" \"' + targetdir + '\"')\n\n print('Great success!')", "def other_libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-other'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-other for package `%s': %s\" % (self.name, stderr))\n\n return uniq(stdout.split())", "def provides(self):\n\n return {'zlib': ComponentLibrary(libs=('win#-lzlib', '!win#-lz'))}", "def feature_dynamic_imports(self):\n # Get mutexes\n self.features[\"mutex\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {}).get(\"mutex\")\n\n # Get processes names\n self.features[\"processes\"] = []\n for p in self.report.get(\"behavior\", {}).get(\"processes\", []):\n p_name = p.get(\"process_name\")\n if p_name and p_name not in self.features[\"processes\"]:\n self.features[\"processes\"].append(p_name)\n\n # Get dynamically loaded library names\n self.features[\"dynamic_imports\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"dll_loaded\", [])", "def get_libraries(self, project=None):\n unaligned_path = self.get_unaligned_path()\n projects = self.get_projects()\n if project is not None:\n logger.debug(\"subsetting projects\")\n projects = [p for p in projects\n if re.search(project, p)]\n logger.debug(\"collecting list of libraries\")\n logger.debug(\"searching in projects {}\".format(projects))\n # Need to handle possibility of new basespace directory structure\n libList = []\n for p in projects:\n logger.debug(\"Attempting to collect libs for project: {}\".format(p))\n for l in os.listdir(os.path.join(unaligned_path, p)):\n logger.debug(\"Looking for lib name in: 
{}\".format(l))\n # Old basespace - able to parse libid from current dir\n if (len(parsing.get_library_id(l))):\n libList.append(l)\n # New basespace - need to go down one more level to parse lib\n elif (os.path.isdir(os.path.join(unaligned_path, p, l))): \n logger.debug(\"Lib name not found. Going down into: {}\"\n .format(os.path.join(unaligned_path, p, l)))\n for lNext in os.listdir(os.path.join(unaligned_path, p, l)):\n if (len(parsing.get_library_id(lNext))):\n libList.append(os.path.join(l,lNext))\n else:\n logger.debug(\"Lib name not found and {} is not a directory.\"\n .format(os.path.join(unaligned_path, p, l)))\n \n return libList", "def get_processed_libraries(self, project=None, sub_path=\"inputFastqs\"):\n flowcell_path = self.get_flowcell_path()\n projects = self.get_processed_projects()\n if project is not None:\n logger.debug(\"subsetting projects\")\n projects = [p for p in projects\n if re.search(project, p)]\n logger.debug(\"collecting list of libraries\")\n logger.debug(\"searching in projects {}\".format(projects))\n return [l for p in projects\n for l in os.listdir(os.path.join(flowcell_path, p, sub_path))\n if len(parsing.get_library_id(l))]", "def get_runtime_map(runtimes, module, tags=()):\n\n result = {}\n modruntimes = get_compatible_runtimes(module, tags=tags,\n include_ancestors=True)\n\n runtimes_full = []\n\n for runtime in runtimes:\n runtimes_full.append([c for c in runtime.__mro__ if is_runtime(c)])\n\n used = set()\n\n for i, runtime_mro in enumerate(runtimes_full):\n if i not in used:\n derivative = runtime_mro[0].get_derivative(module)\n if derivative is not None:\n result[derivative] = runtime_mro[0]\n\n for i, runtime_mro in enumerate(runtimes_full):\n if set(runtime_mro) & modruntimes and i not in used:\n try:\n result[module].add(runtime_mro[0])\n except KeyError:\n result[module] = {runtime_mro[0]}\n\n used.add(i)\n\n return result" ]
[ "0.5857328", "0.566059", "0.56077105", "0.5509866", "0.54729795", "0.54636264", "0.54300827", "0.5420543", "0.53581333", "0.5282064", "0.52263796", "0.5219019", "0.51474965", "0.5139492", "0.51295364", "0.51262486", "0.5125705", "0.5117036", "0.5068313", "0.50656843", "0.5049053", "0.50368", "0.5034304", "0.5031331", "0.5004464", "0.49985287", "0.4992631", "0.498859", "0.49750185", "0.49706343" ]
0.60556275
0
Return list with file names for the given library.
def lib_file_names(library, rta_version, n_template, n_index, lane=None, seq=None, name=None): if rta_version < RTA_MIN_BCL2FASTQ2 and library.get("barcode2", "Undetermined") not in ( "", "Undetermined", ): indices = ["".join((library["barcode"], "-", library["barcode2"]))] else: indices = [library["barcode"] or "NoIndex"] reads = ["R" + str(i + 1) for i in range(n_template)] reads += ["I" + str(i + 1) for i in range(n_index)] lanes = ["L{:03d}".format(lno) for lno in library["lanes"] if lane is None or lno == lane] if seq is None: seq = "" else: seq = seq + "_" if rta_version < RTA_MIN_BCL2FASTQ2: tpl = "{sample_name}_{index}_{lane}_{read}_001.fastq.gz" else: tpl = "{sample_name}_{seq}{lane}_{read}_001.fastq.gz" return list( sorted( [ tpl.format( sample_name=name or library["name"], index=index, lane=lane, read=read, seq=seq ) for index in indices for read in reads for lane in lanes ] ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def library_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-L'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-L for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-l'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-l for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def get_library_list(self):\n ret = []\n prefix = \"-l\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/l\"\n for ii in self.__libraries:\n ret += [prefix + ii]\n return ret", "def list_photo_libraries():\n \"\"\" on MacOS < 10.15, this may omit some libraries \"\"\"\n\n # On 10.15, mdfind appears to find all libraries\n # On older MacOS versions, mdfind appears to ignore some libraries\n # glob to find libraries in ~/Pictures then mdfind to find all the others\n # TODO: make this more robust\n lib_list = glob.glob(f\"{str(Path.home())}/Pictures/*.photoslibrary\")\n\n # On older OS, may not get all libraries so make sure we get the last one\n last_lib = get_last_library_path()\n if last_lib:\n lib_list.append(last_lib)\n\n output = subprocess.check_output(\n [\"/usr/bin/mdfind\", \"-onlyin\", \"/\", \"-name\", \".photoslibrary\"]\n ).splitlines()\n for lib in output:\n lib_list.append(lib.decode(\"utf-8\"))\n lib_list = list(set(lib_list))\n lib_list.sort()\n return lib_list", "def get_file_list() -> List[str]:\n filenames = []\n os.makedirs(\"sequence\", exist_ok=True)\n for file in glob.glob(\"sequence/*.smp\"):\n filenames.append(file.replace(\"sequence/\", \"\"))\n return filenames", "def files_list(directory: str) -> list:\n files = os.listdir(directory)\n\n return files", "def library_search_path(self, pedantic=False):\n return []", "def get_libraries_names():\n rpm_packages_path = path.join(PMDK_PATH, 'rpm', SYSTEM_ARCHITECTURE)\n libraries_names = [elem.split('-')[0] for elem in listdir(rpm_packages_path)\n if PMDK_VERSION in elem]\n return set(libraries_names)", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' 
and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return lst", "def files(self):\n return [surrogate(name) for name in self.hdr[rpm.RPMTAG_FILENAMES]]", "def libraryFolders() -> list:\n\tpaths = [steamDir() + '/steamapps/'] # create a list for library paths\n\ttry:\n\t\t# open the file that contains the library paths\n\t\twith open(steamDir() + '/steamapps/libraryfolders.vdf', 'r') as file:\n\t\t\tlibrary = Property.parse(file, 'libraryfolders.vdf').as_dict()\n\t\t\t# remove useless stuff\n\t\t\tlibrary['libraryfolders'].pop('timenextstatsreport')\n\t\t\tlibrary['libraryfolders'].pop('contentstatsid')\n\texcept Exception as e:\n\t\traise ConfigError(f'Error while reading steam library file: {e}')\n\n\t# check for other library paths, if the dict is empty, there's no one\n\tif len( library['libraryfolders'] ) != 0:\n\t\tfor i in range( len( library['libraryfolders'] ) ):\n\t\t\tpaths.append( library['libraryfolders'][ i ] + '/steamapps/' ) # append the path\n\n\t# return the \"compiled\" list of libraries\n\treturn paths", "def list_filenames(self):\n l = []\n for path, dirs, files in os.walk(self.archive_path):\n for file in files:\n l.append(os.path.relpath(os.path.join(path,file),self.archive_path))\n l.sort()\n return l", "def searchfiles(pattern='C:\\\\RoboDK\\\\Library\\\\*.rdk'):\n import glob\n return glob.glob(pattern)", "def list_of_files(path):\r\n files_list=[]\r\n path = os.path.abspath(path)\r\n\r\n #if the path is a file name, returns a list of a single file name\r\n if os.path.isfile(path):\r\n files_list.append(path)\r\n #if the path is a directory name, returns a list of all the file names anded with .asm\r\n else:\r\n for file in os.listdir(path):\r\n if file.endswith(\".asm\"):\r\n files_list.append(os.path.join(path, file))\r\n return files_list", "def get_library_directory_list(self):\n ret = []\n prefix = \"-L\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/L\"\n for ii in self.__library_directories:\n ret += [prefix + ii]\n if self.__command_basename.startswith(\"ld\"):\n ret += [\"-rpath-link\", \":\".join(self.__library_directories)]\n return ret", "def get_bibfiles(folder: str) -> t.List[str]:\n full_pathname = os.path.normpath(os.path.abspath(folder))\n bib_files = []\n for f in os.listdir(full_pathname):\n fullname = os.path.join(full_pathname, f)\n if f.endswith(\".bib\") and os.path.isfile(fullname):\n logging.debug(f'get bibfile \"{f}\" from directory \"{full_pathname}\"')\n bib_files.append(fullname)\n return bib_files", "def get_file_names(top_dir):\n\n if top_dir.startswith(\"s3://\"):\n list_of_files = get_s3_file_names(top_dir)\n else:\n top_dir = top_dir[:-1] if top_dir[-1] == \"/\" else top_dir\n list_of_files = glob.glob(top_dir+'/*.csv', recursive=True)\n\n return list_of_files", "def get_filelist(import_path, extension):\n filelist = []\n for root, dirs, files in os.walk(import_path):\n filelist += glob.glob(os.path.join(root, '*.' 
+ extension))\n return filelist", "def library_dirs(self):", "def list_library(command):\n namespace = app.main(command)\n assert namespace.command == 'll' or namespace.command == \"listlibrary\"", "def get_libraries(name_only=False):\n\n libs = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM libraries ')\n\n for lib in cur.execute(sql):\n libs.append(lib[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM libraries '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_LIBRARY\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n libs.append(item)\n\n return libs", "def get_third_party_package_module_names():\n # type: () -> List[str]\n result = [] # type: List[str]\n\n def is_python_package(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n \"\"\"\n Return package name if the provided file path is a Python package, None otherwise.\n \"\"\"\n file_name = os.path.basename(file_path)\n init_file_path = os.path.join(file_path, \"__init__.py\")\n\n if os.path.isdir(file_path) and os.path.isfile(init_file_path):\n # Package\n return (True, file_name)\n\n return (False, None)\n\n def is_python_module(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n \"\"\"\n Return module name if the provided file path is a Python module, None otherwise.\n \"\"\"\n if (\n os.path.isfile(file_path)\n and file_path.endswith(\".py\")\n and file_name != \"__init__.py\"\n ):\n # Single file module (e.g. six.py)\n module_name = file_name.replace(\".py\", \"\")\n return (True, module_name)\n\n return (False, None)\n\n for directory_path in THIRD_PARTY_DIRECTORIES:\n file_names = os.listdir(directory_path)\n\n for file_name in file_names:\n file_path = os.path.join(directory_path, file_name)\n\n python_package, package_name = is_python_package(directory_path, file_path)\n python_module, module_name = is_python_module(directory_path, file_path)\n\n if python_package and package_name:\n result.append(package_name)\n elif python_module and module_name:\n result.append(module_name)\n\n return result", "def list_files(directory) -> List:\n return sorted(f for f in listdir(directory) if f.endswith('.py') and '__init__.py' not in f)", "def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def getFilmTitles(checkFolder):\n\n files = [str(x) for x in Path(checkFolder).rglob(\"*\")]\n libFilmFiles = list(map(os.path.basename,files)) # Remove the path\n libFilmTitles = [os.path.splitext(x)[0] for x in libFilmFiles]\n return libFilmTitles", "def lsFiles(ruta = getcwd()):\r\n files = [arch.name for arch in scandir(ruta) if arch.is_file()]\r\n return files", "def get_library_keys(self):\n return list({\n self._create_library_locator(library_index, branch=None)\n for library_index\n in self.find_matching_course_indexes(branch=\"library\")\n })" ]
[ "0.68197584", "0.6612801", "0.65860647", "0.651696", "0.634841", "0.6239582", "0.6212851", "0.6169241", "0.61667246", "0.61667246", "0.6149375", "0.6090225", "0.6061063", "0.6045163", "0.6041969", "0.60409045", "0.602381", "0.60000515", "0.5995119", "0.5987065", "0.5984702", "0.59807116", "0.5980651", "0.5970195", "0.596421", "0.5955355", "0.59547055", "0.5951631", "0.5933368", "0.5914613" ]
0.68200177
0
Build sample map ``dict`` for the given flowcell.
def build_sample_map(flowcell): result = {} rows = [(lane, lib["name"]) for lib in flowcell["libraries"] for lane in lib["lanes"]] i = 1 for _, name in sorted(set(rows)): if name not in result: result[name] = "S{}".format(i) i += 1 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def smp_dict():\n out = base_dict()\n out['mro']['current'] = ['Sample']\n out['name']['current'] = 'Sample'\n ao(out, 'idx', 'Integer', attr=['Hidden'])\n ao(out, 'ii', 'Integer', attr=['Hidden'])\n ao(out, 'initialDimension', 'Float', 0., name='Initial Dimension')\n return out", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CHEL').get('abstractTypes')\n exolinks = globalMap.get('CHEL').get('exolinks')\n\n # DataType HalfLifeType\n currentMap = {}\n abstractTypes['HalfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'] = currentMap\n loadMaps['CHEL.HalfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.HalfLifeType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class ChemElement\n currentMap = {}\n abstractTypes['ChemElement'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'] = currentMap\n loadMaps['CHEL.ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElements'\n currentMap['objkey'] = 'symbol'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElement.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElement.atomNumber\n currentMap = {}\n contentMap['atomNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'] = currentMap\n loadMaps['CHEL.ChemElement.atomNumber'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'\n currentMap['name'] = 'atomNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute ChemElement.atomicRadius\n currentMap = {}\n contentMap['atomicRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'] = currentMap\n loadMaps['CHEL.ChemElement.atomicRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomicRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'\n currentMap['name'] = 'atomicRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n 
currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.covalentRadius\n currentMap = {}\n contentMap['covalentRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'] = currentMap\n loadMaps['CHEL.ChemElement.covalentRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.covalentRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'\n currentMap['name'] = 'covalentRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'] = currentMap\n loadMaps['CHEL.ChemElement.mass'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'] = currentMap\n loadMaps['CHEL.ChemElement.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Attribute ChemElement.symbol\n currentMap = {}\n contentMap['symbol'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'] = currentMap\n loadMaps['CHEL.ChemElement.symbol'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.symbol'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'\n currentMap['name'] = 'symbol'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Role ChemElement.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElement.isotopes\n currentMap = {}\n contentMap['isotopes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'] = currentMap\n loadMaps['CHEL.ChemElement.isotopes'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.isotopes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'\n currentMap['name'] = 'isotopes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElement\n\n currentMap = abstractTypes.get('ChemElement')\n aList = ['atomNumber', 'atomicRadius', 'covalentRadius', 'mass', 'name', 'symbol']\n currentMap['headerAttrs'] = aList\n aList = ['isotopes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopes']\n currentMap['children'] = aList\n\n # Class ChemElementStore\n currentMap = {}\n abstractTypes['ChemElementStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'] = currentMap\n loadMaps['CHEL.ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore'\n currentMap['type'] = 
'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElementStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElementStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElementStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute ChemElementStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'] = currentMap\n loadMaps['CHEL.ChemElementStore.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ChemElementStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElementStore.chemElements\n currentMap = {}\n contentMap['chemElements'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'] = currentMap\n loadMaps['CHEL.ChemElementStore.chemElements'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.chemElements'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'\n currentMap['name'] = 'chemElements'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElementStore\n\n currentMap = abstractTypes.get('ChemElementStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['chemElements', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemElements']\n currentMap['children'] = aList\n\n # Class Isotope\n currentMap = {}\n abstractTypes['Isotope'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'] = currentMap\n loadMaps['CHEL.Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopes'\n currentMap['objkey'] = 'massNumber'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n contentMap = {}\n 
currentMap['content'] = contentMap\n\n # Attribute Isotope.abundance\n currentMap = {}\n contentMap['abundance'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'] = currentMap\n loadMaps['CHEL.Isotope.abundance'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.abundance'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'\n currentMap['name'] = 'abundance'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00058')\n\n # Attribute Isotope.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotope.gyroMagneticRatio\n currentMap = {}\n contentMap['gyroMagneticRatio'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'] = currentMap\n loadMaps['CHEL.Isotope.gyroMagneticRatio'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.gyroMagneticRatio'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'\n currentMap['name'] = 'gyroMagneticRatio'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.halfLife\n currentMap = {}\n contentMap['halfLife'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'] = currentMap\n loadMaps['CHEL.Isotope.halfLife'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLife'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'\n currentMap['name'] = 'halfLife'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeError\n currentMap = {}\n contentMap['halfLifeError'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeError'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'\n currentMap['name'] = 'halfLifeError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeType\n currentMap = {}\n contentMap['halfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'\n currentMap['name'] = 'halfLifeType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'unknown'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002')\n\n # Attribute Isotope.magneticMoment\n currentMap = {}\n contentMap['magneticMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'] = currentMap\n loadMaps['CHEL.Isotope.magneticMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.magneticMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'\n currentMap['name'] = 'magneticMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # 
Attribute Isotope.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'] = currentMap\n loadMaps['CHEL.Isotope.mass'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.massNumber\n currentMap = {}\n contentMap['massNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'] = currentMap\n loadMaps['CHEL.Isotope.massNumber'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.massNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'\n currentMap['name'] = 'massNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotope.quadrupoleMoment\n currentMap = {}\n contentMap['quadrupoleMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'] = currentMap\n loadMaps['CHEL.Isotope.quadrupoleMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.quadrupoleMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'\n currentMap['name'] = 'quadrupoleMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.receptivity\n currentMap = {}\n contentMap['receptivity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'] = currentMap\n loadMaps['CHEL.Isotope.receptivity'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.receptivity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'\n currentMap['name'] = 'receptivity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.spin\n currentMap = {}\n contentMap['spin'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'] = currentMap\n loadMaps['CHEL.Isotope.spin'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.spin'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'\n currentMap['name'] = 'spin'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Isotope.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Isotope\n\n currentMap = abstractTypes.get('Isotope')\n aList = ['abundance', 'gyroMagneticRatio', 'halfLife', 'halfLifeError', 'halfLifeType', 'magneticMoment', 'mass', 'massNumber', 'quadrupoleMoment', 'receptivity', 'spin']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to ChemElement\n currentMap = {}\n exolinks['ChemElement'] = currentMap\n loadMaps['CHEL.exo-ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElement'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['name'] = 'ChemElement'\n currentMap['eType'] = 'cplx'\n 
currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n\n # Out-of-package link to ChemElementStore\n currentMap = {}\n exolinks['ChemElementStore'] = currentMap\n loadMaps['CHEL.exo-ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElementStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['name'] = 'ChemElementStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to Isotope\n currentMap = {}\n exolinks['Isotope'] = currentMap\n loadMaps['CHEL.exo-Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.exo-Isotope'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['name'] = 'Isotope'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CCLB').get('abstractTypes')\n exolinks = globalMap.get('CCLB').get('exolinks')\n\n # Class AtomLabel\n currentMap = {}\n abstractTypes['AtomLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'] = currentMap\n loadMaps['CCLB.AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'atomLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AtomLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AtomLabel.isotopeCode\n currentMap = {}\n contentMap['isotopeCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'] = currentMap\n loadMaps['CCLB.AtomLabel.isotopeCode'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.isotopeCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'\n currentMap['name'] = 'isotopeCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'] = currentMap\n loadMaps['CCLB.AtomLabel.name'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.subType\n currentMap = {}\n contentMap['subType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'] = currentMap\n loadMaps['CCLB.AtomLabel.subType'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.subType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'\n currentMap['name'] = 'subType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AtomLabel.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'] = currentMap\n loadMaps['CCLB.AtomLabel.weight'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role AtomLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AtomLabel\n\n currentMap = abstractTypes.get('AtomLabel')\n aList = ['isotopeCode', 'name', 'subType', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ChemCompLabel\n currentMap = {}\n abstractTypes['ChemCompLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'] = currentMap\n loadMaps['CCLB.ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemCompLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemCompLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemCompLabel.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'] = currentMap\n loadMaps['CCLB.ChemCompLabel.ccpCode'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ChemCompLabel.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'] = currentMap\n loadMaps['CCLB.ChemCompLabel.molType'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ChemCompLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemCompLabel.isotopomers\n currentMap = {}\n 
contentMap['isotopomers'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'] = currentMap\n loadMaps['CCLB.ChemCompLabel.isotopomers'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.isotopomers'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'\n currentMap['name'] = 'isotopomers'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of ChemCompLabel\n\n currentMap = abstractTypes.get('ChemCompLabel')\n aList = ['ccpCode', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['isotopomers', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopomers']\n currentMap['children'] = aList\n\n # Class Isotopomer\n currentMap = {}\n abstractTypes['Isotopomer'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'] = currentMap\n loadMaps['CCLB.Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopomers'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotopomer.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotopomer.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'] = currentMap\n loadMaps['CCLB.Isotopomer.serial'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotopomer.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'] = currentMap\n loadMaps['CCLB.Isotopomer.weight'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role Isotopomer.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Isotopomer.atomLabels\n currentMap = {}\n contentMap['atomLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'] = currentMap\n loadMaps['CCLB.Isotopomer.atomLabels'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.atomLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'\n currentMap['name'] = 'atomLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of Isotopomer\n\n currentMap = abstractTypes.get('Isotopomer')\n aList = ['serial', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['atomLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = 
['atomLabels']\n currentMap['children'] = aList\n\n # Class LabelingScheme\n currentMap = {}\n abstractTypes['LabelingScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'] = currentMap\n loadMaps['CCLB.LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'labelingSchemes'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute LabelingScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute LabelingScheme.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'] = currentMap\n loadMaps['CCLB.LabelingScheme.details'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute LabelingScheme.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute LabelingScheme.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.longName\n currentMap = {}\n contentMap['longName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'] = currentMap\n loadMaps['CCLB.LabelingScheme.longName'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.longName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'\n currentMap['name'] = 'longName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute LabelingScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'] = currentMap\n loadMaps['CCLB.LabelingScheme.name'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role LabelingScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role LabelingScheme.chemCompLabels\n currentMap = {}\n contentMap['chemCompLabels'] 
= currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'] = currentMap\n loadMaps['CCLB.LabelingScheme.chemCompLabels'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.chemCompLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'\n currentMap['name'] = 'chemCompLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of LabelingScheme\n\n currentMap = abstractTypes.get('LabelingScheme')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy', 'name']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'longName']\n currentMap['simpleAttrs'] = aList\n aList = ['chemCompLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemCompLabels']\n currentMap['children'] = aList\n\n # Out-of-package link to AtomLabel\n currentMap = {}\n exolinks['AtomLabel'] = currentMap\n loadMaps['CCLB.exo-AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-AtomLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['name'] = 'AtomLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ChemCompLabel\n currentMap = {}\n exolinks['ChemCompLabel'] = currentMap\n loadMaps['CCLB.exo-ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-ChemCompLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['name'] = 'ChemCompLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n\n # Out-of-package link to Isotopomer\n currentMap = {}\n exolinks['Isotopomer'] = currentMap\n loadMaps['CCLB.exo-Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.exo-Isotopomer'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['name'] = 'Isotopomer'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to LabelingScheme\n currentMap = {}\n 
exolinks['LabelingScheme'] = currentMap\n loadMaps['CCLB.exo-LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.exo-LabelingScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['name'] = 'LabelingScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def get_dict(MGs, dist_AMT, Tcnt, lcnt, GA_param):\n for i in range(0, Tcnt):\n for j in range(0, GA_param-1):\n if j==0:\n MGs['tree-'+str(i+1)]['dict_'+str(j+1)] = map_nodes(MGs['tree-'+str(i+1)]['dist_'+str(j+1)], dist_AMT, lcnt, 'none')\n else:\n MGs['tree-'+str(i+1)]['dict_'+str(j+1)] = map_nodes(MGs['tree-'+str(i+1)]['dist_'+str(j+1)], MGs['tree-'+str(i+1)]['dist_'+str(j)], lcnt, 'none')\n return MGs", "def create_cell_map(dim):\n for cell, faces in cell_face_map.iteritems():\n \n for face in faces:\n nds = face_list[face - 1][1]\n \n if not cell in cell_map:\n cell_map[cell] = copy(nds)\n \n else:\n cell_map[cell] = list(Set(cell_map[cell] + nds))", "def createDict( self ):\n d = {}\n devTup = ( 'endcap', 'comp', 'shutter','397intensity' )\n for dev in devTup:\n d[dev] = {'devChannels':{}}\n endcap = ( ( 1, 1 ), ( 2, 0 ) )\n comp = ( ( 1, 4 ), ( 2, 2 ), ( 'common', 3 ) )\n shutter = ( ( 1, 5 ), ( 2, 6 ), ( 3, 7 ) )\n intensity397 = (('397intensity',8),)\n chanTup = ( endcap, comp, shutter ,intensity397 )\n for dev, value in zip( devTup, chanTup ):\n for chanPair in value:\n d[dev]['devChannels'][chanPair[0]] = {'value':None, 'channel':chanPair[1]}\n ecRange = ( 0.0, 40.0 )\n compRange = ( -40.0, 40.0 )\n shutterRange = ( 0.0, 5.0 )\n intensity397Range = (0.0,2500.0)\n rangeTup = ( ecRange, compRange, shutterRange, intensity397Range )\n for dev, value in zip( devTup, rangeTup ): d[dev]['range'] = value\n self.dcDict = d", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def create_single_map(self,tod,x,y,x0,y0):\n maps = {'map':np.zeros((self.Nx*self.Ny)),\n 'cov':np.zeros((self.Nx*self.Ny))}\n\n\n pixels,xp,yp,r_x, r_y = self.get_pixel_positions(x,y,x0,y0,0,invertx=True)\n mask = np.ones(pixels.size,dtype=int)\n\n mask[(pixels == -1) | np.isnan(tod) | np.isinf(tod)] = 0\n rms = stats.AutoRMS(tod)\n weights = {'map':tod.astype(np.float64)/rms**2,\n 'cov':np.ones(tod.size)/rms**2}\n for k in maps.keys():\n binFuncs.binValues(maps[k],\n pixels,\n weights=weights[k],mask=mask)\n maps[k] = np.reshape(maps[k],(self.Ny,self.Nx))\n return maps", "def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n process_name 
= sample_info[\"process_name_specific\"]\n sample_category = sample_info[\"sample_category\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n logging.info(\"Building dictionaries for sample %s...\" % process_name)\n for charge_selection in self.charge_selections:\n central_or_shift_extensions = [\"\", \"hadd\", \"addBackgrounds\"]\n central_or_shifts_extended = central_or_shift_extensions + self.central_or_shifts\n for central_or_shift_or_dummy in central_or_shifts_extended:\n process_name_extended = [ process_name, \"hadd\" ]\n for process_name_or_dummy in process_name_extended:\n if central_or_shift_or_dummy in [ \"hadd\" ] and process_name_or_dummy in [ \"hadd\" ]:\n continue\n if central_or_shift_or_dummy != \"central\" and central_or_shift_or_dummy not in central_or_shift_extensions:\n if not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift_or_dummy, sample_info):\n continue\n\n key_dir = getKey(process_name_or_dummy, charge_selection, central_or_shift_or_dummy)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy, central_or_shift_or_dummy)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy)\n for subdirectory in [ \"comp_jetToTauFakeRate\", \"makePlots\" ]:\n key_dir = getKey(subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel, subdirectory)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel, subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n\n numDirectories = 0\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n numDirectories += len(self.dirs[key])\n else:\n numDirectories += 1\n logging.info(\"Creating directory structure (numDirectories = %i)\" % numDirectories)\n numDirectories_created = 0;\n frac = 1\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n numDirectories_created += len(self.dirs[key])\n else:\n create_if_not_exists(self.dirs[key])\n numDirectories_created = numDirectories_created + 1\n while 100*numDirectories_created >= frac*numDirectories:\n logging.info(\" %i%% completed\" % frac)\n frac = frac + 1\n logging.info(\"Done.\")\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_info, self.max_files_per_job)\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if 
not sample_info[\"use_it\"]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n inputFileList = inputFileLists[sample_name]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name))\n\n is_mc = (sample_info[\"type\"] == \"mc\")\n sample_category = sample_info[\"sample_category\"]\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n \n if central_or_shift != \"central\" and not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift, sample_info):\n continue\n\n # build config files for executing analysis code\n key_analyze_dir = getKey(process_name, charge_selection, central_or_shift)\n\n for jobId in inputFileList.keys():\n\n analyze_job_tuple = (process_name, charge_selection, central_or_shift, jobId)\n key_analyze_job = getKey(*analyze_job_tuple)\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n logging.warning(\"No input ntuples for %s --> skipping job !!\" % (key_analyze_job))\n continue\n\n cfgFile_modified_path = os.path.join(self.dirs[key_analyze_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%i_cfg.py\" % analyze_job_tuple)\n logFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%i.log\" % analyze_job_tuple)\n histogramFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_HIST], \"analyze_%s_%s_%s_%i.root\" % analyze_job_tuple)\n rleOutputFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_RLES], \"rle_%s_%s_%s_%i.txt\" % analyze_job_tuple) \\\n if self.select_rle_output else \"\"\n\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : cfgFile_modified_path,\n 'histogramFile' : histogramFile_path,\n 'logFile' : logFile_path,\n 'chargeSelection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_tight' : self.hadTau_selection_tight,\n 'hadTauSelection_denominator' : self.hadTau_selection_denominator,\n 'hadTauSelections_numerator' : self.hadTau_selections_numerator,\n 'trigMatchingOptions' : self.trigMatchingOptions,\n 'selEventsFileName_output' : rleOutputFile_path,\n 'absEtaBins' : self.absEtaBins,\n 'decayModes' : self.decayModes,\n 'central_or_shift' : central_or_shift,\n 'central_or_shifts_local' : [],\n 'apply_hlt_filter' : self.hlt_filter,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job], sample_info)\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1_dir = getKey(process_name, charge_selection)\n hadd_stage1_job_tuple = (process_name, charge_selection)\n key_hadd_stage1_job = getKey(*hadd_stage1_job_tuple)\n if not key_hadd_stage1_job in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1_job] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1_job].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1_job] = os.path.join(self.dirs[key_hadd_stage1_dir][DKEY_HIST],\n \"hadd_stage1_%s_%s.root\" % hadd_stage1_job_tuple)\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1_job = getKey(process_name, charge_selection)\n key_hadd_stage2_dir = getKey(\"hadd\", charge_selection)\n key_hadd_stage2_job = getKey(charge_selection)\n if not key_hadd_stage2_job in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2_job] = []\n 
self.inputFiles_hadd_stage2[key_hadd_stage2_job].append(self.outputFile_hadd_stage1[key_hadd_stage1_job])\n self.outputFile_hadd_stage2[key_hadd_stage2_job] = os.path.join(self.dirs[key_hadd_stage2_dir][DKEY_HIST],\n \"hadd_stage2_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n charge_key = \"comp_%s\" % charge_selection\n self.comp_input_files[charge_key] = []\n for trigMatchingOption in self.trigMatchingOptions:\n key_hadd_stage2_job = getKey(charge_selection)\n key_comp_jetToTauFakeRate_dir = getKey(\"comp_jetToTauFakeRate\")\n key_comp_jetToTauFakeRate_job = getKey(charge_selection, trigMatchingOption)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_%s_cfg.py\" % (charge_selection, trigMatchingOption)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s_%s.root\" % (charge_selection, trigMatchingOption)),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s_%s.log\" % (charge_selection, trigMatchingOption)),\n 'looseRegion' : \"jetToTauFakeRate_%s_%s/denominator/\" % (charge_selection, trigMatchingOption),\n 'tightRegion' : \"jetToTauFakeRate_%s_%s/numerator/\" % (charge_selection, trigMatchingOption),\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins,\n 'decayModes' : self.decayModes,\n 'hadTauSelections' : self.hadTau_selections_numerator,\n 'trigMatchingOption' : trigMatchingOption,\n 'plots_outputFileName' : os.path.join(self.dirs[key_comp_jetToTauFakeRate_dir][DKEY_PLOT], \"comp_jetToTauFakeRate_%s.png\" % trigMatchingOption)\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n comp_output = self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile']\n self.targets.append(comp_output)\n self.comp_input_files[charge_key].append(comp_output)\n self.comp_output_files[charge_key] = os.path.join(self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_dir = getKey(\"makePlots\")\n key_makePlots_job = getKey(charge_selection) \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for trigMatchingOption in self.trigMatchingOptions:\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"denominator\") \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 
'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_denominator_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/denominator/%s\" % (charge_selection, trigMatchingOption, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"numerator\", hadTau_selection_numerator)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_numerator_%s_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_numerator_%s_%s.png\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/numerator/%s/%s\" % (charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile, make_dependency = \"phony_hadd_stage1\", max_mem = '4096M')\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_comp_hadd(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n\n logging.info(\"Done.\")\n\n return self.num_jobs", "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['phyobs'] = ''", "def cell_map_from_database(self) -> None:\n for row in self.session.query(DatamapItem).all():\n self.cell_map.append(\n Cell(\n datamap_id=row.id,\n cell_key=row.key,\n cell_value=None,\n template_sheet=row.bicc_sheet,\n bg_colour=None,\n fg_colour=None,\n number_format=None,\n verification_list=None,\n cell_reference=row.bicc_cellref))", "def 
mapped_reconstructed_data_dict(\r\n self,\r\n ) -> Dict[LinearObj, Visibilities]:\r\n mapped_reconstructed_data_dict = {}\r\n\r\n image_dict = self.mapped_reconstructed_image_dict\r\n\r\n for linear_obj in self.linear_obj_list:\r\n visibilities = self.transformer.visibilities_from(\r\n image=image_dict[linear_obj]\r\n )\r\n\r\n visibilities = Visibilities(visibilities=visibilities)\r\n\r\n mapped_reconstructed_data_dict[linear_obj] = visibilities\r\n\r\n return mapped_reconstructed_data_dict", "def build_map(model: str, n: int, kwc: int) -> Map:\n PKWS.clear()\n fited = cluster(n, model)\n return Map(\n cats=list(map(\"c-{}\".format, range(1, n + 1))),\n kws=list(\n map(\n lambda c: \", \".join(\n map(\n lambda x: x[0],\n count_it(\n Counter(\n chain.from_iterable(\n map(\n lambda ie: model == \"bert\"\n and SS_BERT.get(YS[model][ie[0]], [])\n or model == \"glove\"\n and SS_TFIDF[ie[0]]\n or SS_GLOVE[ie[0]],\n filter(\n lambda ie: ie[1] == c,\n enumerate(fited),\n ),\n ),\n )\n ),\n kwc,\n ),\n )\n ),\n range(n),\n )\n ),\n points=list(\n map(\n lambda y, x_y, x: Point(\n question=y, x=x_y[0], y=x_y[1], catagory=x,\n ),\n YS[model],\n XY[model],\n fited,\n )\n ),\n )", "def generate_grid_dict(height, width):\n board = {}\n for i in range(height):\n for j in range(width):\n position = (i, j)\n board[position] = 0\n return board", "def test_build_map_dict_by_name():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n # Get pygal country code map\n pygal_countries = {'KEN':'Kenya', 'IDN':'Indonesia'}\n\n # 1960\n res = build_map_dict_by_name(gdpinfo, pygal_countries, \"1960\")\n print(res)", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def feed_dict_generator(self, a_random_walk, step, gamma):\n\n batch_inputs = batch_input_generator(a_random_walk,\n self.args.random_walk_length,\n self.args.window_size)\n\n batch_labels = batch_label_generator(a_random_walk,\n self.args.random_walk_length,\n self.args.window_size)\n\n feed_dict = {self.walker_layer.train_labels: batch_labels,\n self.walker_layer.train_inputs: batch_inputs,\n self.gamma: gamma,\n self.step: float(step)}\n\n return feed_dict", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CLAS').get('abstractTypes')\n exolinks = globalMap.get('CLAS').get('exolinks')\n\n # Class AbstractCategory\n currentMap = {}\n abstractTypes['AbstractCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'\n currentMap['eType'] = 'cplx'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.AbstractCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute 
AbstractCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractCategory.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'] = currentMap\n loadMaps['CLAS.AbstractCategory.details'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute AbstractCategory.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'] = currentMap\n loadMaps['CLAS.AbstractCategory.name'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AbstractCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AbstractCategory\n\n currentMap = abstractTypes.get('AbstractCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Classification\n currentMap = {}\n abstractTypes['Classification'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'] = currentMap\n loadMaps['CLAS.Classification'] = currentMap\n currentMap['tag'] = 'CLAS.Classification'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'classifications'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'namingSystem'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Classification.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Classification.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute Classification.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.namingSystem\n currentMap = {}\n contentMap['namingSystem'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'] = currentMap\n loadMaps['CLAS.Classification.namingSystem'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.namingSystem'\n currentMap['type'] = 
'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'\n currentMap['name'] = 'namingSystem'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role Classification.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Classification.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'] = currentMap\n loadMaps['CLAS.Classification.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.experimentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.hazardPhrases\n currentMap = {}\n contentMap['hazardPhrases'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'] = currentMap\n loadMaps['CLAS.Classification.hazardPhrases'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.hazardPhrases'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'\n currentMap['name'] = 'hazardPhrases'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.holderCategorys\n currentMap = {}\n contentMap['holderCategorys'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'] = currentMap\n loadMaps['CLAS.Classification.holderCategorys'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.holderCategorys'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'\n currentMap['name'] = 'holderCategorys'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'] = currentMap\n loadMaps['CLAS.Classification.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.instrumentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'] = currentMap\n loadMaps['CLAS.Classification.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleCategories'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = 
globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleComponentCategory\n currentMap = {}\n contentMap['sampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'] = currentMap\n loadMaps['CLAS.Classification.sampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleComponentCategory'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'\n currentMap['name'] = 'sampleComponentCategory'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'] = currentMap\n loadMaps['CLAS.Classification.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetScoreboards'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'] = currentMap\n loadMaps['CLAS.Classification.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetStatus'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n # End of Classification\n\n currentMap = abstractTypes.get('Classification')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['namingSystem']\n currentMap['simpleAttrs'] = aList\n aList = ['targetStatus', 'targetScoreboards', 'sampleComponentCategory', 'sampleCategories', 'instrumentTypes', 'holderCategorys', 'hazardPhrases', 'experimentTypes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['experimentTypes', 'hazardPhrases', 'holderCategorys', 'instrumentTypes', 'sampleCategories', 'sampleComponentCategory', 'targetScoreboards', 'targetStatus']\n currentMap['children'] = aList\n\n # Class SampleComponentCategory\n currentMap = {}\n abstractTypes['SampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'] = currentMap\n loadMaps['CLAS.SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleComponentCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleComponentCategory'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleComponentCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleComponentCategory.details\n contentMap['details'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleComponentCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleComponentCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of SampleComponentCategory\n\n currentMap = abstractTypes.get('SampleComponentCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ExperimentType\n currentMap = {}\n abstractTypes['ExperimentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'] = currentMap\n loadMaps['CLAS.ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'experimentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ExperimentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ExperimentType.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute ExperimentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role ExperimentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ExperimentType.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'] = currentMap\n loadMaps['CLAS.ExperimentType.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.instrumentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role ExperimentType.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'] = currentMap\n loadMaps['CLAS.ExperimentType.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.sampleCategories'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of ExperimentType\n\n currentMap = abstractTypes.get('ExperimentType')\n aList = ['details', 'name', 'instrumentTypes', 'sampleCategories']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetScoreboard\n currentMap = {}\n abstractTypes['TargetScoreboard'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'] = currentMap\n loadMaps['CLAS.TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetScoreboards'\n currentMap['objkey'] = 'name'\n currentMap['class'] = 
ccp.api.lims.Classification.TargetScoreboard\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetScoreboard.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetScoreboard.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetScoreboard.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetScoreboard.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetScoreboard.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'] = currentMap\n loadMaps['CLAS.TargetScoreboard.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard.targetStatus'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetScoreboard\n\n currentMap = abstractTypes.get('TargetScoreboard')\n aList = ['details', 'name', 'targetStatus']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class HolderCategory\n currentMap = {}\n abstractTypes['HolderCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'] = currentMap\n loadMaps['CLAS.HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.HolderCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'holderCategorys'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HolderCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HolderCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HolderCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role HolderCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HolderCategory\n\n currentMap = abstractTypes.get('HolderCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class HazardPhrase\n currentMap = {}\n abstractTypes['HazardPhrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'] = currentMap\n loadMaps['CLAS.HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'hazardPhrases'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HazardPhrase.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HazardPhrase.details\n contentMap['details'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HazardPhrase.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Attribute HazardPhrase.phrase\n currentMap = {}\n contentMap['phrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'] = currentMap\n loadMaps['CLAS.HazardPhrase.phrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase.phrase'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'\n currentMap['name'] = 'phrase'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role HazardPhrase.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HazardPhrase\n\n currentMap = abstractTypes.get('HazardPhrase')\n aList = ['details', 'name', 'phrase']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class InstrumentType\n currentMap = {}\n abstractTypes['InstrumentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'] = currentMap\n loadMaps['CLAS.InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'instrumentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute InstrumentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute InstrumentType.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute InstrumentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role InstrumentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role InstrumentType.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'] = currentMap\n loadMaps['CLAS.InstrumentType.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of InstrumentType\n\n currentMap = abstractTypes.get('InstrumentType')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleCategory\n currentMap = {}\n abstractTypes['SampleCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'] = currentMap\n loadMaps['CLAS.SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleCategories'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n contentMap = {}\n 
currentMap['content'] = contentMap\n\n # Attribute SampleCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role SampleCategory.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'] = currentMap\n loadMaps['CLAS.SampleCategory.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of SampleCategory\n\n currentMap = abstractTypes.get('SampleCategory')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetStatus\n currentMap = {}\n abstractTypes['TargetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'] = currentMap\n loadMaps['CLAS.TargetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetStatus'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetStatus.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetStatus.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetStatus.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetStatus.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetStatus.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'] = currentMap\n loadMaps['CLAS.TargetStatus.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus.targetScoreboards'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetStatus\n\n currentMap = abstractTypes.get('TargetStatus')\n aList = ['details', 'name', 'targetScoreboards']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to Classification\n currentMap = {}\n exolinks['Classification'] = currentMap\n loadMaps['CLAS.exo-Classification'] = currentMap\n currentMap['tag'] = 'CLAS.exo-Classification'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['name'] = 'Classification'\n 
currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to SampleComponentCategory\n currentMap = {}\n exolinks['SampleComponentCategory'] = currentMap\n loadMaps['CLAS.exo-SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleComponentCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['name'] = 'SampleComponentCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to ExperimentType\n currentMap = {}\n exolinks['ExperimentType'] = currentMap\n loadMaps['CLAS.exo-ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-ExperimentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['name'] = 'ExperimentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetScoreboard\n currentMap = {}\n exolinks['TargetScoreboard'] = currentMap\n loadMaps['CLAS.exo-TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.exo-TargetScoreboard'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['name'] = 'TargetScoreboard'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetScoreboard\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HolderCategory\n currentMap = {}\n exolinks['HolderCategory'] = currentMap\n loadMaps['CLAS.exo-HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HolderCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['name'] = 'HolderCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HazardPhrase\n currentMap = {}\n exolinks['HazardPhrase'] = currentMap\n loadMaps['CLAS.exo-HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HazardPhrase'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['name'] = 'HazardPhrase'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link 
to InstrumentType\n currentMap = {}\n exolinks['InstrumentType'] = currentMap\n loadMaps['CLAS.exo-InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-InstrumentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['name'] = 'InstrumentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to SampleCategory\n currentMap = {}\n exolinks['SampleCategory'] = currentMap\n loadMaps['CLAS.exo-SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['name'] = 'SampleCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetStatus\n currentMap = {}\n exolinks['TargetStatus'] = currentMap\n loadMaps['CLAS.exo-TargetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.exo-TargetStatus'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['name'] = 'TargetStatus'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANAP').get('abstractTypes')\n exolinks = globalMap.get('ANAP').get('exolinks')\n\n # DataType GraphicsHandlerType\n currentMap = {}\n abstractTypes['GraphicsHandlerType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'] = currentMap\n loadMaps['ANAP.GraphicsHandlerType'] = currentMap\n currentMap['tag'] = 'ANAP.GraphicsHandlerType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnalysisProfile\n currentMap = {}\n abstractTypes['AnalysisProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'analysisProfiles'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnalysisProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnalysisProfile.bgColor\n currentMap = {}\n contentMap['bgColor'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'] = currentMap\n loadMaps['ANAP.AnalysisProfile.bgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.bgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'\n currentMap['name'] = 'bgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#FFFFFF'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.fgColor\n currentMap = {}\n contentMap['fgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'] = currentMap\n loadMaps['ANAP.AnalysisProfile.fgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.fgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'\n currentMap['name'] = 'fgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#000000'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.font\n currentMap = {}\n contentMap['font'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'] = currentMap\n loadMaps['ANAP.AnalysisProfile.font'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.font'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'\n currentMap['name'] = 'font'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.graphicsHandler\n currentMap = {}\n contentMap['graphicsHandler'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'] = currentMap\n loadMaps['ANAP.AnalysisProfile.graphicsHandler'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.graphicsHandler'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'\n currentMap['name'] = 'graphicsHandler'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'Tk'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001')\n\n # Attribute AnalysisProfile.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnalysisProfile.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'] = currentMap\n loadMaps['ANAP.AnalysisProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'\n currentMap['name'] = 
'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AnalysisProfile.panView\n currentMap = {}\n contentMap['panView'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'] = currentMap\n loadMaps['ANAP.AnalysisProfile.panView'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.panView'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'\n currentMap['name'] = 'panView'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.sendBugReports\n currentMap = {}\n contentMap['sendBugReports'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile.sendBugReports'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.sendBugReports'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'\n currentMap['name'] = 'sendBugReports'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'maybe'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2010-11-17-16:21:33_00001')\n\n # Attribute AnalysisProfile.transientDialogs\n currentMap = {}\n contentMap['transientDialogs'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientDialogs'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientDialogs'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'\n currentMap['name'] = 'transientDialogs'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.transientWindows\n currentMap = {}\n contentMap['transientWindows'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientWindows'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientWindows'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'\n currentMap['name'] = 'transientWindows'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.twoCharShortcuts\n currentMap = {}\n contentMap['twoCharShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'] = currentMap\n loadMaps['ANAP.AnalysisProfile.twoCharShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.twoCharShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'\n currentMap['name'] = 'twoCharShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useCrosshair\n currentMap = {}\n contentMap['useCrosshair'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useCrosshair'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useCrosshair'\n 
currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'\n currentMap['name'] = 'useCrosshair'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useGlobalShortcuts\n currentMap = {}\n contentMap['useGlobalShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useGlobalShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useGlobalShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'\n currentMap['name'] = 'useGlobalShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.userEmail\n currentMap = {}\n contentMap['userEmail'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userEmail'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userEmail'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'\n currentMap['name'] = 'userEmail'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute AnalysisProfile.userName\n currentMap = {}\n contentMap['userName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userName'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'\n currentMap['name'] = 'userName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.userOrganisation\n currentMap = {}\n contentMap['userOrganisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userOrganisation'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userOrganisation'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'\n currentMap['name'] = 'userOrganisation'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.webBrowser\n currentMap = {}\n contentMap['webBrowser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'] = currentMap\n loadMaps['ANAP.AnalysisProfile.webBrowser'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.webBrowser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'\n currentMap['name'] = 'webBrowser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role AnalysisProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnalysisProfile.colorSchemes\n currentMap = {}\n contentMap['colorSchemes'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'] = currentMap\n loadMaps['ANAP.AnalysisProfile.colorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.colorSchemes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'\n currentMap['name'] = 'colorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.macros\n currentMap = {}\n contentMap['macros'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'] = currentMap\n loadMaps['ANAP.AnalysisProfile.macros'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.macros'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'\n currentMap['name'] = 'macros'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.marksColor\n currentMap = {}\n contentMap['marksColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'] = currentMap\n loadMaps['ANAP.AnalysisProfile.marksColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.marksColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'\n currentMap['name'] = 'marksColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n\n # Role AnalysisProfile.refExpProfiles\n currentMap = {}\n contentMap['refExpProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'] = currentMap\n loadMaps['ANAP.AnalysisProfile.refExpProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.refExpProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'\n currentMap['name'] = 'refExpProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.residueProfiles\n currentMap = {}\n contentMap['residueProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'] = currentMap\n loadMaps['ANAP.AnalysisProfile.residueProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.residueProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'\n currentMap['name'] = 'residueProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.rulersColor\n currentMap = {}\n contentMap['rulersColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'] = currentMap\n loadMaps['ANAP.AnalysisProfile.rulersColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.rulersColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'\n currentMap['name'] = 'rulersColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n # End of AnalysisProfile\n\n currentMap = abstractTypes.get('AnalysisProfile')\n 
aList = ['createdBy', 'graphicsHandler', 'guid', 'isModifiable', 'lastUnlockedBy', 'name', 'panView', 'sendBugReports', 'transientDialogs', 'transientWindows', 'twoCharShortcuts', 'useCrosshair', 'useGlobalShortcuts', 'userEmail', 'webBrowser']\n currentMap['headerAttrs'] = aList\n aList = ['bgColor', 'fgColor', 'font', 'userName', 'userOrganisation', 'marksColor', 'rulersColor']\n currentMap['simpleAttrs'] = aList\n aList = ['residueProfiles', 'refExpProfiles', 'macros', 'colorSchemes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['colorSchemes', 'macros', 'refExpProfiles', 'residueProfiles']\n currentMap['children'] = aList\n\n # Class ColorScheme\n currentMap = {}\n abstractTypes['ColorScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'] = currentMap\n loadMaps['ANAP.ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'colorSchemes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ColorScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ColorScheme.colors\n currentMap = {}\n contentMap['colors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'] = currentMap\n loadMaps['ANAP.ColorScheme.colors'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.colors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'\n currentMap['name'] = 'colors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute ColorScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'] = currentMap\n loadMaps['ANAP.ColorScheme.name'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ColorScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ColorScheme\n\n currentMap = abstractTypes.get('ColorScheme')\n aList = ['colors', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Macro\n currentMap = {}\n abstractTypes['Macro'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'] = currentMap\n loadMaps['ANAP.Macro'] = currentMap\n currentMap['tag'] = 'ANAP.Macro'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'macros'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Macro.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Macro.details\n currentMap 
= {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'] = currentMap\n loadMaps['ANAP.Macro.details'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Macro.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'] = currentMap\n loadMaps['ANAP.Macro.function'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.isInMenu\n currentMap = {}\n contentMap['isInMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'] = currentMap\n loadMaps['ANAP.Macro.isInMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'\n currentMap['name'] = 'isInMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.isInMouseMenu\n currentMap = {}\n contentMap['isInMouseMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'] = currentMap\n loadMaps['ANAP.Macro.isInMouseMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMouseMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'\n currentMap['name'] = 'isInMouseMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.module\n currentMap = {}\n contentMap['module'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'] = currentMap\n loadMaps['ANAP.Macro.module'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.module'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'\n currentMap['name'] = 'module'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'] = currentMap\n loadMaps['ANAP.Macro.name'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Macro.ordering\n currentMap = {}\n contentMap['ordering'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'] = currentMap\n loadMaps['ANAP.Macro.ordering'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.ordering'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'\n currentMap['name'] = 'ordering'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.path\n currentMap = {}\n contentMap['path'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'] = currentMap\n loadMaps['ANAP.Macro.path'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.path'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'\n currentMap['name'] = 'path'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00003')\n\n # Attribute Macro.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'] = currentMap\n loadMaps['ANAP.Macro.serial'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.shortcut\n currentMap = {}\n contentMap['shortcut'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'] = currentMap\n loadMaps['ANAP.Macro.shortcut'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.shortcut'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'\n currentMap['name'] = 'shortcut'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Macro.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Macro\n\n currentMap = abstractTypes.get('Macro')\n aList = ['function', 'isInMenu', 'isInMouseMenu', 'module', 'ordering', 'serial', 'shortcut']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'path']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefExpProfile\n currentMap = {}\n abstractTypes['RefExpProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'] = currentMap\n loadMaps['ANAP.RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refExpProfiles'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefExpProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefExpProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'] = currentMap\n loadMaps['ANAP.RefExpProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute RefExpProfile.peakSymbolColors\n currentMap = {}\n contentMap['peakSymbolColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakSymbolColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakSymbolColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'\n currentMap['name'] = 'peakSymbolColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.peakTextColors\n currentMap = {}\n contentMap['peakTextColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakTextColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakTextColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'\n currentMap['name'] = 'peakTextColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.refExpNames\n currentMap = {}\n contentMap['refExpNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'] = currentMap\n loadMaps['ANAP.RefExpProfile.refExpNames'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.refExpNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'\n currentMap['name'] = 'refExpNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role RefExpProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefExpProfile.negColorSchemes\n currentMap = {}\n contentMap['negColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'] = currentMap\n loadMaps['ANAP.RefExpProfile.negColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.negColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'\n currentMap['name'] = 'negColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role RefExpProfile.posColorSchemes\n currentMap = {}\n contentMap['posColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'] = currentMap\n loadMaps['ANAP.RefExpProfile.posColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.posColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'\n currentMap['name'] = 'posColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of RefExpProfile\n\n currentMap = abstractTypes.get('RefExpProfile')\n aList = ['name']\n currentMap['headerAttrs'] = aList\n aList = ['peakSymbolColors', 'peakTextColors', 'refExpNames', 'negColorSchemes', 'posColorSchemes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ResidueProfile\n currentMap = {}\n abstractTypes['ResidueProfile'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'] = currentMap\n loadMaps['ANAP.ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'residueProfiles'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ResidueProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ResidueProfile.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'] = currentMap\n loadMaps['ANAP.ResidueProfile.ccpCode'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.guiName\n currentMap = {}\n contentMap['guiName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'] = currentMap\n loadMaps['ANAP.ResidueProfile.guiName'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.guiName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'\n currentMap['name'] = 'guiName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'] = currentMap\n loadMaps['ANAP.ResidueProfile.molType'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ResidueProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ResidueProfile\n\n currentMap = abstractTypes.get('ResidueProfile')\n aList = ['ccpCode', 'guiName', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnalysisProfile\n currentMap = {}\n exolinks['AnalysisProfile'] = currentMap\n loadMaps['ANAP.exo-AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-AnalysisProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['name'] = 'AnalysisProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to ColorScheme\n currentMap = {}\n exolinks['ColorScheme'] = currentMap\n loadMaps['ANAP.exo-ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ColorScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['name'] = 'ColorScheme'\n currentMap['eType'] = 'cplx'\n 
currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Macro\n currentMap = {}\n exolinks['Macro'] = currentMap\n loadMaps['ANAP.exo-Macro'] = currentMap\n currentMap['tag'] = 'ANAP.exo-Macro'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['name'] = 'Macro'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to RefExpProfile\n currentMap = {}\n exolinks['RefExpProfile'] = currentMap\n loadMaps['ANAP.exo-RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-RefExpProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['name'] = 'RefExpProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ResidueProfile\n currentMap = {}\n exolinks['ResidueProfile'] = currentMap\n loadMaps['ANAP.exo-ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ResidueProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['name'] = 'ResidueProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))", "def initializeFromDict(self, inputDict):\n for idx, val in enumerate(inputDict['outcome']):\n self.mapping[val] = inputDict['state'][idx]\n self.values.add(val)\n\n self.checkDistParams()", "def build_I_map(self):\n raise NotImplementedError", "def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]", "def create_observations_dict(instances, unprocessed_map_data):\n # print(instances)\n results = {}\n for idx in instances:\n results[idx] = {}\n if idx == instances[0]:\n tag_filter_pixel_corners = B.compute_corner_pixels(\n idx, unprocessed_map_data\n ).tolist()\n # prettified_corner_pixels = [tag_filter_pixel_corners[:2,0],tag_filter_pixel_corners[:2,1],tag_filter_pixel_corners[:2,2],tag_filter_pixel_corners[:2,3]]\n # results[idx][\"corner_pixels\"] = [pixel_pair.tolist() for pixel_pair in prettified_corner_pixels]\n results[idx][\"corner_pixels\"] = tag_filter_pixel_corners\n results[idx][\"tag_pose\"] = B.compute_tag_pose(\n idx, unprocessed_map_data\n ).tolist()\n results[idx][\"camera_pose\"] = B.compute_camera_pose(\n idx, unprocessed_map_data\n ).tolist()\n\n return results", "def feature_dict(sent, i):\n # WORK HERE!!\n return {}" ]
[ "0.65350825", "0.6120123", "0.5987722", "0.5963165", "0.5949888", "0.57955086", "0.57083774", "0.5609627", "0.55620164", "0.5537006", "0.54921776", "0.54663897", "0.54227394", "0.5419855", "0.5395792", "0.5388628", "0.53751236", "0.5370007", "0.53461546", "0.534331", "0.53185105", "0.53127515", "0.53127015", "0.53031856", "0.52829677", "0.52460754", "0.52334", "0.52316886", "0.5224446", "0.5209654" ]
0.7460185
0
Return list with FASTQC results files.
def get_result_files_fastqc(config):
    res_zip = []
    res_html = []
    for path in get_result_files_demux(config):
        ext = ".fastq.gz"
        if path.endswith(ext):
            folder = os.path.dirname(path)
            base = os.path.basename(path)[: -len(ext)]
            res_zip.append(os.path.join(folder, "qc", "fastqc", base + "_fastqc.zip"))
            res_html.append(os.path.join(folder, "qc", "fastqc", base + "_fastqc.html"))
    return {"zip": res_zip, "html": res_html}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _list_test_files(self, results_list):\n return [results[INPUT_FILE_PATH] for results in results_list]", "def get_fastqc_files(sample, unit, pairs, config, pre):\n if config[\"preprocessing\"][\"fastqc\"]:\n files = expand(config[\"paths\"][\"results\"]+\"/intermediate/fastqc/{sample}_{unit}_{pair}{PREPROCESS}_fastqc.zip\",\n sample=sample, unit=unit, pair=pairs, PREPROCESS=pre)\n return files\n return []", "def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []", "def results_files(self, pattern=None, regex=None):\n return self._files_in_subdir(self.results_dir, pattern, regex)", "def get_fastq_files(self) -> List[Path]:\n return list(self.sequence_data_paths.fastq_path.glob(\"*.fastq.gz\")) # type: ignore", "def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover", "def find_all_fastqs(location):\n fastqs = 
[]\n for name, dirs, files in os.walk(location):\n fastqs.extend(os.path.join(name, f) for f in files if f.endswith('.fastq.gz'))\n app_logger.debug('Found %s fastqs in %s', len(fastqs), location)\n return fastqs", "def _get_results_from_all_files(self, output_ojt):\n return [output[output_ojt] for output in self.output_objects]", "def collect_results( results_dir = \"experiments\" ) :\n #%%\n import pandas as pd\n exps_fn = os.listdir( results_dir )\n dics = []\n for fname in exps_fn :\n with open( results_dir + \"/\" + fname, \"rt\", encoding=\"utf8\" ) as f_out :\n dics.append( json.load( f_out ) )\n\n results_df = pd.DataFrame( dics )\n #%%\n return results_df", "def process_results(self):\n processes = {\"*.csv\": _process_csv}\n custom_processes = self.custom_processes\n if custom_processes:\n processes.update(custom_processes)\n\n try:\n results = []\n for glob, process in processes.items():\n results.extend(\n [\n (\n file.basename(),\n process(\n file,\n working_dir=os.getcwd(),\n simulname=self.output_prefix,\n ),\n )\n for file in self.simulation_dir.files(glob)\n ]\n )\n except FileNotFoundError:\n raise ValueError(\"No results to process. Have you called IDF.simulate()?\")\n else:\n return results", "def get_file_names():\n all_file_names = []\n cwd = os.getcwd()\n # Change to dir with result files to analyze\n os.chdir(args.dir)\n \n for file in glob.glob(\"*.csv\"):\n all_file_names.append(file)\n\n # Return to current working directory\n os.chdir(cwd)\n return all_file_names", "def get_all_files_to_instrument():\n sql=\"SELECT * FROM files\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results", "def get_files_data_from_results(results):\n files = []\n if results:\n for result in results:\n raw_file = get_fields_from_hit_object(result, 'file_indicators')\n file_data = filter_object_entries_by_dict_values(raw_file, 'file_indicators')\n files.append(file_data)\n return files", "def get_config_files(self):\n self.clear_lists()\n print self.abs_directory\n for file in os.listdir(self.abs_directory):\n print file\n if file.endswith('.json') and \"qemii\" in file:\n self.txt_files.append(file)", "def getSFFFiles(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n items = []\n con.cursor().callproc('qiime_assets.get_sff_files', [study_id, results])\n for row in results:\n items.append(row[0])\n return items\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def list_results(cls, output_dir, **kwargs):\n results = cls.load(output_dir, **kwargs)\n return results.get_results()", "async def stream_result_files(self) -> AsyncGenerator[StreamInfoUrl, None]:\n for dataset in self.datasets:\n async for file in dataset.get_data_rootfiles_stream(self.query.value()):\n yield file", "def get_result_filenames(self,directory):\n return [os.path.join(directory,name) for name in os.listdir(directory)\n if os.path.isfile(os.path.join(directory,name)) and\n os.path.splitext(name)[1].lower() == '.trf']", "def listFiles(self):\n pass", "def get_all_output_files():\n files = []\n for fmt in RunInfo.FORMATS:\n files.append(RunInfo.get_runinfo_basename() + \".\" + fmt)\n return files", "def get_fastq_files(wildcards):\n return expand(os.path.join(fastq_dir, \"{sample}_{readpair}.fastq\"), readpair=[1, 2], **wildcards)", "def fastq_qc(demultiplex_result, out_dir, config):\n\n pigz_cores = int(config['fastqTrim']['pigz_cores'])\n 
cutadapt_cores = int(config['fastqTrim']['cutadapt_cores'])\n\n r1_adapter = config['fastqTrim']['r1_adapter']\n r2_adapter = config['fastqTrim']['r1_adapter']\n length_threshold = config['fastqTrim']['length_threshold']\n quality_threshold = config['fastqTrim']['quality_threshold']\n r1_left_cut = config['fastqTrim']['r1_left_cut']\n r1_right_cut = config['fastqTrim']['r1_right_cut']\n r2_left_cut = config['fastqTrim']['r2_left_cut']\n r2_right_cut = config['fastqTrim']['r2_right_cut']\n overlap = config['fastqTrim']['overlap']\n total_reads_threshold = int(config['fastqTrim']['total_reads_threshold'])\n\n results = []\n for (uid, index_name), sub_df in demultiplex_result.groupby(['uid', 'index_name']):\n sample_demultiplex_total = sub_df['Trimmed'].sum()\n if sample_demultiplex_total < total_reads_threshold:\n log.info(f'In uid {uid}: index {index_name} skipped '\n f'due to too less reads: {sample_demultiplex_total}')\n continue\n # process R1\n r1_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R1.fq.gz'\n r1_out = f'{out_dir}/{uid}_{index_name}_R1.trimed.fq.gz'\n r1_cmd = f'pigz -cd -p {pigz_cores} {r1_path_pattern} | ' \\\n f'cutadapt -j {cutadapt_cores} --report=minimal -O {overlap} ' \\\n f'-q {quality_threshold} -u {r1_left_cut} ' \\\n f'-u -{r1_right_cut} -m {length_threshold} ' \\\n f'-a {r1_adapter} -o {r1_out} -'\n r1_result = subprocess.run(r1_cmd, stdout=subprocess.PIPE,\n encoding='utf8', shell=True, check=True)\n\n # get R1 result stat\n lines = []\n for line in r1_result.stdout.split('\\n'):\n ll = line.split('\\t')\n if len(ll) > 1:\n lines.append(ll)\n s = pd.Series({name: number for name, number in zip(*lines)})\n s['uid'] = uid\n s['index_name'] = index_name\n s['read_type'] = 'R1'\n results.append(s)\n\n # process R2\n r2_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R2.fq.gz'\n r2_out = f'{out_dir}/{uid}_{index_name}_R2.trimed.fq.gz'\n r2_cmd = f'pigz -cd -p {pigz_cores} {r2_path_pattern} | ' \\\n f'cutadapt -j {cutadapt_cores} --report=minimal -O {overlap} ' \\\n f'-q {quality_threshold} -u {r2_left_cut} ' \\\n f'-u -{r2_right_cut} -m {length_threshold} ' \\\n f'-a {r2_adapter} -o {r2_out} -'\n r2_result = subprocess.run(r2_cmd, stdout=subprocess.PIPE,\n encoding='utf8', shell=True, check=True)\n # get R2 result stat\n lines = []\n for line in r2_result.stdout.split('\\n'):\n ll = line.split('\\t')\n if len(ll) > 1:\n lines.append(ll)\n s = pd.Series({name: number for name, number in zip(*lines)})\n s['uid'] = uid\n s['index_name'] = index_name\n s['read_type'] = 'R2'\n results.append(s)\n\n fastq_final_result = pd.DataFrame(results)\n if len(results) == 0:\n # all sample skipped\n return fastq_final_result\n fastq_final_result['out_reads_rate'] = \\\n fastq_final_result['out_reads'].astype(int) / fastq_final_result['in_reads'].astype(int)\n fastq_final_result['out_bp_rate'] = \\\n fastq_final_result['out_reads'].astype(int) / fastq_final_result['in_reads'].astype(int)\n\n # clean up\n for (uid, index_name), sub_df in demultiplex_result.groupby(['uid', 'index_name']):\n r_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R*.fq.gz'\n r_rm_cmd = f'rm -f {r_path_pattern}'\n subprocess.run(r_rm_cmd, shell=True)\n # remove unknown reads\n r_path_pattern = f'{out_dir}/{uid}_L*_unknown_R*.fq.gz'\n r_rm_cmd = f'rm -f {r_path_pattern}'\n subprocess.run(r_rm_cmd, shell=True)\n\n return fastq_final_result", "def files(self):\n files = [self.submission]\n if self.kind == 'script':\n files.append(self.exec_script)\n if self.kind == 'function':\n files.append(self.function)\n 
return files", "def output_files(self):\n output_files = []\n for split in self.split_files:\n output_files.extend(split.filepaths)\n return output_files", "def outList(self,list=False):\n txt = ''\n txt += 'echo \">>> list of expected files on output sandbox\"\\n'\n listOutFiles = []\n stdout = 'CMSSW_$NJob.stdout'\n stderr = 'CMSSW_$NJob.stderr'\n if len(self.output_file) <= 0:\n msg =\"WARNING: no output files name have been defined!!\\n\"\n msg+=\"\\tno output files will be reported back/staged\\n\"\n common.logger.info(msg)\n\n if (self.return_data == 1):\n for file in (self.output_file):\n listOutFiles.append(numberFile(file, '$OutUniqueID'))\n for file in (self.output_file_sandbox):\n listOutFiles.append(numberFile(file, '$NJob'))\n listOutFiles.append(stdout)\n listOutFiles.append(stderr)\n listOutFiles.append('Watchdog_$NJob.log.gz')\n\n txt += 'echo \"output files: '+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'filesToCheck=\"'+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'export filesToCheck\\n'\n taskinfo={}\n taskinfo['outfileBasename'] = self.output_file\n common._db.updateTask_(taskinfo)\n\n if list : return self.output_file\n return txt", "def get_fastq_files(directory, work_dir, item, fc_name, bc_name=None, glob_ext=\"_fastq.txt\",\n config=None, unpack=True):\n if \"files\" in item and bc_name is None:\n names = item[\"files\"]\n if isinstance(names, basestring):\n names = [names]\n files = [x if os.path.isabs(x) else os.path.join(directory, x) for x in names]\n\n else:\n assert fc_name is not None\n lane = item[\"lane\"]\n if bc_name:\n glob_str = \"%s_*%s_%s_*%s\" % (lane, fc_name, bc_name, glob_ext)\n else:\n glob_str = \"%s_*%s*%s\" % (lane, fc_name, glob_ext)\n files = glob.glob(os.path.join(directory, glob_str))\n \n # Include gzipped files\n glob_str = \"%s.gz\" % glob_str\n files.extend(glob.glob(os.path.join(directory, glob_str)))\n \n files.sort()\n if len(files) > 2 or len(files) == 0:\n raise ValueError(\"Did not find correct files for %s %s %s %s\" %\n (directory, lane, fc_name, files))\n ready_files = []\n for fname in files:\n if fname.endswith(\".gz\") and unpack:\n # TODO: Parallelize using pgzip\n ready_name = os.path.splitext(fname)[0]\n ready_files.append(ready_name)\n if not os.path.exists(ready_name):\n cl = [\"gunzip\", fname]\n subprocess.check_call(cl)\n elif fname.endswith(\".bam\"):\n ready_files = convert_bam_to_fastq(fname, work_dir, config)\n else:\n assert os.path.exists(fname), fname\n ready_files.append(fname)\n ready_files = [x for x in ready_files if x is not None]\n return ready_files[0], (ready_files[1] if len(ready_files) > 1 else None)", "def get_all_files_to_instrument_for_live_session():\n sql=\"SELECT * FROM files WHERE should_instrument=1 AND is_history=0\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results", "def poretools_fastq():\n dirs = os.listdir(my_dir)\n for folder in dirs:\n path_to_folder = os.path.join(my_dir, folder)\n subprocess.check_output(\"poretools fastq --type fwd {}//*.fast5 > {}_poretools.fq\"\n .format(path_to_folder, path_to_folder), shell=True)\n print(\"Finished folder {}\".format(folder))\n print(\"Finished extractions of FASTQs.\")", "def get_submission():\n result_files = []\n for filename in os.listdir(\".\"):\n if filename.endswith(\"_output.csv\"):\n result_files.append(filename)\n return result_files[0]", "def get_files(self):\r\n return self._filelist" ]
[ "0.6973742", "0.6926766", "0.6716288", "0.6668739", "0.6503206", "0.64518744", "0.6330193", "0.6203264", "0.61732537", "0.6152446", "0.6145649", "0.6103352", "0.6013842", "0.59969056", "0.595812", "0.5938934", "0.59029835", "0.589157", "0.587857", "0.5876882", "0.5875305", "0.58742964", "0.5845911", "0.582566", "0.5811957", "0.58083045", "0.58061665", "0.580258", "0.5781635", "0.5779373" ]
0.7692394
0
Return marker file for either bcl2fastq, bcl2fastq2 or picard for snakemake
def get_tool_marker(config):
    if len(config["flowcell"]["demux_reads_override"]) > 1:
        if config["demux_tool"] == "bcl2fastq2":
            return "bcl2fastq2.done"
        else:
            raise InvalidConfiguration(
                "Only bcl2fastq2 supports more than one bases mask at once, but you have {}".format(
                    " and ".join(config["flowcell"]["demux_reads_override"])
                )
            )
    elif "M" in config["flowcell"]["demux_reads"]:
        if config["demux_tool"] == "picard":
            return "picard.done"
        else:
            raise InvalidConfiguration(
                "Only picard can be used to write UMIs to separate FASTQ file. There is an 'M' "
                "in your bases mask, but you wanted to run bcl2fastq(2)."
            )
    elif config["demux_tool"] == "bcl2fastq1":
        return "bcl2fastq1.done"
    elif config["demux_tool"] == "picard":
        return "picard.done"
    else:
        return "bcl2fastq2.done"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_barcodes(fastq1,\r\n fastq2=None,\r\n output_dir=\".\",\r\n input_type=\"barcode_single_end\",\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n char_delineator=\":\",\r\n switch_bc_order=False,\r\n map_fp=None,\r\n attempt_read_orientation=False,\r\n disable_header_match=False):\r\n\r\n # Turn off extra file creation for single read.\r\n if input_type == \"barcode_single_end\" and attempt_read_orientation:\r\n attempt_read_orientation = False\r\n if attempt_read_orientation:\r\n header, mapping_data, run_description, errors, warnings =\\\r\n process_id_map(map_fp)\r\n forward_primers, reverse_primers = get_primers(header, mapping_data)\r\n output_bc_not_oriented = open(join(output_dir,\r\n \"barcodes_not_oriented.fastq.incomplete\"), \"w\")\r\n fastq1_out_not_oriented = open(join(output_dir,\r\n \"reads1_not_oriented.fastq.incomplete\"), \"w\")\r\n fastq2_out_not_oriented = open(join(output_dir,\r\n \"reads2_not_oriented.fastq.incomplete\"), \"w\")\r\n else:\r\n forward_primers = None\r\n reverse_primers = None\r\n output_bc_not_oriented = None\r\n fastq1_out_not_oriented = None\r\n fastq2_out_not_oriented = None\r\n\r\n output_bc_fastq = open(join(output_dir, \"barcodes.fastq.incomplete\"), \"w\")\r\n if input_type in [\"barcode_single_end\", \"barcode_paired_stitched\"]:\r\n output_fastq1 = open(join(output_dir, \"reads.fastq.incomplete\"), \"w\")\r\n output_fastq2 = None\r\n final_fastq1_name = join(output_dir, \"reads.fastq\")\r\n elif input_type in [\"barcode_paired_end\"]:\r\n output_fastq1 = open(join(output_dir, \"reads1.fastq.incomplete\"), \"w\")\r\n output_fastq2 = open(join(output_dir, \"reads2.fastq.incomplete\"), \"w\")\r\n final_fastq1_name = join(output_dir, \"reads1.fastq\")\r\n else:\r\n output_fastq1 = None\r\n output_fastq2 = None\r\n\r\n if not fastq2:\r\n fastq2 = cycle([\"@\", \"AAAAAAAAAAAA\", \"+\", \"AAAAAAAAAAAA\"])\r\n not_paired = True\r\n else:\r\n not_paired = False\r\n\r\n check_header_match_f = get_casava_version(fastq1)\r\n\r\n header_index = 0\r\n\r\n for read1_data, read2_data in izip(\r\n parse_fastq(fastq1, strict=False),\r\n parse_fastq(fastq2, strict=False)):\r\n if not disable_header_match:\r\n if not check_header_match_f(read1_data[header_index],\r\n read2_data[header_index]):\r\n raise FastqParseError(\"Headers of read1 and read2 do not match. Can't continue. \"\r\n \"Confirm that the fastq sequences that you are \"\r\n \"passing match one another. 
--disable_header_match can be \"\r\n \"used to suppress header checks.\")\r\n\r\n if input_type == \"barcode_single_end\":\r\n process_barcode_single_end_data(read1_data, output_bc_fastq,\r\n output_fastq1, bc1_len, rev_comp_bc1)\r\n\r\n elif input_type == \"barcode_paired_end\":\r\n process_barcode_paired_end_data(read1_data, read2_data,\r\n output_bc_fastq, output_fastq1, output_fastq2, bc1_len, bc2_len,\r\n rev_comp_bc1, rev_comp_bc2, attempt_read_orientation,\r\n forward_primers, reverse_primers, output_bc_not_oriented,\r\n fastq1_out_not_oriented, fastq2_out_not_oriented)\r\n\r\n elif input_type == \"barcode_paired_stitched\":\r\n process_barcode_paired_stitched(read1_data,\r\n output_bc_fastq, output_fastq1, bc1_len, bc2_len,\r\n rev_comp_bc1, rev_comp_bc2, attempt_read_orientation,\r\n forward_primers, reverse_primers, output_bc_not_oriented,\r\n fastq1_out_not_oriented, switch_bc_order)\r\n\r\n elif input_type == \"barcode_in_label\":\r\n if not_paired:\r\n curr_read2_data = False\r\n else:\r\n curr_read2_data = read2_data\r\n process_barcode_in_label(read1_data, curr_read2_data,\r\n output_bc_fastq, bc1_len, bc2_len,\r\n rev_comp_bc1, rev_comp_bc2, char_delineator)\r\n\r\n output_bc_fastq.close()\r\n rename(output_bc_fastq.name, join(output_dir, \"barcodes.fastq\"))\r\n if output_fastq1:\r\n output_fastq1.close()\r\n rename(output_fastq1.name, final_fastq1_name)\r\n if output_fastq2:\r\n output_fastq2.close()\r\n rename(output_fastq2.name, join(output_dir, \"reads2.fastq\"))\r\n if output_bc_not_oriented:\r\n rename(output_bc_not_oriented.name,\r\n join(output_dir, \"barcodes_not_oriented.fastq\"))\r\n if fastq1_out_not_oriented:\r\n rename(fastq1_out_not_oriented.name,\r\n join(output_dir, \"reads1_not_oriented.fastq\"))\r\n if fastq2_out_not_oriented:\r\n rename(fastq2_out_not_oriented.name,\r\n join(output_dir, \"reads2_not_oriented.fastq\"))", "def process_barcode_in_label(read1_data,\r\n read2_data,\r\n output_bc_fastq,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n char_delineator=\":\"):\r\n header_index = 0\r\n\r\n # Check for char_delineator in sequence\r\n try:\r\n bc1_read = read1_data[header_index].split(\r\n char_delineator)[-1][0:bc1_len]\r\n # If there is an index error, it means the char_delineator wasn't found\r\n except IndexError:\r\n raise IndexError(\"Found sequence lacking character delineator. 
\"\r\n \"Sequence header %s, character delineator %s\" %\r\n (read1_data[header_index], char_delineator))\r\n\r\n # Create fake quality scores, using 6 here to match the existing qual fake\r\n # qual scores that were all F.\r\n bc1_qual = np.ones(len(bc1_read), dtype=np.int8) * 6\r\n if rev_comp_bc1:\r\n bc1_read = str(DNA(bc1_read).rc())\r\n\r\n if read2_data:\r\n bc2_read =\\\r\n read2_data[header_index].strip().split(\r\n char_delineator)[-1][0:bc2_len]\r\n bc2_qual = np.ones(len(bc2_read), dtype=np.int8) * 6\r\n if rev_comp_bc2:\r\n bc2_read = str(DNA(bc2_read).rc())\r\n else:\r\n bc2_read = \"\"\r\n bc2_qual = np.array([], dtype=np.int8)\r\n\r\n if not bc1_read and not bc2_read:\r\n raise ValueError(\"Came up with empty barcode sequence, please check \"\r\n \"character delineator with -s, and fastq label \"\r\n \"%s\" % read1_data[header_index])\r\n\r\n bc_lines = format_fastq_record(read1_data[header_index],\r\n bc1_read + bc2_read,\r\n np.hstack([bc1_qual, bc2_qual]))\r\n\r\n output_bc_fastq.write(bc_lines)\r\n\r\n return", "def test_bcl2fastq(self):\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv').command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','Unaligned',\n '--sample-sheet','SampleSheet.csv'])\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n output_dir='run/bcl2fastq').command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','run/bcl2fastq',\n '--sample-sheet','SampleSheet.csv'])\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n output_dir='run/bcl2fastq',\n ignore_missing_bcl=True).command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','run/bcl2fastq',\n '--sample-sheet','SampleSheet.csv',\n '--ignore-missing-bcls'])\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n output_dir='run/bcl2fastq',\n mismatches=1,\n no_lane_splitting=True).command_line,\n ['bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','run/bcl2fastq',\n '--sample-sheet','SampleSheet.csv',\n '--barcode-mismatches','1',\n '--no-lane-splitting'])\n self.assertEqual(bcl2fastq.bcl2fastq2(\n '/runs/150107_NB123000_0001_ABCX',\n 'SampleSheet.csv',\n bcl2fastq_exe='/opt/bin/bcl2fastq').command_line,\n ['/opt/bin/bcl2fastq',\n '--runfolder-dir','/runs/150107_NB123000_0001_ABCX',\n '--output-dir','Unaligned',\n '--sample-sheet','SampleSheet.csv'])", "def get_fastq(wildcards):\n if sample_is_single_end(wildcards.sample):\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\"]].dropna()\n else:\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\", \"fq2\"]].dropna()", "def make_fastq(pair, filename, id):\n \n fname = filename + \"-R1.fastq\"\n with open(fname, \"w\") as r1:\n r1.write(\"@\" + id + \"\\n\")\n r1.write(pair[0])\n r1.write(\"\\n+\\n\")\n r1.write(\"E\" * len(pair[0]))\n\n fname = filename + \"-R2.fastq\"\n with open(fname, \"w\") as r2:\n r2.write(\"@\" + id + \"\\n\")\n r2.write(pair[1])\n r2.write(\"\\n+\\n\")\n r2.write(\"E\" * len(pair[1]))", "def test_process_barcode_paired_stitched(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n 
symbol in 'ATA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n\r\n # With no matches, should write to the not_oriented files, and keep\r\n # in the same order of output file\r\n process_barcode_paired_stitched(fastq1_data,\r\n bcs_out, reads1_out, bc1_len=3, bc2_len=4,\r\n rev_comp_bc1=True, rev_comp_bc2=True,\r\n attempt_read_orientation=True,\r\n forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq_out_not_oriented=fastq1_out_not_oriented,\r\n switch_bc_order=True)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'CGATGAT', '+', '7654&%$', '']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads =\\\r\n ['@HWI-ST830', 'GATCGATCGATCG', '+', \"'()*+,-./0123\", '']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)", "def map_STAR(args):\n for type in ['joined', 'merged']:\n for strand in ['watson', 'crick']:\n if strand == 'watson':\n n = 1\n else:\n n = 3\n STAR_index_dir = os.path.join(args.output_dir,'STAR_%s_%s'%(type, strand))\n cmd = \"STAR --runThreadN %s --genomeDir %s\"%(args.threads, STAR_index_dir)\n\n if type == 'merged':\n cmd += \" --readFilesIn %s\" % vars(args)['%s_%s' % (strand, type)]\n else:\n #TODO: define custom parameters for PE reads\n cmd += \" --readFilesIn %s \" % vars(args)['%s_%s_r1' % (strand, type)]\n cmd += \" %s\" % vars(args)['%s_%s_r2' % (strand, type)]\n\n cmd += \" --outSAMattributes NM MD AS --outSAMtype SAM\"\n cmd += \" --outFileNamePrefix %s\" % (os.path.join(args.output_dir,'%s_%s'%(strand, type)))\n cmd += \" --outReadsUnmapped Fastx\" #output of unmapped reads for inspection\n cmd += \" --scoreGapATAC -2 --scoreGapNoncan -2\"\n #outFilterScoreMinOverLread : float: sam as outFilterMatchNmin, but normalized to the read length (sum of mates’ lengths for paired-end reads)\n #outFilterMatchNminOverLread: float: same as outFilterScoreMin, but normalized to read length (sum of mates’ lengths for paired-end reads)\n\n # –outFilterMultimapNmax 1 int: maximum number of loci the read is allowed to map to. 
Alignments (all of\n # them) will be output only if the read maps to no more loci than this value.\n cmd += \" --outFilterMismatchNoverLmax 0.95\"\n # TODO: implement --alignEndsType endtoend mapping after joined reads are merged\n cmd += \"--outFilterMatchNminOverLread 0.9 --scoreGap -4 \" \\\n \" --alignEndsType EndToEnd\" \\\n \" --alignSoftClipAtReferenceEnds No\" \\\n \" --outSAMorder PairedKeepInputOrder\" \\\n \" --outFilterMultimapNmax 1\" \\\n \" --scoreInsOpen -1\" \\\n #make sure we have a bam file sorted by name\n if args.extraflags:\n cmd += ' %s' % args.extraflags\n log = \"run STAR for % strand on %s reads\"%(strand, type)\n run_subprocess([cmd],args, log)\n log = \"write final log of STAR to normal log\"\n cmd = \"cat %s \" % os.path.join(args.output_dir, '%s_%s' % (strand, type) + 'Log.final.out')\n run_subprocess([cmd], args, log)\n return args", "def test_extract_barcodes_paired_end(self):\r\n\r\n fastq1_lines =\\\r\n \"@HWI-ST830\\nAAAATTTTCCCCGGGG\\n+\\n1234567890ABCDEF\".split('\\n')\r\n fastq2_lines =\\\r\n \"@HWI-ST830\\nGGGGTTTTAAAACCCC\\n+\\n1234567890ABCDEF\".split('\\n')\r\n\r\n extract_barcodes(fastq1=fastq1_lines, fastq2=fastq2_lines,\r\n input_type=\"barcode_paired_end\", output_dir=self.output_dir)\r\n\r\n output_bcs_fp = open(join(self.output_dir, \"barcodes.fastq\"), \"U\")\r\n actual_bcs = [line for line in output_bcs_fp]\r\n expected_bcs =\\\r\n ['@HWI-ST830\\n', 'AAAATTGGGGTT\\n', '+\\n', '123456123456\\n']\r\n\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n # reads 1 output\r\n output_reads_fp = open(join(self.output_dir, \"reads1.fastq\"), \"U\")\r\n actual_reads = [line for line in output_reads_fp]\r\n expected_reads = [\r\n '@HWI-ST830\\n',\r\n 'TTCCCCGGGG\\n',\r\n '+\\n',\r\n '7890ABCDEF\\n']\r\n\r\n self.assertEqual(actual_reads, expected_reads)\r\n # reads 2 output\r\n output_reads_fp = open(join(self.output_dir, \"reads2.fastq\"), \"U\")\r\n actual_reads = [line for line in output_reads_fp]\r\n expected_reads = [\r\n '@HWI-ST830\\n',\r\n 'TTAAAACCCC\\n',\r\n '+\\n',\r\n '7890ABCDEF\\n']\r\n\r\n self.assertEqual(actual_reads, expected_reads)", "def bcl_to_fastq_info(path=None):\n # Initialise\n bcl2fastq_path = ''\n package_name = ''\n package_version = ''\n # Locate the core script\n if not path:\n exes = available_bcl2fastq_versions()\n if exes:\n bcl2fastq_path = exes[0]\n else:\n bcl2fastq_path = os.path.abspath(path)\n # Identify the version\n if os.path.basename(bcl2fastq_path) == 'configureBclToFastq.pl':\n # Found CASAVA or bcl2fastq 1.8.* version\n # Look for the top-level directory\n path = os.path.dirname(bcl2fastq_path)\n # Look for etc directory\n etc_dir = os.path.join(os.path.dirname(path),'etc')\n if os.path.isdir(etc_dir):\n for d in bcf_utils.list_dirs(etc_dir):\n m = re.match(r'^(bcl2fastq|CASAVA)-([0-9.]+)$',d)\n if m:\n package_name = m.group(1)\n package_version = m.group(2)\n break\n elif os.path.basename(bcl2fastq_path) == 'bcl2fastq':\n # Found bcl2fastq v2.*\n # Run the program to get the version\n version_cmd = applications.Command(bcl2fastq_path,'--version')\n output = version_cmd.subprocess_check_output()[1]\n for line in output.split('\\n'):\n if line.startswith('bcl2fastq'):\n # Extract version from line of the form\n # bcl2fastq v2.17.1.14\n package_name = 'bcl2fastq'\n try:\n package_version = line.split()[1][1:]\n except ex:\n logging.warning(\"Unable to get version from '%s': %s\" %\n (line,ex))\n else:\n # No package supplied or located\n logging.warning(\"Unable to identify bcl-to-fastq conversion 
package \"\n \"from '%s'\" % bcl2fastq_path)\n # Return what we found\n return (bcl2fastq_path,package_name,package_version)", "def bcl2fastq_wrapper(config):\n rta_version = config_to_rta_version(config)\n if rta_version >= RTA_MIN_BCL2FASTQ2:\n return \"bcl2fastq2\"\n else:\n return \"bcl2fastq\"", "def process_barcode_paired_stitched(read_data,\r\n output_bc_fastq,\r\n output_fastq,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n attempt_read_orientation=False,\r\n forward_primers=None,\r\n reverse_primers=None,\r\n output_bc_not_oriented=None,\r\n fastq_out_not_oriented=None,\r\n switch_bc_order=False):\r\n\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n read_seq = read_data[sequence_index]\r\n read_qual = read_data[quality_index]\r\n\r\n found_primer_match = False\r\n # Break from orientation search as soon as a match is found\r\n if attempt_read_orientation:\r\n for curr_primer in forward_primers:\r\n if curr_primer.search(read_data[sequence_index]):\r\n found_primer_match = True\r\n break\r\n if not found_primer_match:\r\n for curr_primer in reverse_primers:\r\n if curr_primer.search(read_data[sequence_index]):\r\n read_seq = str(DNA(read_seq).rc())\r\n read_qual = read_qual[::-1]\r\n found_primer_match = True\r\n break\r\n\r\n if not found_primer_match and attempt_read_orientation:\r\n output_bc = output_bc_not_oriented\r\n output_read = fastq_out_not_oriented\r\n else:\r\n output_bc = output_bc_fastq\r\n output_read = output_fastq\r\n\r\n bc_read1 = read_seq[0:bc1_len]\r\n bc_read2 = read_seq[-bc2_len:]\r\n bc_qual1 = read_qual[0:bc1_len]\r\n bc_qual2 = read_qual[-bc2_len:]\r\n\r\n if rev_comp_bc1:\r\n bc_read1 = str(DNA(bc_read1).rc())\r\n bc_qual1 = bc_qual1[::-1]\r\n if rev_comp_bc2:\r\n bc_read2 = str(DNA(bc_read2).rc())\r\n bc_qual2 = bc_qual2[::-1]\r\n\r\n if switch_bc_order:\r\n bc_read1, bc_read2 = bc_read2, bc_read1\r\n bc_qual1, bc_qual2 = bc_qual2, bc_qual1\r\n\r\n bc_lines = format_fastq_record(read_data[header_index],\r\n bc_read1 + bc_read2,\r\n np.hstack([bc_qual1, bc_qual2]))\r\n output_bc.write(bc_lines)\r\n seq_lines = format_fastq_record(read_data[header_index],\r\n read_seq[bc1_len:-bc2_len], read_qual[bc1_len:-bc2_len])\r\n output_read.write(seq_lines)\r\n\r\n return", "def make_fastq_single(in_fasta, quals, out_fp,\r\n label_transform=split_lib_transform):\r\n outfile = open(out_fp, 'w')\r\n for rec, seq_id in iter_fastq(in_fasta, quals, label_transform):\r\n outfile.write(rec + '\\n')\r\n outfile.close()", "def test_illumina_data_to_fastq(self):\r\n in1 = (\r\n \"M10\",\r\n \"68\",\r\n \"1\",\r\n \"1\",\r\n \"28680\",\r\n \"29475\",\r\n \"0\",\r\n \"1\",\r\n \"AACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACG.\",\r\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\r\n \"0\")\r\n expected = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACGN\\n+\\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"\"\", 0\r\n\r\n self.assertEqual(illumina_data_to_fastq(in1), expected)\r\n\r\n expected12 = 
\"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAG\\n+\\nBBBBBBBBBBBB\"\"\", 0\r\n self.assertEqual(\r\n illumina_data_to_fastq(\r\n in1,\r\n number_of_bases=12),\r\n expected12)\r\n\r\n # different value in the pass filter field\r\n in2 = (\r\n \"M10\",\r\n \"68\",\r\n \"1\",\r\n \"1\",\r\n \"28680\",\r\n \"29475\",\r\n \"0\",\r\n \"1\",\r\n \"AACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACG.\",\r\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\r\n \"1\")\r\n expected = \"\"\"@M10_68:1:1:28680:29475#0/1\\nAACGAAAGGCAGTTTTGGAAGTAGGCGAATTAGGGTAACGCATATAGGATGCTAATACAACGTGAATGAAGTACTGCATCTATGTCACCAGCTTATTACAGCAGCTTGTCATACATGGCCGTACAGGAAACACACATCATAGCATCACACGN\\n+\\nBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"\"\", 1\r\n\r\n self.assertEqual(illumina_data_to_fastq(in2), expected)", "def _cmd_miraligner(fn, out_file, species, hairpin, out):\n tool = _get_miraligner()\n path_db = op.dirname(op.abspath(hairpin))\n cmd = \"{tool} -freq -i {fn} -o {out_file} -s {species} -db {path_db} -sub 1 -trim 3 -add 3\"\n if not file_exists(out_file):\n logger.info(\"Running miraligner with %s\" % fn)\n do.run(cmd.format(**locals()), \"miraligner with %s\" % fn)\n shutil.move(out_file + \".mirna\", out_file)\n return out_file", "def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))", "def scarf_to_fastq(infile=sys.stdin, outfile=sys.stdout):\n infile = open_gzipped(infile)\n outfile = open_gzipped(outfile, 'wt')\n for line in 
infile:\n fields = line.rstrip().split(':')\n qual = fields.pop()\n seq = fields.pop()\n outfile.write('{0}\\n{1}\\n+\\n{2}\\n'.format(\n '@' + ':'.join(fields),\n seq,\n qual))", "def convert_bam_to_fastq(in_file, work_dir, config):\n out_dir = safe_makedir(os.path.join(work_dir, \"fastq_convert\"))\n out_files = [os.path.join(out_dir, \"{0}_{1}.fastq\".format(\n os.path.splitext(os.path.basename(in_file))[0], x))\n for x in [\"1\", \"2\"]]\n if _is_paired(in_file):\n out1, out2 = out_files\n else:\n out1 = out_files[0]\n out2 = None\n if not file_exists(out1):\n broad_runner = broad.runner_from_config(config)\n broad_runner.run_fn(\"picard_bam_to_fastq\", in_file, out1, out2)\n if os.path.getsize(out2) == 0:\n out2 = None\n return [out1, out2]", "def _get_fastq_to_sam_cmd(fwd_reads, sample_name, read_group, rev_reads=None):\n\n cmd = [\n 'java', '-jar', '/picard/picard.jar', 'FastqToSam',\n f'F1={fwd_reads}',\n f'O=/dev/stdout',\n 'QUIET=true',\n f'SM={sample_name}',\n f'RG={read_group}'\n ]\n if rev_reads is not None:\n cmd.append(f'F2={rev_reads}')\n\n return cmd", "def get_player2_mark(p1_mark):\r\n if p1_mark == 2:\r\n return markers[0]\r\n else:\r\n return markers[1]", "def test_tb_extract_fastq():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n fastq_file_1 = resource_path + \"tb.Human.SRR1658573_1.fastq\"\n fastq_file_2 = resource_path + \"tb.Human.SRR1658573_2.fastq\"\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n if not os.path.isfile(gem_file):\n generate_gem()\n\n with gzip.open(gem_file + '.gz', 'rb') as fgz_in:\n with open(gem_file, 'w') as f_out:\n f_out.write(fgz_in.read())\n\n with gzip.open(fastq_file_1 + '.gz', 'rb') as fgz_in:\n with open(fastq_file_1, 'w') as f_out:\n f_out.write(fgz_in.read())\n\n with gzip.open(fastq_file_2 + '.gz', 'rb') as fgz_in:\n with open(fastq_file_2, 'w') as f_out:\n f_out.write(fgz_in.read())\n\n assert os.path.isfile(fastq_file_1) is True\n assert os.path.getsize(fastq_file_1) > 0\n assert os.path.isfile(fastq_file_2) is True\n assert os.path.getsize(fastq_file_2) > 0", "def attach_barcode(sam, output):\n \n if output is None:\n output = sam.replace('.sam', '_bcqt.sam')\n infile = pysam.AlignmentFile(sam, \"r\")\n outfile = pysam.AlignmentFile(output, \"wh\", template=infile)\n for read in infile.fetch():\n id_sam = read.query_name\n sep_si = id_sam.index(':')\n# TODO Abort and raise exception if randomer info is not kept properly in the \n# read's name.\n bc_seq = id_sam[0:sep_si]\n sep_qi = sep_si + 1 + len(bc_seq)\n bc_pqs = id_sam[sep_si + 1: sep_qi]\n read.set_tag('BC', bc_seq)\n read.set_tag('QT', bc_pqs)\n read.query_name = id_sam[sep_qi+1:]\n outfile.write(read)\n outfile.close()\n infile.close()", "def tracMapping(sample, fqs, ref, outdir, cpus=25):\n logger.info(\"Start mapping %s.\\n\" % sample)\n od = os.path.join(outdir, sample)\n if not os.path.exists(od):\n os.makedirs(od, exist_ok=True)\n sam = od + \"/\" + sample + \".sam\"\n bam = od + \"/\" + sample + \".bam\"\n if os.path.isfile(sam):\n logger.error(\"%s:%s exists, return.\" % (sample, sam))\n return None\n if os.path.isfile(bam):\n logger.error(\"%s:%s exists, return.\" % (sample, bam))\n return None\n doBowtie = \"bowtie2 -p {cpus} -q --local --very-sensitive -x {ref} -1 {fq1} -2 {fq2} -S {sam}\".format(\n cpus=cpus, ref=ref, fq1=fqs[0], fq2=fqs[1], sam=sam)\n logger.info(doBowtie)\n stat, output = subprocess.getstatusoutput(doBowtie)\n #trim with \"Warning\"\n output = output.split(\"\\n\")\n output = [t for t in 
output if not t.startswith(\"Warning\")]\n output = \"\\n\".join(output)\n logger.info(\"FLAG_A:\" + sample + \"\\n\" + output + \"\\nFLAG_A\\n\")\n return sam", "def get_fastq(wildcards):\n fastqs = caseinfo.loc[(wildcards.sample, wildcards.unit), [\"fq1\", \"fq2\"]].dropna()\n if len(fastqs) == 2:\n return {\"R1\": fastqs.fq1, \"R2\": fastqs.fq2}\n return {\"R1\": fastqs.fq1, \"R2\": fastqs.fq2}", "def snakefile(self):\n return os.path.join(\n self.home, \"pipelines\", self.name, f\"{self.name}.sk\"\n )", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),", "def fastq_filename(fastq_base):\n return fastq_base+\"_1.fastq\", fastq_base+\"_2.fastq\"", "def run_bcl2fastq_2_17(*args,**kws):\n return run_bcl2fastq_2(*args,**kws)", "def map_reads(SRA):\n\n #1. bowtie to rRNA\n print(\"Bowtie alignement on contaminant RNA...\")\n cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'\n output = subprocess.run(cmd_bowtie, shell=True)\n\n # 2. 
STAR to ref genome\n print(\"STAR alignement to yeast genome...\")\n cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n # 3. Samtools keep uniquely mapped reads and sort\n print(\"Samtools to keep uniquely mapped reads and sort...\")\n cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'\n output = subprocess.run(cmd_samtools1, shell=True)\n\n cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'\n output = subprocess.run(cmd_samtools2, shell=True)\n\n cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'\n output = subprocess.run(cmd_samtools3, shell=True)", "def main(argv):\r\n\r\n mapperAbbrs = {'C':'cushaw', 'S':'shrimp', 'B':'bfast', 'W':'bwa-mem', 'N':'novoalign'}\r\n\r\n #Dictionary of commands to use for various mappers - configure your mapper commands here\r\n aligner_dict = {\r\n\t'B,CS,S':[\r\n\t\t'bfast fasta2brg -f DDiFasta -A 0',\r\n\t\t'bfast fasta2brg -f DDiFasta -A 1',\r\n\t\t'bfast index -f DDiFasta -m 1111111111111111111111 -w 14 -i 1 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111110100111110011111111111 -w 14 -i 2 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 10111111011001100011111000111111 -w 14 -i 3 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111100101111000001100011111011 -w 14 -i 4 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111110001111110011111111 -w 14 -i 5 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 11111011010011000011000110011111111 -w 14 -i 6 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111111110011101111111 -w 14 -i 7 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111011000011111111001111011111 -w 14 -i 8 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1110110001011010011100101111101111 -w 14 -i 9 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111001000110001011100110001100011111 -w 14 -i 10 -A 1 -n DDiProcs',\r\n\t\t'bfast match -f DDiFasta -A 1 -i 1-10 -k 18 -K 100000 -w 0 -t -n DDiProcs -Q 100000 -l -r DDiFastq1 > DDiBMF',\r\n\t\t'bfast localalign -f DDiFasta -m DDiBMF -A 1 -n DDiProcs -U -q 20 -Q 100000 -t > DDiBAF',\r\n\t\t'rm DDiBMF',\r\n\t\t'bfast postprocess -f DDiFasta -i DDiBAF -o DDiAligned -O 1 -a 3 -z -n DDiProcs -q 20 -Q 100000 -t > DDiSAM',\r\n\t\t'rm DDiBAF'\r\n\t ],\r\n 'C,CS,S':[\r\n 'cushaw3 index DDiFasta -c -p bwtindex',\r\n 'cushaw3 calign -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,S':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,P':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -q DDiFastq1 DDiFastq2 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'S,CS,S':[\r\n 'gmapper-cs -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,S':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,P':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts -1 DDiFastq1 -2 
DDiFastq2 DDiFasta > DDiSAM'\r\n ],\r\n\t'W,NT,S':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 > DDiSAM'\r\n ],\r\n\t'W,NT,P':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 DDiFastq2 > DDiSAM'\r\n ],\r\n\t'N,NT,S':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM -d DDiNIX -f DDiFastq1 > DDiSAM'\r\n ],\r\n\t'N,NT,P':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM NovoOpts -d DDiNIX -f DDiFastq1 DDiFastq2 > DDiSAM'\r\n ]\r\n }\r\n\r\n #Arguments that are required\r\n required = ['fastqFiles', 'mappingRefSeqFiles', 'outputDir']\r\n\r\n parser = argparse.ArgumentParser(description='Iteratively calls 3rd party mappers and DDiMap executable')\r\n\r\n #Argument options\r\n parser.add_argument('-q', type=str, metavar='file', nargs='+', help='list of fastq files', dest='fastqFiles')\r\n parser.add_argument('-r', type=str, metavar='file', nargs='+', help='list of files to use for reference sequences', dest='mappingRefSeqFiles')\r\n parser.add_argument('-j', type=str, metavar='file', nargs='+', help='list of files to use for junctions', dest='junctionRefSeqFiles')\r\n parser.add_argument('-o', type=str, metavar='directory', help='output directory', dest='outputDir')\r\n \r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('-p', '--paired', action='store_true', help='fastq files have paired ends', dest='pairedEnds')\r\n group.add_argument('-s', '--single', action='store_false', help='fastq files have single ends', dest='pairedEnds')\r\n parser.add_argument('-n', type=int, metavar='cpus', help='number of processors to use', dest='nProcs')\r\n parser.add_argument('-c', type=str, metavar='config_file', help='location of config file', dest='configFile')\r\n parser.add_argument('-v', action='store_true', help='turns on verbosity', dest='verbose')\r\n\r\n parser.add_argument('--aligner_order', type=str, metavar='{'+','.join(mapperAbbrs.keys())+'}', help='mapper sequence as a string. 
ie CSC', dest='alignerOrder')\r\n parser.add_argument('--first_iter', metavar='n', type=int, help='first iteration', dest='firstIter')\r\n parser.add_argument('--max_iters', metavar='n', type=int, help='maximum iterations', dest='maxIters')\r\n parser.add_argument('--read_length', metavar='n', type=int, help='read length', dest='readLength')\r\n parser.add_argument('--read_type', type=str, help='read type', choices=['CS','NT'], dest='readType')\r\n parser.add_argument('--req_frag_conv', help='require frags to converge as well as SNVs', action='store_true', dest='reqFragConv')\r\n parser.add_argument('--no-req_frag_conv', help='does not require frags to converge as well as SNVs', action='store_false', dest='reqFragConv')\r\n\r\n parser.add_argument('--frag_maker_thresh',type=float, metavar='threshold', help='verified frag maker threshold', dest='fragMakerThresh')\r\n parser.add_argument('--frag_thresh', type=float, metavar='threshold', help='unverified frag maker threshold', dest='fragThresh')\r\n parser.add_argument('--min_absolute_cover', type=int, metavar='n', help='minimum absolute cover', dest='minAbsoluteCover')\r\n parser.add_argument('--snv_thresh', type=float, metavar='threshold', help='SNV threshold', dest='SNVthresh')\r\n parser.add_argument('--snv_type2_thresh', type=float, metavar='threshold', help='SNV type 2 threshold', dest='SNVtype2thresh')\r\n parser.add_argument('--snv_type3_thresh', type=float, metavar='threshold', help='SNV type 3 threshold', dest='SNVtype3thresh')\r\n parser.add_argument('--roa_size', type=int, metavar='size', help='Size to use for region of analysis in DDiMAP', dest='roaSize')\r\n\r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('--use_DI', action='store_true', help='use reads mapped with deletion and insertion', dest='useDI')\r\n group.add_argument('--no-use_DI', action='store_false', help='do not use reads mapped with deletion and insertion', dest='useDI')\r\n\r\n parser.add_argument('--cushaw_opts', type=str, metavar=\"'options'\", help='cushaw specific options', dest='cushawOpts')\r\n parser.add_argument('--shrimp_opts', type=str, metavar=\"'options'\", help='shrimp specific options', dest='shrimpOpts')\r\n parser.add_argument('--bwamem_opts', type=str, metavar=\"'options'\", help='bwa-mem specific options', dest='bwaMemOpts')\r\n parser.add_argument('--novo_opts', type=str, metavar=\"'options'\", help='novoalign specific options', dest='novoOpts')\r\n\r\n\r\n #Parse args and check for config file\r\n args = parser.parse_args()\r\n if args.configFile:\r\n configFile = args.configFile\r\n if not path.isfile(configFile):\r\n print 'config file specified, but not found'\r\n exit(1)\r\n else:\r\n configFile = 'DDiMap.cfg'\r\n\r\n #Read in settings from config file\r\n Settings = read_config(configFile)\r\n\r\n # Loop over each section and replace values with those passed in on command line. \r\n # Also create a local variable that matches the keys in the settings dictionary.\r\n\r\n for section in Settings.keys():\r\n for key in Settings[section].keys():\r\n if getattr(args, key):\r\n Settings[section][key] = getattr(args, key)\r\n exec '%s = Settings[section][key]' % key\r\n if key in required and not Settings[section][key]:\r\n print '%s not specified on command line or in config file. Aborting...' 
% key\r\n print Settings[section][key]\r\n parser.print_help()\r\n exit(1)\r\n if (type(Settings[section][key]) == list):\r\n Settings[section][key] = ', '.join(Settings[section][key])\r\n\r\n if useDI: # reads with CIGARs containing both I and D are processed\r\n kFlag='-k'\r\n else: # reads with CIGARs containing both I and D are not processed\r\n kFlag=''\r\n\r\n if pairedEnds:\r\n pair_str='P'\r\n else:\r\n pair_str='S'\r\n\r\n # do the work - set up for the iteration\r\n aligners = list(alignerOrder)\r\n iterMin = len(aligners)\r\n iterMax = max(maxIters, iterMin); # always do as many iters as are in alignerOrder string\r\n aligners = aligners + list(repeat(aligners[-1], iterMax - iterMin)) # define the aligner ID sequence to be used over the iterations\r\n\r\n\r\n # Make paths absolute\r\n fastqFiles = [path.abspath(x) for x in fastqFiles]\r\n mappingRefSeqFiles = [path.abspath(x) for x in mappingRefSeqFiles]\r\n junctionRefSeqFiles = [path.abspath(x) for x in junctionRefSeqFiles]\r\n outputDir = path.abspath(outputDir) + '/'\r\n\r\n # Make sure the output directory exists\r\n\r\n if not path.isdir(outputDir):\r\n makedirs(outputDir)\r\n\r\n # Write configuration file in outputDir\r\n write_config(outputDir, Settings)\r\n\r\n # INITIAL VALUES OF LOOP CONTROL PARAMETERS\r\n converged = False\r\n prevFragList = [] # this will be replaced by counts of fragments created for each baseline refernce sequence\r\n prevSNVList = [] # this will be replaced by counts of SNV candidates found for each baseline reference sequence\r\n\r\n thisIter = firstIter\r\n\r\n\r\n for RefSeqFile in fastqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find fastqFile at ' + RefSeqFile\r\n exit(1)\r\n\r\n # Delete old enhanced fast file if present. It should never be...\r\n\r\n enhancedFastaFile = outputDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): # see if one is already here - need to zap it\r\n remove(enhancedFastaFile) # remove if present because fastawrite appends to existing files\r\n output_handle = open(enhancedFastaFile, 'a')\r\n\r\n # Add reference sequences to file with _Ref tag\r\n RefSeqs=[]\r\n for RefSeqFile in mappingRefSeqFiles:\r\n\tprint 'ref seq file = ' + RefSeqFile\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n SeqIO.write(formattedRefSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n \r\n # Create junctions if they are needed and then add to ref seq file as mapping targets for chimeric reads\r\n RefSeqs=[]\r\n for RefSeqFile in junctionRefSeqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n junctionSeqs = make_junctions(formattedRefSeqs,readLength);\r\n SeqIO.write(junctionSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n\r\n # allows restarts\r\n if thisIter > 1: # there is no previous iteration, so start fresh\r\n prevWorkingDir = outputDir + ('Gen%d/' % (thisIter-1))\r\n for i in range(1, thisIter):\r\n prevWorkingDir = '%sGen%d/' % (outputDir, i) \r\n fragFile = prevWorkingDir + 'fasta.fa'\r\n snvFile = prevWorkingDir + 'snv.csv'\r\n 
ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv)\r\n\r\n\r\n while not converged and thisIter <= iterMax:\r\n \r\n print '======= Iteration %d of %d ========' % (thisIter, iterMax)\r\n\r\n # creates working dir if not present\r\n thisWorkingDir = outputDir + ('Gen%d/' % thisIter)\r\n if path.isdir(thisWorkingDir):\r\n rmtree(thisWorkingDir)\r\n makedirs(thisWorkingDir)\r\n \r\n # Delete old enhanced fast file if present. It should never be...\r\n enhancedFastaFile = thisWorkingDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): \r\n remove(enhancedFastaFile) \r\n copyfile(outputDir + 'refSeqEnhanced.fa', enhancedFastaFile)\r\n\r\n output_handle = open(enhancedFastaFile, 'a')\r\n \r\n # Append frags from previous iteration if any (these sequences are tagged as fragments when the file is written by DDiMAP)\r\n if (thisIter > 1):\r\n prevFragFile=prevWorkingDir + '/fasta.fa'\r\n if path.isfile(prevFragFile) and path.getsize(prevFragFile) > 0:\r\n fragSeqs=list(SeqIO.parse(prevFragFile, 'fasta'))\r\n SeqIO.write(fragSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n # Setup variables for aligner\r\n thisAligner=aligners[thisIter-1]\r\n thisAligned='DDiMAP_%s' % thisAligner\r\n \r\n if path.isfile(thisWorkingDir + 'mapper.log'):\r\n remove(thisWorkingDir + 'mapper.log')\r\n\r\n if not ','.join([thisAligner,readType,pair_str]) in aligner_dict.keys():\r\n print mapperAbbrs[thisAligner] + ' does not support ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n\r\n # execute commands for aligner\r\n\r\n open(thisWorkingDir + 'mapper.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'mapper.log'])\r\n\r\n # set substitutions for aligner commands\r\n commandsubs={'DDiFastq1':fastqFiles[0], \r\n 'DDiProcs':nProcs, \r\n 'DDiFasta':enhancedFastaFile, \r\n 'DDiBMF':thisAligned + '.bmf', \r\n 'DDiBAF':thisAligned + '.baf', \r\n 'DDiSAM':thisAligned + '.sam',\r\n 'DDiNIX':thisAligned + '.nix', \r\n 'DDiAligned':thisAligned, \r\n 'CushawOpts':cushawOpts, \r\n 'ShrimpOpts':shrimpOpts, \r\n 'BwaMemOpts':bwaMemOpts, \r\n 'NovoOpts':novoOpts}\r\n\r\n if (len(fastqFiles) > 1):\r\n commandsubs['DDiFastq2']=fastqFiles[1]\r\n\r\n for command in aligner_dict[','.join([thisAligner,readType,pair_str])]:\r\n cmdlist=re.split('\\s*',command)\r\n #remove empty arguments and subsitute in values from commandsubs \r\n args=filter(None,[str(commandsubs[x]) if x in commandsubs.keys() else x for x in cmdlist])\r\n args=re.split('\\s*',' '.join(args)) \r\n print ' '.join(args) # output actual command\r\n if 'DDiFastq2' in args: #This hasn't been substituted because one wasn't provided\r\n print mapperAbbrs[thisAligner] + ' expects 2 fastq files for use with ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n # Now we need to detect stdout redirection and do it properly using pOpen\r\n if '>' in args: \r\n i = args.index('>')\r\n outfile = args[i+1]\r\n del args[i:i+2]\r\n else:\r\n outfile = None\r\n \r\n log_file = open(thisWorkingDir + 'mapper.log', 'a')\r\n \r\n if (outfile):\r\n with open(thisWorkingDir + outfile, 'w') as output_file:\r\n a=Popen(args, cwd=thisWorkingDir, stdout=output_file, stderr=log_file)\r\n else:\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n\r\n success=a.wait()\r\n log_file.close()\r\n if not success == 0:\r\n 
print '*** mapper exited with error', success\r\n print 'See ' + thisWorkingDir + 'mapper.log' + ' for more details'\r\n exit(success)\r\n\r\n if verbose:\r\n b.terminate()\r\n # Perform sam to bam conversion for DDiMap\r\n args=['samtools', 'view', '-b', '-S', '-o', thisAligned + '.bam', thisAligned + '.sam']\r\n print ' '.join(args) \r\n\r\n open(thisWorkingDir + 'samtools.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'samtools.log'])\r\n log_file = open(thisWorkingDir + 'samtools.log', 'w')\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n success=a.wait()\r\n log_file.close()\r\n if verbose:\r\n b.terminate()\r\n if not success == 0:\r\n print '*** samtools exited with error', success\r\n print 'See ' + thisWorkingDir + 'samtools.log' + ' for more details' \r\n exit(success)\r\n # remove the uncompressed sam file\r\n args=['rm', thisAligned + '.sam'];\r\n a=Popen(args, cwd=thisWorkingDir)\r\n\r\n # now run the DDiMAP code\r\n thisAlignedFile = thisWorkingDir + thisAligned + '.bam'\r\n args = (['DDiMAP', kFlag, '-r', roaSize, '-f', enhancedFastaFile, '-b', \r\n thisAlignedFile, '-c', minAbsoluteCover, '-n', fragThresh, '-a', \r\n fragMakerThresh, '-p', SNVthresh, '-s', SNVtype2thresh, '-l', \r\n SNVtype3thresh, '-o', thisWorkingDir])\r\n args = [str(x) for x in args]\r\n print ' '.join(args)\r\n open(thisWorkingDir + 'DDiMap.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'DDiMap.log'])\r\n log_file = open(thisWorkingDir + 'DDiMap.log', 'a')\r\n a = Popen(args, cwd=thisWorkingDir, stdout=log_file, stderr=log_file)\r\n success=a.wait()\r\n if verbose:\r\n b.terminate()\r\n log_file.close()\r\n if not success == 0:\r\n print '*** DDiMap exited with error', success\r\n print 'See ' + thisWorkingDir + 'DDiMap.log' + ' for more details'\r\n exit(success)\r\n \r\n # now check for convergence\r\n \r\n fragFile = thisWorkingDir + 'fasta.fa'\r\n snvFile = thisWorkingDir + 'snv.csv'\r\n \r\n # call to the convergence test matlab function\r\n # result history kept in currFrags/prevFrags and currSNVs/prevSNVs\r\n \r\n if ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv):\r\n print 'Convergence found. Stopping...'\r\n break\r\n\r\n prevWorkingDir = thisWorkingDir; # all done with the previous, this will be the next iteration previous directory\r\n thisIter = thisIter+1\r\n else:\r\n print 'Failed to converge'\r\n\r\n print '%10s %10s %10s' % ('Iteration', 'nFrags', 'nSNVs')\r\n for i, (frags, snvs) in enumerate(zip(prevFragList, prevSNVList)):\r\n print '%10d %10d %10d' % (i+1, sum(frags), sum(snvs))\r\n\r\n # put final results into outputDir\r\n # make renamed copies of the final iteration result files, naming them using\r\n copyfile(thisWorkingDir+'fasta.fa',outputDir+'convergedFrags.fa')\r\n copyfile(thisWorkingDir+'dictionary.csv',outputDir+'convergedDictionary.csv')\r\n copyfile(thisWorkingDir+'snv.csv',outputDir+'convergedSNVs.csv')\r\n copyfile(thisWorkingDir+'coverage.csv',outputDir+'convergedCoverage.csv')\r\n copyfile(thisWorkingDir+'refSeqEnhanced.fa',outputDir+'convergedEnhancedRefSeqs.fa')" ]
[ "0.5712822", "0.5585956", "0.55091625", "0.53963697", "0.5361064", "0.524931", "0.5248761", "0.51786894", "0.5167982", "0.51404357", "0.51352984", "0.5132739", "0.5129055", "0.5116135", "0.5114908", "0.5113061", "0.5099794", "0.5082188", "0.5070987", "0.50707835", "0.5056807", "0.50429195", "0.50330603", "0.5020311", "0.5018242", "0.5015425", "0.5011562", "0.5002994", "0.49972245", "0.4966091" ]
0.6935189
0
Sets the price_source of this QuoteSeriesId.
def price_source(self, price_source):
    self._price_source = price_source
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_price(self, price):\n assert isinstance(price, float), 'Price must be a float'\n self._price = price", "def price(self, price):\n\n self._price = price", "def price(self, price):\n\n self._price = price", "def price(self, price):\n\n self._price = price", "def price(self, price):\n\n self._price = price", "def set_price(self, _price):\n self.price = _price\n return self.price", "def price(self, price: float):\n\n self._price = price", "def setPrice(self, val):\n self.price = val", "def price_link(self, price_link):\n\n self._price_link = price_link", "def price(self, price):\n if price is None:\n raise ValueError(\"Invalid value for `price`, must not be `None`\")\n\n self._price = price", "def set_data_source(self, source_id):\n self.data_source = source_id", "def source_id(self, source_id):\n\n self._source_id = source_id", "def source_id(self, source_id):\n\n self._source_id = source_id", "def target_prices(self, target_prices: List[float]):\n\n self._target_prices = target_prices", "def price_low(self, price_low):\n\n self._price_low = price_low", "def price_bound(self, price_bound):\n\n self._price_bound = price_bound", "def source_id(self, source_id: str):\n\n self._source_id = source_id", "def original_price(self, original_price):\n\n self._original_price = original_price", "def set_source(self, source):\n self.data['source'] = source", "def attribute_price(self, attribute_price):\n\n self._attribute_price = attribute_price", "def price_group(self, price_group: str):\n\n self._price_group = price_group", "def price(self, value):\n self._price = Decimal(value)", "def setPIDSourceType(self, pidSource: PIDSourceType) -> None:\n self.pidSource = ...", "def set_source(self, source_name):\n self.source = source_name", "def fill_price(self, fill_price):\n\n self._fill_price = fill_price", "def base_price(self, base_price):\n if base_price is None:\n raise ValueError(\"Invalid value for `base_price`, must not be `None`\")\n\n self._base_price = base_price", "def _set_source(self, source):\n if source != self._source:\n self._source = source\n self._channel = \"\"\n self._channel_name = \"\"\n self._is_forced_val = True\n self._forced_count = 0", "def price_high(self, price_high):\n\n self._price_high = price_high", "def set_flow_source(self, source):\n self._source = source", "def price_offset(self, price_offset):\n\n self._price_offset = price_offset" ]
[ "0.6167126", "0.6058284", "0.6058284", "0.6058284", "0.6058284", "0.60502464", "0.6015236", "0.5999806", "0.59924823", "0.59658754", "0.58039397", "0.5769713", "0.5769713", "0.57653004", "0.5605163", "0.5595293", "0.55577815", "0.55550915", "0.5468279", "0.54497796", "0.53435624", "0.5294272", "0.5280004", "0.5237649", "0.51355445", "0.5109216", "0.5088466", "0.508282", "0.50532895", "0.5029016" ]
0.8176036
0
Sets the instrument_id of this QuoteSeriesId.
def instrument_id(self, instrument_id):
    if self.local_vars_configuration.client_side_validation and instrument_id is None:  # noqa: E501
        raise ValueError("Invalid value for `instrument_id`, must not be `None`")  # noqa: E501
    if (self.local_vars_configuration.client_side_validation and
            instrument_id is not None and len(instrument_id) < 1):
        raise ValueError("Invalid value for `instrument_id`, length must be greater than or equal to `1`")  # noqa: E501

    self._instrument_id = instrument_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_instrument(self, instrument_id, channel=0):\n if not 0 <= instrument_id <= 127:\n raise ValueError(f\"Undefined instrument id: {instrument_id}\")\n\n if not 0 <= channel <= 15:\n raise ValueError(\"Channel not between 0 and 15.\")\n\n self.write_short(0xC0 + channel, instrument_id)", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def instrument(self, instrument):\n\n self._instrument = instrument", "def setInstrument(self,instrument):\n self.instrument = instrument\n self.instrument.attach(self)", "def survey_id(self, survey_id):\n\n self.logger.debug(\"In 'survey_id' setter.\")\n\n self._survey_id = survey_id", "def instrumentName(self, instrumentName):\n\n self._instrumentName = instrumentName", "def stock_id(self, stock_id):\n\n self._stock_id = stock_id", "def instrument_ref(self, instrument_ref):\n\n self._instrument_ref = instrument_ref", "def instrument_id_type(self, instrument_id_type):\n allowed_values = [None,\"LusidInstrumentId\", \"Figi\", \"RIC\", \"QuotePermId\", \"Isin\", \"CurrencyPair\", \"ClientInternal\", \"Sedol\", \"Cusip\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and instrument_id_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `instrument_id_type` ({0}), must be one of {1}\" # noqa: E501\n .format(instrument_id_type, allowed_values)\n )\n\n self._instrument_id_type = instrument_id_type", "def instrumentToken(self, instrumentToken):\n\n self._instrumentToken = instrumentToken", "def enclosure_id(self, enclosure_id):\n\n self._enclosure_id = enclosure_id", "def trace_id(self, trace_id):\n\n self._trace_id = trace_id", "def trace_id(self, trace_id):\n\n self._trace_id = trace_id", "def survey_id(self, survey_id):\n\n self._survey_id = survey_id", "def ion_id(self, ion_id):\n\n self._ion_id = ion_id", "def setId(self, identifier):\n self.identifier = identifier", "def set_ident(self, new_ident: int):\n if not isinstance(new_ident, int):\n raise TypeError(\"Spectrum set identifiers may ONLY be positive integers\")\n self._set_ident = new_ident", "def setID(self, id):\r\n raise NotImplementedError(\"must be implemented in subclass\")", "def merchant_id(self, merchant_id):\n\n self._merchant_id = merchant_id", "def iris_quarted_id(self, iris_quarted_id):\n\n self._iris_quarted_id = iris_quarted_id", "def study_id(self, study_id):\n\n self._study_id = study_id", "def study_id(self, study_id):\n\n self._study_id = study_id", "def study_id(self, study_id):\n\n self._study_id = study_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def transaction_id(self, transaction_id):\n\n self._transaction_id = transaction_id", "def warehouse_id(self, warehouse_id):\n\n self._warehouse_id = warehouse_id", "def station_id(self, station_id: str):\n\n self._station_id = station_id", "def instrument(self, i: int) -> None:\n if i > INSTRUMENT_MAX:\n self._instrument = INSTRUMENT_MAX\n if i < INSTRUMENT_MIN:\n self._instrument = INSTRUMENT_MIN\n else:\n self._instrument = i" ]
[ "0.6593195", "0.65317917", "0.65317917", "0.65317917", "0.65317917", "0.6389134", "0.63215214", "0.6160165", "0.6136501", "0.61186457", "0.60556626", "0.60402614", "0.603777", "0.5880715", "0.5880715", "0.58299214", "0.576715", "0.5766734", "0.566574", "0.55925006", "0.55914956", "0.558829", "0.55674237", "0.55674237", "0.55674237", "0.55562645", "0.55562645", "0.5506503", "0.55035114", "0.5495302" ]
0.70015216
0
Sets the instrument_id_type of this QuoteSeriesId.
def instrument_id_type(self, instrument_id_type):
    allowed_values = [None, "LusidInstrumentId", "Figi", "RIC", "QuotePermId", "Isin", "CurrencyPair", "ClientInternal", "Sedol", "Cusip"]  # noqa: E501
    if self.local_vars_configuration.client_side_validation and instrument_id_type not in allowed_values:  # noqa: E501
        raise ValueError(
            "Invalid value for `instrument_id_type` ({0}), must be one of {1}"  # noqa: E501
            .format(instrument_id_type, allowed_values)
        )

    self._instrument_id_type = instrument_id_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def id_type(self, id_type):\n\n self._id_type = id_type", "def type_id(self, type_id):\n if type_id is None:\n raise ValueError(\"Invalid value for `type_id`, must not be `None`\")\n\n self._type_id = type_id", "def identifier_type(self, identifier_type):\n self._identifier_type = identifier_type", "def signer_id_type(self, signer_id_type):\n\n self._signer_id_type = signer_id_type", "def setDataSetType(self, type):\n self.__data_set_type__ = type", "def vertex_id_type(self, vertex_id_type):\n self._vertex_id_type = vertex_id_type", "def session_type_id(self, session_type_id):\n\n self._session_type_id = session_type_id", "def payment_type_id(self, payment_type_id):\n\n self._payment_type_id = payment_type_id", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self._type = type", "def data_type_id(self, data_type_id):\n\n self._data_type_id = data_type_id", "def event_type_id(self, event_type_id):\n\n self._event_type_id = event_type_id", "def data_type_id(self, value: str):\n self._data_type_id = value", "def engine_type(self, engine_type):\n\n self._engine_type = engine_type", "def type(self, type: str):\n\n self._type = type", "def instrument_type(self):\n \n raise NotImplementedError()", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type" ]
[ "0.7232628", "0.65018576", "0.6447048", "0.6281578", "0.61472297", "0.6039474", "0.6032719", "0.5949014", "0.5932484", "0.5932484", "0.59108263", "0.58610237", "0.5860774", "0.5670331", "0.56138617", "0.56090164", "0.5546033", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215", "0.55052215" ]
0.8050317
0
Sets the quote_type of this QuoteSeriesId.
def quote_type(self, quote_type):
    allowed_values = [None, "Price", "Spread", "Rate", "LogNormalVol", "NormalVol", "ParSpread", "IsdaSpread", "Upfront", "Index", "Ratio", "Delta", "PoolFactor"]  # noqa: E501
    if self.local_vars_configuration.client_side_validation and quote_type not in allowed_values:  # noqa: E501
        raise ValueError(
            "Invalid value for `quote_type` ({0}), must be one of {1}"  # noqa: E501
            .format(quote_type, allowed_values)
        )

    self._quote_type = quote_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_quote_kind(self, quote_kind: QuoteKind):\n if quote_kind != self.quote_kind:\n self.quote_kind = quote_kind\n if self.geo is None:\n self.load_candles()\n else:\n self.geo.update(quote_kind=quote_kind)\n self.chart.redraw(self.geo)", "def id_type(self, id_type):\n\n self._id_type = id_type", "def setDataSetType(self, type):\n self.__data_set_type__ = type", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self._type = type", "def entity_type(self, entity_type):\n\n self._entity_type = entity_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def entity_type(self, entity_type):\n self._entity_type = entity_type", "def type_id(self, type_id):\n if type_id is None:\n raise ValueError(\"Invalid value for `type_id`, must not be `None`\")\n\n self._type_id = type_id", "def entity_type(self, entity_type: str):\n\n self._entity_type = entity_type", "def set_target_buy_policy_type(self, type):\n self.single_selection_from_kendo_dropdown(self.target_buy_policy_type_kendo_dropdown_locator, type)", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type" ]
[ "0.6272611", "0.5894381", "0.585228", "0.57743025", "0.57743025", "0.57674825", "0.55836105", "0.5559522", "0.5559522", "0.5559522", "0.5559522", "0.55181074", "0.5478835", "0.54692364", "0.5365564", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443", "0.5340443" ]
0.75797933
0
Defines a list of coordinates to consider neighbors.
def define_neighbors(x: int, y: int, z: int) -> list:
    diffs = range(-1, 2)
    coords = []
    # might need to add some if guards (if x > 0) (if x < len(blah) etc)
    xdiffs = (x + diff for diff in diffs)
    ydiffs = (y + diff for diff in diffs)
    zdiffs = (z + diff for diff in diffs)
    neighbors = product(xdiffs, ydiffs, zdiffs)
    for index, neighbor in enumerate(neighbors):
        if neighbor != (x, y, z) and all(c >= 0 for c in neighbor):
            coords.append(neighbor)
    return coords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neighbors(self, x):\n pass", "def neighbors(self, coord):\n if not self.check_coord(coord):\n raise ValueError(\"Invalid coordinates\")\n x, y = coord\n n = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]\n ret = [c for c in n if self.check_coord(c)]\n\n return ret", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #Assign each of the neighbours\n # Top-left to the top-right\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # Left to right\n left = (row, col - 1)\n # The '(row, col)' coordinates passed to this\n # function are situated here\n right = (row, col + 1)\n \n # Bottom-left to bottom-right\n bottom_left = (row + 1, col - 1)\n bottom_center = (row + 1, col)\n bottom_right = (row + 1, col + 1)\n \n return [top_left, top_center, top_right,\n left, right,\n bottom_left, bottom_center, bottom_right]", "def get_neighbours(self, coords):\n\n\t dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n\t (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n\t (-1,2),(0,2),(1,2),(0,0)]\n\t neighbours = []\n\t for dx, dy in dxdy:\n\t neighbour_coords = coords[0] + dx, coords[1] + dy\n\t if not (0 <= neighbour_coords[0] < self.nx and\n\t 0 <= neighbour_coords[1] < self.ny):\n\t # We're off the grid: no neighbours here.\n\t continue\n\t neighbour_cell = self.cells[neighbour_coords]\n\t if neighbour_cell is not None:\n\t # This cell is occupied: store this index of the contained point.\n\t neighbours.append(neighbour_cell)\n\t return neighbours", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]", "def _get_neighbors(self, coord):\n neighbors = []\n for change in self._changes:\n neighbor_coord = np.array(coord) + change\n if np.logical_or(np.any(neighbor_coord < 0), np.any(neighbor_coord >= self._grid_shape)):\n continue\n neighbors.append(tuple(neighbor_coord))\n return neighbors", "def get_neighbours(self):\n return []", "def neighbors_ip(self):\n neighbors = self.neighbors()\n nei_list = []\n net_ip = self._rloc_ip_net_addr()\n if neighbors is not None:\n for nei_rec in neighbors:\n nei_ip = net_ip + hex(nei_rec.rloc16)[2:]\n nei_list.append(nei_ip)\n return nei_list", "def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors", "def get_neighbours(coords):\n\n dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n 
(-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n (-1,2),(0,2),(1,2),(0,0)]\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def neighbor(self, start):\n x = start[0] + random.uniform(-20, 20)\n y = start[1] + random.uniform(-20, 20)\n x = max(min(x, xbounds[1]), xbounds[0])\n y = max(min(y, ybounds[1]), ybounds[0])\n return [x,y]", "def __find_neighbors(self, list_of_nodes):\n for node in list_of_nodes:\n x_pos = node.location[0]\n y_pos = node.location[1]\n if x_pos - 1 >= 0:\n # find the node in the list of nodes\n # add it as a neighbor of the current node\n neighbor = self.__find_neighbor_at(x_pos - 1, y_pos, list_of_nodes)\n node.add_neighbor(neighbor)\n if x_pos + 1 <= self.__n_rows - 1:\n neighbor = self.__find_neighbor_at(x_pos + 1, y_pos, list_of_nodes)\n node.add_neighbor(neighbor)\n if y_pos - 1 >= 0:\n neighbor = self.__find_neighbor_at(x_pos, y_pos - 1, list_of_nodes)\n node.add_neighbor(neighbor)\n if y_pos + 1 <= self.__n_columns - 1:\n neighbor = self.__find_neighbor_at(x_pos, y_pos + 1, list_of_nodes)\n node.add_neighbor(neighbor)", "def __getNeighbours(self, x: int, y: int) -> List:\n\t\tneighbours = []\n\t\tneighbours.append((x, y + 1))\n\t\tneighbours.append((x, y - 1))\n\t\tneighbours.append((x + 1, y))\n\t\tneighbours.append((x - 1, y))\n\t\tneighbours.append((x + 1, y + 1))\n\t\tneighbours.append((x - 1, y + 1))\n\t\tneighbours.append((x - 1, y - 1))\n\t\tneighbours.append((x + 1, y - 1))\n\n\t\tvalid_neighbours = [x for x in neighbours if x[0] > 0 and x[0] <= 5 and x[1] > 0 and x[1] <= 5]\n\n\t\treturn valid_neighbours", "def _valid_neighbors(location, some_num):\n xloc, yloc = location\n vector = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n ret_v = []\n for vect in vector:\n xpos = xloc + vect[0]\n ypos = yloc + vect[1]\n if xpos <= 0 or ypos <= 0:\n continue\n if xpos > some_num or ypos > some_num:\n continue\n ret_v.append((xpos, ypos))\n return ret_v", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def set_neighbors(self):\n for loc, cell in self._land_cells.items():\n neighbor_cells = [\n self.landscape[(loc[0] - 1, loc[1])],\n self.landscape[(loc[0], loc[1] + 1)],\n self.landscape[(loc[0] + 1, loc[1])],\n self.landscape[(loc[0], loc[1] - 1)],\n ]\n cell.land_cell_neighbors = [\n neighbor for neighbor in neighbor_cells if neighbor.type != \"Water\"\n ]", "def set_neighbors(self, neighbors):\n\t\tif self.is_last_zone==False:\n\t\t\tleft_neighbor, bottom_neighbor, right_neighbor = neighbors\n\t\t\tself.left_neighbor = left_neighbor\n\t\t\tself.right_neighbor = right_neighbor\n\t\t\tself.bottom_neighbor = bottom_neighbor\n\t\t\n\t\telif self.is_last_zone==True:\n\t\t\t### By convention, the left neighbor of the last zone\n\t\t\t### is zone n-2, while the right neigbor is zone 0\n\t\t\tleft_neighbor, upper_neighbors, right_neighbor = neighbors\n\t\t\tself.left_neighbor = left_neighbor\n\t\t\tself.right_neighbor = right_neighbor\n\t\t\tself.upper_neighbors = upper_neighbors", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return 
self.neighbors", "def get_neighbors(self):\n return list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))", "def list_neighbors(current_row, current_col, grid_size):\n neighbors = []\n for row_offset, col_offset in [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1)]:\n new_row = current_row + row_offset\n new_col = current_col + col_offset\n if (new_row >= 0 and new_row < grid_size and new_col >= 0\n and new_col < grid_size):\n neighbors.append((new_row, new_col))\n return neighbors", "def neighbors(self):\n return self._neighbors", "def get_neighbors(self, pos):\n y, x = pos\n neighbors = []\n\n # Check if sarting position for dijsktra.\n if y == sys.maxsize:\n neighbors = [(self.size-1,i) for i in range(self.size)]\n elif y == -sys.maxsize:\n neighbors = [(0,i) for i in range(self.size)]\n elif x == sys.maxsize:\n neighbors = [(i,self.size-1) for i in range(self.size)]\n elif x == -sys.maxsize:\n neighbors = [(i,0) for i in range(self.size)]\n # Position inside board\n else:\n if y-1 >= 0:\n neighbors.append((y-1, x))\n if y+1 < self.size:\n neighbors.append((y+1, x))\n if y-1 >= 0 and x+1 <= self.size-1:\n neighbors.append((y-1, x+1))\n if y+1 < self.size and x-1 >= 0:\n neighbors.append((y+1, x-1))\n if x+1 < self.size:\n neighbors.append((y, x+1))\n if x-1 >= 0:\n neighbors.append((y, x-1))\n return neighbors" ]
[ "0.7418102", "0.7229692", "0.72068447", "0.71410286", "0.7103896", "0.7096486", "0.70576566", "0.691093", "0.68926316", "0.6864414", "0.68611205", "0.685419", "0.6842417", "0.6837201", "0.6831657", "0.6794186", "0.679363", "0.6752783", "0.67450017", "0.6739434", "0.6675337", "0.665451", "0.6616888", "0.6616888", "0.6616888", "0.6616888", "0.6614753", "0.6583829", "0.65835714", "0.65833205" ]
0.72768927
1
Factory method for Mechanism; returns the type of mechanism specified or a default mechanism. If called with no arguments, returns the `default mechanism `. Arguments
def mechanism(mech_spec=None, params=None, context=None):
    # Called with a keyword
    if mech_spec in MechanismRegistry:
        return MechanismRegistry[mech_spec].mechanismSubclass(params=params, context=context)

    # Called with a string that is not in the Registry, so return default type with the name specified by the string
    elif isinstance(mech_spec, str):
        return Mechanism_Base.defaultMechanism(name=mech_spec, params=params, context=context)

    # Called with a Mechanism type, so return instantiation of that type
    elif isclass(mech_spec) and issubclass(mech_spec, Mechanism):
        return mech_spec(params=params, context=context)

    # Called with Mechanism specification dict (with type and params as entries within it), so:
    #     - get mech_type from kwMechanismType entry in dict
    #     - pass all other entries as params
    elif isinstance(mech_spec, dict):
        # Get Mechanism type from kwMechanismType entry of specification dict
        try:
            mech_spec = mech_spec[kwMechanismType]
        # kwMechanismType config_entry is missing (or mis-specified), so use default (and warn if in VERBOSE mode)
        except (KeyError, NameError):
            if Mechanism.classPreferences.verbosePref:
                print("{0} entry missing from mechanisms dict specification ({1}); default ({2}) will be used".
                      format(kwMechanismType, mech_spec, Mechanism_Base.defaultMechanism))
            return Mechanism_Base.defaultMechanism(name=kwProcessDefaultMechanism, context=context)
        # Instantiate Mechanism using mech_spec dict as arguments
        else:
            return mech_spec(context=context, **mech_spec)

    # Called without a specification, so return default type
    elif mech_spec is None:
        return Mechanism_Base.defaultMechanism(name=kwProcessDefaultMechanism, context=context)

    # Can't be anything else, so return empty
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,\n variable=None,\n params=None,\n name=None,\n prefs=None,\n context=None):\n\n # Forbid direct call to base class constructor\n if not isinstance(context, type(self)) and not kwValidate in context:\n raise MechanismError(\"Direct call to abstract class Mechanism() is not allowed; \"\n \"use mechanism() or one of the following subclasses: {0}\".\n format(\", \".join(\"{!s}\".format(key) for (key) in MechanismRegistry.keys())))\n\n# IMPLEMENT **args (PER State)\n\n self._execution_id = None\n\n # Register with MechanismRegistry or create one\n if not context is kwValidate:\n register_category(entry=self,\n base_class=Mechanism_Base,\n name=name,\n registry=MechanismRegistry,\n context=context)\n\n # Create mechanism's _stateRegistry and state type entries\n from PsyNeuLink.Components.States.State import State_Base\n self._stateRegistry = {}\n # InputState\n from PsyNeuLink.Components.States.InputState import InputState\n register_category(entry=InputState,\n base_class=State_Base,\n registry=self._stateRegistry,\n context=context)\n # ParameterState\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n register_category(entry=ParameterState,\n base_class=State_Base,\n registry=self._stateRegistry,\n context=context)\n # OutputState\n from PsyNeuLink.Components.States.OutputState import OutputState\n register_category(entry=OutputState,\n base_class=State_Base,\n registry=self._stateRegistry,\n context=context)\n\n # Mark initialization in context\n if not context or isinstance(context, object) or inspect.isclass(context):\n context = INITIALIZING + self.name + SEPARATOR_BAR + self.__class__.__name__\n else:\n context = context + SEPARATOR_BAR + INITIALIZING + self.name\n\n super(Mechanism_Base, self).__init__(variable_default=variable,\n param_defaults=params,\n prefs=prefs,\n name=name,\n context=context)\n\n # FUNCTIONS:\n\n# IMPLEMENTATION NOTE: REPLACE THIS WITH ABC (ABSTRACT CLASS)\n # Assign class functions\n self.classMethods = {\n kwMechanismExecuteFunction: self.execute,\n # kwMechanismAdjustFunction: self.adjust_function,\n # kwMechanismTerminateFunction: self.terminate_execute\n }\n self.classMethodNames = self.classMethods.keys()\n\n # Validate class methods:\n # make sure all required ones have been implemented in (i.e., overridden by) subclass\n for name, method in self.classMethods.items():\n try:\n method\n except (AttributeError):\n raise MechanismError(\"{0} is not implemented in mechanism class {1}\".\n format(name, self.name))\n\n self.value = self._old_value = None\n self._status = INITIALIZING\n self._receivesProcessInput = False\n self.phaseSpec = None\n self.processes = {}\n self.systems = {}", "def client_authenticator_factory(mechanism):\n authenticator = CLIENT_MECHANISMS_D[mechanism]\n return authenticator()", "def mechanism(self):", "def all_mechanism_types():\n global _mechtype_cache\n if _mechtype_cache is None:\n _mechtype_cache = collections.OrderedDict()\n mname = neuron.h.ref('')\n # Iterate over two mechanism types (distributed, point/artificial)\n for i in [0, 1]:\n mt = neuron.h.MechanismType(i)\n nmech = int(mt.count())\n # Iterate over all mechanisms of this type\n for j in range(nmech):\n mt.select(j)\n mt.selected(mname)\n \n # General mechanism properties\n name = mname[0] # convert hoc string ptr to python str\n \n desc = {\n 'point_process': bool(i),\n 'netcon_target': bool(mt.is_netcon_target(j)),\n 'has_netevent': bool(mt.has_net_event(j)),\n 'artificial_cell': bool(mt.is_artificial(j)),\n 
'internal_type': int(mt.internal_type()),\n }\n \n # Collect information about 4 different types of variables\n for k,ptype in [(-1, 'globals'), (1, 'parameters'), \n (2, 'assigned'), (3, 'state')]:\n desc[ptype] = {} # collections.OrderedDict()\n ms = neuron.h.MechanismStandard(name, k)\n for l in range(int(ms.count())):\n psize = ms.name(mname, l)\n pname = mname[0] # parameter name\n desc[ptype][pname] = int(psize)\n \n # Assemble everything in one place\n _mechtype_cache[name] = desc\n \n return _mechtype_cache", "def protocol_factory_method(self):\n pass", "def jobtype_factory(jobtype_toolkit):\n\n jobtype_toolkits = {'aimless_shooting': jobtype.AimlessShooting(),\n 'committor_analysis': jobtype.CommittorAnalysis(),\n 'equilibrium_path_sampling': jobtype.EquilibriumPathSampling(),\n 'find_ts': jobtype.FindTS(),\n 'umbrella_sampling': jobtype.UmbrellaSampling()}\n\n if jobtype_toolkit not in jobtype_toolkits.keys():\n raise ValueError('unsupported JobType name: ' + jobtype_toolkit)\n\n return jobtype_toolkits[jobtype_toolkit]", "def get_factory():", "def pattern_factory(self):\n\t\treturn self.args[1]", "def get_factory(self):\n\n return Factory(type(self), self.kwargs)", "def server_authenticator_factory(mechanism, password_database):\n authenticator = SERVER_MECHANISMS_D[mechanism]\n return authenticator(password_database)", "def get_model_creator(flags):\n if flags.model_name == 'policy_gradient':\n model_creator = model.PolicyGradient\n elif flags.model_name == 'a2c':\n model_creator = a2c.A2C\n elif flags.model_name == 'a3c':\n model_creator = a3c.A3C\n else:\n raise ValueError(\"Unknown model name %s!\" % flags.model_name)\n \n return model_creator", "def __init__(self, mechanism, security_protocol=None):\n\n self.mechanism = mechanism\n self.handshake_version = None\n self.auth_version = None\n self.security_protocol = security_protocol\n self._broker_connection = None", "def factory(self):\n raise NotImplementedError()", "def default(self):\n raise NotImplementedError", "def factory_method(self):\n pass", "def factory_method(self):\n pass", "def get_default(cls):\n raise NotImplementedError", "def get_strategy(self): \n if self._strategy is ServerSide:\n self.arguments = [ask_for_port()]\n\n elif self._strategy is ClientSide:\n self.arguments = [ask_for_recipient(), \n ask_for_listening_port(),\n self._get_header(),\n ask_for_test()]\n return self._strategy(*self.arguments)", "def _CreateScheme(self):\n scheme_type = self.settings[\"scheme_type\"].GetString()\n if scheme_type == \"dynamic\":\n solution_scheme = StructuralMechanicsApplication.EigensolverDynamicScheme()\n else: # here e.g. 
a stability scheme could be added\n err_msg = \"The requested scheme type \\\"\" + scheme_type + \"\\\" is not available!\\n\"\n err_msg += \"Available options are: \\\"dynamic\\\"\"\n raise Exception(err_msg)\n\n return solution_scheme", "def mechanism(self):\n return self._config[\"sasl.mechanism\"]", "def _get_default(self):\n if callable(self.default):\n return self.default()\n else:\n return self.default", "def get_default(self) -> T | None:\n return (\n self.default # TODO: deepcopy mutable defaults?\n if self.default_factory is None\n else self.default_factory()\n )", "def _get_default_behavior(self):\n return self.__default_behavior", "def get_default_factory_for_field(\n field: ModelField,\n) -> Union[NoArgAnyCallable, UnsetType]:\n default_factory = field.default_factory\n default = field.default\n\n has_factory = default_factory is not None and default_factory is not UNSET\n has_default = default is not None and default is not UNSET\n\n # defining both default and default_factory is not supported\n\n if has_factory and has_default:\n default_factory = cast(NoArgAnyCallable, default_factory)\n\n raise BothDefaultAndDefaultFactoryDefinedError(\n default=default, default_factory=default_factory\n )\n\n # if we have a default_factory, we should return it\n\n if has_factory:\n default_factory = cast(NoArgAnyCallable, default_factory)\n\n return default_factory\n\n # if we have a default, we should return it\n\n if has_default:\n return lambda: smart_deepcopy(default)\n\n # if we don't have default or default_factory, but the field is not required,\n # we should return a factory that returns None\n\n if not field.required:\n return lambda: None\n\n return UNSET", "def default() -> \"SwitchTo\":\n return SwitchTo(None)", "def choose_class(self, *args, **kwargs):", "def get_model(model_name, problem_type):\n # if user isn't \"sallamander\", it's on a dedicated instance - use all the cores\n num_usable_cores = multiprocessing.cpu_count() \\\n if os.environ['USER'] != 'sallamander' else 1\n rand_state=609\n\n if model_name == 'linear':\n model = ElasticNet(random_state=rand_state)\n elif model_name == 'logistic': \n model = LogisticRegression(random_state=rand_state)\n elif model_name == 'random_forest':\n if problem_type == 'regression':\n model = RandomForestRegressor(n_jobs = num_usable_cores, \n random_state=rand_state)\n elif problem_type == 'classification': \n model = RandomForestClassifier(n_jobs = num_usable_cores, \n random_state=rand_state)\n else: \n raise RuntimeError('Unsupported `model_name` inputted!')\n\n return model", "def init_default_builder(\n agent_type: DefaultSupportedAgent,\n agent_config: ma_types.AgentConfig,\n) -> jax_builders.GenericActorLearnerBuilder:\n if agent_type == DefaultSupportedAgent.TD3:\n assert isinstance(agent_config, td3.TD3Config)\n return td3.TD3Builder(agent_config)\n elif agent_type == DefaultSupportedAgent.SAC:\n assert isinstance(agent_config, sac.SACConfig)\n return sac.SACBuilder(agent_config)\n elif agent_type == DefaultSupportedAgent.PPO:\n assert isinstance(agent_config, ppo.PPOConfig)\n return ppo.PPOBuilder(agent_config)\n else:\n raise ValueError(f'Unsupported agent type: {agent_type}.')", "def _get_algorithm(name: str) -> Any:\n algo_cls = getattr(hashes, name.upper(), None) # hack: get class object by name\n if algo_cls is None:\n raise ValueError(f'Unsupported algorithm: hashes.{name}'.format(name=name.upper()))\n\n return algo_cls() # pylint: disable=not-callable", "def default():" ]
[ "0.5741221", "0.5434691", "0.53697497", "0.53344387", "0.5290686", "0.5281561", "0.51853085", "0.5178649", "0.51337963", "0.51319104", "0.5109797", "0.5094142", "0.50054866", "0.49880317", "0.49759454", "0.49759454", "0.49715793", "0.49620983", "0.49507004", "0.49353167", "0.4857707", "0.485656", "0.48517284", "0.4849178", "0.48438802", "0.48392564", "0.48122054", "0.4796288", "0.47900662", "0.4787242" ]
0.8268517
0
Add rather than override INPUT_STATES and/or OUTPUT_STATES Allows specification of INPUT_STATES or OUTPUT_STATES in params dictionary to be added to, rather than override those in paramClassDefaults (the default behavior)
def _filter_params(self, params):
    # INPUT_STATES:
    try:
        input_states_spec = params[INPUT_STATES]
    except KeyError:
        pass
    else:
        # Convert input_states_spec to list if it is not one
        if not isinstance(input_states_spec, list):
            input_states_spec = [input_states_spec]
        # Get inputStates specified in paramClassDefaults
        default_input_states = self.paramClassDefaults[INPUT_STATES].copy()
        # Convert inputStates from paramClassDeafults to a list if it is not one
        if not isinstance(default_input_states, list):
            default_input_states = [default_input_states]
        # Add inputState specified in params to those in paramClassDefaults
        # Note: order is important here; new ones should be last, as paramClassDefaults defines the
        #       the primary inputState which must remain first for the inputStates OrderedDictionary
        default_input_states.extend(input_states_spec)
        # Assign full set back to params_arg
        params[INPUT_STATES] = default_input_states

    # OUTPUT_STATES:
    try:
        output_states_spec = params[OUTPUT_STATES]
    except KeyError:
        pass
    else:
        # Convert output_states_spec to list if it is not one
        if not isinstance(output_states_spec, list):
            output_states_spec = [output_states_spec]
        # Get outputStates specified in paramClassDefaults
        default_output_states = self.paramClassDefaults[OUTPUT_STATES].copy()
        # Convert outputStates from paramClassDeafults to a list if it is not one
        if not isinstance(default_output_states, list):
            default_output_states = [default_output_states]
        # Add outputState specified in params to those in paramClassDefaults
        # Note: order is important here; new ones should be last, as paramClassDefaults defines the
        #       the primary outputState which must remain first for the outputStates OrderedDictionary
        default_output_states.extend(output_states_spec)
        # Assign full set back to params_arg
        params[OUTPUT_STATES] = default_output_states
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_params(self, state_dicts):\n raise NotImplementedError", "def __call__(self, inputs, states, **kwargs):\n raise NotImplementedError()", "def load_state_dict(self, state_dict):\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(EncoderImagePrecomp, self).load_state_dict(new_state)", "def load_params(self, params):\n super(MlpModel, self).load_params(params)\n self.input_shape = [None,] + self.params.data_shape\n self.label_shape = [None, self.params.num_classes]\n self.mlp_act_funcs = [activation_picker(act_func_str)\n for act_func_str in self.params.mlp_activation_functions]", "def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['transition'] = self.transition\n paramDict['steadyStatePb'] = self.steadyStatePb\n return paramDict", "def _inject_params(self, params):\n\n params.extend([LocaleParam(), CompileDomainsParam(),\n UseFuzzyParam(), StatisticsParam(),\n DirectoryParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def __init__(self, inputs=1, outputs=1, states=None, name=None, **kwargs):\n super().__init__(\n name=name, inputs=inputs, outputs=outputs, states=states, **kwargs)", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def _inject_params(self, params):\n\n params.extend([DomainParam(), InputTemplateFileParam(),\n OutputDirectoryParam(), LocaleParam(),\n WidthParam(), NoWrapParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def update_input_states(self, input_values):", "def _build_param_dict(self):\n # Add parameter handlers to parameter dict. 
\n self._param_dict.add(SBE37Parameter.OUTPUTSAL,\n r'(do not )?output salinity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.OUTPUTSV,\n r'(do not )?output sound velocity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.NAVG,\n r'number of samples to average = (\\d+)',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.SAMPLENUM,\n r'samplenumber = (\\d+), free = \\d+',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.INTERVAL,\n r'sample interval = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.STORETIME,\n r'(do not )?store time with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.TXREALTIME,\n r'(do not )?transmit real-time data',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCMODE,\n r'serial sync mode (enabled|disabled)',\n lambda match : False if (match.group(1)=='disabled') else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCWAIT,\n r'wait time after serial sync sampling = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.TCALDATE,\n r'temperature: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.TA0,\n r' +TA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA1,\n r' +TA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA2,\n r' +TA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA3,\n r' +TA3 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CCALDATE,\n r'conductivity: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.CG,\n r' +G = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CH,\n r' +H = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CI,\n r' +I = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CJ,\n r' +J = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.WBOTC,\n r' +WBOTC = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CTCOR,\n r' +CTCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CPCOR,\n r' +CPCOR = 
(-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PCALDATE,\n r'pressure .+ ((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.PA0,\n r' +PA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA1,\n r' +PA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA2,\n r' +PA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA0,\n r' +PTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA1,\n r' +PTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA2,\n r' +PTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB0,\n r' +PTCSB0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB1,\n r' +PTCSB1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB2,\n r' +PTCSB2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.POFFSET,\n r' +POFFSET = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RCALDATE,\n r'rtc: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.RTCA0,\n r' +RTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA1,\n r' +RTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA2,\n r' +RTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def setParams(self, tfInputGraph=None, inputMapping=None, outputMapping=None, tfHParms=None):\n super(TFTransformer, self).__init__()\n kwargs = self._input_kwargs\n # Further conanonicalization, e.g. 
converting dict to sorted str pairs happens here\n return self._set(**kwargs)", "def prepare_inputs(self, **inputs):\n true_inputs = {'imain_loc' : self.get_input_tensor(0, 'loc'),\n 'imain_prec' : self.get_input_tensor(0, 'prec'),\n 'ids_prec' : self.get_input_tensor(1, 'prec'),\n 'ids_A' : self.get_input_tensor(1, 'A'),\n 'iprior_scale' : self.get_input_tensor(2, 'scale')}\n \n if inputs:\n print(\"\\t\\tUpdating defaults,\", self.name, \"with\", list(inputs.keys()))\n true_inputs.update(inputs)\n return true_inputs", "def set_params(self, **kwargs):\n for param_name, value in kwargs.iteritems():\n # only set parameters that are in the default\n if param_name in self._default_params():\n setattr(self, param_name, value)\n self.params[param_name] = value\n else:\n print('AdjustedStat class does not accept %s as a ' \\\n 'parameter and will be ignored' % param_name)", "def init_params_on_input(self, train_valid_iterator: TrainValidIterator) -> dict:\n suggested_params = copy(self.default_params)\n task = train_valid_iterator.train.task\n\n assert \"sklearn\" in task.losses, \"Sklearn loss should be defined\"\n\n if task.name == \"reg\":\n # suggested_params['cs'] = list(map(lambda x: 1 / (2 * x), suggested_params['cs']))\n suggested_params[\"cs\"] = [1 / (2 * i) for i in suggested_params[\"cs\"]]\n\n return suggested_params", "def load_custom_states(self, states, *args, **kwargs):\n pass", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['C', 'dual', 'penalty', 'l1_ratio', 'tol', 'fit_intercept',\n 'solver','intercept_scaling', 'max_iter', 'multi_class',\n 'class_weight', 'random_state'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)", "def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n 
visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()", "def initialize_params(self, params):\n pass", "def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)", "def _initialize_params(self, model, use_logits, input_layer, output_layer, custom_activation):\n import keras.backend as k\n\n if hasattr(model, 'inputs'):\n self._input_layer = input_layer\n self._input = model.inputs[input_layer]\n else:\n self._input = model.input\n self._input_layer = 0\n\n if hasattr(model, 'outputs'):\n self._output = model.outputs[output_layer]\n self._output_layer = output_layer\n else:\n self._output = model.output\n self._output_layer = 0\n\n _, self._nb_classes = k.int_shape(self._output)\n self._input_shape = k.int_shape(self._input)[1:]\n self._custom_activation = custom_activation\n logger.debug('Inferred %i classes and %s as input shape for Keras classifier.', self.nb_classes,\n str(self.input_shape))\n\n # Get predictions and loss function\n label_ph = k.placeholder(shape=self._output.shape)\n if not hasattr(self._model, 'loss'):\n logger.warning('Keras model has no loss set. 
Trying to use `k.sparse_categorical_crossentropy`.')\n loss_function = k.sparse_categorical_crossentropy\n else:\n if isinstance(self._model.loss, six.string_types):\n loss_function = getattr(k, self._model.loss)\n else:\n loss_function = getattr(k, self._model.loss.__name__)\n\n self._use_logits = use_logits\n if not use_logits:\n if k.backend() == 'tensorflow':\n if custom_activation:\n preds = self._output\n loss_ = loss_function(label_ph, preds, from_logits=False)\n else:\n # We get a list of tensors that comprise the final \"layer\" -> take the last element\n preds = self._output.op.inputs[-1]\n loss_ = loss_function(label_ph, preds, from_logits=True)\n else:\n loss_ = loss_function(label_ph, self._output, from_logits=use_logits)\n\n # Convert predictions to logits for consistency with the other cases\n eps = 10e-8\n preds = k.log(k.clip(self._output, eps, 1. - eps))\n else:\n preds = self._output\n loss_ = loss_function(label_ph, self._output, from_logits=use_logits)\n if preds == self._input: # recent Tensorflow version does not allow a model with an output same as the input.\n preds = k.identity(preds)\n loss_grads = k.gradients(loss_, self._input)\n\n if k.backend() == 'tensorflow':\n loss_grads = loss_grads[0]\n elif k.backend() == 'cntk':\n raise NotImplementedError('Only TensorFlow and Theano support is provided for Keras.')\n\n # Set loss, grads and prediction functions\n self._preds_op = preds\n self._loss = loss_\n self._loss_grads = k.function([self._input, label_ph], [loss_grads])\n self._preds = k.function([self._input], [preds])\n\n # Set check for the shape of y for loss functions that do not take labels in one-hot encoding\n self._reduce_labels = (hasattr(self._loss.op, 'inputs') and\n not all(len(input_.shape) == len(self._loss.op.inputs[0].shape)\n for input_ in self._loss.op.inputs))\n\n # Get the internal layer\n self._layer_names = self._get_layers()", "def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name\n return BaseInputParams(cls)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(StateInstantiation, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.state_path is None:\n self.state_path = ''\n if self.state_class is None:\n self.state_class = ''\n if self.initial_state_name is None:\n self.initial_state_name = ''\n if self.input_keys is None:\n self.input_keys = []\n if self.output_keys is None:\n self.output_keys = []\n if self.cond_outcome is None:\n self.cond_outcome = []\n if self.cond_transition is None:\n self.cond_transition = []\n if self.behavior_class is None:\n self.behavior_class = ''\n if self.parameter_names is None:\n self.parameter_names = []\n if self.parameter_values is None:\n self.parameter_values = []\n if self.position is None:\n self.position = [0.] * 2\n if self.outcomes is None:\n self.outcomes = []\n if self.transitions is None:\n self.transitions = []\n if self.autonomy is None:\n self.autonomy = []\n if self.userdata_keys is None:\n self.userdata_keys = []\n if self.userdata_remapping is None:\n self.userdata_remapping = []\n else:\n self.state_path = ''\n self.state_class = ''\n self.initial_state_name = ''\n self.input_keys = []\n self.output_keys = []\n self.cond_outcome = []\n self.cond_transition = []\n self.behavior_class = ''\n self.parameter_names = []\n self.parameter_values = []\n self.position = [0.] 
* 2\n self.outcomes = []\n self.transitions = []\n self.autonomy = []\n self.userdata_keys = []\n self.userdata_remapping = []", "def overwrite_hyperparams(self):\n try:\n default_hyperparams = self.hyperparams\n for key in default_hyperparams:\n try:\n flag = self.FLAGS[key]\n param_value = flag.value\n if param_value is not None:\n self.hyperparams[key] = param_value\n except:\n pass\n except:\n pass", "def _register_global_params(self, params):\n\n for name,obj in self.params().items():\n global_params.add(**{name:obj})\n\n for name,val in params.items():\n global_params.params(name).default=val\n\n params.update(global_params.get_param_values())\n params[\"name\"]=self.name", "def set_params(cls, param_dict):\n for param in param_dict:\n if param in cls.params:\n cls.params[param] = param_dict[param]\n else:\n raise AttributeError(\"Invalid parameter dictionary! Format: {'<param>': <value>}\")", "def addParams(self, *params):\n for param in params:\n self.addParam(param)\n self.params = list(set(self.params))", "def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj" ]
[ "0.64630955", "0.6062027", "0.60496604", "0.5760335", "0.5741909", "0.570772", "0.56517667", "0.56494284", "0.55282074", "0.5507505", "0.5502789", "0.54870814", "0.54785043", "0.544063", "0.5429442", "0.54287565", "0.5422423", "0.5397873", "0.5389292", "0.53798217", "0.53676504", "0.5359714", "0.53562725", "0.53114176", "0.5309186", "0.5275937", "0.52745205", "0.52671444", "0.5216511", "0.52051413" ]
0.76059157
0
validate TimeScale, INPUT_STATES, FUNCTION_PARAMS, OUTPUT_STATES and MONITOR_FOR_CONTROL
def _validate_params(self, request_set, target_set=None, context=None): # Perform first-pass validation in Function.__init__(): # - returns full set of params based on subclass paramClassDefaults super(Mechanism, self)._validate_params(request_set,target_set,context) params = target_set #region VALIDATE TIME SCALE try: param_value = params[TIME_SCALE] except KeyError: if COMMAND_LINE in context: pass else: self.timeScale = timeScaleSystemDefault else: if isinstance(param_value, TimeScale): self.timeScale = params[TIME_SCALE] else: if self.prefs.verbosePref: print("Value for {0} ({1}) param of {2} must be of type {3}; default will be used: {4}". format(TIME_SCALE, param_value, self.name, type(TimeScale), timeScaleSystemDefault)) #endregion #region VALIDATE INPUT STATE(S) # MODIFIED 6/10/16 # FIX: SHOULD CHECK LENGTH OF INPUT_STATES PARAM (LIST OF NAMES OR SPECIFICATION DICT) AGAINST LENGTH OF # FIX: self.variable 2D ARRAY AND COMPARE variable SPECS, IF PROVIDED, WITH CORRESPONDING ELEMENTS OF # FIX: self.variable 2D ARRAY try: param_value = params[INPUT_STATES] except KeyError: if COMMAND_LINE in context: pass else: # INPUT_STATES not specified: # - set to None, so that it is set to default (self.variable) in instantiate_inputState # - if in VERBOSE mode, warn in instantiate_inputState, where default value is known params[INPUT_STATES] = None else: # INPUT_STATES is specified, so validate: # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_inputState) if not isinstance(param_value, (list, OrderedDict)): param_value = [param_value] # Validate each item in the list or OrderedDict # Note: # * number of inputStates is validated against length of the owner mechanism's execute method variable (EMV) # in instantiate_inputState, where an inputState is assigned to each item (value) of the EMV i = 0 for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value): from PsyNeuLink.Components.States.InputState import InputState # If not valid... if not ((isclass(item) and (issubclass(item, InputState) or # InputState class ref issubclass(item, Projection))) or # Project class ref isinstance(item, InputState) or # InputState object isinstance(item, dict) or # InputState specification dict isinstance(item, ParamValueProjection) or # ParamValueProjection tuple isinstance(item, str) or # Name (to be used as key in inputStates dict) iscompatible(item, **{kwCompatibilityNumeric: True})): # value # set to None, so it is set to default (self.variable) in instantiate_inputState param_value[key] = None if self.prefs.verbosePref: print("Item {0} of {1} param ({2}) in {3} is not a" " InputState, specification dict or value, nor a list of dict of them; " "variable ({4}) of execute method for {5} will be used" " to create a default outputState for {3}". format(i, INPUT_STATES, param_value, self.__class__.__name__, self.variable, self.execute.__self__.name)) i += 1 params[INPUT_STATES] = param_value #endregion #region VALIDATE EXECUTE METHOD PARAMS try: function_param_specs = params[FUNCTION_PARAMS] except KeyError: if COMMAND_LINE in context: pass elif self.prefs.verbosePref: print("No params specified for {0}".format(self.__class__.__name__)) else: if not (isinstance(function_param_specs, dict)): raise MechanismError("{0} in {1} must be a dict of param specifications". 
format(FUNCTION_PARAMS, self.__class__.__name__)) # Validate params from PsyNeuLink.Components.States.ParameterState import ParameterState for param_name, param_value in function_param_specs.items(): try: default_value = self.paramInstanceDefaults[FUNCTION_PARAMS][param_name] except KeyError: raise MechanismError("{0} not recognized as a param of execute method for {1}". format(param_name, self.__class__.__name__)) if not ((isclass(param_value) and (issubclass(param_value, ParameterState) or issubclass(param_value, Projection))) or isinstance(param_value, ParameterState) or isinstance(param_value, Projection) or isinstance(param_value, dict) or isinstance(param_value, ParamValueProjection) or iscompatible(param_value, default_value)): params[FUNCTION_PARAMS][param_name] = default_value if self.prefs.verbosePref: print("{0} param ({1}) for execute method {2} of {3} is not a ParameterState, " "projection, ParamValueProjection, or value; default value ({4}) will be used". format(param_name, param_value, self.execute.__self__.componentName, self.__class__.__name__, default_value)) #endregion # FIX: MAKE SURE OUTPUT OF EXECUTE FUNCTION / SELF.VALUE IS 2D ARRAY, WITH LENGTH == NUM OUTPUT STATES #region VALIDATE OUTPUT STATE(S) # FIX: MAKE SURE # OF OUTPUTS == LENGTH OF OUTPUT OF EXECUTE FUNCTION / SELF.VALUE try: param_value = params[OUTPUT_STATES] except KeyError: if COMMAND_LINE in context: pass else: # OUTPUT_STATES not specified: # - set to None, so that it is set to default (self.value) in instantiate_outputState # Notes: # * if in VERBOSE mode, warning will be issued in instantiate_outputState, where default value is known # * number of outputStates is validated against length of owner mechanism's execute method output (EMO) # in instantiate_outputState, where an outputState is assigned to each item (value) of the EMO params[OUTPUT_STATES] = None else: # OUTPUT_STATES is specified, so validate: # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_outputState) if not isinstance(param_value, (list, OrderedDict)): param_value = [param_value] # Validate each item in the list or OrderedDict i = 0 for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value): from PsyNeuLink.Components.States.OutputState import OutputState # If not valid... if not ((isclass(item) and issubclass(item, OutputState)) or # OutputState class ref isinstance(item, OutputState) or # OutputState object isinstance(item, dict) or # OutputState specification dict isinstance(item, str) or # Name (to be used as key in outputStates dict) iscompatible(item, **{kwCompatibilityNumeric: True})): # value # set to None, so it is set to default (self.value) in instantiate_outputState param_value[key] = None if self.prefs.verbosePref: print("Item {0} of {1} param ({2}) in {3} is not a" " OutputState, specification dict or value, nor a list of dict of them; " "output ({4}) of execute method for {5} will be used" " to create a default outputState for {3}". format(i, OUTPUT_STATES, param_value, self.__class__.__name__, self.value, self.execute.__self__.name)) i += 1 params[OUTPUT_STATES] = param_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_processing_validate(cfg, app, win, events):", "def __check_inputs__(self):\n # | - __check_inputs__\n # #####################################################################\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n # #####################################################################\n\n if stop_mode == \"num_generations\":\n mess_i = \"stop_mode='num_generations', \\\n Must pass int to 'stop_num_generations'\"\n assert type(stop_num_generations) == type(1), mess_i\n #__|", "def check_all_user_inputs_valid(self):\n self.check_RNN_layers_valid()\n self.check_activations_valid()\n self.check_embedding_dimensions_valid()\n self.check_initialiser_valid()\n self.check_y_range_values_valid()\n self.check_return_final_seq_only_valid()", "def state_capture_validate(cfg, app, win, events):", "def validateInputParams(self): \n # Return dictionary\n retval = {}\n retval['status'] = True\n retval['axis'] = ''\n \n # Get the separationaxis of input MMS. \n sepaxis = ph.axisType(self.__args['vis'])\n if sepaxis.isspace() or sepaxis.__len__() == 0:\n sepaxis = 'unknown'\n elif sepaxis == 'scan,spw':\n sepaxis = 'auto'\n \n #Get list of subMSs in MMS\n subMSList = ParallelTaskHelper.getReferencedMSs(self.__args['vis'])\n \n if self.__taskname == \"mstransform\":\n \n if (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == False):\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n # Get dictionary with spwids of all subMS in the MMS\n spwdict = ph.getScanSpwSummary(subMSList) \n # For each subMS, check if it has the spw selection\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine or separate spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'scan'\n break\n \n elif (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan') and \\\n (self.__args['combinespws'] == False and self.__args['nspw'] == 1):\n # Get the value of timebin as a float\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n # For each subms, check if scans length is <= timebin\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when timespan=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'spw'\n break\n \n # Two transformations are requested.\n elif (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan'):\n # Check spws and scans in subMSs\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw'])\n spwdict = ph.getScanSpwSummary(subMSList) \n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n if self.__isSpwContained(spwsel, slist):\n if not self.__isScanContained(subms, scansel, 
tsec):\n casalog.post('The subMSs of input MMS do not contain the necessary scans','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break \n else:\n casalog.post('The subMSs of input MMS do not contain the necessary spws','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break\n \n \n elif self.__taskname == \"split2\" or self.__taskname == \"split\": \n if (sepaxis != 'spw' and self.__args['combine'] == 'scan'):\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when combine=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this case.\",'ERROR')\n retval['status'] = False\n retval['axis'] = ''\n break\n\n elif self.__taskname == \"cvel2\" and sepaxis != 'scan':\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n spwdict = ph.getScanSpwSummary(subMSList) \n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this case.\",'ERROR')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = ''\n break\n \n\n return retval", "def validate(env):\n exit_code = ErrorCode.NO_ERROR\n if not env.function_name:\n print('Mandatory parameter (function_name) is missing')\n PARSER.print_help()\n exit_code = ErrorCode.MANDATORY_PARAM_MISSING\n if not MetricsUtil.validate_date(env.start_datetime, env.end_datetime):\n PARSER.print_help()\n exit_code = ErrorCode.WRONG_DATE\n if exit_code != ErrorCode.NO_ERROR:\n MetricsUtil.bail_out(ErrorCode.MANDATORY_PARAM_MISSING)\n \n return True", "def state_preview_validate(cfg, app, win, events):", "def validate():", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def Validate(self):\n \n hklmin = self.hklmin_txtCtrl.GetValue()\n hklmax = self.hklmax_txtCtrl.GetValue()\n hklsteps = self.hkl_steps_ctrl.GetValue()\n \n wmin = self.wmin_txtCtrl.GetValue()\n wmax = self.wmax_txtCtrl.GetValue()\n wsteps = self.w_steps_ctrl.GetValue()\n \n kx = self.kx_txtCtrl.GetValue()\n ky = self.ky_txtCtrl.GetValue()\n kz = self.kz_txtCtrl.GetValue()\n \n zmin = self.zmin_ctrl.GetValue()\n zmax = self.zmax_ctrl.GetValue()\n colorbar_bool = self.color_bar_box.GetValue()\n \n temp = self.temp_ctrl.GetValue()\n sphavg_bool = self.spherical_avg_box.GetValue()\n \n bgColor = \"pink\"\n failed = False\n \n #Validate hkl values\n num_hklmin = None\n num_hklmax = None\n try:\n num_hklmin = float(hklmin)*np.pi\n self.hklmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_hklmax = float(hklmax)*np.pi\n self.hklmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate w values\n num_wmin = None\n num_wmax = None\n try:\n num_wmin = float(wmin)\n self.wmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n 
self.wmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_wmax = float(wmax)\n self.wmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.wmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate kx,ky,kz,temp,zmin,zmax values\n num_kx = None\n num_ky = None\n num_kz = None\n num_temp = None\n num_zmin = None\n num_zmax = None\n try:\n num_kx = float(kx)\n self.kx_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kx_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_ky = float(ky)\n self.ky_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.ky_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n try:\n num_kz = float(kz)\n self.kz_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kz_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_temp = float(temp)\n self.temp_ctrl.SetBackgroundColour(\"white\")\n except:\n self.temp_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmin = float(zmin)\n self.zmin_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmin_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmax = float(zmax)\n self.zmax_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmax_ctrl.SetBackgroundColour(bgColor)\n failed = True\n \n #Validate File Fields\n int_str = self.int_file_txtCtrl.GetValue()\n spin_str = self.spin_file_txtCtrl.GetValue()\n tau_str = self.tau_file_txtCtrl.GetValue()\n out_str = self.output_file_txtCtrl.GetValue()\n if int_str:\n self.int_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.int_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if spin_str:\n self.spin_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.spin_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if tau_str:\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if out_str:\n self.output_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.output_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n \n direction = {}\n direction['kx'] = num_kx\n direction['ky'] = num_ky\n direction['kz'] = num_kz\n hkl_interval = [num_hklmin, num_hklmax, int(self.hkl_steps_ctrl.GetValue())]\n w_interval = [num_wmin, num_wmax, int(self.w_steps_ctrl.GetValue())]\n \n tau_text = ''\n try:\n tau_file = open(tau_str,'r')\n tau_text = tau_file.read()\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n\n items = tau_text.split()\n if len(items)%3 and not len(items):\n failed = True\n\n tau_list = []\n i = 0\n while not failed and i < len(items)-3:\n tau1, tau2, tau3 = None, None, None\n try:\n tau1 = float(items[i])\n tau2 = float(items[i+1])\n tau3 = float(items[i+2])\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n tau_list.append([tau1,tau2,tau3])\n i+=3\n \n self.Refresh()\n# self.window.Show(True,True)\n \n plotstats = [zmin, zmax, colorbar_bool]\n \n return failed, hkl_interval, w_interval, tau_list, direction, num_temp, sphavg_bool, plotstats", "def testValid(self):\n validate = timing_util.ValidateMeasurementsFlag\n self.assertIs(validate([]), True)\n self.assertIs(validate(['none']), True)\n self.assertIs(validate(['end_to_end_runtime']), True)\n self.assertIs(validate(['runtimes']), True)\n self.assertIs(validate(['timestamps']), True)\n self.assertIs(validate(['end_to_end_runtime', 
'runtimes']), True)\n self.assertIs(validate(['end_to_end_runtime', 'timestamps']), True)\n self.assertIs(validate(['runtimes', 'timestamps']), True)\n self.assertIs(\n validate(['end_to_end_runtime', 'runtimes', 'timestamps']), True)", "def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )", "def _data_params_validation(self) -> None:\n extra_regressor_names = set(self.params._reqd_regressor_names)\n # univariate case\n if self.data.is_univariate():\n if len(extra_regressor_names) != 0:\n msg = (\n f\"Missing data for extra regressors: {self.params._reqd_regressor_names}! \"\n \"Please include the missing regressors in `data`.\"\n )\n raise ValueError(msg)\n # multivariate case\n else:\n value_cols = set(self.data.value.columns)\n if \"y\" not in value_cols:\n msg = \"`data` should contain a column called `y` representing the responsive value.\"\n raise ValueError(msg)\n if not extra_regressor_names.issubset(value_cols):\n msg = f\"`data` should contain all columns listed in {extra_regressor_names}.\"\n raise ValueError(msg)\n # validate cap\n if (self.params.cap is True) and (\"cap\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `cap` representing the cap when `cap = True`.\"\n _error_msg(msg)\n # validate floor\n if (self.params.floor is True) and (\"floor\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `floor` representing the floor when `floor = True`.\"\n _error_msg(msg)", "def _validateInputs(self):\n if self.args[\"Counties\"] == [] and self.args[\"BBox\"] == None:\n raise Exception(\"Invalid arguments provided. Must provide either a geographical bounding box or a list of counties.\")\n\n if self.args[\"StartDateTime\"] > self.args[\"EndDateTime\"]:\n raise Exception(\"Invalid arguments provided. StartDateTime cannot be after EndDateTime\")", "def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,\n self.memory,\n self.accelerator_type,\n self.accelerator_count)", "def _check_params(self):\n if self.n_estimators <= 0:\n raise ValueError(\"n_estimators must be greater than 0 but \"\n \"was %r\" % self.n_estimators)\n\n if self.learning_rate <= 0.0:\n raise ValueError(\"learning_rate must be greater than 0 but \"\n \"was %r\" % self.learning_rate)\n\n if (self.loss not in self._SUPPORTED_LOSS\n or self.loss not in LOSS_FUNCTIONS):\n raise ValueError(\"Loss '{0:s}' not supported. 
\".format(self.loss))\n\n if self.loss == 'deviance':\n loss_class = (MultinomialDeviance\n if len(self.classes_) > 2\n else BinomialDeviance)\n else:\n loss_class = LOSS_FUNCTIONS[self.loss]\n\n if self.loss in ('huber', 'quantile'):\n self.loss_ = loss_class(self.n_classes_, self.alpha)\n else:\n self.loss_ = loss_class(self.n_classes_)\n\n if not (0.0 < self.subsample <= 1.0):\n raise ValueError(\"subsample must be in (0,1] but \"\n \"was %r\" % self.subsample)\n\n if self.init is not None:\n if isinstance(self.init, six.string_types):\n if self.init not in INIT_ESTIMATORS:\n raise ValueError('init=\"%s\" is not supported' % self.init)\n else:\n if (not hasattr(self.init, 'fit')\n or not hasattr(self.init, 'predict')):\n raise ValueError(\"init=%r must be valid BaseEstimator \"\n \"and support both fit and \"\n \"predict\" % self.init)\n\n if not (0.0 < self.alpha < 1.0):\n raise ValueError(\"alpha must be in (0.0, 1.0) but \"\n \"was %r\" % self.alpha)\n\n if isinstance(self.max_features, six.string_types):\n if self.max_features == \"auto\":\n # if is_classification\n if self.n_classes_ > 1:\n max_features = max(1, int(np.sqrt(self.n_features_)))\n else:\n # is regression\n max_features = self.n_features_\n elif self.max_features == \"sqrt\":\n max_features = max(1, int(np.sqrt(self.n_features_)))\n elif self.max_features == \"log2\":\n max_features = max(1, int(np.log2(self.n_features_)))\n else:\n raise ValueError(\"Invalid value for max_features: %r. \"\n \"Allowed string values are 'auto', 'sqrt' \"\n \"or 'log2'.\" % self.max_features)\n elif self.max_features is None:\n max_features = self.n_features_\n elif isinstance(self.max_features, (numbers.Integral, np.integer)):\n max_features = self.max_features\n else: # float\n if 0. < self.max_features <= 1.:\n max_features = max(int(self.max_features *\n self.n_features_), 1)\n else:\n raise ValueError(\"max_features must be in (0, n_features]\")\n\n self.max_features_ = max_features", "def state_wait_validate(cfg, app, win, events):", "def _check_inputs(self):\n\n self._check_resident_prefs()\n self._check_hospital_prefs()", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def _validate_input(self):\n self.data.validate()\n self.meta_hybridizer.validate_input()", "def _validateInputArguments(self):\n\n # Verify input arguments types\n if not isinstance(self._verbose, bool):\n raise WrongTypeForInputParameter(type(self._verbose),\n 'verbose', 'bool')\n\n if not isinstance(self._include_close_feature, bool):\n raise WrongTypeForInputParameter(type(self._include_close_feature),\n 'include_close_feature', 'bool')\n\n if not isinstance(self._include_volume_feature, bool):\n raise WrongTypeForInputParameter(\n type(self._include_volume_feature), 'include_volume_feature',\n 'bool')\n\n # Make columns case insensitive\n self._input_data.columns = \\\n [c.lower() for c in self._input_data.columns]\n\n # Verify that features have been included and required data are\n # available\n if self._ti_features is 
None:\n self._ti_features = []\n\n if len(self._ti_features) == 0 and not self._include_close_feature \\\n and not self._include_volume_feature:\n raise NoFeaturesSelectedForMLData(\n ti_features=self._ti_features,\n include_close_feature=self._include_close_feature,\n include_volume_feature=self._include_volume_feature)\n\n if 'close' not in self._input_data.columns:\n raise InputDataMissingForMLData('close')\n\n if self._include_volume_feature and \\\n 'volume' not in self._input_data.columns:\n raise InputDataMissingForMLData('volume')\n\n # Validate ti_features format\n supported_indicators = []\n for item in ALL_TI_FEATURES:\n supported_indicators.append(item['ti'])\n\n if isinstance(self._ti_features, list) and len(self._ti_features) > 0:\n for item in self._ti_features:\n\n for key in item.keys():\n if key not in ['ti', 'kwargs']:\n raise WrongTypeForInputParameter(\n self._ti_features, 'ti_features',\n '[{\\'ti\\': \\'indicator_class_name\\', \\'kwargs\\':' +\n ' {...}, ...]')\n\n if 'ti' not in item.keys() or 'kwargs' not in item.keys():\n raise WrongTypeForInputParameter(\n self._ti_features, 'ti_features',\n '[{\\'ti\\': \\'indicator_class_name\\', \\'kwargs\\':' +\n ' {...}, ...]')\n\n if item['ti'] not in supported_indicators:\n raise WrongValueForInputParameter(\n item['ti'], 'ti_features[\\'ti\\']',\n str(supported_indicators))\n\n if not isinstance(item['kwargs'], dict):\n raise WrongTypeForInputParameter(\n self._ti_features, 'ti_features',\n '[{\\'ti\\': \\'indicator_class_name\\', \\'kwargs\\': ' +\n '{...}, ...]')\n\n elif isinstance(self._ti_features, list) and \\\n len(self._ti_features) == 0:\n pass\n\n else:\n raise WrongTypeForInputParameter(\n self._ti_features, 'ti_features',\n '[{\\'ti\\': \\'indicator_class_name\\', \\'kwargs\\': ' +\n '{...}, ...]')\n\n # Verify that enough input rows have been given\n if len(self._input_data.index) - 1 <= 0:\n raise NotEnoughInputData(\n '', '> 1',\n len(self._input_data.index),\n 'Not enough input data. 
Minimum required data are '\n '(<req_data_num>), but (<data_num>) found.')\n\n # Construct and validate the indicators set\n if self._verbose:\n print('\\nIndicators set (ML data features)')\n\n for indicator in self._ti_features:\n\n if self._verbose:\n print('- create indicator instance:', indicator)\n\n self._indicators_set.append(eval(indicator['ti'])(\n input_data=self._input_data, **indicator['kwargs']))", "def check_params(self, name, fs_in, fs_out, window):\n if not isinstance(name, str):\n raise TypeError('name must be a string, not %s' % name)\n if fs_in <= 0:\n raise ValueError('fs_in should not be less than 0.')\n if fs_out <= 0:\n raise ValueError('fs_out should not be less than 0.')\n if window <= 0:\n raise ValueError('window must be greater than than 0.')", "def validate(self):\n variables = ['bottomDepth', 'layerThickness', 'maxLevelCell',\n 'temperature', 'salinity']\n compare_variables(\n test_case=self, variables=variables,\n filename1='initial_state/initial_state.nc')\n\n variables = ['temperature', 'layerThickness']\n compare_variables(\n test_case=self, variables=variables,\n filename1='forward/output/output.0001-01-01_00.00.00.nc')\n\n if self.with_particles:\n # just do particle validation at coarse res\n variables = [\n 'xParticle', 'yParticle', 'zParticle', 'zLevelParticle',\n 'buoyancyParticle', 'indexToParticleID', 'currentCell',\n 'transfered', 'numTimesReset']\n compare_variables(test_case=self, variables=variables,\n filename1='forward/analysis_members/'\n 'lagrPartTrack.0001-01-01_00.00.00.nc')\n\n timers = ['init_lagrPartTrack', 'compute_lagrPartTrack',\n 'write_lagrPartTrack', 'restart_lagrPartTrack',\n 'finalize_lagrPartTrack']\n compare_timers(self, timers, rundir1='forward')", "def validate_params(self) -> None:\n # cap must be given when using logistic growth\n if (self.growth == \"logistic\") and (self.cap is False):\n msg = \"Capacity must be provided for logistic growth\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If custom_seasonalities passed, ensure they contain the required keys.\n reqd_seasonality_keys = [\"name\", \"period\", \"fourier_order\"]\n if not all(\n req_key in seasonality\n for req_key in reqd_seasonality_keys\n for seasonality in self.custom_seasonalities\n ):\n msg = f\"Custom seasonality dicts must contain the following keys:\\n{reqd_seasonality_keys}\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If extra_regressors passed, ensure they contain the required keys.\n all_regressor_keys = {\"name\", \"prior_scale\", \"mode\"}\n for regressor in self.extra_regressors:\n if not isinstance(regressor, dict):\n msg = f\"Elements in `extra_regressor` should be a dictionary but receives {type(regressor)}.\"\n _error_msg(msg)\n if \"name\" not in regressor:\n msg = \"Extra regressor dicts must contain the following keys: 'name'.\"\n _error_msg(msg)\n if not set(regressor.keys()).issubset(all_regressor_keys):\n msg = f\"Elements in `extra_regressor` should only contain keys in {all_regressor_keys} but receives {regressor.keys()}.\"\n _error_msg(msg)\n self._reqd_regressor_names = [\n regressor[\"name\"] for regressor in self.extra_regressors\n ]\n # check floor and cap\n if (self.cap is not False) and (\"cap\" not in self._reqd_cap_floor_names):\n self._reqd_cap_floor_names.append(\"cap\")\n if self.floor is not False and (\"floor\" not in self._reqd_cap_floor_names):\n self._reqd_cap_floor_names.append(\"floor\")", "def _validate_parameters(self, epochs, log_interval):\n\n if not epochs > 0:\n msg = (\n \"The number of 
training epochs = {} should be strictly\"\n \" positive.\"\n )\n self.logger.error(msg.format(epochs))\n raise ValueError(msg.format(epochs))\n\n if not log_interval > 0:\n msg = (\n \"The number of batches to wait before printting the\"\n \" training status should be strictly positive, but got {}\"\n \" instead.\"\n )\n self.logger.error(msg.format(log_interval))\n raise ValueError(msg.format(log_interval))\n\n if not 0 < self.shrinkage_rate <= 1:\n msg = (\n \"The shrinkage rate should be in the range (0, 1], but got\"\n \" {} instead.\"\n )\n self.logger.error(msg.format(self.shrinkage_rate))\n raise ValueError(msg.format(self.shrinkage_rate))", "def onCheckParameters(self, evt): \n \n print(\"version\", self.config.version)\n \n if isinstance(self.config.iSPV, ( int, long )): pass\n else: \n msg = (\"SPV value should be an integer!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iScanTime, ( int, float )): pass\n else: \n msg = (\"Scan time value should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iStartVoltage, ( int, float )): pass\n else: \n msg = (\"Start voltage should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iEndVoltage, ( int, float )): pass\n else: \n msg = (\"End voltage should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iStepVoltage, ( int, float )): pass\n else: \n msg = (\"Step voltage should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if self.config.iActivationMode == \"Exponential\":\n if isinstance(self.config.iExponentPerct, ( int, float )): pass\n else: \n msg = (\"Exponential % value should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iExponentIncre, ( int, float )): pass\n else: \n msg = (\"Exponential increment value should be an float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n elif self.config.iActivationMode == \"Boltzmann\":\n if isinstance(self.config.iBoltzmann, ( int, float )): pass\n else: \n msg = (\"Boltzmann offset value should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n return False\n \n \n if (abs(self.config.iEndVoltage) <= abs(self.config.iStartVoltage)):\n msg = ('End voltage has to be larger than starting voltage')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n return\n \n if (abs(self.config.iEndVoltage) > 200):\n msg = ('The highest possible voltage is 200 V. Set to default: 200')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iEndVoltage = 200\n self.view.panelControls.endVoltage_input.SetValue(str(self.config.iEndVoltage))\n \n if (abs(self.config.iStartVoltage) < 0):\n msg = ('The lowest possible voltage is 0 V. 
Set to default: 0')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iStartVoltage = 0\n self.view.panelControls.startVoltage_input.SetValue(str(self.config.iStartVoltage))\n \n if self.config.iSPV <= 0:\n msg = ('SPV must be larger than 0! Set to default: 3')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iSPV = 3\n self.view.panelControls.spv_input.SetValue(str(self.config.iSPV))\n \n if self.config.iScanTime <= 0:\n msg = ('Scan time must be larger than 0! Set to default: 5')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iScanTime = 5\n self.view.panelControls.scanTime_input.SetValue(str(self.config.iScanTime))\n\n if self.config.iActivationMode == \"Exponential\":\n if self.config.iExponentPerct < 0:\n msg = ('Exponential % must be larger or equal to 0! Set to default: 0')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentPerct = 0\n elif self.config.iExponentPerct >= 100:\n msg = ('Exponential % must be smaller than 100! Set to default: 0')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentPerct = 0\n self.view.panelControls.exponentialPerct_input.SetValue(str(self.config.iExponentPerct))\n \n if self.config.iExponentIncre <= 0:\n msg = ('Exponential increment must be larger than 0! Set to default: 0.01')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentIncre = 0.01\n elif self.config.iExponentIncre > 0.075:\n msg = ('Exponential increment must be smaller than 0.075! Set to default: 0.075')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentIncre = 0.075\n self.view.panelControls.exponentialIncrm_input.SetValue(str(self.config.iExponentIncre))\n elif self.config.iActivationMode == \"Boltzmann\":\n if self.config.iBoltzmann < 10:\n msg = ('Boltzmann offset must be larger than 10! Set to default: 10')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg,\n type=\"Error\")\n self.config.iBoltzmann = 10\n elif self.config.iBoltzmann >= 100:\n msg = ('Boltzmann offset must be smaller than 100! 
Set to default: 25')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg,\n type=\"Error\")\n self.config.iBoltzmann = 25\n self.view.panelControls.boltzmann_input.SetValue(str(self.config.iBoltzmann))\n \n # All good\n return True", "def state_failsafe_validate(cfg, app, win, events):", "def validate_inputs(scenario_id, subscenarios, subproblem, stage, conn):\n\n # TODO: check that hours in full period is within x and y\n # (\"within\" check or \"validate\" check in param definition returns obscure\n # error message that isn't helpful).\n\n periods = get_inputs_from_database(\n scenario_id, subscenarios, subproblem, stage, conn\n )\n\n df = cursor_to_df(periods)\n\n # Get expected dtypes\n expected_dtypes = get_expected_dtypes(conn=conn, tables=[\"inputs_temporal_periods\"])\n # Hard-code data type for hours_in_period_timepoints\n expected_dtypes[\"hours_in_period_timepoints\"] = \"numeric\"\n\n # Check dtypes\n dtype_errors, error_columns = validate_dtypes(df, expected_dtypes)\n write_validation_to_database(\n conn=conn,\n scenario_id=scenario_id,\n subproblem_id=subproblem,\n stage_id=stage,\n gridpath_module=__name__,\n db_table=\"inputs_temporal_periods\",\n severity=\"High\",\n errors=dtype_errors,\n )\n\n # Check valid numeric columns are non-negative\n numeric_columns = [c for c in df.columns if expected_dtypes[c] == \"numeric\"]\n valid_numeric_columns = set(numeric_columns) - set(error_columns)\n write_validation_to_database(\n conn=conn,\n scenario_id=scenario_id,\n subproblem_id=subproblem,\n stage_id=stage,\n gridpath_module=__name__,\n db_table=\"inputs_temporal_periods\",\n severity=\"Mid\",\n errors=validate_values(df, valid_numeric_columns, \"period\", min=0),\n )", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('rw', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Well radius rw must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Well radius rw must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n return(flag, warnings) # End Function", "def __validate():\n # TODO: implement" ]
[ "0.6713494", "0.6423591", "0.6374488", "0.6332665", "0.62544966", "0.61950964", "0.61750597", "0.6171884", "0.6162123", "0.6159163", "0.6152436", "0.6104958", "0.6092222", "0.60662305", "0.6036017", "0.60181445", "0.59977424", "0.59622264", "0.59588736", "0.5957072", "0.5951476", "0.59455925", "0.5941615", "0.59283644", "0.5917428", "0.59117204", "0.59065205", "0.59055334", "0.58845216", "0.5877637" ]
0.6468564
1
Call State._instantiate_input_states to instantiate orderedDict of inputState(s). This is a stub, implemented to allow Mechanism subclasses to override _instantiate_input_states.
def _instantiate_input_states(self, context=None):
    from PsyNeuLink.Components.States.InputState import _instantiate_input_states
    _instantiate_input_states(owner=self, context=context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwds):\n if args or kwds:\n super(StateInstantiation, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.state_path is None:\n self.state_path = ''\n if self.state_class is None:\n self.state_class = ''\n if self.initial_state_name is None:\n self.initial_state_name = ''\n if self.input_keys is None:\n self.input_keys = []\n if self.output_keys is None:\n self.output_keys = []\n if self.cond_outcome is None:\n self.cond_outcome = []\n if self.cond_transition is None:\n self.cond_transition = []\n if self.behavior_class is None:\n self.behavior_class = ''\n if self.parameter_names is None:\n self.parameter_names = []\n if self.parameter_values is None:\n self.parameter_values = []\n if self.position is None:\n self.position = [0.] * 2\n if self.outcomes is None:\n self.outcomes = []\n if self.transitions is None:\n self.transitions = []\n if self.autonomy is None:\n self.autonomy = []\n if self.userdata_keys is None:\n self.userdata_keys = []\n if self.userdata_remapping is None:\n self.userdata_remapping = []\n else:\n self.state_path = ''\n self.state_class = ''\n self.initial_state_name = ''\n self.input_keys = []\n self.output_keys = []\n self.cond_outcome = []\n self.cond_transition = []\n self.behavior_class = ''\n self.parameter_names = []\n self.parameter_values = []\n self.position = [0.] * 2\n self.outcomes = []\n self.transitions = []\n self.autonomy = []\n self.userdata_keys = []\n self.userdata_remapping = []", "def init_states(self, batch_size: int) -> NestedMap:\n raise NotImplementedError('Abstract method')", "def load_state_dict(self, state_dict):\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(EncoderImagePrecomp, self).load_state_dict(new_state)", "def initialize(self,inputDict):\n pass", "def initializeFromDict(self, inputDict):\n for idx, val in enumerate(inputDict['outcome']):\n self.mapping[val] = inputDict['state'][idx]\n self.values.add(val)\n\n self.checkDistParams()", "def __init__(self, n_states: int, n_actions: int):\n self._p = {s: {a: [] for a in range(n_actions)} for s in range(n_states)}", "def fromState(state):", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def __init__(self,\n outcomes=[],\n input_keys=[],\n output_keys=[]):\n smach.state.State.__init__(self, outcomes, input_keys, output_keys)\n\n self.userdata = smach.UserData()\n \"\"\"Userdata to be passed to child states.\"\"\"\n\n # Callback lists\n self._start_cbs = []\n self._transition_cbs = []\n self._termination_cbs = []", "def initializeFromDict(self, inputDict):\n pass", "def setup_states(self, state_dict, start_state):\n self.state_dict = state_dict\n self.state_name = start_state\n self.state = self.state_dict[self.state_name]()", "def __init__(self):\n self.inputs = {}", "def initialize_state(self):\n raise NotImplementedError()", "def __init__(self,ParamFunctionStateTuples):\n self.mDict = dict()\n for stateInit,param,func,stateFinal in ParamFunctionStateTuples:\n assert param not 
in stateInit\n self.mDict[param] = StateDict.EmitObj(stateInit,func,stateFinal)", "def load_from_state_dict(self, state_dict):\n raise NotImplementedError", "def load_state_dict(self, state_dict):\n self.XY_net.load_state_dict(state_dict['XY_net'])\n self.XY_optimizer_minee.load_state_dict(\n state_dict['XY_optimizer_minee'])\n self.X_net.load_state_dict(state_dict['X_net'])\n self.X_optimizer_minee.load_state_dict(state_dict['X_optimizer_minee'])\n self.Y_net.load_state_dict(state_dict['Y_net'])\n self.Y_optimizer_minee.load_state_dict(state_dict['Y_optimizer_minee'])\n self.X = state_dict['X']\n self.Y = state_dict['Y']\n if 'lr' in state_dict:\n self.lr = state_dict['lr']\n if 'batch_size' in state_dict:\n self.batch_size = state_dict['batch_size']\n if 'ref_batch_factor' in state_dict:\n self.ref_batch_factor = state_dict['ref_batch_factor']", "def state_dict(self, *args, **kwargs):\n return self.module.state_dict(*args, **kwargs)", "def init_weights_and_state(self, input_signature):\n if self._mode == 'predict':\n cache_signature = input_signature[4:6]\n self.state = self._fast_inference_init_state(cache_signature)", "def load_state_dict(self, arg):\n self.TrajectoryAutoencoder.load_state_dict(torch.load(arg))", "def ordered_real_state_space(a2_data, py_order, a2_order):\n aux_dic = OrderedDict()\n for py_index, key in enumerate(py_order):\n if key in a2_data:\n try:\n a2_index = a2_order.index(key)\n except ValueError:\n a2_index = None\n aux_dic[StateParPickable(key, py_index, a2_index)] = a2_data[key]\n\n return aux_dic", "def _generate_initial_state(self, inputs, batch_size_tensor, state_size, dtype):\n if batch_size_tensor is None or dtype is None:\n raise ValueError(\n 'batch_size and dtype cannot be None while constructing initial state: '\n 'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))\n\n def create_init_values(unnested_state_size):\n flat_dims = tensor_shape.TensorShape(unnested_state_size).as_list()\n init_state_size = [batch_size_tensor] + flat_dims\n if self.learned_init == 'dynamic':\n return inputs[:,0,:] @ self.w + self.b\n elif self.learned_init == 'static':\n # Broadcast learned init vector to batch size\n return tf.broadcast_to(self.b, init_state_size)\n else:\n return array_ops.zeros(init_state_size, dtype=dtype)\n\n if nest.is_nested(state_size):\n return nest.map_structure(create_init_values, state_size)\n else:\n return create_init_values(state_size)", "def load_state_dict(\n self,\n state_dict: Mapping[str, Any],\n *args,\n **kwargs,\n ) -> NamedTuple:\n return super().load_state_dict(state_dict, *args)", "def update_input_states(self, input_values):", "def _generate_initial_state(self, inputs, batch_size_tensor, state_size, dtype):\n if batch_size_tensor is None or dtype is None:\n raise ValueError(\n 'batch_size and dtype cannot be None while constructing initial state: '\n 'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))\n\n def create_init_values(unnested_state_size, s_i=0):\n flat_dims = tensor_shape.TensorShape(unnested_state_size).as_list()\n init_state_size = [batch_size_tensor] + flat_dims\n if self.learned_init == 'dynamic':\n return inputs[:,0,:] @ self.w[s_i] + self.b[s_i]\n elif self.learned_init == 'static':\n # Broadcast learned init vector to batch size\n return tf.broadcast_to(self.b[s_i], init_state_size)\n else:\n return array_ops.zeros(init_state_size, dtype=dtype)\n\n if nest.is_nested(state_size):\n return nest.map_structure(create_init_values, state_size, list(range(len(state_size))) )\n else:\n return 
create_init_values(state_size)", "def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass", "def make_initial_state(self):\n pass", "def load_state_dict(self, state_dict):\n self.epoch = state_dict['epoch']\n itr_pos = state_dict.get('iterations_in_epoch', 0)\n if itr_pos > 0:\n # fast-forward epoch iterator\n itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))\n if itr_pos < len(itr):\n self._next_epoch_itr = itr.skip(itr_pos)", "def __call__(self, inputs, states, **kwargs):\n raise NotImplementedError()", "def create_inputs(self):\n return {}" ]
[ "0.6572184", "0.6553156", "0.6509993", "0.63697404", "0.6302613", "0.6276372", "0.61691505", "0.6134963", "0.6134963", "0.6125563", "0.60870796", "0.60617393", "0.5963223", "0.59394586", "0.593735", "0.5896586", "0.5886525", "0.5884756", "0.5863762", "0.58548564", "0.5840322", "0.58155656", "0.58032477", "0.58014303", "0.579867", "0.5794976", "0.57268333", "0.5725487", "0.56967586", "0.56961536" ]
0.7393264
0
Call State._instantiate_parameter_states to instantiate a parameterState for each parameter in user_params. This is a stub, implemented to allow Mechanism subclasses to override _instantiate_parameter_states.
def _instantiate_parameter_states(self, context=None):
    from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states
    _instantiate_parameter_states(owner=self, context=context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, user_params):\n self.ParseParameters(\n toggle_param_name=self._TOGGLE_PARAM,\n required_param_names=self._REQUIRED_PARAMS,\n optional_param_names=self._OPTIONAL_PARAMS,\n user_params=user_params)", "def set_params(self, state_dicts):\n raise NotImplementedError", "def initialize_params(self, params):\n pass", "def _filter_params(self, params):\n\n # INPUT_STATES:\n try:\n input_states_spec = params[INPUT_STATES]\n except KeyError:\n pass\n else:\n # Convert input_states_spec to list if it is not one\n if not isinstance(input_states_spec, list):\n input_states_spec = [input_states_spec]\n # Get inputStates specified in paramClassDefaults\n default_input_states = self.paramClassDefaults[INPUT_STATES].copy()\n # Convert inputStates from paramClassDeafults to a list if it is not one\n if not isinstance(default_input_states, list):\n default_input_states = [default_input_states]\n # Add inputState specified in params to those in paramClassDefaults\n # Note: order is important here; new ones should be last, as paramClassDefaults defines the\n # the primary inputState which must remain first for the inputStates OrderedDictionary\n default_input_states.extend(input_states_spec)\n # Assign full set back to params_arg\n params[INPUT_STATES] = default_input_states\n\n # OUTPUT_STATES:\n try:\n output_states_spec = params[OUTPUT_STATES]\n except KeyError:\n pass\n else:\n # Convert output_states_spec to list if it is not one\n if not isinstance(output_states_spec, list):\n output_states_spec = [output_states_spec]\n # Get outputStates specified in paramClassDefaults\n default_output_states = self.paramClassDefaults[OUTPUT_STATES].copy()\n # Convert outputStates from paramClassDeafults to a list if it is not one\n if not isinstance(default_output_states, list):\n default_output_states = [default_output_states]\n # Add outputState specified in params to those in paramClassDefaults\n # Note: order is important here; new ones should be last, as paramClassDefaults defines the\n # the primary outputState which must remain first for the outputStates OrderedDictionary\n default_output_states.extend(output_states_spec)\n # Assign full set back to params_arg\n params[OUTPUT_STATES] = default_output_states", "def __init__(self, params: Iterable[nn.Parameter]):\n self.params = params\n self.param_states = [p.requires_grad for p in self.params]", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(StateInstantiation, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.state_path is None:\n self.state_path = ''\n if self.state_class is None:\n self.state_class = ''\n if self.initial_state_name is None:\n self.initial_state_name = ''\n if self.input_keys is None:\n self.input_keys = []\n if self.output_keys is None:\n self.output_keys = []\n if self.cond_outcome is None:\n self.cond_outcome = []\n if self.cond_transition is None:\n self.cond_transition = []\n if self.behavior_class is None:\n self.behavior_class = ''\n if self.parameter_names is None:\n self.parameter_names = []\n if self.parameter_values is None:\n self.parameter_values = []\n if self.position is None:\n self.position = [0.] 
* 2\n if self.outcomes is None:\n self.outcomes = []\n if self.transitions is None:\n self.transitions = []\n if self.autonomy is None:\n self.autonomy = []\n if self.userdata_keys is None:\n self.userdata_keys = []\n if self.userdata_remapping is None:\n self.userdata_remapping = []\n else:\n self.state_path = ''\n self.state_class = ''\n self.initial_state_name = ''\n self.input_keys = []\n self.output_keys = []\n self.cond_outcome = []\n self.cond_transition = []\n self.behavior_class = ''\n self.parameter_names = []\n self.parameter_values = []\n self.position = [0.] * 2\n self.outcomes = []\n self.transitions = []\n self.autonomy = []\n self.userdata_keys = []\n self.userdata_remapping = []", "def init_params(self):\n blah", "def state_dict(self, *args, **kwargs):\n destination = super().state_dict(*args, **kwargs)\n for name in self.lazy_parameter_names:\n if isinstance(getattr(self, name), UninitializedParameter):\n del destination[name]\n return destination", "def _init_parametric_user(cls) -> None:\n\n # For ParametricType grandchildren we have to deal with possible\n # TypeVar remapping and generally check for type sanity.\n\n ob = getattr(cls, '__orig_bases__', ())\n generic_params: list[type] = []\n\n for b in ob:\n if (\n isinstance(b, type)\n and not isinstance(b, GenericAlias)\n and issubclass(b, ParametricType)\n and b is not ParametricType\n ):\n raise TypeError(\n f'{cls.__name__}: missing one or more type arguments for'\n f' base {b.__name__!r}'\n )\n\n if not typing_inspect.is_generic_type(b):\n continue\n\n org = typing_inspect.get_origin(b)\n if not isinstance(org, type):\n continue\n if not issubclass(org, ParametricType):\n generic_params.extend(getattr(b, '__parameters__', ()))\n continue\n\n base_params = getattr(org, '__parameters__', ())\n base_non_type_params = getattr(org, '_non_type_params', {})\n args = typing_inspect.get_args(b)\n expected = len(base_params)\n if len(args) != expected:\n raise TypeError(\n f'{b.__name__} expects {expected} type arguments'\n f' got {len(args)}'\n )\n\n base_map = dict(cls._type_param_map)\n subclass_map = {}\n\n for i, arg in enumerate(args):\n if i in base_non_type_params:\n continue\n if not typing_inspect.is_typevar(arg):\n raise TypeError(\n f'{b.__name__} expects all arguments to be'\n f' TypeVars'\n )\n\n base_typevar = base_params[i]\n attr = base_map.get(base_typevar)\n if attr is not None:\n subclass_map[arg] = attr\n\n if len(subclass_map) != len(base_map):\n raise TypeError(\n f'{cls.__name__}: missing one or more type arguments for'\n f' base {org.__name__!r}'\n )\n\n cls._type_param_map = subclass_map\n\n cls._non_type_params = {\n i: p for i, p in enumerate(generic_params)\n if p not in cls._type_param_map\n }", "def from_params(self, params):\n raise NotImplementedError()", "def _get_state_args(\n self, source, mixed_permeate_properties, initialize_guess, state_args\n ):\n\n # assumptions\n if initialize_guess is None:\n initialize_guess = {}\n # TODO: enable deltaP guess when pressure drop is added\n if \"deltaP\" not in initialize_guess:\n initialize_guess[\"deltaP\"] = 0\n if \"solvent_recovery\" not in initialize_guess:\n initialize_guess[\"solvent_recovery\"] = 0.1\n if \"solute_recovery\" not in initialize_guess:\n initialize_guess[\"solute_recovery\"] = 0.1\n if \"cp_modulus\" not in initialize_guess:\n initialize_guess[\"cp_modulus\"] = 1\n\n if state_args is None:\n state_args = {}\n state_dict = source.define_port_members()\n\n for k in state_dict.keys():\n if state_dict[k].is_indexed():\n 
state_args[k] = {}\n for m in state_dict[k].keys():\n state_args[k][m] = state_dict[k][m].value\n else:\n state_args[k] = state_dict[k].value\n\n if \"flow_mol_phase_comp\" not in state_args.keys():\n raise ConfigurationError(\n f\"{self.__class__.__name__} initialization routine expects \"\n \"flow_mol_phase_comp as a state variable. Check \"\n \"that the property package supports this state \"\n \"variable or that the state_args provided to the \"\n \"initialize call includes this state variable\"\n )\n\n # slightly modify initial values for other state blocks\n state_args_retentate = deepcopy(state_args)\n state_args_permeate = deepcopy(state_args)\n\n state_args_retentate[\"pressure\"] += initialize_guess[\"deltaP\"]\n state_args_permeate[\"pressure\"] = mixed_permeate_properties.pressure.value\n for j in self.config.property_package.solvent_set:\n state_args_retentate[\"flow_mol_phase_comp\"][(\"Liq\", j)] *= (\n 1 - initialize_guess[\"solvent_recovery\"]\n )\n state_args_permeate[\"flow_mol_phase_comp\"][(\"Liq\", j)] *= initialize_guess[\n \"solvent_recovery\"\n ]\n for j in (\n self.config.property_package.solute_set\n | self.config.property_package.ion_set\n ):\n state_args_retentate[\"flow_mol_phase_comp\"][(\"Liq\", j)] *= (\n 1 - initialize_guess[\"solute_recovery\"]\n )\n state_args_permeate[\"flow_mol_phase_comp\"][(\"Liq\", j)] *= initialize_guess[\n \"solute_recovery\"\n ]\n\n state_args_interface_in = deepcopy(state_args)\n state_args_interface_out = deepcopy(state_args_retentate)\n\n for j in (\n self.config.property_package.solute_set\n | self.config.property_package.ion_set\n ):\n state_args_interface_in[\"flow_mol_phase_comp\"][\n (\"Liq\", j)\n ] *= initialize_guess[\"cp_modulus\"]\n state_args_interface_out[\"flow_mol_phase_comp\"][\n (\"Liq\", j)\n ] *= initialize_guess[\"cp_modulus\"]\n\n return {\n \"feed_side\": state_args,\n \"retentate\": state_args_retentate,\n \"permeate\": state_args_permeate,\n \"interface_in\": state_args_interface_in,\n \"interface_out\": state_args_interface_out,\n }", "def get_state_parameters(self):\n return self.__get_one_type_params(StateParameter)", "def __init__(self,ParamFunctionStateTuples):\n self.mDict = dict()\n for stateInit,param,func,stateFinal in ParamFunctionStateTuples:\n assert param not in stateInit\n self.mDict[param] = StateDict.EmitObj(stateInit,func,stateFinal)", "def set_user_parameters(self, **params: dict):\n\n assert params, \"params variable can't be None\"\n for p, val in params.items():\n setattr(self, p, val)\n self.construct_repr_length()", "def init_user_param_dict():\n\n i = 5\n for (name, latex_name) in config.user_params:\n user_params_index[name] = i\n user_params_latex[name] = latex_name\n i += 1", "def _validate_params(self, request_set, target_set=None, context=None):\n\n # Perform first-pass validation in Function.__init__():\n # - returns full set of params based on subclass paramClassDefaults\n super(Mechanism, self)._validate_params(request_set,target_set,context)\n\n params = target_set\n\n #region VALIDATE TIME SCALE\n try:\n param_value = params[TIME_SCALE]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n self.timeScale = timeScaleSystemDefault\n else:\n if isinstance(param_value, TimeScale):\n self.timeScale = params[TIME_SCALE]\n else:\n if self.prefs.verbosePref:\n print(\"Value for {0} ({1}) param of {2} must be of type {3}; default will be used: {4}\".\n format(TIME_SCALE, param_value, self.name, type(TimeScale), timeScaleSystemDefault))\n #endregion\n\n #region 
VALIDATE INPUT STATE(S)\n\n # MODIFIED 6/10/16\n # FIX: SHOULD CHECK LENGTH OF INPUT_STATES PARAM (LIST OF NAMES OR SPECIFICATION DICT) AGAINST LENGTH OF\n # FIX: self.variable 2D ARRAY AND COMPARE variable SPECS, IF PROVIDED, WITH CORRESPONDING ELEMENTS OF\n # FIX: self.variable 2D ARRAY\n try:\n param_value = params[INPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # INPUT_STATES not specified:\n # - set to None, so that it is set to default (self.variable) in instantiate_inputState\n # - if in VERBOSE mode, warn in instantiate_inputState, where default value is known\n params[INPUT_STATES] = None\n\n else:\n # INPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_inputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n # Note:\n # * number of inputStates is validated against length of the owner mechanism's execute method variable (EMV)\n # in instantiate_inputState, where an inputState is assigned to each item (value) of the EMV\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.InputState import InputState\n # If not valid...\n if not ((isclass(item) and (issubclass(item, InputState) or # InputState class ref\n issubclass(item, Projection))) or # Project class ref\n isinstance(item, InputState) or # InputState object\n isinstance(item, dict) or # InputState specification dict\n isinstance(item, ParamValueProjection) or # ParamValueProjection tuple\n isinstance(item, str) or # Name (to be used as key in inputStates dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.variable) in instantiate_inputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" InputState, specification dict or value, nor a list of dict of them; \"\n \"variable ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n INPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.variable,\n self.execute.__self__.name))\n i += 1\n params[INPUT_STATES] = param_value\n #endregion\n\n #region VALIDATE EXECUTE METHOD PARAMS\n try:\n function_param_specs = params[FUNCTION_PARAMS]\n except KeyError:\n if COMMAND_LINE in context:\n pass\n elif self.prefs.verbosePref:\n print(\"No params specified for {0}\".format(self.__class__.__name__))\n else:\n if not (isinstance(function_param_specs, dict)):\n raise MechanismError(\"{0} in {1} must be a dict of param specifications\".\n format(FUNCTION_PARAMS, self.__class__.__name__))\n # Validate params\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n for param_name, param_value in function_param_specs.items():\n try:\n default_value = self.paramInstanceDefaults[FUNCTION_PARAMS][param_name]\n except KeyError:\n raise MechanismError(\"{0} not recognized as a param of execute method for {1}\".\n format(param_name, self.__class__.__name__))\n if not ((isclass(param_value) and\n (issubclass(param_value, ParameterState) or\n issubclass(param_value, Projection))) or\n isinstance(param_value, ParameterState) or\n isinstance(param_value, Projection) or\n isinstance(param_value, dict) or\n isinstance(param_value, ParamValueProjection) or\n iscompatible(param_value, default_value)):\n 
params[FUNCTION_PARAMS][param_name] = default_value\n if self.prefs.verbosePref:\n print(\"{0} param ({1}) for execute method {2} of {3} is not a ParameterState, \"\n \"projection, ParamValueProjection, or value; default value ({4}) will be used\".\n format(param_name,\n param_value,\n self.execute.__self__.componentName,\n self.__class__.__name__,\n default_value))\n #endregion\n # FIX: MAKE SURE OUTPUT OF EXECUTE FUNCTION / SELF.VALUE IS 2D ARRAY, WITH LENGTH == NUM OUTPUT STATES\n\n #region VALIDATE OUTPUT STATE(S)\n\n # FIX: MAKE SURE # OF OUTPUTS == LENGTH OF OUTPUT OF EXECUTE FUNCTION / SELF.VALUE\n try:\n param_value = params[OUTPUT_STATES]\n\n except KeyError:\n if COMMAND_LINE in context:\n pass\n else:\n # OUTPUT_STATES not specified:\n # - set to None, so that it is set to default (self.value) in instantiate_outputState\n # Notes:\n # * if in VERBOSE mode, warning will be issued in instantiate_outputState, where default value is known\n # * number of outputStates is validated against length of owner mechanism's execute method output (EMO)\n # in instantiate_outputState, where an outputState is assigned to each item (value) of the EMO\n params[OUTPUT_STATES] = None\n\n else:\n # OUTPUT_STATES is specified, so validate:\n # If it is a single item or a non-OrderedDict, place in a list (for use here and in instantiate_outputState)\n if not isinstance(param_value, (list, OrderedDict)):\n param_value = [param_value]\n # Validate each item in the list or OrderedDict\n i = 0\n for key, item in param_value if isinstance(param_value, dict) else enumerate(param_value):\n from PsyNeuLink.Components.States.OutputState import OutputState\n # If not valid...\n if not ((isclass(item) and issubclass(item, OutputState)) or # OutputState class ref\n isinstance(item, OutputState) or # OutputState object\n isinstance(item, dict) or # OutputState specification dict\n isinstance(item, str) or # Name (to be used as key in outputStates dict)\n iscompatible(item, **{kwCompatibilityNumeric: True})): # value\n # set to None, so it is set to default (self.value) in instantiate_outputState\n param_value[key] = None\n if self.prefs.verbosePref:\n print(\"Item {0} of {1} param ({2}) in {3} is not a\"\n \" OutputState, specification dict or value, nor a list of dict of them; \"\n \"output ({4}) of execute method for {5} will be used\"\n \" to create a default outputState for {3}\".\n format(i,\n OUTPUT_STATES,\n param_value,\n self.__class__.__name__,\n self.value,\n self.execute.__self__.name))\n i += 1\n params[OUTPUT_STATES] = param_value", "def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)", "def _optimizer_state_init(opt_states):\n prefix_list = [\"moments\", \"accum\", \"moment1\", \"moment2\", \"lamb_m\", \"lamb_v\", \"mean_grad\",\n \"mean_square\", \"prev\"]\n for opt_param in opt_states:\n prefix = opt_param.name[:opt_param.name.find(\".\")]\n if opt_param.has_init and (prefix in prefix_list or opt_param.name == \"global_step\"):\n opt_param.init_data()", "def _gen_policy_params(self, state: State) -> Tensor:\n ...", "def _get_mechanism_param_values(self):\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n return dict((param, value.value) for param, value in 
self.paramsCurrent.items()\n if isinstance(value, ParameterState) )", "def set_state(self, params):\n self._param_store.set_state(self.best_params)", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['transition'] = self.transition\n paramDict['steadyStatePb'] = self.steadyStatePb\n return paramDict", "def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)", "def _set_training_params(self, params):\n self.lyapunov_hybrid_system.lyapunov_relu.load_state_dict(\n params[\"lyap_relu_params\"])\n if not self.R_options.fixed_R:\n self.R_options._variables = params[\"R_params\"].clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n self.lyapunov_hybrid_system.system.controller_network.\\\n load_state_dict(params[\"controller_params\"])", "def load_params(self):\n\n self.curr_ts_state = None\n\n # Get TS from param\n self.transition_system = import_ts_from_file(rospy.get_param('transition_system_textfile'))\n\n # Get monitored TS state model\n self.state_dimension_name = rospy.get_param(\"~state_dimension_name\", \"load\")\n\n # Get monitored action\n self.monitored_action = rospy.get_param(\"~monitored_action\", \"pick\")\n \n # Create dict to retrieve next state given current state and next action\n self.action_to_state = dict()\n for state in self.transition_system['state_models'][self.state_dimension_name]['nodes']:\n temp_dict = dict()\n for connected_state in self.transition_system['state_models'][self.state_dimension_name]['nodes'][state]['connected_to']:\n temp_dict.update({self.transition_system['state_models'][self.state_dimension_name]['nodes'][state]['connected_to'][connected_state]: connected_state})\n self.action_to_state.update({state: temp_dict})", "def __init__(self, params={}, verbosity=0, testing_level=1, testing_verbosity=1):\r\n self.verbosity = verbosity\r\n self.testing_unit = UnitTests.ParticleSwarmUnitTests(testing_level=testing_level, verbosity=testing_verbosity)\r\n\r\n for key, val in params.items():\r\n self.set(key, val) # invoke set so that all continuous checking for changed parameters happens only once\r\n # place\r", "def init_params(self, params, algo_input):\n if algo_input is None:\n raise AlgorithmError(\"EnergyInput instance is required.\")\n\n operator = algo_input.qubit_op\n\n qpe_params = params.get(QuantumAlgorithm.SECTION_KEY_ALGORITHM)\n num_time_slices = qpe_params.get(QPE.PROP_NUM_TIME_SLICES)\n paulis_grouping = qpe_params.get(QPE.PROP_PAULIS_GROUPING)\n expansion_mode = qpe_params.get(QPE.PROP_EXPANSION_MODE)\n expansion_order = qpe_params.get(QPE.PROP_EXPANSION_ORDER)\n num_ancillae = qpe_params.get(QPE.PROP_NUM_ANCILLAE)\n\n # Set up initial state, we need to add computed num qubits to params\n init_state_params = params.get(QuantumAlgorithm.SECTION_KEY_INITIAL_STATE)\n init_state_params['num_qubits'] = operator.num_qubits\n init_state = get_initial_state_instance(init_state_params['name'])\n init_state.init_params(init_state_params)\n\n # Set up iqft, we need to add num qubits to params which is our num_ancillae bits here\n iqft_params = params.get(QuantumAlgorithm.SECTION_KEY_IQFT)\n iqft_params['num_qubits'] = num_ancillae\n iqft = get_iqft_instance(iqft_params['name'])\n iqft.init_params(iqft_params)\n\n self.init_args(\n operator, init_state, iqft, num_time_slices, num_ancillae,\n paulis_grouping=paulis_grouping, expansion_mode=expansion_mode,\n expansion_order=expansion_order)", "def unfold_params(params, nstates=2):\n init = 
params[:nstates]\n # emiss = params[nstates:nstates * 5].reshape((nstates, 4))\n trans = params[nstates:(nstates * (nstates + 1))].reshape((nstates, nstates))\n scale = params[-1]\n return init, trans, scale", "def fromState(state):", "def _instantiate_input_states(self, context=None):\n from PsyNeuLink.Components.States.InputState import _instantiate_input_states\n _instantiate_input_states(owner=self, context=context)" ]
[ "0.63577443", "0.6197192", "0.6121723", "0.60824007", "0.5890091", "0.5702104", "0.5656338", "0.55621827", "0.5540772", "0.5516861", "0.54957217", "0.5488176", "0.5467186", "0.54659873", "0.5458149", "0.545528", "0.5446187", "0.5445284", "0.542465", "0.5403631", "0.53810585", "0.534862", "0.53453946", "0.53236437", "0.5322527", "0.53071433", "0.52823913", "0.5263463", "0.5225944", "0.52237487" ]
0.73195183
0
Call State._instantiate_output_states to instantiate an orderedDict of outputState(s). This is a stub, implemented to allow Mechanism subclasses to override _instantiate_output_states
def _instantiate_output_states(self, context=None):
    from PsyNeuLink.Components.States.OutputState import _instantiate_output_states
    _instantiate_output_states(owner=self, context=context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _assign_output_states(self):\n for mech in self.terminalMechanisms.mechanisms:\n self.outputStates[mech.name] = mech.outputStates", "def init_output_dict(self):\n return {\n \"outputs\": torch.FloatTensor(),\n \"pred_probs\": torch.FloatTensor(),\n \"labels\": torch.LongTensor(),\n }", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(StateInstantiation, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.state_path is None:\n self.state_path = ''\n if self.state_class is None:\n self.state_class = ''\n if self.initial_state_name is None:\n self.initial_state_name = ''\n if self.input_keys is None:\n self.input_keys = []\n if self.output_keys is None:\n self.output_keys = []\n if self.cond_outcome is None:\n self.cond_outcome = []\n if self.cond_transition is None:\n self.cond_transition = []\n if self.behavior_class is None:\n self.behavior_class = ''\n if self.parameter_names is None:\n self.parameter_names = []\n if self.parameter_values is None:\n self.parameter_values = []\n if self.position is None:\n self.position = [0.] * 2\n if self.outcomes is None:\n self.outcomes = []\n if self.transitions is None:\n self.transitions = []\n if self.autonomy is None:\n self.autonomy = []\n if self.userdata_keys is None:\n self.userdata_keys = []\n if self.userdata_remapping is None:\n self.userdata_remapping = []\n else:\n self.state_path = ''\n self.state_class = ''\n self.initial_state_name = ''\n self.input_keys = []\n self.output_keys = []\n self.cond_outcome = []\n self.cond_transition = []\n self.behavior_class = ''\n self.parameter_names = []\n self.parameter_values = []\n self.position = [0.] * 2\n self.outcomes = []\n self.transitions = []\n self.autonomy = []\n self.userdata_keys = []\n self.userdata_remapping = []", "def __init__(self, n_states: int, n_actions: int):\n self._p = {s: {a: [] for a in range(n_actions)} for s in range(n_states)}", "def __init__(self,\n outcomes=[],\n input_keys=[],\n output_keys=[]):\n smach.state.State.__init__(self, outcomes, input_keys, output_keys)\n\n self.userdata = smach.UserData()\n \"\"\"Userdata to be passed to child states.\"\"\"\n\n # Callback lists\n self._start_cbs = []\n self._transition_cbs = []\n self._termination_cbs = []", "def ordered_real_state_space(a2_data, py_order, a2_order):\n aux_dic = OrderedDict()\n for py_index, key in enumerate(py_order):\n if key in a2_data:\n try:\n a2_index = a2_order.index(key)\n except ValueError:\n a2_index = None\n aux_dic[StateParPickable(key, py_index, a2_index)] = a2_data[key]\n\n return aux_dic", "def __init__(self, name, inputs, outputs, strategy_func):\n assert str(name) == name\n assert not [i for i in inputs if not isinstance(i, Variable)]\n assert not [o for o in outputs if not isinstance(o, Variable)]\n\n self.name = name\n self.inputs = inputs\n self.outputs = outputs\n\n # Create an array with all possible combinations of states of inputs\n input_states = list(product(*[i.states for i in inputs]))\n self.input_states = input_states\n\n # We need arrays to hold the results of each output (note: they could\n # be different sizes)\n self.per_state_results = [\n np.zeros((len(input_states), o.n_states),\n dtype=float) for o in outputs]\n\n # Create a lookup table based on the strategy function. 
Then we can\n # discard the function (very useful if we're interested in pickling).\n self.lookup = {}\n\n for i, states in enumerate(input_states):\n # Get out relevant states to fill out\n results = [c[i] for c in self.per_state_results]\n\n # Send arguments as (input, input, ..., output, output, ...)\n args = [s for s in states]\n args.extend(results)\n\n strategy_func(*args)\n\n # Each of the output distributions must sum to 1.0\n for r in results:\n if not np.isclose(r.sum(), 1.0):\n raise RuntimeError(\n \"Probabilities must add to 1.0: {}\".format(r))\n\n # Keep this around\n self.lookup[states] = results", "def init_states(self, batch_size: int) -> NestedMap:\n raise NotImplementedError('Abstract method')", "def __init__(self,ParamFunctionStateTuples):\n self.mDict = dict()\n for stateInit,param,func,stateFinal in ParamFunctionStateTuples:\n assert param not in stateInit\n self.mDict[param] = StateDict.EmitObj(stateInit,func,stateFinal)", "def finalize_output_dict(self):\n self.output_dict = {\n key: torch.cat(value).numpy() for key, value in self.output_dict.items()\n }", "def state_dict(self, *args, **kwargs):\n return self.module.state_dict(*args, **kwargs)", "def load_state_dict(self, state_dict):\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(EncoderImagePrecomp, self).load_state_dict(new_state)", "def _update_output_states(self, runtime_params=None, time_scale=None, context=None):\n for i in range(len(self.outputStates)):\n state = list(self.outputStates.values())[i]\n state.update(params=runtime_params, time_scale=time_scale, context=context)\n # self.outputValue[i] = state.value\n\n # Assign value of each outputState to corresponding item in self.outputValue\n self.outputValue = list(state.value for state in list(self.outputStates.values()))", "def get_output(self, state: NestedMap) -> NestedMap:\n raise NotImplementedError('Abstract method')", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def _load_state_dict(self, state: dict):\n for o, dct in zip(self.optimizers, state.get('optimizers', [])):\n o.load_state_dict(dct)\n for s, dct in zip(self.schedulers, state.get('schedulers', [])):\n s.load_state_dict(dct)", "def finalize_output_dict(self, output_dict):\n return {key: output_dict[key].cpu().numpy() for key in output_dict.keys()}", "def state_dict(self) -> dict:\n _state_dict: dict[str, Any] = super().state_dict\n _state_dict[\"rng_state\"] = self.rng.get_state()\n _state_dict[\"seed\"] = self.seed\n _state_dict[\"strategy\"] = self.strategy.state_dict\n return _state_dict", "def _inference_initial_state(self, encoder_outputs, encoder_decoder_attention_bias):\n\n with tf.variable_scope(\"inference_initial_state\"):\n n_layers = self.attention_layers\n n_heads = self.attention_heads\n batch_size = tf.shape(encoder_outputs)[0]\n n_features = self.num_mels + self.num_freq\n\n state = {\n \"iteration\": tf.constant(0),\n \"inputs\": tf.zeros([batch_size, 1, n_features * self.reduction_factor]),\n \"finished\": tf.cast(tf.zeros([batch_size]), tf.bool),\n \"alignment_positions\": tf.zeros([n_layers, batch_size, n_heads, 1],\n dtype=tf.int32),\n \"outputs\": {\n \"spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.zeros([batch_size, 0, 
self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.zeros([0, 0, 0, 0, 0])\n ],\n \"stop_token_logits\": tf.zeros([batch_size, 0, 1 * self.reduction_factor]),\n \"lengths\": tf.zeros([batch_size], dtype=tf.int32),\n \"mag_spec\": tf.zeros([batch_size, 0, self.num_freq * self.reduction_factor])\n },\n \"encoder_outputs\": encoder_outputs,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias\n }\n\n state_shape_invariants = {\n \"iteration\": tf.TensorShape([]),\n \"inputs\": tf.TensorShape([None, None, n_features * self.reduction_factor]),\n \"finished\": tf.TensorShape([None]),\n \"alignment_positions\": tf.TensorShape([n_layers, None, n_heads, None]),\n \"outputs\": {\n \"spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.TensorShape([None, None, None, None, None]),\n ],\n \"stop_token_logits\": tf.TensorShape([None, None, 1 * self.reduction_factor]),\n \"lengths\": tf.TensorShape([None]),\n \"mag_spec\": tf.TensorShape([None, None, None])\n },\n \"encoder_outputs\": encoder_outputs.shape,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias.shape\n }\n\n return state, state_shape_invariants", "def initialize_output_dict(self, label: Optional[str] = None):\n if label is not None or not self._does_output_dict_contain_info():\n for species in self.species_list:\n if label is None or species.label == label:\n if species.label not in self.output:\n self.output[species.label] = dict()\n if 'paths' not in self.output[species.label]:\n self.output[species.label]['paths'] = dict()\n path_keys = ['geo', 'freq', 'sp', 'composite']\n for key in path_keys:\n if key not in self.output[species.label]['paths']:\n self.output[species.label]['paths'][key] = ''\n if 'irc' not in self.output[species.label]['paths'] and species.is_ts:\n self.output[species.label]['paths']['irc'] = list()\n if 'job_types' not in self.output[species.label]:\n self.output[species.label]['job_types'] = dict()\n for job_type in list(set(self.job_types.keys())) + ['opt', 'freq', 'sp', 'composite', 'onedmin']:\n if job_type in ['rotors', 'bde']:\n # rotors could be invalidated due to many reasons,\n # also could be falsely identified in a species that has no torsional modes.\n self.output[species.label]['job_types'][job_type] = True\n else:\n self.output[species.label]['job_types'][job_type] = False\n keys = ['conformers', 'isomorphism', 'convergence', 'restart', 'errors', 'warnings', 'info']\n for key in keys:\n if key not in self.output[species.label]:\n if key == 'convergence':\n self.output[species.label][key] = None\n else:\n self.output[species.label][key] = ''", "def __init__(self):\n super(ExponentialOutputs, self).__init__()\n #dictionary of time, outputs\n self.out_pop_time_series = []", "def create_result_states(self):\n\n # Dictionary of Final TP States (== the winners)\n self.final_TPStates = dict()\n for stimulus in self.inputNames:\n for i in range(self.settings['epochs']):\n key = stimulus + \"/\" + str(i)\n self.final_TPStates[key] = 0", "def __init__(self, state_size, action_size, memory_size=40, output_size=11): \n self.memory_size = memory_size\n self.output_size = output_size\n self.action_size = action_size\n self.state_size = state_size\n\n self.output_len = output_size * state_size + (output_size-1) * action_size\n\n # Once filled it will be a list of lists of [x,u] <- both tensors\n self.memory = [[np.zeros(state_size), 
np.zeros(action_size)]]*memory_size\n\n self.last_state = None\n self.last_augmented = None\n self.last_action = None", "def __init__(self):\n self.run = OrderedDict()", "def _instantiate_input_states(self, context=None):\n from PsyNeuLink.Components.States.InputState import _instantiate_input_states\n _instantiate_input_states(owner=self, context=context)", "def _populate_output(self):\n pass", "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def _create_sample(self, policy_output, next_state, reward, done, info,\n env_id):\n return {\n \"policy_output\": policy_output,\n \"next_state\": next_state,\n \"reward\": reward,\n \"done\": done,\n \"info\": info,\n \"env_id\": env_id\n }", "def get_output_states(self):\n return self.states[-self.num_output_states:]" ]
[ "0.661413", "0.615965", "0.6121957", "0.6087392", "0.5996337", "0.5966779", "0.5943477", "0.59083056", "0.590116", "0.58777857", "0.5782015", "0.57762533", "0.57592046", "0.5745391", "0.5730775", "0.5730775", "0.5689425", "0.5666847", "0.56508785", "0.563107", "0.56296754", "0.5610221", "0.5593046", "0.558317", "0.5567347", "0.55429274", "0.5539331", "0.5539331", "0.55331385", "0.55278426" ]
0.73715085
0
Add projection to specified state
def _add_projection_from_mechanism(self, receiver, state, projection, context=None):
    from PsyNeuLink.Components.Projections.Projection import _add_projection_from
    _add_projection_from(sender=self, state=state, projection_spec=projection,
                         receiver=receiver, context=context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parallel_projection(self, state):\n self.camera.parallel_projection = state\n self.Modified()", "def projection(self):\n pass", "def setCrsIsProjection(self):\n self.isgeographic = False", "def test_project(self):\n basis = self.Dummy()\n state = 5\n assert basis.project(state) == (state + 1)\n assert basis.projection_error(state, relative=False) == 1\n assert basis.projection_error(state, relative=True) == 1/state", "def set_project_srid(self):\n\n crs = QgsCoordinateReferenceSystem(\n self.selected_crs_int, QgsCoordinateReferenceSystem.EpsgCrsId\n )\n self.canvas.setDestinationCrs(crs)\n self.iface.messageBar().pushMessage(\n \"Info\",\n \"\"\"The LINZ Data Importer Plugin has changed the projects\n projection to that of the imported layer\"\"\",\n level=Qgis.Info,\n duration=6,\n )", "def project(record, selected, pkey_name) -> StateDictInterface:\n if selected:\n keys = set(selected.keys()) | {pkey_name}\n return record.projection(keys)\n else:\n return record", "def projection(self, point):\n return gs.copy(point)", "def __init__(self, projection_point):\n self.projection_point = vector(projection_point)\n self.dim = self.projection_point.degree()\n \n pproj = vector(RDF,self.projection_point)\n self.psize = norm(pproj)\n if (self.psize).is_zero():\n raise ValueError, \"projection direction must be a non-zero vector.\"\n v = vector(RDF, [0.0]*(self.dim-1) + [self.psize]) - pproj\n polediff = matrix(RDF,v).transpose()\n denom = RDF((polediff.transpose()*polediff)[0][0])\n if denom.is_zero():\n self.house = identity_matrix(RDF,self.dim)\n else:\n self.house = identity_matrix(RDF,self.dim) \\\n - 2*polediff*polediff.transpose()/denom # Householder reflector", "def project(self):\n # update positions compared to observer\n pos = self.pos.copy()\n\n # center coordinates around obs coords\n pos[:, 0] -= np.sin(self.theta) * self.V * self.time_elapsed\n pos[:, 2] -= np.cos(self.theta) * self.V * self.time_elapsed\n\n # wrap in a novel box around obs coords\n for i in range(3):\n pos[:, i] = self.bounds[2*i] + np.mod(pos[:, i], self.bounds[2*i + 1]-self.bounds[2*i])\n\n d = (pos**2).sum(axis=1)**.5\n # ind_visible = (pos[:, 2] > 0) * (self.d_min<d) * (d<self.d_max)\n ind_visible = (pos[:, 2] > self.d_min) * (d < self.d_max)\n N_visible = int(np.sum(ind_visible))\n\n # self.state = [X, Y, size]\n self.state = np.ones((N_visible, 7))\n for i in range(2):\n self.state[:, i] = self.mag * pos[ind_visible, i] / pos[ind_visible, 2]\n print(i, self.state[:, i].min(), self.state[:, i].max())\n self.state[:, 2] = self.size / d[ind_visible]\n\n # colors do not change\n self.state[:, 3:] = pos[ind_visible, 3:]\n\n # TODO: larger transparency at larger distance => too fancy :-)\n # self.state[:, 2] = self.size / d[ind_visible]\n\n # for i in range(3):\n # self.state[:, i] *= (self.bounds[2*i+1] - self.bounds[2*i])\n # self.state[:, i] -= self.bounds[2*i]", "def projectionManip(*args, fitBBox: bool=True, projType: int=0, switchType: bool=True, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass", "def reproject(self, lon, lat):\n if self.xform is None:\n # if the CRS hasn't been determined yet, we set it from the first image's lat/lon (take the UTM crs)\n utm_i = str(int(math.floor((self.images[0].lon + 180) / 6 ) % 60) + 1).zfill(2)\n epsg_code = int('326' + utm_i) if (self.images[0].lat >= 0) else int('327' + utm_i)\n self.crs_dest = QgsCoordinateReferenceSystem(epsg_code)\n self.xform = QgsCoordinateTransform(self.crs_src, self.crs_dest, QgsProject.instance())\n return 
self.xform.transform(QgsPointXY(lon, lat))", "def proj(self, X, G):\n raise NotImplementedError", "def process_state(state):\n grid = state.grid\n pos = state.pos\n reshaped_grid = np.reshape(grid,(1, grid_size*grid_size)) # Only use squared for square matrices\n reshaped_grid = reshaped_grid[0]\n processed_state = np.concatenate((pos, reshaped_grid))\n processed_state = np.array([processed_state])\n # processed_state.reshape(1, 1, grid_size*grid_size+2, 1)\n #print(processed_state.shape)\n\n return processed_state", "def AddUnmappedState(self, state: str, point: str) -> None:\n self._valid = False\n self._unmapped_states.append((point, state))", "def add_instigator_state(self, state: InstigatorState):", "def _load_projection(self):\n input_dim = self.filter_dims\n self.projection = nn.Linear(input_dim, self.char_cnn_output_dim, bias=True)\n weight = self.npz_weights['W_proj']\n bias = self.npz_weights['b_proj']\n self.projection.weight.data.copy_(torch.div(torch.FloatTensor(np.transpose(weight)), 10.0))\n self.projection.bias.data.copy_(torch.div(torch.FloatTensor(np.transpose(bias)), 10.0))\n self.projection.weight.requires_grad = self._finetune_pretrained_weights\n self.projection.bias.requires_grad = self._finetune_pretrained_weights", "def assign_state(self, state):\n raise NotImplementedError()", "def add(label, state, remapping={}):\n # Get currently opened container\n self = Concurrence._currently_opened_container()\n\n # Store state\n self._states[label] = state\n self._remappings[label] = remapping\n\n return state", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def __setstate__(self,state):\n self.__dict__.update(state)\n self.KDTreeFinder = spatial.KDTree(self.featureVals)", "def add_state(self, state):\n if self.rate_variation:\n if not self.feature_rates:\n # Set all rates to 1.0 in a big plate\n plate = ET.SubElement(state, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n param = ET.SubElement(plate, \"parameter\", {\n \"id\":\"featureClockRate:%s:$(feature)\" % self.name,\n \"name\":\"stateNode\"})\n param.text=\"1.0\"\n else:\n # Give each rate a custom value\n for f in self.features:\n param = ET.SubElement(state, \"parameter\", {\n \"id\":\"featureClockRate:%s:%s\" % (self.name, f),\n \"name\":\"stateNode\"})\n param.text=str(self.feature_rates.get(f,1.0))\n\n # Give Gamma shape parameter a finite domain\n # Must be > 1.0 for the distribution to be bell-shaped,\n # rather than L-shaped. 
The domain [1.1,1000] limits feature\n # rate variation to the realms of vague plausibity\n parameter = ET.SubElement(state, \"parameter\", {\"id\":\"featureClockRateGammaShape:%s\" % self.name, \"lower\":\"1.1\",\"upper\":\"100.0\",\"name\":\"stateNode\"})\n parameter.text=\"5.0\"\n # Gamma scale parameter's domain is defined *implicilty*\n # by the fact that the operators maintain shape*scale = 1.0\n parameter = ET.SubElement(state, \"parameter\", {\"id\":\"featureClockRateGammaScale:%s\" % self.name, \"name\":\"stateNode\"})\n parameter.text=\"0.2\"", "def add_loc_proj(i, warning=None):\n # -- Adding Location Data ----------------------------------------------- #\n state_code = tbl_project['txt_StateCode'][i]\n state_name = lkp_stateandfhwadistrict.loc[\n lkp_stateandfhwadistrict['txt_StateCode'] == state_code,\n 'txt_StateName'].values[0] if not pd.isna(state_code) else None\n country_code = tbl_project['txt_CountryCode'][i]\n country_name = tbl_country.loc[\n tbl_country['txt_CountryCode'] == country_code,\n 'txt_CountryDescription'].values[0]\n loc = Locations(\n address=tbl_project['txt_Address'][i]\n if not pd.isna(tbl_project['txt_Address'][i]) else None,\n city=tbl_project['txt_City'][i]\n if not pd.isna(tbl_project['txt_City'][i]) else None,\n county=tbl_project['txt_County'][i]\n if not pd.isna(tbl_project['txt_County'][i]) else None,\n state=state_name,\n country=country_name,\n latitude=tbl_project['dbl_Latitude'][i]\n if abs(tbl_project['dbl_Latitude'][i]) < 100 else None,\n longitude=tbl_project['dbl_Longitude'][i]\n if abs(tbl_project['dbl_Longitude'][i]) < 100 else None,\n )\n db.session.add(loc)\n\n # -- Adding Project Data ------------------------------------------------ #\n prj = Projects(\n location=loc,\n user_id=data_owner().id,\n source_db='FHWA DFLTD v.2',\n source_id=int(tbl_project['lng_KeyProject'][i]),\n description=tbl_project['mem_Remarks'][i]\n if not pd.isna(tbl_project['mem_Remarks'][i]) else None,\n site_name=tbl_project['txt_ProjectName'][i]\n if not pd.isna(tbl_project['txt_ProjectName'][i]) else None,\n source_ref=tbl_project['txt_Publication'][i]\n if not pd.isna(tbl_project['txt_Publication'][i]) else None,\n contractor=tbl_project['txt_GeneralContractor'][i]\n if not pd.isna(tbl_project['txt_GeneralContractor'][i]) else None,\n number=tbl_project['txt_ProjectID'][i]\n if not pd.isna(tbl_project['txt_ProjectID'][i]) else None,\n title=tbl_project['txt_Title'][i]\n if not pd.isna(tbl_project['txt_Title'][i]) else None,\n date_added=pd.to_datetime(\n tbl_project['dte_AddDate'][i] if not\n pd.isna(tbl_project['dte_AddDate'][i]) else None),\n warning=warning\n )\n\n return prj", "def Add(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Add(self, *args)", "def projection(self):\n self.projection = Projection(self)\n return self.projection", "def set_params_proj(ima, p, xform = \"xform.projection\"):\n\tfrom EMAN2 import Vec2f\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2]})\n\tt.set_trans(Vec2f(-p[3], -p[4]))\n\tima.set_attr(xform, t)", "def _init_projection(self):\n radius = 6370e3\n \n # Spherical latlon used by WRF\n self.latlon_sphere = pyproj.Proj(proj='latlong',\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n # Lambert Conformal Conic used by WRF\n self.lambert_grid = pyproj.Proj(proj='lcc',\n lat_1=self.truelats[0],\n lat_2=self.truelats[1],\n lat_0=self.ref_latlon[0],\n lon_0=self.stand_lon,\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n grid_size_i = (self.domain_size[0] - 2) * 
self.cell_size[0]\n grid_size_j = (self.domain_size[1] - 2) * self.cell_size[1]\n\n grid_center_i, grid_center_j = pyproj.transform(\n self.latlon_sphere, self.lambert_grid,\n self.ref_latlon[1], self.ref_latlon[0])\n \n self.offset_i = grid_center_i - grid_size_i * .5\n self.offset_j = grid_center_j - grid_size_j * .5", "def update_state_call(state_id):\n update_state = plot_choropleth('state', state_id).to_html()\n return update_state", "def set_projection_type(self, p_type):\n self.scenes[self.current_scene].set_projection_type(p_type)", "def draw_state(subplot, name, **kwargs):\n global _color_idx\n if name not in state2poly:\n if get_statename(name) in state2poly:\n name = get_statename(name)\n else:\n print \"state %s not found\" % name\n return\n\n kwargs['color'] = \"#FFFFFF\"\n for polygon in state2poly[name]:\n draw_polygon(subplot, polygon, **kwargs)", "def _initWithProjection(self, unitsPerPixel=None):\n inProj = self._proj4Proj(NeededInitPrefix + 'epsg:4326')\n # Since we already converted to bytes decoding is safe here\n outProj = self._proj4Proj(self.projection)\n if outProj.crs.is_geographic:\n msg = ('Projection must not be geographic (it needs to use linear '\n 'units, not longitude/latitude).')\n raise TileSourceError(msg)\n if unitsPerPixel:\n self.unitsAcrossLevel0 = float(unitsPerPixel) * self.tileSize\n else:\n self.unitsAcrossLevel0 = ProjUnitsAcrossLevel0.get(self.projection)\n if self.unitsAcrossLevel0 is None:\n # If unitsPerPixel is not specified, the horizontal distance\n # between -180,0 and +180,0 is used. Some projections (such as\n # stereographic) will fail in this case; they must have a\n # unitsPerPixel specified.\n equator = pyproj.Transformer.from_proj(inProj, outProj, always_xy=True).transform(\n [-180, 180], [0, 0])\n self.unitsAcrossLevel0 = abs(equator[0][1] - equator[0][0])\n if not self.unitsAcrossLevel0:\n msg = 'unitsPerPixel must be specified for this projection'\n raise TileSourceError(msg)\n if len(ProjUnitsAcrossLevel0) >= ProjUnitsAcrossLevel0_MaxSize:\n ProjUnitsAcrossLevel0.clear()\n ProjUnitsAcrossLevel0[self.projection] = self.unitsAcrossLevel0\n # This was\n # self.projectionOrigin = pyproj.transform(inProj, outProj, 0, 0)\n # but for consistency, it should probably always be (0, 0). Whatever\n # renders the map would need the same offset as used here.\n self.projectionOrigin = (0, 0)\n # Calculate values for this projection\n self.levels = int(max(int(math.ceil(\n math.log(self.unitsAcrossLevel0 / self.getPixelSizeInMeters() / self.tileWidth) /\n math.log(2))) + 1, 1))\n # Report sizeX and sizeY as the whole world\n self.sizeX = 2 ** (self.levels - 1) * self.tileWidth\n self.sizeY = 2 ** (self.levels - 1) * self.tileHeight" ]
[ "0.65063393", "0.5794265", "0.55886", "0.551689", "0.5484942", "0.5446427", "0.53433836", "0.5326458", "0.5302247", "0.52575725", "0.5254479", "0.52378243", "0.5235828", "0.52268904", "0.5216963", "0.5197505", "0.51842076", "0.5154298", "0.51300246", "0.5101129", "0.5098088", "0.5097756", "0.5076032", "0.5047937", "0.50330096", "0.50264555", "0.50208205", "0.50168854", "0.49957645", "0.4994839" ]
0.62162536
1
Execute function for each outputState and assign result of each to corresponding item of self.outputValue
def _update_output_states(self, runtime_params=None, time_scale=None, context=None):
    for i in range(len(self.outputStates)):
        state = list(self.outputStates.values())[i]
        state.update(params=runtime_params, time_scale=time_scale, context=context)
        # self.outputValue[i] = state.value
    # Assign value of each outputState to corresponding item in self.outputValue
    self.outputValue = list(state.value for state in list(self.outputStates.values()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _assign_output_states(self):\n for mech in self.terminalMechanisms.mechanisms:\n self.outputStates[mech.name] = mech.outputStates", "def __set_outputs__(self):\n self.__set_in_out_var__(None, 1)", "def outputStateValues(self):\n values = []\n for item in self.mechanisms:\n for output_state_name, output_state in list(item.outputStates.items()):\n values.append(output_state.value)\n return values", "def get_outputs(self):\r\n # check to see if the outputs exist\r\n try:\r\n self.times\r\n except AttributeError:\r\n self.times = []\r\n try:\r\n self.outputs\r\n except AttributeError:\r\n self.outputs = []\r\n\r\n # append the time to the times list\r\n self.times.append(self.t)\r\n\r\n outlist = [] # initalize\r\n\r\n # run for the output functions\r\n for func in self.out_funcs:\r\n # get the function values\r\n fv = func()[0]\r\n\r\n # if the output is a list, use extend\r\n if type(fv) is list:\r\n outlist.extend(fv)\r\n\r\n # otherwise, use append\r\n else:\r\n outlist.append(fv)\r\n\r\n self.outputs.append(outlist)", "def calculate_output(self):", "def apply_func(output, func):\n new_output = []\n for dict in output:\n mnemonic = copy.deepcopy(dict['mnemonic'])\n values = dict['values']\n new_values = func(values)\n new_output.append({'mnemonic': mnemonic, 'values': new_values})\n return new_output", "def outputs(self):\r\n return self._outputs", "def compute_output_from_current_state(self):\n\n assert self.Wout is not None, \"Matrix Wout is not initialized/trained yet\"\n\n self.output_values = (self.Wout @ self.state).astype(self.typefloat)\n return self.output_values.copy().ravel()", "def output(self):\r\n self.logic ( )\r\n return self.output", "def _output_update(self):\n self._outputtype = self.inputs.outputtype", "def evaluate_outputs(self):\n raise NotImplementedError(\n 'evaluate_outputs called but not implemented in the derived class.'\n )", "def process(self):\n while not self.halted:\n self.step()\n return self.outputs", "def get_output_state(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n if 'output_state' not in c.keys():\n try:\n output_state = yield self.query(c,\n slot_number,\n \"EXON?\")\n self.wait_for_completion(c)\n output_state = bool(int(output_state))\n c['output_state'] = output_state \n except:\n #self.initialize_mainframe(c)\n output_state = yield self.query(c,\n slot_number,\n \"EXON?\")\n self.wait_for_completion(c)\n output_state = bool(int(output_state))\n c['output_state'] = output_state \n else:\n raise ValueError(self.no_selection_msg())\n returnValue(c['output_state'] )", "def _call_hooks(self, func, output=None):\n # Only after iter hook will receive output\n for hook in self.hooks:\n if output is None:\n getattr(hook, func)(self)\n else:\n getattr(hook, func)(self, *output)", "def step(self,inp): ## function responsible for exciting the machine with a SINGLE INPUT VALUE\n (s, o) = self.getNextValues(self.state,inp)\n # will store the state and return the output\n self.state =s\n return o", "def op_output_values(self):\n return self.solid_output_values", "def run(self):\n \n #calculate node\n print (\"{} run()\".format(self.getName()))\n\n #feed outputs\n result = 0\n for i in self.getInputPorts():\n v = i.getValue()\n # print v, self.getName()\n if v:\n result += float(v)\n\n for i in self.getOutputPorts(): #for every output port\n i.setValue(result) #set test value\n print (\"Output: {}\".format(i.getValue()))\n\n # print \"\"", "def outputs(self):\n pass", "def output(self, state):\n h, t = state\n\n return h", 
"def process_output(self, state: str, data: SimData, tb_manager: TestbenchManager\n ) -> Tuple[bool, str, Dict[str, Any]]:\n return False, '', {}", "def update_input_states(self, input_values):", "def outputs(self) -> set['ValueBase']:\n raise NotImplementedError", "def get_outputs(self):\n raise NotImplementedError", "def outputs(self):\n return self.outputs", "def outputs(self):\n\t\treturn {k: v * self.throughput for k, v in self.per_process_outputs.items()}", "def process(self, *args, **kwargs):\n from copy import copy\n\n # set default values\n options = copy(self._process_default_options_)\n options.update(kwargs)\n\n # perform iteration\n it = self.iter_process(*args, **options)\n for _ in it:\n pass\n\n # process output: filtering accepting results\n only_accepted = options['only_accepted']\n it_output = [result for result in it.result()\n if not only_accepted or result[0]]\n\n # process output: returning a list output\n if (len(it_output) > 1 and options['list_of_outputs'] is None or\n options['list_of_outputs']):\n return [self._process_convert_output_(out, **options)\n for out in it_output]\n\n # process output: cannot return output to due input parameters\n if options['list_of_outputs'] is False:\n if not it_output and only_accepted:\n raise ValueError('No accepting output was found but according '\n 'to the given options, an accepting output '\n 'should be returned. Change only_accepted '\n 'and/or list_of_outputs options.')\n elif len(it_output) > 1:\n raise ValueError('Got more than one output, but only allowed '\n 'to show one. Change list_of_outputs option.')\n # At this point it_output has length 0 or 1.\n\n # process output: create non-accepting output if needed\n if not it_output:\n if only_accepted:\n return []\n NoneState = FSMState(None, allow_label_None=True)\n it_output = [(False, NoneState, None)]\n\n return self._process_convert_output_(it_output[0], **options)", "def run_states(self):\n if (self.state == \"off\"):\n if (self.in_power.value == 1):\n self.off_to_on()\n \n elif self.state == \"on\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 1):\n self.on_to_alert()\n \n elif self.state == \"alert\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 0):\n self.alert_to_was_alert()\n\n elif self.state == \"was_alert\":\n if (self.in_power.value == 0):\n self.any_to_off()", "def _update_value(self):\n args = [] # todo: pyscript support for list comprehension\n for s in self._upstream:\n args.append(s())\n value = self._call_func(*args)\n self._set_value(value)", "def tree_analysisOutput(self, *args, **kwargs):\n fn_outputcallback = None\n for k, v in kwargs.items():\n if k == 'outputcallback': fn_outputcallback = v\n index = 1\n total = len(self.d_inputTree.keys())\n for path, d_analysis in self.d_outputTree.items():\n self.simpleProgress_show(index, total)\n self.dp.qprint(\"Processing analysis results in output: %s\" % path)\n d_output = fn_outputcallback((path, d_analysis), **kwargs)\n return {\n 'status': True\n }", "def _process_convert_output_(self, output_data, **kwargs):\n accept_input, current_state, output = output_data\n return (accept_input, current_state, output)" ]
[ "0.67218447", "0.6400146", "0.63258046", "0.63037884", "0.62867457", "0.61272424", "0.6066108", "0.5995027", "0.5982473", "0.5972857", "0.5955611", "0.5936051", "0.5929208", "0.5926685", "0.59119964", "0.5906643", "0.58903944", "0.58846825", "0.5864265", "0.58555716", "0.58159214", "0.5795552", "0.57678324", "0.57545567", "0.5750277", "0.57437265", "0.57310414", "0.5672829", "0.5672506", "0.5667063" ]
0.6929011
0
Return dict with current value of each ParameterState in paramsCurrent
def _get_mechanism_param_values(self):
    from PsyNeuLink.Components.States.ParameterState import ParameterState
    return dict((param, value.value) for param, value in self.paramsCurrent.items()
                if isinstance(value, ParameterState))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_state(self) -> Dict:\n state_dict = {}\n for param in self.optim_objs:\n param_name = pyro.get_param_store().param_name(param)\n state_dict[param_name] = _get_state_dict(self.optim_objs[param])\n return state_dict", "def _get_current_params(self):\n return [\n param.cpu().detach().clone().numpy() for param in self.model.parameters()\n ]", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def Current(self):\r\n return dict([(vname, copy.deepcopy(getattr(self.module, vname)))\r\n for vname in self.module.state ])", "def getstate(self):\r\n return Parameterized.getstate(self) + [self.parts,\r\n self.num_parts,\r\n self.num_params,\r\n self.input_dim,\r\n self.input_slices,\r\n self.param_slices\r\n ]", "def get_next_params(self) -> dict:\n params = {arg_name: caller() for arg_name, caller in self.parameters}\n return params", "def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params", "def get_state_parameters(self):\n return self.__get_one_type_params(StateParameter)", "def current_parameters(self):\n current = []\n for core_param in range(len(self.q)):\n for approx_param in range(self.q[core_param].param_no):\n current.append(self.q[core_param].vi_return_param(approx_param))\n return np.array(current)", "def parameters_dict(self):\n return dict(zip(self.parameters_names(), self.parameters_list))", "def parameters_dict(self):\n return", "def getstate(self):\r\n return Parameterized.getstate(self) + \\\r\n [self.priors, self.optimization_runs,\r\n self.sampling_runs, self.preferred_optimizer]", "def get_params(self, deep=False):\n return {\"alpha\": self.alpha, \"beta\": self.beta, \"gamma\": self.gamma, \"W\": self.W, \"bias\": self.bias, \"add_bias\": self.add_bias, \"opts\": self.opts}", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def params(self):\n\t\treturn self.params_", "def get_params(self):\n return list(self.params.values())", "def param_values(self):\n return self._param_values", "def parameters(self):\n return self._params", "def inspect_state(self):\n for name in self._param_store.get_all_param_names():\n self._logger.info(\"Param [%s]: %r\", name,\n pyro.param(name).data.numpy())", "def __iter__(self):\n return dict(self.parameters)", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def __getstate__(self):\n state = {\n 'connector_keys' : self.connector_keys,\n 'metric_key' : self.metric_key,\n 'location_key' : self.location_key,\n 'parameters' : self.parameters,\n 'mrsm_instance' : self.instance_keys,\n }\n return state", "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def _get_state_dict(self) -> dict:\n return {\n 'optimizers': [o.state_dict() for o in self.optimizers],\n 'schedulers': [s.state_dict() for s in self.schedulers],\n }", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def current_state(self):\n curr_state = dict(\n logfile=os.path.basename(self.logfile),\n time=self.time,\n converged=self.converged,\n solve_completed=self.solve_completed,\n converged_time=self.converged_time,\n failed=self.failed,\n 
fields=list(self.res_files.keys()),\n bounding_fields=list(self.bound_files.keys()))\n return curr_state", "def parameters(self):\n return dict(self._register)", "def _get_current_training_params(self):\n params = {}\n params[\"lyap_relu_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.lyapunov_relu.state_dict())\n if not self.R_options.fixed_R:\n params[\"R_params\"] = self.R_options._variables.clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n params[\"controller_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.system.controller_network.\n state_dict())\n return params", "def get_params_snapshot(self):\n ...", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}" ]
[ "0.7163409", "0.70638406", "0.6930823", "0.6792512", "0.678194", "0.67451435", "0.6702676", "0.6664253", "0.6624333", "0.6506222", "0.64265686", "0.641892", "0.6387138", "0.6354353", "0.63507247", "0.6341828", "0.63148063", "0.6303014", "0.6279106", "0.62353814", "0.6232896", "0.62100714", "0.619149", "0.619149", "0.6180817", "0.617715", "0.6175188", "0.6174308", "0.6157162", "0.6152125" ]
0.7810785
0
Evaluate whether spec is a valid Mechanism specification
def _is_mechanism_spec(spec):
    if inspect.isclass(spec) and issubclass(spec, Mechanism):
        return True
    if isinstance(spec, Mechanism):
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def satisfies(self, spec):\n if spec.is_abs and self.is_abs and self.path != spec.path:\n return False\n if spec.implementation is not None and spec.implementation.lower() != self.implementation.lower():\n return False\n if spec.architecture is not None and spec.architecture != self.architecture:\n return False\n\n for our, req in zip((self.major, self.minor, self.micro), (spec.major, spec.minor, spec.micro)):\n if req is not None and our is not None and our != req:\n return False\n return True", "def spec(self) -> bool:\n\t\treturn True", "def validate(self):\r\n return self.specs.validate(self)", "def validate(self):\n logger.debug(\"Validating spec: %s\", self._spec)\n try:\n validate(instance=self._spec, schema=MODEL_SPEC_SCHEMA)\n except ValidationError as e:\n raise SpecError(e.message) from e", "def validate(protocol_specification: ProtocolSpecification) -> Tuple[bool, str]:\n # Validate speech-acts section\n (\n result_speech_acts_validation,\n msg_speech_acts_validation,\n performatives_set,\n custom_types_set,\n ) = _validate_speech_acts_section(protocol_specification)\n if not result_speech_acts_validation:\n return result_speech_acts_validation, msg_speech_acts_validation\n\n # Validate protocol buffer schema code snippets\n result_protobuf_validation, msg_protobuf_validation = _validate_protocol_buffer_schema_code_snippets(protocol_specification, custom_types_set) # type: ignore\n if not result_protobuf_validation:\n return result_protobuf_validation, msg_protobuf_validation\n\n # Validate dialogue section\n result_dialogue_validation, msg_dialogue_validation = _validate_dialogue_section(protocol_specification, performatives_set) # type: ignore\n if not result_dialogue_validation:\n return result_dialogue_validation, msg_dialogue_validation\n\n return True, \"Protocol specification is valid.\"", "def _validate_confusion_matrix_spec(\n self, spec: standard_component_specs, confusion_matrix_dict: dict\n ):\n for key in spec.CONFUSION_MATRIX_DICT:\n if key not in confusion_matrix_dict:\n raise ValueError(f\"Missing mandatory key - {key}\")\n if key in confusion_matrix_dict:\n self._type_check(\n actual_value=confusion_matrix_dict[key],\n key=key,\n spec_dict=spec.CONFUSION_MATRIX_DICT,\n )", "def _processSpec(self, spec):\n if isinstance(spec, list):\n for k in spec:\n if isinstance(k, Specifier):\n self._spec.append(k)\n else:\n raise NotAValidSpecifierError(str(type(k)))\n elif isinstance(spec, Specifier):\n self._spec.append(spec)\n else:\n # This point we need to go to the symboltable\n # and look for structs and unions.\n raise NotAValidSpecifierError(str(type(spec)))", "def is_tensor_spec(self) -> bool:\n return self.inputs and isinstance(self.inputs[0], TensorSpec)", "def verify_spec_name(spec_name):\n if not isinstance(spec_name, text_type):\n raise ValueError(\n \"expected spec name of string type, but got '{0}' of type '{1}'\".\n format(spec_name, to_str(type(spec_name))))", "def is_valid(self, user_specific_config: Any, factor: str) -> bool:", "def _check_spec(spec, image_format):\n input_space = spec.input_space\n output_space = spec.output_space\n if getattr(input_space, 'shape', None) is None:\n raise ValueError(f'input_space to CNNModule is {input_space}, but should be an akro.Box or akro.Image')\n elif len(input_space.shape) != 3:\n raise ValueError(f'Input to CNNModule is {input_space}, but should have three dimensions.')\n if output_space is not None and not (hasattr(output_space, 'shape') and len(output_space.shape) == 1):\n raise ValueError(f'output_space 
to CNNModule is {output_space}, but should be an akro.Box with a single dimension or None')\n if image_format == 'NCHW':\n in_channels = spec.input_space.shape[0]\n height = spec.input_space.shape[1]\n width = spec.input_space.shape[2]\n elif image_format == 'NHWC':\n height = spec.input_space.shape[0]\n width = spec.input_space.shape[1]\n in_channels = spec.input_space.shape[2]\n else:\n raise ValueError(f\"image_format has value {image_format!r}, but must be either 'NCHW' or 'NHWC'\")\n if in_channels not in (1, 3):\n warnings.warn(f'CNNModule input has {in_channels} channels, but 1 or 3 channels are typical. Consider changing the CNN image_format.')\n return in_channels, height, width", "def is_spec_ea(*args):\n return _ida_segment.is_spec_ea(*args)", "def test_no_specification_error():\n try:\n bad_arm = survey.get_spiral_slice()\n except SyntaxError:\n assert True\n else:\n assert False", "def is_spec_segm(*args):\n return _ida_segment.is_spec_segm(*args)", "def provided(self) -> bool:\n return bool(self.address_specs) or bool(self.filesystem_specs)", "def provided(self) -> bool:\n return bool(self.address_specs) or bool(self.filesystem_specs)", "def assert_spec_compatibility(input_spec: TensorSpec, other_spec: TensorSpec):\n if not input_spec:\n return False\n if isinstance(input_spec, (tuple, list)) and all([isinstance(item, numbers.Integral) for item in input_spec]):\n input_spec = TensorSpec(shape=to_tensor(input_spec))\n\n if isinstance(other_spec, (tuple, list)) and all([isinstance(item, numbers.Integral) for item in other_spec]):\n other_spec = TensorSpec(shape=to_tensor(other_spec))\n\n if (input_spec.ndim is not None or\n input_spec.min_ndim is not None or\n input_spec.max_ndim is not None):\n if other_spec.ndim is None:\n print('Other_spec ' + ' is incompatible with input_spec: '\n 'its rank is undefined, but input_spec requires a '\n 'defined rank.')\n return False\n\n # Check ndim.\n if input_spec.ndim is not None:\n ndim = other_spec.ndim\n if ndim != input_spec.ndim:\n print('Other_spec is incompatible with the input_spec: expected ndim=' + str(input_spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(other_spec._shape_tuple))\n return False\n if input_spec.max_ndim is not None:\n ndim = other_spec.ndim\n if ndim is not None and ndim > input_spec.max_ndim:\n print('Other_spec is incompatible with the input_spec: expected max_ndim=' + str(input_spec.max_ndim) +\n ', found ndim=' + str(ndim))\n return False\n if input_spec.min_ndim is not None:\n ndim = other_spec.ndim\n if ndim is not None and ndim < input_spec.min_ndim:\n print('Other_spec is incompatible with the input_spec: expected min_ndim=' + str(input_spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. 
Full shape received: ' +\n str(other_spec._shape_tuple))\n return False\n # Check dtype.\n if input_spec.dtype is not None:\n if other_spec.dtype != input_spec.dtype:\n print('Other_spec is incompatible with the input_spec: expected dtype=' + str(input_spec.dtype) +\n ', found dtype=' + str(other_spec.dtype))\n return False\n # Check specific shape axes.\n if input_spec.axes:\n shape = other_spec._shape_tuple\n if shape is not None:\n for axis, value in input_spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape[int(axis)] not in {value, None}:\n print(\n 'Other_spec is incompatible with input_spec: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + str(shape))\n return False\n # Check shape.\n if input_spec.shape is not None:\n shape = other_spec._shape_tuple\n is_compatible=TensorShape(input_spec.shape).is_compatible_with(TensorShape(other_spec._shape_tuple))\n if is_compatible:\n return is_compatible\n if shape is not None:\n for spec_dim, dim in zip(other_spec._shape_tuple, input_spec._shape_tuple):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n print('Other_spec is incompatible with input_spec: expected shape=' + str(input_spec._shape_tuple) +\n ', found shape=' + str(shape))\n return False\n return True", "def check_tbe_support(json_desc):\n if \"buffer_stitch\" in json_desc:\n logger.info(\"TBE not supports buffer stitch\")\n return False\n\n if \"parallel_fusion\" in json_desc:\n logger.info(\"TBE not supports parallel fusion\")\n return False\n\n if not json_desc.get(\"input_desc\"):\n logger.info(\"TBE not supports empty inputs\")\n return False\n\n for op in json_desc[\"op_desc\"]:\n op_name = op[\"name\"]\n if not get_op_reg_info(op_name, \"func\", False):\n logger.info(\"TBE op not registered: {}\".format(op_name))\n return False\n return True", "def semver_validate(mask):\n try:\n specmask = SpecMask(mask, validate_only=True)\n except (InvalidSemverError, ValueError) as e:\n return False\n return True # all mask exceptions are raised by instantiation", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def test_invalid_type_cr_spec(self):\n QPS_SPECS_NONAMES = {\n \"circuits\": [{\n \"quantum_registers\": [{\n \"size\": 3}],\n \"classical_registers\": [{\n \"name\": 1,\n \"size\": 3}]\n }]\n }\n\n self.assertRaises(QISKitError, QuantumProgram, specs=QPS_SPECS_NONAMES)", "def is_our_certrequest(spec, **_):\n issuer = spec.get(\"issuerRef\")\n return (issuer is not None) and (issuer.get(\"group\") == GROUP)", "def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()", "def _is_consistent(self) -> bool:\n try:\n enforce(\n isinstance(self.dialogue_reference, tuple),\n \"Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.\".format(\n type(self.dialogue_reference)\n ),\n )\n enforce(\n isinstance(self.dialogue_reference[0], str),\n \"Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.\".format(\n type(self.dialogue_reference[0])\n ),\n )\n enforce(\n isinstance(self.dialogue_reference[1], str),\n \"Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.\".format(\n type(self.dialogue_reference[1])\n ),\n )\n enforce(\n type(self.message_id) is int,\n \"Invalid type for 'message_id'. Expected 'int'. 
Found '{}'.\".format(\n type(self.message_id)\n ),\n )\n enforce(\n type(self.target) is int,\n \"Invalid type for 'target'. Expected 'int'. Found '{}'.\".format(\n type(self.target)\n ),\n )\n\n # Light Protocol Rule 2\n # Check correct performative\n enforce(\n isinstance(self.performative, SigningMessage.Performative),\n \"Invalid 'performative'. Expected either of '{}'. Found '{}'.\".format(\n self.valid_performatives, self.performative\n ),\n )\n\n # Check correct contents\n actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE\n expected_nb_of_contents = 0\n if self.performative == SigningMessage.Performative.SIGN_TRANSACTION:\n expected_nb_of_contents = 2\n enforce(\n isinstance(self.terms, CustomTerms),\n \"Invalid type for content 'terms'. Expected 'Terms'. Found '{}'.\".format(\n type(self.terms)\n ),\n )\n enforce(\n isinstance(self.raw_transaction, CustomRawTransaction),\n \"Invalid type for content 'raw_transaction'. Expected 'RawTransaction'. Found '{}'.\".format(\n type(self.raw_transaction)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGN_MESSAGE:\n expected_nb_of_contents = 2\n enforce(\n isinstance(self.terms, CustomTerms),\n \"Invalid type for content 'terms'. Expected 'Terms'. Found '{}'.\".format(\n type(self.terms)\n ),\n )\n enforce(\n isinstance(self.raw_message, CustomRawMessage),\n \"Invalid type for content 'raw_message'. Expected 'RawMessage'. Found '{}'.\".format(\n type(self.raw_message)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGNED_TRANSACTION:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.signed_transaction, CustomSignedTransaction),\n \"Invalid type for content 'signed_transaction'. Expected 'SignedTransaction'. Found '{}'.\".format(\n type(self.signed_transaction)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGNED_MESSAGE:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.signed_message, CustomSignedMessage),\n \"Invalid type for content 'signed_message'. Expected 'SignedMessage'. Found '{}'.\".format(\n type(self.signed_message)\n ),\n )\n elif self.performative == SigningMessage.Performative.ERROR:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.error_code, CustomErrorCode),\n \"Invalid type for content 'error_code'. Expected 'ErrorCode'. Found '{}'.\".format(\n type(self.error_code)\n ),\n )\n\n # Check correct content count\n enforce(\n expected_nb_of_contents == actual_nb_of_contents,\n \"Incorrect number of contents. Expected {}. Found {}\".format(\n expected_nb_of_contents, actual_nb_of_contents\n ),\n )\n\n # Light Protocol Rule 3\n if self.message_id == 1:\n enforce(\n self.target == 0,\n \"Invalid 'target'. Expected 0 (because 'message_id' is 1). 
Found {}.\".format(\n self.target\n ),\n )\n except (AEAEnforceError, ValueError, KeyError) as e:\n _default_logger.error(str(e))\n return False\n\n return True", "def spec(self) -> Optional['outputs.ValidatingAdmissionPolicySpec']:\n return pulumi.get(self, \"spec\")", "def test_spec(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def inferSpecification(inputExamples, outputExamples, components):", "def checkValid(self):\n if (self.noteName is not None) and (self.accidental is not None) and (self.octave is not None):\n return True\n else:\n return False", "def satisfies(self, spec, impl_must_match): # noqa: C901\n if spec.path:\n if self.executable == os.path.abspath(spec.path):\n return True # if the path is a our own executable path we're done\n if not spec.is_abs:\n # if path set, and is not our original executable name, this does not match\n basename = os.path.basename(self.original_executable)\n spec_path = spec.path\n if sys.platform == \"win32\":\n basename, suffix = os.path.splitext(basename)\n if spec_path.endswith(suffix):\n spec_path = spec_path[: -len(suffix)]\n if basename != spec_path:\n return False\n\n if (\n impl_must_match\n and spec.implementation is not None\n and spec.implementation.lower() != self.implementation.lower()\n ):\n return False\n\n if spec.architecture is not None and spec.architecture != self.architecture:\n return False\n\n for our, req in zip(self.version_info[0:3], (spec.major, spec.minor, spec.micro)):\n if req is not None and our is not None and our != req:\n return False\n return True", "def check(self):\n\n if not self.target.ok():\n return False\n\n if not self.progid.ok():\n return False\n\n if not self.prinapp.ok():\n return False\n\n if not self.observers.ok():\n return False\n\n return True" ]
[ "0.64936244", "0.62401366", "0.59179676", "0.57815427", "0.574602", "0.5699332", "0.5636195", "0.56295824", "0.56248266", "0.55786574", "0.55650556", "0.55578905", "0.5553887", "0.55463713", "0.5500796", "0.5500796", "0.5499033", "0.54248726", "0.54123193", "0.5409698", "0.5399525", "0.53821796", "0.5372123", "0.5358452", "0.5352391", "0.5341482", "0.53402066", "0.5328665", "0.5295581", "0.5283698" ]
0.7282306
0
Return specified mechanism in MechanismList
def __getitem__(self, item):
    # return list(self.mech_tuples[item])[MECHANISM]
    return self.mech_tuples[item].mechanism
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mechanism(self):", "def mechanism(self):\n return self._config[\"sasl.mechanism\"]", "def mechanism(mech_spec=None, params=None, context=None):\n\n # Called with a keyword\n if mech_spec in MechanismRegistry:\n return MechanismRegistry[mech_spec].mechanismSubclass(params=params, context=context)\n\n # Called with a string that is not in the Registry, so return default type with the name specified by the string\n elif isinstance(mech_spec, str):\n return Mechanism_Base.defaultMechanism(name=mech_spec, params=params, context=context)\n\n # Called with a Mechanism type, so return instantiation of that type\n elif isclass(mech_spec) and issubclass(mech_spec, Mechanism):\n return mech_spec(params=params, context=context)\n\n # Called with Mechanism specification dict (with type and params as entries within it), so:\n # - get mech_type from kwMechanismType entry in dict\n # - pass all other entries as params\n elif isinstance(mech_spec, dict):\n # Get Mechanism type from kwMechanismType entry of specification dict\n try:\n mech_spec = mech_spec[kwMechanismType]\n # kwMechanismType config_entry is missing (or mis-specified), so use default (and warn if in VERBOSE mode)\n except (KeyError, NameError):\n if Mechanism.classPreferences.verbosePref:\n print(\"{0} entry missing from mechanisms dict specification ({1}); default ({2}) will be used\".\n format(kwMechanismType, mech_spec, Mechanism_Base.defaultMechanism))\n return Mechanism_Base.defaultMechanism(name=kwProcessDefaultMechanism, context=context)\n # Instantiate Mechanism using mech_spec dict as arguments\n else:\n return mech_spec(context=context, **mech_spec)\n\n # Called without a specification, so return default type\n elif mech_spec is None:\n return Mechanism_Base.defaultMechanism(name=kwProcessDefaultMechanism, context=context)\n\n # Can't be anything else, so return empty\n else:\n return None", "def mechanisms(self):\n return self._allMechanisms.mechanisms", "def mechanisms(self):\n return list(self)", "def all_mechanism_types():\n global _mechtype_cache\n if _mechtype_cache is None:\n _mechtype_cache = collections.OrderedDict()\n mname = neuron.h.ref('')\n # Iterate over two mechanism types (distributed, point/artificial)\n for i in [0, 1]:\n mt = neuron.h.MechanismType(i)\n nmech = int(mt.count())\n # Iterate over all mechanisms of this type\n for j in range(nmech):\n mt.select(j)\n mt.selected(mname)\n \n # General mechanism properties\n name = mname[0] # convert hoc string ptr to python str\n \n desc = {\n 'point_process': bool(i),\n 'netcon_target': bool(mt.is_netcon_target(j)),\n 'has_netevent': bool(mt.has_net_event(j)),\n 'artificial_cell': bool(mt.is_artificial(j)),\n 'internal_type': int(mt.internal_type()),\n }\n \n # Collect information about 4 different types of variables\n for k,ptype in [(-1, 'globals'), (1, 'parameters'), \n (2, 'assigned'), (3, 'state')]:\n desc[ptype] = {} # collections.OrderedDict()\n ms = neuron.h.MechanismStandard(name, k)\n for l in range(int(ms.count())):\n psize = ms.name(mname, l)\n pname = mname[0] # parameter name\n desc[ptype][pname] = int(psize)\n \n # Assemble everything in one place\n _mechtype_cache[name] = desc\n \n return _mechtype_cache", "def _get_tuple_for_mech(self, mech):\n if list(item.mechanism for item in self.mech_tuples).count(mech):\n if self.owner.verbosePref:\n print(\"PROGRAM ERROR: {} found in more than one mech_tuple in {} in {}\".\n format(append_type_to_name(mech), self.__class__.__name__, self.owner.name))\n return next((mech_tuple for mech_tuple in 
self.mech_tuples if mech_tuple.mechanism is mech), None)", "def sasl_mechanism(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sasl_mechanism\")", "def get_material(material):\n for libn,tdict in liblist:\n if material in tdict:\n return tdict[material]\n print (material, \" not found\")\n raise KeyError", "def filter_mechanism_list(mechanisms, properties, allow_insecure = False,\n server_side = False):\n # pylint: disable=W0212\n result = []\n for mechanism in mechanisms:\n try:\n if server_side:\n klass = SERVER_MECHANISMS_D[mechanism]\n else:\n klass = CLIENT_MECHANISMS_D[mechanism]\n except KeyError:\n logger.debug(\" skipping {0} - not supported\".format(mechanism))\n continue\n secure = properties.get(\"security-layer\")\n if not allow_insecure and not klass._pyxmpp_sasl_secure and not secure:\n logger.debug(\" skipping {0}, as it is not secure\".format(mechanism))\n continue\n if not klass.are_properties_sufficient(properties):\n logger.debug(\" skipping {0}, as the properties are not sufficient\"\n .format(mechanism))\n continue\n result.append(mechanism)\n return result", "def get_provider(self):\r\n if self.provided_by:\r\n return list(self.provided_by)[0]", "def _check_for_target_mechanism(self):\n\n from PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.ObjectiveMechanism import ObjectiveMechanism\n def trace_learning_objective_mechanism_projections(mech):\n \"\"\"Recursively trace projections to Objective mechanisms;\n return TARGET ObjectiveMechanism if one is found upstream;\n return None if no TARGET ObjectiveMechanism is found.\n \"\"\"\n for input_state in mech.inputStates.values():\n for projection in input_state.receivesFromProjections:\n sender = projection.sender.owner\n # If projection is not from another ObjectiveMechanism, ignore\n if not isinstance(sender, (ObjectiveMechanism)):\n continue\n if isinstance(sender, ObjectiveMechanism) and sender.learning_role is TARGET:\n return sender\n if sender.inputStates:\n target_mech = trace_learning_objective_mechanism_projections(sender)\n if target_mech:\n return target_mech\n else:\n continue\n else:\n continue\n\n if not self.learning:\n raise ProcessError(\"PROGRAM ERROR: _check_for_target_mechanism should only be called\"\n \" for a process if it has a learning specification\")\n\n target_mechs = list(mech_tuple.mechanism\n for mech_tuple in self._mech_tuples\n if (isinstance(mech_tuple.mechanism, ObjectiveMechanism) and\n mech_tuple.mechanism.learning_role is TARGET))\n\n if not target_mechs:\n\n # Trace projections to first learning ObjectiveMechanism (which is for the last mechanism in the process)\n # (in case terminal mechanism of process is part of another process that has learning implemented)\n # in which case, shouldn't assign target ObjectiveMechanism, but rather WeightedError ObjectiveMechanism)\n target_mech = trace_learning_objective_mechanism_projections(self._monitoring_mech_tuples[0][0])\n if target_mech:\n if self.prefs.verbosePref:\n warnings.warn(\"{} itself has no Target Mechanism, but its TERMINAL_MECHANISM ({}) \"\n \"appears to be in one or more pathways ({}) that has one\".\n format(self.name,\n # list(self.terminalMechanisms)[0].name,\n self.lastMechanism.name,\n list(process.name for process in target_mech.processes)))\n self.targetMechanism = None\n else:\n\n raise ProcessError(\"PROGRAM ERROR: {} has a learning specification ({}) \"\n \"but no TARGET ObjectiveMechanism\".format(self.name, self.learning))\n\n elif len(target_mechs) > 1:\n target_mech_names = 
list(targetMechanism.name for targetMechanism in target_mechs)\n raise ProcessError(\"PROGRAM ERROR: {} has more than one targetMechanism mechanism: {}\".\n format(self.name, target_mech_names))\n\n else:\n self.targetMechanism = target_mechs[0]\n self._target_mech_tuples.append(MechanismTuple(target_mechs[0], None, None))\n if self.prefs.verbosePref:\n print(\"\\'{}\\' assigned as TARGET ObjectiveMechanism for output of \\'{}\\'\".\n format(self.targetMechanism.name, self.name))", "def __init__(self, mechanism, security_protocol=None):\n\n self.mechanism = mechanism\n self.handshake_version = None\n self.auth_version = None\n self.security_protocol = security_protocol\n self._broker_connection = None", "def get(cls, algname):\n return cls._list[algname] if algname in cls._list else None", "def names(self):\n return list(item.name for item in self.mechanisms)", "def get_technique_related_to_technique():\n global technique_related_to_technique\n\n if not technique_related_to_technique:\n technique_related_to_technique = rsh.technique_related_to_technique(get_srcs())\n\n return technique_related_to_technique", "def test_start_wrong_mechanism(self):\r\n success, mechanism, response = self.sasl.start(mechanism='WRONG')\r\n self.assertFalse(success)\r\n self.assertEqual(mechanism, 'WRONG')\r\n self.assertIsNone(response)\r\n self.assertEqual(self.sasl.getError(), 'None of the mechanisms listed meet all required properties')", "def get_hardware(cls, hardware_name):\n if cls.hardware_dict is None:\n # Init the hardware_dict once.\n cls.hardware_dict = {hw.name: hw for hw in cls.get_pb().hardware}\n return cls.hardware_dict.get(hardware_name)", "def get_technique_mitigated_by_mitigation():\n global technique_mitigated_by_mitigation\n\n if not technique_mitigated_by_mitigation:\n technique_mitigated_by_mitigation = rsh.technique_mitigated_by_mitigation(get_srcs())\n\n return technique_mitigated_by_mitigation", "def metaphor_magnet(word):\r\n try:\r\n link = \"http://ngrams.ucd.ie/metaphor-magnet-acl/q?kw=\" + word\r\n f = urllib.request.urlopen(link)\r\n myfile = f.read()\r\n myfile = myfile.decode()\r\n data = myfile[myfile.index(\"data.setCell\"):myfile.rindex(\"data.setCell\")]\r\n data = data.split(\"data.setCell\")\r\n choice = 1\r\n magnet_list = []\r\n while choice < len(data)-1:\r\n words = data[choice][data[choice].index(\"\\\"\")+1:data[choice].rindex(\",\")]\r\n magnet_list.append(words)\r\n choice +=3 \r\n \r\n return magnet_list\r\n # if nothing is found, try again with a different word\r\n except ValueError:\r\n choice = random.randint(0,len(RAND_LIST)-1)\r\n return metaphor_magnet(RAND_LIST[choice])", "def thing_percept(self, thing, agent):\n return thing.__class__.__name__", "def get_parameter(self, topic):\n \n for attr in self.parm_list:\n if attr.topic == topic:\n return attr\n\n self.logging.error(\"Can't find topic: \"+topic)\n return None", "def find_material(material):\n for libn,tdict in liblist:\n if material in tdict:\n print(libn)", "def find_mip(subsystem, direction, mechanism, purview):\n\trepertoire = subsystem._get_repertoire(direction)\n\n\t# We default to the null MIP (the MIP of a reducible mechanism)\n\tmip = pyphi.subsystem._null_mip(direction, mechanism, purview)\n\n\t#if not purview:\n\t#\treturn mip\n\n\tphi_min = float('inf')\n\t# Calculate the unpartitioned repertoire to compare against the\n\t# partitioned ones\n\tunpartitioned_repertoire = repertoire(mechanism, purview)\n\t\n\tbipartitions = pyphi.subsystem.mip_bipartitions(mechanism, 
purview)\n\t\n\tpartitions = [] # stores the results for a given partition\n\tbipartition_list = [] # stores the bipartition schemes (the parts)\n\t\n\t# Loop over possible MIP bipartitions\n\tfor part0, part1 in bipartitions:\n\t\t# Find the distance between the unpartitioned repertoire and\n\t\t# the product of the repertoires of the two parts, e.g.\n\t\t# D( p(ABC/ABC) || p(AC/C) * p(B/AB) )\n\t\tpart1rep = repertoire(part0.mechanism, part0.purview)\n\t\tpart2rep = repertoire(part1.mechanism, part1.purview)\n\t\tpartitioned_repertoire = part1rep * part2rep\n\n\t\tphi = pyphi.utils.hamming_emd(unpartitioned_repertoire,\n\t\t\t\t\t\t\t\tpartitioned_repertoire)\n\t\tphi = round(phi, pyphi.config.PRECISION)\n\t\t\n\t\t# Add bipartition results to list\n\t\tpartition = pyphi.subsystem.Mip(direction=direction,\n\t\t\tmechanism=mechanism,\n\t\t\tpurview=purview,\n\t\t\tpartition=(part0, part1),\n\t\t\tunpartitioned_repertoire=unpartitioned_repertoire,\n\t\t\tpartitioned_repertoire=partitioned_repertoire,\n\t\t\tphi=phi)\n\t\t\n\t\tpartitions.append(partition)#.to_json())\n\t\tbipartition_list.append((part0, part1))\n\n\t\t# Update MIP if it's more minimal.\n\t\tif phi < phi_min:\n\t\t\tphi_min = phi\n\t\t\t# TODO: Use properties here to infer mechanism and purview from\n\t\t\t# partition yet access them with `.mechanism` and `.purview`.\n\t\t\tmip = partition\n\tprint(partitions)\n\tprint(bipartitions)\n\tprint(mip)\n\treturn partitions, bipartitions, mip", "def get_list_beams(self,typ,file_number):\n if typ == 'emis':\n # multiply by the mass due to ADAS\n return self.beam_emis[file_number].adas_beam\n elif typ == 'atte':\n return self.beam_atte[file_number].adas_beam\n else:\n raise NameError('No list with this name: {0}'.format(typ))", "def find_multiplex_by_name(self, multiplex_name: str) -> Multiplex:\n return self.influence_graph.find_multiplex_by_name(multiplex_name)", "def readMechanism(infile, thermo=\"\"):\n g = Solution(infile, thermo)\n __data.g = g\n __data.nsp = g.n_species", "def get_sasl_mechanisms(self):\n return self.__capabilities[\"SASL\"].split()", "def _get_mechanism_param_values(self):\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n return dict((param, value.value) for param, value in self.paramsCurrent.items()\n if isinstance(value, ParameterState) )", "def enumerate(self, capability: int = Capability.ALL) -> List[Technique]:\n\n # Update the cache for the current user\n self.find_suid()\n\n known_techniques = []\n for user, paths in self.suid_paths.items():\n for path in paths:\n binary = gtfobins.Binary.find(self.pty.which, path=path)\n if binary is not None:\n if (capability & binary.capabilities) == 0:\n continue\n\n known_techniques.append(\n Technique(user, self, binary, binary.capabilities)\n )\n\n return known_techniques" ]
[ "0.65825075", "0.6185662", "0.6071519", "0.5876531", "0.5666677", "0.55671114", "0.55444735", "0.5027946", "0.50223094", "0.49115574", "0.4835837", "0.4697217", "0.4634386", "0.4627337", "0.4619326", "0.46091038", "0.46083066", "0.45971072", "0.45962238", "0.45860544", "0.45553407", "0.45415863", "0.45253927", "0.44690883", "0.44596165", "0.44348085", "0.44218734", "0.44202584", "0.43955547", "0.43880644" ]
0.7015279
0
Return first mechanism tuple containing specified mechanism from the list of mech_tuples
def _get_tuple_for_mech(self, mech):
    if list(item.mechanism for item in self.mech_tuples).count(mech):
        if self.owner.verbosePref:
            print("PROGRAM ERROR: {} found in more than one mech_tuple in {} in {}".
                  format(append_type_to_name(mech), self.__class__.__name__, self.owner.name))
    return next((mech_tuple for mech_tuple in self.mech_tuples if mech_tuple.mechanism is mech), None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tuple_for_process(self, process):\n # FIX:\n # if list(item[MECHANISM] for item in self.mech_tuples).count(mech):\n # if self.owner.verbosePref:\n # print(\"PROGRAM ERROR: {} found in more than one mech_tuple in {} in {}\".\n # format(append_type_to_name(mech), self.__class__.__name__, self.owner.name))\n return next((ProcessTuple for ProcessTuple in self.process_tuples if ProcessTuple.process is process), None)", "def __getitem__(self, item):\n # return list(self.mech_tuples[item])[MECHANISM]\n return self.mech_tuples[item].mechanism", "def first(pair):\n\treturn pair[0]", "def mech_tuples_sorted(self):\n return sorted(self.mech_tuples, key=lambda mech_tuple: mech_tuple[0].name)", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _one_emotion(emotions_lst, emotion):\n for cur_el in emotions_lst:\n if cur_el[0] == emotion:\n return cur_el\n return None", "def _item_from_first_of(providers, looking_for):\n for (provider, container) in providers:\n try:\n return (provider, _item_from(container, provider.index))\n except _EXTRACTION_EXCEPTIONS:\n pass\n providers = [p[0] for p in providers]\n raise exceptions.NotFound(\n \"Unable to find result %r, expected to be able to find it\"\n \" created by one of %s but was unable to perform successful\"\n \" extraction\" % (looking_for, providers))", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def get_message(self, sender=None):\n if sender == None:\n if self.public_key == None:\n return None\n participant = self.public_key\n else:\n participant = sender\n following = [tx.follow for block in self.__chain for tx in block.chipsactions if tx.sender == participant] \n tx_recipient2 = [tx.message for block in self.__chain for tx in block.messsactions if tx.follower in following]\n print(\"tx_recipient2\")\n print(tx_recipient2)\n return tx_recipient2", "def get_motif_from_meme(meme, motif=\"MOTIF\"):\n name = \"\"\n areapwm = {}\n areapwm[\"A\"] = []\n areapwm[\"C\"] = []\n areapwm[\"G\"] = []\n areapwm[\"T\"] = []\n flag = 0\n check = 0\n with open(meme, \"r\") as f1:\n for line in f1:\n if line.startswith('MOTIF'):\n if line.split(\" \")[1] == motif:\n # if str(motif) in line:\n name = line.split(\" \")[1]\n flag += 1\n if \"letter-probability\" in line and flag == 1:\n w = line.split(\" \")[5]\n flag += 1\n continue\n if flag == 2 and int(check) < int(w):\n # print line\n if line == \"\\n\":\n continue\n else:\n words = line.split()\n areapwm[\"A\"].append(float(words[0]))\n areapwm[\"C\"].append(float(words[1]))\n areapwm[\"G\"].append(float(words[2]))\n areapwm[\"T\"].append(float(words[3]))\n check += 1\n return areapwm, name", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def existing_paradigm(aff_accepted, morphemes):\n for t in morphemes:\n if set(morphemes[t][1]) == set(aff_accepted):\n return t\n return None", "def get_string_from_tuple_list(lstTuples, number):\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack", "def get_func_tuples():\n func_tuples = [\n ('met_gumeJ1_3sopt_tr20', 'Rel-UME J1', 'C1-.'),\n ('met_gumeJ5_3sopt_tr20', 'Rel-UME J5', 'r-^'),\n ('met_gfssdJ1_3sopt_tr20', 'Rel-FSSD J1', 'C4--'),\n ('met_gfssdJ5_3sopt_tr20', 'Rel-FSSD J5', 'b-x'),\n\n ('met_gmmd_med', 
'Rel-MMD', 'k-.'),\n ('met_gmmd_med_bounliphone', 'Rel-MMD medboun', 'k-'),\n\n ('met_gfssdJ1_3sopt_tr50', 'FSSD-opt3 J1', 'b-^'),\n ('met_gfssdJ5_3sopt_tr50', 'FSSD-opt3 J5', 'b-.h'),\n\n ('met_gumeJ1_2V_rand', 'UME-rand J1', 'r--^'),\n ('met_gumeJ1_1V_rand', 'UME-rand J1 1V', 'y-'),\n ('met_gumeJ2_2V_rand', 'UME-rand J2', 'g--^'),\n ('met_gumeJ3_2V_rand', 'UME-rand J3', 'b--^'),\n ('met_gumeJ5_2V_rand', 'UME-rand J5', 'k--^'),\n\n ('met_gumeJ1_2sopt_tr20', 'Rel-UME-opt2 J1', 'C2-.'),\n ('met_gumeJ5_2sopt_tr20', 'Rel-UME-opt2 J5', 'g-'),\n ('met_gumeJ1_2sopt_tr50', 'Rel-UME-opt2 J1', 'r-.h'),\n\n ('met_gumeJ1_3sopt_tr50', 'UME-opt3 J1', 'r-'),\n ('met_gumeJ5_3sopt_tr50', 'UME-opt3 J5', 'k-'),\n\n\n ]\n return func_tuples", "def find_which(self, mu, which_to_find):\n ind, which = self.find(mu)\n if ind is not None and which_to_find in which:\n return which[which_to_find]\n else:\n return None", "def _selectParseHeader(self, header):\n log.debug(\"Finding an authenticator for {0}\".format(header))\n scheme, elements = header.split(' ', 1)\n for fact in self._credentialFactories:\n if fact.scheme.lower() == scheme.lower():\n log.debug(\"Found an authenticator: {0}\".format(fact))\n return (fact, elements)\n log.warn(\"No matching authenticator found for {0}\".format(scheme))\n return (None, None)", "def task9_find_before_tuple(lst):\n result = 0\n for elem in lst:\n if isinstance(elem, tuple):\n result = lst[lst.index(elem) - 1]\n break\n return result", "def get_single_value_from_beams(plan, keyword):\n\n values = set()\n\n for beam in plan.BeamSequence:\n try:\n value = getattr(beam, keyword)\n except AttributeError:\n continue\n\n try:\n values.add(value)\n except TypeError:\n values.add(tuple(value))\n\n if not values:\n raise DICOMEntryMissing(f\"{keyword} was not found within the plan\")\n\n if len(values) > 1:\n raise ValueError(f\"More than one disagreeing {keyword} found\")\n\n return values.pop()", "def find(data, teacher):\r\n result = []\r\n for rownum, row in enumerate(data):\r\n for colnum, col in enumerate(row):\r\n if teacher in str(col):\r\n result.append((rownum, col))\r\n return result", "def get_material(material):\n for libn,tdict in liblist:\n if material in tdict:\n return tdict[material]\n print (material, \" not found\")\n raise KeyError", "def find_pass(pass_list, service):\r\n for pass_info in pass_list:\r\n if pass_info[1] == service:\r\n return pass_info[2]", "def get_strategy_with_mid(bdaqmid):\n\n strats = []\n\n for strat in self.stratgroup.strategies:\n if bdaqmid in strat.get_marketids()[const.BDAQID]:\n strats.append(strat)\n\n # sort according to utick attribute\n strats.sort(key = attrgetter(UTICK))\n\n if strats:\n return strats[0]\n return None", "def find_summon_point(self) -> tuple:\n for name in self.names:\n point = self.compare(name)\n if point is not None:\n return point\n\n self.logger.info(f\"No {self.names}\")\n self.logger.info(\"Random choose one from top 3 summons\")\n return self.random_chose_summon(3)", "def pick(self, mess, args):\n return random.choice(args)", "def infer_emg_channels(ch_names):\n emg = ['EMG Chin']\n found = []\n\n # find frontal channel\n for ch in ch_names:\n if any([x in ch for x in emg]):\n found.append(ch)\n return found", "def _extract_weight_tuples(model):\n mlist = get_modules(model)\n return tuple([(m,'weight') for m in mlist])", "def pick(ln, edge, get_edge):\n me = [x for x in edge_map[edge] if x != ln][0]\n mtile = tiles[me]\n for mtile in moves(mtile):\n if edge == get_edge(mtile):\n break\n return me, 
mtile", "def mechanism(self):" ]
[ "0.6098379", "0.59788513", "0.5090942", "0.50808626", "0.5065904", "0.5063802", "0.49735767", "0.4960984", "0.4960984", "0.47289914", "0.46709287", "0.4653381", "0.4653381", "0.46489593", "0.46172774", "0.46143988", "0.4596028", "0.45947242", "0.4572531", "0.45490372", "0.45208463", "0.4513514", "0.44778895", "0.445165", "0.44507307", "0.44473648", "0.44424087", "0.44410664", "0.44340906", "0.44308472" ]
0.753147
0
Return list of mech_tuples sorted by mechanism name
def mech_tuples_sorted(self):
    return sorted(self.mech_tuples, key=lambda mech_tuple: mech_tuple[0].name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _toposort_with_ordered_mech_tuples(self, data):\n result = []\n for dependency_set in toposort(data):\n d_iter = iter(dependency_set)\n result.extend(sorted(dependency_set, key=lambda item : next(d_iter).mechanism.name))\n return result", "def process_tuples_sorted(self):\n return sorted(self.process_tuples, key=lambda process_tuple: process_tuple[0].name)", "def names(self):\n return list(item.name for item in self.mechanisms)", "def ordered_channel_names(self):\n channel_list = []\n for k in self.__dict__.keys():\n if k.startswith('channel_'):\n channel_list.append(\n [int(k.split('channel_')[1]), self.__dict__[k]]\n )\n channel_list.sort()\n if len(channel_list) == 0:\n print('********* warning!! empty channel list - are there ay channel_N attributes? ')\n return [i[1] for i in channel_list]", "def monomers(self):\n return sorted(set([self[x.split(\"_\")[-1]][\"name\"] for x in self.keys]), key=lambda x: -len(x))", "def protocol_names(self):\n\n return tuple([k.name for k in self.query(Protocol).order_by(Protocol.name)])", "def get_listu_analitickih_metoda(self):\n popis = sorted(list(self.analitickeMetode.keys()))\n return popis", "def _get_tuple_for_mech(self, mech):\n if list(item.mechanism for item in self.mech_tuples).count(mech):\n if self.owner.verbosePref:\n print(\"PROGRAM ERROR: {} found in more than one mech_tuple in {} in {}\".\n format(append_type_to_name(mech), self.__class__.__name__, self.owner.name))\n return next((mech_tuple for mech_tuple in self.mech_tuples if mech_tuple.mechanism is mech), None)", "def get_sorted_topics(self, bow):\n return sorted(self.lda[bow], key=lambda x: x[1], reverse=True)", "def processNames(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process.name for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process.name for item in self.process_tuples))\n # MODIFIED 11/1/16 END", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def meters_names(self):\n return sorted(self.meters.keys())", "def mechanisms(self):\n return list(self)", "def sorted_gnames():\n return sorted(group_names.keys())", "def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset.lower() in pair:\n holder.append(pair.upper())\n pairsByTickers[asset] = holder\n return pairsByTickers", "def get_results(cls):\n cls.all_hoechstzahls.sort(key=attrgetter('value', 'topic.category.weight'), reverse=True)\n for hoechstzahl in cls.all_hoechstzahls:\n yield hoechstzahl", "def find_elements(channel_names):\n\n elements = []\n for i in range(1, 110, 1): \n elements.append(str(ELEMENTS[i].symbol))\n\n elements = sorted(set(channel_names) & set(elements), key = channel_names.index)\n\n return elements", "def _getSortedFactoryList(self):\n def myfilter(fact):\n if fact.get_rank() < 64 :\n return False\n klass = fact.get_klass()\n if not (\"Demuxer\" in klass or \"Decoder\" in klass or \"Parse\" in klass):\n return False\n return True\n reg = gst.registry_get_default()\n res = [x for x in reg.get_feature_list(gst.ElementFactory) if myfilter(x)]\n res.sort(lambda a, b: int(b.get_rank() - a.get_rank()))\n return res", "def dmc_order(self):\n return sorted(self.lookup_table, key=lambda clr: int(clr.id) if clr.id.isdigit() else 0)", "def get_listu_komponenti(self):\n popis = sorted(list(self.komponente.keys()))\n return popis", "def 
get_sorted_suit_list(self):\n return [x[0] for x in sorted(self.suit_dict.items(), key=lambda x: x[1], reverse=True)]", "def lexers():\n result = [(lexer[0], lexer[1][0]) for lexer in get_all_lexers()]\n result.sort()\n return result", "def list_systems():\n return sorted(systems.keys())", "def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset.lower() in pair:\n holder.append(pair)\n pairsByTickers[asset] = holder\n return pairsByTickers", "def sorted_herbivores(self):\n fitness_dict = {herb: herb.fitness for herb in self.herbivores}\n sorted_tuples = sorted(fitness_dict.items(), key=lambda x: x[1], reverse=False)\n\n return sorted_tuples", "def get_list(self):\n return sorted(self.__entries.keys())", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def all_mechanism_types():\n global _mechtype_cache\n if _mechtype_cache is None:\n _mechtype_cache = collections.OrderedDict()\n mname = neuron.h.ref('')\n # Iterate over two mechanism types (distributed, point/artificial)\n for i in [0, 1]:\n mt = neuron.h.MechanismType(i)\n nmech = int(mt.count())\n # Iterate over all mechanisms of this type\n for j in range(nmech):\n mt.select(j)\n mt.selected(mname)\n \n # General mechanism properties\n name = mname[0] # convert hoc string ptr to python str\n \n desc = {\n 'point_process': bool(i),\n 'netcon_target': bool(mt.is_netcon_target(j)),\n 'has_netevent': bool(mt.has_net_event(j)),\n 'artificial_cell': bool(mt.is_artificial(j)),\n 'internal_type': int(mt.internal_type()),\n }\n \n # Collect information about 4 different types of variables\n for k,ptype in [(-1, 'globals'), (1, 'parameters'), \n (2, 'assigned'), (3, 'state')]:\n desc[ptype] = {} # collections.OrderedDict()\n ms = neuron.h.MechanismStandard(name, k)\n for l in range(int(ms.count())):\n psize = ms.name(mname, l)\n pname = mname[0] # parameter name\n desc[ptype][pname] = int(psize)\n \n # Assemble everything in one place\n _mechtype_cache[name] = desc\n \n return _mechtype_cache", "def organisms_to_tuples(organisms):\n tuples = []\n for organism in organisms:\n for record in organism[\"records\"]:\n genes = seqrecord_to_tuples(record, organism[\"name\"])\n tuples.extend(genes)\n return tuples", "def UniqueBehaviors(hotkey_data):\n return sorted(set((behavior, description) for (behavior, _, description)\n in hotkey_data),\n cmp=lambda x, y: cmp(ToMessageName(x[0]), ToMessageName(y[0])))" ]
[ "0.6864003", "0.6052932", "0.5644417", "0.5614844", "0.5578877", "0.54707015", "0.5399016", "0.5365707", "0.52200633", "0.52057385", "0.51932687", "0.51804656", "0.5163078", "0.5157249", "0.51540923", "0.5145842", "0.51417196", "0.51395947", "0.51385456", "0.5123252", "0.51197815", "0.51188606", "0.51162344", "0.51043457", "0.50965744", "0.5096055", "0.50954425", "0.5021044", "0.49871016", "0.49849105" ]
0.8169331
0
Return list of all mechanisms in MechanismList
def mechanisms(self):
    return list(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mechanisms(self):\n return self._allMechanisms.mechanisms", "def all_mechanism_types():\n global _mechtype_cache\n if _mechtype_cache is None:\n _mechtype_cache = collections.OrderedDict()\n mname = neuron.h.ref('')\n # Iterate over two mechanism types (distributed, point/artificial)\n for i in [0, 1]:\n mt = neuron.h.MechanismType(i)\n nmech = int(mt.count())\n # Iterate over all mechanisms of this type\n for j in range(nmech):\n mt.select(j)\n mt.selected(mname)\n \n # General mechanism properties\n name = mname[0] # convert hoc string ptr to python str\n \n desc = {\n 'point_process': bool(i),\n 'netcon_target': bool(mt.is_netcon_target(j)),\n 'has_netevent': bool(mt.has_net_event(j)),\n 'artificial_cell': bool(mt.is_artificial(j)),\n 'internal_type': int(mt.internal_type()),\n }\n \n # Collect information about 4 different types of variables\n for k,ptype in [(-1, 'globals'), (1, 'parameters'), \n (2, 'assigned'), (3, 'state')]:\n desc[ptype] = {} # collections.OrderedDict()\n ms = neuron.h.MechanismStandard(name, k)\n for l in range(int(ms.count())):\n psize = ms.name(mname, l)\n pname = mname[0] # parameter name\n desc[ptype][pname] = int(psize)\n \n # Assemble everything in one place\n _mechtype_cache[name] = desc\n \n return _mechtype_cache", "def values(self):\n return list(item.value for item in self.mechanisms)", "def filter_mechanism_list(mechanisms, properties, allow_insecure = False,\n server_side = False):\n # pylint: disable=W0212\n result = []\n for mechanism in mechanisms:\n try:\n if server_side:\n klass = SERVER_MECHANISMS_D[mechanism]\n else:\n klass = CLIENT_MECHANISMS_D[mechanism]\n except KeyError:\n logger.debug(\" skipping {0} - not supported\".format(mechanism))\n continue\n secure = properties.get(\"security-layer\")\n if not allow_insecure and not klass._pyxmpp_sasl_secure and not secure:\n logger.debug(\" skipping {0}, as it is not secure\".format(mechanism))\n continue\n if not klass.are_properties_sufficient(properties):\n logger.debug(\" skipping {0}, as the properties are not sufficient\"\n .format(mechanism))\n continue\n result.append(mechanism)\n return result", "def names(self):\n return list(item.name for item in self.mechanisms)", "def list(self):\n\n result = []\n for i in self.bots:\n result.append(i.name)\n return result", "def list_characteristics(self):\n raise NotImplementedError", "def execution_graph_mechs(self):\n return list(mech_tuple[0] for mech_tuple in self.executionGraph)", "def list(self):\n return self._get_list()", "def getEssentialList(self):\n return self.essentials", "def getList(self):\n pass", "def phis_lst ( self ) :\n return self.__phi_list", "def list_motions(self):\n return [x for x,y in self.devices.items() if y.device_type == \"Motion\"]", "def getList(self):", "def getList(self):", "def getAll(self):\n return self.__lst", "def residueList(self):\n\n\t\trl = []\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\trl.append(res)\n\n\t\treturn rl", "def get_all_chains() -> List[ChainInfo]:\n return list(registry.chain_dict.values())", "def list_systems():\n return sorted(systems.keys())", "def getList(self):\n\treturn self.list", "def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al", "def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = 
ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]", "def mechanism(self):", "def circuit_list(self):\r\n return self.circuits.itervalues()", "def get_sasl_mechanisms(self):\n return self.__capabilities[\"SASL\"].split()", "def list_methods(self):\n return list(self.methods.keys())", "def _list_modules():\r\n return [\r\n desc.module_class\r\n for desc\r\n in _list_descriptors()\r\n ]", "def connected_emulators(self, host=enums.JLinkHost.USB):\n res = self._dll.JLINKARM_EMU_GetList(host, 0, 0)\n if res < 0:\n raise errors.JLinkException(res)\n\n num_devices = res\n info = (structs.JLinkConnectInfo * num_devices)()\n num_found = self._dll.JLINKARM_EMU_GetList(host, info, num_devices)\n if num_found < 0:\n raise errors.JLinkException(num_found)\n\n return list(info)[:num_found]", "def manufacturers(self):\n return self._manufacturers", "def list(self):\n return [self.inUse, self.type, self.previousBlock, self.amount,\n self.blocks, self.nextBlock, self.items]" ]
[ "0.7457537", "0.6442857", "0.64399815", "0.6387835", "0.63475055", "0.57350904", "0.563451", "0.55697984", "0.556451", "0.55643797", "0.55007714", "0.54672724", "0.5466031", "0.5441859", "0.5441859", "0.54243684", "0.54232997", "0.54208225", "0.54185826", "0.5391006", "0.5389896", "0.53768057", "0.5372667", "0.53693146", "0.5336544", "0.5313915", "0.5308842", "0.5302608", "0.5300176", "0.52746713" ]
0.8228584
0
Return names of all mechanisms in MechanismList
def names(self):
    return list(item.name for item in self.mechanisms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mechanisms(self):\n return list(self)", "def mechanisms(self):\n return self._allMechanisms.mechanisms", "def all_mechanism_types():\n global _mechtype_cache\n if _mechtype_cache is None:\n _mechtype_cache = collections.OrderedDict()\n mname = neuron.h.ref('')\n # Iterate over two mechanism types (distributed, point/artificial)\n for i in [0, 1]:\n mt = neuron.h.MechanismType(i)\n nmech = int(mt.count())\n # Iterate over all mechanisms of this type\n for j in range(nmech):\n mt.select(j)\n mt.selected(mname)\n \n # General mechanism properties\n name = mname[0] # convert hoc string ptr to python str\n \n desc = {\n 'point_process': bool(i),\n 'netcon_target': bool(mt.is_netcon_target(j)),\n 'has_netevent': bool(mt.has_net_event(j)),\n 'artificial_cell': bool(mt.is_artificial(j)),\n 'internal_type': int(mt.internal_type()),\n }\n \n # Collect information about 4 different types of variables\n for k,ptype in [(-1, 'globals'), (1, 'parameters'), \n (2, 'assigned'), (3, 'state')]:\n desc[ptype] = {} # collections.OrderedDict()\n ms = neuron.h.MechanismStandard(name, k)\n for l in range(int(ms.count())):\n psize = ms.name(mname, l)\n pname = mname[0] # parameter name\n desc[ptype][pname] = int(psize)\n \n # Assemble everything in one place\n _mechtype_cache[name] = desc\n \n return _mechtype_cache", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def names(self) -> list[str]:", "def names(self) -> List:\n ...", "def names(self):\n\t\treturn", "def outputStateNames(self):\n names = []\n for item in self.mechanisms:\n for output_state in item.outputStates:\n names.append(output_state)\n return names", "def namelist(self):\n return []", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval", "def list(self):\n\n result = []\n for i in self.bots:\n result.append(i.name)\n return result", "def names(cls) -> List[str]:", "def getNames(self) -> List[unicode]:\n ...", "def names(self):\n return [da.name for da in self]", "def names(self):\n return self.__names", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def get_names(self):\n\n return self.mod_suites.keys()", "def get_real_motoneurons_names(self):\n\t\treturn self._realMotoneuronsNames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names():\n pass", "def getPeripheralNames(self):\n pass", "def values(self):\n return list(item.value for item in self.mechanisms)", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def names(self):\n return [x for x in self._dict.keys()]", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]" ]
[ "0.7161391", "0.7059877", "0.62531763", "0.61171705", "0.61171705", "0.605576", "0.605576", "0.60354906", "0.59550285", "0.5927225", "0.59185094", "0.5845466", "0.5844643", "0.58219707", "0.57987136", "0.572241", "0.5700778", "0.5663072", "0.564435", "0.56276786", "0.5585007", "0.5575553", "0.5572505", "0.5572505", "0.5566355", "0.5554015", "0.555389", "0.55412936", "0.5539257", "0.5527189" ]
0.7593656
0
Return values of all mechanisms in MechanismList
def values(self):
    return list(item.value for item in self.mechanisms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mechanisms(self):\n return list(self)", "def mechanisms(self):\n return self._allMechanisms.mechanisms", "def all_mechanism_types():\n global _mechtype_cache\n if _mechtype_cache is None:\n _mechtype_cache = collections.OrderedDict()\n mname = neuron.h.ref('')\n # Iterate over two mechanism types (distributed, point/artificial)\n for i in [0, 1]:\n mt = neuron.h.MechanismType(i)\n nmech = int(mt.count())\n # Iterate over all mechanisms of this type\n for j in range(nmech):\n mt.select(j)\n mt.selected(mname)\n \n # General mechanism properties\n name = mname[0] # convert hoc string ptr to python str\n \n desc = {\n 'point_process': bool(i),\n 'netcon_target': bool(mt.is_netcon_target(j)),\n 'has_netevent': bool(mt.has_net_event(j)),\n 'artificial_cell': bool(mt.is_artificial(j)),\n 'internal_type': int(mt.internal_type()),\n }\n \n # Collect information about 4 different types of variables\n for k,ptype in [(-1, 'globals'), (1, 'parameters'), \n (2, 'assigned'), (3, 'state')]:\n desc[ptype] = {} # collections.OrderedDict()\n ms = neuron.h.MechanismStandard(name, k)\n for l in range(int(ms.count())):\n psize = ms.name(mname, l)\n pname = mname[0] # parameter name\n desc[ptype][pname] = int(psize)\n \n # Assemble everything in one place\n _mechtype_cache[name] = desc\n \n return _mechtype_cache", "def names(self):\n return list(item.name for item in self.mechanisms)", "def __getitem__(self, item):\n # return list(self.mech_tuples[item])[MECHANISM]\n return self.mech_tuples[item].mechanism", "def outputStateValues(self):\n values = []\n for item in self.mechanisms:\n for output_state_name, output_state in list(item.outputStates.items()):\n values.append(output_state.value)\n return values", "def as_values(self):\n return [x for x in map(lambda h: h.as_values(), self.hands_list)]", "def _get_mechanism_param_values(self):\n from PsyNeuLink.Components.States.ParameterState import ParameterState\n return dict((param, value.value) for param, value in self.paramsCurrent.items()\n if isinstance(value, ParameterState) )", "def mechanism(self):", "def list(self):\n if self.handle == None: return []\n return self.handle.variables.keys()", "def phis_lst ( self ) :\n return self.__phi_list", "def get_all_motors():\n return mc.get('motor_values')", "def execution_graph_mechs(self):\n return list(mech_tuple[0] for mech_tuple in self.executionGraph)", "def circuit_list(self):\r\n return self.circuits.itervalues()", "def list_characteristics(self):\n raise NotImplementedError", "def filter_mechanism_list(mechanisms, properties, allow_insecure = False,\n server_side = False):\n # pylint: disable=W0212\n result = []\n for mechanism in mechanisms:\n try:\n if server_side:\n klass = SERVER_MECHANISMS_D[mechanism]\n else:\n klass = CLIENT_MECHANISMS_D[mechanism]\n except KeyError:\n logger.debug(\" skipping {0} - not supported\".format(mechanism))\n continue\n secure = properties.get(\"security-layer\")\n if not allow_insecure and not klass._pyxmpp_sasl_secure and not secure:\n logger.debug(\" skipping {0}, as it is not secure\".format(mechanism))\n continue\n if not klass.are_properties_sufficient(properties):\n logger.debug(\" skipping {0}, as the properties are not sufficient\"\n .format(mechanism))\n continue\n result.append(mechanism)\n return result", "def _retrieve_fun(self):\n results = []\n for (dst, answer, ses) in self.mgr:\n encoded_pairs = ([], [])\n if answer:\n try:\n encoded_pairs = ses.decode_response(answer)\n\n except error.SNMPError:\n # SNMP errors lead to empty responses\n pass\n \n 
results.append(encoded_pairs)\n \n return results", "def values(self) -> List:\n pass", "def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]", "def valuerefs(self):\r\n return self.data.values()", "def __call__(self):\n return self._main._values()", "def values(self):\r\n return [self[k] for k in self]", "def values(self):\n\t\treturn self.myVals", "def getitems(self):\n if self.onlydiag():\n return self.getdiag()\n else:\n return self.items()", "def getEssentialList(self):\n return self.essentials", "def lists(self):\n return dict.items(self)", "def getPredictorList(self):\n return self.non_miList + self.miList", "def getList(self):", "def getList(self):", "def phi_list ( self ) :\n return self.__phi_list" ]
[ "0.77822113", "0.72421145", "0.6594636", "0.6244238", "0.62421274", "0.6206884", "0.59909046", "0.5811207", "0.56841445", "0.56830424", "0.56811684", "0.56572896", "0.56133693", "0.55849606", "0.5560125", "0.55568486", "0.5521477", "0.55109787", "0.54702985", "0.5464897", "0.545058", "0.5420969", "0.54205734", "0.54157346", "0.5407766", "0.539586", "0.53932106", "0.537448", "0.537448", "0.53693056" ]
0.7669883
1
Return names of all outputStates for all mechanisms in MechanismList
def outputStateNames(self): names = [] for item in self.mechanisms: for output_state in item.outputStates: names.append(output_state) return names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outputStateValues(self):\n values = []\n for item in self.mechanisms:\n for output_state_name, output_state in list(item.outputStates.items()):\n values.append(output_state.value)\n return values", "def _assign_output_states(self):\n for mech in self.terminalMechanisms.mechanisms:\n self.outputStates[mech.name] = mech.outputStates", "def get_output_states(self):\n return self.states[-self.num_output_states:]", "def get_output_names(self):\n outputNames = []\n for outVar in self.outputs:\n # outVar is of type InOutVar and the object that it contains is a PyFMI variable\n outputNames.append(outVar.get_object().name)\n return outputNames", "def get_output_names():\n names = [device.name for device in get_devices() if device.is_output]\n return list(sorted(names))", "def getOutputsNames(self):\n self.layersNames = self.net.getLayerNames()\n # Get the names of the output layers, i.e. the layers with unconnected outputs\n return [self.layersNames[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]", "def output_names(self):\n return []", "def final_states(self):\n return list(self.iter_final_states())", "def _list_outputs(self):\n \n outputs = self._outputs().get()\n return outputs", "def get_switch_states(self):\n switches_states = []\n for connection in self.connections:\n if connection.start.is_switch_output():\n switches_states.append((connection.start.switch,\n connection.start.output_nr))\n if connection.end.is_switch_output():\n switches_states.append((connection.end.switch,\n connection.end.output_nr))\n return switches_states", "def get_output_names(hf):\n return sorted(map(str, hf['/output/data'].keys()))", "def serialize(self):\n return self.output_names", "def outputs() -> List[str]:\n return Invocation.current.expanded_outputs", "def _getWorklistStateNames(self, worklistname):\n results = [s.getName()\n for s in self.sm.getStates(no_duplicates = 1)\n if s.getTaggedValue('worklist') == worklistname]\n log.debug(\"Associated with worklist '%s' are the \"\n \"following states: %r.\", worklistname, results)\n return results", "def getstate(self):\n return [elem.getstate() for elem in self]", "def get_sink_states(self):\n state1 = State(4, 2)\n return [state1]", "def state_info_specs(self):\n return list()", "def machines():\n return [name for name, state in _status()]", "def list_states(verbose=1):\n statefile = qcodes.config.get('statefile', None)\n if statefile is None:\n statefile = os.path.join(os.path.expanduser('~'), 'qtt_statefile.hdf5')\n if not os.path.exists(statefile):\n return []\n with h5py.File(statefile, 'r') as h5group:\n tags = list(h5group.keys())\n if verbose:\n print('states on system from file %s: ' % (statefile, ), end='')\n print(', '.join([str(x) for x in tags]))\n return tags", "def get_list_of_states(self):\n return self.states", "def getOutputsNames(net):\r\n # Get the names of all the layers in the network\r\n layersNames = net.getLayerNames()\r\n # Get the names of the output layers, i.e. the layers with unconnected outputs\r\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]", "def getOutputsNames(net):\n # Get the names of all the layers in the network\n layersNames = net.getLayerNames()\n # Get the names of the output layers, i.e. 
the layers with unconnected outputs\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]", "def output_names(self):\n msg = \"output_names() is not implemented by op {}\"\n raise NotImplementedError(msg.format(self.op_type))", "def states_list(self, states):\n self.log('List of states: [{}]'.format(\n ' | '.join([(lambda x: x[1:])(s) for s in\n states.keys()])))\n return", "def getOutputNames(self):\n return self.dirs", "def get_outputs(self):\n return [x[1] for x in self.io_mapping]", "def state_list(self) -> Sequence[TState]:\n pass", "def States(self) -> List[Callable]:\r\n\t\treturn self.__STATES__", "def states(self) -> Type[Any]:\n return []", "def get_outputs(self):\n outputs = []\n missing = []\n for i, name in enumerate(self.output_names[:]):\n try:\n value = self.proto.output_env.look_up(name).unwrapped\n except Exception:\n if self.optional_flags[i]:\n value = None\n missing.append((i, name))\n else:\n raise\n outputs.append(value)\n for i, name in reversed(missing):\n del outputs[i]\n del self.output_names[i]\n del self.optional_flags[i]\n if missing:\n return outputs, reversed(missing)\n return outputs" ]
[ "0.7530865", "0.70368683", "0.66437596", "0.6507888", "0.64815927", "0.6261194", "0.618265", "0.6167137", "0.61004376", "0.6096525", "0.6073762", "0.6020162", "0.5990912", "0.5978331", "0.5945884", "0.5944969", "0.5890002", "0.58796084", "0.587672", "0.58669186", "0.5857786", "0.5843482", "0.58321214", "0.5828611", "0.5800779", "0.579611", "0.5792249", "0.578834", "0.57804465", "0.5764043" ]
0.8376996
0
Return values of outputStates for all mechanisms in MechanismList
def outputStateValues(self): values = [] for item in self.mechanisms: for output_state_name, output_state in list(item.outputStates.items()): values.append(output_state.value) return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _assign_output_states(self):\n for mech in self.terminalMechanisms.mechanisms:\n self.outputStates[mech.name] = mech.outputStates", "def outputStateNames(self):\n names = []\n for item in self.mechanisms:\n for output_state in item.outputStates:\n names.append(output_state)\n return names", "def get_output_states(self):\n return self.states[-self.num_output_states:]", "def getstate(self):\n return [elem.getstate() for elem in self]", "def final_states(self):\n return list(self.iter_final_states())", "def _list_outputs(self):\n \n outputs = self._outputs().get()\n return outputs", "def get_outputs(self):\n outputs = set()\n outputs.update(self.get_interaction().get_outputs())\n return outputs", "def _state(self):\n state = [] \n for _temp in self.config[\"performance_targets\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n \n for _temp in self.config[\"states\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n\n state = np.asarray(state)\n \n return state", "def get_sink_states(self):\n state1 = State(4, 2)\n return [state1]", "def get_switch_states(self):\n switches_states = []\n for connection in self.connections:\n if connection.start.is_switch_output():\n switches_states.append((connection.start.switch,\n connection.start.output_nr))\n if connection.end.is_switch_output():\n switches_states.append((connection.end.switch,\n connection.end.output_nr))\n return switches_states", "def full_output_state(self):\n outcomes = self.fock_basis()\n return self.calculate_state_amplitudes(outcomes, reduce_state=False)", "def get_states(self):\n raise NotImplementedError()", "def outputs(self):\r\n return self._outputs", "def outputs(self):\n return self.outputs", "def get_states(self):\n return product(*[phi.automaton().states for phi in self])", "def get_state(self):\n return self.agents, self.foods, self.viruses, self.masses, self.time", "def states(self) -> Type[Any]:\n return []", "def States(self) -> List[Callable]:\r\n\t\treturn self.__STATES__", "def get_outputs(self):\n return [x[1] for x in self.io_mapping]", "def state_list(self) -> Sequence[TState]:\n pass", "def outputs(self):\n return self._outputs", "def outputs(self):\n return self._outputs", "def states(self):\n return self._x_list", "def get_hw_switch_states(self):\n raise NotImplementedError", "def S(self):\n return self._states", "def get_list_of_states(self):\n return self.states", "def heralded_output_state(self, detector_pattern):\n outcomes = self.heralded_fock_basis(detector_pattern)\n return self.calculate_state_amplitudes(outcomes, reduce_state=True)", "def getOutLogic(self, state):\n\n flops = self.__flops\n logic = []\n for flop in flops:\n #logic.append(self.__logic[flop])\n #logic.append(self.__stateProp.logic[flop])\n #thisLogic = \"\"\n #for gen in self.__logic[flop]:\n # thisLogic += gen\n logic.append(self.__logic[flop])\n \n #pdb.set_trace()\n\n if state >= 2**len(flops):\n raise Exception(\"Invalid state \" + str(state))\n\n stateStr = bin(state)[2:].rjust(len(flops), '0')\n\n output = applyInv(stateStr, logic)\n output = applyAnd(output)\n return output", "def Get_Outputs(self):\n all_hidden_states = self.Get_States()\n\n all_outputs = tf.map_fn(self.Get_Output, all_hidden_states)\n\n return all_outputs", "def get_outputs(self):\n return self.outputs" ]
[ "0.7471102", "0.7393028", "0.6943229", "0.6363082", "0.6308046", "0.6297066", "0.62822974", "0.6233221", "0.61901164", "0.6188034", "0.61457056", "0.61176836", "0.6104881", "0.60902965", "0.6057253", "0.6047594", "0.60425544", "0.6013339", "0.59914774", "0.5939767", "0.5938493", "0.5938493", "0.593828", "0.59301764", "0.5929404", "0.592763", "0.5915248", "0.5889394", "0.58328164", "0.58282876" ]
0.813667
0
test that the hexagonal number generator generates hexagonal numbers, as expected.
def test_generation(self): generator = math_helpers.hexagonal_number_generator() first_ten_hex_numbers = [next(generator) for _ in range(10)] canonical_values = [1, 6, 15, 28, 45, 66, 91, 120, 153, 190] self.assertEqual(canonical_values, first_ten_hex_numbers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_hexamethylcyclohexane(self):\n def draw(image: ShapeImage):\n image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n image.add_line((487, 350), (487, 250))\n image.add_line((574, 400), (661, 350))\n image.add_line((574, 500), (661, 550))\n image.add_line((487, 550), (487, 650))\n image.add_line((400, 500), (313, 550))\n image.add_line((400, 400), (313, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]],\n # Methyl groups\n [[487, 250]],\n [[661, 350]],\n [[661, 550]],\n [[487, 650]],\n [[313, 550]],\n [[313, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 487, 350]],\n [[487, 350, 574, 400]],\n [[574, 400, 574, 500]],\n [[574, 500, 487, 550]],\n [[487, 550, 400, 500]],\n [[400, 500, 400, 400]],\n # To methyl groups\n [[487, 350, 487, 250]],\n [[574, 400, 661, 350]],\n [[574, 500, 661, 550]],\n [[487, 550, 487, 650]],\n [[400, 500, 313, 550]],\n [[400, 400, 313, 350]]\n ])\n )", "def test_hex_helpers(self, number, expected):\n self.assertEqual(positional.from_hex(expected), number)\n self.assertEqual(positional.to_hex(number), expected)", "def test_cyclohexane(self):\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]]\n ]),\n drawer=lambda image: image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n )", "def test_generation(self):\n generator = math_helpers.pentagonal_number_generator()\n first_ten_pentagonal_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 5, 12, 22, 35, 51, 70, 92, 117, 145]\n self.assertEqual(canonical_values, first_ten_pentagonal_numbers)", "def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))", "def test_int_to_hex():\n hex_values = ['61', '62', '63', '64', '65', '66', '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f',\n '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '7a', '7b', '7c', '7d', '7e', '7f']\n index = 0\n for x in range(97, 123):\n assert pi_finder.int_to_hex(x, hex_dict) == hex_values[index]\n index += 1", "def test_hex():\n assert hex(Quantity(1, unit('m'))) == hex(1)", "def main():\n assert convert_to_hex(\"hello world\") == \"68 65 6c 6c 6f 20 77 6f 72 6c 64\"\n assert convert_to_hex(\"Big Boi\") == \"42 69 67 20 42 6f 69\"\n assert convert_to_hex(\n \"Marty Poppinson\") == \"4d 61 72 74 79 20 50 6f 70 70 69 6e 73 6f 6e\"\n assert convert_to_hex(\"abcdefghi\") == \"61 62 63 64 65 66 67 68 69\"\n assert convert_to_hex(\"oh dear\") == \"6f 68 20 64 65 61 72\"\n assert convert_to_hex(\"i hate C#\") == \"69 20 68 61 74 65 20 43 23\"\n assert convert_to_hex(\"i love C++ , not really\") == \\\n \"69 20 6c 6f 76 65 20 43 2b 2b 20 2c 20 6e 6f 74 20 72 65 61 6c 6c 79\"\n print('Passed.')", "def test_generate(self):\n cant_interations = int(random()*100)\n for i in range(cant_interations):\n number = self.sudoku.generate() # Generate number\n self.assertGreaterEqual(number, 0) # number >= 0\n self.assertLessEqual(number, 8) # number <= 8", "def test_generation_index_zero(self):\n generator = math_helpers.triangle_number_generator()\n first_eleven_triangle_numbers = [next(generator) for _ in range(11)]\n canonical_values = [0, 1, 3, 6, 10, 15, 21, 28, 
36, 45, 55]\n self.assertEqual(canonical_values, first_eleven_triangle_numbers)", "def is_hex(n):\n hex_test = (1 + sqrt(1 + 8*n))/4\n if hex_test == int(hex_test):\n return True\n return False", "def hexagonal(n: int) -> int:\n return int(n * (2 * n - 1))", "def test_toHex(self):\r\n self.assertEqual(self.black.toHex(), '#000000')\r\n self.assertEqual(self.red.toHex(), '#ff0000')\r\n self.assertEqual(self.pink.toHex(), '#640000')", "def hexagonal_number(n):\n return n * (2 * n - 1)", "def test_1_2_dimethylcyclohexane(self):\n def draw(image: ShapeImage):\n image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n image.add_line((487, 350), (487, 250))\n image.add_line((574, 400), (661, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]],\n # Methyl groups\n [[487, 250]],\n [[661, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 487, 350]],\n [[487, 350, 574, 400]],\n [[574, 400, 574, 500]],\n [[574, 500, 487, 550]],\n [[487, 550, 400, 500]],\n [[400, 500, 400, 400]],\n # To methyl groups\n [[487, 350, 487, 250]],\n [[574, 400, 661, 350]]\n ])\n )", "def test_methylcyclohexane(self):\n def draw(image: ShapeImage):\n image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n image.add_line((487, 350), (487, 250))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]],\n # Methyl group\n [[487, 250]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 487, 350]],\n [[487, 350, 574, 400]],\n [[574, 400, 574, 500]],\n [[574, 500, 487, 550]],\n [[487, 550, 400, 500]],\n [[400, 500, 400, 400]],\n # To methyl group\n [[487, 350, 487, 250]]\n ])\n )", "def test(self):\n # Pick an endian type\n endian_type = random.randrange(0, 3)\n test_output = hexify(number, endian_type)\n\n if endian_type == 0:\n endian_type = \"big\"\n elif endian_type == 1:\n endian_type = \"little\"\n elif endian_type == 2:\n endian_type = \"small\"\n\n # This is the function which will be attached to unittest\n output = test_func(number, endian_type)\n self.assertEqual(output, test_output,\n \"conv_endian returned {},\"\n \" hexify returned {}\".format(output, test_output))", "def test_generation_index_one(self):\n generator = math_helpers.triangle_number_generator(1)\n first_ten_triangle_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 3, 6, 10, 15, 21, 28, 36, 45, 55]\n self.assertEqual(canonical_values, first_ten_triangle_numbers)", "def test_generate_barcode_ean13(self):\n pass", "def test_generate_barcode_ean8(self):\n pass", "def phex(value, expected):\n return f\"{value:#0{expected}x}\"", "def test_get_qiime_hex_string_color(self):\r\n # regular indices are supported\r\n self.assertEqual(get_qiime_hex_string_color(0), '#ff0000')\r\n self.assertEqual(get_qiime_hex_string_color(1), '#0000ff')\r\n\r\n # if there's an overflow, test it rolls-over accordingly\r\n self.assertEqual(get_qiime_hex_string_color(87), '#ff0000')\r\n self.assertEqual(get_qiime_hex_string_color(100), '#7da9d8')\r\n\r\n # test it raises an exception for negative values\r\n self.assertRaises(AssertionError, get_qiime_hex_string_color, -1)", "def test_single_quadrant(self):", "def is_hexagonal(x):\n solution = solve_quad(2, -1, -x)\n return max(solution) % 1 == 0", "def generate_endian_tests(self):\n # CITATION: See Python Docs Contributors, 
Works Cited at end\n # Generate a number of tests equal to the range set below\n for x in range(10000):\n # CITATION: See ReadTheDocs Python Reference\n # Contributors, Works Cited at end -2147483648\n num = random.randrange(-2147483648, 2147483648)\n # print(\"Number for test{} was {}\".format(x, num))\n test = build_endian_test(conv_endian, num)\n # CITATION: See OSU Course Contributors, Works Cited\n setattr(TestCase, \"test_endian_{}\".format(x), test)", "def is_hexagonal_number(n):\n _, x = quadratic.solve(2, -1, -n)\n return is_number(x) and x.is_integer()", "def test_corruptedbit(self):\n self.assertRaises(ValueError, two_out_five, '1100000111') #Too many 1s must raise a ValueError!\n self.assertRaises(ValueError, two_out_five, '1100000100') #Too many 0s must raise a ValueError!", "def test_heip_e(self):\n c = array([1,2,3,1])\n h = shannon(c, base=e)\n expected = exp(h-1)/3\n self.assertEqual(heip_e(c), expected)", "def draw_raw_hexagons(x, y, n, color1, color2):\n w = x\n e = y\n side_hexagon = math.floor(500 / (2 * n))\n for i in range(math.ceil(n / 2)):\n draw_hexagon(x, y, side_hexagon, color1)\n #Получить координаты для следуующего шестиугольника\n x = turtle.xcor() + 2 * (side_hexagon * math.sqrt(3))\n y = turtle.ycor()\n\n\n turtle.up()\n turtle.goto(w - side_hexagon * math.sqrt(3), e)\n\n for q in range(math.floor(n / 2)):\n x = turtle.xcor() + 2 * (side_hexagon * math.sqrt(3))\n y = turtle.ycor()\n draw_hexagon(x, y, side_hexagon, color2)", "def test_valid_input(self):\n self.assertEqual('0', two_out_five('11000'))\n self.assertEqual('1', two_out_five('00011'))\n self.assertEqual('2', two_out_five('00101'))\n self.assertEqual('3', two_out_five('00110'))\n self.assertEqual('4', two_out_five('01001'))\n self.assertEqual('5', two_out_five('01010'))\n self.assertEqual('6', two_out_five('01100'))\n self.assertEqual('7', two_out_five('10001'))\n self.assertEqual('8', two_out_five('10010'))\n self.assertEqual('9', two_out_five('10100'))\n self.assertEqual('019', two_out_five('110000001110100'))" ]
[ "0.70229656", "0.6910454", "0.67191875", "0.66082317", "0.6522414", "0.6509602", "0.64584404", "0.63799065", "0.6312566", "0.62936777", "0.62752265", "0.62654024", "0.6260626", "0.6257527", "0.6133599", "0.6133188", "0.6108747", "0.60841763", "0.599618", "0.5921129", "0.59114593", "0.5897102", "0.58892787", "0.58677614", "0.58660674", "0.5840851", "0.581704", "0.57705647", "0.5764965", "0.57412106" ]
0.8259293
0
test that the pentagonal number generator generates pentagonal numbers, as expected.
def test_generation(self): generator = math_helpers.pentagonal_number_generator() first_ten_pentagonal_numbers = [next(generator) for _ in range(10)] canonical_values = [1, 5, 12, 22, 35, 51, 70, 92, 117, 145] self.assertEqual(canonical_values, first_ten_pentagonal_numbers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_pentagonal(self):\n generator = math_helpers.pentagonal_number_generator()\n pents = set(next(generator) for _ in range(1000))\n non_pentagonals = set(x for x in range(max(pents)) if x not in pents)\n any_pentagonals = any(map(lambda x: math_helpers.is_pentagonal(x), non_pentagonals))\n self.assertEqual(any_pentagonals, False)", "def test_very_large_pentagonal_numbers(self):\n large_n = [x**9 for x in range(10000,10500)]\n pentagonals = [(n * (3 * n - 1)) // 2 for n in large_n]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), pentagonals))\n self.assertEqual(all_pentagonal, True)", "def test_first_thousand_pentagonal_numbers(self):\n generator = math_helpers.pentagonal_number_generator()\n first_thousand_pentagonal_numbers = [next(generator) for _ in range(1000)]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), first_thousand_pentagonal_numbers))\n self.assertEqual(all_pentagonal, True)", "def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)", "def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2", "def is_pentagonal(P):\n return sqrt(1 + 24 * P) % 6 == 5", "def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False", "def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False", "def test_single_quadrant(self):", "def is_pent(n):\n pen_test = (1 + sqrt(24*n + 1))/6\n if pen_test == int(pen_test):\n return True\n return False", "def test_random_create_P():\n\n max_step = 100\n n = 50\n low = 1\n tol = 1e-8\n\n P_ι = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_δ = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_ζ = np.random.dirichlet(np.random.randint(low, high=max_step, size=50),\n size=2)\n\n P = create_P(P_δ, P_ζ, P_ι)\n\n assert abs(P[:, 0, :, :].sum() - 1.) < tol\n assert abs(P[:, 1, :, :].sum() - 1.) 
< tol", "def is_pentagonal(n: int) -> bool:\r\n root = (1 + 24 * n) ** 0.5\r\n return ((1 + root) / 6) % 1 == 0", "def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6", "def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2", "def test_generate(self):\n cant_interations = int(random()*100)\n for i in range(cant_interations):\n number = self.sudoku.generate() # Generate number\n self.assertGreaterEqual(number, 0) # number >= 0\n self.assertLessEqual(number, 8) # number <= 8", "def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)", "def test_generation(self):\n generator = math_helpers.hexagonal_number_generator()\n first_ten_hex_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 6, 15, 28, 45, 66, 91, 120, 153, 190]\n self.assertEqual(canonical_values, first_ten_hex_numbers)", "def test_pythagorean_triples(self):\n\n s = space(0)\n for a, b, c in (\n (3, 4, 5),\n (8, 15, 17),\n (33, 56, 65)\n ):\n self.assertTrue(isclose(\n s.hypot(a, b),\n c\n ))\n self.assertTrue(isclose(\n s.leg(a, c),\n b\n ))", "def test_sequence(self):\n self.assertEqual([1, -3, 9, -27, 81, -243],\n [x for x in GeometricProgression(6, 1, -3)])\n\n self.assertEqual([1, 1, 1, 1, 1],\n [x for x in GeometricProgression(5, 1, 1)])\n\n self.assertEqual([4, 40, 400, 4000, 40000],\n [x for x in GeometricProgression(5, 4, 10)])", "def test_domino_with_9_numbers():\n assert compute(9) == 20, \"Not ok\"", "def test_domino_with_correct_n_numbers():\n assert compute(7) == 12, \"Not ok\"", "def solution():\n pentagonals = []\n n = 1\n while True:\n newp = (n*(3*n-1))/2\n for p in pentagonals:\n diff = abs(newp-p)\n if ispentagonal(diff) and ispentagonal(newp+p):\n return diff\n pentagonals.append(newp)\n n += 1", "def test_domino_with_4_numbers():\n assert compute(4) == 4, \"Not ok\"", "def test_domino_with_1_numbers():\n assert compute(1) == 0, \"Not ok\"", "def test_domino_with_15_numbers():\n assert compute(15) == 56, \"Not ok\"", "def test_domino_with_42_numbers():\n assert compute(42) == 441, \"Not ok\"", "def test_numbers(number):\n assert number ** 2 == number ** 2", "def test_build_poset_lattice():\n lattice = build_poset_lattice(all_games_gen(2))\n assert len(lattice.edges()) == 36", "def test_case_02_equilateral(self):\n self.__assert_equals_test_case(self.yield_equilateral_triangles(), 'Equilateral Triangle')", "def test_MonteCarloP(self):\r\n val = 3.0\r\n random_vals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]\r\n\r\n # test for \"high\" tail (larger values than expected by chance)\r\n p_val = MonteCarloP(val, random_vals, 'high')\r\n self.assertEqual(p_val, 0.7)\r\n\r\n # test for \"low\" tail (smaller values than expected by chance)\r\n p_val = MonteCarloP(val, random_vals, 'low')\r\n self.assertEqual(p_val, 0.4)" ]
[ "0.74531734", "0.7395402", "0.71977144", "0.6693339", "0.6635609", "0.6610927", "0.6543196", "0.65372956", "0.6495699", "0.6472204", "0.64258176", "0.64011145", "0.62475634", "0.6194243", "0.60921097", "0.6078972", "0.6033385", "0.60184103", "0.5992109", "0.5987961", "0.59840953", "0.5961682", "0.5961125", "0.59608555", "0.58992016", "0.5898285", "0.5893379", "0.5873902", "0.5837913", "0.58340573" ]
0.7832997
0
test that the first thousand pentagonal numbers are identified as such.
def test_first_thousand_pentagonal_numbers(self): generator = math_helpers.pentagonal_number_generator() first_thousand_pentagonal_numbers = [next(generator) for _ in range(1000)] all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), first_thousand_pentagonal_numbers)) self.assertEqual(all_pentagonal, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def test_nth_digit_of_fractional_part(self):\n\t\tcounter = 1\n\t\tfor digit in generate_digits(13):\n\t\t\tif counter == 1 or counter == 10 or counter == 12:\n\t\t\t\tself.assertEqual(1, digit)\n\t\t\n\t\t\tcounter += 1", "def mccarthy_ninety_one(number):\n if number > 100:\n print('M(%d) since %d is greater than 100' % (number - 10, number))\n return number - 10\n else:\n print('M(M(%d)) since %d is less than or equal to 100' % (number + 11, number))\n return mccarthy_ninety_one(mccarthy_ninety_one(number + 11))", "def is_pent(n):\n pen_test = (1 + sqrt(24*n + 1))/6\n if pen_test == int(pen_test):\n return True\n return False", "def is_hilbert_number(n):\n return n > 0 and n % 4 == 1", "def must_contain_digit(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n return not bool(re.search(\"\\d\", str(cell)))", "def is_nine_pandigital(number):\n digits = str(number)\n return bool(len(digits) == len(ALL_NINE) and set(digits) == ALL_NINE)", "def test_hasta_el_numeral(self):\n fwa = FakeWikiArchivo('abcd <a href=\"/wiki/foobar#xy\">FooBar</a> dcba')\n _, r = self.peishranc(fwa)\n self.assertEqual(r, [(u'foobar', SCORE_PEISHRANC)])", "def _isnumber_with_thousands_separator(string):\n try:\n string = string.decode()\n except (UnicodeDecodeError, AttributeError):\n pass\n\n return bool(re.match(_float_with_thousands_separators, string))", "def test_number_simple(self):\r\n self.assertEquals(preview.latex_preview('3.1415'), '3.1415')", "def is_in_county(p):\n try:\n float(p[0:1])\n return True\n except ValueError:\n return False", "def is_self_describing(number):\n number = list(map(int, number))\n counts = Counter(number)\n return all(\n map(lambda x: counts[x[0]] == x[1], enumerate(number))\n )", "def test_number(self):\n\n tokens = list(Lexer(\"123 123.456 .456 .123 .\").generate_tokens())\n answer = [Token(TokenType.NUMBER, 123),\n Token(TokenType.NUMBER, 123.456),\n Token(TokenType.NUMBER, 0.456),\n Token(TokenType.NUMBER, 0.123),\n Token(TokenType.NUMBER, 0.0)]\n self.assertEqual(tokens, answer)", "def test_nonsense_decimal(self):\n test_passes = False\n try:\n self.parser.extract_zt(\"ZT.\")\n test_passes = False\n except Exception as e:\n test_passes = True\n self.assertTrue(test_passes)", "def test_example_day9_pt2():\n assert find_pt2(ex_data, 127) == 62", "def is_special(s):\n for part in xrange(1, 3**len(s)):\n p = part\n sa = 0\n ca = 0\n sb = 0\n cb = 0\n for i, x in enumerate(s):\n if p%3 == 1:\n sa += x\n ca += 1\n elif p%3 == 2:\n sb += x\n cb += 1\n p = p//3\n if ca == 0 or cb == 0:\n continue\n if sa == sb:\n return False\n if ca > cb and sa <= sb:\n return False\n if cb > ca and sb <= sa:\n return False\n return True", "def test_very_large_pentagonal_numbers(self):\n large_n = [x**9 for x in range(10000,10500)]\n pentagonals = [(n * (3 * n - 1)) // 2 for n in large_n]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), pentagonals))\n self.assertEqual(all_pentagonal, True)", "def _contains_number(text):\n return any((True for n in xrange(10) if str(n) in text))", "def test_example_day9_pt1():\n assert find_first_invalid_value(ex_data, 5) == 127", "def test_number_sci_notation(self):\r\n self.assertEquals(\r\n preview.latex_preview('6.0221413E+23'),\r\n r'6.0221413\\!\\times\\!10^{+23}'\r\n )\r\n self.assertEquals(\r\n preview.latex_preview('-6.0221413E+23'),\r\n r'-6.0221413\\!\\times\\!10^{+23}'\r\n )", "def is_numberish(G):\n return True", "def isHappy(self, 
n):\n nxt = 0\n appeared = set()\n while True:\n nxt += (n%10)*(n%10)\n n /= 10\n if n == 0:\n if nxt == 1:\n return True\n if nxt in appeared:\n return False\n\n appeared.add(nxt)\n n = nxt\n nxt = 0", "def test_number_suffix(self):\r\n self.assertEquals(preview.latex_preview('1.618k'), r'1.618\\text{k}')", "def is_isni(val):\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 16:\n return False\n try:\n r = 0\n for x in val[:-1]:\n r = (r + int(x)) * 2\n ck = (12 - r % 11) % 11\n return ck == _convert_x_to_10(val[-1])\n except ValueError:\n return False", "def test_strings_first_symbol():\n\n first_result = strings_ops.strings_first_symbol(\"Otus\", \"october\")\n assert first_result is True", "def checkDecimalPlace(num, data):\n if data == \"\" or data is None:\n return True\n else:\n d = calAfterPoint(data)\n num = int(num)\n if abs(d) <= abs(num):\n return True\n else:\n return False", "def test_generation(self):\n generator = math_helpers.pentagonal_number_generator()\n first_ten_pentagonal_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 5, 12, 22, 35, 51, 70, 92, 117, 145]\n self.assertEqual(canonical_values, first_ten_pentagonal_numbers)", "def test_number_in_word():\n assert syllapy.count(\"d0g\") == 0", "def not_zero_pound_penny(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, convert to a number\n cell = pd.to_numeric(str(cell), errors=\"coerce\")\n if check_empty(cell):\n return True\n cell = round(cell, 2)\n # Check it's not 0, 1, or 0.01\n return cell == 0 or cell == 1 or cell == 0.01", "def is_interesting(x):\n if any(x.startswith(y) for y in (\"0000\", \"0004\", \"0400\", \"0404\")):\n return False\n # The TM can't leave a state once it gets to it.\n # Either it doesn't use that state, or it loops\n # Neither is TM we are interested in\n if \"0\" == x[0] == x[2] == x[4] or \"1\" == x[6] == x[8] == x[10]:\n return False\n return True" ]
[ "0.62571967", "0.59747773", "0.59705544", "0.5876851", "0.58684933", "0.5842661", "0.5808757", "0.57252103", "0.5721397", "0.56414974", "0.563688", "0.5632761", "0.55704457", "0.55383325", "0.55375504", "0.5511389", "0.550329", "0.5498879", "0.54719704", "0.5465385", "0.5460845", "0.54545146", "0.5453343", "0.5451376", "0.54483986", "0.5443516", "0.5403997", "0.5402796", "0.53957504", "0.5383768" ]
0.7510158
0
test that even large pentagonal numbers are correctly identified as such (i.e. check whether we might expect to run into floatingpoint error)
def test_very_large_pentagonal_numbers(self): large_n = [x**9 for x in range(10000,10500)] pentagonals = [(n * (3 * n - 1)) // 2 for n in large_n] all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), pentagonals)) self.assertEqual(all_pentagonal, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_pentagonal(self):\n generator = math_helpers.pentagonal_number_generator()\n pents = set(next(generator) for _ in range(1000))\n non_pentagonals = set(x for x in range(max(pents)) if x not in pents)\n any_pentagonals = any(map(lambda x: math_helpers.is_pentagonal(x), non_pentagonals))\n self.assertEqual(any_pentagonals, False)", "def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)", "def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False", "def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False", "def test_first_thousand_pentagonal_numbers(self):\n generator = math_helpers.pentagonal_number_generator()\n first_thousand_pentagonal_numbers = [next(generator) for _ in range(1000)]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), first_thousand_pentagonal_numbers))\n self.assertEqual(all_pentagonal, True)", "def is_pentagonal(n: int) -> bool:\r\n root = (1 + 24 * n) ** 0.5\r\n return ((1 + root) / 6) % 1 == 0", "def is_pent(n):\n pen_test = (1 + sqrt(24*n + 1))/6\n if pen_test == int(pen_test):\n return True\n return False", "def is_pentagonal(P):\n return sqrt(1 + 24 * P) % 6 == 5", "def is_perfect_square():", "def test_nearest_boundary_even():\n assert _nearest_boundary(10, 20, 14, 0) == 0\n assert _nearest_boundary(10, 20, 14, 1) == 0\n assert _nearest_boundary(10, 20, 15, 0) == 1\n assert _nearest_boundary(10, 20, 15, 1) == 1", "def is_pentagonal(x):\n solution = solve_quad(3, -1, -2*x)\n return max(solution) % 1 == 0", "def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def test_nearest_boundary_odd():\n assert _nearest_boundary(10, 19, 14, 0) == 0\n assert _nearest_boundary(10, 19, 14, 1) == 1", "def test_under_1000m():\n z = np.array([50.0, 550.0, 850.0])\n h = util.geometric_to_geopotential(z)\n expected_h = np.array([50.0, 550.0, 850.0])\n expected_T = np.array([287.825, 284.575, 282.626])\n expected_p = np.array([100720.0, 94890.0, 91523.0])\n expected_rho = np.array([1.2191, 1.1616, 1.1281])\n\n h, T, p, rho = coesa.table(h)\n\n assert_array_almost_equal(h, expected_h, decimal=0)\n assert_array_almost_equal(T, expected_T, decimal=3)\n assert_array_almost_equal(p, expected_p, decimal=-1)\n assert_array_almost_equal(rho, expected_rho, decimal=4)", "def is_hilbert_square(n):\n return ((-1 + math.sqrt(n)) / 4).is_integer()", "def test_elliptic(self):\n fun = get_problem('elliptic', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 5129555.351959938, delta=2e6)", "def test_numbers(number):\n assert number ** 2 == number ** 2", "def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0", "def test_outFrac_2sigma68():\n\n arr = np.random.normal(size=int(4e6))\n res = pval.outFrac_2sigma68(arr)\n val = np.sum(np.abs(arr) > 2 * pval.sigma_68(arr)) * 1.0 / len(arr)\n np.testing.assert_almost_equal(res, val, 4)", "def test_inexact_prediace(doctest):", "def test_under_86km():\n z = np.array([50000.0, 70000.0, 86000.0])\n h = util.geometric_to_geopotential(z)\n expected_h = np.array([49610.0, 69238., 84852.0])\n expected_T = np.array([270.65, 219.585, 186.87])\n expected_p = np.array([79.779, 5.2209, 0.37338])\n expected_rho = np.array([0.0010269, 0.000082829, 0.000006958])\n\n h, T, p, rho = coesa.table(h)\n \n 
assert_array_almost_equal(h, expected_h, decimal=0)\n assert_array_almost_equal(T, expected_T, decimal=2)\n assert_array_almost_equal(p, expected_p, decimal=3)\n assert_array_almost_equal(rho, expected_rho, decimal=7)", "def test_generation(self):\n generator = math_helpers.pentagonal_number_generator()\n first_ten_pentagonal_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 5, 12, 22, 35, 51, 70, 92, 117, 145]\n self.assertEqual(canonical_values, first_ten_pentagonal_numbers)", "def test_euclid(test_data: MinEvenNumberTestData):\n answer = min_even_digit(test_data.number)\n assert answer == test_data.answer", "def is_pentagonal_number(n):\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()", "def test_is_odd(self):\n for _ in range(1024):\n obj = ECCBlind()\n x = OpenSSL.BN_new()\n y = OpenSSL.BN_new()\n OpenSSL.EC_POINT_get_affine_coordinates(\n obj.group, obj.Q, x, y, 0)\n self.assertEqual(OpenSSL.BN_is_odd(y),\n OpenSSL.BN_is_odd_compatible(y))", "def test_pkstwo(self):\n self.assertFloatEqual(pkstwo(2.3),[1-5.084e-05],eps=1e-5)", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True" ]
[ "0.7146114", "0.6994994", "0.69625324", "0.69192934", "0.68733996", "0.67430663", "0.67230994", "0.6540389", "0.6376136", "0.62863314", "0.62853134", "0.62769395", "0.6264355", "0.6264355", "0.6261929", "0.6246477", "0.6208479", "0.61807215", "0.617333", "0.6156477", "0.6153163", "0.614221", "0.6141207", "0.6110908", "0.60898215", "0.60860586", "0.6073404", "0.6065788", "0.6059327", "0.60395545" ]
0.7652133
0
test some nonpentagonal numbers, make sure they don't show up as pentagonal.
def test_not_pentagonal(self): generator = math_helpers.pentagonal_number_generator() pents = set(next(generator) for _ in range(1000)) non_pentagonals = set(x for x in range(max(pents)) if x not in pents) any_pentagonals = any(map(lambda x: math_helpers.is_pentagonal(x), non_pentagonals)) self.assertEqual(any_pentagonals, False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False", "def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False", "def test_very_large_pentagonal_numbers(self):\n large_n = [x**9 for x in range(10000,10500)]\n pentagonals = [(n * (3 * n - 1)) // 2 for n in large_n]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), pentagonals))\n self.assertEqual(all_pentagonal, True)", "def is_pentagonal(n: int) -> bool:\r\n root = (1 + 24 * n) ** 0.5\r\n return ((1 + root) / 6) % 1 == 0", "def is_pentagonal(P):\n return sqrt(1 + 24 * P) % 6 == 5", "def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)", "def is_pent(n):\n pen_test = (1 + sqrt(24*n + 1))/6\n if pen_test == int(pen_test):\n return True\n return False", "def is_pentagonal_number(n):\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()", "def test_first_thousand_pentagonal_numbers(self):\n generator = math_helpers.pentagonal_number_generator()\n first_thousand_pentagonal_numbers = [next(generator) for _ in range(1000)]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), first_thousand_pentagonal_numbers))\n self.assertEqual(all_pentagonal, True)", "def is_pentagonal(x):\n solution = solve_quad(3, -1, -2*x)\n return max(solution) % 1 == 0", "def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2", "def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)", "def test_generation(self):\n generator = math_helpers.pentagonal_number_generator()\n first_ten_pentagonal_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 5, 12, 22, 35, 51, 70, 92, 117, 145]\n self.assertEqual(canonical_values, first_ten_pentagonal_numbers)", "def test_negativenumbers(self):\n result = ps.pairs([-4, 4, 0, -2, 0], 0)\n self.assertEqual(result[0, 0], -4)\n self.assertEqual(result[0, 1], 4)\n self.assertEqual(result[1, 0], 0)\n self.assertEqual(result[1, 1], 0)", "def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')", "def is_hexagonal_number(n):\n _, x = quadratic.solve(2, -1, -n)\n return is_number(x) and x.is_integer()", "def test_frac_same_non_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_same_non_gaps(y), z)\n\n test(s1, s2, 0.25)\n test(s1, s3, 0)\n test(s2, s3, 0.75)\n test(s1, s4, 0.5)\n test(s4, s5, 0)\n test(s4, s6, 0.6)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 2 / 3.0)\n test(e, s4, 0)", "def isIsotropic( self ) :\n\n for coefficient in self[1:] :\n if( coefficient != 0. 
) : return( False )\n return( True )", "def test_isosceles(self):\r\n self.assertEqual(triangle_classification(4, 4, 5), 'Isosceles Triangle')\r\n self.assertEqual(triangle_classification(1234567890, 1234567890, 987654321), 'Isosceles Triangle')\r\n self.assertNotEqual(triangle_classification(3, 4, 5), 'Isosceles Triangle')\r\n self.assertNotEqual(triangle_classification(2, 2, 2.0000000000000001), 'Isosceles Triangle') # precision failure\r\n self.assertEqual(triangle_classification(2, 2, 2.000000000000001), 'Isosceles Triangle')\r\n self.assertEqual(triangle_classification(2, 2, 2.0000000000000001), 'Equilateral Triangle')", "def is_pentomino(pent, pents):\n pidx = get_pent_idx(pent)\n if pidx == -1:\n return False\n true_pent = pents[pidx]\n \n for flipnum in range(3):\n p = np.copy(pent)\n if flipnum > 0:\n p = np.flip(pent, flipnum-1)\n for rot_num in range(4):\n if np.array_equal(true_pent, p):\n return True\n p = np.rot90(p)\n return False", "def solution():\n pentagonals = []\n n = 1\n while True:\n newp = (n*(3*n-1))/2\n for p in pentagonals:\n diff = abs(newp-p)\n if ispentagonal(diff) and ispentagonal(newp+p):\n return diff\n pentagonals.append(newp)\n n += 1", "def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2", "def test_pythagorean_triples(self):\n\n s = space(0)\n for a, b, c in (\n (3, 4, 5),\n (8, 15, 17),\n (33, 56, 65)\n ):\n self.assertTrue(isclose(\n s.hypot(a, b),\n c\n ))\n self.assertTrue(isclose(\n s.leg(a, c),\n b\n ))", "def is_hexagonal(x):\n solution = solve_quad(2, -1, -x)\n return max(solution) % 1 == 0", "def is_hilbert_square(n):\n return ((-1 + math.sqrt(n)) / 4).is_integer()", "def test_ne(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f48: Fraction = Fraction(4, 8)\n self.assertTrue(f12 != f34)\n self.assertFalse(f12 != f48)\n self.assertFalse(f12 != f12)", "def test_case_04_legal_triangle(self):\n self.__assert_not_equal_test_case([(4, 4, 8), (4, 5, 8)], 'NotATriangle')", "def add_pentomino(board, pent, coord, check_pent=False, valid_pents=None):\n if check_pent and not is_pentomino(pent, valid_pents):\n return False\n for row in range(pent.shape[0]):\n for col in range(pent.shape[1]):\n if pent[row][col] != 0:\n if board[coord[0]+row][coord[1]+col] != 0: # Overlap\n return False\n else:\n board[coord[0]+row][coord[1]+col] = pent[row][col]\n return True", "def test_notequal(self):\n self.assertTrue(Fraction(144,2)!=Fraction(8,4))", "def test_nan():\n assert 'invalid' == classify_triangle(1,2,float('nan'))" ]
[ "0.7697014", "0.766817", "0.7655574", "0.74935836", "0.74237895", "0.72347325", "0.72115386", "0.7103618", "0.69874036", "0.69399244", "0.66400045", "0.65678006", "0.6297902", "0.6223127", "0.59932196", "0.59665614", "0.5896299", "0.58934504", "0.58915144", "0.5882717", "0.58612674", "0.58518237", "0.5827889", "0.58145666", "0.57463956", "0.57376707", "0.57358825", "0.5718966", "0.57048106", "0.5693572" ]
0.8459626
0