query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4–10) | document_rank (2 classes) |
---|---|---|---|---|---|---|
enables or disables the entry widgets in options window based on options radio button | def entryToggle(self):
        status = "normal" if self.optionVar.get() == 4 else "disabled"
        for i in range(3):
            self.entry[i].configure(state=status) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def options(self):\n self.checkVar.set(self.menuVar.get())\n #create window then set window size & title\n self.optionsWindow = tk.Toplevel(self)\n self.optionsWindow.grab_set()\n self.optionsWindow.title(\"Options\")\n windowWidth = \"225\"\n windowHeight = \"175\"\n self.optionsWindow.minsize(windowWidth, windowHeight)\n self.optionsWindow.maxsize(windowWidth, windowHeight)\n self.optionsWindow.geometry(windowWidth+'x'+windowHeight)\n \n #creates the frame and self.optionVar\n frame = tk.Frame(self.optionsWindow)\n frame.pack()\n self.optionVar = tk.IntVar(self)\n self.optionVar.set(self.menuVar.get())\n\n #add the choices as radio buttons to the frame\n choices = [\n (\"Beginner\"+\"\\n8 X 8\"+\"\\n10 Mines\", 1),\n (\"Intermediate\"+\"\\n16 X 16\"+\"\\n40 Mines\", 2),\n (\"Expert\"+\"\\n16 X 30\"+\"\\n99 Mines\", 3),\n (\"Custom\", 4)\n ]\n for text, value in choices:\n button = tk.Radiobutton(frame, text=text, value=value, variable=self.optionVar, justify=\"left\", command=self.entryToggle)\n row, col, colspan = value-1, 0, 1\n if value is 4:row, col, colspan = 0, 1, 2\n button.grid(row=row, column=col, columnspan=colspan, sticky=\"W\")\n \n #add the text entry options for the custom game\n frame2 = tk.Frame(frame)\n frame2.grid(row=1, column=1, sticky=\"N\")\n\n rowLabel = tk.Label(frame2, text=\"Height: \", justify=\"left\")\n rowLabel.grid(row=0, column=0)\n colLabel = tk.Label(frame2, text=\"Width: \", justify=\"left\")\n colLabel.grid(row=1, column=0)\n minLabel = tk.Label(frame2, text=\"Mines: \", justify=\"left\")\n minLabel.grid(row=2, column=0)\n\n self.entry = []\n for i in range(3):\n self.entry.append(tk.Entry(frame2,width=10))\n self.entry[i].grid(row=i, column=1)\n self.entryToggle()\n \n #add the submit button to handle options given in the window\n submit = tk.Button(frame, text=\"Play\", command=self.optionSet)\n submit.grid(row=2, column=1, sticky=\"WE\")",
"def enable(self):\n self.colour_combo.config(state=tk.NORMAL)\n self.game_name_entry.config(state=tk.NORMAL)\n self.num_tickets_entry.config(state=tk.NORMAL)",
"def reconfigure(self):\n for key in self.infoboxes.keys():\n self.infoboxes[key].set_enabled(self.options[key].value)\n\n for button in self.buttons:\n if (button.display_opt \n and not self.options[button.display_opt].value):\n button.Hide()\n else:\n button.Show()\n self.view.update_options()",
"def updateOptions(self):\r\n if self.varSegment.get() == \"binary\":\r\n self.checkSaveBinary.config(state=tk.DISABLED)\r\n else:\r\n self.checkSaveBinary.config(state=tk.NORMAL)",
"def on_toggled_radio_ws(self,enabled):\n if enabled:\n self.dlg.text_ws.setEnabled(True)\n self.dlg.text_url.setEnabled(False)\n self.dlg.checkBox_save.setEnabled(False)\n self.dlg.checkBox_save.setChecked(False)\n \n else:\n self.dlg.text_ws.setEnabled(False)\n self.dlg.text_url.setEnabled(True)\n self.dlg.checkBox_save.setEnabled(True)",
"def onToogleAdvancedGUI(updateSettings = True):\n # hide/show Sources menu\n menuSources = getMainWindow().findChild(\"QMenu\", \"menuSources\").menuAction()\n menuSources.visible = not menuSources.visible\n # hide/show Filters menu\n menuFilters = getMainWindow().findChild(\"QMenu\", \"menuFilters\").menuAction()\n menuFilters.visible = not menuFilters.visible\n # hide/show Advance menu\n menuAdvance = getMainWindow().findChild(\"QMenu\", \"menuAdvance\").menuAction()\n menuAdvance.visible = not menuAdvance.visible\n # hide/show view decorator\n getMainWindow().centralWidget().toggleWidgetDecoration()\n # update the UserSettings\n if updateSettings:\n # booleans must be store as int\n newValue = int(not int(getPVSettings().value(\"LidarPlugin/AdvanceFeature/Enable\", 0)))\n getPVSettings().setValue(\"LidarPlugin/AdvanceFeature/Enable\", newValue)",
"def on_toggled_radio_geom(self,enabled):\n if enabled:\n self.dlg.combo_geom.setEnabled(True)\n self.dlg.label_geom.setEnabled(True)\n else:\n self.dlg.combo_geom.setEnabled(False)\n self.dlg.label_geom.setEnabled(False)",
"def on_radioButton_clicked(self):\r\n # TODO: not implemented yet\r",
"def add_option_enable(self):\n logger.debug(\"Adding enable option\")\n chkenable = ttk.Checkbutton(self.optsframe,\n variable=self.vars[\"enabled\"],\n text=\"Enable {}\".format(self.tabname),\n command=self.on_chkenable_change)\n chkenable.pack(side=tk.RIGHT, padx=5, anchor=tk.W)\n Tooltip(chkenable,\n text=\"Enable or disable {} display\".format(self.tabname),\n wraplength=200)",
"def enable_selected(self, window, values, branch_log_dict, key_event):\n utils.convert_to_numeric(values)\n if(values[key_event] in branch_log_dict[key_event]):#if there is branching for the chosen option\n for element_key in branch_log_dict[key_event][values[key_event]]:\n #values the element can take\n if not isinstance(window[element_key], sg.Text):\n window[element_key].update(disabled = False)\n window[element_key].metadata = True\n window[element_key+\"_label\"].update(text_color = \"#FFFFFF\")#every non-text field has a label\n window[element_key].update(visible = True)",
"def activate_ui(self):\n\n self.checkfilename()\n exptype = str(self.exptypeComboBox.currentText())\n\n if exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n self.imstackSpinBox.setEnabled(True)\n self.imnumSpinBox.setEnabled(True)\n self.minexpSpinBox.setEnabled(False)\n self.maxexpSpinBox.setEnabled(False)\n self.tstepSpinBox.setEnabled(False)\n\n if exptype == \"Bias Stack\":\n self.exptimeSpinBox.setEnabled(False)\n else:\n self.exptimeSpinBox.setEnabled(True)\n\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n self.exptimeSpinBox.setEnabled(False)\n self.imstackSpinBox.setEnabled(False)\n self.imnumSpinBox.setEnabled(True)\n self.minexpSpinBox.setEnabled(True)\n self.maxexpSpinBox.setEnabled(True)\n self.tstepSpinBox.setEnabled(True)\n\n else:\n self.imstackSpinBox.setEnabled(False)\n self.imnumSpinBox.setEnabled(True)\n self.minexpSpinBox.setEnabled(False)\n self.maxexpSpinBox.setEnabled(False)\n self.tstepSpinBox.setEnabled(False)\n\n if exptype == \"Bias\":\n self.exptimeSpinBox.setEnabled(False)\n else:\n self.exptimeSpinBox.setEnabled(True)",
"def _enable_entry(self):\n self.insert_entry.configure(state=tk.NORMAL)\n self.insert_button.configure(state=tk.NORMAL)",
"def radioButtonItem_Clicked( self, event ):\n\t\tself.activateTreasureBox(0)",
"def __init__(self, parent, state, position = wx.DefaultPosition):\n ##Set up data.\n self.state = state\n modeName = MODE_LIST[self.state.GetSurface(\"Mode\")]\n wx.Dialog.__init__(self, parent, -1, \"%s Mode Settings\" %(modeName),\n pos = position,\n style = wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER | \n wx.MINIMIZE_BOX |\n wx.MAXIMIZE_BOX)\n | wx.TAB_TRAVERSAL)\n ##Jconf pull-down menu.\n \n self.lblStBox1 = wx.StaticBox(self, -1, \"Programs to launch\" )\n ##Name Server checkbox.\n self.cbNameServer = wx.CheckBox(self, -1, \"Name Server\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbNameServer.SetToolTip(wx.ToolTip(\"Run Name Server at Launch\"))\n ##Conductor checkbox.\n self.cbConductor = wx.CheckBox(self, -1, \"Conductor\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbConductor.SetToolTip(wx.ToolTip(\"Run Conductor at Launch\"))\n ##Xplorer checkbox.\n self.cbXplorer = wx.CheckBox(self, -1, \"Xplorer\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbXplorer.SetToolTip(wx.ToolTip(\"Run Xplorer at Launch\"))\n ##Desktop checkbox.\n self.cbDesktop = wx.CheckBox(self, -1, \"Desktop Mode\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbDesktop.SetToolTip(wx.ToolTip(\"Set Desktop Mode for\" +\n \" Conductor and Xplorer\"))\n \n self.lblStBox2 = wx.StaticBox(self, -1, \"Xplorer Configuration\" )\n ##Xplorer Type radio box.\n self.rbXplorer = wx.RadioBox(self, -1, \"Mode\",\n wx.DefaultPosition, wx.DefaultSize,\n RADIO_XPLORER_LIST, 1, wx.RA_SPECIFY_ROWS)\n self.rbXplorer.SetToolTip(wx.ToolTip(\"Which Xplorer format do you\" +\n \" want to launch?\"))\n ##Cluster button.\n self.bCluster = wx.Button(self, -1, \"Cluster Settings\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCluster.SetToolTip(wx.ToolTip(\"Set the computers and extra\" +\n \" variables in the cluster.\"))\n ##Configuration Choice\n self.chJconf = wx.Choice(self, -1, wx.DefaultPosition, [150,-1])\n self.chJconf.SetToolTip(wx.ToolTip(\"Choose Xplorer's configuration.\"))\n ##Edit Jconf button.\n self.bEditJconf = wx.Button(self, -1, \"Edit Configuration List\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bEditJconf.SetToolTip(wx.ToolTip(\"Edit the list of Xplorer\" +\n \" configurations.\")) \n #OK and Cancel button\n if windows:\n self.bOk = wx.Button( self, wx.ID_OK, \"OK\", wx.DefaultPosition, wx.DefaultSize, 0 )\n else:\n self.bOk = wx.Button( self, wx.ID_SAVE, \"Save\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCancel = wx.Button( self, wx.ID_CANCEL, \"Cancel\", wx.DefaultPosition, wx.DefaultSize, 0 )\n \n ##Bind events.\n self.Bind(wx.EVT_LISTBOX, self.Refresh, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.Refresh, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_LISTBOX, self.UpdateData, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.UpdateData, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n if windows:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_OK)\n else:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_SAVE)\n self.Bind(wx.EVT_BUTTON, self.EditJconf, self.bEditJconf)\n self.Bind(wx.EVT_BUTTON, self.EditCluster, self.bCluster)\n \n ##Set sizers.\n vSizerMain = wx.BoxSizer( wx.VERTICAL )\n vSizer1 = 
wx.BoxSizer( wx.VERTICAL )\n svSizer1 = wx.StaticBoxSizer( self.lblStBox1, wx.VERTICAL )\n svSizer1.Add( self.cbNameServer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n hSizer1 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer1.Add( self.cbConductor, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n spacer1 = wx.StaticText(self, -1, \" \", wx.DefaultPosition, wx.DefaultSize, 0 )\n hSizer1.Add( spacer1, 0, wx.ALIGN_CENTER, 5 )\n hSizer1.Add( self.cbDesktop, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer1.Add( hSizer1, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n svSizer1.Add( self.cbXplorer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n vSizer1.Add( svSizer1, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.TOP, 5 )\n spacer2 = wx.StaticText(self, -1, \"\", wx.DefaultPosition, [10,10], 0 )\n vSizer1.Add( spacer2, 0, wx.ALIGN_CENTER, 5 )\n svSizer2 = wx.StaticBoxSizer( self.lblStBox2, wx.VERTICAL )\n hSizer2 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer2.Add( self.rbXplorer, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer2.Add( self.bCluster, 0, wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT|wx.TOP, 5 )\n svSizer2.Add( hSizer2, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer3 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer3.Add( self.chJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer3.Add( self.bEditJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer2.Add( hSizer3, 0, wx.ALIGN_CENTER, 5 )\n vSizer1.Add( svSizer2, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer4 = wx.BoxSizer( wx.HORIZONTAL )\n if windows:\n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n else: \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n vSizer1.Add( hSizer4, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.TOP, 5 )\n vSizerMain.Add( vSizer1, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n \n vSizerMain.SetSizeHints(self)\n self.SetSizer(vSizerMain)\n #self.CenterOnParent(wx.BOTH)\n ##Set the background color.\n #Style(self)\n if not CLUSTER_ENABLED:\n self.bCluster.Hide()\n ##Set up OK button.\n ##Update Display\n self.React()",
"def on_toggled_radio_latlng(self,enabled):\n if enabled:\n self.dlg.combo_lat.setEnabled(True)\n self.dlg.combo_lng.setEnabled(True)\n self.dlg.label_lat.setEnabled(True)\n self.dlg.label_lng.setEnabled(True)\n else:\n self.dlg.combo_lat.setEnabled(False)\n self.dlg.combo_lng.setEnabled(False)\n self.dlg.label_lat.setEnabled(False)\n self.dlg.label_lng.setEnabled(False)",
"def further_plot_options(self):\n\n if self.p_inputs[\"store plots\"].isChecked():\n self.p_inputs[\"superplot\"].setEnabled(True)\n self.p_inputs[\"separate plots\"].setEnabled(True)\n else:\n self.p_inputs[\"superplot\"].setDisabled(True)\n self.p_inputs[\"superplot\"].setChecked(False)\n self.p_inputs[\"separate plots\"].setDisabled(True)\n self.p_inputs[\"separate plots\"].setChecked(False)",
"def add_options(self, options):\n if options and self.check_options(options):\n self._options = options\n self._alternatives = widgets.RadioButtons(options=options,\n description='',\n disabled=False,\n layout=widgets.Layout(width='100%'))\n\n self.options_status = 'OK'\n else:\n self._options = None\n self._alternatives = None\n self.options_status = 'X'",
"def sectionSelect():\r\n self.MyInput = self.buttonValue.get() # gets the value of the radiobutton that the user selected.\r\n if self.MyInput == 1: # if it was the first radio button, prepare for part a\r\n \r\n self.textLabel1.grid(row=1, column = 0, sticky = E) # add and position the relevant widgets for this section of the simulation\r\n self.tickBox1.grid(row=1, column = 1, sticky = E)\r\n self.textLabel4.grid(row=2, column = 0, sticky = E)\r\n self.tickBox4.grid(row=2, column = 1, sticky = E)\r\n self.textLabel2.grid(row=1, column = 2, sticky = E)\r\n self.textInput2.grid(row=1, column = 3, sticky = E)\r\n self.textLabel3.grid(row=1, column = 4, sticky = E) \r\n self.textInput3.grid(row=1, column = 5, sticky = E)\r\n self.textLabel5.grid(row=2, column = 2, sticky = E) \r\n self.textInput5.grid(row=2, column = 3, sticky = E) \r\n self.textLabel6.grid(row=2, column = 4, sticky = E) \r\n self.textInput6.grid(row=2, column = 5, sticky = E)\r\n self.textLabel7.grid(row=3, column = 0, sticky = E) \r\n self.tickBox7.grid(row=3, column = 1, sticky = E)\r\n self.textLabel10.grid(row=4, column = 0, sticky = E) \r\n self.tickBox10.grid(row=4, column = 1, sticky = E)\r\n self.textLabel8.grid(row=3, column = 2, sticky = E) \r\n self.textInput8.grid(row=3, column = 3, sticky = E)\r\n self.textLabel9.grid(row=3, column = 4, sticky = E) \r\n self.textInput9.grid(row=3, column = 5, sticky = E) \r\n self.textLabel11.grid(row=5, column = 0, sticky = E) \r\n self.tickBox11.grid(row=5, column = 1, sticky = E)\r\n self.textLabel13.grid(row=6, column = 0, sticky = E) \r\n self.tickBox13.grid(row=6, column = 1, sticky = E)\r\n self.textInput12.grid(row=5, column = 2, sticky = W)\r\n self.textInput14.grid(row=6, column = 2, sticky = W)\r\n self.textLabel15.grid(row=7, column = 0, sticky = E)\r\n self.textInput15.grid(row=7, column = 1, sticky = E)\r\n \r\n self.textLabel16.grid_forget() #remove these widgets\r\n self.textLabel17.grid_forget()\r\n \r\n self.inputErrorText.set(\"\") #set these text labels to empty\r\n self.outputText.set(\"\")\r\n \r\n #here we add in suggested starting conditions for this section, the user is free to change them.\r\n self.tickBox1.select() #select polar coordinates radiobutton\r\n self.coordinate.set(0)\r\n coordinateSelect()\r\n self.textInput2Variable.set(\"400\")\r\n self.textInput3Variable.set(\"0\")\r\n self.tickBox10.select()\r\n self.circular.set(1)\r\n circularSelect()\r\n self.tickBox11.select()\r\n self.durationType.set(0)\r\n durationTypeSelect()\r\n self.textInput12Variable.set(\"3\")\r\n self.textInput15Variable.set(\"1\")\r\n \r\n elif self.MyInput == 2: # if it is the second radio button then prepare for part b\r\n selectionFrame1.grid_rowconfigure(1, weight = 1) # here we readjust the rows and columns so that we can centre the widgets\r\n selectionFrame1.grid_rowconfigure(2, weight = 1)\r\n selectionFrame1.grid_rowconfigure(3, weight = 1)\r\n selectionFrame1.grid_columnconfigure(0, weight = 1)\r\n selectionFrame1.grid_columnconfigure(1, weight = 1)\r\n \r\n self.textLabel16.grid(row=1, column = 0, sticky = E) # add these widgets\r\n self.textLabel17.grid(row=2, column = 0, sticky = E)\r\n self.textLabel15.grid(row=3, column = 0, sticky = E)\r\n self.textInput15.grid(row=3, column = 1, sticky = W)\r\n \r\n self.textInput14.grid_forget() # remove these widgets\r\n self.textInput12.grid_forget()\r\n self.tickBox13.grid_forget()\r\n self.textLabel13.grid_forget()\r\n self.tickBox11.grid_forget()\r\n self.textLabel11.grid_forget()\r\n 
self.textInput9.grid_forget()\r\n self.textLabel9.grid_forget()\r\n self.textInput8.grid_forget()\r\n self.textLabel8.grid_forget()\r\n self.tickBox10.grid_forget()\r\n self.textLabel10.grid_forget()\r\n self.tickBox7.grid_forget()\r\n self.textLabel7.grid_forget()\r\n self.textInput6.grid_forget()\r\n self.textLabel6.grid_forget()\r\n self.textInput5.grid_forget()\r\n self.textLabel5.grid_forget()\r\n self.textInput3.grid_forget()\r\n self.textLabel3.grid_forget()\r\n self.textInput2.grid_forget()\r\n self.textLabel2.grid_forget()\r\n self.tickBox4.grid_forget()\r\n self.textLabel4.grid_forget()\r\n self.tickBox1.grid_forget()\r\n self.textLabel1.grid_forget()\r\n self.textLabel15.grid_forget()\r\n self.textInput15.grid_forget()\r\n \r\n self.inputErrorText.set(\"\")\r\n self.outputText.set(\"\")\r\n \r\n elif self.MyInput == 3: # if part b - manual is selected \r\n self.textLabel1.grid(row=1, column = 0, sticky = E) #add these widgets\r\n self.tickBox1.grid(row=1, column = 1, sticky = E)\r\n self.textLabel4.grid(row=2, column = 0, sticky = E)\r\n self.tickBox4.grid(row=2, column = 1, sticky = E)\r\n self.textLabel2.grid(row=1, column = 2, sticky = E)\r\n self.textInput2.grid(row=1, column = 3, sticky = E)\r\n self.textLabel3.grid(row=1, column = 4, sticky = E) \r\n self.textInput3.grid(row=1, column = 5, sticky = E)\r\n self.textLabel5.grid(row=2, column = 2, sticky = E) \r\n self.textInput5.grid(row=2, column = 3, sticky = E) \r\n self.textLabel6.grid(row=2, column = 4, sticky = E) \r\n self.textInput6.grid(row=2, column = 5, sticky = E)\r\n self.textLabel8.grid(row=3, column = 0, sticky = E) \r\n self.textInput8.grid(row=3, column = 1, sticky = E)\r\n self.textLabel9.grid(row=3, column = 2, sticky = E) \r\n self.textInput9.grid(row=3, column = 3, sticky = E) \r\n self.textLabel13.grid(row=4, column = 0, sticky = E) \r\n self.textInput14.grid(row=4, column = 1, sticky = W)\r\n self.textLabel15.grid(row=5, column = 0, sticky = E)\r\n self.textInput15.grid(row=5, column = 1, sticky = E)\r\n \r\n self.textLabel16.grid_forget() #remove these widgets\r\n self.textLabel17.grid_forget()\r\n self.textLabel7.grid_forget()\r\n self.tickBox7.grid_forget()\r\n self.textLabel10.grid_forget()\r\n self.tickBox10.grid_forget()\r\n self.textLabel11.grid_forget()\r\n self.tickBox11.grid_forget()\r\n self.textInput12.grid_forget()\r\n self.tickBox13.grid_forget()\r\n \r\n self.inputErrorText.set(\"\")\r\n self.outputText.set(\"\")\r\n \r\n self.tickBox1.select() #add these suggested starting conditions\r\n self.coordinate.set(0)\r\n coordinateSelect()\r\n self.textInput2Variable.set(\"7000\")\r\n self.textInput3Variable.set(\"270\")\r\n self.circular.set(0)\r\n circularSelect()\r\n self.textInput8Variable.set(\"0\")\r\n self.textInput9Variable.set(\"7569.7\")\r\n self.durationType.set(1)\r\n durationTypeSelect()\r\n self.textInput14Variable.set(\"941760\")\r\n self.textInput15Variable.set(\"1\")\r\n self.textInput15Variable.set(\"50\")",
"def on_radioButton_2_clicked(self):\r\n # TODO: not implemented yet\r",
"def on_radioButton_clicked(self):\n print(\"您选择了A\")",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbs.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# Hispasat\n self.hispasat_button = pyxbmct.RadioButton('')\n self.placeControl(self.hispasat_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.hispasat_button, self.hispasat_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'hispasat', 2) == 1:\n self.hispasat_button.setSelected(True)\n else:\n self.hispasat_button.setSelected(False)\n hispasat = pyxbmct.Image(addonfolder+artsfolder+'/hispasat.png')\n self.placeControl(hispasat, 11, 1, rowspan=1, columnspan=4)\n \n\t\t# Astra\n self.astra_button = pyxbmct.RadioButton('')\n self.placeControl(self.astra_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.astra_button, self.astra_button_update)\n# if tools.return_data('TVHWIZARD', 'STRING', 'astra', 2) == 1:\n# self.astra_button.setSelected(True)\n# else:\n# self.astra_button.setSelected(False)\n astra = pyxbmct.Image(addonfolder+artsfolder+'/astra.png')\n self.placeControl(astra, 11, 6, rowspan=1, columnspan=4)\n\n\t\t# Hotbird\n self.hotbird_button = pyxbmct.RadioButton('')\n self.placeControl(self.hotbird_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.hotbird_button, self.hotbird_button_update)\n# if tools.return_data('TVHWIZARD', 'STRING', 'hotbird', 2) == 1:\n# self.hotbird_button.setSelected(True)\n# else:\n# self.hotbird_button.setSelected(False)\n hotbird = pyxbmct.Image(addonfolder+artsfolder+'/hotbird.png')\n self.placeControl(hotbird, 11, 11, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def block_entry(\r\n self\r\n ):\r\n self.e_num_images.config(state = tk.NORMAL)\r\n self.e_tot_time.config(state = tk.NORMAL)\r\n self.e_interval.config(state = tk.NORMAL)\r\n\r\n if self.r_quality_variable.get() == \"High\":\r\n self.e_interval.delete(0,tk.END)\r\n self.e_interval.insert(0,str(0.001))\r\n self.e_interval.config(state = tk.DISABLED)\r\n\r\n self.e_tot_time.delete(0,tk.END)\r\n self.e_tot_time.insert(0,str(int(self.e_num_images.get())*10/60.0))\r\n self.e_tot_time.config(state = tk.DISABLED)\r\n\r\n else:\r\n if self.r_radio_button_variable.get() == 1:\r\n self.e_interval.delete(0,tk.END)\r\n self.e_interval.insert(0,str(float(self.e_tot_time.get())*60/int(self.e_num_images.get())))\r\n self.e_num_images.config(state = tk.NORMAL)\r\n self.e_tot_time.config(state = tk.NORMAL)\r\n self.e_interval.config(state = tk.DISABLED)\r\n \r\n elif self.r_radio_button_variable.get() == 2:\r\n self.e_tot_time.delete(0,tk.END)\r\n self.e_tot_time.insert(0,str(int(self.e_num_images.get())*float(self.e_interval.get())/60))\r\n self.e_num_images.config(state = tk.NORMAL)\r\n self.e_tot_time.config(state = tk.DISABLED)\r\n self.e_interval.config(state = tk.NORMAL)\r\n elif self.r_radio_button_variable.get() == 3:\r\n self.e_num_images.delete(0,tk.END)\r\n self.e_num_images.insert(0,str(int(float(self.e_tot_time.get())*60/float(self.e_interval.get()))))\r\n self.e_num_images.config(state = tk.DISABLED)\r\n self.e_tot_time.config(state = tk.NORMAL)\r\n self.e_interval.config(state = tk.NORMAL)\r\n else:\r\n self.e_num_images.config(state = tk.NORMAL)\r\n self.e_tot_time.config(state = tk.NORMAL)\r\n self.e_interval.config(state = tk.NORMAL)",
"def disable(self):\n self.colour_combo.config(state=tk.DISABLED)\n self.game_name_entry.config(state=tk.DISABLED)\n self.num_tickets_entry.config(state=tk.DISABLED)",
"def settingstowidgets(self):\n\n # disconnect before updating, otherwise\n # the current GUI settings will be reinstated\n # after the first GUI element is updated\n self.disconnect_all_widgets()\n\n self.spansliderInt.setLowerValue(int(self.ABsettings[\"intensity_range\"][0]))\n self.spansliderInt.setUpperValue(int(self.ABsettings[\"intensity_range\"][1]))\n print \"vis setting \",self.ABsettings[\"visible\"]\n if self.ABsettings[\"visible\"]:\n print \"setting \",self.objectName(), \" to visible\"\n self.abEnabledCB.setChecked(True)\n else:\n print \"setting \",self.objectName(), \" to invisible\"\n self.abEnabledCB.setChecked(False)\n self.spansliderZ.setLowerValue(int(self.ABsettings[\"zrange\"][0]))\n self.spansliderZ.setUpperValue(int(self.ABsettings[\"zrange\"][1]))\n #self.ABsettings[\"Antibody\"]=self.ab\n self.colorBox.setRGB(self.ABsettings[\"rgb\"])\n if self.isDAPIPanel:\n for rb in self.radiobuttons:\n print \"radio button \", str(rb.objectName())\n if str(rb.objectName()).split(\"_\")[0]==self.ABsettings[\"selected_DAPI_channel\"]:\n rb.setChecked(True)\n print \"is checked\"\n\n # reconnect everything\n self.connect_all_widgets()\n self.updateSettings()",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbc.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# Nos\n self.nos_button = pyxbmct.RadioButton('')\n self.placeControl(self.nos_button, 10, 3, rowspan=1, columnspan=4)\n self.connect(self.nos_button, self.nos_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nos', 2) == 1:\n self.nos_button.setSelected(True)\n else:\n self.nos_button.setSelected(False)\n nos = pyxbmct.Image(addonfolder+artsfolder+'/nos.png')\n self.placeControl(nos, 10, 3, rowspan=1, columnspan=4)\n\n\t\t# Nos Madeira\n self.madeira_button = pyxbmct.RadioButton('')\n self.placeControl(self.madeira_button, 12, 6, rowspan=1, columnspan=4)\n self.connect(self.madeira_button, self.madeira_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'madeira', 2) == 1:\n self.madeira_button.setSelected(True)\n else:\n self.madeira_button.setSelected(False)\n madeira = pyxbmct.Image(addonfolder+artsfolder+'/madeira.png')\n self.placeControl(madeira, 12, 6, rowspan=1, columnspan=4)\n\n\t\t# Nowo\n self.nowo_button = pyxbmct.RadioButton('')\n self.placeControl(self.nowo_button, 10, 9, rowspan=1, columnspan=4)\n self.connect(self.nowo_button, self.nowo_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nowo', 2) == 1:\n self.nowo_button.setSelected(True)\n else:\n self.nowo_button.setSelected(False)\n nowo = pyxbmct.Image(addonfolder+artsfolder+'/nowo.png')\n self.placeControl(nowo, 10, 9, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def disable_loops_rpy(ui, tab):\n\n if tab == 1:\n status = not ui.radioButton_loops_com.isChecked()\n ui.checkBox_loops_wx.setChecked(status)\n ui.checkBox_loops_wy.setChecked(status)\n ui.checkBox_loops_wz.setChecked(status)\n ui.checkBox_loops_wx.setEnabled(status)\n ui.checkBox_loops_wy.setEnabled(status)\n ui.checkBox_loops_wz.setEnabled(status)\n ui.comboBox_loops_destination.setEnabled(status)\n ui.comboBox_loops_origin.setEnabled(status)\n ui.label_loops_origin.setEnabled(status)\n ui.label_loops_destination.setEnabled(status)\n else:\n if ui.radioButton_loops_none_2.isChecked():\n ui.checkBox_loops_x_2.setEnabled(False)\n ui.checkBox_loops_y_2.setEnabled(False)\n ui.checkBox_loops_z_2.setEnabled(False)\n ui.checkBox_loops_wx_2.setEnabled(False)\n ui.checkBox_loops_wy_2.setEnabled(False)\n ui.checkBox_loops_wz_2.setEnabled(False)\n ui.comboBox_loops_destination_2.setEnabled(False)\n ui.comboBox_loops_origin_2.setEnabled(False)\n ui.label_loops_origin_2.setEnabled(False)\n ui.label_loops_destination_2.setEnabled(False)\n elif ui.radioButton_loops_com_2.isChecked():\n ui.checkBox_loops_wx_2.setChecked(False)\n ui.checkBox_loops_wy_2.setChecked(False)\n ui.checkBox_loops_wz_2.setChecked(False)\n ui.checkBox_loops_wx_2.setEnabled(False)\n ui.checkBox_loops_wy_2.setEnabled(False)\n ui.checkBox_loops_wz_2.setEnabled(False)\n ui.comboBox_loops_destination_2.setEnabled(False)\n ui.comboBox_loops_origin_2.setEnabled(False)\n ui.label_loops_origin_2.setEnabled(False)\n ui.label_loops_destination_2.setEnabled(False)\n ui.checkBox_loops_x_2.setEnabled(True)\n ui.checkBox_loops_y_2.setEnabled(True)\n ui.checkBox_loops_z_2.setEnabled(True)\n else:\n ui.checkBox_loops_wx_2.setChecked(True)\n ui.checkBox_loops_wy_2.setChecked(True)\n ui.checkBox_loops_wz_2.setChecked(True)\n ui.checkBox_loops_wx_2.setEnabled(True)\n ui.checkBox_loops_wy_2.setEnabled(True)\n ui.checkBox_loops_wz_2.setEnabled(True)\n ui.comboBox_loops_destination_2.setEnabled(True)\n ui.comboBox_loops_origin_2.setEnabled(True)\n ui.label_loops_origin_2.setEnabled(True)\n ui.label_loops_destination_2.setEnabled(True)\n ui.checkBox_loops_x_2.setEnabled(True)\n ui.checkBox_loops_y_2.setEnabled(True)\n ui.checkBox_loops_z_2.setEnabled(True)\n ui.checkBox_loops_x_2.setChecked(True)\n ui.checkBox_loops_y_2.setChecked(True)\n ui.checkBox_loops_z_2.setChecked(True)",
"def opt_dialog(self, event):\n dialog = options.OptionsDialog(self, self.options)\n dialog.ShowModal()\n \n self.reconfigure()\n self.info_panel.Layout()\n self.main_panel.Layout()",
"def create_entry_field(self):\n self.e_pm_c = Entry(self.form_box, textvariable=self.df_pc, state='disabled', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n\n self.secondary_currents = [1, 5]\n self.e_sd_c = ttk.Combobox(self.form_box, textvariable=self.df_sc, state='disabled', width=30,\n font=('Arial', 15))\n self.e_sd_c['values'] = self.secondary_currents\n self.e_sd_c.config(background=self.design.color.secondary)\n\n self.user_average_times = [5, 10, 15, 30, 60, 300, 480, 600, 900]\n self.janitza_umg_avg_times = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n self.e_avg_t = ttk.Combobox(self.form_box, textvariable=self.df_avg_t, state='disabled', width=30,\n font=('Arial', 15))\n self.e_avg_t['values'] = self.user_average_times\n self.e_avg_t.config(background=self.design.color.secondary)\n\n self.e_st_nw_t = Radiobutton(self.form_box, text=\"static\", variable=self.nw_t_var, value=0,\n relief=self.design.button_relief,\n command=self.reload, state='disabled', font=('Arial', 15))\n self.e_dn_nw_t = Radiobutton(self.form_box, text=\"dynamic\", variable=self.nw_t_var, value=2,\n relief=self.design.button_relief,\n command=self.reload, state='disabled', font=('Arial', 15))\n\n self.e_ip = Entry(self.form_box, textvariable=self.df_nw_ip, state='disabled', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.e_gw = Entry(self.form_box, textvariable=self.df_nw_gw, state='disabled', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.e_sm = Entry(self.form_box, textvariable=self.df_nw_sm, state='disabled', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.e_mc_a = Entry(self.form_box, state='disabled', textvariable=self.df_mc_a, width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n\n # button\n\n self.cancel_btn = Button(self.save_cancel_box, text='Cancel', command=self.save_cancel_box.quit,\n state='disabled', font=('Arial', 15), bg=self.design.color.secondary)\n\n self.save_btn = Button(self.save_cancel_box, text='Save', command=self.save_dialog, state='disabled',\n foreground=self.design.color.secondary, font=('Arial', 15), bg=self.design.color.primary)\n\n self.send_btn = Button(self.save_cancel_box, text='Send', command=self.send_data_to_device, state='normal',\n foreground=self.design.color.secondary, font=('Arial', 15), bg=self.design.color.primary)",
"def radioButtonWeapon_Clicked( self, event ):\n\t\tself.activateTreasureBox(1)",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/k.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# KI Plus\n self.k1plus_button = pyxbmct.RadioButton('')\n self.placeControl(self.k1plus_button, 8, 1, rowspan=2, columnspan=4)\n self.connect(self.k1plus_button, self.k1plus_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k1plus', 2) == 1:\n self.k1plus_button.setSelected(True)\n else:\n self.k1plus_button.setSelected(False)\n k1plus = pyxbmct.Image(addonfolder+artsfolder+'/k1plus.png')\n self.placeControl(k1plus, 8, 1, rowspan=2, columnspan=4)\n\n\t\t# KI Pro\n self.k1pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k1pro_button, 11, 6, rowspan=2, columnspan=4)\n self.connect(self.k1pro_button, self.k1pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k1pro', 2) == 1:\n self.k1pro_button.setSelected(True)\n else:\n self.k1pro_button.setSelected(False)\n k1pro = pyxbmct.Image(addonfolder+artsfolder+'/k1pro.png')\n self.placeControl(k1pro, 11, 6, rowspan=2, columnspan=4)\n\n\t\t# KII Pro\n self.k2pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k2pro_button, 8, 6, rowspan=2, columnspan=4)\n self.connect(self.k2pro_button, self.k2pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k2pro', 2) == 1:\n self.k2pro_button.setSelected(True)\n else:\n self.k2pro_button.setSelected(False)\n k2pro = pyxbmct.Image(addonfolder+artsfolder+'/k2pro.png')\n self.placeControl(k2pro, 8, 6, rowspan=2, columnspan=4)\n\n\t\t# KIII Pro\n self.k3pro_button = pyxbmct.RadioButton('')\n self.placeControl(self.k3pro_button, 8, 11, rowspan=2, columnspan=4)\n self.connect(self.k3pro_button, self.k3pro_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k3pro', 2) == 1:\n self.k3pro_button.setSelected(True)\n else:\n self.k3pro_button.setSelected(False)\n k3pro = pyxbmct.Image(addonfolder+artsfolder+'/k3pro.png')\n self.placeControl(k3pro, 8, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())"
] | [
"0.6961108",
"0.6948476",
"0.6607761",
"0.6417804",
"0.64009446",
"0.6353846",
"0.63352275",
"0.6329018",
"0.62202984",
"0.6120723",
"0.60597855",
"0.596447",
"0.5951331",
"0.5947885",
"0.5934519",
"0.59198403",
"0.59019685",
"0.5898794",
"0.5886318",
"0.58863086",
"0.5884564",
"0.5864861",
"0.5858559",
"0.58367574",
"0.58249843",
"0.5811543",
"0.57798094",
"0.57711834",
"0.57645553",
"0.57621616"
] | 0.7068373 | 0 |
Gets the ocpc of this Brand. Open Cannabis Product Code for the brand. | def ocpc(self):
        return self._ocpc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_occr(self):\n return self._occr_array",
"def getObcType(): \n return simuConfig[\"OBC\"]",
"def getC(self):\n\t\treturn self.c",
"def _get_cbase(self):\n from PSCalib.CalibParsBasePnccdV1 import CalibParsBasePnccdV1\n return CalibParsBasePnccdV1()",
"def brand(self) -> object:\n return self._brand",
"def circpol(self):\n return self._circpol",
"def mcc(self):\n return self._mcc",
"def BrocCVSPath(self):\n return self._module.broc_cvspath",
"def get_coulomb_info(self):\n return",
"def Crc(self):\n return self._get_attribute('crc')",
"def oa_coki(self) -> COKIOpenAccess:\n\n at = self.access_type\n open = at.oa\n closed = not open\n publisher = at.gold_doaj or at.hybrid or at.bronze\n other_platform = at.green\n publisher_only = publisher and not other_platform\n both = publisher and other_platform\n other_platform_only = at.green_only\n\n # Publisher categories\n oa_journal = at.gold_doaj\n hybrid = at.hybrid\n no_guarantees = at.bronze\n publisher_categories = PublisherCategories(oa_journal, hybrid, no_guarantees)\n\n # Other platform categories\n preprint = self.in_unpaywall and any([repo.category == \"Preprint\" for repo in self.repositories])\n domain = self.in_unpaywall and any([repo.category == \"Domain\" for repo in self.repositories])\n institution = self.in_unpaywall and any([repo.category == \"Institution\" for repo in self.repositories])\n public = self.in_unpaywall and any([repo.category == \"Public\" for repo in self.repositories])\n aggregator = self.in_unpaywall and any([repo.category == \"Aggregator\" for repo in self.repositories])\n other_internet = self.in_unpaywall and any([repo.category == \"Other Internet\" for repo in self.repositories])\n unknown = self.in_unpaywall and any([repo.category == \"Unknown\" for repo in self.repositories])\n other_platform_categories = OtherPlatformCategories(\n preprint, domain, institution, public, aggregator, other_internet, unknown\n )\n\n return COKIOpenAccess(\n open,\n closed,\n publisher,\n other_platform,\n publisher_only,\n both,\n other_platform_only,\n publisher_categories,\n other_platform_categories,\n )",
"def c(self):\n return self._c",
"def BrocPath(self):\n return os.path.join(self._module.workspace, self._module.broc_cvspath)",
"def CL(self):\n return self.__CL",
"def get_com(self):\n return self.com",
"def obtem_ciclo_in(self):\n\n return self.ciclo_in",
"def c(self):\r\n return self.__c",
"def getStdChemComp(self):\n dataDict = self.__dict__\n stdCcpCode = self.stdChemCompCode\n if stdCcpCode is None:\n result = None\n else:\n result = self.memopsRoot.findFirstChemComp(molType=self.molType, ccpCode=stdCcpCode)\n return result",
"def business_sub_cat_code(self) -> str:\n return self._business_sub_cat_code",
"def oclc_uri(marc_record: pymarc.record.Record) -> str:\n return 'http://www.worldcat.org/oclc/%s' % marc_record['001'].value()",
"def brand(self) -> str:\n return self._config_entry.data.get(CONF_BRAND, DEFAULT_BRAND)",
"def cci(self) -> float:\n return self._cci",
"def getCL(self):\r\n return self.cL;",
"def ocsp_url(self):\n\n if self._authority_information_access is None:\n return None\n\n for ad in self._authority_information_access:\n if ad['access_method'].native == 'ocsp' and ad['access_location'].name == 'uniform_resource_identifier':\n return ad['access_location'].chosen.native\n\n return None",
"def get_cp_info(self):\n return self.get(COMMAND_CPM, 'GetCpInfo')",
"def get_copp(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.COPP(data)\n if result is None:\n raise IndicatorException\n return result",
"def company(self):\n return self._company",
"def company(self):\n return self._company",
"def Vc(self):\n return self.__central_volume",
"def cpf(self):\n return self._cpf"
] | [
"0.62935966",
"0.6091956",
"0.58740455",
"0.5798331",
"0.5681632",
"0.5631757",
"0.55966634",
"0.558785",
"0.558676",
"0.5518612",
"0.54687154",
"0.5444637",
"0.5432168",
"0.5428139",
"0.5394201",
"0.53779477",
"0.5367507",
"0.53472936",
"0.52756715",
"0.5263588",
"0.5236587",
"0.5236218",
"0.52293724",
"0.5225959",
"0.52015346",
"0.5195431",
"0.5185991",
"0.5185991",
"0.5173113",
"0.51674825"
] | 0.7520303 | 0 |
Sets the ocpc of this Brand. Open Cannabis Product Code for the brand. | def ocpc(self, ocpc):
        self._ocpc = ocpc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ocpc(self):\n return self._ocpc",
"def cci(self, cci: float):\n\n self._cci = cci",
"def set_c(self, c):\n self.c = c",
"def set_cpe(self, cpe_model):\n self.cpe_model = cpe_model",
"def getObcType(): \n return simuConfig[\"OBC\"]",
"def oclc_uri(marc_record: pymarc.record.Record) -> str:\n return 'http://www.worldcat.org/oclc/%s' % marc_record['001'].value()",
"def set_roc(self):\n # set roc\n self.roc = self.roc_class(poly=self.bounding_poly, graph=self.graph)\n # set roc with ALL data initially\n self.roc.set_data(self.data[:, 1:], index=self.data_index)",
"def set_coor(self, new_coor):\n self.__x_coor, self.__y_coor = new_coor",
"def brand(self, brand):\n\n self._brand = brand",
"def brand(self, brand):\n\n self._brand = brand",
"def brand(self, brand):\n\n self._brand = brand",
"def setC(self, c):\n\t\tself.c = int(c)",
"def move_to_coc(self):\n coc = scale(self.center_of_charge(), -1.0)\n self.translate(coc)",
"def config_cuerpo(self, cuerpo):\n # type: (Robot_Cuerpo)->None\n self.cuerpo = cuerpo",
"def brand(self, brand: object):\n\n self._brand = brand",
"def bcp_set(self, **kwargs):\n pass",
"def BCC(self, value):\n if not self.reg.C:\n self.reg.PC += value",
"def setC0(self, c0value):\n return self._set(c0=c0value)",
"def __init__(self,iniName = 'cosmopy_camb.ini',cambPath = '../CAMB',\n cambParam=None,**kws):\n if cambParam==None:\n self.cp = param.CambParams(**kws)\n else:\n self.cp = cambParam\n \n self.iniName = iniName\n self.cambPath = cambPath",
"def _setbeneficiary_customer_no_option_59(self, val):\n self.swift_obj.BeneficiaryCustomer = val\n self.swift_obj.BeneficiaryCustomer.swiftTag = '59'",
"def BCS(self, value):\n if self.reg.C:\n self.reg.PC += value",
"def cpf(self, cpf):\n self._cpf = cpf",
"def coupon_cd(self, coupon_cd):\n if coupon_cd is None:\n raise ValueError(\"Invalid value for `coupon_cd`, must not be `None`\") # noqa: E501\n\n self._coupon_cd = coupon_cd",
"def c(self, c):\n\n self._c = c",
"def setRLC(self, r, l, c):\n return",
"def oa_coki(self) -> COKIOpenAccess:\n\n at = self.access_type\n open = at.oa\n closed = not open\n publisher = at.gold_doaj or at.hybrid or at.bronze\n other_platform = at.green\n publisher_only = publisher and not other_platform\n both = publisher and other_platform\n other_platform_only = at.green_only\n\n # Publisher categories\n oa_journal = at.gold_doaj\n hybrid = at.hybrid\n no_guarantees = at.bronze\n publisher_categories = PublisherCategories(oa_journal, hybrid, no_guarantees)\n\n # Other platform categories\n preprint = self.in_unpaywall and any([repo.category == \"Preprint\" for repo in self.repositories])\n domain = self.in_unpaywall and any([repo.category == \"Domain\" for repo in self.repositories])\n institution = self.in_unpaywall and any([repo.category == \"Institution\" for repo in self.repositories])\n public = self.in_unpaywall and any([repo.category == \"Public\" for repo in self.repositories])\n aggregator = self.in_unpaywall and any([repo.category == \"Aggregator\" for repo in self.repositories])\n other_internet = self.in_unpaywall and any([repo.category == \"Other Internet\" for repo in self.repositories])\n unknown = self.in_unpaywall and any([repo.category == \"Unknown\" for repo in self.repositories])\n other_platform_categories = OtherPlatformCategories(\n preprint, domain, institution, public, aggregator, other_internet, unknown\n )\n\n return COKIOpenAccess(\n open,\n closed,\n publisher,\n other_platform,\n publisher_only,\n both,\n other_platform_only,\n publisher_categories,\n other_platform_categories,\n )",
"def _setbeneficiary_customer_59A(self, val):\n self.swift_obj.BeneficiaryCustomer_A = val\n self.swift_obj.BeneficiaryCustomer_A.swiftTag = '59A'",
"def cpos2codon(self, cpos):\n\n return self.std_tpt.cpos2codon(cpos)",
"def _set_bank_operation_code_23B(self, val):\n self.swift_obj.BankOperationCode = val\n self.swift_obj.BankOperationCode.swiftTag = \"23B\"",
"def _get_cbase(self):\n from PSCalib.CalibParsBasePnccdV1 import CalibParsBasePnccdV1\n return CalibParsBasePnccdV1()"
] | [
"0.64819264",
"0.5569003",
"0.54588693",
"0.51602656",
"0.51474535",
"0.5130254",
"0.51200026",
"0.5089867",
"0.5015226",
"0.5015226",
"0.5015226",
"0.5014385",
"0.49917543",
"0.49719885",
"0.4937376",
"0.49258742",
"0.4871183",
"0.4865334",
"0.48471576",
"0.48377594",
"0.48085842",
"0.48074266",
"0.47945756",
"0.47842184",
"0.4743565",
"0.47182825",
"0.4655577",
"0.46401167",
"0.4622864",
"0.4618104"
] | 0.7510026 | 0 |
Sets the qr of this Brand. URL for QR that leads to page on Cannabis Reports. | def qr(self, qr):
        self._qr = qr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def qrcode(self):\n\n if not HAS_QRCODE:\n raise AttributeError('QR Code functionality is not enabled. Please add the qrcode '\n 'library to this environment')\n\n return qrcode.make(self.local_config)",
"def set_qr_data(self, qr_data: Sequence[Decoded]):\n self.qr_data = qr_data",
"async def qr(self, ctx, *link: str):\n link = ' '.join(link)\n img = qrcode.make(link) # TODO Run in executor # TODO shrink img size (maybe)\n file = BytesIO()\n img.save(file, 'JPEG') # TODO Run in executor\n file.seek(0)\n url = link if 'http://' in link else 'http://' + link\n await ctx.send(url, file=discord.File(file, 'qr.jpeg'))",
"def qr(self):\n schema = 'bitcoin:{to_address}?amount={amount}&label={label}'\n parsed = schema.format(\n to_address=self.to_address,\n amount=self.amount_btc,\n label=urllib.parse.urlencode({\n \"label\": \"{site}:{project_code}\".format(\n site=self.site.domain,\n project_code=self.project_code\n )\n })[6:]\n )\n qr = qrcode.make(parsed, image_factory=qrcode.image.svg.SvgImage)\n buffer = io.BytesIO()\n qr.save(buffer)\n return buffer.getvalue()",
"def qr(self):\n return self._qr",
"def _set_url(self): \n self.url = self.geturl()",
"def qr(self, text):\n qr_code = qrcode.QRCode(version=4, box_size=4, border=1)\n qr_code.add_data(text)\n qr_code.make(fit=True)\n im = qr_code.make_image()._img.convert(\"RGB\")\n # Convert the RGB image in printable image\n self._convert_and_print_image(im)",
"def render_qrcode(self, width=50, height=50):\n qr = self.scoresaber.get_qrcode(self.user_data.id)\n qr = qr.resize((width, height))\n return qr",
"def setURL(self, url, write=True):\n if self.radioConfig == None:\n raise Exception(\"No RadioConfig has been read\")\n\n # URLs are of the form https://www.meshtastic.org/c/#{base64_channel_settings}\n # Split on '/#' to find the base64 encoded channel settings\n splitURL = url.split(\"/#\")\n decodedURL = base64.urlsafe_b64decode(splitURL[-1])\n self.radioConfig.channel_settings.ParseFromString(decodedURL)\n if write:\n self.writeConfig()",
"def barbican_url(self):",
"def set_uri(self, uri):\r\n self.uri = uri",
"def setBaseURL(self,value):\n self.PDFreactorConfiguration.in1[\"baseURL\"] = value",
"def set_url(self, url):\n self.url = url",
"def set_url(self, url):\n self.url = url",
"def qrscaner():\n title = 'Сканер'\n return render_template(\n 'receipt/qrscaner.html',\n page_title=title,\n )",
"def generateQRCodeFor(website, toLocation):\n valid = validateURL(website)\n\n if not valid:\n raise URLError\n\n img = qrcode.make(website)\n\n img.save(toLocation)",
"def set_url(self, url):\n self.data['url'] = url",
"def get_qr():\n qr = g.driver.get_qr_plain()\n return jsonify({'qr': qr})",
"def setUrl( self, url ):\n self._urlEdit.setText(str(url))",
"def set_url(self, url):\n if url is not None:\n self.url = url",
"def set_url(self, url):\n super(Cabling, self).set_url(url)",
"def setRemoteUrl(self, value, **kwargs):\n if value:\n value = urlparse.urlunparse(urlparse.urlparse(value))\n self.getField('remoteUrl').set(self, value, **kwargs)",
"def qrcode():\n return render_template('sampleCoupon.html')",
"def uri(self, uri):\n self._uri = uri",
"def uri(self, uri):\n self._uri = uri",
"def make_qr(df):\n stuff = df.survey_name.tolist()\n df = pd.DataFrame({'stuff': stuff})\n df[\"num\"] = df.index + 1\n df[\"c\"] = df.num % 3\n df.c = df.c.replace(0, 3)\n df[\"page\"] = ((df.num + 23) // 24)\n df[\"r\"] = (((df.num - 1) // 3) + 1) % 8\n df.r = df.r.replace(0, 8)\n\n dt = str(datetime.datetime.now()).replace(\":\", \"_\")\n ext = \".pdf\"\n pdf_path = f\"{S.pdf_prefix}QR_codes{ext}\"\n\n # MAKE CANVAS\n c = canvas.Canvas(pdf_path)\n c.setPageSize((612, 792))\n last_page = None\n\n # SPLIT INTO PAGES, COLUMNS, ROWS\n for index, row in df.iterrows():\n page = row[\"page\"]\n if page != last_page:\n if last_page == None:\n last_page = page\n\n else:\n c.showPage()\n last_page = page\n\n col = row[\"c\"]\n _row = row[\"r\"]\n i = row[\"stuff\"]\n code = f\"{i}\"\n\n # print(f\"page {page} row {_row} column {col} info {i}\")\n\n # DRAW QR\n def draw_qr(data, base=r\"C:\\Users\\mkreidler\\Desktop\\pdf print barcodes\\\\\"):\n # draw QR\n dt = str(datetime.datetime.now()).replace(\":\", \"_\")\n random = uuid.uuid4()\n img_path = f\"{base}{dt}{random}.png\" ##can't write file paths with certain characters, removing 'i' variable here\n\n qr = qrcode.QRCode()\n qr.add_data(data)\n img = qr.make_image().save(img_path)\n return img_path\n\n # DRAW 1 TAG\n def draw_tag(x_num, y_num, code):\n flip_y = {1: 8,\n 2: 7,\n 3: 6,\n 4: 5,\n 5: 4,\n 6: 3,\n 7: 2,\n 8: 1}\n\n y_num = flip_y[y_num]\n\n qr = draw_qr(code)\n x_increment = 204\n y_increment = 99\n\n x = x_increment * (x_num - 1)\n y = y_increment * (y_num - 1)\n\n c.drawString(102.5491 + x, 33.7174 + y, code)\n c.drawInlineImage(qr, 0 + x, 0 + y, width=99, height=99)\n os.remove(qr)\n c.drawInlineImage(S.logo_path, 102.5491 + x, 71.2396 + y, width=84.2926,\n height=14.0488) # replace maroon with bnw later\n\n draw_tag(col, _row, code)\n\n # SAVE DOCUMENT\n c.save()\n print(\"QR codes done!\")\n return pdf_path",
"def set_uri(self, uri):\n self.__uri = uri",
"def test_generate_barcode_qr_code(self):\n pass",
"def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_cwrexport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))",
"def set_card_link(self):\n self.response.card.type = 'LinkAccount'"
] | [
"0.5993275",
"0.5985283",
"0.54421943",
"0.5390959",
"0.5306285",
"0.5193873",
"0.5183114",
"0.5134523",
"0.5026647",
"0.49453598",
"0.49438182",
"0.49417466",
"0.49301583",
"0.4922631",
"0.49054387",
"0.48905766",
"0.48594776",
"0.48203534",
"0.478166",
"0.47167686",
"0.46874297",
"0.4681973",
"0.46714",
"0.46696258",
"0.46696258",
"0.4661445",
"0.46606225",
"0.46574557",
"0.4656765",
"0.46364293"
] | 0.6674546 | 0 |
Gets the flowers of this Brand. OCPCs of the flowers from this brand. | def flowers(self):
        return self._flowers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def funnels(self):\r\n return resources.Funnels(self)",
"def getScatterers(self):\n return self.scatterers",
"def travelers(self):\n return self._travelers",
"def flowers(self, flowers):\n\n self._flowers = flowers",
"def fcvs(self): \n return self._link_reg.fcvs",
"def get_frontends(self):\n ret = []\n for i in self.all_instances:\n if i.is_frontend():\n ret.append(i)\n return ret",
"def ideal_harvesters(self) -> int:\n return self.proto.ideal_harvesters",
"def getScatterers(self):\n raise NotImplementedError(\"The must be overloaded\")",
"def get_thruster_values(self):\n return self.thrusters.get()",
"def reactors(self):\n return self._reactors",
"def flow(self):\n return self._flow",
"def get_chairs(self):\n return self.chairs",
"def getChannels(self) -> List:\n\t\tif self._taking_off:\n\t\t\tself.takeOff()\n\n\t\tif self._landing:\n\t\t\tself.land()\n\n\t\treturn self._altHoldController.getChannels() + [2000]",
"def fan_list(self):\n return self._fan_list",
"def fan_list(self):\n return self._fan_list",
"def chamfered_faces(self) -> Sequence[Face]:\n if not self._cached_chamfered_faces:\n self._cached_chamfered_faces = self._get_faces(self._chamfered_face_indices)\n return list(self._cached_chamfered_faces)",
"def get_cameras(self):\n return self._cameras[:]",
"def get_cffts(self):\n return [\n rfft(self.nx, self.dx, fft=self.tfft, ny=self.ny,\n dy=self.dy).get_cfft(),\n rfft(self.nx, self.dx, fft=self.efft, ny=self.ny,\n dy=self.dy).get_cfft(),\n rfft(self.nx, self.dx, fft=self.bfft, ny=self.ny,\n dy=self.dy).get_cfft()\n ]",
"def pipes(self): \n return self._link_reg.pipes",
"def available_tracers(self):\n return self.target.read_value(self.available_tracers_file).split(' ')",
"def fat(self) -> List[RecipeObjectNutrientsCalories]:\n return self._fat",
"def get_flows(self, num_flows_per_entry):\n flows = []\n for tenant in self._tenants:\n for contract in tenant.get_children(only_class=Contract):\n providing_epgs = contract.get_all_providing_epgs()\n consuming_epgs = contract.get_all_consuming_epgs()\n for providing_epg in providing_epgs:\n vlan_ifs = providing_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n providing_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n providing_phys_if = phys_ifs[0].name\n for consuming_epg in consuming_epgs:\n vlan_ifs = consuming_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n consuming_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n consuming_phys_if = phys_ifs[0].name\n if providing_vlan == consuming_vlan and providing_phys_if == consuming_phys_if:\n # Skip this case since traffic would be switched outside fabric\n continue\n for filter_entry in contract.get_all_filter_entries():\n for i in range(0, num_flows_per_entry):\n flow = Flow()\n flow.ethertype = filter_entry.etherT\n if flow.ethertype == 'arp':\n flow.arp_opcode = filter_entry.arpOpc\n flow.populate_random_ip_addresses()\n elif flow.ethertype == 'ip':\n flow.populate_random_ip_addresses()\n flow.proto = filter_entry.prot\n if flow.proto == '6' or flow.proto == '17':\n dFromPort = int(filter_entry.dFromPort)\n dToPort = int(filter_entry.dToPort)\n sFromPort = int(filter_entry.sFromPort)\n sToPort = int(filter_entry.sToPort)\n if dFromPort == 0:\n dFromPort = 1\n dToPort += 1\n if sFromPort == 0:\n sFromPort = 1\n sToPort += 1\n if dToPort > 65534:\n dToPort = 65534\n if sToPort > 65534:\n sToPort = 65534\n flow.dport = str(random_number(dFromPort,\n dToPort))\n flow.sport = str(random_number(sFromPort,\n sToPort))\n if flow.proto == '6':\n flow.tcp_rules = filter_entry.tcpRules\n flow.svlan = providing_vlan\n flow.dvlan = consuming_vlan\n flow.src_intf = providing_phys_if\n flow.dst_intf = consuming_phys_if\n\n # Is the flow expected to succeed ?\n flow.expected_action = 'drop'\n providing_bd = providing_epg.get_bd()\n consuming_bd = consuming_epg.get_bd()\n if providing_bd and consuming_bd:\n if providing_bd == consuming_bd:\n if providing_bd.get_context() == consuming_bd.get_context():\n flow.expected_action = 'permit'\n flow.populate_random_mac_addresses()\n flows.append(flow)\n return flows",
"def get_channels(self):\n return self.channels",
"def _forecasters(self):\n return self._get_estimator_tuples(self.forecasters, clone_ests=False)",
"def carbs(self) -> List[RecipeObjectNutrientsCalories]:\n return self._carbs",
"def get_front_distances(self):\n return np.array([self.get_distance(name) for name in self.front_distance_sensors])",
"def devices(self):\n return self.parents",
"def get_bayer_images(self) -> typing.List[np.ndarray]:\n return [rbg_to_bayer_bg(c.get_image()) for c in self.cameras]",
"def channels(self):\n return self._channels",
"def spectators(self):\n return self._return_if('_spectators')"
] | [
"0.6138125",
"0.5807703",
"0.54127574",
"0.5399067",
"0.5316327",
"0.5261371",
"0.52180266",
"0.5177034",
"0.51683444",
"0.50811946",
"0.49370635",
"0.49328735",
"0.4917407",
"0.49171916",
"0.49171916",
"0.49038213",
"0.49015483",
"0.4899251",
"0.48930192",
"0.48838463",
"0.48793125",
"0.4876003",
"0.48745733",
"0.48727322",
"0.48634815",
"0.4861667",
"0.48343283",
"0.48312953",
"0.48099825",
"0.4801853"
] | 0.7915461 | 0 |
Sets the flowers of this Brand. OCPCs of the flowers from this brand. | def flowers(self, flowers):
        self._flowers = flowers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flowers(self):\n return self._flowers",
"def flows(self, flows):\n\n self._flows = flows",
"def travelers(self, travelers):\n\n self._travelers = travelers",
"def brands(self, brands):\n\n self._brands = brands",
"def set_thruster_values(self, values):\n self.thrusters.set(values)",
"def setSteer(self, steer):\r\n if steer < -1.0:\r\n steer = -1.0\r\n elif steer > 1.0:\r\n steer = 1.0\r\n self.steer = steer\r\n for tire in self.tires:\r\n if tire.steerable:\r\n if ( steer < 0.0 and tire.model.getX() > 0.0 ) or ( steer > 0.0 and tire.model.getX() < 0.0 ):\r\n tire.shape.setSteerAngle( self.innerSteer * steer )\r\n else:\r\n tire.shape.setSteerAngle( self.outerSteer * steer )",
"def set_cameras(self, cameras):\n self._cameras = cameras[:]",
"def set_coefs(self, sites, values):\n self.set_coefs_sites(sites)\n self.epistasis.data.values = values\n self.build()\n return self",
"def reviewers(self, reviewers):\n\n self._reviewers = reviewers",
"def allergies(self, allergies):\n\n self.logger.debug(\"In 'allergies' setter.\")\n\n self._allergies = allergies",
"def set_coefs_sites(self, sites):\n self.order = max([len(s) for s in sites])\n self.add_epistasis()\n return self",
"def signers(self, signers):\n\n self._signers = signers",
"def set_FullfillmentChannel(self, value):\n super(ListOrdersInputSet, self)._set_input('FullfillmentChannel', value)",
"def trailers(self, trailers):\n\n self._trailers = trailers",
"def netflow_devices(self, netflow_devices):\n\n self._netflow_devices = netflow_devices",
"def set_carrier(name):\n _local.carrier = name",
"def flow(self, flow):\n\n self._flow = flow",
"def edges(self, edges):\n\n self._edges = edges",
"def SetFather(self, *args):\n return _XCAFDoc.XCAFDoc_GraphNode_SetFather(self, *args)",
"def set(self, episodes):\n self.episode_set = episodes",
"def set_edges(self, edges):\n self._tree.set_edges(edges)\n self._program = make_propagation_program(self._tree.tree_grid)",
"def set(self):\n\n raise Exception(\"Can't set frmt.\")",
"def devicenodes(self, devicenodes):\n\n self._devicenodes = devicenodes",
"def set_clients(self, pps, pfs):\n\n self._clients = {\n 'pps': pps,\n 'pfs': pfs\n }",
"def wire_chains(self):\n allChains = self.instances.getAllChainInstances()\n for chain in allChains:\n logging.debug(\"%s\", chain)\n allChains[chain].setup_event_path()",
"def setup(self, channels):\n self.channels = channels[:]",
"def bends(self, bends):\n\n self._bends = bends",
"def bends(self, bends):\n\n self._bends = bends",
"def processors(self, processors):\n\n self._processors = processors",
"def sources(self, sources):\n\n self._sources = sources"
] | [
"0.6286461",
"0.59785575",
"0.59650105",
"0.54660434",
"0.5426391",
"0.54213387",
"0.5248241",
"0.520174",
"0.51438886",
"0.5066561",
"0.497986",
"0.4968117",
"0.4878492",
"0.4867621",
"0.48621327",
"0.4842074",
"0.4787131",
"0.47476414",
"0.47450688",
"0.47389582",
"0.4726082",
"0.47116598",
"0.46985644",
"0.46652633",
"0.46589068",
"0.4638177",
"0.46356687",
"0.46356687",
"0.4621218",
"0.4603196"
] | 0.80202395 | 0 |
Gets the extracts of this Brand. OCPCs of the extracts from this brand. | def extracts(self):
return self._extracts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract(self):\n pass",
"def extract(self) -> Entries:\n raise NotImplementedError('This method must be implemented by subclasses')",
"def docExtract(self):\n\n self.fv = []\n for doc in self.documents:\n self.fv.append(self.featureSet.extract(doc))\n\n # Convert to a numpy matrix.\n return np.array(np.asmatrix(self.fv))\n # return self.fv",
"def extracts(self, extracts):\n\n self._extracts = extracts",
"def extract(self, text: str) -> List[Extraction]:\n\n doc = self._parser(text)\n\n extractions = list()\n for sent in doc.sents:\n this_extraction = Extraction(value=sent.text,\n extractor_name=self.name,\n start_token=sent[0],\n end_token=sent[-1],\n start_char=sent.text[0],\n end_char=sent.text[-1])\n extractions.append(this_extraction)\n\n return extractions",
"def extract(self):\r\n raise NotImplementedError()",
"def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()",
"def extract(self):\n if self.files:\n return dict((f, self.read_file(f)) for f in self.files)\n else:\n raise RuntimeError(\"Can't extract whole archive without listfile.\")",
"def extract(self):\n\n # print some infos about data\n print(\"\\n--extract batches from data:\\ntrain: {}\\nval: {}\\ntest: {}\\n\".format(self.data[0]['x'].shape, self.data[1]['x'].shape, self.data[2]['x'].shape))\n\n # create batches\n self.x_train, self.y_train, _ = self.create_batches(self.data[0], batch_size=self.batch_size)\n self.x_val, self.y_val, _ = self.create_batches(self.data[1], batch_size=self.batch_size_eval)\n self.x_test, self.y_test, _ = self.create_batches(self.data[2], batch_size=self.batch_size_eval)\n\n # my data\n if len(self.mfcc_data_files) == 4:\n self.x_my, self.y_my, self.z_my = self.create_batches(self.data[3], batch_size=1)",
"def do_extract(self, xpath):\n s = Selector(self.driver.page_source)\n for i, result in enumerate(s.xpath(xpath).getall(), 1):\n print(i, result)",
"def _get_extracts(self, code):\n rows = code.split('\\n')\n extracts = []\n # If the code is shorter than 10 lines, we ignore this file\n if 10 <= len(rows) < 15:\n # If the code is 10 to 15 lines, we use the whole file as corpus\n extracts.append(code)\n elif len(rows) >= 15:\n # If the code is longer than 15 lines, we split it into multiple\n # extracts of lenght generated randomly (10 to 15 lines each)\n while len(rows) > 10:\n # Generate an extract using the first r rows, with r a random\n # number between 10 and 20\n r = random.randint(10, 20)\n extracts.append('\\n'.join(rows[:r]))\n # Remove the first r rows\n rows = rows[r + 1:]\n return extracts",
"def extract(self, images):\n if images.shape[0] > self.ram_size:\n self.logger.warning(f'Number of inputs on RAM is larger than '\n f'{self.ram_size}. Please use '\n f'`self.get_batch_inputs()` to split the inputs! '\n f'Otherwise, it may encounter OOM problem!')\n\n results = []\n for batch_images in self.get_batch_inputs(images):\n results.append(self._extract(batch_images))\n\n return np.concatenate(results, axis=0)",
"def getArcs(self):\n return self.getArcsFrom()",
"def extract(self,*args): \n if not self._extract:\n raise RuntimeError('This archive is read-only!')\n \n lst = [ self._getitem(n) for n in args ]\n \n return lst if len(lst) > 1 else lst[0]",
"def _extract(self):\r\n self._data = []\r\n for fname in self.files:\r\n meta = dict(filename=fname)\r\n\r\n # Perform the actual metadata extraction\r\n fname = os.path.splitext(self.filter_filename(fname))[0]\r\n values = fname.split(self.sep)\r\n\r\n # Handle the case where number of fields is less than the length\r\n # of the extracted values, ie cases where we only want to extract\r\n # a subset of available fields.\r\n if self.index:\r\n values = [val for i, val in enumerate(values) if i in self.index]\r\n\r\n meta.update(dict(zip(self.fields, values)))\r\n if self.split_by in self.fields:\r\n meta[self.split_by] = self._get_split_field_values(meta['filename'])\r\n self._data.append(meta)",
"def extract(self, count):\n return [self.next_gene() for _ in range(0, count)]",
"def extract_files(self) -> list:\n pass",
"def getAtoms(self):\n return self.atoms",
"def extract_features(self, *args, **kwargs):\n return self(*args, **kwargs)",
"def get_image_urls(self):\n return self.get_extract_image_urls(is_first=False)",
"def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}",
"def extract(self):\n \n print('Extracting Metrics data... ',end=''),\n self.df = pd.read_excel(self.file_path, index_col=0)\n print('Done')",
"def get(self) -> list:\n return self.__expedition",
"def extract(self) -> pulumi.Output[Optional['outputs.JobExtract']]:\n return pulumi.get(self, \"extract\")",
"def get_content(self):\r\n return self.parameters, self.spectrum",
"def get_all(self):\n return self.__fetcher.get_fetched()",
"def extract_all_CE(self):\n return np.array([self.CE(p_true, p_model) for (p_true, p_model) in list(zip(self.devY_cat, self.raw_y_pred))])",
"def unpack(self):\n return []",
"def extract(self) -> pd.core.frame.DataFrame:\n pass",
"def ExtractFeatures(self):\n\n self.MFCC = librosa.feature.mfcc(self.sample, sr=self.sample_rate, n_mfcc=13)\n self.MFCC_DELTA = librosa.feature.delta(self.MFCC)\n self.MEL_SPECTROGRAM = librosa.feature.melspectrogram(self.sample, sr=self.sample_rate)\n f, t, SPECTRO = signal.spectrogram(self.sample)\n self.SPECTRO\n self.LPC = np.array(audiolazy.lazy_lpc.lpc.autocor(self.sample, 2).numerator)\n self.FFT = np.fft.fft(self.sample)\n widths = np.arange(1, 31)\n self.CWT = signal.cwt(self.sample, signal.ricker, widths)"
] | [
"0.57091737",
"0.5493332",
"0.5355976",
"0.5308664",
"0.527625",
"0.5262795",
"0.5224808",
"0.51906633",
"0.5173061",
"0.5153938",
"0.5153916",
"0.5119628",
"0.5085625",
"0.50793606",
"0.5076042",
"0.5073633",
"0.5030286",
"0.4993764",
"0.497434",
"0.4949473",
"0.4939414",
"0.49333104",
"0.48742503",
"0.48610845",
"0.4859076",
"0.4845327",
"0.480871",
"0.48012373",
"0.4799239",
"0.4798142"
] | 0.74892557 | 0 |
Sets the extracts of this Brand. OCPCs of the extracts from this brand. | def extracts(self, extracts):
self._extracts = extracts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extracts(self):\n return self._extracts",
"def run_extraction(self):\n self.background_estimator = ReflectedRegionsBackgroundEstimator(\n observations=self.observations, **self.config[\"background\"]\n )\n self.background_estimator.run()\n\n self.extraction = SpectrumExtraction(\n observations=self.observations,\n bkg_estimate=self.background_estimator.result,\n **self.config[\"extraction\"]\n )\n\n self.extraction.run()",
"def from_extract(self, extract: List[Chain], X: np.ndarray, y: np.ndarray) -> 'SklearnModel':\n new_model = deepcopy(self)\n combined_chain = self._combine_chains(extract)\n self._model_samples, self._prediction_samples = combined_chain[\"model\"], combined_chain[\"in_sample_predictions\"]\n self._acceptance_trace = combined_chain[\"acceptance\"]\n new_model.data = self._convert_covariates_to_data(X, y)\n return new_model",
"def is_extracted(self, is_extracted):\n self._is_extracted = is_extracted",
"def extract(self):\n pass",
"def set_additionaldata_extractor(self, extractor):\r\n if not extractor:\r\n raise ValueError(\"extractor must not be null!\")\r\n self.additional_data_extractor = extractor",
"def set_extracte_date(self, extracte_date):\n if extracte_date is not None:\n self.extracte_date = extracte_date",
"def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()",
"def bcp_set(self, **kwargs):\n pass",
"def extract(self, butler, data, **kwargs):\n self.safe_update(**kwargs)\n\n slot = self.config.slot\n\n if butler is not None:\n self.log.warn(\"Ignoring butler\")\n\n mask_files = self.get_mask_files()\n superbias_frame = self.get_superbias_frame(mask_files)\n superflat_file = data[0]\n\n l_frame = self.get_ccd(None,\n superflat_file.replace('.fits', '_l.fits'),\n mask_files)\n h_frame = self.get_ccd(None,\n superflat_file.replace('.fits', '_h.fits'),\n mask_files)\n ratio_frame = self.get_ccd(None,\n superflat_file.replace('.fits', '_r.fits'),\n mask_files)\n\n # This is a dictionary of dictionaries to store all the\n # data you extract from the sflat_files\n row_data_dict = {}\n col_data_dict = {}\n amp_data_dict = {}\n data_dict = dict(row=row_data_dict,\n col=col_data_dict,\n amp=amp_data_dict)\n\n # Analysis goes here, you should fill data_dict with data extracted\n # by the analysis\n #\n\n amps = get_amp_list(ratio_frame)\n for i, amp in enumerate(amps):\n dims = get_dims_from_ccd(ratio_frame)\n regions = get_geom_regions(ratio_frame, amp)\n imaging = regions['imaging']\n l_im = get_raw_image(l_frame, amp).image\n h_im = get_raw_image(h_frame, amp).image\n ratio_im = get_raw_image(ratio_frame, amp).image\n if superbias_frame is not None:\n superbias_im = get_raw_image(superbias_frame, amp).image\n else:\n superbias_im = None\n\n self.low_images[i] = l_im[imaging].array\n self.high_images[i] = h_im[imaging].array\n self.ratio_images[i] = ratio_im[imaging].array\n self.superbias_images[i] = superbias_im[imaging].array\n\n quality_mask = np.zeros(self.superbias_images[i].shape)\n quality_mask += 1. * np.invert(np.fabs(self.superbias_images[i]) < 10)\n quality_mask += 2. * np.invert(np.fabs(self.ratio_images[i] - 0.020) < 0.0015)\n self.quality_masks[i] = quality_mask\n\n row_data_dict['row_i'] = np.linspace(0, dims['nrow_i']-1, dims['nrow_i'])\n row_data_dict['l_med_%s_a%02i' % (slot, i)] = np.median(self.low_images[i], 1)\n row_data_dict['h_med_%s_a%02i' % (slot, i)] = np.median(self.high_images[i], 1)\n row_data_dict['r_med_%s_a%02i' % (slot, i)] = np.median(self.ratio_images[i], 1)\n if superbias_im is not None:\n row_data_dict['sbias_med_%s_a%02i' % (slot, i)] =\\\n np.median(self.superbias_images[i], 1)\n\n col_data_dict['col_i'] = np.linspace(0, dims['ncol_i']-1, dims['ncol_i'])\n col_data_dict['l_med_%s_a%02i' % (slot, i)] = np.median(self.low_images[i], 0)\n col_data_dict['h_med_%s_a%02i' % (slot, i)] = np.median(self.high_images[i], 0)\n col_data_dict['r_med_%s_a%02i' % (slot, i)] = np.median(self.ratio_images[i], 0)\n if superbias_im is not None:\n col_data_dict['sbias_med_%s_a%02i' % (slot, i)] =\\\n np.median(self.superbias_images[i], 0)\n\n amp_data_dict['l_med_%s_a%02i' % (slot, i)] = [np.median(self.low_images[i])]\n amp_data_dict['h_med_%s_a%02i' % (slot, i)] = [np.median(self.high_images[i])]\n amp_data_dict['r_med_%s_a%02i' % (slot, i)] = [np.median(self.ratio_images[i])]\n\n\n dtables = TableDict()\n dtables.make_datatable('files', make_file_dict(None, [slot]))\n for key, val in data_dict.items():\n dtables.make_datatable(key, val)\n\n return dtables",
"def set_attributes(self, customer, **kwargs):\n assert customer is None or isinstance(customer, CustomerBase)\n self._customer = customer\n if 'windfarm_name' in kwargs.keys():\n self._windfarm = kwargs['windfarm_name']\n if 'tag' in kwargs.keys():\n self._tag = kwargs['tag']\n if 'config' in kwargs.keys():\n self._config = kwargs['config']",
"def classifierCopy(self, old_cl, explore_iter):\n self.specified_attributes = copy.deepcopy(old_cl.specified_attributes)\n self.condition = copy.deepcopy(old_cl.condition)\n self.action = copy.deepcopy(old_cl.action)\n self.ga_timestamp = explore_iter\n self.init_timestamp = explore_iter\n self.prediction = old_cl.prediction\n self.error = old_cl.error\n self.fitness = old_cl.fitness",
"def perform_extraction(self) -> None:\n\n self._process_datasets_all_frames()",
"def backups(self, backups):\n\n self._backups = backups",
"def set_coefs(self, sites, values):\n self.set_coefs_sites(sites)\n self.epistasis.data.values = values\n self.build()\n return self",
"def extract_data_sample(self, extract_data_sample):\n\n self._extract_data_sample = extract_data_sample",
"def elf_images_some(self, elf_images_some):\n\n self._elf_images_some = elf_images_some",
"def _init_extractors(self):\n @self.extractors_wrapper(\"networkx\")\n def get_nx_extractor(graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return nx_xtrct.nx_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"neo4j\")\n def get_neo4j_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return neo4j_xtrct.neo4j_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"edgelist\")\n def get_edgelist_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return edgelist_xtrct.edgelist_extractor(\n self.extractor_json[self.extractor_name], graph\n )",
"def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()",
"def _set_attributes(self):",
"def featuresets(self, featuresets):\n\n self._featuresets = featuresets",
"def extract(self, butler, data, **kwargs):\n self.safe_update(**kwargs)\n\n bias_files = data['BIAS']\n\n mask_files = self.get_mask_files()\n\n self.log_info_slot_msg(self.config, \"%i files\" % len(bias_files))\n\n ref_frames = {}\n\n nfiles = len(bias_files)\n s_correl = np.ndarray((16, nfiles-1))\n p_correl = np.ndarray((16, nfiles-1))\n\n for ifile, bias_file in enumerate(bias_files):\n if ifile % 10 == 0:\n self.log_progress(\" %i\" % ifile)\n\n ccd = self.get_ccd(butler, bias_file, mask_files)\n if ifile == 0:\n dims = get_dims_from_ccd(ccd)\n nrow_i = dims['nrow_i']\n ncol_i = dims['ncol_i']\n amps = get_amp_list(ccd)\n for i, amp in enumerate(amps):\n regions = get_geom_regions(ccd, amp)\n image = get_raw_image(ccd, amp)\n ref_frames[i] = get_image_frames_2d(image, regions)\n continue\n self.get_ccd_data(ccd, ref_frames,\n ifile=ifile, s_correl=s_correl, p_correl=p_correl,\n nrow_i=nrow_i, ncol_i=ncol_i)\n\n self.log_progress(\"Done!\")\n\n data = {}\n for i in range(16):\n data['s_correl_a%02i' % i] = s_correl[i]\n data['p_correl_a%02i' % i] = p_correl[i]\n\n dtables = TableDict()\n dtables.make_datatable('files', make_file_dict(butler, bias_files))\n dtables.make_datatable(\"correl\", data)\n return dtables",
"def paths(self, paths):\r\n self._paths = paths\r\n self._extract()",
"def part(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def __init__(self, darks):\n super(ListDark, self).__init__(darks)\n self.hot_pixel_mask = None",
"def set(self, **kwargs):\n for ax in self.axes.flat:\n ax.set(**kwargs)\n return self",
"def predictors(self, predictors):\n\n self._predictors = predictors",
"def extract(self):\r\n raise NotImplementedError()",
"def brands(self, brands):\n\n self._brands = brands",
"def extractall(self, *args, **kwargs):\n self.zipfile.extractall(*args, **kwargs)"
] | [
"0.5561792",
"0.5235037",
"0.5122618",
"0.5065962",
"0.4858689",
"0.4837116",
"0.48006856",
"0.4736323",
"0.47359043",
"0.46972084",
"0.4598676",
"0.45915288",
"0.45594144",
"0.45588872",
"0.45516175",
"0.4539661",
"0.45346084",
"0.4530291",
"0.44973975",
"0.4475857",
"0.4456626",
"0.4445582",
"0.4439943",
"0.44368502",
"0.44335055",
"0.44166872",
"0.44152182",
"0.440208",
"0.43895325",
"0.43889973"
] | 0.7489454 | 0 |
Gets the edibles of this Brand. OCPCs of the edibles from this brand. | def edibles(self):
return self._edibles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self) -> list:\n return self.__expedition",
"def get_escobas(self):\n return self.escobas",
"def escobas(self):\n return self._escobas",
"def edibles(self, edibles):\n\n self._edibles = edibles",
"def energies(self) -> np.ndarray:\n return np.array([item.energy for item in self])",
"def get_econs(self):\n eham = self.beads.vpath*self.nm.omegan2 + self.nm.kin + self.forces.pot\n eham += self.bias.pot # bias\n for e in self._elist:\n eham += e.get()\n\n return eham + self.eens",
"def get_energies(self):\n N = len(self.particles)\n\n # Use C++ version if cppenabled\n if(self.cppenabled):\n energies = np.zeros(3) # Initialises Energy output array\n accelerate_lib.c_getenergies(self.get_positions(), self.get_velocities(), \\\n energies, self.boxdim, self.LJ_cutoff)\n return np.array(energies)\n\n # Python calculation if cppenabled = False:\n pot = Total_PE(self.particles, self.LJ_cutoff, self.boxdim)\n kin = Total_KE(self.get_velocities())\n\n return np.array([pot, kin, pot+kin])",
"def els(self):\n return self._els",
"def get_eangles(self):\n return self.eangles",
"def endorsements(self) -> t.Sequence[str]:\n return self.shards(\"endorsements\")[\"endorsements\"].split(\",\")",
"def list_editions(self):\n return sorted([self.catalog[key]['edition'] for key in self.catalog])",
"def getEcosystems(self):\n return self.__getColumnData(Q_ECOSYSTEMS, 'ecosystem')",
"def energy_states(self) -> List[int]:\n return self._energy_states",
"def get_E(self):\r\n return self.Real.E, self.Ideal.E",
"def get_E(self):\r\n return self.Real.E, self.Ideal.E",
"def erpac(self):\n return self._erpac",
"def get_escalators(self, active=False, **endpoint_kwargs):\n return self._facilities('ESCALATOR', active, **endpoint_kwargs)",
"def comitentes(self):\n return self.expedientepersona_set.filter(comitente=True)",
"def absorption_energy_eV(self):\n return self._absorption_energy_eV.copy()",
"def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter",
"def get_Ec(self):\n return self.Ec",
"def getEnergyConsumers(self):\n return self._EnergyConsumers",
"def IEs(self):\n return self._ies",
"def evasion(self):\n return self.rpc.call(MsfRpcMethod.ModuleEvasion)['modules']",
"def get_decs(self):\n return [i for i in self.decisions.keys()]",
"def get(self):\n return self.__expedition",
"def etls(self):\r\n return self._etls",
"def diferencias(self):\n return self._diferencias",
"def eula_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"eula_ids\")",
"def emb(self, entity):\n fv = []\n fv.extend(self.name_model.emb(entity))\n fv.extend(super().emb(entity))\n # if self.config.debug:\n # print('== emb ==')\n # print('==> name_model: %s' % self.name_model.emb(entity))\n # print('==> sub_ent_model: %s' % self.sub_ent_model.emb(entity))\n # print('== bme ==')\n return fv"
] | [
"0.6675802",
"0.661738",
"0.66034377",
"0.62080145",
"0.6023536",
"0.5934542",
"0.5931936",
"0.58610815",
"0.5732703",
"0.5711136",
"0.56978786",
"0.5690391",
"0.5612935",
"0.55846983",
"0.55846983",
"0.55751747",
"0.5565897",
"0.55330265",
"0.5528248",
"0.55117154",
"0.54840463",
"0.54165214",
"0.54054344",
"0.53824717",
"0.5368141",
"0.5364179",
"0.5347807",
"0.53407586",
"0.5337187",
"0.53243244"
] | 0.7684369 | 0 |
Sets the edibles of this Brand. OCPCs of the edibles from this brand. | def edibles(self, edibles):
self._edibles = edibles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edibles(self):\n return self._edibles",
"def diabetes(self, diabetes):\n\n self.logger.debug(\"In 'diabetes' setter.\")\n\n self._diabetes = diabetes",
"def set(self, episodes):\n self.episode_set = episodes",
"def set_parameters(self, *args, **kwargs):\n super(DAEM, self).set_parameters(*args, **kwargs)\n self._Em = self._calc_Em()",
"def escobas(self):\n return self._escobas",
"def set_doses(self, new_value):\n\n # increment counter\n Counter.increment(centres=0, vaccines=new_value, patients=0)\n\n self.doses_available = new_value\n\n self.save()",
"def set_list_of_expenses(self):\n fix_exp = DB.get_fixed_expenses(self.customer.email)\n var_exp = DB.get_variable_expenses(self.customer.email)\n self.listOfExpensesSEK.item(2).setText(str(fix_exp[\"subscription\"]))\n self.listOfExpensesSEK.item(3).setText(str(fix_exp[\"insurance\"]))\n self.listOfExpensesSEK.item(4).setText(str(fix_exp[\"rent\"]))\n self.listOfExpensesSEK.item(5).setText(str(fix_exp[\"others\"]))\n\n self.listOfExpensesSEK.item(11).setText(str(var_exp[\"food\"]))\n self.listOfExpensesSEK.item(12).setText(str(var_exp[\"bills\"]))\n self.listOfExpensesSEK.item(13).setText(str(var_exp[\"transportation\"]))\n self.listOfExpensesSEK.item(14).setText(str(var_exp[\"hygien\"]))\n self.listOfExpensesSEK.item(15).setText(str(var_exp[\"clothes\"]))\n self.listOfExpensesSEK.item(16).setText(str(var_exp[\"entertainment\"]))\n self.listOfExpensesSEK.item(17).setText(str(var_exp[\"others\"]))",
"def set_list_of_expenses(self):\n fix_exp = DB.get_fixed_expenses(self.customer.email)\n var_exp = DB.get_variable_expenses(self.customer.email)\n self.listOfExpensesSEK.item(2).setText(str(fix_exp[\"subscription\"]))\n self.listOfExpensesSEK.item(3).setText(str(fix_exp[\"insurance\"]))\n self.listOfExpensesSEK.item(4).setText(str(fix_exp[\"rent\"]))\n self.listOfExpensesSEK.item(5).setText(str(fix_exp[\"others\"]))\n\n self.listOfExpensesSEK.item(11).setText(str(var_exp[\"food\"]))\n self.listOfExpensesSEK.item(12).setText(str(var_exp[\"bills\"]))\n self.listOfExpensesSEK.item(13).setText(str(var_exp[\"transportation\"]))\n self.listOfExpensesSEK.item(14).setText(str(var_exp[\"hygien\"]))\n self.listOfExpensesSEK.item(15).setText(str(var_exp[\"clothes\"]))\n self.listOfExpensesSEK.item(16).setText(str(var_exp[\"entertainment\"]))\n self.listOfExpensesSEK.item(17).setText(str(var_exp[\"others\"]))",
"def get_escobas(self):\n return self.escobas",
"def get(self) -> list:\n return self.__expedition",
"def set_all(self):\n\n self.ecm = EnergyConsumptionModel(\n vehicle_type=\"car\",\n vehicle_size=list(self.array.coords[\"size\"].values),\n powertrains=list(self.array.coords[\"powertrain\"].values),\n cycle=self.cycle,\n gradient=self.gradient,\n country=self.country,\n )\n\n diff = 1.0\n\n while diff > 0.0001:\n old_driving_mass = self[\"driving mass\"].sum().values\n self.set_vehicle_mass()\n self.set_power_parameters()\n self.set_component_masses()\n self.set_auxiliaries()\n self.set_power_battery_properties()\n self.set_battery_properties()\n self.set_energy_stored_properties()\n self.set_recuperation()\n\n if \"FCEV\" in self.array.powertrain.values:\n self.set_fuel_cell_power()\n self.set_fuel_cell_mass()\n\n # if user-provided values are passed,\n # they override the default values\n if \"capacity\" in self.energy_storage:\n self.override_battery_capacity()\n\n diff = (self[\"driving mass\"].sum().values - old_driving_mass) / self[\n \"driving mass\"\n ].sum()\n\n self.set_ttw_efficiency()\n self.calculate_ttw_energy()\n self.set_ttw_efficiency()\n\n self.set_range()\n\n if self.target_range:\n self.override_range()\n\n self.set_share_recuperated_energy()\n self.set_battery_fuel_cell_replacements()\n self.adjust_cost()\n\n self.set_electric_utility_factor()\n self.set_electricity_consumption()\n self.set_costs()\n self.set_hot_emissions()\n self.set_particulates_emission()\n self.set_noise_emissions()\n self.create_PHEV()\n if self.drop_hybrids:\n self.drop_hybrid()\n\n self.remove_energy_consumption_from_unavailable_vehicles()",
"def absorption_energy_eV(self):\n return self._absorption_energy_eV.copy()",
"def set_etacalc(self, etacalc):\n self.__etacalc = etacalc",
"def set_ages(self):\n for i in self.individuals.values():\n i.set_age(i._age_line)",
"def cad(self, cad):\n\n self.logger.debug(\"In 'cad' setter.\")\n\n self._cad = cad",
"def eid(self, eid):\n\n self._eid = eid",
"def set_rod(self, E: float, rod: ea.CosseratRod) -> None:\n\n self.E = E\n self.shearable_rod = rod\n self.simulator.append(self.shearable_rod)\n\n \"\"\" Set up boundary conditions \"\"\"\n self.simulator.constrain(self.shearable_rod).using(\n ea.OneEndFixedBC,\n constrained_position_idx=(0,),\n constrained_director_idx=(0,),\n )\n\n # Set exponential damper\n # Below damping tuned for time-step 2.5E-4\n damp_coefficient = 0.5e-2 # 0.05\n density = 1\n radius_base = self.shearable_rod.radius[0]\n damping_constant = (\n damp_coefficient / density / (np.pi * radius_base**2) / 15\n ) # For tapered rod /15 stable\n self.simulator.dampen(self.shearable_rod).using(\n ea.AnalyticalLinearDamper,\n damping_constant=damping_constant,\n time_step=self.time_step,\n )",
"def add_electrode(self, e, name, kind, volt):\r\n\t\te.volt = volt\r\n\t\tself.electrode_dict[name] = (kind, e)\r\n\t\tif kind=='dc':\r\n\t\t\tself.dc_electrode_list.append((name,e))\r\n\t\tif kind=='rf':\r\n\t\t\tself.rf_electrode_list.append((name,e))",
"def evals(self):\n\t\tpass",
"def setEsquerdo(self, esquerdo):\n self.__esquerdo = esquerdo",
"def get_E(self):\r\n return self.Real.E, self.Ideal.E",
"def get_E(self):\r\n return self.Real.E, self.Ideal.E",
"def offers(self, offers):\n\n self._offers = offers",
"def DirES():\n\n global Asm\n\n target.BoundarySync()\n\n dec.Asm.Memory = 2\n dec.Asm.BOL_Address = dec.Asm.EM_Address\n dec.Asm.List_Address = dec.Asm.EM_Address",
"def set_epv_list(context, epv_list):\n context.epv_list = epv_list",
"def envs(self, envs):\n self._instructions_setter('ENV', envs)",
"def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n 
C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 
'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 
0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n 
C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = -C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n 
#%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if 
(LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 
0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 
0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- 
Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs",
"def engine_states(self, engine_states):\n\n self._engine_states = engine_states",
"def get_econs(self):\n eham = self.beads.vpath*self.nm.omegan2 + self.nm.kin + self.forces.pot\n eham += self.bias.pot # bias\n for e in self._elist:\n eham += e.get()\n\n return eham + self.eens",
"def id_endereco(self, id_endereco):\n self._id_endereco = id_endereco"
] | [
"0.6503128",
"0.53816485",
"0.5341736",
"0.5276346",
"0.5155568",
"0.5078174",
"0.5073542",
"0.5073542",
"0.5060536",
"0.50315374",
"0.4993845",
"0.4909943",
"0.49020875",
"0.4885647",
"0.4885031",
"0.48756042",
"0.48724318",
"0.4863869",
"0.4851659",
"0.4823056",
"0.48116922",
"0.48116922",
"0.48058346",
"0.4787507",
"0.47830707",
"0.47806838",
"0.47746864",
"0.47484928",
"0.47458884",
"0.4725694"
] | 0.818293 | 0 |
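The tail of the elastic-constants document above reduces the fitted 6x6 stiffness matrix to Voigt, Reuss and Hill averages. Below is a minimal standalone sketch of just that averaging step, using the same formulas that appear in the snippet; it assumes a symmetric 6x6 stiffness matrix `C` already in GPa, and the function name is illustrative rather than part of the original class.

```python
import numpy as np

def voigt_reuss_hill(C):
    """Polycrystalline averages from a symmetric 6x6 stiffness matrix C (GPa)."""
    S = np.linalg.inv(C)  # compliance matrix
    # Voigt (upper-bound) averages
    BV = (C[0, 0] + C[1, 1] + C[2, 2] + 2 * (C[0, 1] + C[0, 2] + C[1, 2])) / 9
    GV = ((C[0, 0] + C[1, 1] + C[2, 2]) - (C[0, 1] + C[0, 2] + C[1, 2])
          + 3 * (C[3, 3] + C[4, 4] + C[5, 5])) / 15
    # Reuss (lower-bound) averages from the compliance matrix
    BR = 1 / (S[0, 0] + S[1, 1] + S[2, 2] + 2 * (S[0, 1] + S[0, 2] + S[1, 2]))
    GR = 15 / (4 * (S[0, 0] + S[1, 1] + S[2, 2])
               - 4 * (S[0, 1] + S[0, 2] + S[1, 2])
               + 3 * (S[3, 3] + S[4, 4] + S[5, 5]))
    # Hill averages and the derived Young's modulus / Poisson ratio
    BH, GH = 0.5 * (BV + BR), 0.5 * (GV + GR)
    EH = 9 * BH * GH / (3 * BH + GH)
    nuH = (1.5 * BH - GH) / (3 * BH + GH)
    return BV, GV, BR, GR, BH, GH, EH, nuH
```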
Gets the products of this Brand. OCPCs of the products from this brand. | def products(self):
return self._products | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_products(self):\n return self._make_get_request(self._urls['products'])",
"def ListProducts(self):\n return copy.deepcopy(self._products)",
"def products(self):\r\n return products.Products(self)",
"def products(self):\n return list(Product.select())",
"def get(self):\n return Products().get_all_products()",
"def products(self):\r\n return self._products",
"def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()",
"def listProducts(self):\n response = self.productClient.list_products(parent=self.locationPath)\n return [ProductSearch.Product._fromResponse(self.productSearch, x) for x in response]",
"def get_products(self):\n return [item.code for item in self._products]",
"def get_all_products(self):\n\t\tpass",
"def list_products(self):\n url = self.base_url\n # TODO add filtering support when holvi api supports it\n obdata = self.connection.make_get(url)\n return ProductList(obdata, self)",
"def get_all_products():\n data = order_obj.get_all_products()\n return data",
"def products(self):\n from hubspot3.products import ProductsClient\n\n return ProductsClient(**self.auth, **self.options)",
"def get(self):\n return Product.query.all()",
"def getListOfProducts(self, *args):\n return _libsbml.Reaction_getListOfProducts(self, *args)",
"def get_products(self):\n page = 1\n out = []\n while True:\n resp = self.get_session().Product.find(limit=10,page=page)\n if not len(resp):\n return\n yield resp\n page += 1",
"def products(self):\r\n return Products(self)",
"def get(self):\n return ProductModel.query.all()",
"def get_products(self, query_args={}):\n endpoint = '/v3/educator/products'\n result = self.request(endpoint, query_args)\n\n products = []\n for data in result.response:\n # Dynamically load product instance.\n class_name = data.type.capitalize()\n product = Product.instance(class_name, data)\n products.append(product)\n\n return products",
"def fetch_all_products():\n products = []\n client = ProductsClient()\n for product in client.get_products():\n products.append(Product(\n base_currency=product[0],\n quote_currency=product[1],\n ))\n return products",
"def get_products(self):\n formatted_products = []\n resp = woo_api\n for product in resp:\n formatted = self._format_product(product)\n formatted_products.append(formatted)\n return formatted_products",
"def products(self):\n _products = []\n for ext in self.exts:\n prod = Product(ext, self.node.opath_from_ext(ext))\n _products.append(prod)\n\n return _products",
"def get_products(self, code):\n if code not in self._baskets:\n raise BasketDoesNotExistException()\n return self._baskets[code].get_products()",
"def get_all_products():\n products = app.session.query(models.Product).all()\n return products",
"def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products",
"def products(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/products'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def obtener_productos():\n\n # Se crea la lista de objetos Producto()\n productos = [\n Producto(\"Caja chica\", 5, 100.0),\n Producto(\"Caja mediana\", 3, 185.0),\n Producto(\"Caja grande\", 1, 299.0)\n ]\n\n return productos",
"def __iter__(self):\n return self._products.__iter__()",
"def available_products(self):\n # TODO - take into account bands, and what bands available in input products, etc\n return {k: self.__products__[k].description for k in self.__products__.keys()}",
"def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})"
] | [
"0.76241523",
"0.7566509",
"0.7565762",
"0.756514",
"0.7535659",
"0.751238",
"0.7440312",
"0.7398282",
"0.7388195",
"0.72041655",
"0.71844614",
"0.71142894",
"0.71076566",
"0.7074467",
"0.7047273",
"0.7024207",
"0.69793373",
"0.6892202",
"0.67760795",
"0.6706479",
"0.6648986",
"0.6581126",
"0.65640104",
"0.65625614",
"0.64831185",
"0.6472876",
"0.64712334",
"0.6377807",
"0.6333724",
"0.6300313"
] | 0.7824494 | 0 |
Sets the products of this Brand. OCPCs of the products from this brand. | def products(self, products):
self._products = products | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def product(self, product):\n self._product = product",
"def product(self, product):\n\n self._product = product",
"def product(self, product):\n\n self._product = product",
"def product_groups(self, product_groups):\n\n self._product_groups = product_groups",
"def products(self):\n from hubspot3.products import ProductsClient\n\n return ProductsClient(**self.auth, **self.options)",
"def fill_data_product(self):\n self.product.fill_data_product(self.list_products, self.mycursor, self.my_database)",
"def ListProducts(self):\n return copy.deepcopy(self._products)",
"def products(self):\r\n return products.Products(self)",
"def product_types(self, product_types):\n\n self._product_types = product_types",
"def products(self):\n return self._products",
"def products(self):\r\n return Products(self)",
"def product_sizes(self, product_sizes):\n\n self._product_sizes = product_sizes",
"def products(self):\r\n return self._products",
"def add_products(self, products):\n return [self.add_product(product) for product in set(products)]",
"def products(self):\n return list(Product.select())",
"def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()",
"def emulate_off_api_manager_products(cls):\n cls.products = OFF_API_FILTERED_PRODUCTS",
"def UpdateProducts(self, products):\n def _CreateUpdateProductPayload(model, quantity):\n request = xml.etree.ElementTree.Element('Request')\n product = xml.etree.ElementTree.SubElement(request, 'Product')\n skus = xml.etree.ElementTree.SubElement(product, 'Skus')\n sku = xml.etree.ElementTree.SubElement(skus, 'Sku')\n sku_seller_sku = xml.etree.ElementTree.SubElement(sku, 'SellerSku')\n sku_seller_sku.text = model\n sku_quantity = xml.etree.ElementTree.SubElement(sku, 'Quantity')\n sku_quantity.text = str(quantity)\n\n preamble = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>'\n return preamble + xml.etree.ElementTree.tostring(request)\n\n results = []\n for p in products:\n if not p.modified:\n continue\n\n # Create XML request\n payload = _CreateUpdateProductPayload(p.model, p.quantity)\n\n result = self._Request(\n _UPDATE_PRODUCT_QUANTITY_ACTION, payload=payload)\n result.attachment = p\n\n results.append(result)\n\n return results",
"def get_all_products(self):\n\t\tpass",
"def openproducts(self):\n\n print \"Open products\"\n self.combo_product_list.setEnabled(True)\n frame=self.combo_area_list.currentText()\n self.combo_product_list.clear()\n self.combo_dataset_list.clear()\n self.combo_variable_list.clear()\n print str(frame)\n list_glo=[]\n if str(frame) == \"GLOBAL\":\n for key in self.dict_prod.keys():\n if str(frame) in key :\n list_glo.append(str(key))\n ind=0\n #print \"Frame %s \" %(frame)\n for key in self.dict_prod.keys():\n if str(frame) == \"BAL\":\n frame1=\"_BAL_\"\n frame2=\"-BAL-\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"NWS\":\n frame1=\"NORTHWESTSHELF_\"\n frame2=\"NWS\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"GLOBAL\":\n if str(frame) in key :\n if ind == 0 :\n self.combo_product_list.addItem(list_glo[5])\n elif ind == 5 : \n self.combo_product_list.addItem(list_glo[0])\n else : \n self.combo_product_list.addItem(list_glo[ind])\n ind+=1\n else :\n if str(frame) in key :\n self.combo_product_list.addItem(str(key))\n self.combo_dataset_list.setEnabled(True)",
"def setGeneProduct(self, *args):\n return _libsbml.GeneProductRef_setGeneProduct(self, *args)",
"def set_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.product_kendo_dropdown_locator, product)",
"def products(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/products'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def data_products(self, observation_id, product_id=None):\n pass",
"def product(self, product):\n if product is None:\n raise ValueError(\"Invalid value for `product`, must not be `None`\") # noqa: E501\n\n self._product = product",
"def product_config(self, product_config):\n\n self._product_config = product_config",
"def sync_products_with_gmc(self) :\n products = self.env['product.product'].search([('sync_with_mc','=',True), ('website_published','=',True), ('google_product_brand_id','!=',False), ('google_merchant_center_id','!=',False),('google_mcid','=',False)])\n _logger.info('Total products to be synced------ %s', len(products))\n self.sync_product_with_gmc(products)",
"def insert_products(self):\n logic = ProductLogic()\n \n try:\n # We create the list of product objects\n products = self.objects_factory.create_product_object_list()\n products = set(products)\n\n for product in products:\n logic.insert(product)\n except:\n print('Il y a eu un problème lors de la récupération des données, veuillez rééssayer')",
"def set_product_names(self, product_names):\n if not all(isinstance(product_name, str) for product_name in product_names):\n raise ApiError(\"One or more invalid product names\")\n self._update_criteria(\"product_name\", product_names)\n return self",
"def set_product_names(self, product_names):\n if not all(isinstance(product_name, str) for product_name in product_names):\n raise ApiError(\"One or more invalid product names\")\n self._update_criteria(\"product_name\", product_names)\n return self"
] | [
"0.661774",
"0.6568006",
"0.6568006",
"0.65407103",
"0.64364976",
"0.6406533",
"0.63383055",
"0.6323213",
"0.6307473",
"0.6213368",
"0.6169101",
"0.6024144",
"0.6013564",
"0.59371376",
"0.59202284",
"0.591889",
"0.58627635",
"0.5844896",
"0.58155435",
"0.5800325",
"0.58000284",
"0.57169676",
"0.5716149",
"0.56983215",
"0.5689008",
"0.568203",
"0.567874",
"0.56574446",
"0.5645422",
"0.5645422"
] | 0.8054498 | 1 |
Gets the created_at of this Brand. Date and time record was created, UTC. | def created_at(self):
return self._created_at | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def created_at(self) -> datetime.datetime:\n return self._created_at",
"def created_at(self):\n return self.getattr('created_at')",
"def created_at(self) -> \"datetime\":\n return self._attrs.get(\"createdAt\")",
"def created_at(self) -> \"datetime\":\n return self._attrs.get(\"createdAt\")",
"def created_at(self) -> \"datetime\":\n return self._attrs.get(\"createdAt\")",
"def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")",
"def created_at(self):\n return self.data[\"attributes\"][\"createdAt\"]",
"def created_at(self):\n return self._domain.created_at",
"def created_at(self) -> \"str\":\n return self._attrs.get(\"createdAt\")",
"def CreatedAt(self):\n return self._created_at",
"def created_at(self) -> datetime:\n return utils.snowflake_time(self.id)",
"def created_at(self):\n return string_to_datetime(self._dict.get('created_at'))",
"def get_created_at(self, instance):\n return instance.created_at.strftime(\"%B %d, %Y\")",
"def get_created_at(self, instance):\n return instance.created_at.strftime(\"%B %d, %Y\")",
"def get_created_at(self, instance):\n return instance.created_at.strftime(\"%B %d, %Y\")",
"def created_at(self) -> Optional[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> Optional[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> Optional[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> Optional[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> Optional[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> Optional[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> datetime:\n return util.to_datetime(self.doc.get('createdAt'))",
"def created_at(self):\n return self.viztrail.created_at",
"def created(self) -> datetime:\n return datetime.strptime(self.data['created_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')",
"def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")"
] | [
"0.83030766",
"0.82845074",
"0.82037127",
"0.82037127",
"0.82037127",
"0.81389177",
"0.81389177",
"0.81389177",
"0.8138423",
"0.7994378",
"0.79931575",
"0.7978305",
"0.79707175",
"0.79177344",
"0.7915649",
"0.7915649",
"0.7915649",
"0.7847033",
"0.7847033",
"0.7847033",
"0.7847033",
"0.7847033",
"0.7847033",
"0.7833918",
"0.77248687",
"0.7719685",
"0.7678978",
"0.7678978",
"0.7678978",
"0.7678978"
] | 0.83646435 | 1 |
Return a string representation of this priority queue. | def __repr__(self):
return 'PriorityQueue({} items, front={})'.format(self.size(), self.front()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __repr__(self):\n return str(self._queue_items)",
"def __str__(self):\n data_str = [str(i) for i in self._data]\n return \"QUEUE { \" + \", \".join(data_str) + \" }\"",
"def __str__(self):\n data_str = [str(i) for i in self._data]\n return \"QUEUE { \" + \", \".join(data_str) + \" }\"",
"def __str__(self):\n return \"The queue contains: \" + str(self.the_queue)",
"def __str__(self):\n return str(self._heap)",
"def __str__(self) -> str:\n out = \"QUEUE: \" + str(self.da.size) + \" elements. \"\n out += str(self.da.data[:self.da.size])\n return out",
"def __repr__(self):\n return 'Queue({})'.format(self.length())",
"def __str__(self):\n out = \"QUEUE: \" + str(self.da.length()) + \" elements. [\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'",
"def __str__(self):\r\n out = \"QUEUE: \" + str(self.da.length()) + \" elements. [\"\r\n out += ', '.join([str(self.da.get_at_index(_))\r\n for _ in range(self.da.length())])\r\n return out + ']'",
"def __str__(self):\r\n\r\n if self._size > 0:\r\n\r\n lst = [str(self._data[item]) for item in range(self._size)]\r\n str1 = str(lst) + \" Capacity: \" + str(self._capacity)\r\n\r\n return str1\r\n\r\n else:\r\n return \"Empty Stack\"",
"def __repr__(self):\n return str(((\"P\" if self.is_P() else \"Q\"),self._children))",
"def __repr__(self):\n return 'ResizingArrayQueue([{}])'.format(', '.join(repr(i) for i in self))",
"def to_str(self):\n # type: () -> str\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n # type: () -> str\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n # type: () -> str\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())",
"def to_str(self):\n return pprint.pformat(self.to_dict())"
] | [
"0.80490476",
"0.76343757",
"0.76343757",
"0.7606811",
"0.7515623",
"0.74697816",
"0.7391219",
"0.72890997",
"0.72077906",
"0.71557087",
"0.7066766",
"0.7015214",
"0.69701236",
"0.69701236",
"0.69701236",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065",
"0.69171065"
] | 0.7990086 | 1 |
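For context, the `__repr__` document above composes `self.size()` and `self.front()`. A minimal heap-backed skeleton it could sit on is sketched below; the internal `_heap` attribute and the use of `heapq` are assumptions for illustration, not details taken from the original class.

```python
import heapq

class PriorityQueue:
    """Illustrative heap-backed priority queue (internals are assumed)."""

    def __init__(self):
        self._heap = []  # list kept in heap order by heapq

    def size(self):
        return len(self._heap)

    def front(self):
        # heap[0] is always the smallest entry; the payload sits last in the tuple
        return self._heap[0][-1] if self._heap else None

    def __repr__(self):
        return 'PriorityQueue({} items, front={})'.format(self.size(), self.front())
```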
Insert the given item into this priority queue in order according to the given priority. | def enqueue(self, item, priority):
# TODO: Insert given item into heap
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)",
"def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))",
"def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)",
"def add(self, item, priority):\n heappush(self.contents, (priority, item))",
"def put(self, item, priority=None, *args, **kwargs):\n if priority is None:\n raise self.PQueueException('priority must be specified')\n super().put((priority, item), *args, **kwargs)",
"def insert(self, item):\n self.heaplist.append(item)\n self.currentsize += 1\n self.shift_item_up(self.currentsize)",
"def insert(self, value, priority=2):\n if not isinstance(priority, int):\n raise TypeError(\"Priority must be an integer\")\n if priority in self.priority_queue:\n self.priority_queue[priority].append(value)\n else:\n self.priority_queue[priority] = [value]\n print(self.priority_queue)",
"def insert(self, item):\n self._heap.append(item)\n self._perc_up(len(self._heap) - 1)",
"def enqueue(self, item):\n self.__queue.insert(0, item)",
"def enqueue(self, priority, value, key=None):\n key = key if key else value\n if key in self._index:\n self._update(priority, key)\n return\n self._heap.append(Item(priority, value, key))\n self._size = len(self._heap)\n self._index[key] = self._size - 1\n self._sift_up(self._size - 1)",
"def push(self, item):\n heapq.heappush(self.heap, item)",
"def enqueue(self, item):\n self.items.insert(0, item)",
"def enqueue(self, item):\n self.items.insert(0, item)",
"def insert(self, id, priority):\n self.n += 1\n i = self.n\n while i > 1:\n pIdx = int(i/2)\n p = self.elements[pIdx]\n\n if priority > p[PRIORITY]:\n break\n self.elements[i] = list(p)\n self.positions[p[ID]] = 1\n i = pIdx\n\n self.elements[i][ID] = id\n self.elements[i][PRIORITY] = priority\n self.positions[id] = i",
"def enqueue(self, item):\n\t\tself.items.insert(0, item)",
"def push(self, item: tuple):\n self.__heap.append(item)\n self.__sift_up(self.__len__() - 1)",
"def push(self, priority, key):\n index = len(self.__heap)\n self.__position[key] = index\n self.__heap.append([priority, key])\n self.__bubble_up(index)",
"def insert(self, pri):\n heaps = self.priorities\n if pri > 10 or pri < 1:\n raise ValueError(\n 'Priority must be between 1 (high) - 10 (low)'\n )\n if pri not in heaps.keys():\n self._create_priorities(pri)\n\n priority = heaps.get(pri)\n priority.push(self._order)\n self._order += 1",
"def append(self,data,priority):\r\n\t\tbisect.insort(self.queue,(priority,data))",
"def enqueue(self, item):\n self._queue.append(item)",
"def push_pop(self, item, priority):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Replace and return min item from heap, if any\n ...",
"def enqueue(self, item):\n self.queue.append(item)",
"def enqueue(self, item):\n self.queue.append(item)",
"def push(self, item):\n self.heap.append(self.m * item)\n self._sift_up()",
"def put(self, item, priority=False):\n id = uuid.uuid4().hex\n pipe = self.redis.pipeline()\n\n if priority:\n pipe.rpush(self.feed_ids, id)\n else:\n pipe.lpush(self.feed_ids, id)\n pipe.incr(self.feed_publishes)\n pipe.hset(self.feed_items, id, item)\n pipe.zadd(self.feed_published, **{id: int(time.time()*1000)})\n pipe.execute()\n return id",
"def insert(self, node, priority=0):\n\n if node in self.entry_finder:\n self.delete(node)\n entry = [priority, node]\n self.entry_finder[node] = entry\n # logger_cagada.debug(\"el puto entry %s\" % entry)\n # logger_cagada.debug(\"l nodo q c agrega %s es %s\" % (type(node), node))\n self.heappush(self.heap, entry)\n # logger_cagada.debug(\"el finde aora es %s\" % self.entry_finder)\n # logger_cagada.debug(\"el heap aora es %s\" % self.heap)\n self.valida_caca()",
"def insert(self, item):\n index = self.insert_at_next_index(item)\n self.items[index] = item\n while index > 1:\n parent_index = index / 2 # Truncate, e.g. 4 and 5 have parent 2.\n if self.is_heap_order(self.items[parent_index], self.items[index]):\n # The item does not need to bubble up anymore. Done.\n return\n else:\n # Swap items at index and parent_index\n temp = self.items[index]\n self.items[index] = self.items[parent_index]\n self.items[parent_index] = temp\n index = parent_index\n # The item bubbled all the way to the root. Done.\n return",
"def push(self, transition, priority):\n priority = priority * 10000\n priority = self._clip_p(priority)\n priority = int(priority)\n # if we reached the capacity, overwrite the oldest item\n if (self.size == self.capacity):\n self.queue[self.to_write%self.capacity] = transition\n self.sum_tree.update(self.to_write%self.capacity,priority)\n else:\n self.queue.append(transition)\n self.sum_tree.push(priority)\n self.to_write = self.to_write + 1",
"def add(self, item):\n # must keep two pointers marching\n # in synch down the list.\n current = self._head\n previous = None\n while current != None:\n if current.getData() > item:\n # we’ve reached the insertion spot\n break\n else:\n # otherwise, advance both pointers\n previous = current\n current = current.getNext()\n temp = Node(item)\n if previous == None:\n # insert at the start of the list\n temp.setNext(self._head)\n self._head = temp\n else:\n temp.setNext(current)\n previous.setNext(temp)",
"def push(self, item):\n self.linked_list.prepend(item)"
] | [
"0.80786383",
"0.7865082",
"0.78041065",
"0.77141374",
"0.7674754",
"0.73016906",
"0.7278352",
"0.71895283",
"0.71843404",
"0.71743834",
"0.7151005",
"0.70398396",
"0.70398396",
"0.6892338",
"0.6884585",
"0.6837504",
"0.680479",
"0.68006176",
"0.67287093",
"0.67193216",
"0.6710441",
"0.668576",
"0.668576",
"0.66814315",
"0.66655016",
"0.6664169",
"0.65975374",
"0.65688664",
"0.654463",
"0.6526807"
] | 0.8348072 | 0 |
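A sketch of the heap insertion the TODO above describes, assuming Python's standard `heapq` (a min-heap, so lower numbers mean higher priority) and an `itertools.count` tie-breaker so that items with equal priority keep insertion order; the free-function form and module-level names are illustrative.

```python
import heapq
from itertools import count

_tie_breaker = count()  # keeps equal-priority items in insertion order

def enqueue(heap, item, priority):
    """Push item onto the heap keyed by priority (smallest priority first)."""
    heapq.heappush(heap, (priority, next(_tie_breaker), item))

# usage: heap[0] holds the entry with the smallest priority
pq = []
enqueue(pq, 'task-a', 2)
enqueue(pq, 'task-b', 1)  # 'task-b' is now at the front since 1 < 2
```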
Return the item at the front of this priority queue without removing it, or None if this priority queue is empty. | def front(self):
if self.size() < 1:
return None
else:
# TODO: Return min item from heap, if any
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def front(self):\n return self.queue[0] if not self.empty() else None",
"def top(self): # O(1)\n if not self.queue:\n return None\n return self.queue[0]",
"def peek_front(self):\n\n if self.items:\n return self.items[0]\n return None",
"def peek(self):\r\n if self.size():\r\n return self.queue[0]\r\n else:\r\n return None",
"def peek(self):\n if not self.empty():\n return self.queue[-1]\n return None",
"def getFront(self):\n\t\tfront = self.queue[self.front]\n\t\treturn front\n\t\tpass",
"def peek_front(self):\n\n if (self._size == 0):\n return None\n\n return self._head.value",
"def peek(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n return self.priority_queue[nextkey][0]\n else:\n raise IndexError(\"There's nothing in your queue\")",
"def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element # front aligned with head of list",
"def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._data[self._front]",
"def peek(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n return self.first.item",
"def top(self):\n if len(self) == 0:\n raise IndexError('top from empty queue')\n return self.lst[self.head]",
"def first(self):\n if self.is_empty():\n raise Empty(\"Queue undeflow.\")\n return self._head._element",
"def remove_front(self):\n\n if self.items:\n return self.items.pop(0)\n return None",
"def Front(self):\r\n if (len(self.queue) >= 1):\r\n return self.queue[0]\r\n else:\r\n return -1",
"def Front(self):\n if self.count == 0:\n return -1\n return self.queue[self.headIndex]",
"def peek_first(self):\n if len(self._heap) == 0:\n return None\n else:\n return self._heap[0]",
"def peek(self):\n if self.isEmpty(): \n raise Exception(\"Queue underflow\")\n return self._q[self._first]",
"def Front(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[0]",
"def Front(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[0]",
"def first(self):\r\n if self.is_empty():\r\n raise Empty(\"Queue is empty\")\r\n return self._head._element",
"def first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._head._element",
"def first(self) -> Optional[T]:\n if len(self.entry_finder) == 0:\n return None\n for (_, _, (item,)) in self.priority_queue:\n if item is not None:\n return cast(T, item)\n return None",
"def peek(self):\n return self.the_queue[0]",
"def Front(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[self.front]",
"def peek(self):\n if not self.front:\n raise AttributeError(\"Can't peek from an empty queue\")\n return self.front.value\n\n # try:\n # return self.front.value\n # except AttributeError:\n # return \"Can't peek front from an empty queue\"",
"def peek(self):\r\n return self.queue[0]",
"def peek(self):\r\n return self.queue[0]",
"def front(queue):\n if empty_queue(queue):\n raise IndexError(\"Queue is empty!\")\n else:\n return queue.front.value",
"def peek(self):\n return self.queue[0]"
] | [
"0.83861667",
"0.812015",
"0.79910254",
"0.7972703",
"0.79355526",
"0.79029673",
"0.7864357",
"0.7844265",
"0.76888186",
"0.7597412",
"0.75942206",
"0.7568451",
"0.7565252",
"0.75484496",
"0.7531286",
"0.75137365",
"0.75074184",
"0.7491985",
"0.74702764",
"0.74702764",
"0.7462963",
"0.7439831",
"0.7433538",
"0.7432552",
"0.7428331",
"0.7400357",
"0.7395515",
"0.7395515",
"0.7384593",
"0.738113"
] | 0.83011127 | 1 |
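A matching sketch of the peek described above, assuming the same `(priority, counter, item)` heap entries as in the earlier enqueue example: the smallest entry always sits at index 0, so no pop is needed.

```python
def front(heap):
    """Return the minimum-priority item without removing it, or None if empty."""
    if not heap:
        return None
    _priority, _count, item = heap[0]  # index 0 is the heap minimum
    return item
```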
Remove and return the item at the front of this priority queue, or raise ValueError if this priority queue is empty. | def dequeue(self):
if self.size() < 1:
raise ValueError('Priority queue is empty and has no front item')
else:
# TODO: Remove and return min item from heap, if any
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")",
"def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...",
"def peek(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n return self.first.item",
"def remove_front(self):\n\n if self.items:\n return self.items.pop(0)\n return None",
"def pop(self) -> T:\n while self.priority_queue:\n _, _, (item,) = heapq.heappop(self.priority_queue)\n if item is not None:\n del self.entry_finder[item] # type: ignore\n return cast(T, item)\n raise KeyError('pop from an empty priority queue')",
"def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')",
"def dequeue_front(self):\n try:\n return self._items.pop(0)\n except:\n raise IndexError('The deque is empty')",
"def peek(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n return self.priority_queue[nextkey][0]\n else:\n raise IndexError(\"There's nothing in your queue\")",
"def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r",
"def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! ')",
"def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem",
"def push_pop(self, item, priority):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Replace and return min item from heap, if any\n ...",
"def peek(self):\n if self.isEmpty(): \n raise Exception(\"Queue underflow\")\n return self._q[self._first]",
"def dequeue(self):\n if not self.front:\n raise AttributeError(\"Can't dequeue from an empty queue\")\n\n removed = self.front\n self.front = self.front.next\n return removed.value\n # try:\n # removed = self.front\n # self.front = self.front.next\n # return removed.value\n # except AttributeError:\n # return \"Can't dequeue from an empty queue\"",
"def pop(self):\n return heappop(self.priority_queue)[1]",
"def pop_front(self):\n if self.is_empty():\n return None\n val = self.head.value\n # Update head and size\n self.head = self.head.next_node\n self.size -= 1\n # If the only node was removed, also need to update tail\n if self.is_empty():\n self.tail = None\n return val",
"def pop(self):\r\n while self.pq:\r\n priority, count, task = heapq.heappop(self.pq)\r\n if task is not self.REMOVED:\r\n del self.entry_finder[task]\r\n return task\r\n raise KeyError('pop from an empty priority queue')",
"def pop_min(self):\n if self.get_size() == 0:\n return None\n\n # put minimum item at the end\n self.swap(0, len(self.table) - 1)\n\n # and remove it from the list;\n item = self.table.pop()\n\n # then fix new root\n self.percolate_down(0)\n return item",
"def dequeue(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)",
"def pop(self) -> int:\n cur = None\n if(not self.empty()):\n cur = self.queue[0] \n self.queue = self.queue[1:] \n return cur",
"def pop(self):\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')",
"def peek(self):\n if not self.front:\n raise AttributeError(\"Can't peek from an empty queue\")\n return self.front.value\n\n # try:\n # return self.front.value\n # except AttributeError:\n # return \"Can't peek front from an empty queue\"",
"def peek(self):\n if not self.is_empty():\n return self._queue_items[-1]\n else:\n raise QueueException('Peek operation not supported on an empty queue')",
"def remove(self):\n if len(self.line) == 0:\n raise ValueError, \"No more elements in the queue to remove.\"\n\n frontOfLine = self.line[-1]\n self.line = self.line[:-1]\n return frontOfLine",
"def front(self):\n return self.queue[0] if not self.empty() else None",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)",
"def pop(self) -> Optional[T]:\n try:\n elem = heapq.heappop(self._heap).val\n self._unique_values.remove(elem)\n except IndexError:\n return None\n return elem",
"def peek(self):\n if not self.empty():\n return self.queue[-1]\n return None",
"def remove_min(self) -> Optional[T]:\n if self._array == []:\n return None\n else:\n # Remove top node\n value = self._array[0]\n self._array = self._array[1:]\n # If nodes remaing in the min heap...\n if self._array:\n # Move end node to the top\n end_node = self._array.pop()\n self._array = [end_node] + self._array\n # Rebuild the heap (heapify)\n self.__build()\n # Return the top node\n return value"
] | [
"0.7618411",
"0.7553011",
"0.7513481",
"0.7512028",
"0.7495574",
"0.74546635",
"0.74098784",
"0.73618776",
"0.7308423",
"0.7285029",
"0.7266011",
"0.72317827",
"0.72248465",
"0.72198665",
"0.71959704",
"0.71899134",
"0.7139653",
"0.7117217",
"0.71051884",
"0.7096878",
"0.709194",
"0.7086201",
"0.7078108",
"0.7068025",
"0.7063479",
"0.70609397",
"0.70588255",
"0.7051653",
"0.7050092",
"0.7047689"
] | 0.7974448 | 0 |
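A sketch of the removal step, again assuming `heapq` with `(priority, counter, item)` entries; `heappop` removes the minimum and restores the heap invariant in one call.

```python
import heapq

def dequeue(heap):
    """Remove and return the minimum-priority item, or raise ValueError if empty."""
    if not heap:
        raise ValueError('Priority queue is empty and has no front item')
    _priority, _count, item = heapq.heappop(heap)
    return item
```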
Remove and return the item at the front of this priority queue, and insert the given item in order according to the given priority. | def push_pop(self, item, priority):
if self.size() < 1:
raise ValueError('Priority queue is empty and has no front item')
else:
# TODO: Replace and return min item from heap, if any
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...",
"def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))",
"def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)",
"def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)",
"def enqueue(self, priority, value, key=None):\n key = key if key else value\n if key in self._index:\n self._update(priority, key)\n return\n self._heap.append(Item(priority, value, key))\n self._size = len(self._heap)\n self._index[key] = self._size - 1\n self._sift_up(self._size - 1)",
"def MoveItem(src_queue, trg_queue, order_func):\n score, item = heapq.heappop(src_queue)\n score = - float(order_func(- score, item))\n heapq.heappush(trg_queue, (score, item))\n return item",
"def enqueue_front(self, item):\n self._items.insert(0, item)",
"def push_pop(self, priority, key):\n if not self or priority <= self.__heap[0][0]:\n return priority, key\n\n result_priority, result_key = self.__heap[0]\n del self.__position[result_key]\n\n self.__heap[0] = [priority, key]\n self.__position[key] = 0\n self.__bubble_down(0)\n\n return result_priority, result_key",
"def insert(self, value, priority=2):\n if not isinstance(priority, int):\n raise TypeError(\"Priority must be an integer\")\n if priority in self.priority_queue:\n self.priority_queue[priority].append(value)\n else:\n self.priority_queue[priority] = [value]\n print(self.priority_queue)",
"def pushpop(self, item):\n return heapq.heappushpop(self.heap, item)",
"def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...",
"def active_item(self, remove=True):\n self.sleeping.reveille() # wake items whose sleep timer has expired\n if not self.stack.empty():\n pass\n elif not self.backlog.empty():\n # feed the stack the top priority item from the queue\n self.stack.push(self.backlog.get())\n else: # both the stack & queue are empty\n raise queue.Empty\n\n assert not self.stack.empty(), \"BUG: empty stack\"\n\n if remove:\n return self.stack.pop()\n\n return self.stack.peek()",
"def append(self,data,priority):\r\n\t\tbisect.insort(self.queue,(priority,data))",
"def put(self, item, priority=None, *args, **kwargs):\n if priority is None:\n raise self.PQueueException('priority must be specified')\n super().put((priority, item), *args, **kwargs)",
"def getFront(self):\n\t\tfront = self.queue[self.front]\n\t\treturn front\n\t\tpass",
"def add(self, item, priority):\n heappush(self.contents, (priority, item))",
"def replace(self, item):\n return heapq.heapreplace(self.heap, item)",
"def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...",
"def peek(ind: int = 0, priority: int = 0) -> Any:\n\tglobal queue\n\tif queue:\n\t\ttry:\n\t\t\treturn queue[ind][1]\n\t\texcept IndexError:\n\t\t\treturn None\n\telse:\n\t\treturn None",
"def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")",
"def push(self, item):\n self.list.prepend(item)",
"def push(self, item):\n super().add_item_to_front(item)",
"def add_front(self, item):\n\n self.items.insert(0, item)",
"def top(self):\n while not self.queue[self.tag].empty():\n temp = self.queue[self.tag].get()\n self.queue[1 - self.tag].put(temp)\n self.tag = 1 - self.tag\n return temp",
"def insertFront(self, item):\n self.sentinel.insertAfter(item)\n self.N += 1",
"def push(self, obj):\n # wrap the object to allow for correct pop operation\n # remember that in python it's a min-heap (not max!)\n wrap_obj = (obj.minus_priority, len(self), obj)\n # use insertion number to ensure we never compare based on obj itself!\n # additionally resolves ties by popping earliest-inserted object\n heapq.heappush(self._heap, wrap_obj)",
"def get_priority(self, item):\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")",
"def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)",
"def enqueue(elem: Any, priority: int = 0) -> None:\n\tglobal queue\n\tqueue.append((priority, elem))\n\treturn None"
] | [
"0.7093361",
"0.70350254",
"0.6742636",
"0.6527571",
"0.6505629",
"0.645512",
"0.626797",
"0.6210028",
"0.61004966",
"0.60370773",
"0.60328144",
"0.60213953",
"0.6011833",
"0.5995145",
"0.59813595",
"0.59383404",
"0.59279823",
"0.5927908",
"0.59254074",
"0.5917036",
"0.5906796",
"0.5901396",
"0.5893914",
"0.5872185",
"0.5868598",
"0.5863504",
"0.58546567",
"0.58477026",
"0.5846182",
"0.58329964"
] | 0.78216314 | 0 |
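The pop-then-push semantics described above map onto `heapq.heapreplace`, which removes the current minimum and inserts the new entry in a single sift down (note that `heappushpop` does the opposite order and is not what the docstring asks for). A hedged sketch with the same entry layout as the previous examples:

```python
import heapq
from itertools import count

_tie_breaker = count()

def push_pop(heap, item, priority):
    """Pop the current minimum, insert (priority, item), and return the popped item."""
    if not heap:
        raise ValueError('Priority queue is empty and has no front item')
    _priority, _count, popped = heapq.heapreplace(heap, (priority, next(_tie_breaker), item))
    return popped
```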
Create a line from a point (x, y) and some angle in radians. | def from_angle(x1, y1, angle, length):
x2 = x1 + length * sin(angle)
y2 = y1 + length * cos(angle)
return Line(((x1, y1), (x2, y2))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createFromLine(cls, line, **kwargs):\n angle = line.angle\n x, y = cls.cartesian([1, angle])\n return cls(x, y, **kwargs)",
"def getLine(self, **kwargs):\n return Line(self.p1, self.angle, **kwargs)",
"def line_equation_ap(angle, (x1, y1)):\n \n # get second point on the line\n x2 = float(x1) + cos(angle)\n y2 = float(y1) + sin(angle)\n \n # return A, B and C coefficients\n return (y1 - y2, x2 - x1, x1*y2 - x2*y1)",
"def find_line_through_point(center, theta, length):\n\n r = length\n cx, cy = center\n\n xo = int(r * math.sin(theta))\n yo = int(r * math.cos(theta))\n\n line_start = cx, cy\n line_end = cx + xo, cy + yo\n\n return line_start, line_end",
"def line(center, length, rotation=0):\n unit = np.array([math.cos(rotation * 2 * PI / 360),\n math.sin(rotation * 2 * PI / 360)])\n end = center + unit * length / 2\n start = center - unit * length / 2\n return [\"PA{},{};\".format(*[int(coord) for coord in start]),\n \"PD{},{};\".format(*[int(coord) for coord in end]),\n \"PU;\"]",
"def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))",
"def line(intercept, slope, x):\n return slope*x + intercept",
"def get_horizontal_line(self, point: Sequence[float], **kwargs) -> Line:\n\n return self.get_line_from_axis_to_point(1, point, **kwargs)",
"def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)",
"def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])",
"def line(points):\n return LineString(points)",
"def line(\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"line\", x=x, y=y, **kwargs)",
"def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b",
"def createFromLine(line):\n return HalfLine(line.point, line.angle)",
"def line(x1, y1, x2, y2):\r\n\r\n x1 = normalize(x1)\r\n y1 = normalize(y1)\r\n x2 = normalize(x2)\r\n y2 = normalize(y2)\r\n\r\n xdiff = max(x1, x2) - min(x1, x2)\r\n ydiff = max(y1, y2) - min(y1, y2)\r\n xdir = 1 if x1 <= x2 else -1\r\n ydir = 1 if y1 <= y2 else -1\r\n\r\n r = max(xdiff, ydiff)\r\n\r\n for i in range(r+1):\r\n x = x1\r\n y = y1\r\n\r\n if ydiff:\r\n y += (float(i) * ydiff) / r * ydir\r\n if xdiff:\r\n x += (float(i) * xdiff) / r * xdir\r\n\r\n yield (x, y)",
"def get_line_from_axis_to_point(\n self,\n index: int,\n point: Sequence[float],\n line_func: Line = DashedLine,\n line_config: dict | None = None,\n color: ParsableManimColor | None = None,\n stroke_width: float = 2,\n ) -> Line:\n\n line_config = line_config if line_config is not None else {}\n\n if color is None:\n color = VMobject().color\n\n line_config[\"color\"] = ManimColor.parse(color)\n line_config[\"stroke_width\"] = stroke_width\n\n axis = self.get_axis(index)\n line = line_func(axis.get_projection(point), point, **line_config)\n return line",
"def line_to(self, point: Onion[Tuple[float, float], Point2D, Point3D, Point]):\n start_point = self.last_point\n end_point = _point_2d(point)\n self._segments.append(Line3D.create(start_point, end_point))",
"def getLine(self, correct=True):\n return Line(self.point, self.angle, correct=correct)",
"def create_line(self, x1, y1, x2, y2, style=None, parent=None):\n attrs = {'d': 'M %5f %5f L %5f %5f' % (x1, y1, x2, y2)}\n return self.create_path(attrs, style, parent)",
"def line_to(self, x, y):\n self._impl.line_to(x, y)",
"def point(pt, angle, dist):\n x, y = pt\n return dist * cos(angle) + x, dist * sin(angle) + y,",
"def drawLine(self, dx, dy):\n assert (type(dx) in [int, float]), \"parameter x:%s is not a valid number\" % `dx`\n assert (type(dy) in [int, float]), \"parameter y:%s is not a valid number\" % `dy`\n x = self._turtle.xcor()\n y = self._turtle.ycor()\n self._turtle.setposition(x+dx, y+dy)",
"def line_length_angle(line:tuple)->tuple:\n squared_dist = point_sqr_distance(line[0], line[1])\n if squared_dist == 0:\n return 0,1\n distance = math.sqrt(squared_dist)\n angle_cosine = (line[1][0] - line[0][0]) / distance\n return squared_dist, angle_cosine",
"def line_plot():\n # generate data\n x = np.arange(0, 4 * np.pi, 0.1) # x in [0, 4* pi)\n y_cos = np.cos(x)\n\n plt.figure()\n plt.plot(x, y_cos)\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.title('Cosine function in $[0, 4\\pi)$ with line plot')\n plt.show()\n\n return None",
"def make_line_points(y1, y2, line):\n if line is None:\n return None\n\n slope, intercept = line\n\n # make sure everything is integer as cv2.line requires it\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return ((x1, y1), (x2, y2))",
"def DrawLinePoint(*args, **kwargs):\n return _gdi_.DC_DrawLinePoint(*args, **kwargs)",
"def line(self, x, y):\n self.call('line', x, y)",
"def DrawLinePoint(*args, **kwargs):\n return _gdi_.PseudoDC_DrawLinePoint(*args, **kwargs)",
"def dline(x, y):\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(0.0, 0.0, 1.0)\n glPointSize(10.0)\n glBegin(GL_POINTS)\n while (x <= y):\n glVertex2f(x, x)\n x += 0.05\n glEnd()\n glFlush()",
"def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b"
] | [
"0.6920295",
"0.6459677",
"0.63838017",
"0.6383747",
"0.62899935",
"0.6254749",
"0.6232047",
"0.6193431",
"0.61365706",
"0.6101261",
"0.6090885",
"0.60878295",
"0.60768676",
"0.60122246",
"0.593856",
"0.5925338",
"0.5848339",
"0.58428526",
"0.58347046",
"0.5824384",
"0.5823511",
"0.58115876",
"0.5789022",
"0.57863873",
"0.57813346",
"0.57660854",
"0.5762989",
"0.5762068",
"0.5750448",
"0.57293713"
] | 0.7692889 | 0 |
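Worth noting: the `from_angle` document above uses `sin` for the x offset and `cos` for the y offset, which measures the angle from the +y axis (a bearing-style convention). The more common mathematical convention measures counter-clockwise from the +x axis; a small sketch of that variant is given below purely for comparison, and either convention is fine as long as it is applied consistently.

```python
from math import cos, sin

def endpoint_from_angle(x1, y1, angle, length):
    """Second endpoint of a segment, angle in radians measured CCW from the +x axis."""
    return x1 + length * cos(angle), y1 + length * sin(angle)
```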
Returns the domain of X values for this line. | def domain(self):
lower, upper = sorted((self.x1, self.x2))
return FloatRange(lower=lower, upper=upper) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def domain(self) -> NDArrayFloat:\n\n return ndarray_copy(self._domain)",
"def _axes_domain(self, *args, **kwargs):\n # See _add_gridline_label for detials\n lon_0 = self.axes.projection.proj4_params.get('lon_0', 0)\n x_range, y_range = type(self)._axes_domain(self, *args, **kwargs)\n x_range = np.asarray(x_range) + lon_0\n return x_range, y_range",
"def index_in_domain(self):\n\t\treturn int((self.position - self.domain.x.min())/self.domain.dx)",
"def dx(self):\n values = self._interpolate_table(\"dx\")\n return values",
"def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]",
"def domain_bounds(self):\n return self._xmin, self._xmax, self._ymin, self._ymax, self._zmin, self._zmax",
"def independent(self):\n return self.x",
"def xvals(self):\n return self.Ls",
"def xvals(self):\n return self.Ls",
"def xminmax ( self ) :\n return self.xvar.minmax()",
"def get_lxly(self):\n return np.meshgrid(\n np.fft.fftfreq(self.nx, self.dx) * 2. * np.pi,\n np.fft.fftfreq(self.ny, self.dy) * 2. * np.pi)",
"def get_xrange(self):\n return self.xvec[0], self.xvec[-1]",
"def get_xList(self):\n return self.__x",
"def get_lxly(self):\n return np.meshgrid(\n np.fft.fftfreq(self.nx, self.dx)[0:self.nx / 2 + 1] * 2. * np.pi,\n np.fft.fftfreq(self.ny, self.dy) * 2. * np.pi)",
"def get_lxly(self):\n return np.meshgrid(\n np.fft.fftfreq(self.nx, self.dx)[0:self.nx / 2 + 1] * 2. * np.pi,\n np.fft.fftfreq(self.ny, self.dy) * 2. * np.pi)",
"def domain(self):\n # type: () -> string_types\n return self._domain",
"def domain( self ):\n raise NotImplementedError(\"domain\")",
"def xy(self):\n return self.to_xyah()[0:2]",
"def get_x(self):\n return self.x[:self.nump, :]",
"def xx(self):\n return self.exterior[:, 0]",
"def X(self):\n return self.dataset.X",
"def value(self, x):\n if isinstance(x, (float,int)):\n return self._values[x >= self._boundaries[:-1]][-1]\n else:\n a = array([self._values[xi >= self._boundaries[:-1]][-1]\n for xi in x])\n return a",
"def l_x_normalized(self):\n return (self.l_x_axis - 128) / 127",
"def domain(self):\n ret = libxml2mod.xmlErrorGetDomain(self._o)\n return ret",
"def x ( self ) :\n return self.xvar",
"def x_coords(self):\n x_coords = np.linspace(0, self.fft_length / self.samplate, self.fft_length + 1)\n return x_coords",
"def xax(self):\n return self.__xax",
"def domain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain\")",
"def domain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain\")",
"def x(self):\n return self.coords[0]"
] | [
"0.7240038",
"0.670912",
"0.6480548",
"0.644691",
"0.6432765",
"0.64078355",
"0.63080204",
"0.6278737",
"0.6278737",
"0.6257812",
"0.62101346",
"0.6198268",
"0.61593205",
"0.61583245",
"0.61583245",
"0.6129598",
"0.61284596",
"0.61077106",
"0.60557747",
"0.6044892",
"0.6039606",
"0.60224175",
"0.6018368",
"0.6016837",
"0.6012609",
"0.6009166",
"0.60091215",
"0.5976214",
"0.5976214",
"0.595539"
] | 0.73572046 | 0 |
Return the Intersection between this line and `other`. Don't take the domain or range of either line into account; assume the lines extend to infinity. | def intersection_with(self, other):
if self.gradient == other.gradient:
# Lines of the same gradient never intersect.
return None
# Calculate the X and Y values of this intersection using linear algebra.
x = (other.y_intercept - self.y_intercept) / (self.gradient - other.gradient)
y = self.gradient * x + self.y_intercept
# If this or the other line belong to a shape, add it to a new set of shapes
# involved in this intersection.
shapes = filter((lambda o: o is not None), (self.shape, other.shape))
return Intersection(x, y, shapes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def intersection_with(self, other):\n i = self.line_intersection_with(other)\n if i is None:\n return None# parallel lines\n\n if self.contains(i) and other.contains(i) and not (i in self.endpoints and i in other.endpoints):\n return i\n return None",
"def intersect(self, other):\n if isinstance(other, Arc):\n return other.intersect(self)\n elif not isinstance(other, LineSegment):\n raise TypeError(other)\n S = (self.p2 - self.p1).scale(1.)\n T = (other.p2 - other.p1).scale(1.)\n denom = S.y * T.x - S.x * T.y\n if nearly_zero(denom):\n if nearly_zero(S.cross(other.p1 - self.p1)):\n q1 = (other.p1 - self.p1) * S / (S * S)\n q2 = (other.p2 - self.p1) * S / (S * S)\n if q2 < q1:\n q1, q2 = q2, q1\n left, right = max(0, q1), min(1, q2)\n if left < right:\n return LineSegment(self.p1 + left * S, self.p1 + right * S)\n return None\n a = (T.x * (other.p1.y - self.p1.y) - T.y * (other.p1.x - self.p1.x)) / denom\n b = (S.x * (other.p1.y - self.p1.y) - S.y * (other.p1.x - self.p1.x)) / denom\n if 0 <= a <= 1 and 0 <= b <= 1:\n return self.p1 + a * S\n # else return None because we don't intersect",
"def line_intersection_with(self, other):\n # solve following system :\n # intersection = start of self + alpha * direction of self\n # intersection = start of other + beta * direction of other\n directions = [s.endpoints[1] - s.endpoints[0] for s in (self, other)]\n denominator = directions[0].cross_product(directions[1])\n if abs(denominator) < 0.000001:\n # almost parallel lines\n return\n start_diff = other.endpoints[0] - self.endpoints[0]\n alpha = start_diff.cross_product(directions[1]) / denominator\n return self.endpoints[0] + directions[0] * alpha",
"def intersection(self, other):\n log.info('self: '+str(self)+' other: '+str(other))\n if self == other:\n # Used to be return True, that is definitely not right (expects Coordinate)\n # Do we want start or end ? Does it matter? Lines are the same, everything is\n # an intersection.\n return self.start\n # If any of the start/end points match, return that point.\n if self.end==other.start or self.end == other.end:\n return self.end \n if self.start==other.start or self.start == other.end: \n return self.start\n\n # Line equation: y = mx + b\n # m = (y2-y1)/(x2-x1)\n # B_self = y - M_self*x\n # Pick any x/y on the line - try end point\n # B_self = self.end.lat - M_self*self.end.lon\n # B_other = other.end.lat - M_self*self.end.lon\n from pyresample.spherical_geometry import Coordinate\n\n selfendlon = self.end.lon\n selfstartlon = self.start.lon\n otherendlon = other.end.lon\n otherstartlon = other.start.lon\n # Not sure if this is necessary, or good...\n# if self.end.lon < 0:\n# selfendlon = self.end.lon + 2*math.pi\n# if self.start.lon < 0:\n# selfstartlon = self.start.lon + 2*math.pi\n# if other.end.lon < 0:\n# otherendlon = other.end.lon + 2*math.pi\n# if other.start.lon < 0:\n# otherstartlon = other.start.lon + 2*math.pi\n\n log.info(' self lons: '+str(math.degrees(selfstartlon))+' '+str(math.degrees(selfendlon))+' other lons: '+str(math.degrees(otherstartlon))+' '+str(math.degrees(otherendlon)))\n\n # If both vertical, will be no intersection\n if abs(selfendlon - selfstartlon) < EPSILON and abs(otherendlon - otherstartlon) < EPSILON:\n log.info(' Both vertical, no intersection')\n return None\n # If self is vertical, but not parallel, intersection will be selfstartlon and lat = Mother*lon+B_other\n if abs(selfendlon - selfstartlon) < EPSILON:\n lon = selfstartlon\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n B_other = other.end.lat - M_other*otherendlon\n lat = M_other*lon+B_other\n log.info(' self is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and\n lon < max([otherendlon,otherstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n # same for other\n if abs(otherendlon - otherstartlon) < EPSILON:\n lon = otherstartlon\n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n B_self = self.end.lat - M_self*selfendlon\n lat = M_self*lon+B_self\n log.info(' other is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and \n lon > min([selfendlon,selfstartlon]) and\n lon < max([selfendlon,selfstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS Use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n\n \n\n # Get slopes of the lines \n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n \n # If they are parallel, no intersection\n if (M_self-M_other) < EPSILON:\n log.info(' self and other are parallel, no intersection')\n return None\n\n # Get the y-intercepts of the lines \n B_self = self.end.lat - M_self*selfendlon\n B_other = other.end.lat - M_other*otherendlon\n\n # Solve the equation\n # y=m1x+b1 and y=m2x+b2, equate y's so m1x+b1=m2x+b2, x = (b1-b2)/(m2-m1)\n # equate x's so x=(y-b1)/m1=(y-b2)/m2, y = (b1m2-b2m1)/(m2-m1)\n lon = (B_self - B_other)/(M_other - M_self)\n lat = (B_self*M_other - B_other*M_self)/(M_other-M_self)\n\n # Make sure lat/lon intersects within the line segment, and not outside.\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and \n lon < max([otherendlon,otherstartlon]) and\n lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([selfendlon,selfstartlon]) and \n lon < max([selfendlon,selfstartlon])):\n log.info(' self and other intersect within segment')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS use wrap longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n log.info(' self and other intersect, but not within segment')\n return None",
"def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)",
"def intersection(self, other):\r\n intersect = 0.0\r\n i = j = 0\r\n maxi = len(self)\r\n maxj = len(other)\r\n while i < maxi and j < maxj:\r\n intersect += self.gram_edit_distance(self[i], other[j])\r\n if self[i] == other[j]:\r\n i += 1\r\n j += 1\r\n elif self[i] < other[j]:\r\n i += 1\r\n else:\r\n j += 1\r\n return intersect",
"def intersection(self, other):\n intersect = 0.0\n i = j = 0\n while i < len(self) and j < len(other):\n intersect += self.gram_edit_distance(self[i], other[j])\n if self[i] == other[j]:\n i += 1\n j += 1\n elif self[i] < other[j]:\n i += 1\n else:\n j += 1\n return intersect",
"def intersect(self, other: Segment) -> Optional[Intersection]:\n # Short-circuit for parallel segments\n if self.orientation == other.orientation:\n return\n\n # Deconstruct segments into a series of Coordinates & find the set intersection\n intersection = self.to_set().intersection(other.to_set())\n if not intersection:\n return\n\n x, y = list(intersection)[0]\n return Intersection(location=Coordinate(x=x, y=y))",
"def intersects(self, other_line):\n intpt= self.intersection(other_line)\n return bool(intpt)",
"def intersect(self, other):\n return Intersection(self, other)",
"def intersect_with(self, other):\n point = self._lines_intersection(other)\n\n if point is False:\n return False\n\n if point is True:\n return not(\n self.min_x() > other.max_x() or\n other.min_x() > self.max_x() or\n self.min_y() > other.max_y() or\n other.min_y() > self.max_y()\n )\n\n else:\n return (\n self.contains_point(point) and\n other.contains_point(point) and\n point\n )",
"def intersection(self, other):\n a, b = min(self.start, self.finish), max(self.start, self.finish)\n c, d = min(other.start, other.finish), max(other.start, other.finish)\n a1 = normalize(a, 0, TWO_PI)\n a, b = a1, b + a1 - a\n c1 = normalize(c, 0, TWO_PI)\n c, d = c1, d + c1 - c\n e, f = max(a, c), min(b, d)\n if f >= e:\n return AngleRange(e, f)\n else:\n return None # no overlap",
"def intersection(self, other):\n p0_other, p1_other = other.p0, other.p1\n\n # w = p1 - p0\n # v = p1_other - p0_other\n # s*w + p0 = t*v + p0_other\n\n w = self.p1 - self.p0\n v = p1_other - p0_other\n\n A = np.vstack((w,v)).T\n b = p0_other - self.p0\n\n if np.abs(np.linalg.det(A)) < epsilon:\n return None\n\n soln = np.linalg.solve(A, b)\n s, t = soln[0], -soln[1]\n\n intersection = s*w + self.p0\n\n if ((-epsilon <= s) and (s <= 1+epsilon) and (-epsilon <= t) and (t <= 1+epsilon)):\n return intersection\n else:\n return None",
"def intersection(self, other):\n if other.size() != self.size():\n raise ValueError(\"Intersections are only possible on interval-posets of the same size.\")\n try:\n return TamariIntervalPoset(self.size(), self._cover_relations + other._cover_relations)\n except ValueError:\n raise ValueError(\"This intersection is empty, it does not correspond to an interval-poset.\")",
"def intersection(self, other):\n VERIFICATION.verify_type(other, Rect, \"intersection target must be Rect\")\n\n funcs = (max, min, max, min)\n intersection_tuple = self._apply_binary_funcs(other, funcs)\n\n (inter_row_start, inter_row_end, inter_col_start, inter_col_end) = intersection_tuple\n if inter_row_start >= inter_row_end or inter_col_start >= inter_col_end:\n return None\n\n return Rect(*intersection_tuple)",
"def intersection(self, other):\n return _binary_geo(arctern.ST_Intersection, self, other)",
"def intersection(self, other):\n return self._geomgen(capi.geom_intersection, other)",
"def crossSegment(self, other):\n ml = self.getLine(correct=False)\n ol = other.getLine(correct=False)\n point = ml.crossLine(ol)\n if point:\n if (point in self) and (point in other):\n return point",
"def intersect(self, other: Line | Segment) -> list[Point]:\n if self.dim == 2:\n return list(distinct(self.edges.intersect(other)))\n\n if isinstance(other, Segment):\n try:\n result = self._plane.meet(other._line)\n except LinearDependenceError as e:\n if isinstance(other, Segment):\n other = cast(Segment, other[~e.dependent_values])\n result = cast(Plane, self._plane[~e.dependent_values]).meet(other._line)\n return list(\n result[Polygon(self[~e.dependent_values], copy=False).contains(result) & other.contains(result)])\n else:\n return list(result[self.contains(result) & other.contains(result)])\n\n try:\n result = self._plane.meet(other)\n except LinearDependenceError as e:\n if other.cdim > 0:\n other = other[~e.dependent_values]\n result = cast(Plane, self._plane[~e.dependent_values]).meet(other)\n return list(result[Polygon(self[~e.dependent_values], copy=False).contains(result)])\n else:\n return list(result[self.contains(result)])",
"def intersection(self, other):\n \n self_corners = self.corners\n\n other_corners = get_2d_false_corners(other)\n\n #shell()\n\n return planar_intersection_polygon(self_corners,other_corners)",
"def get_overlap(self, other):\n return self.intersection_over_union(other)",
"def intersect(self, other: Line | Plane | Segment | Polygon | Polyhedron) -> list[Point]:\n if isinstance(other, (Polygon, Polyhedron)):\n return other.intersect(self)\n\n if isinstance(other, Segment):\n result = meet(self._line, other._line, _check_dependence=False)\n ind = ~result.is_zero() & self.contains(result) & other.contains(result)\n else:\n result = meet(self._line, other, _check_dependence=False)\n ind = ~result.is_zero() & self.contains(result)\n\n if result.cdim > 0:\n return list(result[ind])\n if ind:\n return [result]\n return []",
"def crossSegment(self, other, e=1e-14, **kwargs):\n # Determine the point of intersection between the line of the given segment ang the line\n line = other.getLine()\n point = self.crossLine(line)\n if point is None:\n return None\n x, y = point\n # Determine if the point of intersection belongs to both the segment and the line\n if other.xmin - e <= point.x <= other.xmax + e and other.ymin - e <= y <= other.ymax + e:\n return Point(x, y, **kwargs)\n # By default if nothing is returned the function returns None",
"def intersection(self, other):\n if self.isdisjoint(other):\n raise ValueError(\"Range {0} don't intersect {0}\".format(self, other))\n\n min_row = max(self.min_row, other.min_row)\n max_row = min(self.max_row, other.max_row)\n min_col = max(self.min_col, other.min_col)\n max_col = min(self.max_col, other.max_col)\n\n return CellRange(min_col=min_col, min_row=min_row, max_col=max_col,\n max_row=max_row)",
"def crossSegment(self, other, e=1e-14):\n sl = self.getLine()\n ol = other.getLine()\n point = sl.crossLine(ol)\n if point is not None:\n if self.__contains__(point, e) and other.__contains__(point, e):\n return point",
"def intersect(self, other):\n \n if self.overlaps(other):\n return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y, other.ll.y)),\n Point(min(self.ur.x, other.ur.x), min(self.ur.y, other.ur.y)))\n\n\n else:\n return Rect(Point(0, 0), Point(0, 0))",
"def crossLine(self, other):\n if self.parallel(other): return None\n line = self.getLine()\n point = other.crossLine(line)\n if point is not None:\n if point in self and point in other:\n return point",
"def segmentsIntersect(self, other, allowProjInt = False):\n \n \"\"\"\n If we are not allowing projected intersection and the bounding boxes\n do not intersect then return -3, None.\n \"\"\"\n if(not(allowProjInt) and not(self.doBoundingBoxesIntersect(other))): return -3, None #return if bounding boxes do not intersect\n \"\"\" A special case for colinear lines. \"\"\" \n if(self.areColinear(other)):\n \"\"\"\n First place all four endpoint into a set. This will elliminate shared\n end points. Next, convert the set back into a list so it can\n finally be sorted.\n \"\"\"\n pointList = sorted(list(set([self.start, self.end, other.start, other.end])), key=self.calcT) \n if len(pointList) == 3:\n \"\"\"\n if there are only three points in the list then return 2, the\n middle point in the list since it is the shared point of the\n two lines.\n \"\"\"\n return 2, pointList[1] #if they are colinear and two ends have the same point return that point\n elif len(pointList) == 2:\n \"\"\" If the two lines have the same endpoints. \"\"\"\n return 2.5, self.getMidPoint()\n else:\n \"\"\"\n If the length was not three then we know it is length 4 in which case\n we turn the two middle points into a line and return 3, the line's\n midpoint.\n \"\"\"\n tempLine = Line(pointList[1], pointList[2])\n return 3, tempLine.getMidPoint() #If they are colinear return half way inbetween middle two points\n \"\"\"\n To calculate the intersection of two points we put the lines into the\n form P+tr and Q+us where P and Q are the starting points of the lines\n r and s are vectors form the starting point to the end point, and\n t and u are scalars. Set the two equations equal to each other and \n then solve for t and u. If t and u are in the range [0-1] then the\n intersection point lines on the lines, else it is a projected point.\n \"\"\"\n r = np.subtract(self.end.get2DPoint(), self.start.get2DPoint())\n s = np.subtract(other.end.get2DPoint(), other.start.get2DPoint())\n Q_Less_P = np.subtract(other.start.get2DPoint(), self.start.get2DPoint())\n denom = np.cross(r, s)*1.0\n t = np.cross(Q_Less_P, s)/denom\n u = np.cross(Q_Less_P, r)/denom \n point = p.Point(self.start.x + r[c.X]*t, self.start.y+r[c.Y]*t) \n #If t or u are not in the range 0-1 then the intersection is projected\n if(t > 1 or u > 1 or t < 0 or u < 0):\n \"\"\"\n Due to floating point problems sometimes if t or u is outside the 0-1\n range we end up inside this if statement but are actually at the end\n of one of the lines. I can't figure out how to properly add in a tolerance\n so we are taking the four end points putting them into a list,\n then comparing them to the calculated point. The Point module is\n properly handling tolerances so if the point == any of the end\n points then we should not return a projected point.\n \"\"\"\n if not any(point == lineEnd for lineEnd in (self.start, self.end,\n other.start, other.end)):\n return -1, point #return for projected intersection of non-colinear lines\n return 1, point #lines intersect at given point",
"def crossLine(self, other):\n ml = self.getLine(correct=False)\n point = ml.crossLine(other)\n if point:\n if (point in self) and (point in other):\n return point",
"def _intersect_interval(self, other):\n interval = Intersection(self.interval, other.interval)\n return interval.inf, interval.sup"
] | [
"0.80653363",
"0.7543367",
"0.75131035",
"0.71969223",
"0.71900284",
"0.7076571",
"0.7005486",
"0.6964363",
"0.68594927",
"0.68334734",
"0.68170583",
"0.68019235",
"0.67605364",
"0.6682999",
"0.6666026",
"0.6607942",
"0.65651965",
"0.65284467",
"0.6512001",
"0.6498432",
"0.64695317",
"0.6437992",
"0.6429563",
"0.6386789",
"0.63809717",
"0.6322732",
"0.6245058",
"0.6241661",
"0.6232058",
"0.6185604"
] | 0.7800287 | 1 |
Yields a Line for each pair of points in the polygon. | def lines(self):
for pair in pairs(self.points):
yield Line(pair, shape=self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _lines_from_points(points, points_form_closed_loop=False):\n if points_form_closed_loop:\n return list(zip(points[:-1],points[1:]))\n return list(zip(points, tuple(points[1:])+(points[0],)))",
"def line(x1, y1, x2, y2):\r\n\r\n x1 = normalize(x1)\r\n y1 = normalize(y1)\r\n x2 = normalize(x2)\r\n y2 = normalize(y2)\r\n\r\n xdiff = max(x1, x2) - min(x1, x2)\r\n ydiff = max(y1, y2) - min(y1, y2)\r\n xdir = 1 if x1 <= x2 else -1\r\n ydir = 1 if y1 <= y2 else -1\r\n\r\n r = max(xdiff, ydiff)\r\n\r\n for i in range(r+1):\r\n x = x1\r\n y = y1\r\n\r\n if ydiff:\r\n y += (float(i) * ydiff) / r * ydir\r\n if xdiff:\r\n x += (float(i) * xdiff) / r * xdir\r\n\r\n yield (x, y)",
"def proc_polyline(self, tokens):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polyline(pen=self.pen, points=pts)\n\n return component",
"def create_lines(polygons):\n lines = []\n for polygon in polygons:\n curr_lines = []\n for idx in range(0, len(polygon)):\n for idx_ in range(idx, len(polygon)):\n curr_line = Line(polygon[idx], polygon[idx_])\n curr_lines.append(curr_line)\n lines.append(curr_lines)\n return lines",
"def __iter__(self):\n pt = (self.x, self.y)\n for i in pt:\n yield i",
"def line(x0: float, y0: float, x1: float, y1: float) -> LineCollection:\n return LineCollection([(complex(x0, y0), complex(x1, y1))])",
"def line_generator(self):\n for V in self.Vrepresentation():\n if V.is_line():\n yield V",
"def test_line_to_points(self):\n delta = 1\n # Create simple line\n L = numpy.array([[0, 0], [2, 0]])\n V = points_along_line(L, 1)\n\n expected_V = [[0, 0], [1, 0], [2, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V, expected_V))\n assert numpy.allclose(V, expected_V), msg\n\n # Not starting at zero\n # Create line\n L2 = numpy.array([[168, -2], [170, -2], [170, 0]])\n V2 = points_along_line(L2, delta)\n\n expected_V2 = [[168, -2], [169, -2], [170, -2],\n [170, -1], [170, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V2, expected_V2))\n assert numpy.allclose(V2, expected_V2), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'indonesia_highway_sample.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = points_along_line(P, delta)\n\n # Check against reference centroid\n expected_v = [[106.7168975, -6.15530081],\n [106.85224176, -6.15344678],\n [106.93660016, -6.21370279]]\n assert numpy.allclose(C, expected_v, rtol=1.0e-8)\n\n # Store points to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_points_along_line',\n suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test points_along_line')\n V.write_to_file(out_filename)",
"def __iter__(self):\n for point in self.points:\n yield point",
"def project_points_line(points, line):\n return [project_point_line(point, line) for point in points]",
"def line(points):\n return LineString(points)",
"def endpoints_from_lines(lines):\n \n all_points = []\n for line in lines:\n for i in [0, -1]: # start and end point\n all_points.append(line.coords[i])\n \n unique_points = set(all_points)\n \n return [Point(p) for p in unique_points]",
"def create_lines(self) -> None:\n res = []\n for connection in self.connections:\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location + end_component.pin_locations[connection.end_pin]\n )\n\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n res.append(Line(connection, start_pin_location, *bends, end_pin_location))\n\n self.lines = res",
"def sub_polytope_generator(self):\n pointset = set(self.integral_points())\n for v in self.vertices():\n sub = list(pointset.difference([v]))\n yield LatticePolytope_PPL(*sub)",
"def __init__(self, points):\n self.points = points\n self.lines = []\n\n orientation = 1\n for i, point in enumerate(self.points):\n try:\n if points[i+1].x > point.x:\n orientation = orientation\n else:\n orientation = - 1\n point.orientation = orientation\n self.points[i+1].orientation = orientation\n self.lines.append(Line(point, self.points[i+1]))\n except IndexError:\n point.orientation = orientation\n self.lines.append(Line(point, self.points[0]))",
"def iter_points(self):\n for x in range(self.left, self.right + 1):\n for y in range(self.top, self.bottom + 1):\n yield Point(x, y)",
"def pinp_multiple_crossing(points, edges, include_edges = True):\n crossing_number = []\n initialized = False\n for edge in edges:\n d_y, d_x, b = line_equation(edge)\n index = -1\n for point in points:\n index += 1\n if not initialized:\n crossing_number.append([0, False])\n elif crossing_number[index][1]:\n continue\n if include_edges and point_on_line(point, edge, d_y, d_x, b):\n # If the point is on the edge, then we know it is in the polygon.\n crossing_number[index] = [1, True]\n continue\n if is_horizontal(edge):\n # We ignore horizontal edges (unless points are on them, as above).\n continue\n if intersects_right(point, edge, d_y, d_x, b, positive_slope(edge), include_edges):\n crossing_number[index][0] += 1\n initialized = True\n index = 0\n for point in points:\n if crossing_number[index] % 2 == 1:\n yield point",
"def __iter__(self) -> Iterable[Tuple[float, float]]:\n return iter([self.x, self.y])",
"def draw_line(self, coords, smooth=False, **options):\n # NOTE: Outline does not work because uses paths instead of normal line method.\n # TODO: Add volume param, containing a list of linewidths same length as line\n # or as a function that calculates the width at each node\n # Result is a flow line with varying thickness at each node\n # Have to calculate left/right xy at each node, and use symbol curveto()\n # Easy and really cool...DO IT!\n options = self._check_options(options)\n \n if not hasattr(coords[0], \"__iter__\"):\n coords = _grouper(coords, 2)\n else: coords = (point for point in coords)\n \n # get drawing tools from options\n args = []\n if options[\"fillcolor\"]:\n pen = aggdraw.Pen(options[\"fillcolor\"], options[\"fillsize\"])\n args.append(pen)\n\n if smooth:\n\n # Note: Creation of the aggdraw.Symbol object here can be\n # very slow for long lines; Path is much faster but due\n # to a bug it does not correctly render curves, hence the use\n # of Symbol\n \n pathstring = \"\"\n \n # begin\n coords = _pairwise(coords)\n (startx,starty),(endx,endy) = next(coords)\n pathstring += \" M%s,%s\" %(startx, starty)\n \n # draw straight line to first line midpoint\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" L%s,%s\" %(midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # for each line\n for line in coords:\n # curve from midpoint of first to midpoint of second\n (startx,starty),(endx,endy) = line\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" Q%s,%s,%s,%s\" %(startx, starty, midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # draw straight line to endpoint of last line\n pathstring += \" L%s,%s\" %(endx, endy)\n\n # make into symbol object\n symbol = aggdraw.Symbol(pathstring)\n\n # draw the constructed symbol\n self.drawer.symbol((0,0), symbol, *args)\n\n else:\n\n path = aggdraw.Path()\n \n # begin\n startx,starty = next(coords)\n path.moveto(startx, starty)\n \n # connect to each successive point\n for nextx,nexty in coords:\n path.lineto(nextx, nexty)\n\n # draw the constructed path\n self.drawer.path((0,0), path, *args)",
"def polyline(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polyline points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self",
"def polyline(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polyline points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self",
"def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)",
"def draw_line_list_points(draw_object, list_points, list_point_coords, close=True):\n if close:\n list_points.append(list_points[0])\n\n width_line = 4\n\n point_start = list_points[0: -1]\n point_end = list_points[1:]\n\n for crrt_start, crrt_end in zip(point_start, point_end):\n x1 = list_point_coords[crrt_start][0]\n y1 = list_point_coords[crrt_start][1]\n x2 = list_point_coords[crrt_end][0]\n y2 = list_point_coords[crrt_end][1]\n draw_object.line((x1, y1, x2, y2), width=width_line, fill=\"blue\")",
"def drawPolyline(points, ax, linespec=None):\n if len(points.shape) != 2 or points.shape[0] != 2:\n raise ValueError(\"'points' must be 2xN\")\n if linespec is None:\n linespec = __color_cycle.next() + 'o-'\n ax.plot(points[0,:].T, points[1,:].T, linespec)",
"def convert_points_to_lines(control_points):\n control_points_lines = []\n iterator = 0\n while iterator < (len(control_points) - 1):\n p1 = (control_points[iterator][0], control_points[iterator][1])\n p2 = (control_points[iterator + 1][0], control_points[iterator + 1][1])\n line = LineString([p1, p2])\n control_points_lines.append(line)\n iterator += 1\n return control_points_lines",
"def vertex_iterator(self):\n for X in self.fe.L:\n for x in self.K.unit_group:\n yield (X, x)",
"def get_line_segment(p1, p2, nb_points=30):\n x_spacing = (p2[0] - p1[0]) / (nb_points + 1)\n y_spacing = (p2[1] - p1[1]) / (nb_points + 1)\n\n return [(int(p1[0]+i*x_spacing), int(p1[1]+i*y_spacing))\n for i in range(1, nb_points+1)]",
"def _line_from_two_points(pt1: np.array, pt2: np.array) -> np.array:\n numLine = pt1.shape[0]\n lines = np.zeros((numLine, 6))\n n = np.cross(pt1, pt2)\n n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)\n lines[:, 0:3] = n\n\n areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))\n areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))\n areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))\n planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1\n lines[:, 3] = planeIDs\n\n for i in range(numLine):\n uv = _xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])\n umax = uv[:, 0].max() + np.pi\n umin = uv[:, 0].min() + np.pi\n if umax - umin > np.pi:\n lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi\n else:\n lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi\n\n return lines",
"def draw_polyline(*points):\r\n global _canvas\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n #print(points)\r\n #print(len(points))\r\n newpoints = []\r\n for x in range(0, len(points), 2):\r\n #print(x)\r\n pt = Point(points[x], points[x+1])\r\n newpoints += [ pt ]\r\n #print(newpoints)\r\n path = Path(*newpoints)\r\n path.setBorderWidth(_current_line_thickness)\r\n path.setBorderColor(_current_color)\r\n _canvas.add(path)",
"def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point"
] | [
"0.68208283",
"0.6730272",
"0.66045207",
"0.6540537",
"0.6299681",
"0.6237254",
"0.61816174",
"0.6175961",
"0.61647254",
"0.6123132",
"0.60234046",
"0.60123557",
"0.6010977",
"0.5986616",
"0.5942319",
"0.5904839",
"0.58956665",
"0.5895638",
"0.5843104",
"0.5839472",
"0.5839472",
"0.5837999",
"0.5834819",
"0.583331",
"0.5794091",
"0.57751656",
"0.5758022",
"0.5754145",
"0.5752993",
"0.5747228"
] | 0.79969424 | 0 |
Hidden instance method to append the object to the class attribute __category_list | def __append_to_category_list(self):
Category.get_category_list().append(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_category(self, category):\n raise NotImplementedError()",
"def get_category_list(cls):\n if Category.__category_list is None:\n Category.__category_list = []\n return Category.__category_list",
"def getCategory():",
"def categories(self):\n pass",
"def add_category(self, category):\n if category not in self.categories and category.strip() != \"\":\n self.categories.append(category.strip())",
"def list_categories(self):\n raise NotImplementedError()",
"def addCategory(self, c):\n\t\tif c in self._categories:\n\t\t\treturn\n\t\tself._categories.add(c)\n\t\tCONNECTOR.addCategory(self, c)",
"def add_category(self, category):\n if category not in self.categories and category.strip() != \"\" and category is not None:\n self.categories.append(category.strip())",
"def add_category(self, category: str) -> None:\n for letter in self.data:\n if not self.data[letter].get(category):\n self.data[letter][category] = []\n print(f'Categoria: {category} adicionada ao dicionário.')\n self.save()\n self.beautify_json()",
"def add_category_to_all_events(self):\n\n events = Event.objects.all()\n for event in events:\n event.categories.add(self)",
"def add_category(self, scheme, term, label):\n category = atom.data.Category(scheme=scheme, term=term, label=label)\n self.category.append(category)\n return category",
"def __repr__(self):\n return f\"Category=(id={self.id},category_name={self.category_name},category_slug={self.category_slug})\"",
"def append(self, obj):\r\n raise NotImplementedError",
"def extra_super_categories(self):\n return [self.base_category()]",
"def get_category_data_off(self):\n list_categories_name=[]\n cat = requests.get('https://fr.openfoodfacts.org/categories?json=true')\n cat_data = cat.json()\n tags_list = cat_data['tags']\n print (len(tags_list))\n list_of_random_tags_list = random.sample(tags_list, k=self.view.num_to_select)\n\n for category in list_of_random_tags_list:\n try :\n category_name = category['name']\n print(category_name)\n list_categories_name.append(category_name)\n print (list_categories_name)\n self.list_categories = list_categories_name # list_categories_name is passed in the instance property\n except KeyError:\n pass\n except UnicodeEncodeError:\n pass",
"def add(self, attr):\n self.validate_type(attr)\n self.categories.add(attr.value)",
"def _categorize(self, slug, category):\n key = self._category_key(category)\n self.r.sadd(key, slug)\n\n # Store all category names in a Redis set, for easy retrieval\n self.r.sadd(self._categories_key, category)",
"def __repr__(self):\n\n return \"<Category name=%s>\" % (self.name)",
"def append(self, object):\r\n raise NotImplementedError()",
"def obj_categories(self):\r\n return self._tags",
"def category(self):\r\n return lambda cls : self.__named(cls, CategoryContext)",
"def insert_categories(self):\n logic = CategoryLogic()\n \n # We create the list of category objects\n categories = self.objects_factory.create_category_object_list()\n categories = set(categories)\n \n for category in categories:\n logic.insert(category)",
"def __str__(self):\n return self.category_name",
"def register_to_category(cls, category, name, info, **kwargs):\n cls.categories.setdefault(category, {})[name] = info\n if kwargs.get('no_lookup'):\n cls._no_lookup.add(category)",
"def _get_categories(self, *args):\n raise NotImplementedError(self, \"_get_categories\")",
"def add(self, stem, cat):\n\n if cat not in self.lx:\n self.lx[cat] = [] \n self.lx[cat] += [stem]\n else:\n self.lx[cat] += [stem]",
"def getCategories(self):\r\n return self.categories",
"def category(self):\r\n return self._get('category', {})",
"def __category(self, name, label):\n if name in self.categories:\n res = self.categories[name]\n # Update the category label (if not yet defined)\n res['label'] = res['label'] or label\n\n return res\n\n cat = PrefCategoryTbl.query.filter_by(\n mid=self.mid\n ).filter_by(name=name).first()\n\n if cat is None:\n cat = PrefCategoryTbl(name=name, mid=self.mid)\n db.session.add(cat)\n db.session.commit()\n cat = PrefCategoryTbl.query.filter_by(\n mid=self.mid\n ).filter_by(name=name).first()\n\n self.categories[name] = res = {\n 'id': cat.id,\n 'name': name,\n 'label': label,\n 'preferences': dict()\n }\n\n return res",
"async def add(self, category, key, value=None):\n await super(MemoryKVCache, self).add(category, key, value)\n\n if self.in_transaction:\n self.dirty_categories.add(category)"
] | [
"0.67628896",
"0.66002876",
"0.6432391",
"0.63346964",
"0.625796",
"0.6200853",
"0.6176452",
"0.6161858",
"0.61149013",
"0.60869795",
"0.6050243",
"0.5962307",
"0.5923157",
"0.5906014",
"0.58910406",
"0.5843478",
"0.5798613",
"0.5784067",
"0.578279",
"0.5780437",
"0.57749104",
"0.5751038",
"0.5711776",
"0.57094824",
"0.5666444",
"0.5657049",
"0.5649705",
"0.5628771",
"0.5628055",
"0.56229067"
] | 0.83716774 | 0 |
Override the save method to resize the image on category.save() and notify all registered users that a new category has been added | def save(self, *args, **kwargs):
self.image = self.resizeImage(self.image)
self.__append_to_category_list()
super(Category, self).save(*args, **kwargs)
if settings.EMAIL_HOST_USER:
self.send_email_notification_to_users(
subject=f"[Portfolio App Demo] New Category added!",
message=f"A new category '{self.category_name}' has been added! Check it out here... https://www.gbournique.com/items/{self.category_slug}",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, *args, **kwargs):\n if not self.pk: # on create\n image = Image.open(self.file)\n image.thumbnail((400, 400), Image.ANTIALIAS)\n\n thumb = io.BytesIO()\n image.save(\n thumb, format=\"jpeg\", quality=80, optimize=True, progressive=True\n )\n self.thumbnail = InMemoryUploadedFile(\n thumb, None, self.file.name, 'image/jpeg', thumb.tell(), None\n )\n\n super(File, self).save(*args, **kwargs)",
"def save_image(self):\n self.save()",
"def save(self, **kwargs):\n if not self.width or not self.height:\n self.width, self.height = self.image.width, self.image.height\n\n # prefill the slug with the ID, it requires double save\n if not self.id:\n img = self.image\n\n # store dummy values first...\n w, h = self.width, self.height\n self.image = ''\n self.width, self.height = w, h\n self.slug = ''\n\n super(Photo, self).save(force_insert=True)\n\n # ... so that we can generate the slug\n self.slug = str(self.id) + '-' + slugify(self.title)\n # truncate slug in order to fit in an ImageField and/or paths in Redirects\n self.slug = self.slug[:64]\n # .. tha will be used in the image's upload_to function\n self.image = img\n # and the image will be saved properly\n super(Photo, self).save(force_update=True)\n else:\n try:\n old = Photo.objects.get(pk=self.pk)\n\n force_update = True\n # delete formatedphotos if new image was uploaded\n if old.image != self.image:\n for f_photo in self.formatedphoto_set.all():\n f_photo.delete()\n except Photo.DoesNotExist:\n # somebody is just trying to create new model with given PK\n force_update = False\n\n super(Photo, self).save(force_update=force_update)",
"def save_form_data(self, instance, data):\r\n if data and isinstance(data, UploadedFile):\r\n # A new file is being uploaded. So delete the old one.\r\n remove_model_image(instance, 'image')\r\n super(CampaignImageField, self).save_form_data(instance, data)\r\n instance._create_resized_images(raw_field=data, save=False)",
"def _create_resized_images(self, raw_field, save):\r\n # Derive base filename (strip out the relative directory).\r\n filename = os.path.split(self.image.name)[-1]\r\n ctype = guess_type(filename)[0]\r\n\r\n # Generate resized copy of image.\r\n remove_model_image(self, 'image_resized')\r\n bb = self.is_event and settings.EVENT_RESIZED_IMAGE_BOUNDING_BOX or settings.CAMPAIGN_RESIZED_IMAGE_BOUNDING_BOX\r\n resize, crop, img = get_perfect_fit_resize_crop(bb, input_image=self.image.path)\r\n resized_contents = resize_in_memory(img, resize, crop=crop)\r\n resized_file = str_to_file(resized_contents)\r\n resized_field = InMemoryUploadedFile(resized_file, None, None, ctype, len(resized_contents), None)\r\n self.image_resized.save(name='resized-%s' % filename, content=resized_field, save=save)\r\n resized_file.close()\r\n\r\n # Generate avatar.\r\n remove_model_image(self, 'image_avatar')\r\n avatar_contents = resize_in_memory(self.image.path, settings.CAMPAIGN_AVATAR_IMAGE_CROP, crop=settings.CAMPAIGN_AVATAR_IMAGE_CROP, crop_before_resize=True)\r\n avatar_file = str_to_file(avatar_contents)\r\n avatar_field = InMemoryUploadedFile(avatar_file, None, None, ctype, len(avatar_contents), None)\r\n self.image_avatar.save(name='avatar-%s' % filename, content=avatar_field, save=save)\r\n avatar_file.close()",
"def contribute_to_class(self, cls, name):\n\n super(StdImageField, self).contribute_to_class(cls, name)\n signals.post_save.connect(self._rename_resize_image, sender=cls)\n signals.post_init.connect(self.set_variations, sender=cls)",
"def save_image_action(self):\n self.view.save_image(self.settings.get_image_type())",
"def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super(Category, self).save(*args, **kwargs)",
"def save(self, **kwargs):\n self.remove_file()\n if not self.image:\n self.generate(save=False)\n else:\n self.image.name = self.file()\n super(FormatedPhoto, self).save(**kwargs)",
"def save(self):\n im = Image.open(self.picture)\n output = BytesIO()\n im.thumbnail((350, 350))\n im.save(output, format='JPEG', quality=100)\n output.seek(0)\n self.picture = InMemoryUploadedFile(output, 'ImageField', \"%s.jpg\" % self.picture.name.split('.')[0],\n 'image/jpeg', sys.getsizeof(output), None)\n super(Tire, self).save()",
"def _rename_resize_image(self, instance=None, **kwargs):\n if getattr(instance, self.name):\n filename = getattr(instance, self.name).path\n ext = os.path.splitext(filename)[1].lower().replace('jpg', 'jpeg')\n dst = self.generate_filename(instance, '%s_%s%s' % (self.name,\n instance._get_pk_val(), ext))\n dst_fullpath = os.path.join(settings.MEDIA_ROOT, dst)\n if os.path.abspath(filename) != os.path.abspath(dst_fullpath):\n os.rename(filename, dst_fullpath)\n for variation in self.variations:\n variation_filename = self._get_variation_filename(variation, dst_fullpath)\n shutil.copyfile(dst_fullpath, variation_filename)\n self._resize_image(variation_filename, variation)\n setattr(instance, self.attname, dst)\n instance.save()",
"def save(self, *args, **kwargs):\n slug_save(self) # call slug_save, listed below\n super(Coupons, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n if self.pk is None:\n saved_image = self.logo\n self.logo = None\n super().save(*args, **kwargs)\n self.logo = saved_image\n kwargs.pop('force_insert', None)\n super().save(*args, **kwargs)",
"def resize_profile_pic(sender, instance, **kwargs):\n profile_pic = instance.profile_picture\n if profile_pic.name != \"default.png\":\n img = Image.open(profile_pic.path)\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(profile_pic.path)",
"def save_image(self):\n self.table_to_image.img.save(self.file_name)\n aws.AWSHandler().upload_image(self.file_name)",
"def register_collection_additional_image(self, image):\n save_path = os.path.join(self.session_dir, 'additional.jpg')\n image.save(save_path)",
"def form_valid(self, form):\n User.objects.filter(username=self.object).update(\n user_image =form.cleaned_data['user_image'],\n )\n myfile = self.request.FILES['user_image']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n messages.success(self.request, 'Image uploaded successfully')\n return super().form_valid(form)",
"def update_path_image_on_add(sender, **kwargs):\n instance = kwargs.pop('instance', None)\n action = kwargs.pop('action', None)\n pk_set = kwargs.pop('pk_set', None)\n if action == \"post_add\":\n content = Content.objects.get(pk=list(pk_set)[0])\n if content.image:\n instance.image = content.image\n instance.save()",
"def save(self, force_insert=False, force_update=False):\n if not self.width and not self.height:\n from django.db import IntegrityError\n raise IntegrityError, \"A dimension must have a width and a height.\"\n else:\n super(Dimension, self).save(force_insert, force_update)",
"def save(self):\n try:\n old_avatar_service = (\n self.avatar_service_registry\n .get_avatar_service(\n self.settings_manager.avatar_service_id)\n )\n except ItemLookupError:\n old_avatar_service = None\n\n if old_avatar_service and old_avatar_service.is_configurable():\n old_avatar_service.cleanup(self.user)\n self.settings_manager.configuration.pop(\n old_avatar_service.avatar_service_id)\n\n avatar_service_id = self.cleaned_data['avatar_service_id']\n new_avatar_service = (\n self.avatar_service_registry\n .get_avatar_service(avatar_service_id)\n )\n self.settings_manager.avatar_service_id = avatar_service_id\n\n if new_avatar_service.is_configurable():\n avatar_service_form = self.avatar_service_forms[avatar_service_id]\n self.settings_manager.configuration[avatar_service_id] = \\\n avatar_service_form.save()\n\n self.settings_manager.save()",
"def save(self, *args, **kwargs):\n self.trait_flavor_name = self.set_trait_flavor_name()\n # Call the \"real\" save method.\n super(HarmonizedTrait, self).save(*args, **kwargs)",
"def process_image(\n df: pd.DataFrame,\n current_id: int,\n encoder: LabelBinarizer,\n current_category_name: str,\n images_in_category: List,\n output_image_folder_path: str,\n resized_image_shape:Tuple,\n transformations:List[TransformationsEnum],\n zero_fill_id=16,\n):\n\n #todo save original picture name with folder it is in, so we can upload only new images and skip the ones that already are on the bucket\n for image in images_in_category:\n\n orignal_name = os.path.join(current_category_name,os.path.split(image)[1])\n image_path = str(image)\n\n str_id = str(current_id).zfill(zero_fill_id)\n current_id += 1\n\n image_new_name = f\"img_{current_category_name}_{str_id}.png\"\n binarized_label = encoder.transform([current_category_name])\n\n #save image file name, its category in 1hot encoding and its category name\n df.loc[len(df)] = [image_new_name, binarized_label.flatten().tolist(), current_category_name, orignal_name]\n new_image_path = os.path.join(output_image_folder_path, image_new_name)\n img = Image.open(image_path).convert('RGB').resize(resized_image_shape)\n\n #apply transformations\n if TransformationsEnum('hog') in transformations:\n img = to_hog(img)\n # because something fucky happens when you try apply HOG and rescale its intensity and then try to save it using pillow :(\n plt.imsave(new_image_path, img)\n continue\n\n if TransformationsEnum('grayscale') in transformations:\n img = to_gray_scale(img)\n\n if TransformationsEnum('edge_enh') in transformations:\n img = edge_enhacement(img)\n\n if TransformationsEnum('tv_den') in transformations:\n img = denoise_tv(img)\n\n img.save(new_image_path)\n\n\n #reutrning current_id instead\n print(f\"Processed category {current_category_name}, {len(df)} in total\")\n return df, current_id",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n\n if self.parent:\n self.level = self.parent.level + 1\n self.blog_category = self.parent.blog_category\n\n super(Category, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.title)\n existing = Project.objects.filter(\n # has some false positives, but almost no false negatives\n slug__startswith=self.slug).order_by('-pk').first()\n if existing:\n self.slug = self.slug + str(existing.pk)\n\n \"\"\"Save images to the Media model\"\"\"\n imagesave(self.description)\n\n super(Project, self).save(*args, **kwargs)",
"def resizeEvent(self, event):\n self.image_canvas.fit_in_view()",
"def save_category(self):\n self.start_connection()\n for element in config.CATEGORIES:\n self.cursor.execute(f\"\"\"\n INSERT INTO category (name) VALUES (\"{element}\")\"\"\")\n self.mysql_connection.commit()\n self.close_connection()",
"def save(self, *args, **kwargs):\n self.name = unique_slugify(self.name, instance=self)\n\n if self.is_personal and self.user.username != self.name:\n self.user.username = self.name\n self.user.save()\n\n if self.is_customer:\n self.update_customer()\n\n if not self.image:\n self.set_image_from_name(should_save=False)\n\n return super().save(*args, **kwargs)",
"def test_image_uploads_on_save(self):\n \n files_count = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n with open('media/test_images/test.jpg') as f:\n self.client.post(reverse('edit'), {'ava': f})\n files_count_after = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n # added file and thumbnail\n self.assertEquals(files_count_after - files_count, 2) \n \n # test image scales \n from PIL import Image\n im = Image.open(settings.MEDIA_ROOT + '/persons/test.thumbnail.jpg')\n thumbnail_size = Person.thumbnail_size\n self.assertEquals((thumbnail_size,thumbnail_size), im.size)"
] | [
"0.6385444",
"0.62491363",
"0.6205879",
"0.60539657",
"0.5842181",
"0.5778853",
"0.57606965",
"0.5692971",
"0.56885064",
"0.5646007",
"0.55873954",
"0.55335635",
"0.5482986",
"0.54811686",
"0.5468032",
"0.5459336",
"0.53800124",
"0.535844",
"0.53266585",
"0.53228825",
"0.5319206",
"0.53018135",
"0.5288305",
"0.5288305",
"0.5277817",
"0.5265989",
"0.5256056",
"0.524733",
"0.5241211",
"0.52411854"
] | 0.82570636 | 0 |
Override magic method to return a userfriendly string representation of the object on str(category_object) | def __str__(self):
return self.category_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n \n return \"Category ID: %s %s\" % (self.category_id, self.name)",
"def __repr__(self):\n return f\"Category=(id={self.id},category_name={self.category_name},category_slug={self.category_slug})\"",
"def __str__(self):\n return self.cat_name",
"def __repr__(self):\n return '{}:{}:{}'.format(self.category, self.name, self.id)",
"def __repr__(self):\n\n return f'<Category cat_code={self.cat_code} name={self.name}>'",
"def __str__(self):\n return str(self.obj)",
"def __repr__(self):\n\n return f\"<Cat id={self.cat_id} name={self.name}\"",
"def __str__(self):\n return \"cat:\"+str(self.name)+\":\"+str(self.age)",
"def __repr__(self):\n\n return \"<Category name=%s>\" % (self.name)",
"def __str__(self):\n return str(self.__dict__['_obj'])",
"def __str__(self):\n\n return '__str__ for Object'",
"def __repr__(self):\n\n return f'<Category category_id={self.category_id} category_name={self.category_name}\\\n count_crime ={self.count_crime} label = {self.label}>'",
"def __str__(self):\n table = 'objects'.join(self.galcat.__str__().split('objects')[1:])\n return self.__repr__()+'\\n'+table",
"def name(self) -> str:\n return str(self.category.value)",
"def _repr_(self):\n return \"Category of hyperbolic models of {}\".format(self.base())",
"def str_(object_):\n return str(object_)",
"def __str__(self):\n return f'{self.mission} {self.category}: {self.title} / {self.created_at}'",
"def __str__(self):\n return '{}({})'.format(type(self).__name__, self.__name)",
"def __str__(self):\n return str(self.__dict__)",
"def __str__(self):\n return str(self.__dict__)",
"def format_item(self,obj):\n return unicode(obj)",
"def __str__(self):\n return self.get_string()",
"def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)",
"def __str__(self):\n return super().__str__()",
"def __str__(self):\n return \"{}\".format(super().__str__())",
"def _get_category_label(category_obj, level):\n result = ''\n for i in range(0, level + 1):\n if i < len(category_obj):\n level_label = category_obj[i]\n if not level_label:\n level_label = '$'\n else:\n # place holder\n level_label = '$'\n if not result:\n result = level_label\n else:\n result = result + '###' + level_label\n return result",
"def __str__(self):\n\n\t\toutput = MyUtilities.common.Container.__str__(self)\n\t\tif (self.thing is not None):\n\t\t\toutput += f\"-- Title: {self.title}\\n\"\n\t\treturn output",
"def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"",
"def __str__(self):\n return self.get_str()",
"def __str__(self):\n if self.sense:\n return \"_{}_{}_{}\".format(*self)\n else:\n return \"_{}_{}\".format(*self)"
] | [
"0.75058025",
"0.7413164",
"0.740318",
"0.7380656",
"0.71459764",
"0.70923764",
"0.7047891",
"0.7046232",
"0.7028249",
"0.69508773",
"0.6937812",
"0.692393",
"0.6880881",
"0.6708073",
"0.66840637",
"0.66094226",
"0.652625",
"0.64997977",
"0.6481348",
"0.6481348",
"0.6469661",
"0.6466692",
"0.6457473",
"0.6430894",
"0.64263266",
"0.6409256",
"0.6400563",
"0.6394919",
"0.6391053",
"0.63799006"
] | 0.7669583 | 0 |
Override magic method to return a developerfriendly string representation of the object on repr(category_object) | def __repr__(self):
return f"Category=(id={self.id},category_name={self.category_name},category_slug={self.category_slug})" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __repr__(self):\n return '{}:{}:{}'.format(self.category, self.name, self.id)",
"def __repr__(self):\n\n return f'<Category cat_code={self.cat_code} name={self.name}>'",
"def __repr__(self):\n\n return f\"<Cat id={self.cat_id} name={self.name}\"",
"def __repr__(self):\n\n return \"<Category name=%s>\" % (self.name)",
"def __repr__(self):\n\n return f'<Category category_id={self.category_id} category_name={self.category_name}\\\n count_crime ={self.count_crime} label = {self.label}>'",
"def __str__(self):\n return self.category_name",
"def __str__(self):\n \n return \"Category ID: %s %s\" % (self.category_id, self.name)",
"def __str__(self):\n return str(self.obj)",
"def __str__(self):\n return str(self.__dict__['_obj'])",
"def _repr_(self):\n return \"Category of hyperbolic models of {}\".format(self.base())",
"def __str__(self):\n return self.cat_name",
"def __str__(self):\n\n return '__str__ for Object'",
"def __str__(self):\n return \"cat:\"+str(self.name)+\":\"+str(self.age)",
"def __str__(self):\n table = 'objects'.join(self.galcat.__str__().split('objects')[1:])\n return self.__repr__()+'\\n'+table",
"def __str__(self):\n if self.__description:\n return self.__description\n return repr(self)",
"def __str__(self):\n return repr(self)",
"def repr_(object_):\n return repr(object_)",
"def __str__(self):\r\n return repr(self)",
"def __repr__(self):\n return repr(self.__dict__['_obj'])",
"def __str__(self):\n return str(self.__dict__)",
"def __str__(self):\n return str(self.__dict__)",
"def __unicode__(self):\n # Should be overwritten by base classes\n return object.__repr__(self)",
"def __str__(self):\n return self.__repr__()",
"def __str__(self):\n return self.__repr__()",
"def __str__(self):\n return self.__repr__()",
"def __str__(self):\n return self.__repr__()",
"def __str__(self):\n return self.__repr__()",
"def __str__(self):\r\n return self.__repr__()",
"def __str__(self):\n return \"<%s: %s>\" % (self.__class__, self.describe())",
"def __str__(self):\n\n desc = self.description\n if desc is not None:\n return str(desc)\n\n desc = self.debugDescription\n if desc is not None:\n return str(desc)\n\n return repr(self)"
] | [
"0.7909407",
"0.76931256",
"0.7598569",
"0.75420475",
"0.7461687",
"0.7396662",
"0.73540694",
"0.7169469",
"0.71685416",
"0.7161115",
"0.7109059",
"0.70717186",
"0.7037763",
"0.7030703",
"0.69059324",
"0.6898647",
"0.6874442",
"0.6865749",
"0.67619854",
"0.67369884",
"0.67369884",
"0.6729222",
"0.66490954",
"0.66490954",
"0.66490954",
"0.66490954",
"0.66490954",
"0.66265815",
"0.6587973",
"0.65875053"
] | 0.7837578 | 1 |
Hidden instance method to append the object to the class attribute __item_list | def __append_to_item_list(self):
Item.get_item_list().append(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def append (self, item):\n pass",
"def append(self, item: Any) -> BaseList:\n super().append(item)\n return self",
"def append(self, item):\n # type: (Any) -> None\n list.append(self, self.ref(item))",
"def append(self, item):\n self.update([item])",
"def append(self, item):\n self.items.append(item)",
"def __init__(self):\n self.item_list = []",
"def add(self, item):",
"def push(self, item):\n if hasattr(item, \"__iter__\"):\n self.items.extend(item)\n else:\n self.items.append(item)",
"def append_item(self, item):\r\n if not isinstance(item, LR0Item):\r\n raise TypeError\r\n self.itemlist.append(item)",
"def extend(self, item: Any) -> BaseList:\n super().extend(item)\n return self",
"def get_item_list(cls):\n if Item.__item_list is None:\n Item.__item_list = []\n return Item.__item_list",
"def Push(self, item):\n self.list.append(item)",
"def append(self, item: T) -> None:\n pass",
"def append(self, item: T) -> None:\n pass",
"def add(self, *args, **kwargs):\n obj = self._class(*args, **kwargs)\n self._items.append(obj)",
"def append(self, obj):\r\n raise NotImplementedError",
"def append(self, object):\r\n raise NotImplementedError()",
"def __init__(self):\r\n self.items = []",
"def add_item(self, item):\n self.items.append(item)",
"def add_item(self, item: Item):\n self.__items_list.append(item)",
"def append(self, value):\n assert isinstance(value, Item), type(value)\n list.append(self, value)\n self.emit('appened', value)\n self.emit('modified')",
"def __init__(self):\n self.items = []",
"def __init__(self):\n self.items = []",
"def __init__(self):\n self.items = []",
"def __init__(self):\n self.items = []",
"def __init__(self):\n self.items = []",
"def __init__(self):\n self.items = []",
"def __init__(self):\n self.items = []",
"def Item(self) -> object:",
"def Item(self) -> object:"
] | [
"0.7601781",
"0.7582431",
"0.7336355",
"0.7185622",
"0.71147776",
"0.7101685",
"0.70831436",
"0.69715333",
"0.6935735",
"0.6905822",
"0.6859141",
"0.6827798",
"0.682628",
"0.682628",
"0.67792153",
"0.67779654",
"0.67643386",
"0.66602784",
"0.66271436",
"0.6626822",
"0.65846604",
"0.6580803",
"0.6580803",
"0.6580803",
"0.6580803",
"0.6580803",
"0.6580803",
"0.6580803",
"0.65762264",
"0.65762264"
] | 0.8207385 | 0 |
Instance method to increment the views variable | def increment_views(self):
self.views += 1
self.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def increase_view_count(self):\n try:\n self.view_counter += 1\n self.save(update_fields=['view_counter'])\n except:\n warnings.warn(\"Unable to increase view count for advert {}\".format(self.pk))",
"def count_view(self):\n self.count_views += 1\n self.save(update_fields=['count_views'])",
"def increment_counter(self) -> None:",
"def inc( self ):\n self.count += 1",
"def update_view_count(self, video_ID): #WORKS\n try:\n self.cur.execute(\"UPDATE videos SET view_count = view_count + 1 WHERE video_ID = \\\"{}\\\"\".format(video_ID)) # Adds 1 to the existing value.\n self.db.commit()\n except:\n self.db.rollback()",
"def inc(self):\n \n self.count += 1",
"def updateVisits(self):\n self.nVisits += 1",
"def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request, \"postdetail.html\", {\"post\": post})",
"def getNumberOfViews(self) -> int:\n ...",
"def increase_counter(self):\n self.values = self.values + 1",
"def test_increment_view_count(self):\n shortUrl = 'increment_url'\n url = 'http://www.google.com'\n author = 'author'\n self.urlShortener.saveUrl(shortUrl, url, author)\n\n self.urlShortener.increment_visited_count(shortUrl)\n self.urlShortener.increment_visited_count(shortUrl)\n\n doc = self.urlShortener.get_doc_from_shorturl(shortUrl)\n self.assertEqual(int(doc['clicks']), 2)\n\n self.urlShortener.removeUrl(shortUrl)",
"def update_count(self, model, view):\r\n view.SetLabel(str(len(model)))",
"def add_count(self):\n self.count += 1",
"def _increment_viewcount(model, model_id: int, request):\n object_key = model.__name__ + ':' + str(model_id)\n\n redis = get_redis_connection('traffic_stats')\n view_count = redis.get(object_key)\n if not view_count:\n # Cache miss. Get the view count from the database and cache it.\n try:\n view_count = int(model.objects.get(identifier=model_id).view_count)\n except ObjectDoesNotExist:\n # If the object doesn't even exist in the database, don't track it.\n return\n except FieldDoesNotExist:\n log.error(\n 'Cannot track model {} because it has no view_count field. '\n 'Views for this model will be lost.'.format(model.__name__)\n )\n return -1\n redis.set(object_key, view_count)\n else:\n view_count = int(view_count)\n\n # Only increment the view count if the user has not visited the resource in\n # the last few minutes. Prevents metrics gaming shenanigans.\n ip = _get_user_ip(request)\n if not _is_recent_visitor(ip, object_key):\n redis.incr(object_key)\n view_count += 1\n _mark_recent_visitor(ip, object_key)\n\n # Update the last access time of the model.\n # Store in a sorted set so we can easily find the oldest keys.\n timestamp = time.time()\n redis.execute_command(\n 'ZADD model-last-accessed {} {}'.format(timestamp, object_key)\n )\n return view_count",
"def increment_number(self):\n # self.number += 1\n print('fuckwit')\n # print(self.number)",
"def increase_page_view(sender, **kwargs):\n pass",
"def update_count(self):\n pass",
"def update_count(self):\n pass # Do nothing",
"def incInstCount(self):\n self.instCount += 1",
"def increase_count(self, number=1):\n self.count += number",
"def refresh(self):\n #self.find('counter-label').text = 'Counter: %i' % self.counter\n\n #@on('increment-button', 'click')\n #def on_button(self):\n \"\"\"\n This method is called every time a child element\n with ID 'increment-button' fires a 'click' event\n \"\"\"\n #self.counter += 1\n #self.refresh()",
"def remote_update(self, increment):\r\n\r\n self.window += increment",
"def _increase_counter(self, response):\n response_id = response.meta['__id']\n spot = self._request_registry[response_id]\n spot['counter'] = spot.get('counter', 0) + 1",
"def _inc_counter(self) -> None:\n self._state_storage.increment_counter()",
"def increment(self):\r\n return self.add(1)",
"def increment_number_served(self, increment):\n self.number_served += increment",
"def increment_number_served(self, increment):\n self.number_served += increment",
"def process_view(self, request, view_func, view_args, view_kwargs):\n if view_func.__name__ == settings.COUNTER_ADS_VIEW:\n counter_key = view_kwargs.get(\"pk\")\n ads_counter = Counter(view_func.__name__, counter_key)\n ads_counter.hit(request)",
"def inc(self):\n self._value += 1",
"def on_pushButton1_clicked(self):\n self.count+=1\n self.zShow.printf(\"count=%f\", self.count)"
] | [
"0.77704555",
"0.76575696",
"0.70364726",
"0.6700548",
"0.6661522",
"0.658473",
"0.65695125",
"0.65456796",
"0.6533949",
"0.6532441",
"0.6521893",
"0.65049404",
"0.64822143",
"0.6479589",
"0.642222",
"0.640727",
"0.6284937",
"0.6231589",
"0.6199503",
"0.61729074",
"0.61685324",
"0.61476856",
"0.6130139",
"0.6127602",
"0.608262",
"0.60804343",
"0.60804343",
"0.6072204",
"0.6069513",
"0.60452026"
] | 0.8641375 | 0 |
Overrides the save method to notify all registered users that a new item has been added | def save(self, *args, **kwargs):
if self not in Item.objects.all() and settings.EMAIL_HOST_USER:
# Send notification for newly created item
self.send_email_notification_to_users(
subject="[Portfolio App Demo] New Item added!",
message=f"A new item '{self.item_name}' has been added! Check it out here... https://www.gbournique.com/items/{self.category_name.category_slug}/{self.item_slug}",
)
super(Item, self).save(*args, **kwargs)
if self not in Item.get_item_list():
self.__append_to_item_list() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_user(self):\n\n User.user_list.append(self)",
"def save_user(self):\n User.user_list.append(self)",
"def save_user(self):\n User.user_list.append(self)",
"def save_users(self):\n\n User.user_list.append(self)",
"def save_user(self):\n\n User.user_list.append(self)",
"def save_to_users(self):\n Data.add_data(self.user_data())",
"def save(self, *args, **kwargs):\n create = self.id is None\n # Strip out the user keyword argument, since the super save method\n # does not expect it.\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(Member, self).save(*args, **kwargs)\n # Only register if the object is not being updated\n if create:\n self._register(user=user)\n else:\n # User and UserProfile already exist so save them too\n self.userprofile.save()\n self.userprofile.user.save()",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save_user(self):\n db.session.add(self)\n db.session.commit()",
"def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)",
"def save_items(self):\n raise NotImplementedError()",
"def save_user(self):\n User.user_list.append(self)\n\n # finding a user's credentials",
"def save_item(self):\r\n raise NotImplementedError(\"Function not implemented, please implement in sub class\")",
"def save_users(user):\n user.save_user()",
"def save(self):\n users = User.getall()\n users[self.username] = dict(self)\n return self.db().put(self.udb, users)",
"def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SALESMAN\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(user.id)\n user.save()\n UserExtra.objects.create(uid=user)\n UserBase.objects.create(\n uid=user,\n phone=user.username\n )\n leader = Team.objects.get(id=form.data.get('team')).leader\n inviter_queryset = InviteRelationManager.objects.filter(invitee=leader)\n if inviter_queryset.exists():\n inviter_obj = inviter_queryset.first()\n superior = f'{inviter_obj.superior}|{leader.id}'\n else:\n superior = f'{leader.id}'\n InviteRelationManager.objects.create(inviter=leader, invitee=user, level=1, superior=superior)\n UserBusiness.objects.create(uid=user)\n super().save_model(request, obj, form, change)",
"def save_user_ref(sender, created, instance, **_):\n if created:\n UserExtend.objects.create(user=instance)\n UserSettings.objects.create(user=instance)",
"def save(self, **kwargs):\n super().save(**kwargs)\n self.instance.send_invite()\n return self.instance",
"def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_user(context, self.id, updates)\n self.obj_reset_changes()",
"def _save_user(self, user):\n self.firebase.patch(f'/{self.USERS_KEY}', {str(user.id): user.username})",
"def save(self, register=False):\n sha = sha1(self.email).hexdigest()\n infos = self.to_dict()\n infos[\"plan\"] = infos[\"plan\"][\"id\"] if infos[\"plan\"] else None\n\n if not redis.hmset(\"sl:account:{}\".format(sha), infos):\n raise SleekException(\"Could not save current user.\", 401)\n \n if register:\n try:\n send_email.delay(\n \"[email protected]\", \"Welcome to sleekstatus !\",\n \"Welcome message\", [self.email]\n )\n except:\n pass # Cannot send email",
"def save(self, *args, **kwargs):\n return",
"def save(self, *args, **kwargs):\n if not self.id:\n self.last_msg_time = timezone.now()\n super(WeixinUser, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n if not self.id and User.objects.filter(email=self.email).exists():\n raise IntegrityError()\n super().save(*args, **kwargs)",
"def on_user_create(self, user):",
"def save(self, commit=True):\n user = super(InvitationCompleteForm, self).save(commit)\n\n def save_invited_user():\n invited_user = self.invited_user\n invited_user.created_user = user\n invited_user.status = InvitedUser.STATUS_REGISTERED\n invited_user.save()\n if commit:\n save_invited_user()\n else:\n self.save_invited_user = save_invited_user\n return user",
"def save(self, *args, **kwargs):\n pass",
"def add_items(self, items):\n\n self.model.add_items(items)\n self.refreshed.emit()",
"def test_save_users(self):\n\n self.new_users.save_users() # saving the new user\n self.assertEqual(len(User.user_list), 1)"
] | [
"0.7319862",
"0.71779156",
"0.71779156",
"0.71767735",
"0.7166091",
"0.70460325",
"0.6611724",
"0.6498972",
"0.6498972",
"0.6496938",
"0.6492612",
"0.6447528",
"0.6414119",
"0.6330929",
"0.63212955",
"0.63116044",
"0.62686855",
"0.6263381",
"0.6255758",
"0.6254118",
"0.6245261",
"0.618714",
"0.6121934",
"0.6110991",
"0.610169",
"0.60957557",
"0.6087933",
"0.6084532",
"0.6067125",
"0.6046002"
] | 0.79123265 | 0 |
Adds the greater or eq than behavior to compare two Item objects based on the number of views. This allows to sort items by their number of views with items.sort(). | def __ge__(self, value):
if not isinstance(value, Item):
raise ValueError("Can't compare Item to non-Item type")
return self.views >= value.views | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __lt__(self, value):\n if not isinstance(value, Item):\n raise ValueError(\"Can't compare Item to non-Item type\")\n return self.views < value.views",
"def OnCompareItems(self, item1, item2):\r\n\r\n return cmp(self.GetItemText(item1), self.GetItemText(item2))",
"def OnCompareItems(self, item1, item2):\n # Get the item data\n data_1 = self.GetItemText(item1)\n data_2 = self.GetItemText(item2)\n # Compare the item data\n if data_1 < data_2:\n return -1\n elif data_1 > data_2:\n return 1\n else:\n return 0",
"def pareto_better(self, other: \"EvalItem\") -> bool:\n return self.size <= other.size and other.result <= self.result",
"def __cmp__(self, other_view):\n # __cmp__ functions return -1 if we are less than schema\n # 0 if we are the same as schema\n # 1 if we are greater than schema\n # If our 'compare' method returns anything there are differences\n if self.compare(other_view):\n return 1\n else:\n return 0",
"def __gt__(self, other):\n\n if self.count == other.count:\n return self.word < other.count\n return self.count > other.count",
"def __gt__(self, other):\n return self.weight() > other.weight()",
"def __gt__(self, vs) -> bool:\n return vs <= self",
"def testCmp(self):\n\n item1 = models.Room(id=1,\n name=\"Test Room\",\n roomTypeId = 1)\n\n item2 = models.Room(id=1,\n name=\"Test Room\",\n roomTypeId = 1)\n \n self.assertEqual(item1,item2)\n \n #Order On Name\n item2.name = \"A_Test\"\n self.assertGreater(item1,item2)\n\n item2.name = \"Z_Test\"\n self.assertLess(item1,item2)\n\n item2.name = item1.name\n item2.roomTypeId = 0\n self.assertGreater(item1,item2)\n\n item2.roomTypeId = 2\n self.assertLess(item1,item2)",
"def _cmp(x, y):\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL",
"def __gt__(self, other):\n return greater(self, other)",
"def __gt__(self, other):\n return self.greaterThan(other)",
"def __gt__(self, other):\n return self.element() > other.element()",
"def __gt__(self, other):\n if not isinstance(other, OrderedDict):\n raise TypeError('Can only compare with other OrderedDicts')\n # FIXME: efficiency?\n # Generate both item lists for each compare\n return (self.items() > other.items())",
"def __gt__(self,other):\r\n\t\tsorted_self = sorted(self.vector, reverse=True) #sort both lists in descending order\r\n\t\tsorted_other = sorted(other, reverse=True) \r\n\t\tcmpflag = False\r\n\t\tfor li1, li2 in zip(sorted_self, sorted_other):\r\n\t\t\tif(li1 > li2):\r\n\t\t\t\tcmpflag = True\r\n\t\treturn cmpflag",
"def __gt__(self, other):\n return self.weight > other.weight",
"def __gt__(self, other: 'MultiChoiceQuestionGroup') -> DataFrame:\n results = {}\n for key in self._item_dict.keys():\n results[key] = self[key] > other[key]\n return DataFrame(results)",
"def compare_popularity(self, a, b):\n a_score = a['stats']['attending'] + a['stats']['maybe'] / 2.0\n b_score = b['stats']['attending'] + b['stats']['maybe'] / 2.0\n if a_score < b_score:\n return -1\n elif a_score > b_score:\n return 1\n else:\n return 0",
"def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )",
"def __gt__(self, other):\n return self.eval_score < other.eval_score",
"def __gt__(self, *args):\n return _ida_hexrays.citem_locator_t___gt__(self, *args)",
"def __gt__(self, other):\n return self._key > other._key",
"def __gt__(self, other):\n return other < self",
"def __gt__(self, other):\r\n assert isinstance(other, Order)\r\n return self - other > 0",
"def __gt__(self, other):\n return self._ordinals > other.ordinal()",
"def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other",
"def __gt__(self, other):\n return self.__cmp__(other) > 0",
"def __gt__(self, other):\n return True if self._compare(other) > 0 else False",
"def __gt__ (self, other) :\n return other.__lt__(self)",
"def __gt__(self, other):\n if isinstance(other, type(self)):\n return self.number > other.number\n return NotImplemented"
] | [
"0.64193386",
"0.61247426",
"0.6124245",
"0.60153514",
"0.6006596",
"0.5853002",
"0.58411884",
"0.5817707",
"0.5793101",
"0.5789319",
"0.57873005",
"0.5785609",
"0.57840955",
"0.57810473",
"0.5768118",
"0.5753078",
"0.5646922",
"0.5607596",
"0.5600079",
"0.55931616",
"0.5587003",
"0.55840224",
"0.55713785",
"0.556941",
"0.5531546",
"0.55215824",
"0.55111873",
"0.5505891",
"0.5498736",
"0.5496514"
] | 0.6777125 | 0 |
Adds the less than behavior to compare two Item objects based on the number of views. This allows to sort items by their number of views with items.sort(). | def __lt__(self, value):
if not isinstance(value, Item):
raise ValueError("Can't compare Item to non-Item type")
return self.views < value.views | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __ge__(self, value):\n if not isinstance(value, Item):\n raise ValueError(\"Can't compare Item to non-Item type\")\n return self.views >= value.views",
"def __lt__(self, other):\n return self.weight() < other.weight()",
"def __lt__(self, other):\n return less(self, other)",
"def __lt__(self, other):\n return self.weight < other.weight",
"def __lt__(self, other):\n return self.lessThan(other)",
"def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )",
"def __lt__(self, other):\n\n # seems like this should be == -1 but we're using a min heap\n return self._comparator.compare_measurements(self, other) == 1",
"def pareto_better(self, other: \"EvalItem\") -> bool:\n return self.size <= other.size and other.result <= self.result",
"def OnCompareItems(self, item1, item2):\n # Get the item data\n data_1 = self.GetItemText(item1)\n data_2 = self.GetItemText(item2)\n # Compare the item data\n if data_1 < data_2:\n return -1\n elif data_1 > data_2:\n return 1\n else:\n return 0",
"def __lt__(self, other):\n return self.index < other.index",
"def __lt__(self,other):\r\n\t\treturn self.n < other.n",
"def __lt__(self, other):\n return self.get_distance() < other.get_distance()",
"def __lt__(self, other):\n return self.score < other.score",
"def __lt__(self, other):\n return self.element() < other.element()",
"def __lt__(self, other):\n return self._obj_func() < other._obj_func()",
"def __lt__(self, other):\n if not set(self.keys()) <= set(other.keys()):\n return False\n result = False\n for value, times in dict.items(self):\n count = other.get(value, 0)\n if times > count:\n return False\n elif times < count:\n result = True\n return result or (dict.__len__(self) < dict.__len__(other))",
"def OnCompareItems(self, item1, item2):\r\n\r\n return cmp(self.GetItemText(item1), self.GetItemText(item2))",
"def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::lt\"},\n )",
"def __lt__(self, other):\n return self._d[\"priority\"] < other[\"priority\"]",
"def __lt__(self, other):\r\n return self.estimateCost < other.estimateCost",
"def __lt__(self, other):\n return not (self.unsplitable or self.split_necessity < other.split_necessity)",
"def __lt__(self, other):\n try:\n return self.length2 < other.length2\n except AttributeError:\n return assert_unorderable(self, other)",
"def __lt__(self, other):\n return True",
"def __lt__(self, other):\n return True",
"def __lt__(self, other):\r\n assert isinstance(other, Order)\r\n return self - other < 0",
"def __cmp__(self, other_view):\n # __cmp__ functions return -1 if we are less than schema\n # 0 if we are the same as schema\n # 1 if we are greater than schema\n # If our 'compare' method returns anything there are differences\n if self.compare(other_view):\n return 1\n else:\n return 0",
"def __gt__(self, other):\n\n if self.count == other.count:\n return self.word < other.count\n return self.count > other.count",
"def __lt__(self, other):\n return self.__cmp__(other) < 0",
"def __lt__(self, other):\n\t\tselfAttrs = (self.inflatedCost, self.label.winery.name, self.label.name, self.label.vintage)\n\t\totherAttrs = (other.inflatedCost, other.label.winery.name, other.label.name, other.label.vintage)\n\t\treturn selfAttrs < otherAttrs",
"def test_lt(self):\n assert self.app2 < self.app1\n assert self.app3 > self.app2"
] | [
"0.6530827",
"0.63083875",
"0.6215421",
"0.6153583",
"0.6152962",
"0.6097981",
"0.60503787",
"0.6007392",
"0.5985073",
"0.5960501",
"0.58917093",
"0.58641857",
"0.58614886",
"0.58559823",
"0.5845405",
"0.5823928",
"0.5821346",
"0.58023506",
"0.5794721",
"0.5771494",
"0.57623196",
"0.574824",
"0.5748043",
"0.5748043",
"0.57445616",
"0.57377446",
"0.5722011",
"0.57219666",
"0.5709032",
"0.5705632"
] | 0.7062926 | 0 |
Read cookies file, copypasted from chrome debugger, and return in a form suitable for requests.get. | def get_cookies():
home = expanduser('~')
with open(home + '/config/edx-tools/cookie.txt') as f:
lines = f.readlines()
lines = [line.strip(' \t\n\r') for line in lines if '✓' not in line]
d = {}
count = 0
for line in lines:
if count == 0:
cookie_name = line
if count == 1:
cookie_val = line
d[cookie_name] = cookie_val
if count == 5:
count = 0
else:
count += 1
return d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cookies_chrome(domain):\n cookpath = os.path.expanduser(udata.srcs['chrome']+'/Cookies')\n\n # copy DB to prevent 'database is locked' error\n cookcopy = cookpath+'.copy'\n shutil.copy(cookpath, cookcopy)\n\n # open SQLite3 database and execute query\n jar = sqlite3.connect(cookcopy)\n oreos = jar.execute('select name,encrypted_value from cookies where host_key like \"%%%s%%\"' % (domain)).fetchall()\n\n # decrypt cookies and build output dict\n baked = {}\n for tname, tenc in oreos:\n # check storage version (see os_crypt_linux.cc in Chromium source)\n stversion = tenc[:3]\n if stversion == 'v10':\n try:\n tval = chrome_decrypt(tenc)\n except:\n print(\"!! Failed to decrypt cookie\")\n tval = None\n elif stversion == 'v11':\n #print(\"!! Unable to decrypt v11-keyed cookies (libsecret/gnome-keyring/kwallet).\\n\"\n # \" To re-enable insecure cookie/password storage in Chrome or Chromium, start\\n\"\n # \" google-chrome or chromium with --password-store=basic or downgrade to\\n\"\n # \" Chrome/Chromium v50, which stores passwords via libsecret, but does not\\n\"\n # \" encrypt cookies\")\n tval = None\n else:\n # plaintext\n tval = tenc\n baked[tname] = tval\n\n os.remove(cookcopy)\n\n return baked",
"def _load_cookies(filename):\n with open(filename, 'rb') as handle:\n return pickle.load(handle)",
"def loadCookies():\n try:\n with open(getCookieFile(), 'rb') as f:\n cookies = pickle.load(f)\n f.close()\n return cookies\n except IOError as err:\n log('Unable to load cookies: {}'.format(err), True)\n\n return None",
"def cookie_cutter():\n sweets = open('cookies.txt', 'r').read().splitlines()\n cookie = {}\n for i in sweets:\n x = i.split()\n l = list(x[4])\n if l[0] in '1234567890':\n x[4], x[5] = x[5], x[4]\n # domain = x[0]\n # secure = x[1]\n name = x[4]\n try:\n value = x[6]\n except IndexError:\n value = x[5]\n # path = x[2]\n cookie[name] = value\n return cookie",
"def load_cookies(filename):\n with open(filename, 'rb') as f:\n requests_cookiejar = pickle.load(f)\n return requests_cookiejar",
"def get_cookies2():\n cookies = {\n\n }\n\n return cookies",
"def get_cookies(domname):\n if 'firefox' in udata.srcs:\n cout = get_cookies_firefox(domname)\n elif 'chrome' in udata.srcs:\n cout = get_cookies_chrome(domname)\n else:\n print(\"Error: No cookie source defined. Define either `srcs.firefox` or `srcs.chrome`.\")\n cout = None\n return cout",
"def get_cookies_firefox(domname):\n cookpath = os.path.expanduser(udata.srcs['firefox']) + '/cookies.sqlite'\n\n # copy DB to prevent disk I/O error on Windows\n cookcopy = cookpath+'.copy'\n shutil.copy(cookpath, cookcopy)\n\n sqx = sqlite3.connect('%s' % (cookcopy))\n cks = sqx.execute('select name,value from moz_cookies where host = \"%s\"' % (domname)).fetchall()\n cookies = {}\n for cn, cv in cks:\n cookies[cn] = cv\n os.remove(cookcopy)\n return cookies",
"def __loadCookies(self):\n with open(self.cookies_file, 'rb') as cookie_file:\n LOGGER.debug(\"Unpickling HTTP cookies from file: {0}\".format(self.cookies_file))\n session.cookies = requests.utils.cookiejar_from_dict(pickle.load(cookie_file))",
"def load_cookies_from_lwp(filename):\n lwp_cookiejar = cookielib.LWPCookieJar()\n lwp_cookiejar.load(filename, ignore_discard=True)\n return lwp_cookiejar",
"def parse_cookies( headers ):",
"def set_cookies():\r\n cookie = {'user': 'george', 'password': '1123abc'}\r\n response = requests.get(base_url + '/cookies', cookies=cookie)\r\n print(response.status_code)\r\n print(response.text)\r\n\r\n # request baidu\r\n response = requests.get('https://baidu.com')\r\n\r\n print(response.cookies)\r\n print(type(response.cookies))\r\n # to traversal a dict use for key,value in dict.items() method\r\n for key, value in response.cookies.items():\r\n print(key + ':' + value)",
"def get_cookies(self):\n return self.cookies",
"def get_cookie():\n response = requests.get(base_url)\n cookie = response.url.split('/')[3]\n return cookie",
"def cookies(self):\r\n return Dict(**self._get_cookies())",
"def GetCookies(host, path, cookie_paths=None):\n cookies = {}\n if cookie_paths is None:\n cookie_paths = (constants.GOB_COOKIE_PATH, constants.GITCOOKIES_PATH)\n for cookie_path in cookie_paths:\n if os.path.isfile(cookie_path):\n with open(cookie_path) as f:\n for line in f:\n fields = line.strip().split('\\t')\n if line.strip().startswith('#') or len(fields) != 7:\n continue\n domain, xpath, key, value = fields[0], fields[2], fields[5], fields[6]\n if cookielib.domain_match(host, domain) and path.startswith(xpath):\n cookies[key] = value\n return cookies",
"def extract_cookie_info():\n # setup cookie jar\n cj = cookielib.CookieJar()\n login_data = urllib.urlencode({ID_USERNAME: USERNAME,ID_PASSWORD: PASSWORD})\n # create url opener\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n resp = opener.open(LOGIN_URL, login_data)\n # send login info\n for cookie in cj:\n print \"----First time cookie: %s --> %s\" % (cookie.name,cookie.value)\n print \"Headers: %s\" % resp.headers\n # now access without any login info\n resp = opener.open(NORMAL_URL)\n for cookie in cj:\n print \"++++Second time cookie: %s --> %s\" % (cookie.name,cookie.value)\n print \"Headers: %s\" % resp.headers",
"def _load_credentials(self):\n wbi = httpbakery.WebBrowserInteractor(open=visit_page_with_browser)\n self._cookiejar = MozillaCookieJar(self._cookiejar_filepath)\n self._client = httpbakery.Client(cookies=self._cookiejar, interaction_methods=[wbi])\n\n if os.path.exists(self._cookiejar_filepath):\n logger.debug(\"Loading credentials from file: %r\", str(self._cookiejar_filepath))\n try:\n self._cookiejar.load()\n except Exception as err:\n # alert and continue processing (without having credentials, of course, the user\n # will be asked to authenticate)\n logger.warning(\"Failed to read credentials: %r\", err)\n else:\n logger.debug(\"Credentials file not found: %r\", str(self._cookiejar_filepath))\n\n # iterates the cookiejar (which is mutable, may change later) and get the cookies\n # for comparison after hitting the endpoint\n self._old_cookies = list(self._cookiejar)",
"def cookies(self):\n return Dict(**self._get_cookies())",
"def default_cookie():\n with open(raw_cookie_file) as f:\n rc = \"\".join(f.readlines())\n return browser_cookie(rc)",
"def getCookie(key):",
"def load(self):\n if not os.path.exists(self.file_path):\n return\n with open(self.file_path, \"rb\") as cookie_file:\n cookie_file = xbmcvfs.File(self.file_path, 'rb')\n cookie_jar = pickle.loads(cookie_file.readBytes())\n return cookie_jar",
"def convert_cookie(cookie_raw):\n cookie = {}\n logging.debug('Raw Cookie: ' + cookie_raw)\n try:\n for i in [i.strip() for i in cookie_raw.split(';')]:\n cookie[i.split('=')[0]] = i.split('=')[1]\n except IndexError:\n #if someone put a ; at the EOF\n pass\n return cookie",
"def cookiejar(name='session'):\n log = logging.getLogger('ipsv.common.cookiejar')\n spath = os.path.join(config().get('Paths', 'Data'), '{n}.txt'.format(n=name))\n cj = http.cookiejar.LWPCookieJar(spath)\n log.debug('Attempting to load session file: %s', spath)\n if os.path.exists(spath):\n try:\n cj.load()\n log.info('Successfully loaded a saved session / cookie file')\n except http.cookiejar.LoadError as e:\n log.warn('Session / cookie file exists, but could not be loaded', exc_info=e)\n\n return cj",
"def cookies(self):\n if not self.__loaded:\n self.__load()\n \n return self.allCookies()",
"def get_cookies(session):\n return requests.utils.dict_from_cookiejar(session.cookies)",
"def _cookies_for_request(self, request):\n cookies = []\n for domain in self._db.keys():\n cookies.extend(self._cookies_for_domain(domain, request))\n return cookies",
"def cookieJar(self, moreCookies=None):\n if moreCookies:\n self.cookies.extend(moreCookies)\n \n # TODO need some kind of urllib encode and join here\n #return self.cookies\n return \"; \".join([\"%s=%s\" % (k, v) for k, v in self.cookies])\n #return \"=\".join(['JSESSIONID', resultHeadersKv['JSESSIONID']])",
"def __extractCookies(self, cookieString):\n parts = cookieString.split(\"; \")\n \n data = {}\n \n for cookie in parts:\n keyValues = cookie.split(\"=\")\n data[keyValues[0]] = keyValues[1]\n \n return data\n #print resultHeadersKv\n # TODO sort this",
"def get_netscape_cookies(src, dst, verbose=False):\n if verbose:\n logging.set_root_level('INFO')\n lg = logging.get_logger(__name__, get_netscape_cookies.__name__)\n src = get_from_source(src)\n lg.info(colored(str(src), 'grey', 'on_white', ['bold']))\n cj = cookie.EzCookieJar()\n cj_kw = dict(ignore_expires=True, ignore_discard=True)\n cj.smart_load(src, **cj_kw)\n r = cj.get_netscape_text(**cj_kw)\n lg.info(colored(str(r), 'grey', None, ['bold']))\n give_to_sink(r, dst)"
] | [
"0.69890016",
"0.69329166",
"0.6925964",
"0.6896501",
"0.6833431",
"0.6477234",
"0.6418898",
"0.6413974",
"0.62408906",
"0.6213005",
"0.6211943",
"0.61870146",
"0.61656386",
"0.61045116",
"0.6030387",
"0.60081345",
"0.59826857",
"0.5955835",
"0.59539086",
"0.5894847",
"0.5893973",
"0.5869635",
"0.5867285",
"0.5864103",
"0.5857063",
"0.58299446",
"0.58193773",
"0.5817371",
"0.58127433",
"0.5810576"
] | 0.7160028 | 0 |
minimal html unescape function for quotes, <, >, and &. | def unescape(s):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace(""", '"')
s = s.replace("'", "'")
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unescape(t):\r\n return (t\r\n .replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">\", \">\")\r\n .replace(\"'\", \"´\").replace(\""\", '\"').replace(''',\"'\")\r\n )",
"def html_unescape(text):\n return html.unescape(text)",
"def unescape(s):\n\n\tif s is None:\n\t\treturn \"\"\n\n\t# html entities\n\ts = s.replace(\" \", \"\\r\")\n\n\t# standard html\n\ts = s.replace(\"<\", \"<\")\n\ts = s.replace(\">\", \">\")\n\ts = s.replace(\"&\", \"&\") # this has to be last\n\n\treturn s",
"def unescape_html(text):\n\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unicode_char(int(text[3:-1], 16))\n else:\n return unicode_char(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = unicode_char(htmlentities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n\n return re.sub(r\"&#?\\w+;\", fixup, text)",
"def xhtml_unescape(value):\r\n return re.sub(r\"&(#?)(\\w+?);\", _convert_entity, _unicode(value))",
"def unescape(s):\n return (\n s.replace(\"&\", \"&\")\n .replace(\"<\", \"<\")\n .replace(\">\", \">\")\n .replace(\""\", '\"')\n .replace(\"'\", \"'\")\n )",
"def __html_unescape(self, text):\n\n return re.sub(\"&(%s);\" % \"|\".join(name2codepoint),\n lambda m: unichr(name2codepoint[m.group(1)]),\n text)",
"def html_unescape(text):\n\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return chr(int(text[3:-1], 16))\n else:\n return chr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = chr(html.entities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def htmlunescape(value):\n\n retVal = value\n if value and isinstance(value, str):\n codes = ((\"<\", '<'), (\">\", '>'), (\""\", '\"'),\n (\" \", ' '), (\"&\", '&'), (\"'\", \"'\"))\n retVal = reduce(lambda x, y: x.replace(y[0], y[1]), codes, retVal)\n try:\n retVal = re.sub(\n r\"&#x([^ ;]+);\", lambda match: chr(int(match.group(1), 16)), retVal)\n except ValueError:\n pass\n return retVal",
"def HtmlUnescape(text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def unescape(text):\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == '&#':\r\n try:\r\n if text[:3] == '&#x':\r\n return unichr(int(text[3:-1], 16)).encode('utf-8')\r\n return unichr(int(text[2:-1])).encode('utf-8')\r\n except ValueError:\r\n logger.info('error de valor')\r\n\r\n else:\r\n try:\r\n import htmlentitydefs\r\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode('utf-8')\r\n except KeyError:\r\n logger.info('keyerror')\r\n except:\r\n pass\r\n\r\n return text\r\n\r\n return re.sub('&#?\\\\w+;', fixup, text)",
"def unescape(text):\n import re, htmlentitydefs\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character ref\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1],1))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n #named entity\n try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def unescape_html_entities(self, text):\n text = html.unescape(text)\n return text",
"def unescape(text):\n if isinstance(text, list):\n for i, t in enumerate(text):\n t = t.replace(r'&', r'\\&')\n t = t.replace(r'<', r'<')\n t = t.replace(r'>', r'>')\n text[i] = t\n else:\n text = text.replace(r'&', r'\\&')\n text = text.replace(r'<', r'<')\n text = text.replace(r'>', r'>')\n return text",
"def htmldecode(s):\n\ts = s.replace(\"<\", \"<\")\n\ts = s.replace(\">\", \">\")\n\ts = s.replace(\""\", \"\\\"\")\n\ts = s.replace(\"'\",\"'\")\n\ts = s.replace(\"&\", \"&\")\n\treturn s",
"def unescape(input):\n output=atpic.cleaner_escape.unescape(input)\n return output",
"def unescape(string, using_unicode=False):\n\t\t\n\tif using_unicode:\n\t\tsub_function = sub_from_html\n\telse:\n\t\tsub_function = lambda m: sub_from_html(m).encode('ascii', 'replace')\n\t\n\treturn re.sub(r\"&#?\\w+;\", sub_function, string)",
"def unescape_tweet(tweet):\r\n return html.unescape(tweet)",
"def _decode_html_entities(text: str) -> str:\n return html.unescape(text)",
"def filter_html(self, text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n print \"Value Error\"\n pass\n else:\n # named entity\n try:\n if text[1:-1] in (\"amp\",\"gt\",\"lt\"):\n return text\n else:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n print \"keyerror\"\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)",
"def unhtmlify(html):\n return unescape(re.sub(r'<.*?>', '', html))",
"def unescape(self, string):\r\n def convert(matches):\r\n text = matches.group(0)\r\n # Character reference\r\n if text[:2] == \"&#\":\r\n try:\r\n if text[:3] == \"&#x\":\r\n return H.unicode_chr(int(text[3:-1], 16))\r\n else:\r\n return H.unicode_chr(int(text[2:-1]))\r\n except ValueError:\r\n pass\r\n # Named entity\r\n else:\r\n try:\r\n # Following are not needed to be converted for XML\r\n if text[1:-1] == \"amp\" or text[1:-1] == \"gt\" or text[1:-1] == \"lt\":\r\n pass\r\n else:\r\n text = H.unicode_chr(name2codepoint[text[1:-1]])\r\n except KeyError:\r\n pass\r\n return text\r\n return re.sub(\"&#?\\w+;\", convert, string)",
"def test_unescape__ampersand(self) -> None:\n escaped: str = \"&\"\n unescaped: str = \"&\"\n\n self.assertEqual(first=unescape(value=escaped), second=unescaped)",
"def test_unescape__single_quote(self) -> None:\n escaped: str = \"'\"\n unescaped: str = \"'\"\n\n self.assertEqual(first=unescape(value=escaped), second=unescaped)",
"def UnescapeHTMLEntities(self, data):\n if '#39' not in htmlentitydefs.name2codepoint:\n htmlentitydefs.name2codepoint['#39'] = 39\n return re.sub('&(%s);' % '|'.join(htmlentitydefs.name2codepoint),\n lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]),\n data)",
"def cleaningHTML(text):\n # HTML-Entities decodieren\n h = html.parser.HTMLParser(convert_charrefs=True)\n text = h.unescape(text)\n \n # Geschützte Leerzeichen löschen\n text = re.sub('\\u00A0', \" \", text)\n text = re.sub(r'&', r'&', text)\n text = re.sub(r'<a .*?>', r'', text)\n text = re.sub(r'</a>', r'', text)\n return text",
"def escape_html_entity(text):\n parser = HTMLParser.HTMLParser()\n return parser.unescape(text)",
"def unescape(t):\n return re.sub(r'\\\\\\\\', r'\\\\', re.sub(r'\\\\%', '%', t)) if t else \"\"",
"def htmlquote(text):\r\n text = text.replace(\"&\", \"&\") # Must be done first!\r\n text = text.replace(\"<\", \"<\")\r\n text = text.replace(\">\", \">\")\r\n text = text.replace(\"'\", \"'\")\r\n text = text.replace('\"', \""\")\r\n return text",
"def decode_html_entities(html):\n def decode(m):\n html = m.group(0)\n if html[:2] == \"&#\":\n try:\n if html[:3] == \"&#x\":\n return unichr(int(html[3:-1], 16))\n else:\n return unichr(int(html[2:-1]))\n except ValueError:\n pass\n else:\n try:\n html = unichr(name2codepoint[html[1:-1]])\n except KeyError:\n pass\n return html\n return re.sub(\"&#?\\w+;\", decode, html.replace(\"&\", \"&\"))"
] | [
"0.79410106",
"0.7851378",
"0.77472293",
"0.76925045",
"0.76480716",
"0.7613853",
"0.759237",
"0.75842106",
"0.754697",
"0.75264806",
"0.74689436",
"0.74556875",
"0.7431712",
"0.7344698",
"0.73413193",
"0.7225569",
"0.718302",
"0.7119962",
"0.70668316",
"0.70210046",
"0.6974316",
"0.6938188",
"0.6890404",
"0.6870495",
"0.682901",
"0.6778073",
"0.6762256",
"0.6757571",
"0.66957265",
"0.66571057"
] | 0.79456407 | 0 |
Get the deconv configs using presets | def _get_deconv_cfg(self, deconv_kernel):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
else:
raise ValueError('Unsupported deconvolution kernel: {}'.format(deconv_kernel))
return deconv_kernel, padding, output_padding | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n\n return deconv_kernel, padding, output_padding",
"def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n return deconv_kernel, padding, output_padding",
"def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n return deconv_kernel, padding, output_padding",
"def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n return deconv_kernel, padding, output_padding",
"def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n return deconv_kernel, padding, output_padding",
"def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n return deconv_kernel, padding, output_padding",
"def decompose_layers(settings, cached_weights):\n # TODO this\n params = [];\n N = cached_weights.size - 1\n for layer in settings.conv_layer:\n params.append(cached_weights[N])\n if layer.HasField('rank'):\n print 'decomposing' \n P_struct = decompose_tensor(cached_weights[N - 1], layer.rank)\n params.append(P_struct)\n else:\n params.append(cached_weights[N - 1])\n N = N - 2\n for i in range(N, -1, -1):\n params.append(cached_weights[i])\n params.reverse() \n # for p in params: \n # print p\n print 'cached weights'\n for w in cached_weights:\n print w.size\n return params",
"def getDlsConfig():\n return [getDlConfig(beam) for beam in range(4)]",
"def get_deconv_resnet(base_network,\n pretrained=False,\n **kwargs):\n net = DeconvResnet(\n base_network=base_network,\n pretrained_backbone=pretrained,\n **kwargs)\n return net",
"def applyDemapping(self):\n pass",
"def presets(cls):\n return copy.deepcopy(backbone_presets)",
"def get_config(network, data_shape, **kwargs):\n if network == 'vgg16_reduced':\n if data_shape >= 448:\n from_layers = ['relu4_3', 'relu7', '', '', '', '', '']\n num_filters = [512, -1, 512, 256, 256, 256, 256]\n strides = [-1, -1, 2, 2, 2, 2, 1]\n pads = [-1, -1, 1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.15, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5,3,1./3], [1,2,.5], [1,2,.5]]\n normalizations = [20, -1, -1, -1, -1, -1, -1]\n steps = [] if data_shape != 512 else [x / 512.0 for x in\n [8, 16, 32, 64, 128, 256, 512]]\n else:\n from_layers = ['relu4_3', 'relu7', '', '', '', '']\n num_filters = [512, -1, 512, 256, 256, 256]\n strides = [-1, -1, 2, 2, 1, 1]\n pads = [-1, -1, 1, 1, 0, 0]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5], [1,2,.5]]\n normalizations = [20, -1, -1, -1, -1, -1]\n steps = [] if data_shape != 300 else [x / 300.0 for x in [8, 16, 32, 64, 100, 300]]\n if not (data_shape == 300 or data_shape == 512):\n logging.warn('data_shape %d was not tested, use with caucious.' % data_shape)\n return locals()\n elif network == 'inceptionv3':\n if data_shape >= 448:\n from_layers = ['ch_concat_mixed_7_chconcat', 'ch_concat_mixed_10_chconcat', '', '', '', '']\n num_filters = [-1, -1, 512, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n else:\n from_layers = ['ch_concat_mixed_2_chconcat', 'ch_concat_mixed_7_chconcat', 'ch_concat_mixed_10_chconcat', '', '', '']\n num_filters = [-1, -1, -1, 256, 256, 128]\n strides = [-1, -1, -1, 2, 2, 2]\n pads = [-1, -1, -1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n elif network == 'resnet50':\n num_layers = 50\n image_shape = '3,224,224' # resnet require it as shape check\n network = 'resnet'\n from_layers = ['_plus12', '_plus15', '', '', '', '']\n num_filters = [-1, -1, 512, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n elif network == 'resnet101':\n num_layers = 101\n image_shape = '3,224,224'\n network = 'resnet'\n from_layers = ['_plus29', '_plus32', '', '', '', '']\n num_filters = [-1, -1, 512, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n elif network == 'mobilenet':\n from_layers = ['conv_12_relu', 'conv_14_relu', '', '', '', '', '']\n num_filters = [-1, -1, 512, 256, 256, 256, 256]\n strides = [-1, -1, 2, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.15, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], 
[1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5,3,1./3], [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n elif network == 'mymodel' or network == 'mymodel2':\n if data_shape >= 512:\n from_layers = ['conv12_sep_relu', 'conv14_sep_relu', '', '', '', '', '']\n num_filters = [-1, -1, 256, 256, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.15, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5,3,1./3], [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n elif data_shape >= 256:\n from_layers = ['conv12_sep_relu', 'conv14_sep_relu', '', '', '', '']\n num_filters = [-1, -1, 256, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5,3,1./3], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n elif data_shape >= 128:\n from_layers = ['conv12_sep_relu', 'conv14_sep_relu', '', '', '']\n num_filters = [-1, -1, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5,3,1./3]]\n normalizations = -1\n steps = []\n return locals()\n elif data_shape >= 80:\n from_layers = ['conv12_sep_relu', 'conv14_sep_relu', '', '']\n num_filters = [-1, -1, 256, 128]\n strides = [-1, -1, 2, 2]\n pads = [-1, -1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3]]\n normalizations = -1\n steps = []\n return locals()\n elif data_shape >= 64:\n from_layers = ['conv12_sep_relu', 'conv14_sep_relu', '']\n num_filters = [-1, -1, 256]\n strides = [-1, -1, 2]\n pads = [-1, -1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3]]\n normalizations = -1\n steps = []\n return locals()\n else:\n from_layers = ['conv14_sep_relu', '']\n num_filters = [-1, 256]\n strides = [-1, 2]\n pads = [-1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3]]\n normalizations = -1\n steps = []\n return locals()\n elif network == 'densenet121':\n network = 'densenet'\n data_type = 'imagenet'\n units = [6, 12, 24, 16]\n num_stage = 4\n growth_rate = 32\n bottle_neck = True\n from_layers = ['DBstage3_concat24', 'DBstage4_concat16', '', '', '', '']\n num_filters = [-1, -1, 256, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n elif network == 'densenet-tiny':\n network = 'densenet'\n data_type = 'imagenet'\n units = [6, 12, 18, 12]\n num_stage = 4\n growth_rate = 16\n bottle_neck = True\n from_layers = ['DBstage2_concat12', 'DBstage3_concat18', '', '', '', '']\n num_filters = [-1, -1, 256, 256, 256, 128]\n strides = [-1, -1, 2, 2, 2, 2]\n pads = [-1, -1, 1, 1, 1, 1]\n sizes = get_scales(min_scale=0.2, max_scale=0.9, num_layers=len(from_layers))\n ratios = [[1,2,.5], 
[1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \\\n [1,2,.5], [1,2,.5]]\n normalizations = -1\n steps = []\n return locals()\n else:\n msg = 'No configuration found for %s with data_shape %d' % (network, data_shape)\n raise NotImplementedError(msg)",
"def get_cfg():\n return _assert_and_infer_cfg(_C.clone())",
"def presets (self):\n return AviH264.presets ()",
"def _construct_encoders_decoders(self):\n self.enc_inp = {}\n self.dec_out = {}\n if self.encode_hints:\n self.enc_hint = {}\n if self.decode_diffs:\n self.node_dec_diff = hk.Linear(1)\n self.edge_dec_diff = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n self.graph_dec_diff = (hk.Linear(1), hk.Linear(1))\n if self.decode_hints:\n self.dec_hint = {}\n\n for name in self.spec:\n stage, loc, t = self.spec[name]\n if stage == _Stage.INPUT:\n self.enc_inp[name] = [hk.Linear(self.hidden_dim)]\n if loc == _Location.EDGE and t == _Type.POINTER:\n # Edge pointers need two-way encoders\n self.enc_inp[name].append(hk.Linear(self.hidden_dim))\n\n elif stage == _Stage.OUTPUT:\n if loc == _Location.NODE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1),)\n elif t == _Type.CATEGORICAL:\n self.dec_out[name] = (hk.Linear(self.nb_dims[name]),)\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.EDGE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_out[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims),\n hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.GRAPH:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_out[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n else:\n raise ValueError('Incorrect location')\n\n elif stage == _Stage.HINT:\n if self.encode_hints:\n self.enc_hint[name] = [hk.Linear(self.hidden_dim)]\n if loc == _Location.EDGE and t == _Type.POINTER:\n # Edge pointers need two-way encoders\n self.enc_hint[name].append(hk.Linear(self.hidden_dim))\n\n if self.decode_hints:\n if loc == _Location.NODE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1),)\n elif t == _Type.CATEGORICAL:\n self.dec_hint[name] = (hk.Linear(self.nb_dims[name]),)\n elif t == _Type.POINTER:\n self.dec_hint[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.EDGE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_hint[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims),\n hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_hint[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.GRAPH:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_hint[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_hint[name] = 
(hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n else:\n raise ValueError('Incorrect location')",
"def getConfigs(self, host):\n raise \"not implemented\"",
"def get_caffe_configs(f_configs, header='NetOpts'):\n\n parser = Configurations.parse(f_configs)\n # Parse general options\n dic = dict(parser.items('net'))\n\n NetOpts = namedtuple(header, dic.keys())\n\n NetOpts.idim = int(dic['idim'])\n NetOpts.model_def = str(dic['model_def'])\n\n\n NetOpts.model_weights = str(dic['model_weights'])\n NetOpts.mode = str(dic['mode'])\n NetOpts.gpu_id = int(dic['gpu_id'])\n NetOpts.lfile = str(dic['lfile'])\n\n NetOpts.k = int(dic['k'])\n\n mdims = dic['mdims']\n mdims = mdims.replace('[', '').replace(']', '').split(',')\n\n NetOpts.mdims = np.array(\n [int(mdims[0]), int(mdims[1]), int(mdims[2]), int(mdims[3])]\n )\n\n sp_avg = dic['avg'].replace('[', '').replace(']', '').split(',')\n if len(sp_avg) == 3:\n NetOpts.avg = np.array([float(sp_avg[0]), float(sp_avg[1]), float(sp_avg[2])])\n else:\n NetOpts.avg = float(sp_avg)\n\n\n NetOpts.model_root = str(dic['model_root'])\n\n return NetOpts",
"def device_get_config(self, filters={}):\n return {}",
"def get_config(self):\n config = {'epsilon':self.eps}\n base_config = super(MatrixReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))",
"def get_config(seed, shot):\n if args.coco:\n # COCO\n assert args.two_stage, 'Only supports novel weights for COCO now'\n\n if args.novel_finetune:\n # Fine-tune novel classifier\n ITERS = {\n 1: (10000, 500),\n 2: (10000, 1500),\n 3: (10000, 1500),\n 5: (10000, 1500),\n 10: (10000, 2000),\n 30: (10000, 6000),\n }\n mode = 'novel'\n\n assert not args.fc and not args.unfreeze\n else:\n # Fine-tune entire classifier\n ITERS = {\n 1: (14400, 16000),\n 2: (28800, 32000),\n 3: (43200, 48000),\n 5: (72000, 80000),\n 10: (144000, 160000),\n 30: (216000, 240000),\n }\n mode = 'all'\n split = temp_split = ''\n temp_mode = mode\n\n config_dir = 'configs/COCO-detection'\n ckpt_dir = 'checkpoints/coco/faster_rcnn'\n base_cfg = '../../Base-RCNN-FPN.yaml'\n else:\n # PASCAL VOC\n assert not args.two_stage, 'Only supports random weights for PASCAL now'\n\n ITERS = {\n 1: (3500, 4000),\n 2: (7000, 8000),\n 3: (10500, 12000),\n 5: (17500, 20000),\n 10: (35000, 40000),\n }\n split = 'split{}'.format(args.split)\n mode = 'all{}'.format(args.split)\n # temp_split = 'split1'\n # temp_mode = 'all1'\n temp_split=split\n temp_mode = mode\n\n config_dir = 'configs/PascalVOC-detection'\n ckpt_dir = 'checkpoints/voc/faster_rcnn'\n base_cfg = '../../../Base-RCNN-FPN.yaml'\n\n seed_str = 'seed{}'.format(seed) if seed != 0 else ''\n fc = '_fc' if args.fc else ''\n unfreeze = '_unfreeze' if args.unfreeze else ''\n # Read an example config file for the config parameters\n temp = os.path.join(\n temp_split, 'faster_rcnn_R_101_FPN_ft{}_{}_1shot{}'.format(\n fc, temp_mode, unfreeze)\n )\n print('temp_file:', temp)\n config = os.path.join(args.root, config_dir, temp + '.yaml')\n print('config_file:', config)\n\n prefix = 'faster_rcnn_R_101_FPN_ft{}_{}_{}shot{}{}'.format(\n fc, mode, shot, unfreeze, args.suffix)\n print('prefix_file:', prefix)\n\n output_dir = os.path.join(args.root, ckpt_dir, seed_str)\n print('output_dir',output_dir)\n os.makedirs(output_dir, exist_ok=True)\n \n save_dir = os.path.join(\n args.root, config_dir, split, seed_str,\n )\n print('save_dir',save_dir)\n os.makedirs(save_dir, exist_ok=True)\n save_file = os.path.join(save_dir, prefix + '.yaml')\n print('save_file' , save_file)\n\n configs = load_yaml_file(config)\n print('reading from this config file ',config)\n configs['_BASE_'] = base_cfg\n configs['DATASETS']['TRAIN'] = make_tuple(configs['DATASETS']['TRAIN'])\n configs['DATASETS']['TEST'] = make_tuple(configs['DATASETS']['TEST'])\n if args.coco and not args.novel_finetune:\n ckpt_path = os.path.join(output_dir, prefix, 'model_reset_combine.pth')\n if not os.path.exists(ckpt_path):\n src2 = os.path.join(\n output_dir, 'faster_rcnn_R_101_FPN_ft_novel_{}shot{}'.format(\n shot, args.suffix),\n 'model_final.pth',\n )\n if not os.path.exists(src2):\n print('Novel weights do not exist. 
Please run with the ' + \\\n '--novel-finetune flag first.')\n assert False\n combine_cmd = 'python tools/ckpt_surgery.py --coco --method ' + \\\n 'combine --src1 checkpoints/coco/faster_rcnn/faster_rcnn' + \\\n '_R_101_FPN_base/model_final.pth --src2 {}'.format(src2) + \\\n ' --save-dir {}'.format(os.path.join(output_dir, prefix))\n run_cmd(combine_cmd)\n assert os.path.exists(ckpt_path)\n configs['MODEL']['WEIGHTS'] = ckpt_path\n elif not args.coco:\n configs['MODEL']['WEIGHTS'] = configs['MODEL']['WEIGHTS'].replace(\n 'base1', 'base' + str(args.split))\n for dset in ['TRAIN', 'TEST']:\n configs['DATASETS'][dset] = (\n configs['DATASETS'][dset][0].replace(\n temp_mode, 'all' + str(args.split)),\n )\n configs['DATASETS']['TRAIN'] = (\n configs['DATASETS']['TRAIN'][0].replace(\n '1shot', str(shot) + 'shot'\n ) + ('_{}'.format(seed_str) if seed_str != '' else ''),\n )\n configs['SOLVER']['BASE_LR'] = args.lr\n configs['SOLVER']['MAX_ITER'] = ITERS[shot][1]\n configs['SOLVER']['STEPS'] = (ITERS[shot][0],)\n configs['SOLVER']['CHECKPOINT_PERIOD'] = ITERS[shot][1] // args.ckpt_freq\n configs['OUTPUT_DIR'] = os.path.join(output_dir, prefix)\n\n if seed != 0:\n with open(save_file, 'w') as fp:\n yaml.dump(configs, fp)\n\n return save_file, configs",
"def create_decoder_config(args: argparse.Namespace,\n encoder_num_hidden: int,\n max_seq_len_source: int,\n max_seq_len_target: int,\n num_embed_target: int) -> decoder.DecoderConfig:\n _, decoder_num_layers = args.num_layers\n\n _, decoder_transformer_preprocess = args.transformer_preprocess\n _, decoder_transformer_postprocess = args.transformer_postprocess\n _, decoder_transformer_model_size = args.transformer_model_size\n\n total_target_factor_size = 0\n for factor_combine, factor_size in zip(args.target_factors_combine, args.target_factors_num_embed):\n if factor_combine == C.FACTORS_COMBINE_CONCAT:\n total_target_factor_size += factor_size\n if total_target_factor_size > 0:\n logger.info(\"Decoder transformer-model-size adjusted to account for target factor embeddings: %d -> %d\" % (\n decoder_transformer_model_size, num_embed_target + total_target_factor_size))\n decoder_transformer_model_size = num_embed_target + total_target_factor_size\n\n config_decoder = transformer.TransformerConfig(\n model_size=decoder_transformer_model_size,\n attention_heads=args.transformer_attention_heads[1],\n feed_forward_num_hidden=args.transformer_feed_forward_num_hidden[1],\n act_type=args.transformer_activation_type[1],\n num_layers=decoder_num_layers,\n dropout_attention=args.transformer_dropout_attention[1],\n dropout_act=args.transformer_dropout_act[1],\n dropout_prepost=args.transformer_dropout_prepost[1],\n positional_embedding_type=args.transformer_positional_embedding_type,\n preprocess_sequence=decoder_transformer_preprocess,\n postprocess_sequence=decoder_transformer_postprocess,\n max_seq_len_source=max_seq_len_source,\n max_seq_len_target=max_seq_len_target,\n use_lhuc=args.lhuc is not None and (C.LHUC_DECODER in args.lhuc or C.LHUC_ALL in args.lhuc),\n depth_key_value=encoder_num_hidden,\n decoder_type=args.decoder,\n use_glu=args.transformer_feed_forward_use_glu)\n\n return config_decoder",
"def gather_configs(self):\n configs = []\n for what in self.order:\n for key in self.plugins[what]:\n mgr = self.plugins[what][key]\n c = mgr.config(what='get')\n if c is not None:\n c.update({\n 'description': mgr.description\n })\n # print(\"Gathering configuration from \", c)\n configs.append(c)\n return configs",
"def resnet18_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet18_v1b', **kwargs)",
"def resnet101_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet101_v1b', **kwargs)",
"def allPresets():\n\n return [__cleanPresetTreeName(k) for k in __preset_lookup.iterkeys()]",
"def get_config(self):\n layer_config = {\n \"anchors\": self._anchors, \n \"classes\": self._classes,\n \"ignore_thresh\": self._ignore_thresh, \n \"truth_thresh\": self._truth_thresh, \n \"iou_thresh\": self._iou_thresh, \n \"loss_type\": self._loss_type, \n \"iou_normalizer\": self._iou_normalizer,\n \"cls_normalizer\": self._cls_normalizer, \n \"scale_x_y\": self._scale_x_y, \n }\n layer_config.update(super().get_config())\n return layer_config",
"def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", \"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": {\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", \"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict",
"def resnet50_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet50_v1b', **kwargs)",
"def convert_deconvolution(node, **kwargs):\n name, inputs, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n adj_dims = list(parse_helper(attrs, \"adj\", [0, 0]))\n\n pad_dims = pad_dims + pad_dims\n\n deconv_node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=inputs,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n output_padding=adj_dims,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [deconv_node]",
"def builtin_predictor_configs(self):\n return self.predictor_configs.keys()"
] | [
"0.6020298",
"0.59467304",
"0.59467304",
"0.59467304",
"0.59467304",
"0.59467304",
"0.56355107",
"0.53508013",
"0.5306479",
"0.52967423",
"0.5278897",
"0.521289",
"0.5203198",
"0.51874495",
"0.5179428",
"0.51582676",
"0.51260453",
"0.51042265",
"0.50966513",
"0.5086822",
"0.50807023",
"0.5064194",
"0.5027904",
"0.50150627",
"0.5013705",
"0.50123036",
"0.5009906",
"0.50018674",
"0.49892306",
"0.49794036"
] | 0.6031522 | 0 |
Make deconv layers using the configs | def _make_deconv_layer(self, num_filters, num_kernels):
assert len(num_kernels) == len(num_filters), \
'Deconv filters and kernels number mismatch: {} vs. {}'.format(
len(num_filters), len(num_kernels))
layers = nn.HybridSequential('deconv_')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.base_network.initialize()
in_planes = self.base_network(mx.nd.zeros((1, 3, 256, 256))).shape[1]
for planes, k in zip(num_filters, num_kernels):
kernel, padding, output_padding = self._get_deconv_cfg(k)
layers.add(nn.Conv2D(channels=planes,
kernel_size=3,
strides=1,
padding=1,
in_channels=in_planes))
layers.add(nn.BatchNorm())
layers.add(nn.Activation('relu'))
layers.add(nn.Conv2DTranspose(channels=planes,
kernel_size=kernel,
strides=2,
padding=padding,
output_padding=output_padding,
use_bias=False,
in_channels=planes,
weight_initializer=BilinearUpSampleInitializer()))
layers.add(nn.BatchNorm())
layers.add(nn.Activation('relu'))
in_planes = planes
return layers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deconv_layer(self, inputs, field_size, channels_size,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape = tf.shape(inputs)\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack([shape[0], height, width, channels_size[0]])\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, height, width, channels_size[0]],\n [1, 1, 1, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n if act_func == None:\n output = conv_bias\n else:\n output = act_func(conv_bias)\n #set_shape does not accept tensor\n #output.set_shape([batch, height, width, channels_size[0]])\n #this sets first size to none. why? Not used.\n #output = tf.reshape(output, target_shape_tensor)\n\n return output",
"def __init__(self, incoming, W=None, b=tf.zeros, ksize: int = None, num_outputs: int = None,\n weight_initializer=None, a=tf.nn.elu, output_shape=None, strides=(1, 2, 2, 1), padding='SAME',\n data_format='NHWC',\n name='DeConvLayer'):\n super(DeConvLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n # Set init for W and b\n if all(p is not None for p in [weight_initializer, ksize, num_outputs]):\n W = tofov(weight_initializer,\n shape=(ksize, ksize, num_outputs, incoming.get_output_shape()[-1]),\n var_params=dict(name='W_deconv'))\n else:\n W = tofov(W, shape=None, var_params=dict(name='W_deconv'))\n b = tofov(b, shape=W.get_shape().as_list()[-2], var_params=dict(name='b_deconv'))\n \n if output_shape is None:\n if padding == 'SAME' and strides[0] == 1:\n if len(self.incoming_shape) == 5:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1],\n self.incoming_shape[2] * strides[1], self.incoming_shape[3] * strides[2],\n W.get_shape().as_list()[-2] * strides[3]]\n else:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1] * strides[1],\n self.incoming_shape[2] * strides[2], W.get_shape().as_list()[-2] * strides[3]]\n else:\n raise AttributeError(\"Automatic output_shape calculation not implemented for strides!=1 in \"\n \"first dimension\")\n \n if isinstance(padding, int):\n if len(self.incoming_shape) == 5:\n self.padding = [[0, 0], [0, 0], [padding, padding], [padding, padding], [0, 0]]\n elif len(self.incoming_shape) == 4:\n self.padding = [[0, 0], [padding, padding], [padding, padding], [0, 0]]\n else:\n raise ValueError(\"invalid input shape\")\n else:\n self.padding = padding\n \n self.a = a\n self.b = b\n self.W = W\n \n self.output_shape = output_shape\n self.strides = strides\n \n self.data_format = data_format\n \n self.out = None\n self.name = name",
"def deconv(depth, nfilter, ksize=3, stride=1, \r\n pad_in=0, pad_out=0, groups=1,\r\n dilation=1, pad_mode='zeros',\r\n bias=True, lrelu=None):\r\n assert (depth>0 and nfilter>0 and ksize>0 and ksize%2==1 and \r\n stride>0 and pad_in>=0 and pad_out>=0 and dilation>=1 and\r\n groups>=1 and depth%groups==0 and nfilter%groups==0)\r\n deconv_ = nn.ConvTranspose2d(depth, nfilter, ksize, stride, \r\n pad_in, pad_out, groups, bias, dilation,\r\n pad_mode)\r\n if lrelu is not None:\r\n deconv_ = nn.Sequential(deconv_, \r\n nn.LeakyReLU(lrelu, inplace=True))\r\n return deconv_",
"def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n layers = []\n # append transpose conv layer\n # TODO: shouldn't we set bias to NOT batch_norm instead of always being False ?\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n # optional batch norm layer\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, normalization=True, norm_type='instance_norm'):\n layers = []\n # append transpose conv layer\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n\n # optional normalization layer\n if normalization == True and norm_type == 'instance_norm':\n layers.append(nn.InstanceNorm2d(out_channels))\n elif normalization == True and norm_type == 'batch_norm':\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def deconv(dims, inplanes, outplanes, kernel_size, stride, bias, dilation):\n padding = math.floor((kernel_size-stride+1)/2)\n if dims==2:\n return nn.ConvTranspose2d(inplanes, outplanes, kernel_size, stride,\n padding=padding, bias=bias) #, dilation=1)\n elif dims==3:\n return nn.ConvTranspose3d(inplanes, outplanes, kernel_size, stride,\n padding = padding, bias=bias) #, dilation=1)\n else:\n raise ValueError('dimension of deconv must be 2 or 3')",
"def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization(momentum=0.8)(u)\n u = Concatenate()([u, skip_input])\n return u",
"def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u\n\n # Image input",
"def deconv_layer(self, dtype,\n N, C, K,\n M, P, Q,\n T=1, R=1, S=1,\n pad_d=0, pad_h=0, pad_w=0,\n str_d=1, str_h=1, str_w=1,\n dil_d=1, dil_h=1, dil_w=1):\n return layer_mkl.DeconvLayerMKL(self, dtype, N, C, K, M, P, Q, T, R, S,\n pad_d, pad_h, pad_w, str_d, str_h, str_w,\n dil_d, dil_h, dil_w)",
"def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u",
"def test_deconv():\n\n # filter params\n R, S = 5, 5\n fshape = (R, S, 1)\n strides = 2\n filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)\n filter_val = np.zeros(fshape)\n filter_val[:, :, 0] = filter_val_nz\n\n deconv = Deconvolution(fshape,\n filter_init=ConstantInit(filter_val),\n strides=strides,\n padding=0,\n dilation=1)\n\n N = ng.make_axis(name='N', length=1) # batch\n image_shape = (1, 8, 8) # CHW\n image_axes = ng.make_axes([ng.make_axis(name=nm, length=l)\n for nm, l in zip('CHW', image_shape)])\n image_axes |= N\n image = ng.placeholder(axes=image_axes)\n\n output = deconv(image)\n\n with closing(ngt.make_transformer()) as transformer:\n comp = transformer.add_computation(ng.computation(output, image))\n input_val = np.zeros(image_shape + (N.length, ), dtype=float)\n input_val[0, 0, 0] = 1\n input_val[0, 5, 5] = 1\n input_val[0, 7, 7] = 1\n result = comp(input_val)\n feature_map = np.squeeze(result)\n\n assert (feature_map[:5, :5] == filter_val_nz).all()\n\n result2 = filter_val_nz.copy()\n result2[-1, -1] = 26\n assert (feature_map[10:15, 10:15] == result2).all()\n\n result3 = filter_val_nz.copy()\n result3[0, 0] = 26\n assert (feature_map[-5:, -5:] == result3).all()",
"def deconv(inp):\n num_filters = inp.get_shape().as_list()[-1]\n\n x = Conv2DTranspose(\n filters=num_filters,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_uniform\",\n )(inp)\n x = BatchNormalization()(x)\n x = Activation(\"elu\")(x)\n\n return x",
"def deconv(\n in_channels,\n out_channels,\n kernel_size,\n stride=2,\n padding=1,\n batch_norm=True,\n):\n layers = []\n layers.append(\n nn.ConvTranspose2d(\n in_channels, out_channels, kernel_size, stride, padding, bias=False\n )\n )\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def deconv2d(layer_input, filters, f_size=8, dropout_rate=0,permanent=False):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate and not permanent:\n u = Dropout(dropout_rate)(u)\n elif dropout_rate and permanent:\n # permanent droput from my main man fchollet <3\n u=Lambda(lambda x: K.dropout(x, level=dropout_rate))(u) \n \n u = BatchNormalization(momentum=0.8)(u)\n return u",
"def deconv2d(input_, \n output_dims,\n k_h=5, \n k_w=5,\n d_h=2,\n d_w=2,\n stddev=0.02,\n name='deconv2d',\n with_w=False):\n \n with tf.variable_scope(name):\n # filter : [height, width, output_channels, in_channels]\n w = tf.get_variable('w',\n [k_h, k_w, output_dims[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n\n try:\n deconv = tf.nn.conv2d_transpose(input_,\n w, \n output_shape=output_dims,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_,\n w, \n output_shape=output_dims,\n strides=[1, d_h, d_w, 1])\n\n biases = tf.get_variable('biases', [output_dims[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n return deconv, w, biases\n else:\n return deconv",
"def deconv(self, input_layer, num_filters, filter_size,\n filter_strides=(2,2), padding='SAME',\n activation=None, use_batch_norm=None):\n num_inputs = input_layer.get_shape().as_list()[1]\n ih, iw = input_layer.get_shape().as_list()[2:]\n output_shape = [-1, num_filters,\n ih*filter_strides[0], iw*filter_strides[1]]\n kernel_shape = [filter_size[0], filter_size[1],\n num_filters, num_inputs]\n strides = [1, 1, filter_strides[0], filter_strides[1]]\n with tf.variable_scope(self._count_layer('deconv')) as scope:\n kernel = self._get_variable('weights', kernel_shape,\n input_layer.dtype)\n x = tf.nn.conv2d_transpose(input_layer, kernel, output_shape,\n strides, padding=padding,\n data_format='NCHW')\n x = self._bias_or_batch_norm(x, scope, use_batch_norm)\n x = self.activate(x, activation)\n return x",
"def convert_deconvolution(node, **kwargs):\n name, inputs, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n adj_dims = list(parse_helper(attrs, \"adj\", [0, 0]))\n\n pad_dims = pad_dims + pad_dims\n\n deconv_node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=inputs,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n output_padding=adj_dims,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [deconv_node]",
"def deconv2d(layer_input,num=256):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(num, kernel_size=3, strides=1, padding='same')(u)\n u = Activation('relu')(u)\n return u",
"def deconv_layer_with_stride(self, inputs, field_size, channels_size, stride,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape0 = tf.shape(inputs)[0]\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack(\n # [shape0, stride*height, stride*width, channels_size[0]])\n\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, stride*height, stride*width, channels_size[0]],\n [1, stride, stride, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n output = act_func(conv_bias)\n #output.set_shape([batch, stride*height, stride*width, channels_size[0]])\n\n return output",
"def create_vgg_down_block(filters, in_features, activation, kernel_size=3, strategy='stride', batch_norm=False):\n layers = []\n if isinstance(filters, int):\n filters = [filters]\n\n if not isinstance(filters, list):\n raise ValueError(\"Filters has to be a list\")\n\n in_feat = in_features\n for n in range(len(filters))[:-1]:\n out_feat = filters[n]\n conv = Conv2d_same(in_features=in_feat, out_features=out_feat, activation=activation, kernel_size=kernel_size, batch_norm=batch_norm)\n layers.append(conv)\n in_feat = out_feat\n\n layers.append(Conv2d_Down(in_features=in_feat, out_features=filters[-1], activation=activation, kernel_size=kernel_size, strategy=strategy, batch_norm=batch_norm))\n out_feat = filters[-1]\n return layers, out_feat",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def conv_dn(in_planes, out_planes):\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1),\n nn.ReLU(inplace=True)\n )",
"def discriminator_block(in_filters, out_filters, f_size=4, normalize=True,stride=2):\n layers = [nn.Conv2d(in_filters, out_filters, f_size, stride=stride, padding=0)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers",
"def deconv_block(input_tensor: tf.Tensor, features: int, name: str) -> tf.Tensor:\n out = input_tensor\n\n out = KL.Conv2D(\n int(features // 2),\n 1,\n strides=(1, 1),\n name=name + f\"_c{1}\",\n )(input_tensor)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n out = KL.Conv2DTranspose(\n int(features // 2),\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n name=name + f\"_d\",\n )(out)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n out = KL.Conv2D(\n features,\n 1,\n strides=(1, 1),\n name=name + f\"_c{2}\",\n )(out)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n return out",
"def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n\n return deconv_kernel, padding, output_padding",
"def DenseNet_decoder(input_tensor,\r\n skip_connection,\r\n network_name,\r\n output_dim,\r\n fpn_d,\r\n train_bn=False):\r\n bn_axis = 3 if K.image_data_format() == 'channels_last' else 1\r\n\r\n R1, R2, R3, R4 = skip_connection\r\n DC4 = x = deconv_block(input_tensor, R4, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv5', train_bn=train_bn)\r\n DC3 = x = deconv_block(x, R3, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv4', train_bn=train_bn)\r\n DC2 = x = deconv_block(x, R2, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv3', train_bn=train_bn)\r\n DC1 = x = deconv_block(x, R1, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv2', train_bn=train_bn)\r\n # We should get 256*256*64 at DC1\r\n x = deconv_block(x, None, network_name=network_name+'_deconv1', fpn_d=fpn_d, train_bn=train_bn)\r\n \r\n # FPN\r\n up4 = Conv2D(fpn_d, (1, 1), padding='same', name=network_name+'_up4_conv', use_bias=True)(DC4)\r\n up4 = Activation('relu', name=network_name+'_up4_relu')(up4)\r\n up4 = UpSampling2D(data_format=K.image_data_format(), name=network_name+'_up4_up')(up4)\r\n [_, up3, out_8] = fpn_side_output_block(DC3, up4, block_name='3', output_shape=output_dim,\r\n network_name=network_name, d=fpn_d,\r\n train_bn=train_bn, use_bias=True)\r\n [_, up2, out_4] = fpn_side_output_block(DC2, up3, block_name='2', output_shape=output_dim,\r\n network_name=network_name, d=fpn_d,\r\n train_bn=train_bn, use_bias=True)\r\n [_, up1, out_2] = fpn_side_output_block(DC1, up2, block_name='1', output_shape=output_dim,\r\n network_name=network_name, d=fpn_d,\r\n train_bn=train_bn, use_bias=True)\r\n [add0, _, out] = fpn_side_output_block(x, up1, block_name='0', up_output=False, d=fpn_d,\r\n network_name=network_name, output_shape=output_dim,\r\n train_bn=train_bn, use_bias=True)\r\n\r\n return [out, out_2, out_4, out_8, add0]",
"def _make_conv_layers(self):\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=1), # padding=3 so, output is 224.\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, padding=1), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def _get_deconv_cfg(self, deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError('Unsupported deconvolution kernel: {}'.format(deconv_kernel))\n\n return deconv_kernel, padding, output_padding",
"def Conv_DW(filters, *args, **kwargs):\n no_bias_kwargs = {\"use_bias\": False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(\n filters=filters, kernel_size=(3, 3), *args, **no_bias_kwargs\n ),\n # DarknetConv2D(filters=filters, kernel_size=(1, 1), *args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1),\n )",
"def conv2d(layer_input, filters, f_size=4, bn=True):\n d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n d = LeakyReLU(alpha=.2)(d)\n if bn:\n d = BatchNormalization(momentum=0.8)(d)\n return d"
] | [
"0.70237195",
"0.69309855",
"0.68351084",
"0.6832376",
"0.671199",
"0.67101246",
"0.66220206",
"0.6619228",
"0.6604463",
"0.6599641",
"0.65911293",
"0.6588114",
"0.65853435",
"0.65623236",
"0.6389578",
"0.63730615",
"0.6334144",
"0.63175875",
"0.6274826",
"0.6265299",
"0.62592715",
"0.6197759",
"0.61822015",
"0.6180801",
"0.6149698",
"0.6143868",
"0.61424065",
"0.61401194",
"0.6132785",
"0.61123747"
] | 0.75099677 | 0 |
Get resnet with deconv layers. | def get_deconv_resnet(base_network,
pretrained=False,
**kwargs):
net = DeconvResnet(
base_network=base_network,
pretrained_backbone=pretrained,
**kwargs)
return net | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resnet50_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet50_v1b', **kwargs)",
"def resnet18_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet18_v1b', **kwargs)",
"def resnet101_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet101_v1b', **kwargs)",
"def resnet():\n return models.resnet152(pretrained=True)",
"def _make_deconv_layer(self, num_filters, num_kernels):\n assert len(num_kernels) == len(num_filters), \\\n 'Deconv filters and kernels number mismatch: {} vs. {}'.format(\n len(num_filters), len(num_kernels))\n\n layers = nn.HybridSequential('deconv_')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n self.base_network.initialize()\n in_planes = self.base_network(mx.nd.zeros((1, 3, 256, 256))).shape[1]\n for planes, k in zip(num_filters, num_kernels):\n kernel, padding, output_padding = self._get_deconv_cfg(k)\n layers.add(nn.Conv2D(channels=planes,\n kernel_size=3,\n strides=1,\n padding=1,\n in_channels=in_planes))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n layers.add(nn.Conv2DTranspose(channels=planes,\n kernel_size=kernel,\n strides=2,\n padding=padding,\n output_padding=output_padding,\n use_bias=False,\n in_channels=planes,\n weight_initializer=BilinearUpSampleInitializer()))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n in_planes = planes\n\n return layers",
"def backward_deconvnet_relu(x):\n def grad(dy):\n return tf.nn.relu(dy)\n return tf.nn.relu(x), grad",
"def _get_resnet_fc_layer(self):\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tfc_layer= None\n\t\tfor ww_layer in layer_iterator:\n\t\t\tprint(ww_layer.name)\n\t\t\tif ww_layer.name=='fc':\n\t\t\t\tfc_layer = ww_layer\n\t\t\n\t\treturn fc_layer",
"def deconv(inp):\n num_filters = inp.get_shape().as_list()[-1]\n\n x = Conv2DTranspose(\n filters=num_filters,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_uniform\",\n )(inp)\n x = BatchNormalization()(x)\n x = Activation(\"elu\")(x)\n\n return x",
"def deconv(depth, nfilter, ksize=3, stride=1, \r\n pad_in=0, pad_out=0, groups=1,\r\n dilation=1, pad_mode='zeros',\r\n bias=True, lrelu=None):\r\n assert (depth>0 and nfilter>0 and ksize>0 and ksize%2==1 and \r\n stride>0 and pad_in>=0 and pad_out>=0 and dilation>=1 and\r\n groups>=1 and depth%groups==0 and nfilter%groups==0)\r\n deconv_ = nn.ConvTranspose2d(depth, nfilter, ksize, stride, \r\n pad_in, pad_out, groups, bias, dilation,\r\n pad_mode)\r\n if lrelu is not None:\r\n deconv_ = nn.Sequential(deconv_, \r\n nn.LeakyReLU(lrelu, inplace=True))\r\n return deconv_",
"def deconv_layer(self, inputs, field_size, channels_size,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape = tf.shape(inputs)\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack([shape[0], height, width, channels_size[0]])\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, height, width, channels_size[0]],\n [1, 1, 1, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n if act_func == None:\n output = conv_bias\n else:\n output = act_func(conv_bias)\n #set_shape does not accept tensor\n #output.set_shape([batch, height, width, channels_size[0]])\n #this sets first size to none. why? Not used.\n #output = tf.reshape(output, target_shape_tensor)\n\n return output",
"def _get_resnet_fc_layer(self):\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\t\n\t\tfc_layer = ww_layer\n\t\t\n\t\treturn fc_layer",
"def conv_decoder(encoder_output):\n namescope = 'conv_decoder'\n with tf.variable_scope(namescope):\n net = tf.layers.conv2d(encoder_output,\n filters=256,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=tf.nn.elu)\n net = tf.layers.conv2d(net,\n filters=C,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=None)\n return net",
"def get_resnet_v1(input_shape, depth, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=7)(x)\n # x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model",
"def modified_resnet10(self) -> torch.nn.Module:\n # initialize a Resnet-10 instance\n net = torchvision.models.resnet._resnet(arch=\"resnet10\", block=torchvision.models.resnet.BasicBlock, layers=[1, 1, 1, 1], pretrained=False, progress=False)\n\n # the first layer will be a lazy convolutional layer with any input channels\n net.conv1 = torch.nn.LazyConv2d(\n out_channels=64,\n kernel_size=(7, 7),\n stride=(2, 2),\n padding=(3, 3),\n bias=not self.bn_affine\n )\n\n # modify batch-norm layer to have momentum 1 and no tracking statistics\n net.bn1 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer1[0].bn1 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer1[0].bn2 = torch.nn.BatchNorm2d(64, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer2[0].bn1 = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer2[0].bn2 = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer2[0].downsample[1] = torch.nn.BatchNorm2d(128, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer3[0].bn1 = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer3[0].bn2 = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer3[0].downsample[1] = torch.nn.BatchNorm2d(256, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n net.layer4[0].bn1 = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer4[0].bn2 = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n net.layer4[0].downsample[1] = torch.nn.BatchNorm2d(512, momentum=1, track_running_stats=False, affine=self.bn_affine)\n\n # last layer\n if self.dim_output is not None:\n net.fc = torch.nn.LazyLinear(out_features=self.dim_output)\n else:\n net.fc = torch.nn.Identity()\n\n # add dropout-2d after layers 1, 2, and 3\n net.maxpool.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer1[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer1[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer1.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer2[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer2[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer2.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer3[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer3[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer3.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n net.layer4[0].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n # net.layer4[1].add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n net.layer4.add_module(name='dropout2d', module=torch.nn.Dropout2d(p=self.dropout_prob))\n\n return net",
"def deconv_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 256, activation=tf.nn.relu)\n d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)\n d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64])\n d3 = tf.layers.conv2d_transpose(\n inputs=d2_reshaped,\n filters=64,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d4 = tf.layers.conv2d_transpose(\n inputs=d3,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d5 = tf.layers.conv2d_transpose(\n inputs=d4,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n d6 = tf.layers.conv2d_transpose(\n inputs=d5,\n filters=output_shape[2],\n kernel_size=4,\n strides=2,\n padding=\"same\",\n )\n return tf.reshape(d6, [-1] + output_shape)",
"def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, normalization=True, norm_type='instance_norm'):\n layers = []\n # append transpose conv layer\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n\n # optional normalization layer\n if normalization == True and norm_type == 'instance_norm':\n layers.append(nn.InstanceNorm2d(out_channels))\n elif normalization == True and norm_type == 'batch_norm':\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def conv_dn(in_planes, out_planes):\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1),\n nn.ReLU(inplace=True)\n )",
"def regi_net_core_deconv(vol_size, enc_nf, dec_nf, full_size=True, src=None, tgt=None, src_feats=1, tgt_feats=1):\n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n deconv_layer = getattr(KL, 'DeConv%dD' % ndims)\n\n # inputs\n if src is None:\n src = Input(shape=[*vol_size, src_feats])\n if tgt is None:\n tgt = Input(shape=[*vol_size, tgt_feats])\n x_in = concatenate([src, tgt])\n \n\n # down-sample path (encoder)\n x_enc = [x_in]\n for i in range(len(enc_nf)):\n x_enc.append(conv_block(x_enc[-1], enc_nf[i], 2))\n\n # transform the results into a flow field.\n Conv = getattr(KL, 'Conv%dD' % ndims)\n # up-sample path (decoder)\n x = conv_block(x_enc[-1], dec_nf[0])\n# flow0 = Conv(ndims, kernel_size=3, padding='same', name='flow0',\n# kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n x = deconv_layer()(x)\n x = concatenate([x, x_enc[-2]])\n# flowU = upsample_layer()(flow0)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = conv_block(x, dec_nf[1])\n flow1 = Conv(ndims, kernel_size=3, padding='same', name='flow1',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n x = deconv_layer()(x)\n x = concatenate([x, x_enc[-3]])\n# flowU = upsample_layer()(flow1)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = conv_block(x, dec_nf[2])\n flow2 = Conv(ndims, kernel_size=3, padding='same', name='flow2',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n x = deconv_layer()(x)\n x = concatenate([x, x_enc[-4]])\n# flowU = upsample_layer()(flow2)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = conv_block(x, dec_nf[3])\n x = conv_block(x, dec_nf[4])\n flow3 = Conv(ndims, kernel_size=3, padding='same', name='flow3',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n \n # only upsampleto full dim if full_size\n # here we explore architectures where we essentially work with flow fields \n # that are 1/2 size \n if full_size:\n x = deconv_layer()(x)\n# flowU = upsample_layer()(flow3)\n# x = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([x, flowU])\n x = concatenate([x, x_enc[0]])\n x = conv_block(x, dec_nf[5])\n\n # optional convolution at output resolution (used in voxelmorph-2)\n if len(dec_nf) == 7:\n x = conv_block(x, dec_nf[6])\n \n flow4 = Conv(ndims, kernel_size=3, padding='same', name='flow4',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)\n\n # warp the source with the flow\n\n return Model(inputs=[src, tgt], outputs=[flow1,flow2,flow3,flow4])",
"def deconv_layer(self, dtype,\n N, C, K,\n M, P, Q,\n T=1, R=1, S=1,\n pad_d=0, pad_h=0, pad_w=0,\n str_d=1, str_h=1, str_w=1,\n dil_d=1, dil_h=1, dil_w=1):\n return layer_mkl.DeconvLayerMKL(self, dtype, N, C, K, M, P, Q, T, R, S,\n pad_d, pad_h, pad_w, str_d, str_h, str_w,\n dil_d, dil_h, dil_w)",
"def deconv2d(layer_input,num=256):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(num, kernel_size=3, strides=1, padding='same')(u)\n u = Activation('relu')(u)\n return u",
"def residual_block(layer_input, filters=512, down_filter=False, normalization=False):\n\td1 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n\tif normalization:\n\t\t# d = InstanceNormalization()(d)\n\t\td1 = BatchNormalization(momentum=0.8)(d1) # 6/6/2018: use it for CT # 6/5/2018: remove it for MNIST\n\td1 = Activation('relu')(d1)\n\td2 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d1)\n\tif normalization:\n\t\t# d = InstanceNormalization()(d)\n\t\td2 = BatchNormalization(momentum=0.8)(d2) # 6/6/2018: use it for CT # 6/5/2018: remove it for MNIST\n\tif down_filter:\n\t\td = Add()([d1, d2])\n\telse:\n\t\td = Add()([d2, layer_input])\n\treturn d",
"def __init__(self, opt, input_nc=3, output_nc=256, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False,\n n_blocks=6, padding_type='reflect'):\n assert (n_blocks >= 0)\n super(ResnetFilter, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n if i == n_blocks - 1:\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,\n use_dropout=use_dropout, use_bias=use_bias, last=True)]\n else:\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,\n use_dropout=use_dropout, use_bias=use_bias)]\n\n if opt.use_tanh:\n model += [nn.Tanh()]\n self.model = nn.Sequential(*model)",
"def build_resnet(depth, input_layer, n_classes, activation='relu'):\n num_conv_layers = 1\n num_add_layers = 1\n if (depth - 2) % 6 != 0:\n raise ValueError( 'depth should be 6n+2 (eg 20, 32, 44 in [a])' )\n # Start model definition.\n num_filters = 16\n num_res_blocks = int( (depth - 2) / 6 )\n\n inputs = input_layer\n x = resnet_layer( inputs=inputs, layer_num=num_conv_layers )\n num_conv_layers += 1\n # Instantiate the stack of residual units\n for stack in range( 3 ):\n for res_block in range( num_res_blocks ):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer( inputs=x,\n num_filters=num_filters,\n activation=activation,\n strides=strides,\n layer_num=num_conv_layers,\n weight_decay=l2(1e-4) )\n\n num_conv_layers += 1\n y = resnet_layer( inputs=y,\n num_filters=num_filters,\n activation=None,\n layer_num=num_conv_layers,\n weight_decay=l2(1e-4) )\n num_conv_layers += 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer( inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False,\n layer_num=num_conv_layers,\n weight_decay=l2(1e-4) )\n num_conv_layers += 1\n x = keras.layers.add( [x, y], name='add_%d' % num_add_layers )\n num_add_layers += 1\n x = Activation( activation )( x )\n num_filters *= 2\n\n # Add classifier on top.\n x = AveragePooling2D( pool_size=8 )( x )\n y = Flatten()( x )\n outputs = Dense( n_classes,\n activation='softmax',\n kernel_initializer='he_normal',\n name='classification' )( y )\n\n # Instantiate model.\n model = Model( inputs=inputs, outputs=outputs )\n return model",
"def get_deprojecter(self,layer,n):\n K = layer.kernel_size\n P = layer.padding\n S = layer.stride\n if (isinstance(layer,nn.MaxPool2d)):\n return (lambda slices:(\n Projector.get_slic(slices[0],S,P,K,n),\n Projector.get_slic(slices[1],S,P,K,n)\n ),\n Projector.N_out(K,P,S,n)) #TODO: dont assume square image\n else:\n return (lambda slices:(\n Projector.get_slic(slices[0],S[0],P[0],K[0],n),\n Projector.get_slic(slices[1],S[1],P[1],K[1],n)\n ),\n Projector.N_out(K[0],P[0],S[0],n)) #TODO: dont assume square image",
"def convert_deconvolution(node, **kwargs):\n name, inputs, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n adj_dims = list(parse_helper(attrs, \"adj\", [0, 0]))\n\n pad_dims = pad_dims + pad_dims\n\n deconv_node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=inputs,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n output_padding=adj_dims,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [deconv_node]",
"def deconv2d(layer_input):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)\n u = Activation('relu')(u)\n return u",
"def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n layers = []\n # append transpose conv layer\n # TODO: shouldn't we set bias to NOT batch_norm instead of always being False ?\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n # optional batch norm layer\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)",
"def modified_resnet(config=None):\n return ResNet(config.MAIN)",
"def DenseNet_decoder(input_tensor,\r\n skip_connection,\r\n network_name,\r\n output_dim,\r\n fpn_d,\r\n train_bn=False):\r\n bn_axis = 3 if K.image_data_format() == 'channels_last' else 1\r\n\r\n R1, R2, R3, R4 = skip_connection\r\n DC4 = x = deconv_block(input_tensor, R4, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv5', train_bn=train_bn)\r\n DC3 = x = deconv_block(x, R3, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv4', train_bn=train_bn)\r\n DC2 = x = deconv_block(x, R2, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv3', train_bn=train_bn)\r\n DC1 = x = deconv_block(x, R1, fpn_d=fpn_d,\r\n network_name=network_name+'_deconv2', train_bn=train_bn)\r\n # We should get 256*256*64 at DC1\r\n x = deconv_block(x, None, network_name=network_name+'_deconv1', fpn_d=fpn_d, train_bn=train_bn)\r\n \r\n # FPN\r\n up4 = Conv2D(fpn_d, (1, 1), padding='same', name=network_name+'_up4_conv', use_bias=True)(DC4)\r\n up4 = Activation('relu', name=network_name+'_up4_relu')(up4)\r\n up4 = UpSampling2D(data_format=K.image_data_format(), name=network_name+'_up4_up')(up4)\r\n [_, up3, out_8] = fpn_side_output_block(DC3, up4, block_name='3', output_shape=output_dim,\r\n network_name=network_name, d=fpn_d,\r\n train_bn=train_bn, use_bias=True)\r\n [_, up2, out_4] = fpn_side_output_block(DC2, up3, block_name='2', output_shape=output_dim,\r\n network_name=network_name, d=fpn_d,\r\n train_bn=train_bn, use_bias=True)\r\n [_, up1, out_2] = fpn_side_output_block(DC1, up2, block_name='1', output_shape=output_dim,\r\n network_name=network_name, d=fpn_d,\r\n train_bn=train_bn, use_bias=True)\r\n [add0, _, out] = fpn_side_output_block(x, up1, block_name='0', up_output=False, d=fpn_d,\r\n network_name=network_name, output_shape=output_dim,\r\n train_bn=train_bn, use_bias=True)\r\n\r\n return [out, out_2, out_4, out_8, add0]",
"def resnet_v2(input_shape, depth, num_classes=7):\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n \n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n \n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n \n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n # num of param setting \n if stage == 0: # first stage\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer & first stage\n activation = None\n batch_normalization = False\n else: # second, third stage\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but no first stage\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # Linear projection residual shortcut connection to match\n # changed dims\n # at the first time, make a shortcut origin\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n # and add every reputation\n x = keras.layers.add([x, y])\n \n num_filters_in = num_filters_out\n \n # Add classifier on top\n # v2 has BN_ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n \n # Instantiate model\n model = Model(inputs=inputs, outputs=outputs)\n \n return model"
] | [
"0.7195827",
"0.6876899",
"0.6849386",
"0.6753572",
"0.62811035",
"0.622978",
"0.6229372",
"0.6033808",
"0.60275984",
"0.5997466",
"0.5986289",
"0.5975139",
"0.5969439",
"0.5949569",
"0.59469706",
"0.5941289",
"0.59397143",
"0.59260434",
"0.5917289",
"0.59127927",
"0.5904779",
"0.5902693",
"0.5877975",
"0.58646303",
"0.5863815",
"0.5840713",
"0.5826435",
"0.58203477",
"0.5799298",
"0.5786478"
] | 0.8153153 | 0 |
Resnet18 v1b model with deconv layers. Returns HybridBlock: A Resnet18 v1b model with deconv layers. | def resnet18_v1b_deconv(**kwargs):
return get_deconv_resnet('resnet18_v1b', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resnet18(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model",
"def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model",
"def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model",
"def decoder(self, features=[8], name=\"decoder\") -> KM.Model:\n input_tensor = KL.Input(shape=(2, 2, features[0]))\n\n decoded = input_tensor\n\n for i, feature_num in enumerate(features[1:], start=1):\n decoded = deconv_block(\n decoded, feature_num, name + f\"_deconv_{len(features)-i}\"\n )\n\n # Final reconstruction back to the original image size\n decoded = KL.Conv2DTranspose(\n 3,\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n use_bias=True,\n activation=\"tanh\",\n name=name + f\"_out\",\n )(decoded)\n decoded = DropBlock2D(block_size=5, keep_prob=0.8)(decoded)\n return KM.Model(inputs=input_tensor, outputs=decoded, name=name)",
"def get_deconv_resnet(base_network,\n pretrained=False,\n **kwargs):\n net = DeconvResnet(\n base_network=base_network,\n pretrained_backbone=pretrained,\n **kwargs)\n return net",
"def resnet50_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet50_v1b', **kwargs)",
"def dilated_resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model",
"def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def convolutional_32_decoder(self, z, reuse=True):\n\t\tz = tf.convert_to_tensor(z)\n\t\treuse=tf.AUTO_REUSE\n\n\t\tif self.vimco_samples > 1:\n\t\t\tsamples = []\n\n\t\twith tf.variable_scope('model', reuse=reuse):\n\t\t\twith tf.variable_scope('decoder', reuse=reuse):\n\t\t\t\tif len(z.get_shape().as_list()) == 2:\n\t\t\t\t\t# test\n\t\t\t\t\td = tf.layers.dense(z, 4*4*512, activation=tf.nn.relu, use_bias=False, reuse=reuse, name='fc1')\t\n\t\t\t\t\td = tf.reshape(d, (-1, 4, 4, 512))\n\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 512, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv1')\n\t\t\t\t\tdeconv1 = tf.layers.batch_normalization(deconv1)\n\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 256, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv2')\n\t\t\t\t\tdeconv2 = tf.layers.batch_normalization(deconv2)\n\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 128, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv3')\n\t\t\t\t\tdeconv3 = tf.layers.batch_normalization(deconv3)\n\t\t\t\t\tdeconv4 = tf.layers.conv2d(deconv3, 3, 1, strides=(1,1), padding=\"VALID\", activation=self.last_layer_act, reuse=reuse, name='deconv4')\n\t\t\t\t\treturn deconv4\n\t\t\t\telse:\n\t\t\t\t\t# train\n\t\t\t\t\tfor i in range(self.vimco_samples):\n\t\t\t\t\t\t# iterate through one vimco sample at a time\n\t\t\t\t\t\tz_sample = z[i]\n\t\t\t\t\t\td = tf.layers.dense(z_sample, 4*4*512, activation=tf.nn.relu, use_bias=False, reuse=reuse, name='fc1')\t\n\t\t\t\t\t\td = tf.reshape(d, (-1, 4, 4, 512))\n\t\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 512, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv1')\n\t\t\t\t\t\tdeconv1 = tf.layers.batch_normalization(deconv1)\n\t\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 256, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv2')\n\t\t\t\t\t\tdeconv2 = tf.layers.batch_normalization(deconv2)\n\t\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 128, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv3')\n\t\t\t\t\t\tdeconv3 = tf.layers.batch_normalization(deconv3)\n\t\t\t\t\t\tdeconv4 = tf.layers.conv2d(deconv3, 3, 1, strides=(1,1), padding=\"VALID\", activation=tf.nn.sigmoid, reuse=reuse, name='deconv4')\n\t\t\t\t\t\tsamples.append(deconv4)\n\t\tx_reconstr_logits = tf.stack(samples, axis=0)\n\t\tprint(x_reconstr_logits.get_shape())\n\t\treturn x_reconstr_logits",
"def resnet101_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet101_v1b', **kwargs)",
"def get_diracnetv2(blocks,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".chainer\", \"models\"),\n **kwargs):\n if blocks == 18:\n layers = [4, 4, 4, 4]\n elif blocks == 34:\n layers = [6, 8, 12, 6]\n else:\n raise ValueError(\"Unsupported DiracNetV2 with number of blocks: {}\".format(blocks))\n\n channels_per_layers = [64, 128, 256, 512]\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n init_block_channels = 64\n\n net = DiracNetV2(\n channels=channels,\n init_block_channels=init_block_channels,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n load_npz(\n file=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n obj=net)\n\n return net",
"def resnet18_origin(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained: bool = False, include_top: bool = False, freeze: bool = False):\n model = torchvision.models.resnet18(pretrained=pretrained)\n if freeze:\n set_parameter_requires_grad(model, \"fc\")\n if not include_top:\n output_size = model.fc.in_features\n model.fc = nn.Identity()\n return BackboneModule(model, output_size)\n else:\n return model",
"def resnet18_custom(input_channels):\n model = ResNet(input_channels, BasicBlock, [2])\n\n return model",
"def ffc_resnet18(pretrained=False, **kwargs):\n model = FFCResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model",
"def disresnet18(**kwargs):\n return Discriminator(resnetblocks.EresNetBasicBlock, resnetblocks.DresNetBasicBlock, [2, 2, 2, 2], **kwargs)",
"def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model",
"def LadderNet(input_size = (256, 256, 1), num_classes=2, filters=30): \n \n # X's denote standard flow\n # XNUM denote ResBlock outputs\n \n # \"First\" UNet\n \n # Input branch\n inputs = Input(input_size)\n X = Conv2D(filters=filters, kernel_size=3, activation=\"relu\", padding = 'same', kernel_initializer = 'he_normal')(inputs)\n\n # Down branch\n X1 = ResBlock(input_tensor=X, filters=filters) # ResBlock located in the first layer of the paper scheme\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X1) \n X = Activation(\"relu\")(X) # This ReLU is not shown in the paper scheme\n \n X2 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X2)\n X = Activation(\"relu\")(X)\n \n X3 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X3)\n X = Activation(\"relu\")(X)\n \n X4 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X4)\n X = Activation(\"relu\")(X)\n \n # Bottom block \n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X4])\n # X = Activation(\"relu\")(X) # This ReLU is commented in the paper code\n X5 = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X5)\n X = Add()([X, X3])\n # X = Activation(\"relu\")(X)\n X6 = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X6)\n X = Add()([X, X2])\n # X = Activation(\"relu\")(X)\n X7 = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, output_padding=1, kernel_initializer='he_normal')(X7)\n X = Add()([X, X1])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Top block (bottle-neck)\n X8 = ResBlock(input_tensor=X, filters=filters)\n X = ResBlock(input_tensor=X, filters=filters)\n X = Add()([X, X8])\n \n # \"Second\" UNet\n \n # Down branch\n X9 = ResBlock(input_tensor=X, filters=filters)\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X7, X]) \n \n X10 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X) \n X = Add()([X6, X])\n \n X11 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X5, X])\n\n X12 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n \n # Bottom block\n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X12]) \n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X11])\n # X = 
Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X10])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, kernel_initializer='he_normal', output_padding=1)(X)\n X = Add()([X, X9])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Final block\n X = Conv2D(filters=num_classes, kernel_size=1, kernel_initializer='he_normal')(X)\n # X = Activation(\"relu\")(X)\n X = Activation(\"softmax\")(X)\n #X = Conv2D(1, 1)(X)\n \n model = Model(inputs, X)\n \n \n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet34']))\n return model",
"def decoder_block(input, filters, block):\n\n conv_bn_name_base = 'decoder_' + str(block) + '_'\n\n i_filters, o_filters = filters\n\n x = _conv_bn(filters=i_filters//4, kernel_size=(1, 1), \n name=conv_bn_name_base + '1a')(input)\n x = Activation('relu', name= conv_bn_name_base + '1a_act')(x)\n\n x = Conv2DTranspose(filters=i_filters//4, kernel_size=(3,3),\n strides=(2,2), padding='same', name=conv_bn_name_base +'1b_fullconv')(x)\n x = BatchNormalization(axis=CHANNEL_AXIS, name=conv_bn_name_base + '1b_bn')(x)\n x = Activation('relu', name= conv_bn_name_base + '1b_act')(x)\n\n x = _conv_bn(filters=o_filters, kernel_size=(1, 1), \n name=conv_bn_name_base + '1c')(x)\n x = Activation('relu', name= conv_bn_name_base + '1c_act')(x)\n\n return x"
] | [
"0.62846977",
"0.6198227",
"0.6192097",
"0.6112437",
"0.61038554",
"0.60721475",
"0.60721475",
"0.60721475",
"0.60721475",
"0.60721475",
"0.6058464",
"0.59820086",
"0.59820086",
"0.59615123",
"0.59469116",
"0.5931862",
"0.5890543",
"0.5742647",
"0.5706555",
"0.5705783",
"0.5703118",
"0.5687097",
"0.5683885",
"0.5680803",
"0.5656962",
"0.5642234",
"0.56144065",
"0.55878735",
"0.55772483",
"0.55741584"
] | 0.6445889 | 0 |
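The entry above wraps get_deconv_resnet around a ResNet18 v1b backbone. A minimal sketch of exercising the returned HybridBlock, assuming an MXNet/Gluon environment and that resnet18_v1b_deconv (and the get_deconv_resnet factory shown in the negatives) are importable; the 256x256 input mirrors the dummy forward pass used inside _make_deconv_layer, and everything else is illustrative:

    import mxnet as mx

    net = resnet18_v1b_deconv(pretrained=False)   # no pretrained backbone weights
    net.initialize()                              # init remaining (deconv) params; already-initialized ones are skipped
    x = mx.nd.zeros((1, 3, 256, 256))             # NCHW dummy batch
    y = net(x)                                    # upsampled feature map from the deconv head
    print(y.shape)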
Resnet50 v1b model with deconv layers. Returns a HybridBlock: a Resnet50 v1b model with deconv layers. | def resnet50_v1b_deconv(**kwargs):
return get_deconv_resnet('resnet50_v1b', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def learn_deblurring_model(num_res_blocks=5, quick_mode=False):\n model = build_nn_model(16, 16, 32, num_res_blocks)\n if quick_mode:\n train_model(model, sol5_utils.images_for_deblurring(),\n _motion_blur_for_learn_deblurring_model,\n 10, 3, 2, 30)\n return model\n train_model(model, sol5_utils.images_for_deblurring(),\n _motion_blur_for_learn_deblurring_model,\n 100, 100, 10, 1000)\n return model",
"def decoder(self, features=[8], name=\"decoder\") -> KM.Model:\n input_tensor = KL.Input(shape=(2, 2, features[0]))\n\n decoded = input_tensor\n\n for i, feature_num in enumerate(features[1:], start=1):\n decoded = deconv_block(\n decoded, feature_num, name + f\"_deconv_{len(features)-i}\"\n )\n\n # Final reconstruction back to the original image size\n decoded = KL.Conv2DTranspose(\n 3,\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n use_bias=True,\n activation=\"tanh\",\n name=name + f\"_out\",\n )(decoded)\n decoded = DropBlock2D(block_size=5, keep_prob=0.8)(decoded)\n return KM.Model(inputs=input_tensor, outputs=decoded, name=name)",
"def get_deconv_resnet(base_network,\n pretrained=False,\n **kwargs):\n net = DeconvResnet(\n base_network=base_network,\n pretrained_backbone=pretrained,\n **kwargs)\n return net",
"def LadderNet(input_size = (256, 256, 1), num_classes=2, filters=30): \n \n # X's denote standard flow\n # XNUM denote ResBlock outputs\n \n # \"First\" UNet\n \n # Input branch\n inputs = Input(input_size)\n X = Conv2D(filters=filters, kernel_size=3, activation=\"relu\", padding = 'same', kernel_initializer = 'he_normal')(inputs)\n\n # Down branch\n X1 = ResBlock(input_tensor=X, filters=filters) # ResBlock located in the first layer of the paper scheme\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X1) \n X = Activation(\"relu\")(X) # This ReLU is not shown in the paper scheme\n \n X2 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X2)\n X = Activation(\"relu\")(X)\n \n X3 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X3)\n X = Activation(\"relu\")(X)\n \n X4 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X4)\n X = Activation(\"relu\")(X)\n \n # Bottom block \n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X4])\n # X = Activation(\"relu\")(X) # This ReLU is commented in the paper code\n X5 = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X5)\n X = Add()([X, X3])\n # X = Activation(\"relu\")(X)\n X6 = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X6)\n X = Add()([X, X2])\n # X = Activation(\"relu\")(X)\n X7 = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, output_padding=1, kernel_initializer='he_normal')(X7)\n X = Add()([X, X1])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Top block (bottle-neck)\n X8 = ResBlock(input_tensor=X, filters=filters)\n X = ResBlock(input_tensor=X, filters=filters)\n X = Add()([X, X8])\n \n # \"Second\" UNet\n \n # Down branch\n X9 = ResBlock(input_tensor=X, filters=filters)\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X7, X]) \n \n X10 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X) \n X = Add()([X6, X])\n \n X11 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X5, X])\n\n X12 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n \n # Bottom block\n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X12]) \n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X11])\n # X = 
Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X10])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, kernel_initializer='he_normal', output_padding=1)(X)\n X = Add()([X, X9])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Final block\n X = Conv2D(filters=num_classes, kernel_size=1, kernel_initializer='he_normal')(X)\n # X = Activation(\"relu\")(X)\n X = Activation(\"softmax\")(X)\n #X = Conv2D(1, 1)(X)\n \n model = Model(inputs, X)\n \n \n return model",
"def _make_deconv_layer(self, num_filters, num_kernels):\n assert len(num_kernels) == len(num_filters), \\\n 'Deconv filters and kernels number mismatch: {} vs. {}'.format(\n len(num_filters), len(num_kernels))\n\n layers = nn.HybridSequential('deconv_')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n self.base_network.initialize()\n in_planes = self.base_network(mx.nd.zeros((1, 3, 256, 256))).shape[1]\n for planes, k in zip(num_filters, num_kernels):\n kernel, padding, output_padding = self._get_deconv_cfg(k)\n layers.add(nn.Conv2D(channels=planes,\n kernel_size=3,\n strides=1,\n padding=1,\n in_channels=in_planes))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n layers.add(nn.Conv2DTranspose(channels=planes,\n kernel_size=kernel,\n strides=2,\n padding=padding,\n output_padding=output_padding,\n use_bias=False,\n in_channels=planes,\n weight_initializer=BilinearUpSampleInitializer()))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n in_planes = planes\n\n return layers",
"def darknet_body():\n return compose(DarknetConv2D_BN_Leaky(32, (3, 3)), MaxPooling2D(), DarknetConv2D_BN_Leaky(64, (3, 3)),\n MaxPooling2D(), bottleneck_block(128, 64), MaxPooling2D(), bottleneck_block(256, 128),\n MaxPooling2D(), bottleneck_x2_block(512, 256), MaxPooling2D(), bottleneck_x2_block(1024, 512))",
"def DeconvBlock(tensor, residual, nb_filters, kernel_size=3, padding=\"same\", strides=(2,2), regularization=None):\n y = Conv2DTranspose(nb_filters, kernel_size=(kernel_size, kernel_size), strides=strides, padding=padding)(tensor)\n y = concatenate([y, residual], axis=3)\n y = ConvBlock(y, nb_filters, kernel_size, regularization=regularization)\n return y",
"def get_unet2(patch_height, patch_width, channels, n_classes):\n axis = 3\n k = 3 # kernel size 卷积核大小\n s = 2 # stride 步长\n n_filters = 32 # number of filters 通道数\n\n #初始化keras张量\n inputs = Input((patch_height, patch_width, channels))\n\n # n_filters:输出的维度 (k,k):卷积核尺寸 padding:边缘填充\n # 400,400,3 ==> 400,400,32\n conv1 = Conv2D(n_filters, (k,k), padding='same')(inputs)\n conv1 = BatchNormalization(scale=False, axis=axis)(conv1)\n conv1 = Activation('relu')(conv1)\n conv1 = Conv2D(n_filters, (k, k), padding='same')(conv1)\n conv1 = BatchNormalization(scale=False, axis=axis)(conv1)\n conv1 = Activation('relu')(conv1)\n # 400,400,32 ==> 200,200,32\n pool1 = MaxPooling2D(pool_size=(s,s))(conv1)\n\n # DenseBlock模块 200,200,32 ==> 200,200,32\n conv2 = Conv2D(n_filters, (1,1), padding='same')(pool1)\n # battleneck层 第一层Dense\n conv2 = BatchNormalization(scale=False, axis=axis)(conv2)\n conv2 = Activation('relu')(conv2)\n conv2 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv2)\n conv2 = BatchNormalization(scale=False, axis=axis)(conv2)\n conv2 = Activation('relu')(conv2)\n conv2 = Conv2D(n_filters,(k,k),padding='same')(conv2)\n # 200,200,32 ==> 200,200,64\n x1 = Concatenate(axis=axis)([conv2,pool1])\n\n # Transition层\n ts = Conv2D(n_filters * 4, (1, 1), padding='same')(x1)\n # 200,200,64 ==> 100,100,64 #将pool2 看成 x1\n pool2 = AveragePooling2D(pool_size=(s, s), strides=2)(ts)\n\n conv3 = Conv2D(n_filters,(1,1),padding='same')(pool2)\n conv3 = BatchNormalization(scale=False, axis=axis)(conv3)\n conv3 = Activation('relu')(conv3)\n conv3 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv3)\n conv3 = BatchNormalization(scale=False, axis=axis)(conv3)\n conv3 = Activation('relu')(conv3)\n conv3 = Conv2D(n_filters, (k, k), padding='same')(conv3)\n # 100,100,32 ==> 100,100,96\n tmp2 = Concatenate(axis=axis)([conv3,pool2])\n\n conv3 = Conv2D(n_filters, (1, 1), padding='same')(tmp2)\n conv3 = BatchNormalization(scale=False, axis=axis)(conv3)\n conv3 = Activation('relu')(conv3)\n conv3 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv3)\n conv3 = BatchNormalization(scale=False, axis=axis)(conv3)\n conv3 = Activation('relu')(conv3)\n conv3 = Conv2D(n_filters, (k, k), padding='same')(conv3)\n # 100,100,32 ==> 100,100,128\n x2 = Concatenate(axis=axis)([conv3, tmp2])\n\n # Transition层\n ts1 = Conv2D(n_filters * 4,(1,1),padding='same')(x2)\n # 100,100,128 ==> 50,50,128\n pool2 = AveragePooling2D(pool_size=(s,s),strides=2)(ts1)\n\n # 50,50,128 ==> 50,50,32\n conv4 = Conv2D(n_filters, (1, 1), padding='same')(pool2)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv4)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(n_filters, (k, k), padding='same')(conv4)\n # 50,50,32 ==> 50,50,160\n tmp3 = Concatenate(axis=axis)([conv4,pool2])\n\n # 50,50,160 ==> 50,50,32\n conv4 = Conv2D(n_filters, (1, 1), padding='same')(tmp3)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv4)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(n_filters, (k, k), padding='same')(conv4)\n # 50,50,32 ==> 50,50,192\n tmp4 = Concatenate(axis=axis)([conv4, tmp3])\n\n # 50,50,192 ==> 50,50,32\n conv4 = Conv2D(n_filters, (1, 1), padding='same')(tmp4)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n 
conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv4)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(n_filters, (k, k), padding='same')(conv4)\n # 50,50,32 ==> 50,50,224\n tmp5 = Concatenate(axis=axis)([conv4, tmp4])\n\n # 50,50,224 ==> 50,50,32\n conv4 = Conv2D(n_filters, (1, 1), padding='same')(tmp5)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv4)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(n_filters, (k, k), padding='same')(conv4)\n # 50,50,32 ==> 50,50,256\n x3 = Concatenate(axis=axis)([conv4, tmp5])\n\n ts2 = Conv2D(n_filters * 8, (1, 1), padding='same')(x3)\n # 50,50,256 ==> 25,25,256\n pool3 = AveragePooling2D(pool_size=(s, s), strides=2)(ts2)\n\n # 25,25,256 ==> 25,25,32\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(pool3)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 25,25,32 ==> 25,25,288\n tmp6 = Concatenate(axis=axis)([conv5, pool3])\n\n # 50,50,288 ==> 50,50,32\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(tmp6)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 50,50,32 ==> 50,50,320\n tmp7 = Concatenate(axis=axis)([conv5, tmp6])\n\n # 50,50,288 ==> 50,50,32\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(tmp7)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 50,50,32 ==> 50,50,352\n tmp8 = Concatenate(axis=axis)([conv5, tmp7])\n\n # 50,50,352 ==> 50,50,32\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(tmp8)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 50,50,32 ==> 50,50,384\n tmp9 = Concatenate(axis=axis)([conv5, tmp8])\n\n # 50,50,352 ==> 50,50,32\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(tmp9)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 50,50,32 ==> 50,50,416\n tmp10 = Concatenate(axis=axis)([conv5, tmp9])\n\n # 50,50,352 ==> 50,50,32\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(tmp10)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), 
padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 50,50,32 ==> 50,50,448\n tmp11 = Concatenate(axis=axis)([conv5, tmp10])\n\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(tmp11)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 50,50,32 ==> 50,50,480\n tmp12 = Concatenate(axis=axis)([conv5, tmp11])\n\n conv5 = Conv2D(n_filters, (1, 1), padding='same')(tmp12)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(4 * n_filters, (1, 1), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(n_filters, (k, k), padding='same')(conv5)\n # 25,25,32 ==> 25,25,512\n conv5 = Concatenate(axis=axis)([conv5, tmp12])\n\n #先上采样放大 在进行卷积操作 相当于转置卷积 并进行拼接\n # 25,25,512 ==> 50,50,768\n up1 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(conv5), x3])\n conv6 = Conv2D(8 * n_filters, (k,k), padding='same')(up1)\n conv6 = BatchNormalization(scale=False, axis=axis)(conv6)\n conv6 = Activation('relu')(conv6)\n conv6 = Conv2D(8 * n_filters, (k, k), padding='same')(conv6)\n conv6 = BatchNormalization(scale=False, axis=axis)(conv6)\n conv6 = Activation('relu')(conv6)\n\n #50,50,768 ==> 100,100,896\n up2 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(conv6), x2])\n conv7 = Conv2D(4 * n_filters, (k, k), padding='same')(up2)\n conv7 = BatchNormalization(scale=False, axis=axis)(conv7)\n conv7 = Activation('relu')(conv7)\n conv7 = Conv2D(4 * n_filters, (k, k), padding='same')(conv7)\n conv7 = BatchNormalization(scale=False, axis=axis)(conv7)\n conv7 = Activation('relu')(conv7)\n\n # 100,100,896 ==> 200,200,960\n up3 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(conv7), x1])\n conv8 = Conv2D(2 * n_filters, (k, k), padding='same')(up3)\n conv8 = BatchNormalization(scale=False, axis=axis)(conv8)\n conv8 = Activation('relu')(conv8)\n conv8 = Conv2D(2 * n_filters, (k, k), padding='same')(conv8)\n conv8 = BatchNormalization(scale=False, axis=axis)(conv8)\n conv8 = Activation('relu')(conv8)\n\n # 200,200,960 ==> 400,400,992\n up4 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(conv8), conv1])\n conv9 = Conv2D(n_filters, (k, k), padding='same')(up4)\n conv9 = BatchNormalization(scale=False, axis=axis)(conv9)\n conv9 = Activation('relu')(conv9)\n conv9 = Conv2D(n_filters, (k, k), padding='same')(conv9)\n conv9 = BatchNormalization(scale=False, axis=axis)(conv9)\n conv9 = Activation('relu')(conv9)\n\n # 全连接层 400,400,992 ==> 400,400,5\n outputs = Conv2D(n_classes, (1,1), padding='same', activation='softmax')(conv9)\n\n unet = Model(inputs=inputs, outputs=outputs)\n\n return unet",
"def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d",
"def resnet50():\n initializer = K.initializers.he_normal(seed=None)\n\n X = K.Input(shape=(224, 224, 3))\n\n # conv1\n layer = K.layers.Conv2D(filters=64,\n kernel_size=(7, 7),\n strides=(2, 2),\n padding='same',\n kernel_initializer=initializer,\n )(X)\n\n layer = K.layers.BatchNormalization(axis=3)(layer)\n\n layer = K.layers.Activation('relu')(layer)\n\n # conv2_x\n layer = K.layers.MaxPool2D(pool_size=(3, 3),\n strides=(2, 2),\n padding='same')(layer)\n\n layer = projection_block(layer, [64, 64, 256], 1)\n for _ in range(2):\n layer = identity_block(layer, [64, 64, 256])\n\n # conv3_x\n layer = projection_block(layer, [128, 128, 512])\n for _ in range(3):\n layer = identity_block(layer, [128, 128, 512])\n\n # conv4_x\n layer = projection_block(layer, [256, 256, 1024])\n for _ in range(5):\n layer = identity_block(layer, [256, 256, 1024])\n\n # conv5_x\n layer = projection_block(layer, [512, 512, 2048])\n for _ in range(2):\n layer = identity_block(layer, [512, 512, 2048])\n\n layer = K.layers.AveragePooling2D(pool_size=(7, 7),\n padding='same')(layer)\n\n layer = K.layers.Dense(units=1000,\n activation='softmax',\n kernel_initializer=initializer,\n )(layer)\n\n model = K.models.Model(inputs=X, outputs=layer)\n return model",
"def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)",
"def resnet_head(input_shape):\n input_layer = layers.Input(shape=input_shape)\n\n model = layers.Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(input_layer)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 16)\n model = residual_block(model, 16)\n\n model = layers.Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(model)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 32)\n model = residual_block(model, 32)\n\n model = layers.Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(model)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 32)\n model = residual_block(model, 32)\n\n model = layers.Flatten()(model)\n\n return input_layer, model",
"def densenet121(growth_rate=32, compression=1.0):\n inputs = K.Input(shape=(224, 224, 3))\n out_l = K.layers.BatchNormalization(axis=3)(inputs)\n out_l = K.layers.Activation('relu')(out_l)\n out_l = K.layers.Conv2D(64, kernel_size=(7, 7), padding='same',\n kernel_initializer='he_normal',\n strides=(2, 2))(out_l)\n out_l = K.layers.MaxPool2D((3, 3), (2, 2), padding=\"same\")(out_l)\n out_l, filters = dense_block(out_l, 64, growth_rate, 6)\n out_l, filters = transition_layer(out_l, filters, compression)\n out_l, filters = dense_block(out_l, filters, growth_rate, 12)\n out_l, filters = transition_layer(out_l, filters, compression)\n out_l, filters = dense_block(out_l, filters, growth_rate, 24)\n out_l, filters = transition_layer(out_l, filters, compression)\n out_l, filters = dense_block(out_l, filters, growth_rate, 16)\n out_l = K.layers.AvgPool2D((7, 7), padding='same')(out_l)\n out_l = K.layers.Dense(1000, activation='softmax')(out_l)\n model = K.Model(inputs, out_l)\n return model",
"def resnet50_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n base_model = Model(img_input, x, name='resnet50')\n\n return base_model",
"def densenet121(growth_rate=32, compression=1.0):\n X_input = K.Input(shape=(224, 224, 3))\n initializer = K.initializers.he_normal(seed=None)\n\n batch1 = K.layers.BatchNormalization()(X_input)\n activation1 = K.layers.Activation('relu')(batch1)\n conv1 = K.layers.Conv2D(64, kernel_size=(7, 7), strides=2,\n padding='same',\n kernel_initializer=initializer)(activation1)\n max_pool = K.layers.MaxPooling2D(pool_size=(3, 3),\n strides=2, padding='same')(conv1)\n X, nb_filters = dense_block(max_pool, 64, growth_rate, 6)\n X, nb_filters = transition_layer(X, nb_filters, compression)\n X, nb_filters = dense_block(X, nb_filters, growth_rate, 12)\n X, nb_filters = transition_layer(X, nb_filters, compression)\n X, nb_filters = dense_block(X, nb_filters, growth_rate, 24)\n X, nb_filters = transition_layer(X, nb_filters, compression)\n X, nb_filters = dense_block(X, nb_filters, growth_rate, 16)\n\n avg_pool = K.layers.AveragePooling2D(pool_size=(7, 7), strides=None,\n padding='same')(X)\n\n dense = K.layers.Dense(1000, activation='softmax',\n kernel_initializer=initializer)(avg_pool)\n model = K.models.Model(inputs=X_input, outputs=dense)\n\n return model",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def _make_layer(self, block, outputs, blocks, stride=1):\n downsample = None\n \n downsample = nn.Sequential(\n nn.Conv2d(self.inputs, outputs * 4,\n kernel_size=1, stride=stride, bias=False,\n dilation=self.dilation),\n nn.BatchNorm2d(outputs * 4),\n )\n\n layers = []\n layers.append(block(self.inputs, outputs, stride, downsample, self.dilation))\n self.inputs = outputs * 4\n for i in range(1, blocks):\n layers.append(block(self.inputs, outputs))\n\n layer = nn.Sequential(*layers)\n\n self.channels.append(outputs * 4)\n self.layers.append(layer)\n\n return layer",
"def decoder_block(input, filters, block):\n\n conv_bn_name_base = 'decoder_' + str(block) + '_'\n\n i_filters, o_filters = filters\n\n x = _conv_bn(filters=i_filters//4, kernel_size=(1, 1), \n name=conv_bn_name_base + '1a')(input)\n x = Activation('relu', name= conv_bn_name_base + '1a_act')(x)\n\n x = Conv2DTranspose(filters=i_filters//4, kernel_size=(3,3),\n strides=(2,2), padding='same', name=conv_bn_name_base +'1b_fullconv')(x)\n x = BatchNormalization(axis=CHANNEL_AXIS, name=conv_bn_name_base + '1b_bn')(x)\n x = Activation('relu', name= conv_bn_name_base + '1b_act')(x)\n\n x = _conv_bn(filters=o_filters, kernel_size=(1, 1), \n name=conv_bn_name_base + '1c')(x)\n x = Activation('relu', name= conv_bn_name_base + '1c_act')(x)\n\n return x",
"def resnext50(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnext50_32x4d']))\n return model",
"def residual_block(layer_input, filters=512, down_filter=False, normalization=False):\n\td1 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n\tif normalization:\n\t\t# d = InstanceNormalization()(d)\n\t\td1 = BatchNormalization(momentum=0.8)(d1) # 6/6/2018: use it for CT # 6/5/2018: remove it for MNIST\n\td1 = Activation('relu')(d1)\n\td2 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d1)\n\tif normalization:\n\t\t# d = InstanceNormalization()(d)\n\t\td2 = BatchNormalization(momentum=0.8)(d2) # 6/6/2018: use it for CT # 6/5/2018: remove it for MNIST\n\tif down_filter:\n\t\td = Add()([d1, d2])\n\telse:\n\t\td = Add()([d2, layer_input])\n\treturn d",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def densenet121(growth_rate=32, compression=1.0):\n\n init = K.initializers.he_normal(seed=None)\n X = K.Input(shape=(224, 224, 3))\n\n batch_1 = K.layers.BatchNormalization()(X)\n activation_1 = K.layers.Activation('relu')(batch_1)\n\n conv_1 = K.layers.Conv2D(filters=64,\n kernel_size=7,\n strides=2,\n padding='same',\n kernel_initializer=init)(activation_1)\n max_pool = K.layers.MaxPooling2D(pool_size=[3, 3],\n strides=2,\n padding='same')(conv_1)\n\n ly_1, nb_filters1 = dense_block(max_pool, 64, growth_rate, 6)\n\n ly_2, nb_filters2 = transition_layer(ly_1, nb_filters1, compression)\n ly_3, nb_filters3 = dense_block(ly_2, nb_filters2, growth_rate, 12)\n\n ly_4, nb_filters4 = transition_layer(ly_3, nb_filters3, compression)\n ly_5, nb_filters5 = dense_block(ly_4, nb_filters4, growth_rate, 24)\n\n ly_6, nb_filters6 = transition_layer(ly_5, nb_filters5, compression)\n ly_7, nb_filters7 = dense_block(ly_6, nb_filters6, growth_rate, 16)\n\n avg_pool = K.layers.AveragePooling2D(pool_size=[7, 7],\n strides=7,\n padding='same')(ly_7)\n\n dense = K.layers.Dense(1000, activation='softmax',\n kernel_initializer=init)(avg_pool)\n\n model = K.models.Model(inputs=X, outputs=dense)\n return model",
"def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d",
"def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet50(pretrained: bool = False, include_top: bool = False, freeze: bool = False):\n model = torchvision.models.resnet50(pretrained)\n if freeze:\n set_parameter_requires_grad(model, \"fc\")\n if not include_top:\n output_size = model.fc.in_features\n model.fc = nn.Identity()\n return BackboneModule(model, output_size)\n else:\n return model",
"def resnet18_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet18_v1b', **kwargs)",
"def build_vgg(self):\n # Get the vgg network. Extract features from Block 5, last convolution.\n vgg = tf.keras.applications.VGG19(weights=\"imagenet\", input_shape=self.hr_shape, include_top=False)\n vgg.trainable = False\n for layer in vgg.layers:\n layer.trainable = False\n\n # Create model and compile\n model = tf.keras.models.Model(inputs=vgg.input, outputs=vgg.get_layer(\"block5_conv4\").output)\n\n return model",
"def densenet121(growth_rate=32, compression=1.0):\n\n kernel_init = K.initializers.he_normal(seed=None)\n\n X = K.Input(shape=(224, 224, 3))\n batchNorm0 = K.layers.BatchNormalization(axis=3)(X)\n activation0 = K.layers.Activation('relu')(batchNorm0)\n\n layer1 = K.layers.Conv2D(filters=2*growth_rate, kernel_size=(7, 7),\n strides=(2, 2), padding='same',\n kernel_initializer=kernel_init)(activation0)\n l1pool = K.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2),\n padding='same')(layer1)\n\n layer2, n_f2 = dense_block(l1pool, 2*growth_rate, growth_rate, 6)\n layer3, n_f3 = transition_layer(layer2, n_f2, compression)\n layer4, n_f4 = dense_block(layer3, n_f3, growth_rate, 12)\n layer5, n_f5 = transition_layer(layer4, n_f4, compression)\n layer6, n_f6 = dense_block(layer5, n_f5, growth_rate, 24)\n layer7, n_f7 = transition_layer(layer6, n_f6, compression)\n layer8, n_f8 = dense_block(layer7, n_f7, growth_rate, 16)\n\n avg_pool = K.layers.AveragePooling2D(pool_size=(7, 7), strides=7,\n padding='same')(layer8)\n\n Y = K.layers.Dense(1000, activation='softmax',\n kernel_initializer=kernel_init)(avg_pool)\n\n model = K.models.Model(inputs=X, outputs=Y)\n\n return model",
"def LeNet5(input_shape=None):\n input_data = tf.keras.layers.Input(shape=input_shape)\n # First block\n conv1 = tf.keras.layers.Conv2D(\n 6, (5, 5), padding='valid', activation='relu', kernel_initializer='he_uniform')(input_data)\n maxpool1 = tf.keras.layers.MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(conv1)\n\n # Second block\n conv2 = tf.keras.layers.Conv2D(16, (5, 5), padding='valid',\n activation='relu', kernel_initializer='he_uniform')(maxpool1)\n maxpool2 = tf.keras.layers.MaxPooling2D(\n pool_size=(2, 2), strides=(2, 2))(conv2)\n\n # Third block\n flatten = tf.keras.layers.Flatten()(maxpool2)\n dense1 = tf.keras.layers.Dense(400, activation='relu',\n kernel_initializer='he_uniform')(flatten)\n dense2 = tf.keras.layers.Dense(120, activation='relu',\n kernel_initializer='he_uniform')(dense1)\n dense3 = tf.keras.layers.Dense(84, activation='relu',\n kernel_initializer='he_uniform')(dense2)\n\n # Output\n dense4 = tf.keras.layers.Dense(10, activation='softmax')(dense3)\n\n model = tf.keras.models.Model(inputs=input_data, outputs=dense4)\n\n return model",
"def resnet101_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet101_v1b', **kwargs)"
] | [
"0.5999963",
"0.5938829",
"0.5738903",
"0.5721558",
"0.5713137",
"0.5692295",
"0.56700504",
"0.56655633",
"0.56542367",
"0.5653041",
"0.5642231",
"0.5638073",
"0.56156534",
"0.56155646",
"0.5600691",
"0.5598976",
"0.5593832",
"0.5579433",
"0.55768794",
"0.5557143",
"0.555273",
"0.5552569",
"0.55473727",
"0.55427575",
"0.55287164",
"0.5527304",
"0.5527066",
"0.5507494",
"0.5507386",
"0.5502897"
] | 0.6533484 | 0 |
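Both deconv variants above upsample with stride-2 transposed convolutions (see the _make_deconv_layer and Conv2DTranspose negatives). A small, self-contained helper (hypothetical, for illustration only) showing the standard output-size relation those layers rely on:

    def deconv_output_size(in_size, kernel, stride=2, padding=1, output_padding=0):
        # Output spatial size of a transposed convolution:
        # (in - 1) * stride - 2 * padding + kernel + output_padding
        return (in_size - 1) * stride - 2 * padding + kernel + output_padding

    # A 4x4 kernel with stride 2 and padding 1 doubles an 8x8 map to 16x16.
    print(deconv_output_size(8, kernel=4))  # -> 16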
Resnet101 v1b model with deconv layers. Returns a HybridBlock: a Resnet101 v1b model with deconv layers. | def resnet101_v1b_deconv(**kwargs):
return get_deconv_resnet('resnet101_v1b', **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resnet50_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet50_v1b', **kwargs)",
"def resnet18_v1b_deconv(**kwargs):\n return get_deconv_resnet('resnet18_v1b', **kwargs)",
"def get_deconv_resnet(base_network,\n pretrained=False,\n **kwargs):\n net = DeconvResnet(\n base_network=base_network,\n pretrained_backbone=pretrained,\n **kwargs)\n return net",
"def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model",
"def learn_deblurring_model(num_res_blocks=5, quick_mode=False):\n model = build_nn_model(16, 16, 32, num_res_blocks)\n if quick_mode:\n train_model(model, sol5_utils.images_for_deblurring(),\n _motion_blur_for_learn_deblurring_model,\n 10, 3, 2, 30)\n return model\n train_model(model, sol5_utils.images_for_deblurring(),\n _motion_blur_for_learn_deblurring_model,\n 100, 100, 10, 1000)\n return model",
"def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def decoder(self, features=[8], name=\"decoder\") -> KM.Model:\n input_tensor = KL.Input(shape=(2, 2, features[0]))\n\n decoded = input_tensor\n\n for i, feature_num in enumerate(features[1:], start=1):\n decoded = deconv_block(\n decoded, feature_num, name + f\"_deconv_{len(features)-i}\"\n )\n\n # Final reconstruction back to the original image size\n decoded = KL.Conv2DTranspose(\n 3,\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n use_bias=True,\n activation=\"tanh\",\n name=name + f\"_out\",\n )(decoded)\n decoded = DropBlock2D(block_size=5, keep_prob=0.8)(decoded)\n return KM.Model(inputs=input_tensor, outputs=decoded, name=name)",
"def resnet18(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet_head(input_shape):\n input_layer = layers.Input(shape=input_shape)\n\n model = layers.Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(input_layer)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 16)\n model = residual_block(model, 16)\n\n model = layers.Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(model)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 32)\n model = residual_block(model, 32)\n\n model = layers.Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None)(model)\n model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n model = layers.ReLU()(model)\n model = residual_block(model, 32)\n model = residual_block(model, 32)\n\n model = layers.Flatten()(model)\n\n return input_layer, model",
"def LadderNet(input_size = (256, 256, 1), num_classes=2, filters=30): \n \n # X's denote standard flow\n # XNUM denote ResBlock outputs\n \n # \"First\" UNet\n \n # Input branch\n inputs = Input(input_size)\n X = Conv2D(filters=filters, kernel_size=3, activation=\"relu\", padding = 'same', kernel_initializer = 'he_normal')(inputs)\n\n # Down branch\n X1 = ResBlock(input_tensor=X, filters=filters) # ResBlock located in the first layer of the paper scheme\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X1) \n X = Activation(\"relu\")(X) # This ReLU is not shown in the paper scheme\n \n X2 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X2)\n X = Activation(\"relu\")(X)\n \n X3 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X3)\n X = Activation(\"relu\")(X)\n \n X4 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X4)\n X = Activation(\"relu\")(X)\n \n # Bottom block \n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X4])\n # X = Activation(\"relu\")(X) # This ReLU is commented in the paper code\n X5 = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X5)\n X = Add()([X, X3])\n # X = Activation(\"relu\")(X)\n X6 = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X6)\n X = Add()([X, X2])\n # X = Activation(\"relu\")(X)\n X7 = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, output_padding=1, kernel_initializer='he_normal')(X7)\n X = Add()([X, X1])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Top block (bottle-neck)\n X8 = ResBlock(input_tensor=X, filters=filters)\n X = ResBlock(input_tensor=X, filters=filters)\n X = Add()([X, X8])\n \n # \"Second\" UNet\n \n # Down branch\n X9 = ResBlock(input_tensor=X, filters=filters)\n X = Conv2D(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X7, X]) \n \n X10 = ResBlock(input_tensor=X, filters=filters*2)\n X = Conv2D(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X) \n X = Add()([X6, X])\n \n X11 = ResBlock(input_tensor=X, filters=filters*4)\n X = Conv2D(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n X = Add()([X5, X])\n\n X12 = ResBlock(input_tensor=X, filters=filters*8)\n X = Conv2D(filters=filters*16, kernel_size=3, strides=2, kernel_initializer='he_normal')(X) \n X = Activation(\"relu\")(X)\n \n # Bottom block\n X = ResBlock(input_tensor=X, filters=filters*16)\n \n # Up branch\n X = Conv2DTranspose(filters=filters*8, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X12]) \n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*8)\n \n X = Conv2DTranspose(filters=filters*4, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X11])\n # X = 
Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*4)\n \n X = Conv2DTranspose(filters=filters*2, kernel_size=3, strides=2, kernel_initializer='he_normal')(X)\n X = Add()([X, X10])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters*2)\n \n X = Conv2DTranspose(filters=filters, kernel_size=3, strides=2, kernel_initializer='he_normal', output_padding=1)(X)\n X = Add()([X, X9])\n # X = Activation(\"relu\")(X)\n X = ResBlock(input_tensor=X, filters=filters)\n \n # Final block\n X = Conv2D(filters=num_classes, kernel_size=1, kernel_initializer='he_normal')(X)\n # X = Activation(\"relu\")(X)\n X = Activation(\"softmax\")(X)\n #X = Conv2D(1, 1)(X)\n \n model = Model(inputs, X)\n \n \n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model",
"def _make_deconv_layer(self, num_filters, num_kernels):\n assert len(num_kernels) == len(num_filters), \\\n 'Deconv filters and kernels number mismatch: {} vs. {}'.format(\n len(num_filters), len(num_kernels))\n\n layers = nn.HybridSequential('deconv_')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n self.base_network.initialize()\n in_planes = self.base_network(mx.nd.zeros((1, 3, 256, 256))).shape[1]\n for planes, k in zip(num_filters, num_kernels):\n kernel, padding, output_padding = self._get_deconv_cfg(k)\n layers.add(nn.Conv2D(channels=planes,\n kernel_size=3,\n strides=1,\n padding=1,\n in_channels=in_planes))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n layers.add(nn.Conv2DTranspose(channels=planes,\n kernel_size=kernel,\n strides=2,\n padding=padding,\n output_padding=output_padding,\n use_bias=False,\n in_channels=planes,\n weight_initializer=BilinearUpSampleInitializer()))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n in_planes = planes\n\n return layers",
"def decoder_block(input, filters, block):\n\n conv_bn_name_base = 'decoder_' + str(block) + '_'\n\n i_filters, o_filters = filters\n\n x = _conv_bn(filters=i_filters//4, kernel_size=(1, 1), \n name=conv_bn_name_base + '1a')(input)\n x = Activation('relu', name= conv_bn_name_base + '1a_act')(x)\n\n x = Conv2DTranspose(filters=i_filters//4, kernel_size=(3,3),\n strides=(2,2), padding='same', name=conv_bn_name_base +'1b_fullconv')(x)\n x = BatchNormalization(axis=CHANNEL_AXIS, name=conv_bn_name_base + '1b_bn')(x)\n x = Activation('relu', name= conv_bn_name_base + '1b_act')(x)\n\n x = _conv_bn(filters=o_filters, kernel_size=(1, 1), \n name=conv_bn_name_base + '1c')(x)\n x = Activation('relu', name= conv_bn_name_base + '1c_act')(x)\n\n return x",
"def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def resnet18_custom(input_channels):\n model = ResNet(input_channels, BasicBlock, [2])\n\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def get_discrinminator_trained_model(self):\n return Model(self.model.inputs[0], self.model.layers[2](self.model.layers[1](self.model.inputs[0])))",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model",
"def densenet201(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['densenet201']))\n return model",
"def get_diracnetv2(blocks,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".chainer\", \"models\"),\n **kwargs):\n if blocks == 18:\n layers = [4, 4, 4, 4]\n elif blocks == 34:\n layers = [6, 8, 12, 6]\n else:\n raise ValueError(\"Unsupported DiracNetV2 with number of blocks: {}\".format(blocks))\n\n channels_per_layers = [64, 128, 256, 512]\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n init_block_channels = 64\n\n net = DiracNetV2(\n channels=channels,\n init_block_channels=init_block_channels,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n load_npz(\n file=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n obj=net)\n\n return net",
"def convolutional_32_decoder(self, z, reuse=True):\n\t\tz = tf.convert_to_tensor(z)\n\t\treuse=tf.AUTO_REUSE\n\n\t\tif self.vimco_samples > 1:\n\t\t\tsamples = []\n\n\t\twith tf.variable_scope('model', reuse=reuse):\n\t\t\twith tf.variable_scope('decoder', reuse=reuse):\n\t\t\t\tif len(z.get_shape().as_list()) == 2:\n\t\t\t\t\t# test\n\t\t\t\t\td = tf.layers.dense(z, 4*4*512, activation=tf.nn.relu, use_bias=False, reuse=reuse, name='fc1')\t\n\t\t\t\t\td = tf.reshape(d, (-1, 4, 4, 512))\n\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 512, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv1')\n\t\t\t\t\tdeconv1 = tf.layers.batch_normalization(deconv1)\n\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 256, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv2')\n\t\t\t\t\tdeconv2 = tf.layers.batch_normalization(deconv2)\n\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 128, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv3')\n\t\t\t\t\tdeconv3 = tf.layers.batch_normalization(deconv3)\n\t\t\t\t\tdeconv4 = tf.layers.conv2d(deconv3, 3, 1, strides=(1,1), padding=\"VALID\", activation=self.last_layer_act, reuse=reuse, name='deconv4')\n\t\t\t\t\treturn deconv4\n\t\t\t\telse:\n\t\t\t\t\t# train\n\t\t\t\t\tfor i in range(self.vimco_samples):\n\t\t\t\t\t\t# iterate through one vimco sample at a time\n\t\t\t\t\t\tz_sample = z[i]\n\t\t\t\t\t\td = tf.layers.dense(z_sample, 4*4*512, activation=tf.nn.relu, use_bias=False, reuse=reuse, name='fc1')\t\n\t\t\t\t\t\td = tf.reshape(d, (-1, 4, 4, 512))\n\t\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 512, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv1')\n\t\t\t\t\t\tdeconv1 = tf.layers.batch_normalization(deconv1)\n\t\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 256, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv2')\n\t\t\t\t\t\tdeconv2 = tf.layers.batch_normalization(deconv2)\n\t\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 128, 2, strides=(2,2), padding=\"VALID\", activation=tf.nn.relu, reuse=reuse, name='deconv3')\n\t\t\t\t\t\tdeconv3 = tf.layers.batch_normalization(deconv3)\n\t\t\t\t\t\tdeconv4 = tf.layers.conv2d(deconv3, 3, 1, strides=(1,1), padding=\"VALID\", activation=tf.nn.sigmoid, reuse=reuse, name='deconv4')\n\t\t\t\t\t\tsamples.append(deconv4)\n\t\tx_reconstr_logits = tf.stack(samples, axis=0)\n\t\tprint(x_reconstr_logits.get_shape())\n\t\treturn x_reconstr_logits",
"def residual_block(layer_input, filters):\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d",
"def resnet34(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet34']))\n return model",
"def disresnet18(**kwargs):\n return Discriminator(resnetblocks.EresNetBasicBlock, resnetblocks.DresNetBasicBlock, [2, 2, 2, 2], **kwargs)"
] | [
"0.6411092",
"0.6288105",
"0.6117896",
"0.58658695",
"0.5787724",
"0.57668394",
"0.57667327",
"0.57633877",
"0.5757062",
"0.57168233",
"0.5690354",
"0.56773686",
"0.56585187",
"0.56361216",
"0.5624006",
"0.5623314",
"0.5622369",
"0.5622369",
"0.5622369",
"0.5622369",
"0.5622369",
"0.56128365",
"0.56101507",
"0.5607529",
"0.5583586",
"0.55823684",
"0.5579574",
"0.5575516",
"0.55632913",
"0.55616647"
] | 0.66662174 | 0 |
Get a center net instance. | def get_center_net(model_name,
classes,
pretrained=False,
ctx=mx.cpu(),
root=os.path.join('~', '.mxnet', 'models'),
**kwargs):
heads = OrderedDict([
('heatmap', {'num_output': classes, 'bias': -2.19}), # use bias = -log((1 - 0.1) / 0.1)
('wh', {'num_output': 2}),
('reg', {'num_output': 2})
])
head_conv_channel = 64
topk = 40
net = CenterNet(
heads=heads,
head_conv_channel=head_conv_channel,
topk=topk,
**kwargs)
if pretrained:
from gluoncv.model_zoo.model_store import get_model_file
net.load_parameters(get_model_file(model_name, tag=pretrained, root=root), ctx=ctx)
return net | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getInstance():\n return net()",
"def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None",
"def get_instance (self):\n instances = self.data['instances']\n if not len(instances):\n raise Exception, \"ArchivalObject: No Instances found\"\n for instance in instances:\n # print json.dumps(instance, indent=3)\n try:\n instance['sub_container']['top_container']\n return instance\n except:\n pass\n return None",
"def net(self):\n if self._net is None:\n self._net = Net(name=self.name)\n return self._net",
"def get_stored_network(cls):\n store = cls.get_store()\n try:\n network_dict = store['network']\n except KeyError:\n network_dict = {}\n network_name = network_dict.get(\n 'value', ChainID.MAINNET.name)\n network = ChainID[network_name]\n return network",
"def get_center(self):\n return self.center",
"def get_network_on_vc(options):\n datacenter = get_datacenter(options)\n networks = datacenter.network\n\n name = get_network_name(options)\n for network in networks:\n if re.search(name, network.name):\n return network",
"def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])",
"def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance",
"def retrieve(self, code):\n _, _, cost_center = self.http_client.get(\"/costcenters/{code}\".format(code=code))\n return cost_center",
"def getCenter(self):\n if self.__center is None:\n raise ValueError, \"Center is undefined.\"\n return self.__center",
"def get_instance(self, container, cls, **params):\n if not cls in self.instances:\n self.instances[cls] = self.create_instance(container, cls, **params)\n \n return self.instances[cls]",
"def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)",
"def get_instance(self, instance):\n return self._get(_instance.Instance, instance)",
"def get_datacenter(conn):\n datacenter_id = get_datacenter_id()\n\n for item in conn.list_datacenters()[\"items\"]:\n if item[\"id\"] == datacenter_id:\n return item\n\n raise SaltCloudNotFound(\n \"The specified datacenter '{}' could not be found.\".format(datacenter_id)\n )",
"def get_network(self):\n return self.get_ip_network()[-1]",
"def GetCenter(self):\n ...",
"def GetCenter(self):\n ...",
"def GetCenter(self):\n ...",
"def GetCenter(self):\n ...",
"def get_center(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_center()",
"def get_network(self):\n return self._network",
"def _get_instance(cls, configuration, auth_type):\n if configuration in cls._INSTANCES:\n return cls._INSTANCES[configuration]\n return cls._create_instance(configuration, auth_type)",
"def get_instance(self, contract_name: str) -> None:\n self._validate_name_and_references(contract_name)\n factory = self.contract_factories[contract_name]\n address = to_canonical_address(self.deployment_data[contract_name][\"address\"])\n contract_kwargs = {\n \"abi\": factory.abi,\n \"bytecode\": factory.bytecode,\n \"bytecode_runtime\": factory.bytecode_runtime,\n }\n return self.w3.eth.contract(address=address, **contract_kwargs)",
"def GetInstance():\n pass",
"def instance():\n global inst\n try:\n inst\n except:\n inst = BNVMAPI(None)\n return inst",
"def getInstance():\n if Car.inst is None: Car.inst = Car()\n return Car.inst",
"def get_instance(c: Config) -> NotionDAO:\n if c.notion_official_configured:\n result = APIv2(c)\n else:\n result = APIv1(c)\n return result",
"def get_instance(self, instance_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n items = [\r\n 'id',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'hostname',\r\n 'domain',\r\n 'createDate',\r\n 'modifyDate',\r\n 'provisionDate',\r\n 'notes',\r\n 'dedicatedAccountHostOnlyFlag',\r\n 'privateNetworkOnlyFlag',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n '''networkComponents[id, status, speed, maxSpeed, name,\r\n macAddress, primaryIpAddress, port,\r\n primarySubnet]''',\r\n 'lastKnownPowerState.name',\r\n 'powerState',\r\n 'status',\r\n 'maxCpu',\r\n 'maxMemory',\r\n 'datacenter',\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n 'lastOperatingSystemReload.id',\r\n 'blockDevices',\r\n 'blockDeviceTemplateGroup[id, name, globalIdentifier]',\r\n 'postInstallScriptUri',\r\n 'userData',\r\n '''operatingSystem[passwords[username,password],\r\n softwareLicense.softwareDescription[\r\n manufacturer,name,version,\r\n referenceCode]]''',\r\n 'hourlyBillingFlag',\r\n 'billingItem.recurringFee',\r\n 'tagReferences[id,tag[name,id]]',\r\n 'networkVlans[id,vlanNumber,networkSpace]',\r\n ]\r\n kwargs['mask'] = \"mask[%s]\" % ','.join(items)\r\n\r\n return self.guest.getObject(id=instance_id, **kwargs)",
"def get_cinder(self, version='2'):\n if self.cinder is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.cinder = cinderclient.Client(version,\n session=self.get_session(),\n interface=iface)\n return self.cinder"
] | [
"0.7030677",
"0.5973064",
"0.5955322",
"0.59289825",
"0.58522224",
"0.5824274",
"0.56849664",
"0.5666703",
"0.56177497",
"0.5609592",
"0.55093485",
"0.5500398",
"0.5492306",
"0.54684716",
"0.54643327",
"0.54516166",
"0.54325414",
"0.54325414",
"0.54325414",
"0.54325414",
"0.54270935",
"0.5426979",
"0.53994113",
"0.53875756",
"0.5376688",
"0.5344667",
"0.5334808",
"0.5323173",
"0.5305966",
"0.52279174"
] | 0.60560864 | 1 |
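
A quick aside on the `bias: -2.19` value in the heatmap head of the row above: its inline comment says the bias is `-log((1 - 0.1) / 0.1)`, the usual focal-loss prior initialisation. A self-contained check, where only the prior `pi = 0.1` is taken from that comment:

```python
# Focal-loss prior initialisation for the heatmap head, as noted in the row above.
import math

pi = 0.1                          # assumed prior probability of a positive (from the inline comment)
bias = -math.log((1 - pi) / pi)   # = -log(9)
print(round(bias, 4))             # -2.1972, quoted as -2.19 in the heads dict
```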
main function for inserting gateways | def main():
insert_gateway_values("hermes/bin/gateways.txt")
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n if len(sys.argv) != 5:\n print ('usage: %s <SRC_USER::SRC_PASSWD@@SRC_HOST> '\n '<DEST_USER:DEST_PASSWD@DEST_HOST> SRC_GW DEST_GW\\n'\n ' where\\n'\n ' HOST Aviatrix Controller hostname or IP\\n'\n ' USER Aviatrix Controller login username\\n'\n ' PASSWORD Aviatrix Controller login password\\n'\n ' GW name of a provisioned gateway\\n' % sys.argv[0])\n sys.exit(1)\n\n # connect to both controllers\n src_controller = get_controller_from_argument(sys.argv[1])\n dst_controller = get_controller_from_argument(sys.argv[2])\n\n # find the source gateway\n gw_name = sys.argv[3]\n src_gwy = src_controller.get_gateway_by_name('admin', gw_name)\n if not src_gwy:\n print 'Source gateway %s not found\\n' % (gw_name)\n return\n\n # find the destination gateway\n gw_name = sys.argv[4]\n dst_gwy = dst_controller.get_gateway_by_name('admin', gw_name)\n if not dst_gwy:\n print 'Destination gateway %s not found\\n' % (gw_name)\n return\n\n # clone the firewall policies and the FQDN filters\n clone_fw_rules(src_controller, src_gwy, dst_controller, dst_gwy)\n clone_fqdn_rules(src_controller, src_gwy, dst_controller, dst_gwy)",
"def main():\n example = {\n \"maybe\": True,\n \"gtags\": [\"one\", \"two\", \"three\"],\n \"current\": {\n \"type\": \"NUMERIC\",\n \"is_filter\": True,\n \"values\": {\n \"lower\": 33,\n \"upper\": 0,\n \"operation\": \"between\"\n }\n },\n \"loc\": {\n \"type\": \"GEO\",\n \"is_filter\": True,\n \"values\": {\n \"long\": 33,\n \"lat\": -10,\n \"distance\": 1.2,\n \"metric\": \"km\"\n }\n },\n \"exact_text\": {\n \"type\": \"TEXT\",\n \"is_filter\": True,\n \"values\": {\n \"term\": \"hello world\"\n }\n },\n }\n \n\n for _ in range(100):\n insertable = create_insertable(example)\n \n print(insertable)\n pass",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--tg_name', required=True,\n help='specify target group name', type=str)\n parser.add_argument('--gwlb_name', required=True,\n help='specify gateway load balancer name', type=str)\n parser.add_argument('--vpc_id', required=True,\n help='specify vpc id', type=str)\n parser.add_argument('--subnet_ids', nargs='+', required=True,\n help='specify subnet ids')\n parser.add_argument('--target_ids', nargs='+', required=True,\n help='specify target ids')\n\n args = parser.parse_args()\n ############################\n # Define script variables:\n ############################\n tg_name = args.tg_name\n gwlb_name = args.gwlb_name\n vpc_id = args.vpc_id\n subnet_ids = args.subnet_ids\n target_ids = args.target_ids\n\n tg1_args = {\n 'name': tg_name,\n 'protocol': 'GENEVE',\n 'port': 6081,\n 'healthchkproto': 'HTTP',\n 'healthchkport': '80',\n 'healthchkpath': '/',\n 'vpc_id': vpc_id,\n 'type': 'instance'\n }\n #############################\n # Target Group:\n tg1 = create_tg(**tg1_args)\n print(f\"TG ARN: {tg1[1]}\")\n # GWLB:\n gwlb1 = create_gwlb(gwlb_name, subnet_ids)\n print(f\"GWLB ARN: {gwlb1[1]}\")\n # Listener:\n listener1 = create_fwd_listener(gwlb1[1], tg1[1])\n print(f\"LISTENER ARN: {listener1[1]}\")\n # Register Targets:\n register_targets(tg1[1], target_ids[0])",
"def do_gateways(self, arg):\n bridge()",
"def main():\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)",
"def __init__(self,\n gateways: List['Gateway']) -> None:\n self.gateways = gateways",
"def test_append_two_wire_gate(self, circuit):\n gate = jet.GateFactory.create(\"CNOT\")\n circuit.append_gate(gate, wire_ids=[2, 3])\n assert gate.indices == [\"2-1\", \"3-1\", \"2-0\", \"3-0\"]\n assert list(circuit.operations)[-1] == jet.Operation(gate, [2, 3])\n assert list(circuit.wires) == [\n jet.Wire(0, depth=0, closed=False),\n jet.Wire(1, depth=0, closed=False),\n jet.Wire(2, depth=1, closed=False),\n jet.Wire(3, depth=1, closed=False),\n ]",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n\n ericsson_connect = {\n \"device_type\": \"ericsson_ipos\",\n \"ip\": \"1.1.1.1\",\n \"username\": \"admin\",\n \"password\": \"admin\",\n }\n\n net_connect = ConnectHandler(**ericsson_connect)\n output = net_connect.send_command(\"show ip int brief\")\n print(output)\n\n output_commit = net_connect.commit()\n print(output_commit)",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['STAGE'].values()))\n cur = conn.cursor()\n \n #remove the existing tables\n drop_tables(cur, conn)\n \n #replace the tables with new ones\n create_tables(cur, conn)\n \n #add missing postcode value into table\n default_missing_values(cur, conn)\n \n conn.close()",
"def main2():\n\t\n\tcu_locations = cu_locations_data();\n\t\n\tfor row in cu_locations:\n\t\tprint \"INSERT INTO contact ('ref_id') VALUES (%s);\" % ( row['location_id'] );",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} \\\n port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(config['CLUSTER']['HOST'], config['CLUSTER']['DB_NAME'], config['CLUSTER']['DB_USER'], config['CLUSTER']['DB_PASSWORD'], config['CLUSTER']['DB_PORT']))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n \n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n cur, conn = connect('dwh.cfg')\n \n set_schema = schema_queries[1]\n cur.execute(set_schema)\n \n print('Loading Staging Tables.')\n load_staging_tables(cur, conn)\n \n print('Inserting Rows.')\n insert_tables(cur, conn)\n\n \n conn.close()",
"def main():\n # This have specific paths to prevent abitrary binaries from being\n # executed. The \"gsi\"* utilities are configured to use either grid proxies\n # or ssh, automatically.\n remoteLoginCmd = \"/usr/bin/gsissh\"\n remoteCopyCmd = \"/usr/bin/gsiscp\"\n\n UNKNOWN_PLATFORM_EXIT_CODE = 10\n MISSING_PBS_CONFIG_EXIT_CODE = 20\n\n p = AllocatorParser(sys.argv[0])\n platform = p.getPlatform()\n\n creator = Allocator(platform, p.getArgs(), \"$HOME/.lsst/condor-info.py\")\n\n platformPkgDir = lsst.utils.getPackageDir(\"ctrl_platform_\"+platform)\n configName = os.path.join(platformPkgDir, \"etc\", \"config\", \"pbsConfig.py\")\n execConfigName = os.path.join(platformPkgDir, \"etc\", \"config\", \"execConfig.py\")\n\n creator.load(execConfigName)\n\n creator.loadPbs(configName)\n\n verbose = creator.isVerbose()\n \n pbsName = os.path.join(platformPkgDir, \"etc\", \"templates\", \"generic.pbs.template\")\n generatedPbsFile = creator.createPbsFile(pbsName)\n\n condorFile = os.path.join(platformPkgDir, \"etc\", \"templates\", \"glidein_condor_config.template\")\n generatedCondorConfigFile = creator.createCondorConfigFile(condorFile)\n\n scratchDirParam = creator.getScratchDirectory()\n template = Template(scratchDirParam)\n scratchDir = template.substitute(USER_HOME=creator.getUserHome())\n userName = creator.getUserName()\n \n hostName = creator.getHostName()\n\n utilityPath = creator.getUtilityPath()\n\n #\n # execute copy of PBS file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedPbsFile, userName, hostName, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute copy of Condor config file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedCondorConfigFile, userName, hostName, scratchDir, os.path.basename(generatedCondorConfigFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute qsub command on XSEDE node to perform Condor glide-in\n #\n cmd = \"%s %s@%s %s/qsub %s/%s\" % (remoteLoginCmd, userName, hostName, utilityPath, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteLoginCmd, hostName)\n sys.exit(exitCode)\n\n nodes = creator.getNodes()\n slots = creator.getSlots()\n wallClock = creator.getWallClock()\n nodeString = \"\"\n if int(nodes) > 1:\n nodeString = \"s\"\n print \"%s node%s will be allocated on %s with %s slots per node and maximum time limit of %s\" % (nodes, nodeString, platform, slots, wallClock)\n print \"Node set name:\"\n print creator.getNodeSetName()\n sys.exit(0)",
"def main():\n print \"Python has started\"\n #os.system(\"xterm -e \\\"pyhton Listen_link.py\\\"\") \n #print \"Started Listener\"\n tutorial = MoveGroupPythonIntefaceTutorial()\n model_info_prox = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)\n rospy.wait_for_service('/gazebo/get_link_state')\n rospy.wait_for_service('/gazebo/get_model_state')\n # writer below is to pass sings to out Link Listener and recording program\n writer = rospy.Publisher('writer', String, queue_size=1)\n filestring = rospy.Publisher('filestring', String, queue_size=1)\n rospy.set_param('/move_group/trajectory_execution/allowed_start_tolerance',0)\n hello_str = \"Test listener is working %s\" % rospy.get_time()\n file_str = \"/NewFile\"\n writer.publish(hello_str)\n filestring.publish(file_str)\n print \"Generating a population\"\n #pop = Population(20,False,0) #should be an even number 10-20 \n pop = Population(20,True,941) #should be an even number 10-20 \n #\n if pop.gencount == 0:\n pop.gencount = pop.gencount + 1\n grav = GravityControl()\n velocity = VelocityControl()\n grav.init_values()\n grav.change_gravity(0.0,0.0,-9.8)\n\n generation = pop.gencount\n if pop.res_sim == False:\n pop.generate_inds(pop.numberinds,pop.numpoints,pop.xmin,pop.xmax,pop.ymin,pop.ymax,pop.zmin,pop.zmax,pop.vmin,pop.vmax,generation)\n pop.gen_wp()\n if pop.res_sim == True:\n print \"We have had a restart, attempting to load individuals\"\n pop.load_inds()\n pop.gen_wp()\n pop.gencount = 56\n check_valid_waypoints(pop.current_ind_instances)\n print \"Success adding table \"\n time.sleep(1)\n print \"============ Please ensure the Recorder is started and then Press `Enter` to execute joint state goal ...\"\n raw_input()\n tutorial.go_to_joint_state(0.0,-0.5,0.0,0.0,0.0,0.0) # note Rads each joint \n # The main loop for the program is below\n while (pop.gencount<100):\n \n grav.change_gravity(0.0,0.0,-9.8)\n print \"Starting for population count number\", pop.gencount\n run_simulation(tutorial,pop,pop.gencount,filestring,writer,velocity)\n #make gravity Heavier to change simulation of 'real' slightly\n grav.change_gravity(0.0,0.0,-10.8)\n run_real(tutorial,pop,pop.gencount,filestring,writer,velocity)\n # Evaluate, also calculates Euclidean based on collected data points between sim and real\n evaluate_pop(pop) # This is a GA function call to evauate this population before the next generatio \n \n\n print \"============ Exiting the Loop!\"",
"def test_append_one_wire_gate(self, circuit):\n gate = jet.GateFactory.create(\"H\")\n circuit.append_gate(gate, wire_ids=[3])\n assert gate.indices == [\"3-1\", \"3-0\"]\n assert list(circuit.operations)[-1] == jet.Operation(gate, [3])\n assert list(circuit.wires) == [\n jet.Wire(0, depth=0, closed=False),\n jet.Wire(1, depth=0, closed=False),\n jet.Wire(2, depth=0, closed=False),\n jet.Wire(3, depth=1, closed=False),\n ]",
"def bridge(gwc = 0, brc = bridge_int):\n# bridge interface list\n br_interface = []\n# bridge ip addresses list\n gw_ipaddr = []\n# bridge network list\n gw_network = []\n# gatweway start number list\n gw_number = 0\n\n# fill all lists for bridge\n for i in netifaces.ifaddresses(bridge_int)[netifaces.AF_INET]:\n br_interface.append([gw_number, ' ', i['addr'], i['netmask']])\n gw_ipaddr.append(i['addr'])\n gw_network.append(i['netmask'])\n gw_number = gw_number + 1\n br_interface[0][1] = bridge_int\n\n if gwc == 'check':\n return (br_interface, gw_ipaddr, gw_network)\n\n# print jadm gateways table\n br_menu = [\"Number\", \"Bridge name\", \"Gateway IP Address\", \"Gatewy Network Mask\"]\n print tabulate(br_interface, br_menu)\n\n# return bridge interface name, ip addresses and network mask\n return (br_interface, gw_ipaddr, gw_network)",
"def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id):\n\n # Verify the ip address format \n if is_valid_ipv4_address(local_ip) is False:\n ctx.fail(\"Given local ip address {} is invalid. Please enter a valid local ip address !!\".format(local_ip)) \n\n if is_valid_ipv4_address(global_ip) is False:\n ctx.fail(\"Given global ip address {} is invalid. Please enter a valid global ip address !!\".format(global_ip))\n \n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = \"STATIC_NAT\"\n key = global_ip\n dataKey1 = 'local_ip'\n dataKey2 = 'nat_type'\n dataKey3 = 'twice_nat_id'\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == local_ip:\n click.echo(\"Trying to add static nat entry, which is already present.\")\n entryFound = True\n\n if nat_type == 'snat':\n ipAddress = local_ip\n else:\n ipAddress = global_ip\n\n if isIpOverlappingWithAnyStaticEntry(ipAddress, 'STATIC_NAPT') is True:\n ctx.fail(\"Given entry is overlapping with existing NAPT entry !!\")\n\n if isOverlappingWithAnyDynamicEntry(ipAddress) is True:\n ctx.fail(\"Given entry is overlapping with existing Dynamic entry !!\")\n\n if entryFound is False:\n counters_db = SonicV2Connector()\n counters_db.connect(counters_db.COUNTERS_DB)\n snat_entries = 0\n max_entries = 0\n exists = counters_db.exists(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values')\n if exists:\n counter_entry = counters_db.get_all(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values')\n if 'SNAT_ENTRIES' in counter_entry:\n snat_entries = counter_entry['SNAT_ENTRIES']\n if 'MAX_NAT_ENTRIES' in counter_entry:\n max_entries = counter_entry['MAX_NAT_ENTRIES']\n\n if int(snat_entries) >= int(max_entries):\n click.echo(\"Max limit is reached for NAT entries, skipping adding the entry.\")\n entryFound = True\n\n if entryFound is False:\n count = 0\n if twice_nat_id is not None:\n count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, table, count)\n count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, None)\n if count > 1:\n ctx.fail(\"Same Twice nat id is not allowed for more than 2 entries!!\")\n\n if nat_type is not None and twice_nat_id is not None:\n config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type, dataKey3: twice_nat_id}) \n elif nat_type is not None:\n config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type})\n elif twice_nat_id is not None:\n config_db.set_entry(table, key, {dataKey1: local_ip, dataKey3: twice_nat_id})\n else:\n config_db.set_entry(table, key, {dataKey1: local_ip})",
"def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)",
"def __init__(self, input0, input1, output) :\n Gate.__init__(self, [input0,input1], output)",
"def addTargets(v):\n if v.spoof:\n print(\" \" + bcolors.WARNING + \"Turn off spoofer first\" + bcolors.ENDC)\n time.sleep(1)\n return\n try:\n target = input(\" Enter IP address of targets separated with spaces: \")\n except KeyboardInterrupt:\n return\n\n target = target.split(\" \")\n\n if len(v.targets) == 0:\n try:\n gw = input(\" Enter IP address of router (leave blank if same subnet): \")\n except KeyboardInterrupt:\n return\n if validIPAddress(gw):\n tmp = spoofer.get_mac(gw)\n if tmp:\n v.targets.append(gw)\n v.macs.append(tmp)\n else:\n print(\" \" + bcolors.WARNING + \"Did not add \" + gw + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(2)\n return\n else:\n gw = getGwIp(target[0])\n if gw:\n tmp = spoofer.get_mac(gw)\n if tmp:\n v.targets.append(gw)\n v.macs.append(tmp)\n else:\n if gw:\n print(\" \" + bcolors.WARNING + \"Did not add \" + gw + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(1)\n return\n\n for x in target:\n if validIPAddress(x):\n tmp = spoofer.get_mac(x)\n if tmp:\n v.targets.append(x)\n v.macs.append(x)\n else:\n print(\" \" + bcolors.WARNING + \"Did not add \" + x + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(1)\n else:\n print(\" \" + bcolors.WARNING + x + \" is not a valid ip address\" + bcolors.ENDC)\n time.sleep(1)\n\n return",
"def test_add_gate(self):\n sched = Schedule()\n sched.append(Play(Waveform(np.ones(5)), DriveChannel(0)))\n inst_map = InstructionScheduleMap()\n\n inst_map.add(U1Gate(0), 1, sched)\n inst_map.add(U1Gate(0), 0, sched)\n\n self.assertIn(\"u1\", inst_map.instructions)\n self.assertEqual(inst_map.qubits_with_instruction(U1Gate(0)), [0, 1])\n self.assertTrue(\"u1\" in inst_map.qubit_instructions(0))\n\n with self.assertRaises(PulseError):\n inst_map.add(U1Gate(0), (), sched)\n with self.assertRaises(PulseError):\n inst_map.add(U1Gate(0), 1, \"not a schedule\")",
"def _process_connections(self, connections):\n # create connection\n for con in connections:\n self._add_connection(con)\n\n for inp_lab, inp in self.inputs.items():\n # use self._find_routes() to find routes from input inp\n routes_inp = self._find_routes(inp)\n # create routes\n for route in routes_inp:\n self._add_route(route)\n # sort the routes dictionary\n self._sort_routes()",
"def _create_special_connections(self):\n\t\tfor connection in self._infoSpecialConnections:\n\t\t\t# List of source cells ids\n\t\t\tsourcesId = self.cellsId[connection[0]][connection[1]]\n\t\t\t# gather the sources all together\n\t\t\tsourcesId = comm.gather(sourcesId,root=0)\n\t\t\tif rank==0: sourcesId = sum(sourcesId,[])\n\t\t\tsourcesId = comm.bcast(sourcesId,root=0)\n\t\t\t# List of taget cells ids\n\t\t\ttargetsId = self.cellsId[connection[2]][connection[3]]\n\t\t\t# Ratio of connection\n\t\t\tconRatio = connection[4]\n\t\t\t# Number of connections\n\t\t\tconNum = int(connection[5])\n\t\t\t# Weight of connections\n\t\t\tconWeight = float(connection[6])\n\t\t\t# Type of synapse\n\t\t\tsynType = connection[7]\n\t\t\t# connect sources to targets\n\t\t\tself._connect(sourcesId,targetsId,conRatio,conNum,conWeight,synType)",
"def main():\r\n db = connect_database()\r\n with db:\r\n if sys.argv[1] == \"-s\":\r\n select_all(db, sys.argv[2])\r\n elif sys.argv[1] == \"-i\":\r\n cus_data = []\r\n for i in range(2, len(sys.argv)):\r\n cus_data.append(sys.argv[i])\r\n insert_customer(db, cus_data)\r\n elif sys.argv[1] == \"-c\":\r\n create_tables()\r\n elif sys.argv[1] == \"-pw\":\r\n pop_waiting(db, sys.argv[2])\r\n elif sys.argv[1] == \"-ph\":\r\n pop_help(db, sys.argv[2])\r\n elif sys.argv[1] == \"-r\":\r\n refresh_tables(db)\r\n elif sys.argv[1] == \"-e\":\r\n export_helped_table(db)\r\n else:\r\n print errorArgument\r\n db.close()",
"def __init__(self, input, output) :\n Gate.__init__(self, [input], output)",
"def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()"
] | [
"0.616866",
"0.58173704",
"0.57266355",
"0.5699523",
"0.5562407",
"0.5561937",
"0.5531044",
"0.5519377",
"0.5519377",
"0.5514505",
"0.55023265",
"0.55012304",
"0.5470605",
"0.5431577",
"0.54253876",
"0.5411088",
"0.5408261",
"0.5388444",
"0.538794",
"0.53809285",
"0.53729486",
"0.5361862",
"0.53339076",
"0.532673",
"0.5297596",
"0.5290611",
"0.52882195",
"0.52874476",
"0.5261856",
"0.52595985"
] | 0.74428517 | 0 |
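
The `main` in the row above only delegates to `insert_gateway_values`, which is not shown in the dump. A hypothetical sketch of such a helper, assuming one gateway address per line in the text file and a local SQLite table; the database name and schema are illustrative, not taken from the dataset:

```python
import sqlite3

def insert_gateway_values(path, db_path="hermes.db"):
    # Hypothetical helper: read one gateway address per line and insert it into a table.
    with open(path) as fh:
        gateways = [line.strip() for line in fh if line.strip()]
    with sqlite3.connect(db_path) as conn:
        conn.execute("CREATE TABLE IF NOT EXISTS gateways (address TEXT PRIMARY KEY)")
        conn.executemany(
            "INSERT OR IGNORE INTO gateways (address) VALUES (?)",
            [(gw,) for gw in gateways],
        )
```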
Only defining here because simply including list_display_links = [] above does not work; it defaults to linking from items in AccessTime col | def __init__(self, *args, **kwargs):
super(AccessTimeAdmin, self).__init__(*args, **kwargs)
# There's no need to show the page for an individual AccessTime, so no field should link to it.
self.list_display_links = (None, ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_list_display(self, request):\n list_display = self.list_display\n\n if 'admin_created' not in list_display:\n list_display += ('admin_created', )\n if 'admin_modified' not in list_display:\n list_display += ('admin_modified', )\n\n return list_display",
"def format_list(self, at_char, user, list_name):\r\n return u'<a href=\"http://%s/%s/%s\" data-list=\"\">%s%s/%s</a>' \\\r\n % (self.domain, user, list_name, at_char, user, list_name)",
"def changelist_view(self, request, extra_context=None):\n if request.user.has_perm('deflect.list_all'):\n self.list_filter = self._list_filter + ('creator__username',)\n self.list_display = self._list_display + ('creator',)\n else:\n self.list_filter = self._list_filter\n self.list_display = self._list_display\n return super(ShortURLAdmin, self).changelist_view(request, extra_context=extra_context)",
"def _link_items(self):\n pass",
"def getExpandedLinks():",
"def get_list_display(self, *args, **kwargs):\n list_display = super(\n BitemporalModelAdmin, self).get_list_display(*args, **kwargs)\n return list(list_display) + [\n 'valid_datetime_start', 'valid_datetime_end',\n 'transaction_datetime_start', 'transaction_datetime_end']",
"def _formatListEntry(self, iEntry):\n\n from testmanager.webui.wuiadmin import WuiAdmin\n oEntry = self._aoEntries[iEntry]\n\n aoActions = [\n WuiTmLink('Details', WuiAdmin.ksScriptName,\n { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildSrcDetails,\n BuildSourceData.ksParam_idBuildSrc: oEntry.idBuildSrc,\n WuiAdmin.ksParamEffectiveDate: self._tsEffectiveDate, }),\n ];\n if self._oDisp is None or not self._oDisp.isReadOnlyUser():\n aoActions += [\n WuiTmLink('Clone', WuiAdmin.ksScriptName,\n { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildSrcClone,\n BuildSourceData.ksParam_idBuildSrc: oEntry.idBuildSrc,\n WuiAdmin.ksParamEffectiveDate: self._tsEffectiveDate, }),\n ];\n if isDbTimestampInfinity(oEntry.tsExpire):\n aoActions += [\n WuiTmLink('Modify', WuiAdmin.ksScriptName,\n { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildSrcEdit,\n BuildSourceData.ksParam_idBuildSrc: oEntry.idBuildSrc } ),\n WuiTmLink('Remove', WuiAdmin.ksScriptName,\n { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildSrcDoRemove,\n BuildSourceData.ksParam_idBuildSrc: oEntry.idBuildSrc },\n sConfirm = 'Are you sure you want to remove build source #%d?' % (oEntry.idBuildSrc,) )\n ];\n\n return [ oEntry.idBuildSrc,\n oEntry.sName,\n oEntry.sDescription,\n oEntry.sProduct,\n oEntry.sBranch,\n self._getSubList(oEntry.asTypes),\n self._getSubList(oEntry.asOsArches),\n oEntry.iFirstRevision,\n oEntry.iLastRevision,\n utils.formatIntervalSeconds2(oEntry.cSecMaxAge) if oEntry.cSecMaxAge is not None else None,\n aoActions,\n ]",
"def get_list_display(self, request):\n delete = partial(self.remove, request=request)\n delete.short_description = \"\"\n delete.allow_tags = True\n list_display = list(self.list_display)\n for index, field_name in enumerate(list_display):\n field = getattr(self.model, field_name, None)\n if hasattr(field, \"related\"):\n list_display.remove(field_name)\n list_display.insert(\n index, self.display_add_link(request, field.related))\n list_display.append(delete)\n return list_display",
"def adjust_display(self, display: typing.List[typing.List[str]]):",
"def _parse_links(self, item, start, links_list):\n result_list = []\n target_str_1 = start.strftime(\"%m-%d-%Y\").replace(\" 0\", \" \")\n target_str_2 = start.strftime(\"%m-%d-%y\").replace(\" 0\", \" \")\n for item in links_list:\n if item[\"date\"] in target_str_1 or item[\"date\"] in target_str_2:\n new_dict = {}\n new_dict[\"href\"] = item[\"href\"]\n new_dict[\"title\"] = item[\"title\"]\n result_list.append(new_dict)\n return result_list",
"def _prepare_links_table(self):\n\n links_tbl = OrderedDict()\n for colname in itertools.islice(self._pinfos, 1, None):\n links_tbl[colname] = {}\n links_tbl[colname][\"name\"] = f\"{colname}\"\n fname = colname.replace(\"%\", \"_pcnt\") + \".html\"\n links_tbl[colname][\"fname\"] = fname\n links_tbl[colname][\"hlink\"] = f\"<a href=\\\"{fname}\\\">{colname}</a>\"\n\n return links_tbl",
"def changelist_view(self, request, extra_context=None):\n if request.user.user_type == User.ADMIN_CEA:\n self.list_display = ('user', 'cea', 'booking', 'request_status')\n elif request.user.user_type == User.ADMIN_CRC:\n self.list_display = ('user', 'crc', 'booking', 'request_status')\n elif request.user.user_type == User.EXPRESS_USER:\n self.list_display = ('user', 'payment_type', 'request_status', 'credit_status', 'booking')\n else:\n self.list_display = ('user', 'booking','cea', 'crc', 'transit', 'payment_type', 'request_status',)\n return super(RequestAdmin, self).changelist_view(request, extra_context)",
"def listing(self):\r\n listing = LinkListing(self.builder_obj, show_nums = self.show_nums)\r\n return listing.listing()",
"def links(self):\n return self._links_tpl.expand(self._identity, self._record)",
"def get_list_display(self, request):\n list_display = []\n for field_name in self.list_display:\n try:\n db_field = self.model._meta.get_field(field_name)\n if isinstance(db_field, BooleanField):\n field_name = boolean_switch_field(db_field)\n except FieldDoesNotExist:\n pass\n list_display.append(field_name)\n return list_display",
"def old_list_links(self, link_list, dd):\n link_names = []\n for link in link_list:\n if \"subgroup\" in link:\n sublinks = list(link[\"subgroup\"])\n for sublink in sublinks:\n link_names.append(sublink[\"name\"])\n else:\n link_names.append(link[\"name\"])\n return link_names",
"def print_on_display(self, a_display, a_last_time_display): #pylint: disable-msg=R0201 \n current_time = datetime.datetime.utcnow()\n if not a_last_time_display:\n a_display.print_screen(self.mem_db, current_time, self._sort_criteria)\n return current_time\n else:\n if current_time - a_last_time_display > datetime.timedelta(seconds=2):\n \n #clean database \n self.remove_expired_records(self.mem_db)\n \n a_display.print_screen(self.mem_db, current_time, self._sort_criteria)\n return current_time\n else:\n return a_last_time_display",
"def links(self):\n\t\treturn self.list_of_links",
"def _parse_links(self, response, start):\n links = self.document_date_map[start.date()]\n for link in response.css(\".agenda-min-pres .field a\"):\n link_url = response.urljoin(link.xpath(\"@href\").extract_first())\n title = link.xpath(\"./text()\").extract_first()\n if title.strip().startswith(\"Agenda\"):\n title = \"Agenda\"\n links.append(\n {\"title\": re.sub(r\"\\s+\", \" \", title).strip(), \"href\": link_url}\n )\n return links",
"def _parse_link_date_map(self, response):\n link_date_map = defaultdict(list)\n for link in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16\"\n )[:1].css(\"a\"):\n link_str = link.xpath(\"./text()\").extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append(\n {\n \"title\": re.sub(r\"\\s+\", \" \", link_str.split(\" – \")[-1]).strip(),\n \"href\": link.attrib[\"href\"],\n }\n )\n for section in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel\"\n ):\n year_str = section.css(\".vc_tta-title-text::text\").extract_first().strip()\n for section_link in section.css(\"p > a\"):\n link_str = section_link.xpath(\"./text()\").extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append(\n {\n \"title\": re.sub(\n r\"\\s+\", \" \", link_str.split(\" – \")[-1]\n ).strip(),\n \"href\": section_link.xpath(\"@href\").extract_first(),\n }\n )\n return link_date_map",
"def getLink(self):",
"def dt_links(row = 1,\n tableID = \"list\",\n quiet = True\n ):\n config = current.test_config\n browser = config.browser\n\n links = []\n # loop through each column\n column = 1\n while True:\n td = \".//*[@id='%s']/tbody/tr[%s]/td[%s]\" % (tableID, row, column)\n try:\n elem = browser.find_element_by_xpath(td)\n except:\n break\n # loop through looking for links in the cell\n cnt = 1\n while True:\n link = \".//*[@id='%s']/tbody/tr[%s]/td[%s]/a[%s]\" % (tableID, row, column, cnt)\n try:\n elem = browser.find_element_by_xpath(link)\n except:\n break\n cnt += 1\n if not quiet:\n s3_debug(\"%2d) %s\" % (column, elem.text))\n links.append([column,elem.text])\n column += 1\n return links",
"def __init__(self,name,oneDatumOnly=False):\r\n Link.__init__(self)\r\n self.name = name\r\n self.links = Links()\r\n self.oneDatumOnly = oneDatumOnly",
"def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def get_changelist_link_css(self, job_name):\n return ['addlink']",
"def linkify(field_name):\n\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify",
"def AddListReferencesFormat(parser):\n parser.display_info.AddFormat(\"\"\"\n table(\n firewallPolicy,\n rulePriority\n )\n \"\"\")",
"def methods():\n list_groups_text = '<a href=\"/groups\">List Groups</a>'\n list_users_text = '<a href=\"/users\">List Users</a>'\n page_links = list_groups_text + \"<br>\" + list_users_text\n return page_links",
"def get_left_panel_links(self):\n status_list = {}\n link_list = (self.reports, self.rewards, self.catalogs, self.campaigns, self.loyalties,\n self.merchants, self.customer_management, self.bulk_actions, self.settings,\n self.business_intelligence)\n for link in link_list:\n if find_element(self.browser, link):\n status_list[link[1]] = 'Access'\n else:\n status_list[link[1]] = 'No Access'\n return status_list",
"def _getListing(self):\n\n # lets assure consistent litsting order\n items = self._items.items()\n items.sort()\n return [ \"%s%s%s: %s\" % (_def_sep, str(x[1]), _def_sep, x[1].__doc__)\n for x in items ]"
] | [
"0.61103547",
"0.57638454",
"0.5698465",
"0.5654802",
"0.55889523",
"0.5581935",
"0.55087066",
"0.54733235",
"0.5446723",
"0.5401263",
"0.5352148",
"0.5347162",
"0.53248256",
"0.52955914",
"0.5181165",
"0.5177439",
"0.5163127",
"0.5143022",
"0.5088965",
"0.5075256",
"0.50626796",
"0.50308657",
"0.5010939",
"0.4999317",
"0.4951118",
"0.49492496",
"0.48856732",
"0.48697925",
"0.4858742",
"0.48577315"
] | 0.70390844 | 0 |
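
For the row above, a minimal Django admin sketch of the same idiom; the model and field names are illustrative, and only the `(None, )` assignment mirrors the document code:

```python
from django.contrib import admin

class AccessTimeAdmin(admin.ModelAdmin):
    list_display = ("user", "timestamp")   # illustrative columns

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per the row's query: an empty list_display_links falls back to linking the
        # first column, so a tuple containing None is used to suppress links entirely.
        self.list_display_links = (None,)
```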
matplotlib key press event. Close all figures when q is pressed | def press(self, event):
if event.key == "q":
self.exit_event.set()
if event.key == " ":
self.plot_paused = not self.plot_paused
print("Plot is paused:", self.plot_paused) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _quit_figure(event):\n\tif event.key == 'q':\n\t\tplt.close(event.canvas.figure)",
"def qpressed(): #QUITTNG FUNCTION\n #print(\"q pressed\")\n sys.exit()",
"def end(self, event):\n plt.close()",
"def kill(self):\r\n plt.close(self.fig)",
"def on_key_press(self, key):\n if key == 'esc':\n self.backtrack()\n elif key in ['f1', '?']:\n self.open(HelpPane(self._get_current_pane()))",
"def auto_close_all_figures(request):\n if \"matplotlib\" in request.keywords:\n plt.close(\"test\")\n plt.close(\"reference\")",
"def handle_close(event):\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')",
"def on_clicked(event):\n\n if event.key == \"x\":\n # Use this os._exit(0) to close whole window, even when playing\n os._exit(0)\n\n if event.key == \"s\":\n # Get time to define image's name\n now = datetime.now()\n current_time = now.strftime(\"%H-%M-%S\")\n plot_name = \"Plot\" + \"-\" + current_time\n\n # Remove left title, then save image\n pyplot.title(\"\", loc=\"left\", pad=20)\n fig.savefig(\n \"%s%s%s\"\n % (\n CONS[\"OUTPUT_PHOTO_DIRECTORY\"],\n plot_name,\n CONS[\"PHOTO_TYPE\"],\n ),\n transparent=False,\n dpi=300,\n )\n\n # Use this exit(0) to prevent exiting when playing the plot\n # but allow closing when plotting finishes\n exit(0)",
"def figure_key_press(self, event):\n\n # Check if we are waiting for an exclusion region first\n # (This means the mouse has been clicked, but not released in-axis yet)\n try:\n self._exclude_selected_region_signal\n except AttributeError:\n None\n else:\n return None\n\n # Show a new order.\n if event.key in (\"left\", \"right\"):\n offset = 1 if event.key == \"right\" else -1\n self.order_slide.setValue(self.order_slide.value() + offset)\n self.update_order_figure()\n\n return None\n\n # Scale the continuum up/down.\n if event.key in (\"up\", \"down\"):\n scale = self._cache[\"input\"].get(\"scale\", 1.0)\n sign = +1 if event.key == \"up\" else -1\n\n self._cache[\"input\"][\"scale\"] = scale + sign * 0.01\n\n self.fit_continuum(True)\n self.draw_continuum(True)\n\n return None\n\n\n # 'd': No normalization for this order.\n if event.key in (\"d\", \"D\"):\n try:\n idx, session = self.current_order_index, self.parent.session\n\n except AttributeError:\n return None\n\n session.metadata[\"normalization\"][\"continuum\"][idx] = 1\n session.metadata[\"normalization\"][\"normalization_kwargs\"][idx] = {}\n\n self.draw_continuum(True)\n\n return None\n\n\n # 'c': Clear the scale, excluded regions and points for this order.\n if event.key in \"cC\":\n for key in (\"scale\", \"exclude\", \"additional_points\"):\n if key in self._cache[\"input\"]:\n del self._cache[\"input\"][key]\n\n # Force refit.\n self.fit_continuum(clobber=True)\n self.draw_continuum(refresh=False)\n self.update_continuum_mask(refresh=True)\n self.norm_plot.reset_zoom_limits()\n\n return True\n\n\n # 'r': Reset the zoom limits without refitting/clearing masks\n if event.key in \"rR\":\n self.norm_plot.reset_zoom_limits()\n self.draw_continuum(refresh=False)\n self.update_continuum_mask(refresh=True)\n\n return True\n\n\n # 'f': Refit without resetting the zoom limits\n if event.key in \"fF\":\n # Force refit.\n self.fit_continuum(clobber=True)\n self.draw_continuum(refresh=False)\n self.update_continuum_mask(refresh=True)\n\n return True",
"def terminate(self):\n plt.close('all')",
"def debounced_key_release(event):\n # print('Debounced release', repr(event.key))\n key_indicator.set_text('')\n fig.canvas.draw()",
"def close(self):\n curses.nocbreak()\n self.screen.keypad(0)\n curses.echo()\n curses.endwin()",
"def close(self):\n curses.nocbreak()\n self.screen.keypad(0)\n curses.echo()\n curses.endwin()",
"def closeEvent(self, event):\n self._renderer.plotter.close()\n self.close()",
"def on_press_escape(self, event):\n del event\n self.destroy()",
"def _close_figure(self):\n if self.disp_images:\n plt.show()\n else:\n plt.close()",
"def quit (event=None):\n root.destroy ()",
"def _on_key_release(self, event):",
"def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdscr.keypad(False)\n self.stdscr.nodelay(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()",
"def close(self):\n\n if self.fig:\n plt.close('all')\n plt.ioff()",
"def ev_quit(self, event: Quit) -> None:",
"def Quit(self, event):\n pass",
"def keyReleaseEvent (self, event):\n super(DiagramScene, self).keyReleaseEvent(event)",
"def _key_press_event(self, event):\n if event.key() == 'escape':\n self.close()\n\n if event.text() == 'h':\n self._show_help()\n\n if event.text() in ('=', '+', '-'):\n self._zoom(sign=-2 * (event.text() == '-') + 1, draw=True)\n\n # Changing slices\n if event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down,\n QtCore.Qt.Key_Left, QtCore.Qt.Key_Right,\n QtCore.Qt.Key_Comma, QtCore.Qt.Key_Period,\n QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown):\n ras = np.array(self._ras)\n if event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down):\n ras[2] += 2 * (event.key() == QtCore.Qt.Key_Up) - 1\n elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Right):\n ras[0] += 2 * (event.key() == QtCore.Qt.Key_Right) - 1\n else:\n ras[1] += 2 * (event.key() == QtCore.Qt.Key_PageUp or\n event.key() == QtCore.Qt.Key_Period) - 1\n self._set_ras(ras)",
"def close(self):\r\n if plt.fignum_exists(num=1):\r\n plt.close()",
"def close(fig=None):\n plt.close(fig)",
"def clickQuit(self, event):\n self.quitFlag = True",
"def handle_close(evt):\n plt.tight_layout()\n plt.savefig('figure.pdf')",
"def term():\n curses.endwin()\n unicornhathd.off()",
"def event_handler(self, event):\n if event.type == pygame.QUIT:\n self.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.exit()"
] | [
"0.84586275",
"0.6683859",
"0.65130144",
"0.6340119",
"0.6235164",
"0.62336737",
"0.6173516",
"0.61614585",
"0.6143306",
"0.6141613",
"0.6122026",
"0.609293",
"0.609293",
"0.60200125",
"0.6017953",
"0.60134417",
"0.60088164",
"0.59827036",
"0.5974096",
"0.59326994",
"0.59242654",
"0.591504",
"0.59002906",
"0.5824554",
"0.58205926",
"0.5819339",
"0.5804975",
"0.57781774",
"0.5775189",
"0.5769998"
] | 0.70938265 | 1 |
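
A minimal, self-contained sketch of how a handler like the `press` method above gets attached to a figure through Matplotlib's event API; the plotted data is a placeholder:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

def on_key(event):
    # Mirrors the row above: 'q' quits; space would toggle a pause flag in the full class.
    if event.key == "q":
        plt.close("all")

fig.canvas.mpl_connect("key_press_event", on_key)
plt.show()
```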
Interpolate between two arrays by taking the mean of the two arrays. | def interpolation_array(a1, a2):
return (a1+a2)/2. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interpolate_and_average(xs, ys, interop_points=None, confidence_interval=False):\n # Get the xs of shortest curve\n max_min_x = max(x.min() for x in xs)\n min_max_x = min(x.max() for x in xs)\n if interop_points is None:\n # Interop points according to curve with \"least resolution\"\n interop_points = min(x.shape[0] for x in xs)\n\n new_x = np.linspace(max_min_x, min_max_x, interop_points)\n new_ys = []\n\n for old_x, old_y in zip(xs, ys):\n new_ys.append(np.interp(new_x, old_x, old_y))\n\n # Average out\n # atleast_2d for case when we only have one reptition\n new_ys = np.atleast_2d(np.array(new_ys))\n new_y = np.mean(new_ys, axis=0)\n std_y = np.std(new_ys, axis=0)\n\n if confidence_interval:\n interval = 1.96 * (std_y / np.sqrt(len(xs)))\n lower_bound = new_y - interval\n upper_bound = new_y + interval\n return new_x, new_y, std_y, lower_bound, upper_bound\n else:\n return new_x, new_y, std_y",
"def interpolate(x1, x2, u, N):\n \n # finding the magnitude of each component\n a1 = np.matmul(x1, u)\n a2 = np.matmul(x2, u)\n\n ims = [np.matmul(u, t * a1 + (1 - t) * a2) \\\n for t in np.linspace(0, 1, N)]\n\n return np.stack(ims, 0)",
"def avg(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n \n return (u + v) / 2.0",
"def _mean_diff(x, y):\n return np.mean(x) - np.mean(y)",
"def _interp_array(self, start: np.ndarray, end: np.ndarray,\n num_steps: int):\n alpha = np.linspace(0., 1., num_steps)\n beta = 1 - alpha\n return np.einsum('a,bc->abc', alpha, end) + np.einsum('a,bc->abc', beta,\n start)",
"def mean(vals):",
"def interpolation_matrix(m):\n return np.nanmean(m,axis=1)",
"def interpolate(a, b):\n x = 1\n i = 1\n f = b[0]\n while i < n:\n b = b*(x-a[i])\n i += 1\n f += (b[i] - f(a[i]))/a[i]) * b\n return f",
"def d_mean(x, y):\n return (x + y) / 2",
"def mean(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return a/(a + b)",
"def _interpolate(self, kps1: List[List[kp]], kps2: List[List[kp]]) -> np.ndarray:\n interpolated_kps = []\n for i in range(len(kps1)):\n # If one of the two points is empty -> Not interpolate\n if len(kps1[i]) != 0 and len(kps2[i]) != 0:\n interpolated_coords = np.linspace(np.array(kps1[i]), np.array(kps2[i]), num=3).tolist()\n interpolated_kps.append(interpolated_coords[1])\n else:\n interpolated_kps.append([None, None, None])\n return np.array(interpolated_kps)",
"def get_mean_offset(a, b):\n off = np.abs(a - b)\n return np.sum(off) / b.sum()",
"def mean(*a):\n return numpy.mean(*a) # pylint: disable=no-value-for-parameter",
"def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y",
"def interp1d_stair_aver(x, y): #TODO: deal with the case x not sorted\n def f(xp):\n yp=np.empty(np.size(xp)-1)\n xmod=x[~(np.isnan(x)+np.isnan(y))]\n ymod=y[~(np.isnan(x)+np.isnan(y))]\n yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1]))))\n g=interp1d(xmod,yint, bounds_error=False, fill_value=np.nan)\n# yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n return yp\n\n return f",
"def incremental_mean(x1, x2):\n n_a, mean_a = x1\n n_b, mean_b = x2\n n_ab = n_a + n_b # Total samples\n mean_ab = ((mean_a * n_a) + (mean_b * n_b)) / n_ab # Averaged mean\n return n_ab, mean_ab",
"def interp1d_stair_aver_withnan(x, y): #TODO: deal with the case x not sorted\n def f(xp):\n xmod=x[~(np.isnan(x)+np.isnan(y))]\n ymod=y[~(np.isnan(x)+np.isnan(y))]\n yp=np.empty(np.size(xp)-1)\n yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1]))))\n g=interp1d(xmod,yint, bounds_error=False, fill_value=np.nan)\n# yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n for i in range(np.size(xp)-1):\n if np.isnan(y[np.where((x>=xp[i])*(x<xp[i+1]))]).all():\n yp[i]=np.nan\n return yp\n\n return f",
"def create_interpolate_prior_mean_fun(final_time, prior_mean_raw):\n f_outlet = interp1d(np.arange(0, final_time+1), prior_mean_raw, kind='cubic')\n return f_outlet",
"def compare_mean(values, weights):\n return np.average(values.numpy(), weights=weights)",
"def _combine(data, target, a, b):\r\n data[:, target, :2] = (data[:, a, :2] + data[:, b, :2]) / 2\r\n data[:, target, 2] = harmonic_mean(data[:, a, 2], data[:, b, 2])",
"def interpolate(self, interp):\n x = np.linspace(0, 29, len(self.ya))\n f_ya = interpolate.interp1d(x, self.ya)\n f_yv = interpolate.interp1d(x, self.yv)\n f_pa = interpolate.interp1d(x, np.reshape(self.pa, [-1]))\n f_pv = interpolate.interp1d(x, np.reshape(self.pv, [-1]))\n\n x_interp = np.linspace(0, 29, len(self.ya)*interp)\n self.ya = list(f_ya(x_interp))\n self.yv = list(f_yv(x_interp))\n self.pa = list(f_pa(x_interp))\n self.pv = list(f_pv(x_interp))",
"def interpolate_to_amplitude(a):\n a_min = a.min()\n a_max = a.max()\n return np.interp(a, (a_min, a_max), (0, 1))",
"def imageAvg(img1, img2):\n return myimg.imageAvg(img1.tolist(), img2.tolist())",
"def interpolate(self, xs):\n tck = splrep(self._xs, self._ys)\n new_ys = splev(xs, tck, der=0)\n return new_ys",
"def interpolate(i0, d0, i1, d1):\n if i0 == i1:\n return [d0]\n values = []\n a = (d1 - d0) / (i1 - i0)\n d = d0\n for i in range(i0,i1+1):\n values.append(d)\n d = d + a\n return values",
"def interpolate_arrays(t, idx, x_array, time_array):\n x1 = np.array([x_array[i, idx[i] - 1] for i in range(len(idx))])\n x2 = np.array([x_array[i, idx[i]] for i in range(len(idx))])\n t1 = np.array([time_array[i, idx[i] - 1] for i in range(len(idx))])\n t2 = np.array([time_array[i, idx[i]] for i in range(len(idx))])\n ta = t * np.ones(time_array.shape[0])\n return x1 + np.multiply(np.divide(x2-x1, t2-t1), ta - t1)",
"def avg(x, y):\n return (x + y)/2",
"def average(a1, a2, coord1, coord2, dim1, dim2, unit):\r\n \r\n avg = (a1 + a2)/2\r\n \r\n avg_xr = xr.DataArray(avg, coords=[coord1, coord2], dims=[dim1, dim2])\r\n avg_xr.attrs['units'] = unit\r\n \r\n return avg_xr",
"def mape(x, y):\n return statistics.mean(ape(x, y))",
"def lin_trim_mean(a: np.ndarray, start: float = 0.5, end: float = 0.1,\n start_v: float = 0, end_v: float = 0.5) -> float:\n start_w = np.linspace(start_v, 1, start * len(a), endpoint=False)\n end_w = np.linspace(end_v, 1, end * len(a), endpoint=False)[::-1]\n mid_w = np.ones(len(a) - len(start_w) - len(end_w))\n weights = np.concatenate((start_w, mid_w, end_w))\n return ((a * weights).sum() / weights.sum()).item()"
] | [
"0.68202096",
"0.6288764",
"0.61606866",
"0.6074447",
"0.60347766",
"0.5936755",
"0.59121615",
"0.5846344",
"0.5794967",
"0.5777938",
"0.576445",
"0.57544976",
"0.5718352",
"0.56939507",
"0.5673777",
"0.5601805",
"0.5579589",
"0.5571771",
"0.55689275",
"0.55656314",
"0.55560327",
"0.55345947",
"0.5523175",
"0.55212873",
"0.551369",
"0.5513324",
"0.5508468",
"0.55059445",
"0.5504028",
"0.5432"
] | 0.71933377 | 0 |
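The positive document in the record above reduces to an element-wise midpoint of two arrays. A minimal, self-contained sketch of how it behaves, assuming NumPy arrays of equal shape (the sample values below are illustrative and not taken from the dataset):

    import numpy as np

    def interpolation_array(a1, a2):
        # Mean of the two arrays at each index, i.e. the element-wise midpoint.
        return (a1 + a2) / 2.

    a = np.array([0.0, 2.0, 4.0])
    b = np.array([2.0, 4.0, 8.0])
    print(interpolation_array(a, b))  # [1. 3. 6.]

Note that plain Python lists would concatenate under `+` rather than add element-wise, so NumPy arrays (or another type with element-wise `+` and `/`) are assumed here.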
Use duration and PTratio to cluster into RS and FS neurons | def cluster_rsfs(durations, PTratio):
iter=1000
# cluster for FS and RS neurons according to duration of spike and PTratio
waveform_k = kmeans2(np.vstack((durations/np.max(durations),PTratio/np.max(PTratio))).T,
2, iter=iter, thresh=5e-6,minit='random')
labels = waveform_k[1]
return labels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()",
"def _cluster_matching(self, diarization_time=None, interactive=False,\n quiet=False, thrd_n=1, start_t=0):\n basename = self.get_file_basename()\n self._extract_clusters()\n self._match_clusters(interactive, quiet)\n# if not interactive:\n# #merging\n# self.automerge_clusters()\n self._status = 4\n sec = fm.wave_duration(basename + '.wav')\n total_time = time.time() - start_t\n self._set_time(total_time)\n if not quiet:\n print self.get_working_status()\n if interactive:\n print \"Updating db\"\n self.update_db(thrd_n, automerge=True)\n if not interactive:\n if not quiet:\n for clu in self._clusters:\n #print \"**********************************\"\n #print clu\n #for speaker in self[clu].speakers:\n # print \"\\t %s %s\" % (speaker, self[clu].speakers[speaker])\n #print '\\t ------------------------'\n distance = self[clu].get_distance()\n try:\n mean = self[clu].get_mean()\n m_distance = self[clu].get_m_distance()\n except (KeyError, ValueError):\n mean = 0\n m_distance = 0\n #print \"\"\"\\t best speaker: %s (distance from 2nd %f - mean %f - distance from mean %f ) \"\"\" % (self[clu],\n # distance, mean, m_distance)\n speakers_in_db = self.get_db().get_speakers()\n tot_voices = len(speakers_in_db['F']) + \\\n len(speakers_in_db['M']) + len(speakers_in_db['U'])\n\n #if diarization_time != None:\n # voice_time = float(total_time - diarization_time)\n # t_f_s = voice_time / len(speakers_in_db)\n #print \"\"\"\\nwav duration: %s\\nall done in %dsec (%s) (diarization %dsec time:%s ) with %s threads and %d voices in db (%f) \"\"\" % (utils.humanize_time(sec),\n #total_time,\n #utils.humanize_time(total_time),\n #diarization_time,\n #utils.humanize_time(diarization_time),\n #thrd_n,\n #tot_voices,\n #t_f_s)",
"def _cluster_time_estimator(self, sim, args, **kwargs):\n size = args['network size']\n if size == 11:\n return datetime.timedelta(hours=3)\n elif size == 15:\n return datetime.timedelta(hours=6)\n elif size == 21:\n return datetime.timedelta(hours=12)\n elif size == 25:\n return datetime.timedelta(hours=24)\n else:\n raise RuntimeError(\"No time estimate for network sizes other than 11, 15, 21 or 25\")",
"def _load_cluster(self):",
"def _setup_arrival_calcs(metrics_client: HeronMetricsClient,\n graph_client: GremlinClient,\n topology_id: str, cluster: str, environ: str,\n topology_ref: str, start: dt.datetime,\n end: dt.datetime, io_bucket_length: int,\n tracker_url: str, **kwargs: Union[str, int, float]\n ) -> Tuple[pd.DataFrame, List[List[Vertex]],\n pd.DataFrame, Dict[Vertex, List[int]],\n Dict[Vertex, List[int]]]:\n\n topo_traversal: GraphTraversalSource = \\\n graph_client.topology_subgraph(topology_id, topology_ref)\n\n # Calculate the routing probabilities for the defined metric gathering\n # period\n i2i_rps: pd.Series = (calculate_inter_instance_rps(\n metrics_client, topology_id, cluster, environ, start, end, tracker_url,\n **kwargs).set_index([\"source_task\", \"destination_task\", \"stream\"])\n [\"routing_probability\"])\n\n # Get the vertex levels for the logical graph tree\n LOG.info(\"Calculating levels for topology %s reference %s\", topology_id,\n topology_ref)\n levels: List[List[Vertex]] = get_levels(topo_traversal)\n LOG.debug(\"Found %d levels is topology %s reference %s\", len(levels),\n topology_id, topology_ref)\n\n # Calculate the input output ratios for each instances using data from the\n # defined metrics gathering period\n coefficients: pd.Series = lstsq_io_ratios(\n metrics_client, graph_client, topology_id, cluster, environ, start,\n end, io_bucket_length, **kwargs).set_index([\"task\", \"output_stream\",\n \"input_stream\",\n \"source_component\"]\n )[\"coefficient\"]\n\n # Get the details of the incoming and outgoing physical connections for\n # stream manager in the topology\n\n # Get a dictionary mapping from stream manager id string to a list of the\n # instances (within each container) that will send tuples to each stream\n # manager\n sending_instances: Dict[Vertex, List[int]] = \\\n (topo_traversal.V().hasLabel(\"stream_manager\")\n .group().by(\"id\").by(in_(\"physically_connected\")\n .hasLabel(P.within(\"spout\", \"bolt\"))\n .values(\"task_id\")\n .fold())\n .next())\n\n # Get a dictionary mapping from stream manager id string to a list of the\n # instances (within each container) that will receive tuples from each\n # stream manager\n receiving_instances: Dict[Vertex, List[int]] = \\\n (topo_traversal.V().hasLabel(\"stream_manager\")\n .group().by(\"id\").by(out(\"physically_connected\")\n .hasLabel(\"bolt\").values(\"task_id\").fold())\n .next())\n\n return (i2i_rps, levels, coefficients, sending_instances,\n receiving_instances)",
"def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)",
"def main(input_scenario):\n\n print(\"Program running for demand scenario \" + str(input_scenario))\n\n policy_f = open(\"./Policy/policy_result\" + str(input_scenario) + \".csv\", \"w\", newline='')\n policy_writer = csv.writer(policy_f)\n\n # Read case data\n ds, orig_fl = get_data_case(input_scenario, htime, zone)\n\n # Initialize operator location\n location = initialize_location(zone, vec)\n\n # Initialize cluster\n assignment, centre = initialize_cluster(cluster, zone, mindis)\n\n # Run\n # Timestep 0\n ds = update_ds(zone, cp, ds, mindis, orig_fl[0])\n\n # Timestep 1:\n for timestep in range(1, htime):\n\n print(\"Running for timestep \" + str(timestep))\n\n # Extract demand flows in current timestep\n train_fl = [fl[ts][timestep] for ts in range(trainsample)]\n\n # Compute total demand flows in and out of each station\n train_fl_out = [[0 for s in range(zone)] for ts in range(trainsample)]\n train_fl_in = [[0 for s in range(zone)] for ts in range(trainsample)]\n\n for ts in range(trainsample):\n for s in range(zone):\n train_fl_out[ts][s] = sum(train_fl[ts][s])\n for z in range(zone):\n train_fl_in[ts][s] += train_fl[ts][z][s]\n\n # Modify cluster based on demand flows\n assignment = modify_cluster(cluster, assignment, centre, zone, cp, ds, mindis, trainsample, train_fl)\n print(\"Clusters generated.\")\n\n # Initialize global intra-cluster rebalancing result\n yp_total = [0.0 for s in range(zone)]\n yn_total = [0.0 for s in range(zone)]\n bn = [[0 for s in range(zone)] for v in range(vec)]\n\n # Intra-cluster rebalancing\n for c in range(cluster):\n\n # Extract local data\n local_list = [s for s in range(zone) if assignment[s] == c]\n\n local_zone = len(local_list)\n\n local_cp = [cp[s] for s in local_list]\n local_ds = [ds[s] for s in local_list]\n\n local_vec_list = []\n local_vec = 0\n local_cap = []\n local_location = []\n\n for v in range(vec):\n if location[v].index(1) in local_list:\n local_vec_list.append(v)\n local_vec += 1\n local_cap.append(cap[v])\n local_location.append([1 if (s == location[v].index(1)) else 0 for s in local_list])\n \n local_dis = [[dis[s][z] for z in local_list] for s in local_list]\n\n local_train_fl_out = [[train_fl_out[ts][s] for s in local_list] for ts in range(trainsample)]\n local_train_fl_in = [[train_fl_in[ts][s] for s in local_list] for ts in range(trainsample)]\n\n # Apply local rebalance\n status, local_yp_total, local_yn_total, local_bn = intra_redeployment(local_zone, local_cp, local_ds, local_vec, local_cap, local_location, local_dis, trainsample, local_train_fl_out, local_train_fl_in, dismaxpick, dismaxdrop)\n\n # Make error report if no solution\n if status == 1:\n error_f = open(\"./error_report.csv\", \"a\", newline='')\n error_writer = csv.writer(error_f)\n error_writer.writerow([str(cluster), str(input_scenario), str(timestep), str(c)])\n error_f.close()\n\n # Translate to global indexing\n for s in range(local_zone):\n yp_total[local_list[s]] = local_yp_total[s]\n yn_total[local_list[s]] = local_yn_total[s]\n \n for v in range(local_vec):\n for s in range(local_zone):\n bn[local_vec_list[v]][local_list[s]] = local_bn[v][s]\n \n print(\"Cluster done is: \" + str(c))\n \n for s in range(zone):\n policy_writer.writerow([str(timestep), str(s), str(yp_total[s]), str(yn_total[s])])\n \n # Update ds\n update_ds(zone, cp, ds, mindis, orig_fl[timestep], yp_total, yn_total)\n\n # Update operator location\n for v in range(vec):\n for s in range(zone):\n if round(bn[v][s]) == 1:\n location[v][s] = 1\n else:\n location[v][s] = 0\n \n 
print(\"Timestep done is: \" + str(timestep))\n \n policy_f.close()",
"def run():\n rospy.init_node(\"time\")\n mpc = PointFollowerMPC(horizon_length=HORIZON_LENGTH, time_step=TIME_STEP)\n mpc.setup()\n FarthestPointFollower(mpc=mpc).start()",
"def test(dist_param, picker_param, iters):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(4, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n None,\n tfidf_name('merged.stem{}.tfidf', name_tag),\n 10,\n 0,\n None)\n execute(tf_conf)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n None,\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n None,\n None,\n None)\n execute(tf_conf)\n #input, out, picker, distance, iterations,\n clust_cfg = configs.ClusteringConfig(\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n tfidf_name('merged.stem{}.stop.clustered.t', name_tag),\n picker_param,\n dist_param,\n iters,\n None\n )\n execute(clust_cfg)\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n var, norm = variation_of_information(cl_orig, clust2)\n print(\"**** FOR var {} VOI is {}\".format(name_tag, norm))",
"def __init__(self):\n \n rospy.init_node('trajectory_planner', anonymous=True)\n \n rospy.Subscriber('/costmap_2d', OccupancyGrid, self.costmap_callback)\n rospy.Subscriber('/exploration_complete', Bool, self.exploration_complete_callback)\n self.trans_listener = tf.TransformListener()\n \n self.traj_pub = rospy.Publisher('/cmd_path', Path, queue_size=1)\n\tself.auto_goal_pub = rospy.Publisher('/auto_goal', Pose2D, queue_size=1)\n self.exp_complete_pub = rospy.Publisher('/exploration_complete', Bool, queue_size=1)\n\n self.number_of_fails = 0",
"def __init__(self,\n name: str = 'Process',\n time_horizon: int = 432000,\n lambda_: float = 0.1,\n beta: float = 0.2,\n epsilon: float = 0.2,\n mu: int = 4,\n stream_speed: int = 100,\n n_features: int = 2,\n gen_plot: bool = False,\n gen_metrics: bool = True):\n self.gen_plot = gen_plot\n self.gen_metrics = gen_metrics\n self.event_index = 0\n self.total_cases = set()\n self.check_point = datetime(2010, 1, 1)\n self.cases = []\n self.name = name\n self.time_horizon = time_horizon\n self.initialized = False\n self.cp_count = 0\n self.nyquist = 0\n self.check_point_cases = 0\n self.process_model_graph = nx.DiGraph()\n self.denstream = DenStream(lambda_, beta, epsilon, mu, stream_speed, n_features)\n self.cluster_metrics = []\n self.case_metrics = []\n self.active_core_clusters = set()\n self.drift_indexes = []\n self.metrics = Metrics(self.name)\n self.feature_space_plot_path = f'visualization/{self.name}_feature_space'\n makedirs(self.feature_space_plot_path, exist_ok=True)",
"def __init__(self, *args, **kwargs):\n if args:\n parameters = args[0]\n else:\n parameters = kwargs\n self.parameters = parameters\n self.number_of_nodes = self.parameters['Number_of_nodes']\n self.detecting_deadlock = self.parameters['detect_deadlock']\n self.digraph = nx.DiGraph()\n self.lmbda = [self.parameters['Arrival_rates']['Class ' + str(i)] for i in range(self.parameters['Number_of_classes'])]\n self.overall_lmbda = sum([sum(self.lmbda[i]) for i in range(len(self.lmbda))])\n self.mu = [self.parameters['Service_rates']['Class ' + str(i)] for i in range(self.parameters['Number_of_classes'])]\n self.c = self.parameters['Number_of_servers']\n self.schedules = [False for i in range(len(self.c))]\n for i in range(len(self.c)):\n if type(self.c[i])==type('string') and self.c[i]!='Inf':\n self.schedules[i] = True \n self.queue_capacities = self.parameters['Queue_capacities']\n self.transition_matrix = [self.parameters['Transition_matrices']['Class ' + str(i)] for i in range(self.parameters['Number_of_classes'])]\n if 'Class_change_matrices' in self.parameters:\n self.class_change_matrix = [self.parameters['Class_change_matrices']['Node ' + str(i)] for i in range(self.parameters['Number_of_nodes'])]\n else:\n self.class_change_matrix = 'NA'\n self.max_simulation_time = self.parameters['Simulation_time']\n self.transitive_nodes = [Node(i + 1, self) for i in range(len(self.c))]\n self.nodes = [ArrivalNode(self)] + self.transitive_nodes + [ExitNode(\"Inf\")]\n self.service_times = self.find_service_time_dictionary()\n self.state = [[0, 0] for i in range(self.number_of_nodes)]\n initial_state = [[0, 0] for i in range(self.number_of_nodes)]\n self.times_dictionary = {tuple(tuple(initial_state[i]) for i in range(self.number_of_nodes)): 0.0}\n\n if len(self.lmbda) != len(self.mu) or len(self.lmbda) != len(self.transition_matrix) or len(self.mu) != len(self.transition_matrix):\n raise ValueError('Lambda, Mu and the Transition Matrix should all have the same number of classes')\n\n if any(len(lmbdacls) != len(self.c) for lmbdacls in self.lmbda):\n raise ValueError('Lambda should have same length as c for every class')\n\n if any(len(mucls) != len(self.c) for mucls in self.mu):\n raise ValueError('Mu should have same length as c for every class')\n\n if any(len(transmatrxcls) != len(self.c) for transmatrxcls in self.transition_matrix):\n raise ValueError('Transition matrix should be square matrix of length c for every class')\n\n if any(len(transmatrxrow) != len(self.c) for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls):\n raise ValueError('Transition matrix should be square matrix of length c for every class')\n\n if any(l < 0 for lmbdaclass in self.lmbda for l in lmbdaclass):\n raise ValueError('All arrival rates should be positive')\n\n if any(tmval < 0 for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls for tmval in transmatrxrow) or any(tmval > 1 for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls for tmval in transmatrxrow) or any(sum(transmatrxrow) > 1 for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls):\n raise ValueError('All transition matrix entries should be probabilities 0<=p<=1 and all transition matrix rows should sum to 1 or less')\n\n if self.max_simulation_time < 0:\n raise ValueError('Maximum simulation time should be positive')",
"def __init__(self,manager,name):\n Online.DatapointLoader.DatapointLoader.__init__(self,manager,name)\n self.dpName = self.name\n self.runType = self.dp('general.runType')\n self.partitionName = self.dp('general.partName')\n self.partitionID = self.dp('general.activePartId')\n self.nSubFarm = self.dp('HLTFarm.nSubFarms')\n self.subfarms = self.dp('HLTFarm.subFarms')\n self.addDp(self.reader)",
"def __init__(self, node, type, ds, mr, rrdtool_step):\n\n self.node = node\n if type == \"host\":\n self.data = ds.get_host_data()[node]\n self.topdir, self.file_prefix, outfiles = ds.get_host_outfiles()\n self.outfiles = outfiles[self.node]\n elif type == \"vm\":\n self.data = ds.get_vm_data()[node]\n self.topdir, self.file_prefix, outfiles = ds.get_vm_outfiles()\n self.outfiles = outfiles[self.node]\n else:\n raise Exception(\"Invalide type value: %s\" % type)\n self.start, self.end, self.datasource_step = ds.get_time_info()\n self.rrdtool_step = rrdtool_step\n self.conrate = self.rrdtool_step / self.datasource_step\n\n if len(self.data)/self.conrate < 1:\n logging.warn(\"%s doesn't have enough data. Skipped it.\" % node)\n self.outfiles[\"rrdfile\"] = self.SKIPPED\n self.rrdfile = None\n return\n\n self.outfiles[\"rrdfile\"] = \"%s/%s_%s.rrd\" % (self.topdir,\n self.file_prefix, node)\n self.rrdfile = self.outfiles[\"rrdfile\"]\n\n # Create db\n logging.info(\"Creating rrdtool database for %s\" % node)\n dslist = [\"DS:%s:%s:%d:0:U\" % (mobj.name, mobj.type,\n 2*self.datasource_step)\n for mname, mobj in mr.items()]\n self.rrdtool_cmd(\"create\", self.outfiles[\"rrdfile\"],\n dslist,\n \"--start\", str(self.start - 1),\n \"--step\", str(self.datasource_step),\n \"RRA:AVERAGE:0.5:%d:%d\" %\n (self.conrate, len(self.data)/self.conrate),\n log_level=logging.DEBUG)\n\n # Import data\n logging.info(\"Importing data to the database\")\n fields = [mname for mname, mobj in mr.items()]\n for d in self.data:\n template = \":\".join(fields)\n # If the data doesn't have a field for this metric or if it has\n # but the value of the field is \"NA\", change it to \"U\" (this means\n # unknown value in RRDTool).\n values = \":\".join([d.get(k, \"U\").replace(\"NA\", \"U\")\n for k in fields])\n self.rrdtool_cmd(\"update\", self.outfiles[\"rrdfile\"],\n \"--template\", template,\n \"%s:%s\" % (d[\"time_h\"], values))",
"def __init__(self):\r\n self.label = \"Create Inflow File From ECMWF Runoff\"\r\n self.description = (\"Creates RAPID NetCDF input of water inflow \" +\r\n \"based on ECMWF runoff results and previously created weight table.\")\r\n self.canRunInBackground = False\r\n #CJB self.header_wt = ['StreamID', 'area_sqm', 'lon_index', 'lat_index', 'npoints']\r\n self.header_wt = ['rivid', 'area_sqm', 'lon_index', 'lat_index', 'npoints']\r\n #SDR added new structure to fit new ecmwf ##.runoff.nc file order\r\n #self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time']]\r\n self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time'], ['time','lon','lat']] # Line Added/Modified CJB 20190108\r\n #self.vars_oi = [[\"lon\", \"lat\", \"time\", \"RO\"], ['longitude', 'latitude', 'time', 'ro']]\r\n self.vars_oi = [[\"lon\", \"lat\", \"time\", \"RO\"], ['longitude', 'latitude', 'time', 'ro'], [\"time\", \"lon\", \"lat\", \"RO\"]] # Line Added/Modified CJB 20190108\r\n self.length_time = {\"LowRes\": 61, \"Low3HrRes\": 40, \"LowResFull\": 85,\"HighRes\": 125, \"High3HrRes\":3} # *** MJS What is High3HrRes for? Doesn't seem to be used.\r\n #self.length_time = {\"LowResFull\": 85,\"HighRes\": 125}\r\n self.length_time_opt = {\"LowRes-6hr\": 60, \"LowRes-3hr\": 40,\r\n \"LowResFull-3hr-Sub\": 48, \"LowResFull-6hr-Sub\": 36,\r\n \"HighRes-1hr\": 90, \"HighRes-3hr\": 48, \"HighRes-6hr\": 40, # *** MJS HighRes-3hr was changed to 40 before; why?\r\n \"HighRes-3hr-Sub\": 18, \"HighRes-6hr-Sub\": 16}\r\n self.errorMessages = [\"Missing Variable 'time'\",\r\n \"Incorrect dimensions in the input ECMWF runoff file.\",\r\n \"Incorrect variables in the input ECMWF runoff file.\",\r\n \"Incorrect time variable in the input ECMWF runoff file\",\r\n \"Incorrect number of columns in the weight table\",\r\n \"No or incorrect header in the weight table\",\r\n \"Incorrect sequence of rows in the weight table\"]",
"def start_training(params):\n\n\n\n # CREATE A FOLDER TO HOLD RESULTS\n\n\n exp_pref = \"../results/\" + params.EXPERIMENT_PREFIX\n time_str = time.strftime(\"_%m-%d-%H-%M_\", time.gmtime())\n exp_dir = exp_pref + time_str + \\\n \"{}\".format(params.LEARNING_RATE).replace(\".\", \"p\") + \"_\" \\\n + \"{}\".format(params.DISCOUNT).replace(\".\", \"p\")\n\n try:\n os.stat(exp_dir)\n except OSError:\n os.makedirs(exp_dir)\n\n logger = logging.getLogger(\"DeepLogger\")\n logger.setLevel(logging.INFO)\n\n # Logging filehandler\n #fh = logging.FileHandler(exp_dir + \"/log.log\")\n # Rotate file when filesize is 5 mb\n fh = RotatingFileHandler(exp_dir + \"/log.log\", maxBytes=5000000, backupCount=100)\n\n fh.setLevel(logging.INFO)\n\n # Console filehandler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n\n # Prevent nohup from producing large log file, logging to file is handled internally\n # logger.addHandler(ch)\n\n log_params(logger, params)\n\n #logging.basicConfig(level=logging.INFO, filename=exp_dir + \"/log.log\")\n\n\n if params.DETERMINISTIC:\n rng = np.random.RandomState(12345)\n else:\n rng = np.random.RandomState()\n\n if params.CUDNN_DETERMINISTIC:\n theano.config.dnn.conv.algo_bwd = 'deterministic'\n\n # Init ale\n ale = ale_python_interface.ALEInterface()\n ale.setInt('random_seed', 123)\n ale.setBool('display_screen', params.DISPLAY_SCREEN)\n ale.setFloat('repeat_action_probability', params.REPEAT_ACTION_PROBABILITY)\n full_rom_path = os.path.join(params.ROM_PATH, params.ROM_NAME)\n ale.loadROM(full_rom_path)\n num_actions = len(ale.getMinimalActionSet())\n\n print \"Legal actions: \", num_actions\n print ale.getMinimalActionSet()\n\n # Instantiate network\n logger.info(\"Setting up network...\")\n network = None # Be able to continue training from a network or watch a network play\n if (params.NETWORK_PICKLE_FILE is None):\n logger.info(\"Initializing a new random network...\")\n network = q_network.DeepQLearner(params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n num_actions,\n params.PHI_LENGTH,\n params.DISCOUNT,\n params.LEARNING_RATE,\n params.RMS_DECAY,\n params.RMS_EPSILON,\n params.MOMENTUM,\n params.CLIP_DELTA,\n params.FREEZE_INTERVAL,\n params.BATCH_SIZE,\n params.NETWORK_TYPE,\n params.UPDATE_RULE,\n params.BATCH_ACCUMULATOR,\n rng)\n else:\n logger.info(\"Loading network instance from file...\")\n handle = open(params.NETWORK_PICKLE_FILE, 'r')\n network = cPickle.load(handle)\n\n\n # Only used when getting a random network\n if params.RANDOM_NETWORK_PICKLE:\n import sys\n sys.setrecursionlimit(10000)\n result_net_file = open(params.EXPERIMENT_PREFIX + '.pkl', 'w')\n print \"File opened\"\n cPickle.dump(network, result_net_file, -1)\n print \"Pickle dumped\"\n result_net_file.close()\n sys.exit(0)\n\n\n # Instatiate agent\n logger.info(\"Setting up agent...\")\n agent = ale_agent.NeuralAgent(network,\n params.EPSILON_START,\n params.EPSILON_MIN,\n params.EPSILON_DECAY,\n params.REPLAY_MEMORY_SIZE,\n exp_dir,\n params.REPLAY_START_SIZE,\n params.UPDATE_FREQUENCY,\n rng)\n\n # Instantiate experient\n logger.info(\"Setting up experiment...\")\n experiment = ale_experiment.ALEExperiment(ale, agent,\n params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n params.RESIZE_METHOD,\n params.EPOCHS,\n params.STEPS_PER_EPOCH,\n params.STEPS_PER_TEST,\n params.FRAME_SKIP,\n params.DEATH_ENDS_EPISODE,\n params.MAX_START_NULLOPS,\n rng)\n\n\n # 
Run experiment\n logger.info(\"Running experiment...\")\n experiment.run()",
"def time_cnn():\n\n data_dir = \"/home/liyanzeng/git/Var-CNN--DynaFlow/preprocess\"\n\n # read in data from numpy files\n train_metadata = np.load(r\"%s/train_metadata.npy\" % data_dir)\n test_metadata = np.load(r\"%s/test_metadata.npy\" % data_dir)\n train_seq = np.load(r\"%s/train_seq.npy\" % data_dir)\n train_labels = np.load(r\"%s/train_labels.npy\" % data_dir)\n test_seq = np.load(r\"%s/test_seq.npy\" % data_dir)\n test_labels = np.load(r\"%s/test_labels.npy\" % data_dir)\n\n # apply normalization to metadata\n metadata_scaler = StandardScaler()\n train_metadata = metadata_scaler.fit_transform(train_metadata)\n test_metadata = metadata_scaler.transform(test_metadata)\n\n # extract sequences\n train_time, train_time_dleft, train_time_dright, train_dir = np.split(train_seq, 4, axis=2)\n test_time, test_time_dleft, test_time_dright, test_dir = np.split(test_seq, 4, axis=2)\n\n # reshape to be able to normalize\n train_time = np.reshape(train_time, (train_time.shape[0], train_time.shape[1]))\n test_time = np.reshape(test_time, (test_time.shape[0], test_time.shape[1]))\n train_time_dleft = np.reshape(train_time_dleft, (train_time_dleft.shape[0], train_time_dleft.shape[1]))\n test_time_dleft = np.reshape(test_time_dleft, (test_time_dleft.shape[0], test_time_dleft.shape[1]))\n train_time_dright = np.reshape(train_time_dright, (train_time_dright.shape[0], train_time_dright.shape[1]))\n test_time_dright = np.reshape(test_time_dright, (test_time_dright.shape[0], test_time_dright.shape[1]))\n\n # apply normalization to packet time data according to scaling computed on train timestamp data\n time_scaler = StandardScaler()\n train_time = time_scaler.fit_transform(train_time)\n test_time = time_scaler.transform(test_time)\n train_time_dleft = time_scaler.transform(train_time_dleft)\n test_time_dleft = time_scaler.transform(test_time_dleft)\n train_time_dright = time_scaler.transform(train_time_dright)\n test_time_dright = time_scaler.transform(test_time_dright)\n\n train_seq = np.stack((train_time, train_time_dleft, train_time_dright), axis=-1)\n test_seq = np.stack((test_time, test_time_dleft, test_time_dright), axis=-1)\n\n # construct CNN\n cnn_input = Input(shape=(seq_length, 3,), name='cnn_input')\n cnn_model = time_conv_block(cnn_input, 2, 4)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 3, 16)\n cnn_model = time_conv_block(cnn_model, 3, 16)\n cnn_output = Flatten()(cnn_model)\n cnn_output = dense_layer(cnn_output, 1024, 0.4)\n\n # construct MLP for metadata\n metadata_input = Input(shape=(7,), name='metadata_input')\n metadata_output = dense_layer(metadata_input, 32, 0.) 
# consider this the embedding of all the metadata\n\n # concatenate before second dense layer\n combined = Concatenate()([cnn_output, metadata_output])\n combined = dense_layer(combined, 1024, 0.5)\n\n # add final softmax layer\n if NUM_UNMON_SITES == 0: # closed-world\n combined_output = Dense(units=NUM_MON_SITES, activation='softmax', name='combined_output')(combined)\n else:\n # add extra class for unmonitored sites\n combined_output = Dense(units=NUM_MON_SITES + 1, activation='softmax', name='combined_output')(combined)\n\n model = Model(inputs=[cnn_input, metadata_input], outputs=[combined_output])\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(0.001),\n metrics=['accuracy'])\n\n training_data = ({'cnn_input': train_seq,\n 'metadata_input': train_metadata},\n {'combined_output': train_labels})\n\n test_data = ({'cnn_input': test_seq,\n 'metadata_input': test_metadata},\n {'combined_output': test_labels})\n\n lr_modifier = LearningRateScheduler(schedule=lr_scheduler)\n\n # train model\n train_time_start = time.time()\n model.fit(x=training_data[0],\n y=training_data[1],\n batch_size=50,\n epochs=200,\n verbose=0,\n callbacks=[lr_modifier])\n train_time_end = time.time()\n\n # compute final softmax predictions on test set and save predictions\n test_time_start = time.time()\n predictions = model.predict(test_data[0], batch_size=50, verbose=0)\n test_time_end = time.time()\n \n save_dir = \"predictions\"\n np.save(file=r\"%s/time_model\" % save_dir, arr=predictions)\n \n return (train_time_end - train_time_start), (test_time_end - test_time_start)",
"def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')",
"def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)",
"def timlinucb_parallel_t(\n df_edges,\n df_feats,\n times,\n nodes,\n num_seeds=5,\n sigma=4,\n c=0.1,\n epsilon=0.4,\n num_repeats_oim=10,\n num_repeats_oim_reward=10,\n style=\"additive\",\n process_id=1,\n persist=False,\n):\n if \"tim\" not in os.listdir():\n logger_tlu.warning(\"Couldn't find TIM in the program directory\")\n return False\n\n tim_name = f\"tim_tlu_{process_id}\"\n dir_name = f\"{tim_name}_dir\"\n logger_tlu.debug(f\"Name of the new TIM file: {tim_name}\")\n shutil.copyfile(\"tim\", tim_name)\n\n # Making the new tim file executable\n st = os.stat(tim_name)\n os.chmod(tim_name, st.st_mode | stat.S_IEXEC)\n\n results = []\n\n # For persistent parameters - making the b and M matrices\n if persist:\n b = np.zeros((df_feats.shape[1], 1))\n m_inv = np.eye(df_feats.shape[1], df_feats.shape[1])\n else:\n b = None\n m_inv = None\n\n for t in times:\n if style == \"additive\":\n df_t = df_edges[df_edges[\"day\"] <= t].sort_values(\"source\").reset_index()\n elif style == \"dynamic\":\n df_t = df_edges[df_edges[\"day\"] == t].sort_values(\"source\").reset_index()\n\n df_feats_t = df_t[\"index\"].apply(lambda x: df_feats.loc[x])\n\n result_oim = oim_node2vec(\n df_t,\n df_feats_t,\n nodes,\n num_inf=num_seeds,\n sigma=sigma,\n c=c,\n epsilon=epsilon,\n num_repeats=num_repeats_oim,\n num_repeats_reward=num_repeats_oim_reward,\n oracle=partial(tim_parallel, tim_file=tim_name, temp_dir=dir_name),\n hide_tqdm=True,\n persist=persist,\n m_inv=m_inv,\n b=b,\n )\n\n result_oim[\"time\"] = t\n\n if persist:\n m_inv = result_oim.pop(\"m_inv\")\n b = result_oim.pop(\"b\")\n\n results.append(result_oim)\n\n logger_tlu.debug(\n f\"Removing the new TIM files {tim_name} and the temp directories {dir_name}\"\n )\n os.remove(tim_name)\n shutil.rmtree(dir_name)\n\n return pd.DataFrame(results)",
"def init_node(self):\n # publishers\n self.pub_poly_traj_points = rospy.Publisher('/espeleo/traj_points_polygon', Polygon, latch=True, queue_size=1)\n self.pub_path_short = rospy.Publisher('/robot_path_shortest', Path, latch=True, queue_size=1)\n self.pub_path_energy = rospy.Publisher('/robot_path_energy', Path, latch=True, queue_size=1)\n self.pub_path_traver = rospy.Publisher('/robot_path_traversal', Path, latch=True, queue_size=1)\n self.pub_path_traver_pybullet = rospy.Publisher('/robot_path_traversal_pybullet', Path, latch=True,\n queue_size=1)\n self.pub_path_traver_op = rospy.Publisher('/robot_path_traversal_optimization', Path, latch=True, queue_size=1)\n self.pub_path_traver_pybullet_normal = rospy.Publisher('/robot_path_traversal_pybullet_normal', Path,\n latch=True,\n queue_size=1)\n self.pub_path_traver_op_normal = rospy.Publisher('/robot_path_traversal_optimization_normal', Path, latch=True,\n queue_size=1)\n self.pub_path_straight = rospy.Publisher('/robot_path_straightest', Path, latch=True, queue_size=1)\n self.pub_path_combined = rospy.Publisher('/robot_path_combined', Path, latch=True, queue_size=1)\n\n\n self.pub_src_point = rospy.Publisher('/source_path_point', Marker, latch=True, queue_size=1)\n self.pub_dst_point = rospy.Publisher('/target_path_point', Marker, latch=True, queue_size=1)\n self.pub_frontiers_ground_pts = rospy.Publisher('/frontiers_ground_pts', MarkerArray, latch=True,\n queue_size=1)\n self.pub_frontiers_ground_centroids = rospy.Publisher('/frontiers_ground_centroids', MarkerArray, latch=True,\n queue_size=1)\n self.pub_frontiers_ground_centroids_labels = rospy.Publisher('/frontiers_ground_centroids_labels', MarkerArray,\n latch=True, queue_size=1)\n self.pub_frontiers_ground_trav_labels = rospy.Publisher('/frontiers_ground_centroids_traversability_labels', MarkerArray,\n latch=True, queue_size=1)\n\n # subscribers\n rospy.Subscriber('/laser_cloud_surround2', PointCloud2, self.map_point_cloud_callback)\n rospy.Subscriber('/integrated_to_init2', Odometry, self.odom_callback)",
"def runTest(schedType, reCap, pattern, lenRand, lenConfig, numExprs):\n conf = config.Configuration()\n #default configures\n conf.set(\"num.distributed.node\", 64)\n conf.set(\"node.block.len\", 64)\n conf.set(\"job.client.num.iterations\", 1)\n conf.set(\"job.split.length.rand\", lenRand)\n #customized configuration\n # schedule choice\n conf.set(\"node.schedule.choice\", schedType)\n # cap choice\n if (schedType == 0):\n conf.set('num.replicas', 3)\n conf.set('node.disk.capacity', reCap)\n else:\n conf.set('num.replicas', reCap)\n # pattern p[0]#p[1]\n p = pattern.split('#')\n conf.set('job.class.name', patternDict[p[0]][0])\n if len(p) > 1:\n assert (p[0].endswith('rp')), 'pattern syntax error:%s' %pattern\n conf.set('randpair.dense.level', p[1])\n if (p[0].startswith('i')):\n conf.set('job.client.num.iterations', 10)\n # record length config\n lconf = lenConfig.split('#')\n i0 = i1 = s0 = s1 = b = 1\n if len(lconf) == 5:\n i0, i1, s0, s1, b = lconf\n elif len(lconf) == 4:\n i0, i1, s0, s1 = lconf\n elif len(lconf) == 2:\n i0, s0 = lconf\n else:\n raise Exception('lenConfig incorrect format:%s'%lenConfig)\n conf.set('%s.input0.length' %patternDict[p[0]][1], i0)\n conf.set('%s.input1.length' %patternDict[p[0]][1], i1)\n conf.set('%s.split0.length' %patternDict[p[0]][1], s0)\n conf.set('%s.split1.length' %patternDict[p[0]][1], s1)\n conf.set('%s.num.blocks' %patternDict[p[0]][1], b)\n\n #write out conf\n conf.write('conf/sim-sched-conf.xml')\n\n #run test\n rcount, rsum, rave, rstd, rmin, rmax = er.repeatNoneInteract(\n \"ant runtest -Dclassname=TestScheduler\",\n er.BasicCollector('{k:>>rate}: {v:%float}'), numExprs)\n #print 'count:%s, sum:%s, ave:%s, std:%s, min:%s, max:%s' %(\n # rcount, rsum, rave, rstd, rmin, rmax)\n print '==>>', 'average rate: ', rave, ' std: ', rstd",
"def cluster_timeseries(X, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n import scipy as sp\n import time \n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n print('Beginning Calculating pairwise distances between voxels')\n \n X = np.array(X)\n X_dist = sp.spatial.distance.pdist(X.T, metric = similarity_metric)\n \n temp=X_dist\n temp[np.isnan(temp)]=0\n tempmax=temp.max()\n \n X_dist = sp.spatial.distance.squareform(X_dist)\n X_dist[np.isnan(X_dist)]=tempmax\n #import pdb;pdb.set_trace()\n sim_matrix=1-sk.preprocessing.normalize(X_dist, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n #import pdb;pdb.set_trace()\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n# # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n return y_pred",
"def staining_segmentation():\n\n # Inputs of the function\n parser = argparse.ArgumentParser(description='Segmentation script')\n parser.add_argument('-scheduler', default=False, help='dask scheduler address ex. tcp://192.168.0.4:7003')\n parser.add_argument('-path', help='processing directory')\n parser.add_argument('-processing_file', help='path to the file with the \\\n staning to process')\n parser.add_argument('-segmentation_staining', help='staining to be \\\n segmented')\n \n \n\n args = parser.parse_args()\n \n # Directory to process\n processing_directory = args.path\n\n # File to process\n processing_file = args.processing_file\n\n # staining to segment\n segmentation_staining = args.segmentation_staining\n\n # Dask scheduler address\n scheduler_address = args.scheduler\n \n if scheduler_address:\n # Start dask client on server or cluster\n client=Client(scheduler_address)\n\n else:\n # Start dask client on local machine. It will use all the availabe\n # cores -1\n\n # number of core to use\n ncores = multiprocessing.cpu_count()-1\n cluster = LocalCluster(n_workers=ncores)\n client=Client(cluster)\n\n # Determine the operating system running the code\n os_windows, add_slash = utils.determine_os()\n\n # Check training slash in the processing directory\n processing_directory=utils.check_trailing_slash(processing_directory,os_windows)\n\n\n segmentation_parameters = utils.general_yaml_parser(processing_directory+'Staining_segmentation.config.yaml')\n\n # Chunking parameters\n chunk_size = segmentation_parameters[segmentation_staining]['image_chunking_parameters']['chunk_size']\n percent_padding = segmentation_parameters[segmentation_staining]['image_chunking_parameters']['percent_padding']\n\n # Segmentation parameters\n trimming = segmentation_parameters[segmentation_staining]['segmentation_parameters']['trimming']\n min_object_size = segmentation_parameters[segmentation_staining]['segmentation_parameters']['min_object_size']\n disk_radium_rank_filer = segmentation_parameters[segmentation_staining]['segmentation_parameters']['disk_radium_rank_filer']\n min_distance = segmentation_parameters[segmentation_staining]['segmentation_parameters']['min_distance'] \n threshold_rel = segmentation_parameters[segmentation_staining]['segmentation_parameters']['threshold_rel'] \n\n # Load the image (will be modified after the change to hdf5 input)\n img = io.imread(processing_file)\n\n # Image chunking\n nr_chunks,nc_chunks,Coords_Chunks_list, Coords_Padded_Chunks_list,r_coords_tl_all_padded,\\\n c_coords_tl_all_padded,r_coords_br_all_padded,c_coords_br_all_padded = \\\n object_based_segmentation.image_chunking(img,chunk_size,percent_padding)\n \n \n # Create the chunks idx\n Chunks_idxs_linear=np.arange(len(Coords_Padded_Chunks_list),dtype='int32')\n\n # Distribute the chunks idx and distridute them in an array according to the position\n # in the chunked image\n Chunks_idxs=Chunks_idxs_linear.reshape(nr_chunks,nc_chunks) \n\n # Flatten the array for make it easier the creation of the coords combination\n Chunks_idxs_rows=np.ravel(Chunks_idxs)\n Chunks_idxs_cols=np.ravel(Chunks_idxs,order='F')\n\n # Calculate coords of the overlapping chunks\n Overlapping_chunks_coords=list()\n counter=0\n left_pos=Chunks_idxs_rows[0]\n for el in Chunks_idxs_rows[1:]:\n if counter < nc_chunks-1:\n Coords_left=Coords_Padded_Chunks_list[left_pos]\n Coords_right=Coords_Padded_Chunks_list[el]\n row_tl=Coords_left[0]\n row_br=Coords_left[1]\n col_tl=Coords_right[2]\n col_br=Coords_left[3]\n 
Overlapping_chunks_coords.append((row_tl,row_br,col_tl,col_br))\n left_pos=el\n counter+=1\n else:\n left_pos=el\n counter=0\n \n counter=0\n top_pos=Chunks_idxs_cols[0]\n for el in Chunks_idxs_cols[1:]:\n if counter < nr_chunks-1:\n Coords_top=Coords_Padded_Chunks_list[top_pos]\n Coords_bottom=Coords_Padded_Chunks_list[el]\n \n row_tl=Coords_bottom[0]\n row_br=Coords_top[1]\n col_tl=Coords_top[2]\n col_br=Coords_top[3]\n Overlapping_chunks_coords.append((row_tl,row_br,col_tl,col_br))\n \n counter+=1\n top_pos=el\n else:\n top_pos=el\n counter=0\n\n # Now i use this approach for testing. If the image gets to big to fit in RAM\n # then save the files and load them separately in each node\n chunked_image_seq = list()\n for coords in Coords_Padded_Chunks_list:\n chunked_image_seq.append(img[coords[0]:coords[1],coords[2]:coords[3]])\n\n # Run the segmentation\n futures_processes = client.map(object_based_segmentation.polyT_segmentation,chunked_image_seq,\n min_object_size=min_object_size,\n min_distance=min_distance,\n disk_radium_rank_filer=disk_radium_rank_filer,\n threshold_rel=threshold_rel,\n trimming=trimming)\n\n Objects_list = client.gather(futures_processes)\n\n # Recalculate labels and coords\n\n processed_images_data = dict()\n\n max_starting_label = 0\n total_data_dict = dict()\n\n for idx, objs_chunk in enumerate(Objects_list):\n for label ,cvalues in objs_chunk.items():\n new_label=max_starting_label+1\n coords = Coords_Padded_Chunks_list[idx][0::2]\n total_data_dict[new_label] = cvalues+coords\n max_starting_label = new_label\n\n\n # Calculate all the intersecting objects\n futures_processes = client.map(object_based_segmentation.OverlappingCouples,Overlapping_chunks_coords,\n TotalDataDict = total_data_dict)\n \n\n All_intersecting = client.gather(futures_processes)\n\n\n # Put together the couple with the same label for multiple intersection\n # for the labels of objects where there is intersection between multiple regions\n # Then scatter all of them and calculate intersection\n\n # Combine the results from the parallel processing\n flatten_couple = [el for grp in All_intersecting for el in grp] \n # Remove duplicates\n flatten_couple=list(set(flatten_couple))\n\n # Create a list of the labels (removing the repeats)\n singles=list()\n [singles.append(x) for cpl in flatten_couple for x in cpl]\n singles=list(set(singles))\n\n # Identify the couples containing singles\n Combined_all_singles=list()\n for item in singles:\n Combined_single=list()\n for couple in flatten_couple:\n if item in couple:\n Combined_single.append(couple)\n Combined_all_singles.append(Combined_single)\n\n if Combined_all_singles:\n # Combine all the intersecting labeles\n start=Combined_all_singles[0]\n ComparisonList=Combined_all_singles[1:].copy()\n #merged=start.copy()\n merged=list()\n SavedCombinations=list()\n tmp_list=ComparisonList.copy()\n KeepGoing=True\n Loop=0\n while KeepGoing:\n Loop+=1\n\n\n for idx,el in enumerate(ComparisonList):\n\n if set(start).intersection(set(el)):\n #merged=list(set(merged)|set(el))\n [merged.append(x) for x in el]\n tmp_list = [e for e in tmp_list if e != el]\n\n\n intersection=list(set.intersection(set(merged),set(start))) \n if intersection:\n merged=list(set.union(set(merged),set(start)))\n #merged=list(set(merged))\n start=merged.copy()\n merged=list()\n ComparisonList=tmp_list.copy()\n #tmp_list.append(merged)\n else:\n SavedCombinations.append(start)\n start=tmp_list[0]\n tmp_list=tmp_list[1:]\n ComparisonList=tmp_list.copy()\n\n\n if len(tmp_list)<1:\n 
[SavedCombinations.append(x) for x in tmp_list]\n KeepGoing =False\n \n # Remove all the duplicated labeled that intersect\n # in this case the labeled are merged. It will be nice to run an extra\n # segmentation on the merged objects\n # If it is too slow this step can be parallelised\n\n SavedLab_list=list()\n CleanedDict=total_data_dict.copy()\n for couple in SavedCombinations:\n SaveLab, RemoveLabs,NewCoords=object_based_segmentation.IntersectionCouples(couple,total_data_dict)\n SavedLab_list.append(SaveLab)\n for lab in RemoveLabs:\n del CleanedDict[lab]\n CleanedDict[SaveLab]=NewCoords\n else:\n CleanedDict=total_data_dict \n\n\n # Calculate all objects properties\n all_objects_list = [(key,coords) for key,coords in CleanedDict.items()]\n\n futures_processes = client.map(object_based_segmentation.obj_properties_calculator,all_objects_list)\n\n all_objects_properties_list = client.gather(futures_processes)\n\n\n # convert the list to a dictionary\n all_objects_properties_dict = { k: v for d in all_objects_properties_list for k, v in d.items() }\n\n # Save all the objects\n segmented_objs_fname = processing_directory + 'segmented_' + segmentation_staining + '_all_objs_properties.pkl'\n pickle.dump(all_objects_properties_dict,open(segmented_objs_fname,'wb'))",
"def run_time_operation(self, learning_option, cluster):\r\n # get input\r\n input_ = self.get_input('input')\r\n indim = self.get_dimension('input')\r\n\r\n # get attr\r\n # optional field\r\n initializer = self.get_attr('initializer', default={'weight': {}, 'bias': {}}) # default will set later\r\n regularizer = self.get_attr('regularizer', default={}) # default will set later\r\n ch_shared = self.get_attr('channel_shared', default=False)\r\n scope = self.get_attr('scope', default='default')\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n # get weight for prelu\r\n alpha_init = get_initializer(initializer.get('weight'), is_bias=False)\r\n alpha_reg = get_regularizer(regularizer, scope, is_bias=False)\r\n\r\n #WARNINIG: constraint of weight is always None\r\n prelu = tf.keras.layers.PReLU(input_, alpha_initializer=alpha_init,\r\n alpha_regularizer=alpha_reg,\r\n alpha_constraint=None,\r\n shared_axes=ch_shared)\r\n\r\n # get output dimension\r\n outdim = indim\r\n\r\n # set output\r\n self.set_dimension('output', outdim)\r\n self.set_output('output', prelu)\r\n\r\n # set tf summary\r\n tf.summary.histogram(self.name, prelu)\r\n\r\n with tf.variable_scope(self.name):\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()",
"def __init__(self, num_cycles_index1=None, num_cycles_index2=None, num_cycles_read1=None, num_cycles_read2=None, num_lanes=None, num_reads=None, num_surfaces=None, num_swaths_per_lane=None, num_tiles_per_swath=None, error_rate=None, error_rate_r1=None, error_rate_r2=None, intensity_cycle1=None, is_indexed=None, max_cycle_called=None, max_cycle_extracted=None, max_cycle_scored=None, min_cycle_called=None, min_cycle_extracted=None, min_cycle_scored=None, non_indexed_error_rate=None, non_indexed_intensity_cycle1=None, non_indexed_percent_aligned=None, non_indexed_percent_gt_q30=None, non_indexed_projected_total_yield=None, non_indexed_yield_total=None, percent_aligned=None, percent_gt_q30=None, percent_gt_q30_last10_cycles=None, percent_gt_q30_r1=None, percent_gt_q30_r2=None, percent_pf=None, percent_resynthesis=None, phasing_r1=None, phasing_r2=None, pre_phasing_r1=None, pre_phasing_r2=None, projected_total_yield=None, reads_pf_total=None, reads_total=None, yield_total=None, clusters=None, clusters_pf=None, cluster_density=None, occupancy=None, percent_loading_concentration=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._num_cycles_index1 = None\n self._num_cycles_index2 = None\n self._num_cycles_read1 = None\n self._num_cycles_read2 = None\n self._num_lanes = None\n self._num_reads = None\n self._num_surfaces = None\n self._num_swaths_per_lane = None\n self._num_tiles_per_swath = None\n self._error_rate = None\n self._error_rate_r1 = None\n self._error_rate_r2 = None\n self._intensity_cycle1 = None\n self._is_indexed = None\n self._max_cycle_called = None\n self._max_cycle_extracted = None\n self._max_cycle_scored = None\n self._min_cycle_called = None\n self._min_cycle_extracted = None\n self._min_cycle_scored = None\n self._non_indexed_error_rate = None\n self._non_indexed_intensity_cycle1 = None\n self._non_indexed_percent_aligned = None\n self._non_indexed_percent_gt_q30 = None\n self._non_indexed_projected_total_yield = None\n self._non_indexed_yield_total = None\n self._percent_aligned = None\n self._percent_gt_q30 = None\n self._percent_gt_q30_last10_cycles = None\n self._percent_gt_q30_r1 = None\n self._percent_gt_q30_r2 = None\n self._percent_pf = None\n self._percent_resynthesis = None\n self._phasing_r1 = None\n self._phasing_r2 = None\n self._pre_phasing_r1 = None\n self._pre_phasing_r2 = None\n self._projected_total_yield = None\n self._reads_pf_total = None\n self._reads_total = None\n self._yield_total = None\n self._clusters = None\n self._clusters_pf = None\n self._cluster_density = None\n self._occupancy = None\n self._percent_loading_concentration = None\n self.discriminator = None\n\n self.num_cycles_index1 = num_cycles_index1\n self.num_cycles_index2 = num_cycles_index2\n self.num_cycles_read1 = num_cycles_read1\n self.num_cycles_read2 = num_cycles_read2\n self.num_lanes = num_lanes\n self.num_reads = num_reads\n self.num_surfaces = num_surfaces\n self.num_swaths_per_lane = num_swaths_per_lane\n self.num_tiles_per_swath = num_tiles_per_swath\n if error_rate is not None:\n self.error_rate = error_rate\n if error_rate_r1 is not None:\n self.error_rate_r1 = error_rate_r1\n if error_rate_r2 is not None:\n self.error_rate_r2 = error_rate_r2\n if intensity_cycle1 is not None:\n self.intensity_cycle1 = intensity_cycle1\n if is_indexed is not None:\n self.is_indexed = is_indexed\n if max_cycle_called is not 
None:\n self.max_cycle_called = max_cycle_called\n if max_cycle_extracted is not None:\n self.max_cycle_extracted = max_cycle_extracted\n if max_cycle_scored is not None:\n self.max_cycle_scored = max_cycle_scored\n if min_cycle_called is not None:\n self.min_cycle_called = min_cycle_called\n if min_cycle_extracted is not None:\n self.min_cycle_extracted = min_cycle_extracted\n if min_cycle_scored is not None:\n self.min_cycle_scored = min_cycle_scored\n if non_indexed_error_rate is not None:\n self.non_indexed_error_rate = non_indexed_error_rate\n if non_indexed_intensity_cycle1 is not None:\n self.non_indexed_intensity_cycle1 = non_indexed_intensity_cycle1\n if non_indexed_percent_aligned is not None:\n self.non_indexed_percent_aligned = non_indexed_percent_aligned\n if non_indexed_percent_gt_q30 is not None:\n self.non_indexed_percent_gt_q30 = non_indexed_percent_gt_q30\n if non_indexed_projected_total_yield is not None:\n self.non_indexed_projected_total_yield = non_indexed_projected_total_yield\n if non_indexed_yield_total is not None:\n self.non_indexed_yield_total = non_indexed_yield_total\n if percent_aligned is not None:\n self.percent_aligned = percent_aligned\n if percent_gt_q30 is not None:\n self.percent_gt_q30 = percent_gt_q30\n if percent_gt_q30_last10_cycles is not None:\n self.percent_gt_q30_last10_cycles = percent_gt_q30_last10_cycles\n if percent_gt_q30_r1 is not None:\n self.percent_gt_q30_r1 = percent_gt_q30_r1\n if percent_gt_q30_r2 is not None:\n self.percent_gt_q30_r2 = percent_gt_q30_r2\n if percent_pf is not None:\n self.percent_pf = percent_pf\n if percent_resynthesis is not None:\n self.percent_resynthesis = percent_resynthesis\n if phasing_r1 is not None:\n self.phasing_r1 = phasing_r1\n if phasing_r2 is not None:\n self.phasing_r2 = phasing_r2\n if pre_phasing_r1 is not None:\n self.pre_phasing_r1 = pre_phasing_r1\n if pre_phasing_r2 is not None:\n self.pre_phasing_r2 = pre_phasing_r2\n if projected_total_yield is not None:\n self.projected_total_yield = projected_total_yield\n if reads_pf_total is not None:\n self.reads_pf_total = reads_pf_total\n if reads_total is not None:\n self.reads_total = reads_total\n if yield_total is not None:\n self.yield_total = yield_total\n if clusters is not None:\n self.clusters = clusters\n if clusters_pf is not None:\n self.clusters_pf = clusters_pf\n if cluster_density is not None:\n self.cluster_density = cluster_density\n if occupancy is not None:\n self.occupancy = occupancy\n if percent_loading_concentration is not None:\n self.percent_loading_concentration = percent_loading_concentration",
"def __init__(self, traindf, params):\n default_args = {\n \"seqin\": 0,\n \"smode\": \"positional\", #site mode\n \"direction\": \"inout\",\n \"positive_cores\" : [],\n \"poscols\": [],\n \"namecol\":\"Name\",\n \"seqcol\":\"Sequence\",\n }\n self.df = traindf\n self.set_attrs(params, default_args)\n if self.smode != \"relative\" and self.smode != \"positional\":\n raise TypeError(\"Smode can only be 'relative' or 'positional'\")\n if self.direction != \"inout\" and self.direction != \"orientation\":\n raise TypeError(\"Direction can only be 'inout' or 'orientation'\")\n if self.direction == \"orientation\" and (\"positive_cores\" not in params or not params[\"positive_cores\"]):\n raise TypeError(\"Positive cores are needed when direction is 'orientation'\")\n\n if self.namecol in self.df:\n fastadict = dict(zip(self.df[self.namecol], self.df[self.seqcol]))\n shapeobj = ds.DNAShape(fastadict)\n else:\n shapeobj = ds.DNAShape(self.df[self.seqcol].tolist())\n self.shapes = {k:getattr(shapeobj,k.lower()) for k in shapeobj.shapetypes}\n # make a dictionary of list instead of nested dictionary since we use this\n # as features\n if self.namecol in self.df:\n namelist = self.df[self.namecol].tolist()\n else:\n namelist = next(iter(self.shapes.values())).keys()\n self.shapes = {k:[v[str(n)] for n in namelist] for k, v in self.shapes.items()}\n if self.direction == \"orientation\":\n ori = Orientation(self.df, {\"positive_cores\":self.positive_cores}).get_feature()\n self.df[\"orientation\"] = [o[\"ori\"] for o in ori]",
"def __init__(self, manager, name):\n FarmDescriptor.__init__(self,manager,'Trigger')\n #Online.DatapointLoader.DatapointLoader.__init__(self,manager,name)\n self.typeMgr = self.manager.typeMgr()\n setup = 'RECO'\n setup = 'ONLINE'\n if setup=='RECO':\n self.allocatePartition = self.allocRecoPartition\n self.getPartition = self.getRecoPartition\n self.freePartition = self.freeRecoPartition\n self.loadRunInfo = self.loadRecoRunInfo\n self.runInfo_type = self.typeMgr.type('RunFarmInfo')\n self.runinfos = Online.PVSS.DpVectorActor(self.manager)\n self.runinfos.lookupOriginal(self.name+'_Farm??.general.partName',self.runInfo_type)\n else:\n self.allocatePartition = self.allocateSlice\n self.getPartition = self.getSlice\n self.freePartition = self.freeSlice\n self.loadRunInfo = self.loadRecoRunInfo\n self.sliceType = self.typeMgr.type('FarmSlice')\n self.runInfo_type = self.typeMgr.type('FarmRunInfo')\n \n self.writer = self.manager.devWriter()\n self.fsm_typ = self.typeMgr.type('_FwFsmDevice')\n subinfo_typ = self.typeMgr.type('FarmSubInfo')\n self.allsubfarms = Online.PVSS.DpVectorActor(self.manager)\n self.allsubfarms.lookupOriginal(self.name+'_*.UsedBy',subinfo_typ)",
"def __init__(self, p):\n self.p = p\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(p.STATE_SIZE, p.STATE_SIZE, p.ACTION_SIZE, p.RANDOM_SEED).to(p.DEVICE)\n self.actor_target = Actor(p.STATE_SIZE, p.STATE_SIZE, p.ACTION_SIZE, p.RANDOM_SEED).to(p.DEVICE)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=p.LR_ACTOR)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(p.STATE_SIZE, p.STATE_SIZE, p.ACTION_SIZE, p.RANDOM_SEED).to(p.DEVICE)\n self.critic_target = Critic(p.STATE_SIZE, p.STATE_SIZE, p.ACTION_SIZE, p.RANDOM_SEED).to(p.DEVICE)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=p.LR_CRITIC,\n weight_decay=self.p.WEIGHT_DECAY)\n self.scheduler_critic = torch.optim.lr_scheduler.StepLR(self.critic_optimizer, step_size=1, gamma=0.5)\n self.scheduler_actor = torch.optim.lr_scheduler.StepLR(self.actor_optimizer, step_size=1, gamma=0.5)\n\n # Noise process\n self.noise = OUNoise(p.ACTION_SIZE, p.RANDOM_SEED, sigma=p.NOISE_SIGMA)\n\n # Replay memory\n self.memory = ReplayBuffer(p)\n\n # when you instantiate agent, make weights the same for target and local\n self.deep_copy(self.actor_target, self.actor_local)\n self.deep_copy(self.critic_target, self.critic_local)\n self.update_count=0",
"def test_create_cluster_network(self):\n pass"
] | [
"0.55085295",
"0.5444285",
"0.5415583",
"0.5309192",
"0.5286226",
"0.5262705",
"0.5232784",
"0.52248675",
"0.5181988",
"0.51781946",
"0.5170047",
"0.51646507",
"0.5155263",
"0.5151695",
"0.5144661",
"0.5142094",
"0.5134443",
"0.5127981",
"0.51168716",
"0.51157445",
"0.5111512",
"0.5111339",
"0.51096547",
"0.510295",
"0.50958455",
"0.5093275",
"0.5082244",
"0.5056136",
"0.5036384",
"0.50239044"
] | 0.6637513 | 0 |
Plot example traces for rs and fs. Plot mean waveform. | def plot_rsfs_waveforms(peak_waveform, durations, labels):
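    # (Assumes numpy as np and matplotlib.pyplot as plt are already imported.)
    # Decide which cluster label is fast-spiking (FS) vs. regular-spiking (RS):
    # the cluster with the shorter mean spike duration is FS. The returned
    # class array uses 0 = RS, 1 = FS.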
if np.mean(durations[np.where(labels==0)[0]]) < np.mean(durations[np.where(labels==1)[0]]):
        fs_k, rs_k = 0, 1
waveform_class_ids = [1,0]
else:
        rs_k, fs_k = 0, 1
waveform_class_ids = [0,1]
waveform_class = [waveform_class_ids[k] for k in labels]
waveform_class = np.array(waveform_class)
plt.figure(figsize=(6,4))
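    # Plot every unit's peak-channel waveform, normalized to its peak amplitude,
    # color-coded by class (class 0 / RS in light blue, class 1 / FS in light green).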
for i in range(len(peak_waveform)):
waveform = peak_waveform[i]
if waveform_class[i]==np.unique(waveform_class)[0]:
plt.plot(waveform/np.max(np.abs(waveform)),'#b3b3ff',alpha=0.7)
if waveform_class[i]==np.unique(waveform_class)[1]:
plt.plot(waveform/np.max(np.abs(waveform)),'#c6ecc6',alpha=0.7)
# plot means, normalized
for waveform_class_id in np.unique(waveform_class):
        mean_waveform = np.mean(peak_waveform[waveform_class == waveform_class_id], axis=0)
        plt.plot(mean_waveform / np.max(np.abs(mean_waveform)), lw=3, label=waveform_class_id)
plt.title('Raw: RS:'+str(len(np.where(waveform_class==0)[0]))+', FS: '+str(len(np.where(waveform_class==1)[0])))
return waveform_class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()",
"def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()",
"def display_averaging(self):\r\n\r\n cwd = os.getcwd()\r\n path = cwd + \"/results\"\r\n df1 = pd.read_csv(path + \"/average_U.csv\") # black line\r\n df2 = pd.read_csv(path + \"/average_N.csv\") # green line\r\n chem = 25 # from 0 to 35\r\n\r\n s1 = df1.iloc[chem]\r\n s1.plot()\r\n\r\n plt.show()",
"def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)",
"def _plot_rfs(ax, xe, ye, de, legend, alpha=0.5):\n # ax = plt.axes()\n ax.set_aspect('equal')\n # FIXME: HARD CODED 2x\n r = 0.203 * de\n for i, (x, y) in enumerate(zip(xe, ye)):\n if i == 0:\n label = None # 'One SDev of Neuron RF'\n else:\n label = None\n ax.add_patch(plt.Circle((x, -y), r, color='red', fill=True,\n alpha=alpha, label=label))\n\n if legend:\n plt.legend()\n ax.set_xlabel('x (arcmin)')\n ax.set_ylabel('y (arcmin)')",
"def plot(self):\n\t\tself.plotOfSpect()",
"def demo(self, tmin=0, tmax=27.4, cadence=30.0 / 60.0 / 24.0, offset=0, raw=False, ax=None):\n t = np.arange(tmin, tmax, cadence)\n if ax is None:\n plt.figure('demo', figsize=(8, 3))\n else:\n plt.sca(ax)\n y = self.model(t)\n if raw:\n plt.plot(t, y + offset, alpha=0.25, linewidth=1, color='royalblue')\n plt.plot(t, self.integrated(t) + offset, alpha=0.5, linewidth=1, color='darkorange')\n plt.xlim(tmin, tmax)\n # plt.ylim(np.max(y)+0.01, np.min(y)-0.01)\n plt.xlabel('Time (days)')\n plt.ylabel('Flux (mag.)')",
"def plotSpectrum(self,wavelengths,intensities = 1.0):\n\n fieldAngle,spectralOutput = self.getIntensitySpectum(wavelengths,intensities)\n\n # Do the actual plot\n plot(np.degrees(fieldAngle),spectralOutput)\n grid()\n title(\"Spectral plot\")\n xlabel(\"Angle in degrees\")\n ylabel(\"Intensty\")",
"def plot_example_psds(example,rate):\r\n plt.figure()\r\n \r\n ##YOUR CODE HERE \r\n \r\n return",
"def plot(self):\n\t\tself.plotOfSpect().plot()",
"def plot_mean_hfo(evlist,color='blue', xlim =[-1,1], figure_size=(10,10),dpi=600,saveplot = None):\n f = plt.figure(figsize=figure_size,dpi=dpi)\n \n \n \n \n raw = np.array([]) # creating a empty array \n filt = np.array([]) # creating a empty array\n pxx = np.array([]) # creating a empty array\n nwave, a = evlist[0].waveform.shape\n time_v = np.linspace(-1,1,nwave,endpoint=True)\n npw, = evlist[0].spectrum.nPxx.shape\n F = evlist[0].spectrum.F\n for hfo in evlist:\n raw = np.append(raw, hfo.waveform[:,0])\n #ax1.plot(time_v,hfo.waveform[:,0],lw=.5)\n filt = np.append(filt, hfo.waveform[:,1])\n #ax2.plot(time_v,hfo.waveform[:,1],lw=.5)\n pxx = np.append(pxx, hfo.spectrum.nPxx)\n \n raw = raw.reshape(len(evlist),nwave)\n filt = filt.reshape(len(evlist),nwave)\n pxx = pxx.reshape(len(evlist),npw)\n\n \n \n \n ax1 = plt.subplot(311)\n m = np.mean(raw,0)\n s = np.std(raw,0)/np.sqrt(raw.shape[0])\n plt.plot(time_v,m,'k',lw=2)\n #ax1.fill_between(time_v,m+s,m-s, facecolor=color, alpha=0.1)\n ax1.set_xlim(xlim)\n adjust_spines(ax1, ['left'])\n \n ax2 = plt.subplot(312)\n m = np.mean(filt,0)\n s = np.std(filt,0)/np.sqrt(filt.shape[0])\n plt.plot(time_v,m,'k',lw=2)\n #ax2.fill_between(time_v,m+s,m-s, facecolor=color, alpha=0.1)\n ax2.set_xlim(xlim)\n adjust_spines(ax2, ['left', 'bottom'])\n \n ax3 = plt.subplot(313)\n m = np.mean(pxx,0)\n s = np.std(pxx,0)/np.sqrt(pxx.shape[0])\n plt.plot(F,m,'k',lw=2)\n ax3.fill_between(F,m+s,m-s, facecolor=color, alpha=0.1)\n adjust_spines(ax3, ['left', 'bottom'])\n \n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n raise Exception('saveplot should be a string')",
"def plot_spectral_estimate(f, sdf, sdf_ests, limits=None, elabels=()):\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax_limits = (sdf.min() - 2*np.abs(sdf.min()),\r\n sdf.max() + 1.25*np.abs(sdf.max()))\r\n ax.plot(f, sdf, 'c', label='True S(f)')\r\n\r\n if not elabels:\r\n elabels = ('',) * len(sdf_ests)\r\n colors = 'bgkmy'\r\n for e, l, c in zip(sdf_ests, elabels, colors):\r\n ax.plot(f, e, color=c, linewidth=2, label=l)\r\n\r\n if limits is not None:\r\n ax.fill_between(f, limits[0], y2=limits[1], color=(1, 0, 0, .3),\r\n alpha=0.5)\r\n\r\n ax.set_ylim(ax_limits)\r\n ax.legend()\r\n return fig",
"def plot_rfs(self):\n self.xe = self.data['XE']\n self.ye = self.data['YE']\n# self.IE = self.data['IE']\n self.Var = self.data['Var']\n std = np.sqrt(np.mean(self.Var))\n fig = plt.gcf()\n ax = plt.gca()\n ax.set_xlim((np.min(self.xe), np.max(self.xe)))\n ax.set_ylim((np.min(self.ye), np.max(self.ye)))\n for xe, ye in zip(self.xe, self.ye):\n circ = plt.Circle((xe, ye), std, color='b', alpha=0.4)\n fig.gca().add_artist(circ)",
"def plot_trace(self):\n az.plot_trace(self.ifd_)",
"def plot_spectra(stream, freqs, amps, data_type, plot_dir, synthetic=True, parameter='none', project='none', run='none'):\n \n import matplotlib.pyplot as plt\n \n # Get station info \n tr = stream[0]\n station = tr.stats.station\n \n # Set up plot\n fig, axs = plt.subplots(3)\n \n # Loop through frequencies and amplitudes \n for i in range(len(freqs)):\n \n # Units\n if data_type == 'disp':\n title = 'Disp'\n units = 'm*s'\n code = 'LX' \n ylim = 10**-4, 6*10**-1\n xlim = 2*10**-3, 5*10**-1\n elif data_type == 'acc':\n title = 'Acc'\n units = 'm/s'\n code = 'HN'\n ylim = 6*10**-15, 6*10**-1\n xlim = .002, 10\n elif data_type == 'vel':\n title = 'Vel'\n units = 'm'\n code = 'HN'\n ylim = 6*10**-15, 8*10**-2\n xlim = .002, 10\n \n # Define label \n if i == 0:\n component = 'E'\n elif i == 1:\n component = 'N'\n elif i == 2:\n component = 'Z'\n label = code + component \n \n # Plot spectra\n axs[i].loglog(freqs[i],amps[i], lw=.8, label=label)\n axs[i].grid(linestyle='--')\n axs[i].set_ylim(ylim)\n axs[i].set_xlim(xlim)\n axs[i].legend()\n\n # Format whole figure\n plt.tight_layout()\n plt.subplots_adjust(hspace=0)\n fig.suptitle(f'{station} {title} Fourier Spectra', fontsize=14, y=1.08)\n fig.text(-.03, 0.5, f'Amplitude {units}', va='center', rotation='vertical')\n plt.xlabel('Frequency (Hz)')\n \n if synthetic:\n plt.savefig(f'{plot_dir}/parameters/{parameter}/{project}/plots/fourier_spec/{run}/{data_type}/{station}.{code}.png',bbox_inches='tight',dpi=300)\n else:\n plt.savefig(f'/Users/tnye/tsuquakes/plots/fourier_spec/obs/{data_type}/{station}.{code}.png',bbox_inches='tight',dpi=300) \n \n plt.close()\n\n\n return()",
"def plotTrace(trace):\n for t in trace:\n plt.plot(range(len(t)),t,alpha=0.5)\n plt.ylabel(\"Trace\")\n plt.xlabel(\"Step\")\n\n return",
"def plot_example_psds(example,rate):\r\n plt.figure()\r\n \r\n labels = ['REM', 'NREM 1', 'NREM 2', 'NREM 3-4']\r\n \r\n ##YOUR CODE HERE\r\n for i in xrange(0,4):\r\n (Pxx, freqs) = m.psd(example[i], NFFT=256, Fs=rate)\r\n Pxx = Pxx / sum(Pxx)\r\n plt.plot(freqs, Pxx, hold=True, label=labels[i], lw=10)\r\n plt.yscale('log')\r\n plt.xlim( xmax = 20 )\r\n plt.legend()\r\n plt.show()\r\n \r\n return",
"def plot_example_spectrograms(example,rate):\r\n plt.figure()\r\n \r\n ###YOUR CODE HERE\r\n y_lim = 40\r\n plt.title('Spectrogram')\r\n bin_space = 512 #30*rate # A typical window size is 30 seconds\r\n plt.subplot(411)\r\n plt.specgram(examples[0]/np.sum(examples[0]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.title ('REM')\r\n plt.subplot(412)\r\n plt.title ('Stage 1 NREM')\r\n plt.specgram(examples[1]/np.sum(examples[1]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.subplot(413)\r\n plt.title ('Stage 2 NREM')\r\n plt.specgram(examples[2]/np.sum(examples[2]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.subplot(414)\r\n plt.title ('Stage 3/4 NREM')\r\n plt.specgram(examples[3]/np.sum(examples[3]),NFFT=bin_space,Fs=srate)\r\n plt.ylim((0,y_lim))\r\n plt.show();\r\n \r\n return",
"def show(self):\n lines, = pylab.plot(self.wavelengths, self.intensities)\n return lines",
"def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n 
axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()",
"def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. Bit)')\n ax1.set_xlabel('Time (s)')",
"def plot_mean_std(real: pd.DataFrame, fake: pd.DataFrame, ax=None):\n if ax is None:\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n fig.suptitle('Absolute Log Mean and STDs of numeric data\\n', fontsize=16)\n\n ax[0].grid(True)\n ax[1].grid(True)\n real = real._get_numeric_data()\n fake = fake._get_numeric_data()\n real_mean = np.log(np.add(abs(real.mean()).values, 1e-5))\n fake_mean = np.log(np.add(abs(fake.mean()).values, 1e-5))\n min_mean = min(real_mean) - 1\n max_mean = max(real_mean) + 1\n line = np.arange(min_mean, max_mean)\n sns.lineplot(x=line, y=line, ax=ax[0])\n sns.scatterplot(x=real_mean,\n y=fake_mean,\n ax=ax[0])\n ax[0].set_title('Means of real and synthetic data')\n ax[0].set_xlabel('real data mean (log)')\n ax[0].set_ylabel('synthetic data mean (log)')\n\n real_std = np.log(np.add(real.std().values, 1e-5))\n fake_std = np.log(np.add(fake.std().values, 1e-5))\n min_std = min(real_std) - 1\n max_std = max(real_std) + 1\n line = np.arange(min_std, max_std)\n sns.lineplot(x=line, y=line, ax=ax[1])\n sns.scatterplot(x=real_std,\n y=fake_std,\n ax=ax[1])\n ax[1].set_title('Stds of real and synthetic data')\n ax[1].set_xlabel('real data std (log)')\n ax[1].set_ylabel('synthetic data std (log)')\n\n if ax is None:\n plt.show()",
"def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()",
"def plot_spectrum(freqs, fluxes, min_lambda=3700, max_lambda=8000):\n plt.plot(freqs, fluxes)\n plt.xlim((min_lambda, max_lambda))\n plt.xlabel(r'$\\lambda\\, (\\AA)$', size=16)\n plt.ylabel(r'$Flux$', size=16)\n #plt.axes().minorticks_on()",
"def plotSpectrum(inp,xrng=[],yrng=[],xlabel='',ylabel='',xlog=False,ylog=False,grid=False,\n legend=[],legend_location='upper right',fontscale=1,legend_fontscale=1,title='',\n color='k',colormap=None,linestyle='-',linewidth=1.5,alpha=1.,\n show_noise=True,color_noise='k',linestyle_noise='-',linewidth_noise=1.5,alpha_noise=0.5,\n comparison=None,color_comparison='grey',linestyle_comparison='-',linewidth_comparison=1.5,alpha_comparison=1,\n residual=False,color_residual='m',linestyle_residual='-',linewidth_residual=1.5,alpha_residual=0.5,\n telluric=False,color_telluric='grey',linestyle_telluric='-',linewidth_telluric=1.5,alpha_telluric=0.2,\n features=[],mdwarf=False,ldwarf=False,tdwarf=False,young=False,binary=False,nsamples=100,\n band=[],band_color='k',band_alpha=0.2,band_label='',band_label_position='bottom',band_width=0.1,\n show_zero=True,stack=0.,zeropoint=0.,color_zero='k',linestyle_zero=':',linewidth_zero=1.5,alpha_zero=0.3,\n inset=False,inset_xrange=[],inset_yrange=[],inset_position=[0.65,0.60,0.20,0.20],inset_features=False,\n output='',multiplot=False,multipage=False,layout=[1,1],figsize=[],tight=True,\n interactive=False,**kwargs):\n\n# keyword parameters (for backward compatability)\n for k in ['showZero','showzero']: show_zero=kwargs.get(k,show_zero)\n for k in ['showNoise','noise','uncertainty','shownoise','showuncertainty','show_uncertainty']: show_noise=kwargs.get(k,show_noise)\n\n for k in ['line_style','lineStyle','ls','linestyles','line_styles']: linestyle=kwargs.get(k,linestyle)\n for k in ['line_width','lineWidth','width','lw','linewidths','line_widths']: linewidth=kwargs.get(k,linewidth)\n for k in ['colors','colour','colours']: color=kwargs.get(k,color)\n for k in ['colorScheme','color_scheme','colorscheme','colorMap','color_map']: colormap=kwargs.get(k,colormap)\n\n for k in ['colornoise','colorNoise','colorUnc','coloruncertainty','color_uncertainty','colorUncertainty']: color_noise=kwargs.get(k,color_noise)\n for k in ['linestylenoise','line_style_noise','linestyleNoise']: linestyle_noise=kwargs.get(k,linestyle_noise)\n for k in ['linewidthnoise','linewidthNoise','line_width_noise']: linewidth_noise=kwargs.get(k,linewidth_noise)\n for k in ['alphanoise','alphaNoise']: alpha_noise=kwargs.get(k,alpha_noise)\n\n for k in ['colorzero','colorZero']: color_zero=kwargs.get(k,color_zero)\n for k in ['linestylezero','line_style_zero','linestyleZero']: linestyle_zero=kwargs.get(k,linestyle_zero)\n for k in ['linewidthzero','linewidthZero','line_width_zero']: linewidth_zero=kwargs.get(k,linewidth_zero)\n for k in ['alphazero','alphaZero']: alpha_zero=kwargs.get(k,alpha_zero)\n\n for k in ['colorcomparison','colorComparison']: color_comparison=kwargs.get(k,color_comparison)\n for k in ['linestyleComparison','line_style_comparison','linestylecomparison']: linestyle_comparison=kwargs.get(k,linestyle_comparison)\n for k in ['linewidthcomparison','linewidthComparison','line_width_comparison']: linewidth_comparison=kwargs.get(k,linewidth_comparison)\n for k in ['alphacomparison','alphaComparison']: alpha_comparison=kwargs.get(k,alpha_comparison)\n\n for k in ['colorresidual','colorResidual']: color_residual=kwargs.get(k,color_residual)\n for k in ['linestyleresidual','line_style_residual','linestyleResidual']: linestyle_residual=kwargs.get(k,linestyle_residual)\n for k in ['linewidthresidual','linewidthResidual','line_width_residual']: linewidth_residual=kwargs.get(k,linewidth_residual)\n for k in ['alpharesidual','alphaResidual']: alpha_residual=kwargs.get(k,alpha_residual)\n\n 
for k in ['bands']: band=kwargs.get(k,band)\n if len(band) == 2 and isinstance(band[0],list) == False: band = [band]\n for k in ['bandcolors','bandcolor','band_colors']: band_color=kwargs.get(k,band_color)\n for k in ['bandalphas','band_alphas','bandalpha']: band_alpha=kwargs.get(k,band_alpha)\n for k in ['band_labels','bandlabel','bandlabels']: band_label=kwargs.get(k,band_label)\n for k in ['band_label_positions','bandlabelposition','bandlabelpositions']: band_label_position=kwargs.get(k,band_label_position)\n for k in ['bandwidth','bandwidths','band_widths']: band_width=kwargs.get(k,band_width)\n for par in [band_color,band_alpha,band_label,band_label_position,band_width]:\n if not isinstance(par,list): par = [par]*len(band)\n if len(par) < len(band): par.extend([par[-1] for x in range(len(band)-len(par))])\n\n for k in ['legends','label','labels']: legend=kwargs.get(k,legend)\n if not isinstance(legend,list): legend = [legend]\n for k in ['legendfontscale','legendFontscale']: legend_fontscale=kwargs.get(k,legend_fontscale)\n legend_fontscale=legend_fontscale*fontscale\n for k in ['legendLocation','legendlocation','labelLocation','labellocation','label_location']: legend_location=kwargs.get(k,legend_location)\n\n for k in ['xrange','x_range','wave_range','wrange','wrng']: xrng=kwargs.get(k,xrng)\n if not isinstance(xrng,list): xrng = [xrng]\n for k in ['yrange','y_range','flux_range','frange','frng']: yrng=kwargs.get(k,yrng)\n if not isinstance(yrng,list): yrng = [yrng]\n\n for k in ['multilayout','multiLayout','multi_layout']: layout=kwargs.get(k,layout)\n for k in ['file','filename']: output=kwargs.get(k,output)\n if not isinstance(output,str): output=''\n filetype = '.pdf'\n if output!='': filetype=output.split('.')[-1]\n\n if comparison != None and isinstance(comparison,splat.Spectrum) == False and isinstance(comparison,list) == False: \n print('plotSpectrum() Warning: comparison spectrum should be a splat Spectrum object, you passed {}'.format(comparison))\n comparison = None\n\n# some plotting constants\n xlabel_default = 'Wavelength'\n ylabel_deafult = 'Flux'\n\n# telluric bands in micron\n telluric_bands = [[1.1,1.2]*u.micron,[1.3,1.5]*u.micron,[1.75,2.0]*u.micron]\n\n# assign features by group\n if not isinstance(features,list): features = [features]\n if ldwarf==True or mdwarf==True: features.extend(['k','na','feh','tio','co','h2o','h2'])\n if tdwarf==True: features.extend(['k','ch4','h2o','h2'])\n if young==True: features.extend(['vo'])\n if binary==True: features.extend(['sb'])\n\n# clean repeats in features while maintaining order - set does not do this\n if len(features)>0:\n fea = []\n for i in features:\n if i not in fea: fea.append(i)\n features = fea\n\n\n# if a list is passed, use this list\n splist = copy.deepcopy(inp)\n if isinstance(splist,list) == False: splist = [splist]\n \n# set up for multiplot\n if len(splist) == 1: multiplot = False\n \n# array of lists => force multiplot\n elif len(splist) > 1 and isinstance(splist[0],list) == True: multiplot = True\n else: pass\n\n# reformat array of spectra of multiplot is used (i.e., user forgot to set)\n if multiplot == True and isinstance(splist[0],splat.Spectrum):\n splist = [[s] for s in splist]\n\n elif multiplot == False and isinstance(splist[0],splat.Spectrum):\n splist = [splist]\n \n# flatten array if multiplot is not set\n elif multiplot == False and isinstance(splist[0],list) and len(splist) > 1:\n splist = [[item for sublist in splist for item in sublist]] # flatten\n else: pass\n\n# total number of spectra - 
use to assign default legends\n allsps = [item for sublist in splist for item in sublist] # Total number of spectra\n if len(legend) == 0: legend=[sp.name for sp in allsps]\n if len(legend) < len(allsps):\n legend.extend([allsps[i].name for i in range(len(legend),len(allsps)-len(legend))])\n \n\n# now run a loop through the input subarrays\n plt.close('all')\n\n# set up here for multiple file output\n nplot = 1\n if multipage == True or multiplot == True:\n nplot = layout[0]*layout[1]\n numpages = int(len(splist) / nplot) + 1\n if (len(splist) % nplot == 0):\n numpages -= 1\n fig = []\n \n if multipage == True and filetype.lower() == 'pdf':\n pdf_pages = PdfPages(output)\n \n if multipage == False:\n if len(splist) > 1:\n filebase = output.replace('.{}'.format(filetype),'')\n files = [filebase+'{}.'.format(i+1)+filetype for i in numpy.arange(len(splist))]\n else:\n files = [output]\n\n pg_n = 0 # page counter\n plt_n = 0 # plot per page counter\n lg_n = 0 # legend per plot counter\n\n for plts,sp in enumerate(splist):\n# set specific plot parameters\n if not isinstance(sp[0],splat.Spectrum):\n raise ValueError('\\nInput to plotSpectrum has wrong format:\\n\\n{}\\n\\n'.format(sp[0]))\n\n# set up plotting defaults for the list of spectra - REPLACE THIS\n if not isinstance(zeropoint,list): zeropoint = [zeropoint]*len(sp)\n if len(zeropoint) < len(sp): zeropoint.extend([zeropoint[-1] for x in range(len(sp)-len(zeropoint))])\n if not isinstance(color,list): color = [color]*len(sp)\n if len(color) < len(sp): color.extend([color[-1] for x in range(len(sp)-len(color))])\n if not isinstance(linestyle,list): linestyle = [linestyle]*len(sp)\n if len(linestyle) < len(sp): linestyle.extend([linestyle[-1] for x in range(len(sp)-len(linestyle))])\n if not isinstance(linewidth,list): linewidth = [linewidth]*len(sp)\n if len(linewidth) < len(sp): linewidth.extend([linewidth[-1] for x in range(len(sp)-len(linewidth))])\n if not isinstance(alpha,list): alpha = [alpha]*len(sp)\n if len(alpha) < len(sp): alpha.extend([alpha[-1] for x in range(len(sp)-len(alpha))])\n if not isinstance(color_noise,list): color_noise = [color_noise]*len(sp)\n if len(color_noise) < len(sp): color_noise.extend([color_noise[-1] for x in range(len(sp)-len(color_noise))])\n if not isinstance(linestyle_noise,list): linestyle_noise = [linestyle_noise]*len(sp)\n if len(linestyle_noise) < len(sp): linestyle_noise.extend([linestyle_noise[-1] for x in range(len(sp)-len(linestyle_noise))])\n if not isinstance(linewidth_noise,list): linewidth_noise = [linewidth_noise]*len(sp)\n if len(linewidth_noise) < len(sp): linewidth_noise.extend([linewidth_noise[-1] for x in range(len(sp)-len(linewidth_noise))])\n if not isinstance(alpha_noise,list): alpha_noise = [alpha_noise]*len(sp)\n if len(alpha_noise) < len(sp): alpha_noise.extend([alpha_noise[-1] for x in range(len(sp)-len(color_noise))])\n if not isinstance(color_comparison,list): color_comparison = [color_comparison]*len(sp)\n if len(color_comparison) < len(sp): color_comparison.extend([color_comparison[-1] for x in range(len(sp)-len(color_comparison))])\n if not isinstance(linestyle_comparison,list): linestyle_comparison = [linestyle_comparison]*len(sp)\n if len(linestyle_comparison) < len(sp): linestyle_comparison.extend([linestyle_comparison[-1] for x in range(len(sp)-len(linestyle_comparison))])\n if not isinstance(linewidth_comparison,list): linewidth_comparison = [linewidth_comparison]*len(sp)\n if len(linewidth_comparison) < len(sp): linewidth_comparison.extend([linewidth_comparison[-1] 
for x in range(len(sp)-len(linewidth_comparison))])\n if not isinstance(alpha_comparison,list): alpha_comparison = [alpha_comparison]*len(sp)\n if len(alpha_comparison) < len(sp): alpha_comparison.extend([alpha_comparison[-1] for x in range(len(sp)-len(alpha_comparison))])\n\n# settings that work if the spectrum was read in as legitmate Spectrum object\n try:\n xlabel = kwargs.get('xlabel','{} ({})'.format(sp[0].wave_label,sp[0].wave.unit))\n ylabel = kwargs.get('ylabel','{} ({})'.format(sp[0].flux_label,sp[0].flux.unit))\n except:\n xlabel = kwargs.get('xlabel',xlabel_default)\n ylabel = kwargs.get('ylabel',ylabel_default)\n# initial plot range\n bound = [numpy.nanmin(sp[0].wave.value),numpy.nanmax(sp[0].wave.value)]\n ymax = [numpy.nanquantile(s.flux.value,0.98) for s in sp]\n bound.extend(numpy.array([-0.02,1.3])*numpy.nanmax(ymax)+\\\n numpy.array([numpy.nanmin(zeropoint),numpy.nanmax(zeropoint)+stack*(len(sp)-1)]))\n\n# set colormap if provided\n if colormap != None:\n values = numpy.arange(len(sp))\n color_map = plt.get_cmap(colormap)\n norm = colmap.Normalize(vmin=0, vmax=1.0*values[-1])\n scalarMap = cm.ScalarMappable(norm=norm, cmap=color_map)\n for i in range(len(sp)): color[i] = scalarMap.to_rgba(values[i])\n\n# GENERATE PLOTS\n if multiplot == True or multipage == True:\n plt_n = plts % nplot\n if (plt_n == 0):\n fig.append(plt.figure())\n pg_n += 1\n ax = fig[pg_n-1].add_subplot(layout[0], layout[1], plt_n+1)\n \n# plotting a single plot with all spectra\n else:\n plt.close('all')\n plt_n = 0\n fig = []\n if len(figsize)>0: fig.append(plt.figure(figsize=figsize))\n else: fig.append(plt.figure())\n ax = fig[0].add_subplot(111)\n \n for ii, a in enumerate(sp):\n# zeropoint and stack\n flx = [i+zeropoint[ii] for i in a.flux.value]\n if stack > 0: flx = [f + (len(sp)-ii-1)*stack for f in flx]\n ax.plot(a.wave.value,flx,color=color[ii],linestyle=linestyle[ii], lw=linewidth[ii], alpha=alpha[ii], zorder = 10, label = legend[lg_n]) \n\n# add comparison\n if comparison != None:\n# zeropoint and stack\n cflx = [i+zeropoint[ii] for i in comparison.flux.value]\n if stack > 0: cflx = [f + (len(sp)-ii-1)*stack for f in cflx]\n ax.plot(comparison.wave.value,cflx,color=color_comparison[ii],linestyle=linestyle_comparison[ii], lw=linewidth_comparison[ii], alpha=alpha_comparison[ii], zorder = 10)\n \n# add residual\n if residual == True and len(sp) == 2:\n # Save flux values from first spectrum\n if ii == 0:\n flx0 = [f - (len(sp)-ii-1)*stack for f in flx]\n \n # Subtract fluxes and plot\n elif ii == 1:\n res = [flx0[f_n] - f for f_n, f in enumerate(flx)]\n ax.plot(a.wave.value, res, alpha = alpha_residual[ii], color = color_residual[ii], linsetyle=linestyle_residual[ii], lw=linewidth_residual[ii])\n \n # Fix bound[2] if residual goes below 0\n if numpy.nanmin(res) < bound[2]:\n b0 = numpy.argmin(a.wave.value[a.wave.value > bound[0]])\n b1 = numpy.argmax(a.wave.value[a.wave.value < bound[1]])\n bound[2] = numpy.nanmin(res[b0:b1])\n\n# noise\n if show_noise == True:\n ns = [i+zeropoint[ii] for i in a.noise.value]\n ax.plot(a.wave.value,ns,color=color_noise[ii],linestyle=linestyle_noise[ii],alpha=alpha_noise[ii], lw=linewidth_noise[ii], zorder = 10)\n\n# zeropoint\n if show_zero == True:\n ze = numpy.ones(len(a.flux))*zeropoint[ii]\n ax.plot(a.wave.value,ze,color=color[ii],linestyle=linestyle_zero,alpha=alpha_zero,lw=linewidth_zero, zorder = 10)\n\n# save maximum flux among all spectra for plotting\n# THIS IS VERY SLOW AND IT WOULD BE BETTER TO FIND AN ALTERNATE APPROACH\n if len(features)>0:\n f 
= interp1d(a.wave,flx,bounds_error=False,fill_value=0.)\n if ii == 0: \n wvmax = numpy.linspace(bound[0],bound[1],nsamples)\n flxmax = numpy.array(f(wvmax))\n else: flxmax = numpy.maximum(flxmax,numpy.array(f(wvmax)))\n\n# legend counter\n lg_n = lg_n + 1 # Increment legend\n\n\n# label features\n# THIS NEEDS TO BE FIXED WITH GRETEL'S STUFF\n if len(features) > 0:\n yoff = 0.02*(bound[3]-bound[2]) # label offset\n fontsize = int((10-numpy.nanmin([(layout[0]*layout[1]-1),6]))*fontscale)\n for ftr in features:\n ftr = ftr.lower()\n if ftr in FEATURE_LABELS:\n ftrc = checkDict(ftr,FEATURE_LABELS)\n if ftrc != False:\n for ii,waveRng in enumerate(FEATURE_LABELS[ftrc]['wavelengths']):\n wRng = waveRng.to(sp[0].wave.unit).value\n# features must be contained in plot range (may change this)\n if numpy.nanmin(wRng) > bound[0] and numpy.nanmax(wRng) < bound[1]:\n wfeature = numpy.where(numpy.logical_and(wvmax >= numpy.nanmin(wRng),wvmax <= numpy.nanmax(wRng)))\n if len(wvmax[wfeature]) == 0: wfeature = numpy.argmax(numpy.absolute(wvmax-numpy.nanmedian(wRng)))\n y = numpy.nanmax(flxmax[wfeature])+yoff\n flxmax[wfeature] = flxmax[wfeature]+3.*yoff\n\n if FEATURE_LABELS[ftrc]['type'] == 'band':\n ax.plot(wRng,[y+yoff]*2,color='k',linestyle='-')\n ax.plot([wRng[0]]*2,[y,y+yoff],color='k',linestyle='-')\n ax.text(numpy.mean(wRng),y+1.5*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=fontsize)\n else:\n for w in wRng: ax.plot([w]*2,[y,y+yoff],color='k',linestyle='-')\n ax.text(numpy.mean(wRng),y+1.5*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=fontsize)\n bound[3] = numpy.nanmax([numpy.nanmax(flxmax)+2.*yoff,bound[3]])\n\n# add grid\n if grid == True: ax.grid() \n\n# axis labels \n fontsize = (numpy.round(numpy.max([13./((layout[0]*layout[1])**0.33),5]))) * fontscale\n legend_fontsize = (13-numpy.min([(layout[0]*layout[1]-1),8])) * legend_fontscale\n ax.set_xlabel(xlabel, fontsize = fontsize)\n ax.set_ylabel(ylabel, fontsize = fontsize)\n ax.tick_params(axis='x', labelsize=fontsize)\n ax.tick_params(axis='y', labelsize=fontsize)\n\n# add title\n if title!='': ax.set_title(title)\n\n# log scale?\n if kwargs.get('xlog',False): ax.set_xscale('log',nonposx='clip')\n if kwargs.get('ylog',False): ax.set_yscale('log',nonposy='clip')\n\n# place legend\n if len(legend) > 0:\n if legend_location == 'outside':\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.15, box.width * 0.7, box.height * 0.7])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':legend_fontsize})\n else:\n ax.legend(loc=legend_location, prop={'size':legend_fontsize})\n bound[3] = bound[3]+0.1*(bound[3]-bound[2]) # extend axis for in-plot legends\n\n# overplot telluric absorption\n if telluric == True:\n yoff = 0.02*(bound[3]-bound[2]) # label offset\n for waveRng in telluric_bands:\n wR = waveRng.to(sp[0].wave.unit).value\n rect = patches.Rectangle((wR[0],bound[2]),wR[1]-wR[0],bound[3]-bound[2],facecolor=color_telluric,alpha=alpha_telluric,color=color_telluric)\n ax.add_patch(rect)\n ax.text(numpy.mean(wR),bound[2]+3*yoff,r'$\\oplus$',horizontalalignment='center',fontsize=fontsize)\n\n# overplot color swaths for pre-specified bands\n if len(band) > 0:\n for i,b in enumerate(band):\n if not isinstance(b,list): \n try: b = [float(b)-0.5*band_width,float(b)+0.5*band_width]\n except:\n print('\\nWarning: plotSpectrum bands variables should be array of 2-element arrays; you passed {}'.format(band))\n b = [0.,0.]\n rect = 
patches.Rectangle((b[0],bound[2]),b[1]-b[0],bound[3]-bound[2],facecolor=band_color[i],color=band_color[i],alpha=band_alpha[i])\n ax.add_patch(rect)\n if band_label_position[i].lower() == 'top':\n ax.text(numpy.mean(b),bound[3]-3*yoff,band_label[i],horizontalalignment='center',fontsize=fontsize)\n elif band_label_position[i].lower() == 'middle':\n ax.text(numpy.mean(b),0.5*(bound[2]+bound[3]),band_label[i],horizontalalignment='center',fontsize=fontsize)\n else:\n ax.text(numpy.mean(b),bound[2]+3*yoff,band_label[i],horizontalalignment='center',fontsize=fontsize)\n\n# place inset - RIGHT NOW ONLY SETTING LIMITS WITH FIRST SPECTRUM IN LIST\n if inset == True and len(inset_xrange) == 2:\n ax_inset = fig[pg_n-1].add_axes(inset_position) #, axisbg='white')\n bound2 = inset_xrange\n if len(inset_yrange) == 0:\n b0 = numpy.argmax(sp[0].wave.value > bound2[0])\n b1 = numpy.argmin(sp[0].wave.value < bound2[1])\n inset_yrange = [numpy.nanmin(sp[0].flux.value[b0:b1]),numpy.nanmax(sp[0].flux.value[b0:b1])]\n bound2.extend(inset_yrange)\n db = (bound2[3]-bound2[2])\n bound2[2] = bound2[2]-0.05*db\n bound2[3] = bound2[3]+0.05*db\n ax_inset.axis(bound2)\n inset_fontsize = fontsize*0.7\n\n for ii,a in enumerate(sp):\n flx = [i+zeropoint[ii] for i in a.flux.value]\n ax_inset.plot(a.wave.value,flx,color=colors[ii],linestyle=linestyle[ii],linewidth=linewidth[ii],alpha=alpha[ii]) \n ax_inset.set_xlabel('')\n ax_inset.set_ylabel('')\n ax_inset.tick_params(axis='x', labelsize=inset_fontsize)\n ax_inset.tick_params(axis='y', labelsize=inset_fontsize)\n# ax_inset.legend()\n\n# inset feature labels\n if len(inset_features) > 0:\n yoff = 0.05*(bound2[3]-bound2[2])\n for ftr in inset_features:\n ftrc = checkDict(ftr,FEATURE_LABELS)\n if ftrc != False:\n for ii,waveRng in enumerate(FEATURE_LABELS[ftrc]['wavelengths']):\n wRng = waveRng.to(sp[0].wave.unit).value\n if (numpy.min(wRng) > bound2[0] and numpy.max(wRng) < bound2[1]):\n wfeature = numpy.where(numpy.logical_and(wvmax >= numpy.nanmin(wRng),wvmax <= numpy.nanmax(wRng)))\n if len(wvmax[wfeature]) == 0: wfeature = numpy.argmax(numpy.absolute(wvmax-numpy.nanmedian(wRng)))\n y = numpy.nanmax(flxmax[wfeature])+yoff\n flxmax[wfeature] = flxmax[wfeature]+3.*yoff\n \n if FEATURE_LABELS[ftrc]['type'] == 'band':\n ax_inset.plot(wR,[y+yoff]*2,color='k',linestyle='-')\n ax_inset.plot([wR[0]]*2,[y,y+yoff],color='k',linestyle='-')\n ax_inset.text(numpy.mean(wR),y+2*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=inset_fontsize)\n else:\n for w in waveRng:\n ax_inset.plot([w]*2,[y,y+yoff],color='k',linestyle='-')\n ax_inset.text(numpy.mean(wR),y+2*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=inset_fontsize)\n waveRng = [wR[0]-0.02,wR[1]+0.02] # for overlap\n \n# update offset\n if len(inset_features) > 0: bound2[3] = numpy.nanmax([bound2[3],numpy.nanmax(flxmax)+5.*yoff])\n ax_inset.axis(bound2)\n\n# finalize bounding\n if len(xrng) > 0: bound[0:2] = xrng\n if len(yrng) > 0: bound[2:4] = yrng\n if isUnit(bound[0]): bound = [x.value for x in bound]\n ax.axis(bound)\n \n# save to file or display\n# ERROR HERE - CHECK WHAT FILES\n if multipage == False:\n if files[plts] != '' and (plts % nplot == 3 or plts == len(splist)-1):\n if kwargs.get('tight',True) == True: \n plt.savefig(files[plts], bbox_inches='tight')\n else:\n plt.savefig(files[plts])\n if output == '' and not kwargs.get('web',False):\n plt.show()\n if (kwargs.get('interactive',False) != False): plt.ion()\n else: plt.ioff()\n\n\n# save figures in multipage format and 
write off pdf file\n if multipage == True: \n for pg_n in numpy.arange(numpages):\n# fig[pg_n].text(0.5, 0.04, xlabel, ha = 'center', va = 'center')\n# fig[pg_n].text(0.06, 0.5, ylabel, ha = 'center', va = 'center', rotation = 'vertical')\n fig[pg_n].tight_layout\n fig[pg_n].suptitle(title, fontsize = int(14*fontsize), fontweight = 'bold')\n pdf_pages.savefig(fig[pg_n])\n if filetype.lower() == 'pdf':\n pdf_pages.close()\n\n plt.clf()\n return fig",
"def plot_mean_std(data,ax,label=None,show_error=True):\n x = np.arange(1,100)\n mean = np.array([np.mean(data_n) for data_n in data])\n if show_error: std = np.array([np.std(data_n) for data_n in data])\n ax.plot(x,mean,label=label)\n if show_error: ax.fill_between(x,mean-std,mean+std,alpha=0.3)",
"def plot_examples(cms):\r\n data = amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()",
"def avg_sse_plot(self):\n df_sse = self.df[\"sse_avg\"].sort_values(ascending=False)\n plt.figure(figsize=(self.plot_width, self.plot_height))\n df_sse.plot(\"bar\")\n plt.title(\"Media SSE por cluster\")\n output_path_sse = os.path.join(self.output_folder, 'sse_avg_plot.png')\n plt.savefig(output_path_sse)",
"def plot(x, y, title):\n fig, ax = plt.subplots()\n\n x_ave,y_ave = averages(x,y)\n\n ax.scatter(x, y, label=\"data\", color='b')\n ax.scatter(x_ave, y_ave, label=\"means\", color='r')\n \n ax.set_xlim( xmin=0 ) \n ax.set_ylim( ymin=0 )\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n ax.set_title(title)\n ax.set_xlabel(Network_.__name__ +' size')",
"def plot_period_averages(data_sources: list, sun_df: pd.DataFrame):\n daily_mean_df = extract_daily_means(data_sources)\n\n traces = []\n\n # There is a bug in here with using datetime.time as the x axis. Things\n # don't line up as expected, so we need to have the first trace(s) have\n # a full sampling of all timestamps from the data sources\n # to be able to plot the night shading rects. Dunno why.\n # To solve this, we just plot an invisible line with our target x axis\n # before the night rects\n traces.append(\n go.Scatter(\n x=daily_mean_df.index,\n y=[60] * len(daily_mean_df.index),\n mode=\"lines\",\n line=dict(width=0),\n name=\"placeholder\",\n showlegend=False,\n )\n )\n\n # shade the night time\n # these traces need to be after the temperature traces, because indexing is weird for datetime.time\n sunset = mean_time(sun_df.sunset)\n sunrise = mean_time(sun_df.sunrise)\n y_range = (60, 100) # temperature range, in F\n traces.extend([\n # NOTE: the sunset/sunrise datetime.time needs to perfectly align with the times in the temperature traces\n go.Scatter(\n x=[\n datetime.time(0, 0),\n datetime.time(0, 0),\n datetime.time(6, 15),\n datetime.time(6, 15),\n # sunrise,\n # sunrise,\n ],\n y=[y_range[0], y_range[1], y_range[1], y_range[0]],\n fill=\"toself\",\n fillcolor=\"grey\",\n mode=\"lines\",\n line=dict(width=0),\n opacity=0.3,\n showlegend=True,\n name=\"night\",\n ),\n go.Scatter(\n x=[\n # sunset,\n # sunset,\n datetime.time(19, 42),\n datetime.time(19, 42),\n datetime.time(23, 57),\n datetime.time(23, 57),\n ],\n y=[y_range[0], y_range[1], y_range[1], y_range[0]],\n fill=\"toself\",\n fillcolor=\"grey\",\n mode=\"lines\",\n line=dict(width=0),\n opacity=0.3,\n showlegend=False,\n name=\"night\",\n ),\n ])\n\n # create traces for each of the source day averages\n for i, source in enumerate(data_sources):\n traces.append(\n go.Scatter(\n x=daily_mean_df.index,\n y=daily_mean_df[source.name],\n mode=\"lines\",\n line=dict(width=2, color=source.color),\n name=source.name,\n opacity=LINE_OPACITY,\n )\n )\n\n fig = go.Figure(\n data=traces,\n layout=dict(\n width=FIGURE_DIMENSIONS[0],\n height=FIGURE_DIMENSIONS[1],\n title=\"Dwelling 24H average temperature - BRC 2019\",\n xaxis=dict(title=\"time of day\"),\n yaxis=dict(title=f\"temperature (F)\", range=y_range),\n ),\n )\n out_filename = os.path.join(FIGURE_PATH, \"phage_average_temperature_2019.html\")\n plot(fig, filename=out_filename)\n return fig"
] | [
"0.6366105",
"0.6343887",
"0.6251174",
"0.62412655",
"0.6195293",
"0.6167033",
"0.61656606",
"0.61597526",
"0.6135713",
"0.6123626",
"0.60824937",
"0.6050469",
"0.605036",
"0.60355365",
"0.60064894",
"0.60045254",
"0.5998158",
"0.59978235",
"0.59749985",
"0.59437656",
"0.59196746",
"0.5914041",
"0.58753455",
"0.58731914",
"0.58329284",
"0.58193415",
"0.5818043",
"0.58039236",
"0.5802635",
"0.58017725"
] | 0.6464803 | 0 |
Converts offset+length coordinates to sentence_id and token_id. Returns (sentence_start, token_start), (sentence_end, token_end) | def offset_to_tokens(self, offset, length):
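        # Walk tokens in document order using the per-token character offsets in
        # self.tokenized['sentences'] (self.conll is iterated in parallel), and
        # remember the first token that overlaps the span [offset, offset + length).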
first_sent_id = None
first_tok_id = None
for sent_id, (sent_json, sent_conll) in enumerate(
zip(self.tokenized['sentences'], self.conll)):
for tok_id, (token_json, token_conll) in enumerate(
zip(sent_json['tokens'], sent_conll)):
begin = token_json['characterOffsetBegin']
end = token_json['characterOffsetEnd']
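                # Token ends before the span starts: keep scanning.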
if end < offset:
continue
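                # Token starts at or past the span end: the whole span has been
                # covered, so return the recorded start and the current position
                # (the first token past the span).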
if (offset + length) <= begin:
if first_sent_id is not None:
return ((first_sent_id, first_tok_id),
(sent_id, tok_id))
else:
return None, None
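                # First token overlapping the span: record where it starts.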
if first_sent_id is None:
first_sent_id = sent_id
first_tok_id = tok_id
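        # No token starts at or past the span end (e.g. the span reaches the end
        # of the document), so no match is reported.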
return None, None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def token_positions(separation):\n offsets = (-separation, 0, separation)\n for x_pos in offsets:\n for y_pos in offsets:\n yield x_pos, y_pos",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if not hasattr(node, 'first_token'):\n return (1, 0), (1, 0)\n\n start = node.first_token.start\n end = node.last_token.end\n if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Set col_offset to 0 to include leading indentation for multiline statements.\n start = (start[0], 0)\n\n return start, end",
"def charIdx_to_tokenIdx(spans, ans_text, ans_start): \n # Convert answer 'char idxs' to 'token idxs' for one single record\n if ans_start != -999:\n ans_end = ans_start + len(ans_text) - 1 \n ans_token_idxs = []\n for token_idx, span in enumerate(spans):\n if span[0] < ans_end and span[1] > ans_start:\n ans_token_idxs.append(token_idx)\n y1, y2 = ans_token_idxs[0], ans_token_idxs[-1] \n else:\n y1, y2 = -999, -999\n \n return y1, y2",
"def tokenize(self, sentences) -> Tuple[Dict[str, Any], List[List[Tuple[int, int]]]]:\n inputs = self.tokenizer(sentences, return_tensors=\"pt\", return_attention_mask=True, return_token_type_ids=True,\n return_offsets_mapping=True,\n padding=True, truncation=True)\n\n offset_mapping = inputs[\"offset_mapping\"]\n inputs = {k: v.to(self.device) for k, v in inputs.items() if k != \"offset_mapping\"}\n\n return inputs, offset_mapping",
"def _sentence(self, node, offset_mngr):\n text = self._text(node)\n offset = offset_mngr.update(node, text)\n return text, offset",
"def _get_text_positions_tokenless(self, node, padded):\n # type: (ast.AST, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if sys.version_info[:2] < (3, 8):\n raise AssertionError(\"This method should only be called internally after checking supports_tokenless()\")\n\n if isinstance(node, ast.Module):\n # Modules don't have position info, so just return the range of the whole text.\n # The token-using method does something different, but its behavior seems weird and inconsistent.\n # For example, in a file with only comments, it only returns the first line.\n # It's hard to imagine a case when this matters.\n return (1, 0), self._line_numbers.offset_to_line(len(self._text))\n\n if not hasattr(node, 'lineno'):\n return (1, 0), (1, 0)\n\n assert node # tell mypy that node is not None, which we allowed up to here for compatibility\n\n decorators = getattr(node, 'decorator_list', [])\n if decorators:\n # Function/Class definition nodes are marked by AST as starting at def/class,\n # not the first decorator. This doesn't match the token-using behavior,\n # or inspect.getsource(), and just seems weird.\n start_node = decorators[0]\n else:\n start_node = node\n\n if padded and last_stmt(node).lineno != node.lineno:\n # Include leading indentation for multiline statements.\n start_col_offset = 0\n else:\n start_col_offset = self._line_numbers.from_utf8_col(start_node.lineno, start_node.col_offset)\n\n start = (start_node.lineno, start_col_offset)\n\n # To match the token-using behaviour, we exclude trailing semicolons and comments.\n # This means that for blocks containing multiple statements, we have to use the last one\n # instead of the actual node for end_lineno and end_col_offset.\n end_node = last_stmt(node)\n end_lineno = cast(int, end_node.end_lineno)\n end_col_offset = cast(int, end_node.end_col_offset)\n end_col_offset = self._line_numbers.from_utf8_col(end_lineno, end_col_offset)\n end = (end_lineno, end_col_offset)\n\n return start, end",
"def ctx_embd_input(sentence):\n sent_split = sentence.split()\n\n annotation_indices = []\n tokens = []\n for index, token in enumerate(sent_split):\n token, *annotation = token.split('---')\n tokens.append(token)\n \n if annotation:\n annotation_indices.append((index, annotation[0]))\n \n return tokens, annotation_indices",
"def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos - i + len(line) + 1\n break\n return (cursor_col, cursor_row)",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if getattr(node, \"_broken_positions\", None):\n # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.\n return (1, 0), (1, 0)\n\n if supports_tokenless(node):\n return self._get_text_positions_tokenless(node, padded)\n\n return self.asttokens.get_text_positions(node, padded)",
"def get_offsets(word, raw_text):\n try:\n match = re.search(word, raw_text)\n return (match.start(), match.end())\n except AttributeError: #could not find word\n return (0, 0)",
"def tokenize_sequences(source_sent, target_sent):\r\n\tsource_sent = source_sent.numpy().decode('utf-8')\r\n\ttarget_sent = target_sent.numpy().decode('utf-8')\r\n\r\n\tinpt = [en2idx.get(word, 1) for word in (u\"<SOS> \" + source_sent + u\" <EOS>\").split()]\r\n\toutpt = [de2idx.get(word, 1) for word in (u\"<SOS> \" + target_sent + u\" <EOS>\").split()]\r\n\r\n\treturn inpt, outpt",
"def convert_mousepos(self, pos):\n tokenx, tokeny = pos\n row = int((tokenx - self.x_margin) / SPACESIZE)\n column = int((tokeny - self.y_margin) / SPACESIZE)\n return column, row",
"def getStartAndEndCoordinates(alignedSegment):\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq)",
"def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4",
"def search_start_end_index_in_sentence(sent, np):\n\n nps = [x for x in np.split() if x]\n if len(nps) == 0:\n return (-1, -1)\n elif len(nps) == 1:\n indices = search_one_token_reducing_suffix(sent, np)\n if len(indices) > 0:\n return (indices[0], search_next_whitespace(sent, indices[0]))\n else:\n return (-1, -1)\n else:\n # search start:\n start = search_correct_position(sent, nps)\n end = search_correct_position(sent, nps, True)\n if end != -1:\n end = search_next_whitespace(sent, end)\n return (start,end)",
"def convert_idx(text, tokens):\n current = 0\n spans = []\n for token in tokens:\n current = text.find(token, current) # Find position of 1st occurrence; start search from 'current' \n if current < 0:\n raise Exception(f\"Token '{token}' cannot be found\")\n spans.append((current, current + len(token)))\n current += len(token) # next search start from the token afterwards\n return spans",
"def retrieve_sentence_and_id(label_sent: str) -> tuple:\n sentence_db = sentence_record.SentenceDB()\n mapping = sentence_db.get_mapping_by_label(label_sent)\n try:\n sentence = mapping[0][0]\n sentence_id = mapping[0][1]\n except IndexError:\n print('Failed to retrieve sentence and sentence_id')\n sentence = ''\n sentence_id = -1\n return (sentence, sentence_id)",
"def get_words_position(self, words: List[Word]) -> Tuple[int, int]:\n start: int = self.get_word_postion(words[0])[0]\n end: int = self.get_word_postion(words[-1])[1]\n return start, end",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n raise NotImplementedError",
"def seq_2_pos(idx):\n\tglobal SEQ2POS\n\tif idx not in SEQ2POS:\n\t\treturn None\n\tcod = SEQ2POS[idx]\n\treturn (cod&0xFFFF) , (cod>>16)",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)",
"def get_offsets(tag, event_idx, timex_idx):\n if tag.name in LINK_TAGS:\n id1, id1_type = tag.attrs.get(TIME_ID), TIMEX\n if id1 is None:\n saved = \"%s-%s\" % (id1, id1_type)\n id1, id1_type = tag.attrs.get(EVENT_INSTANCE_ID), EVENT\n id2, id2_type = tag.attrs.get(RELATED_TO_TIME), TIMEX\n if id2 is None:\n id2, id2_type = tag.attrs.get(RELATED_TO_EVENT_INSTANCE), EVENT\n if id2 is None:\n id2, id2_type = tag.attrs.get(SUBORDINATED_EVENT_INSTANCE), EVENT\n offsets = [_retrieve_from_index(id1, id1_type, event_idx, timex_idx),\n _retrieve_from_index(id2, id2_type, event_idx, timex_idx)]\n if len(offsets) != 2:\n _offset_warning(\"unexpected offsets\", tag, offsets)\n return None\n elif offsets[0][0] is None or offsets[1][0] is None:\n _offset_warning(\"cannot find source and/or target\", tag, offsets)\n return None\n else:\n return tuple(offsets)\n else:\n return (tag.begin, tag.end)",
"def _get_marker_indices(marker, line):\n indices = [i for i, ltr in enumerate(line) if ltr == marker]\n start = indices[0:-1:2]\n end = [i + 1 for i in indices[1::2]]\n assert len(start) == len(end)\n return start, end",
"def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids",
"def __get_exon_coordinates(self, exon):\n start = None\n end = None\n if self.__is_padding_enabled():\n start = exon[constants.EXON_PADDED_START]\n end = exon[constants.EXON_PADDED_END]\n else:\n start = exon[constants.EXON_START]\n end = exon[constants.EXON_END]\n return (start, end)",
"def _whitespace_tokenize_with_offsets(self, input_tensor):\n (values, row_splits, start_offsets, end_offsets) = (\n gen_whitespace_tokenizer_v2.tf_text_whitespace_tokenize_with_offsets_v2(\n input_values=input_tensor, input_config=self._config))\n values = RaggedTensor.from_nested_row_splits(\n flat_values=values,\n nested_row_splits=[row_splits])\n start_offsets = RaggedTensor.from_nested_row_splits(\n flat_values=start_offsets,\n nested_row_splits=[row_splits])\n end_offsets = RaggedTensor.from_nested_row_splits(\n flat_values=end_offsets,\n nested_row_splits=[row_splits])\n return (values, start_offsets, end_offsets)",
"def idx2tokens(eval_file, ids, start_idxs, end_idxs):\n predictions = dict()\n for _id, start_idx, end_idx in zip(ids, start_idxs, end_idxs):\n context = eval_file[str(_id.item())]['context']\n spans = eval_file[str(_id.item())]['spans']\n quid = eval_file[str(_id.item())]['uuid']\n if start_idx >= len(spans) or end_idx >= len(spans):\n answer = \"\"\n else:\n start = spans[start_idx][0]\n # print(end_idx)\n # print(spans[end_idx])\n end = spans[end_idx][1]\n answer = context[start:end]\n predictions[quid] = answer\n return predictions"
] | [
"0.6784497",
"0.64283454",
"0.6420033",
"0.6386695",
"0.6348043",
"0.6061477",
"0.60567343",
"0.6041508",
"0.6039938",
"0.5996507",
"0.5840656",
"0.57864076",
"0.5765366",
"0.5730394",
"0.5728136",
"0.5719357",
"0.57179654",
"0.56979686",
"0.5688681",
"0.56320363",
"0.5586682",
"0.5586682",
"0.5586682",
"0.5582906",
"0.55811006",
"0.55744076",
"0.5542414",
"0.5522972",
"0.5503926",
"0.5502402"
] | 0.71816325 | 0 |
Convert existing RADIUS usernames in profiles to AuthenticationData entries. | def convert_radius_username(apps, schema_editor):
Profile = apps.get_model("organization", "Profile")
AuthenticationData = apps.get_model("organization", "AuthenticationData")
for profile in Profile.objects.exclude(radius_username=""):
a = AuthenticationData(backend=RADIUS_BACKEND_NAME, username=profile.radius_username, user=profile.user)
a.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reverse_convert_radius_username(apps, schema_editor):\n Profile = apps.get_model(\"organization\", \"Profile\")\n AuthenticationData = apps.get_model(\"organization\", \"AuthenticationData\")\n for auth_data in AuthenticationData.objects.filter(backend=RADIUS_BACKEND_NAME):\n auth_data.user.profile.radius_username = auth_data.username\n auth_data.user.profile.save()",
"def _process_ldap_info_for_all_users(self, result_data):\n results = []\n logger.debug(\" LDAP.py _process_ldap_info_for_all_users result_data %s \"\n % (result_data))\n for ldapentry in result_data:\n logger.debug(\" LDAP.py _process_ldap_info_for_all_users \\\n ldapentry name : %s \" % (ldapentry[1]['uid'][0]))\n tmpname = ldapentry[1]['uid'][0]\n hrn = self.authname + \".\" + tmpname\n\n tmpemail = ldapentry[1]['mail'][0]\n if ldapentry[1]['mail'][0] == \"unknown\":\n tmpemail = None\n\n try:\n results.append({\n 'type': 'user',\n 'pkey': ldapentry[1]['sshPublicKey'][0],\n #'uid': ldapentry[1]['uid'][0],\n 'uid': tmpname ,\n 'email':tmpemail,\n #'email': ldapentry[1]['mail'][0],\n 'first_name': ldapentry[1]['givenName'][0],\n 'last_name': ldapentry[1]['sn'][0],\n #'phone': 'none',\n 'serial': 'none',\n 'authority': self.authname,\n 'peer_authority': '',\n 'pointer': -1,\n 'hrn': hrn,\n })\n except KeyError, error:\n logger.log_exc(\"LDAPapi.PY \\t LdapFindUser EXCEPTION %s\"\n % (error))\n return\n\n return results",
"def getdat(user):\r\n profile = user.profile\r\n return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]",
"def generate_accounts_dict(john):\n users = {}\n # Read in cracked password from John output and update user object in dictionary\n jlines = john.read().splitlines()\n for j in jlines:\n if \":\" in j:\n if not j.split(\":\")[0].endswith(\"$\"): # Eliminate machine hashes\n # print \"%s : %s\" % (j.split(\":\")[0], j.split(\":\")[1])\n users[j.split(\":\")[0]] = j.split(\":\")[1]\n return users",
"def _process_ldap_info_for_one_user(self, record, result_data):\n #One entry only in the ldap data because we used a filter\n #to find one user only\n ldapentry = result_data[0][1]\n logger.debug(\"LDAP.PY \\t LdapFindUser ldapentry %s\" % (ldapentry))\n tmpname = ldapentry['uid'][0]\n\n tmpemail = ldapentry['mail'][0]\n if ldapentry['mail'][0] == \"unknown\":\n tmpemail = None\n\n parent_hrn = None\n peer_authority = None\n if 'hrn' in record:\n hrn = record['hrn']\n parent_hrn = get_authority(hrn)\n if parent_hrn != self.authname:\n peer_authority = parent_hrn\n #In case the user was not imported from Iotlab LDAP\n #but from another federated site, has an account in\n #iotlab but currently using his hrn from federated site\n #then the login is different from the one found in its hrn\n if tmpname != hrn.split('.')[1]:\n hrn = None\n else:\n hrn = None\n\n results = {\n 'type': 'user',\n 'pkey': ldapentry['sshPublicKey'],\n #'uid': ldapentry[1]['uid'][0],\n 'uid': tmpname,\n 'email': tmpemail,\n #'email': ldapentry[1]['mail'][0],\n 'first_name': ldapentry['givenName'][0],\n 'last_name': ldapentry['sn'][0],\n #'phone': 'none',\n 'serial': 'none',\n 'authority': parent_hrn,\n 'peer_authority': peer_authority,\n 'pointer': -1,\n 'hrn': hrn,\n }\n return results",
"def _ParseAccountsData(self, account_data):\n if not account_data:\n return {}\n lines = [line for line in account_data.splitlines() if line]\n user_map = {}\n for line in lines:\n if not all(ord(c) < 128 for c in line):\n self.logger.info('SSH key contains non-ascii character: %s.', line)\n continue\n split_line = line.split(':', 1)\n if len(split_line) != 2:\n self.logger.info('SSH key is not a complete entry: %s.', split_line)\n continue\n user, key = split_line\n if self._HasExpired(key):\n self.logger.debug('Expired SSH key for user %s: %s.', user, key)\n continue\n if user not in user_map:\n user_map[user] = []\n user_map[user].append(key)\n logging.debug('User accounts: %s.', user_map)\n return user_map",
"def get_user_info_by_name(self, username: str) -> dict:",
"def ReadUserMappings(self, file_object):\n # TODO: replace by:\n # config_parser = configparser. ConfigParser(interpolation=None)\n config_parser = configparser.RawConfigParser()\n config_parser.readfp(file_object)\n\n user_mappings = {}\n for option_name in config_parser.options(u'user_mappings'):\n user_mapping = self._GetConfigValue(\n config_parser, u'user_mappings', option_name)\n\n option_name = option_name.lower()\n user_mappings[option_name] = user_mapping.lower()\n\n return user_mappings",
"def _interpret_auth_data(auth_data):\n if isinstance(auth_data, Mapping):\n # upgrade old-style single dict configs to new-style list-of-dicts (with one item)\n auth_data = [auth_data]\n\n auth = []\n for config in auth_data:\n # translate config options\n host = \"\"\n ssl = True\n extra_kwargs = {}\n try:\n # SSL options\n if \"protocol\" in config and config[\"protocol\"] == \"SASL_PLAINTEXT\":\n ssl = False\n elif \"ssl_ca_location\" in config:\n extra_kwargs[\"ssl_ca_location\"] = config[\"ssl_ca_location\"]\n\n # SASL options\n user = config[\"username\"]\n password = config[\"password\"]\n\n if \"hostname\" in config:\n host = config[\"hostname\"]\n\n token_endpoint = config.get(\"token_endpoint\")\n\n if \"mechanism\" in config:\n mechanism = config[\"mechanism\"].replace(\"-\", \"_\")\n elif token_endpoint:\n mechanism = \"OAUTHBEARER\"\n else:\n mechanism = \"SCRAM_SHA_512\"\n\n except KeyError as ke:\n raise RuntimeError(\"configuration file is not configured correctly: \"\n f\"missing auth property {ke}\")\n else:\n auth.append(Auth(user, password, host=host, ssl=ssl, method=SASLMethod[mechanism],\n token_endpoint=token_endpoint, **extra_kwargs))\n return auth",
"def take_auth_data():\n home = str(Path.home())\n path_to_keys = '/Documents/twitter/keys/'\n\n files = [f for f in listdir(home+path_to_keys) if '.DS' not in f]\n\n tokens = []\n for f in files:\n with open(home+path_to_keys+f, 'r') as lines:\n ln = lines.readline().replace(\" \", \"\")\n tokens.append(ln)\n\n auth_data = dict(zip(files, tokens))\n return auth_data",
"def transform_credentials(self, data: Dict, **kwargs) -> Dict:\r\n name = data.pop(\"name\")\r\n return_data = {name: data}\r\n return return_data",
"def checkAndInitUsers(self):\n # config\n users = {}\n\n # iterate through all usernames\n for rUser in pwd.getpwall():\n # check userid\n if rUser.pw_uid is not None and rUser.pw_uid != \"\" and not (\"/nologin\" in rUser.pw_shell or \"/false\" in rUser.pw_shell):\n # save our user, if it mactches\n if verifyNormalUserID(rUser.pw_uid):\n # get processed usernames\n userFName = getNormalizedUserNames(pUser=rUser)[1]\n # save ()\n users[rUser.pw_name] = [rUser.pw_uid, userFName]\n\n # get user config\n timekprConfigManager = timekprConfig()\n # load user config\n timekprConfigManager.loadMainConfiguration()\n\n # go through our users\n for rUser in users:\n # get path of file\n file = os.path.join(timekprConfigManager.getTimekprConfigDir(), cons.TK_USER_CONFIG_FILE % (rUser))\n\n # check if we have config for them\n if not os.path.isfile(file):\n log.log(cons.TK_LOG_LEVEL_INFO, \"setting up user \\\"%s\\\" with id %i\" % (rUser, users[rUser][0]))\n # user config\n timekprUserConfig(timekprConfigManager.getTimekprConfigDir(), rUser).initUserConfiguration()\n # user control\n timekprUserControl(timekprConfigManager.getTimekprWorkDir(), rUser).initUserControl()\n\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"finishing setting up users\")\n\n # user list\n return users",
"def read_all():\n with open(User.get_users_path(), encoding='utf8') as file:\n for line in file:\n username, pw_hash, email, name, level = line.strip().split(';')\n level = int(level)\n user = User.from_file(username)\n# userfile = User.get_path()\n# user = User(username=username, pw_hash=pw_hash, email=email, name=name, level=level,\n# new=False)\n User.users[username] = user",
"def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data",
"def loadProfiles():\n with open(userProfilesDir, \"r\") as infile:\n profiles = json.loads(\"\\n\".join(infile.readlines()))\n infile.close()\n return profiles",
"def test_read_user_identity_mapping(self):\n pass",
"def load_users(self):\n for user_type in self.user_types:\n url = \"%s_url\" % user_type\n try:\n self.lookup(url)\n except AttributeError:\n continue\n users = self._fetcher.get_entities(self.lookup(url))\n user_list = []\n for user in users:\n if 'username' in user:\n user_list.append({'name': user['username']})\n if len(user_list) > 0:\n setattr(self, user_type, user_list)",
"def _process_profiles(profiles):\n # keep_attributes = str.split(\"user_id public completion_percentage gender region last_login registration age\")\n # p2=profiles[keep_attributes]\n p2 = profiles\n p2['region'] = p2['region'].astype('category')\n p2['public'] = p2['public'].astype('category')\n p2['gender'] = p2['gender'].astype('category')\n p2['last_login'] = pd.to_datetime(p2['last_login'])\n p2['registration'] = pd.to_datetime(p2['registration'])\n p2.loc[p2.age == 0, 'age'] = np.nan\n\n return p2",
"def extract_normalize_yandex_data(data):\n profile = {\n 'accounts': [\n {\n 'domain': 'yandex.ru',\n 'userid': data['id']\n }\n ],\n 'birthday': data.get('birthday'),\n 'gender': data.get('sex'),\n }\n\n email = data.get('default_email')\n if email:\n profile['emails'] = [{\n 'value': email,\n 'primary': True\n }]\n\n display_name = data.get('display_name')\n if display_name:\n profile['preferredUsername'] = display_name\n profile['nickname'] = display_name\n real_name = data.get('real_name')\n profile['displayName'] = (\n real_name\n or display_name\n or u'Yandex user #{id}'.format(id=data['id'])\n )\n\n # Now strip out empty values\n for k, v in profile.items():\n if not v or (isinstance(v, list) and not v[0]):\n del profile[k]\n\n return profile",
"def make_ldap_attributes_from_record(self, record):\n\n attrs = {}\n attrs['objectClass'] = [\"top\", \"person\", \"inetOrgPerson\",\n \"organizationalPerson\", \"posixAccount\",\n \"shadowAccount\", \"systemQuotas\",\n \"ldapPublicKey\"]\n\n attrs['uid'] = self.LdapGenerateUniqueLogin(record)\n try:\n attrs['givenName'] = str(record['first_name']).lower().capitalize()\n attrs['sn'] = str(record['last_name']).lower().capitalize()\n attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']\n attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']\n\n except KeyError:\n attrs['givenName'] = attrs['uid']\n attrs['sn'] = attrs['uid']\n attrs['cn'] = attrs['uid']\n attrs['gecos'] = attrs['uid']\n\n attrs['quota'] = self.ldapUserQuotaNFS\n attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']\n attrs['loginShell'] = self.ldapShell\n attrs['gidNumber'] = self.ldapUserGidNumber\n attrs['uidNumber'] = self.find_max_uidNumber()\n attrs['mail'] = record['mail'].lower()\n try:\n attrs['sshPublicKey'] = record['pkey']\n except KeyError:\n attrs['sshPublicKey'] = self.get_ssh_pkey(record)\n\n\n #Password is automatically generated because SFA user don't go\n #through the Iotlab website used to register new users,\n #There is no place in SFA where users can enter such information\n #yet.\n #If the user wants to set his own password , he must go to the Iotlab\n #website.\n password = self.login_pwd.generate_password()\n attrs['userPassword'] = self.login_pwd.encrypt_password(password)\n\n #Account automatically validated (no mail request to admins)\n #Set to 0 to disable the account, -1 to enable it,\n attrs['shadowExpire'] = '-1'\n\n #Motivation field in Iotlab\n attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'\n\n attrs['ou'] = 'SFA' #Optional: organizational unit\n #No info about those here:\n attrs['l'] = 'To be defined'#Optional: Locality.\n attrs['st'] = 'To be defined' #Optional: state or province (country).\n\n return attrs",
"def load_users(self):\n for user_type in self.user_types:\n url_string = \"%s_url\" % user_type\n try:\n url = self.lookup(url_string)\n users = self._fetcher.get_entities(url)\n except AttributeError as ate:\n logger.err(str(ate))\n continue\n user_list = []\n for user in users:\n if 'username' in user:\n user_list.append({'name': user['username']})\n if len(user_list) > 0:\n setattr(self, user_type, user_list)",
"def get_user_principals(access_token):\r\n #Make request to user info and preferences to get principals for login\r\n user_url = 'https://api.tdameritrade.com/v1/userprincipals'\r\n headers = {'Authorization': 'Bearer {}'.format(access_token)}\r\n params = {'fields':'streamerSubscriptionKeys,streamerConnectionInfo'}\r\n user_principals_json = requests.get(url=user_url,headers=headers,params=params)\r\n user_principals = user_principals_json.json()\r\n\r\n #convert token timestamp to milliseconds (required for login to websocket)\r\n tokenTimeStamp = user_principals['streamerInfo']['tokenTimestamp']\r\n token_date = dateutil.parser.parse(tokenTimeStamp,ignoretz=True)\r\n epoch = datetime.datetime.utcfromtimestamp(0)\r\n tokenTimeStampAsMs = int((token_date-epoch).total_seconds()*1000.0)\r\n \r\n return (user_principals,tokenTimeStampAsMs)",
"def rootuser_info(self, datadict):\n\n dict1 = OrderedDict()\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']\n\n userdict = OrderedDict()\n keylist = ['id', 'username', 'full_name', 'biography', 'edge_follow', 'edge_followed_by', 'is_private', 'external_url', 'profile_pic_url_hd']\n\n for key in keylist:\n if key is 'edge_follow':\n userdict['following'] = dict1[key]\n elif key is 'edge_followed_by':\n userdict['followers'] = dict1[key]\n else:\n userdict[key] = dict1[key]\n\n userdict['platform'] = datadict['platform']\n\n return (json.dumps(userdict, indent=4))",
"def loadusers(userfname):\n\t\n\ttry:\n\t\twith open(userfname, \"r\") as userfile:\n\t\t\tusers = json.load(userfile)\n\texcept EnvironmentError:\n\t\tprint(\"Failed to open the users file\", file=sys.stderr)\n\t\n\tnew_users = []\n\t\n\tfor user in users:\n\t\tname = user.get(\"name\", \"\")\n\t\tline1 = user.get(\"line1\", \"\")\n\t\tline2 = user.get(\"line2\", \"\")\n\t\tcity = user.get(\"city\", \"\")\n\t\tstate = user.get(\"state\", \"\")\n\t\tzipcode = user.get(\"zip\", \"\")\n\t\tphone = user.get(\"phone\", \"\")\n\t\temail = user.get(\"email\", \"\")\n\t\t\n\t\tusername = get_free_username()\n\t\t\n\t\t# Django's idea that people have first and last names is pretty narrow;\n\t\t# although this is not optimal, it's one of the more general options and most\n\t\t# compatible. The only downside (besides clarity) is that first_name and\n\t\t# last_name have a 30 chars limit\n\t\tfirst_name = name\n\t\tlast_name = \"\"\n\t\t\n\t\taddress = format_address(line1, line2, city, state, zipcode)\n\t\t\n\t\ttry:\n\t\t\tAuthUser.objects.get(first_name=first_name, last_name=last_name,\n\t\t\t address=address, phone=phone)\n\t\t\tprint(\"User {} is already in the database, skipping\".format(user), file=sys.stderr)\n\t\t\tcontinue\n\t\texcept:\n\t\t\t# everything is alright, the user wasn't found in the database, proceed to 'add' step\n\t\t\tpass\n\t\t\n\t\ttry:\n\t\t\tnew_user = AuthUser(username=username, first_name=first_name, last_name=last_name,\n\t\t\t address=address, phone=phone)\n\t\t\tnew_users.append(new_user)\n\t\t\t\n\t\t\tprint(\"Added \" + name)\n\t\texcept:\n\t\t\tprint(\"Error trying to save new user {}\".format(user), file=sys.stderr)\n\t\n\tsave_model_array(new_users)",
"def __parseUserRecord(self, record):\n\n match = re.search('([a-zA-Z0-9]+),([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}),([0-9]+)', record)\n username = match.group(1)\n userIp = match.group(2)\n userPort = int(match.group(3))\n self.__userList[username] = (userIp, userPort)",
"def readProperties():\n separator = \":\"\n props = {}\n \n with open('upgrade.properties') as f:\n\n for line in f:\n if separator in line:\n\n # Find the name and value by splitting the string\n name, value = line.split(separator, 1)\n\n # Assign key value pair to dict\n # strip() removes white space from the ends of strings\n props[name.strip()] = value.strip()\n\n props['JDA_HOME'] = props['JDA_HOME'].replace('-', ':')\n globs.props = props\n\n globs.UserPassDict = {}\n for user_cat in globs.CRED_DICT:\n globs.UserPassDict[props[user_cat]] = props[globs.CRED_DICT[user_cat]]",
"def create_username_name_pairs(usernames, names):\n logger.info('create username name pairs...')\n pairs = []\n for username, name in zip(usernames, names):\n \n pair = username if name == '' else username + ' (' + name + ')' \n pairs.append(pair)\n return pairs",
"def parse_user_fields(json_data):\n # Populate the fields\n user_info = {}\n for db_field, json_field in Users.UserJSON.fields.items():\n try:\n user_info[db_field] = get_json_field(json_data, json_field)\n if db_field == 'user_address_street':\n user_info[db_field] = user_info.get(db_field).replace('\\n', '')\n elif (db_field == 'user_first_login') or (db_field == 'user_last_login'):\n raw_timestamp = user_info.get(db_field)[:19]\n user_info[db_field] = core_utils.validate_timestamp(raw_timestamp)\n elif db_field == 'user_tags':\n user_info[db_field] = ', '.join(user_info.get(db_field))\n elif db_field == 'user_profile':\n profile = user_info[db_field]\n for idx in range(len(profile)):\n if profile[idx]['jive_label'] in Users.UserJSON.profile_fields:\n profile_field_name = Users.UserJSON.profile_fields.get(profile[idx]['jive_label'])\n user_info[profile_field_name] = profile[idx]['value']\n del user_info['user_profile']\n except (KeyError, IndexError, AttributeError):\n # Continue on to the next field\n continue\n # Return the user information\n return user_info",
"def read_users(users_fp):\n users = []\n with open(users_fp, 'r') as fp:\n fields = fp.readline().rstrip().split(\",\")\n for line in fp:\n user = dict(zip(fields, line.rstrip().split(\",\")))\n users.append(user)\n return users",
"def _build_account_map(self):\n account_map = {}\n for profile in self._session.available_profiles:\n self._session.profile = profile\n config = self._session.get_scoped_config()\n account_id = config.get('account_id')\n if account_id:\n account_map[account_id] = profile\n return account_map"
] | [
"0.68432105",
"0.59266603",
"0.5753227",
"0.56977576",
"0.53878903",
"0.52764636",
"0.52312744",
"0.5185261",
"0.51774687",
"0.5167766",
"0.51212883",
"0.51203704",
"0.51151085",
"0.51099247",
"0.50854266",
"0.5081361",
"0.5063556",
"0.504079",
"0.5025078",
"0.5009595",
"0.49946824",
"0.49768484",
"0.49579784",
"0.4947945",
"0.49463627",
"0.49460068",
"0.49379116",
"0.48940808",
"0.48940137",
"0.48856935"
] | 0.70934486 | 0 |
Create a notification where the notifier is the user. If the user is the owner, then do nothing. Hrefs can be: 1) Link to post, e.g. /post/postid 2) Link to comment or nested comment, e.g. /post/postidcommentcommentid 3) Link to reaction, e.g. /post/postidreactionreactionid | def create_notification(self, notifying_href, notifying_action, notified_href, owner):
if self.id == owner.id:
return
new_notification = Notification()
new_notification.eid = make_uuid()
new_notification.notifier = self
new_notification.notifying_href = notifying_href
new_notification.notifying_action = notifying_action
new_notification.notified_href = notified_href
new_notification.owner = owner
new_notification.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reply_this(self, user, text):\n parent = self.get_parent()\n reply_news = News.objects.create(\n user=user, content=text, reply=True, parent=parent\n )\n notification_handler(\n user,\n parent.user,\n Notification.REPLY,\n action_object=reply_news,\n id_value=str(parent.uuid_id),\n key=\"social_update\",\n )",
"def _send_notification(self, user_id):\n settings = self.settings_repo.find_one_by_id(user_id)\n if settings.req_noti:\n noti = Notification('New Request', '/topics/request', self.BORROW)\n self.noti_service.send_notification(noti)",
"async def custom_notify(bot, context):\n await utilities.notify_owners(\n bot, '{0.author} from {0.guild}: {0.arguments[0]}'.format(context))\n return Response(content=\"Notified the owners with your message!\")",
"def dispatch_comment_or_reply_notification(sender, instance: Comment | Reply, **kwargs):\n if kwargs.get('created'):\n is_comment = sender == Comment\n item_owner = instance.note.user if is_comment else instance.comment.user\n\n if instance.user != item_owner:\n if is_comment:\n message = f\"{instance.user.get_full_name()} commented on your note '{instance.note.title}'.\"\n else:\n message = f\"{instance.user.get_full_name()} replied to your comment on '{instance.comment.note.title}'.\"\n\n Notification.objects.create(\n to_user=item_owner,\n message=message,\n url=instance.get_absolute_url()\n )",
"def notify_comment(**kwargs): # pragma: no cover\n actor = kwargs[\"request\"].user\n receiver = kwargs[\"comment\"].content_object.user\n obj = kwargs[\"comment\"].content_object\n notification_handler(actor, receiver, Notification.COMMENTED, action_object=obj)",
"def notify(self, thing, redditor, link, body, author):\n if self.quiet or util.is_ignored(redditor):\n return\n\n quote = util.quote(body)\n msg = self.NOTIFICATION_BODY % (thing, link, author, quote)\n\n while msg.__len__() > 10000: # Check message size\n quote_len = quote.__len__() # Get the quote length\n quote = quote[:quote_len - 2] # Chop off a character\n msg = self.NOTIFICATION_BODY % (permalink, author, quote) # Reassign the message\n\n username = redditor.name\n print('Sending message to ' + username + '...', end=\"\")\n self.reddit.send_message(username, 'You have been mentioned in a comment.', msg)\n print('[DONE]')",
"def notification_handler(actor, recipient, verb, **kwargs):\n key = kwargs.pop(\"key\", \"notification\")\n id_value = kwargs.pop(\"id_value\", None)\n if recipient == \"global\":\n users = get_user_model().objects.all().exclude(username=actor.username)\n for user in users:\n Notification.objects.create(\n actor=actor,\n recipient=user,\n verb=verb,\n action_object=kwargs.pop(\"action_object\", None),\n )\n notification_broadcast(actor, key)\n\n elif isinstance(recipient, list):\n for user in recipient:\n Notification.objects.create(\n actor=actor,\n recipient=get_user_model().objects.get(username=user),\n verb=verb,\n action_object=kwargs.pop(\"action_object\", None),\n )\n\n elif isinstance(recipient, get_user_model()):\n Notification.objects.create(\n actor=actor,\n recipient=recipient,\n verb=verb,\n action_object=kwargs.pop(\"action_object\", None),\n )\n notification_broadcast(\n actor, key, id_value=id_value, recipient=recipient.username\n )\n\n else:\n pass",
"def send_referral_answered_to_created_by(cls, referral, version):\n template_created_by_id = settings.SENDINBLUE[\n \"REFERRAL_ANSWERED_CREATED_BY_TEMPLATE_ID\"\n ]\n\n data = {\n \"params\": {\n \"case_number\": referral.id,\n \"title\": referral.title or referral.object,\n },\n \"replyTo\": cls.reply_to,\n \"templateId\": template_created_by_id,\n \"to\": [{\"email\": version.created_by.email}],\n }\n cls.send(data)",
"def send_new_message_for_requester(cls, user, referral, message):\n\n template_id = settings.SENDINBLUE[\n \"REFERRAL_NEW_MESSAGE_FOR_REQUESTER_TEMPLATE_ID\"\n ]\n\n # Get the path to the referral detail view from the requester's \"my referrals\" view\n link_path = FrontendLink.sent_referrals_referral_detail_messages(\n referral=referral.id\n )\n\n data = {\n \"params\": {\n \"case_number\": referral.id,\n \"link_to_referral\": f\"{cls.location}{link_path}\",\n \"message_author\": message.user.get_full_name(),\n \"topic\": referral.topic.name,\n \"units\": \", \".join([unit.name for unit in referral.units.all()]),\n },\n \"replyTo\": cls.reply_to,\n \"templateId\": template_id,\n \"to\": [{\"email\": user.email}],\n }\n\n cls.send(data)",
"def send_notifications_to_all_users(sender,\n instance,\n created, *args, **kwargs):\n\n if instance and created:\n users_followers = instance.author.followers.all()\n\n link = f\"\"\"{os.getenv(\"HEROKU_BACKEND_URL\")}/articles/\\n\"\"\"\n f\"\"\"{instance.slug}\"\"\"\n for user in users_followers:\n if user.get_notifications:\n uuid = urlsafe_base64_encode(force_bytes(user)\n ).decode(\"utf-8\")\n subscription = f'{os.getenv(\"HEROKU_BACKEND_URL\")}/api/' +\\\n 'v1/users/' +\\\n f'unsubscribe/{uuid}/'\n sender = os.getenv('EMAIL_HOST_USER')\n email = user.email\n email_subject = \"Author's Haven Email Notification\"\n message = render_to_string('create_article.html', {\n 'title': email_subject,\n 'username': user.username,\n 'link': link,\n 'subscription': subscription\n })\n\n send_mail(email_subject, '', sender, [\n email, ], html_message=message)\n notify.send(instance.author, recipient=user,\n verb='A user you follow has a new post',\n action_object=instance)",
"def send_referral_saved(cls, referral, created_by):\n\n template_id = settings.SENDINBLUE[\"REFERRAL_SAVED_TEMPLATE_ID\"]\n\n data = {\n \"params\": {\"case_number\": referral.id},\n \"replyTo\": cls.reply_to,\n \"templateId\": template_id,\n \"to\": [{\"email\": created_by.email}],\n }\n\n cls.send(data)",
"def notify_new_user(self, user):\n # join to default group\n g = self.root.get('community-general')\n if g:\n self.join_group(user, g)",
"def create_notification(self, user_id, message, notification_type):\n dao = NotificationsDAO()\n try:\n notif_id = dao.create_notification(user_id, message, notification_type)\n return jsonify(NotificationID=notif_id), 200\n except:\n return jsonify(Error=\"Error processing, query.\"), 400",
"def task_created(user, friends, task_description):\n\n title = f\"{user.username}'s task\"\n body = f\"{task_description}\"\n\n for user in friends:\n if user.fir_push_notif_token is not None and user.task_notifs:\n notify_user(user, title, body)",
"def note(self, irc, msg, args, user, id):\n try:\n note = self.db.get(id)\n except dbi.NoRecordError:\n irc.errorInvalid('note id')\n if user.id != note.frm and user.id != note.to:\n s = 'You may only retrieve notes you\\'ve sent or received.'\n irc.error(s)\n return\n newnote = self._formatNote(note, user.id)\n irc.reply(newnote, private=(not note.public))\n self.db.setRead(id)",
"def like(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.add(self.request.user)\n to_user = user_wall_post.owner\n from_user = request.user\n\n UserNotification.create_post_friend_liked_notification(from_user, to_user, 'Right', id=pk)\n return Response(status=201)",
"def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n ###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")",
"def notify(self, event, user):\n raise NotImplementedError(\"Subclasses must override notify() method\")",
"def create_notification(self, text: str, user_id: str, target_id: str, target_type: NotificationTargetType, *args, **kwargs):\n\n notification_data = api.create_notification(\n text, \n user_id, \n target_id,\n target_type,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Notification(notification_data)",
"def add_notification_for_user(\n self, login, type, channel=\"EmailNotificationChannel\", project=None\n ):",
"def notify_users_of_reminders():\n\n #Get current date into dd/mm/YYYY format.\n now = datetime.datetime.now()\n todays_date = now.strftime(\"%d/%m/%Y\")\n\n #Get current time and convert it to hh:mm.\n todays_time = now.strftime(\"%H:%M\")\n print(todays_time)\n\n #Select all notifications from the database based on that date and time.\n notifications_query = \"\"\"SELECT user, reminder_msg FROM reminders WHERE (date=%s AND time=%s);\"\"\"\n\n #Setup our parameters\n notifications_params = (todays_date, todays_time)\n\n #TODO: Add in cursor.\n #TODO: Run query and get reminder data.\n #TODO: Loop over returned rows, and notify users with send_message_to_irc()",
"def alert(user, follow, data, client_fb):\n users_notify = database.get_from_notify(username=user, username_follow=follow)\n for user in users_notify:\n if user['thread_type'] == '0':\n if user['image_flag']:\n message = get_message(message_type='image', source=follow, data=data)\n client_fb.sendLocalImage(image_path='tmp.jpg', message=message, thread_id=str(user['thread_id']))\n client_fb.sendMessage(message=MESSAGE_FOOTER, thread_id=str(user['thread_id']))\n logger.info('User %s notified %s on facebook.', user, str(user['thread_id']))\n # clean image created\n os.remove('tmp.jpg')\n else:\n message = get_message(message_type='no_image', source=follow)\n client_fb.sendMessage(message=message, thread_id=str(user['thread_id']))\n logger.info('%s got notified on facebook.', str(user['thread_id']))",
"async def process_note_hook(data: models.NoteHook):\n note = data.note\n user = data.user\n project = data.project\n colour = discord.Colour.greyple()\n embed = discord.Embed(url=note.url, description=note.description, colour=colour)\n embed.set_author(name=user.username, icon_url=user.avatar_url)\n if data.issue:\n issue = data.issue\n embed.title = f\"[{project.namespace}/{project.name}] New comment on issue #{issue.iid}: {issue.title}\"\n if data.commit:\n commit = data.commit\n embed.title = f\"[{project.namespace}/{project.name}] New comment on commit `{commit.id[:7]}`\"\n if data.merge_request:\n merge = data.merge_request\n embed.title = f\"[{project.namespace}/{project.name}] New comment on merge request !{merge.iid}: {merge.title}\"\n await send_message(None, embed=embed)",
"def send_referral_changeurgencylevel(\n cls, contact, referral, history_object, created_by\n ):\n\n requester_template_id = settings.SENDINBLUE[\n \"REFERRAL_CHANGED_URGENCYLEVEL_FOR_REQUESTER_TEMPLATE_ID\"\n ]\n unit_member_template_id = settings.SENDINBLUE[\n \"REFERRAL_CHANGED_URGENCYLEVEL_FOR_UNIT_MEMBER_TEMPLATE_ID\"\n ]\n\n if referral.users.filter(id=contact.id).exists():\n template_id = requester_template_id\n # Get the path to the referral detail view from the requester's \"my referrals\" view\n link_path = FrontendLink.sent_referrals_referral_detail(referral.id)\n else:\n template_id = unit_member_template_id\n # Get the path to the referral detail view from the unit inbox\n unit = referral.units.filter(members=contact).first()\n link_path = FrontendLink.unit_referral_detail(\n unit=unit.id, referral=referral.id\n )\n\n data = {\n \"params\": {\n \"case_number\": referral.id,\n \"created_by\": created_by.get_full_name(),\n \"link_to_referral\": f\"{cls.location}{link_path}\",\n \"message\": history_object.explanation,\n \"old_due_date\": dateformat.format(\n referral.created_at + history_object.old_referral_urgency.duration,\n \"j F Y\",\n ),\n \"new_due_date\": dateformat.format(referral.get_due_date(), \"j F Y\"),\n \"topic\": referral.topic.name,\n },\n \"replyTo\": cls.reply_to,\n \"templateId\": template_id,\n \"to\": [{\"email\": contact.email}],\n }\n cls.send(data)",
"def send(self):\n ReferralActivity.objects.create(\n actor=self.user,\n verb=ReferralActivityVerb.CREATED,\n referral=self,\n )\n # Confirm the referral has been sent to the requester by email\n Mailer.send_referral_saved(self)\n # Send this email to all owners of the unit(s) (admins are not supposed to receive\n # email notifications)\n for unit in self.units.all():\n contacts = unit.members.filter(\n unitmembership__role=UnitMembershipRole.OWNER\n )\n for contact in contacts:\n Mailer.send_referral_received(self, contact=contact, unit=unit)",
"def create_notification_rule(headers, user_id, payload):\n\n # Alter base_url's endpoint\n url = base_url + '/' + user_id + '/notification_rules'\n\n r = requests.post(url, headers=headers, data=json.dumps(payload))\n\n print 'Notification rule response code: ' + str(r.status_code)\n return",
"def test_notify_user(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users([self.user_a], foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 1)",
"def set_user_notification(notification_data):\n if not notification_data:\n return\n chat_id = notification_data[\"chat_id\"]\n status = notification_data[\"status\"]\n notify_at = notification_data[\"time\"]\n restaurant_id = notification_data[\"restaurant\"].split(\"_\")[1]\n\n notify_info = {\n \"status\": \"включены\" if int(status) else \"выключены\",\n \"restaurant\": restaurant_name[restaurant_id],\n }\n\n db = sqlite3.connect(database)\n cursor = db.cursor()\n\n find_notify_record = f\"SELECT id FROM notifications WHERE chat_id={chat_id} AND restaurant_id={restaurant_id}\"\n notify_record = cursor.execute(find_notify_record)\n if len(notify_record.fetchall()):\n query_update = (\n f\"UPDATE notifications SET notify_at='{notify_at}', updated_at='{datetime.now()}', status='{status}' \"\n f\" WHERE chat_id='{chat_id}' AND restaurant_id='{restaurant_id}'\"\n )\n cursor.execute(query_update)\n else:\n query_insert = (\n f\"INSERT OR IGNORE INTO notifications ('chat_id', 'restaurant_id', 'status', 'notify_at', 'created_at', 'updated_at')\"\n f\" VALUES ('{chat_id}', '{restaurant_id}', '{status}', '{notify_at}', '{datetime.now()}', '{datetime.now()}')\"\n )\n cursor.execute(query_insert)\n\n db.commit()\n db.close()\n\n return notify_info",
"def _public(self, request, entity, context):\n\n # if the user viewing is the user for which this notification is meant\n # and the notification has not been read yet\n if entity.unread:\n # get the current user\n user = user_logic.getForCurrentAccount()\n \n # if the message is meant for the user that is reading it\n # pylint: disable-msg=E1103\n if entity.scope.key() == user.key():\n # mark the entity as read\n self._logic.updateEntityProperties(entity, {'unread' : False} )\n\n context['entity_type_url'] = self._params['url_name']\n context['entity_suffix'] = entity.key().id_or_name() if entity else None\n\n return True",
"def save(self):\n super(Notification, self).save()\n # get user recipent\n us = self.notified_user\n # check that user has a valid email address\n if us.email.find('@') > 0 and us.email.find('.') > 0:\n # mandatory fields\n subject = strings.EMAIL_NOTIFICATION_SUBJECT\n to = us.email\n from_email = settings.DEFAULT_FROM_EMAIL\n # get text version of the message\n text_content = self.get_email_content_from_type(\n self.notification_type\n )\n # FIXME: HTML version implementation pending\n html_content = self.get_email_content_from_type(\n self.notification_type\n )\n msg = EmailMultiAlternatives(\n subject, \n text_content,\n from_email,\n [to]\n )\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()"
] | [
"0.6424427",
"0.63672864",
"0.6204566",
"0.619281",
"0.5919673",
"0.5777313",
"0.5768302",
"0.5762613",
"0.5619914",
"0.5582401",
"0.55254275",
"0.5516454",
"0.548841",
"0.5461826",
"0.54616433",
"0.5460863",
"0.5425252",
"0.54158276",
"0.5414549",
"0.54036623",
"0.5383166",
"0.5381483",
"0.5361285",
"0.5350613",
"0.53499967",
"0.5341818",
"0.53368074",
"0.5336807",
"0.53331137",
"0.5327908"
] | 0.69772345 | 0 |
Mark a notification as read | def mark_notification_as_read(self, notification_id):
n = Notification.objects.get(eid=notification_id)
if self.id != n.owner.id:
return False
if not n.unread:
return True
n.unread = False
n.save()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mark_read(self):\n # Obviously remove the exception when Kippt says the support it.\n raise NotImplementedError(\n \"The Kippt API does not yet support marking notifications as read.\"\n )\n\n data = json.dumps({\"action\": \"mark_seen\"})\n r = requests.post(\n \"https://kippt.com/api/notifications\",\n headers=self.kippt.header,\n data=data\n )\n return (r.json())",
"def mark_as_read(self):\r\n url = '{0}/markAssociatedNotificationsRead'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json",
"def mark_as_read(self):\n params = {\"ffauth_device_id\": self._DiscretelyAuthenticatedObject__device_id,\n \"ffauth_secret\": self._DiscretelyAuthenticatedObject__device_token}\n data = {\"data\": str({\"recipient\": {\"guid\": self._DiscretelyAuthenticatedObject__guid, \"type\": \"user\"}})}\n requests.post(\n self._DiscretelyAuthenticatedObject__portal + \"/_api/1.0/tasks/\" + str(self.id) + \"/mark_as_read\",\n params=params, data=data)",
"def test_topic_notification_mark_as_read(self):\n private = utils.create_private_topic()\n comment = utils.create_comment(topic=private.topic)\n TopicNotification.objects.create(\n user=private.user, topic=private.topic,\n comment=comment, is_read=False)\n TopicNotification.mark_as_read(\n user=private.user, topic=private.topic)\n notification = TopicNotification.objects.get(\n user=private.user, topic=private.topic)\n self.assertTrue(notification.is_read)",
"def mark_messages_read(self, mbox, msgset):\n self._add_flag(mbox, msgset, r'(\\Seen)')",
"def read(self):\n if self.status == 'read':\n return\n self.status = 'read'\n self.emit('read')\n self.emit('modified')",
"def test_mark_read(db, session): # pylint: disable=unused-argument\n # add a notification\n user_id = 'notf-user'\n request_id = 226\n request_type = 'registration'\n request_status = 3\n message = 'this is a test notification'\n notification = Notification(user_id=user_id, request_id=request_id, request_type=request_type,\n request_status=request_status, message=message)\n notification.add()\n\n # get notification id\n notification_id = get_notification_id(session, request_id)\n notification_data = get_single_notification(session, notification_id)\n assert notification_data.marked_read is False\n\n # call mark_read()\n Notification.mark_read(notification_id)\n notification_data = get_single_notification(session, notification_id)\n assert notification_data.marked_read is True",
"def mark_as_read(self):\n if \"markAsRead\" in self._prop_dict:\n return self._prop_dict[\"markAsRead\"]\n else:\n return None",
"def mark_all_notifications_as_read(self):\n for n in Notification.objects(owner=self, unread=True):\n n.unread = False\n n.save()",
"def register_read(self):\n self._reads_since_check += 1",
"def _notify_read(self, cuds_object):",
"def trigger_item_read(self, checked):\n if self.selected_item and checked == self.selected_item.unread:\n if self.selected_item.unread:\n self.selected_item.mark_as_read()\n else:\n self.selected_item.mark_as_unread()\n self.controller.item_read(self.selected_item)",
"def mark_as_read(entry):\n if has_been_read(entry):\n return\n title = entry.title\n date = date_parser.parse(entry.updated)\n READLIST.append(str(date.timestamp()) + '|' + title)\n save_datfile()",
"def put(self, notification_id):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n if not mark_notification_as_read(user, notification_id):\n return {'msg': \"Not allowed to mark notification as read\"}, 401",
"def read(self, read):\n\n self._read = read",
"def toggle_item_read(self):\n self.get_selected()\n if not self.selected_item:\n return\n was_unread = self.selected_item.unread\n message = 'Entry now marked as unread'\n if was_unread:\n message = 'Entry now marked as read'\n self.trigger_item_read(was_unread)\n self.display_message(message)",
"async def patch_notifications(\n self,\n notification_ids: List[UUID],\n read: Optional[bool] = Query(None),\n visited: Optional[bool] = Query(None)):\n return await self._service.mark_notifications_as(\n notification_ids, read, visited)",
"def put(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n mark_all_notifications_as_read(user)",
"def trigger_mark_all_read(self):\n self.current_feed.mark_as_read()\n self.controller.feed_read(self.current_feed)",
"def observeRead(self, observer):\n self.observerRead = observer",
"def update_read(self, article):\n read_statistics = ReadingStatistics.objects.filter(\n user=self.request.user, article=article)\n if read_statistics:\n read_time = read_statistics[0].article.read_time\n read_time_int = int(read_time.split(' ')[0]) * 60\n no_sec = read_statistics[0].time_since_last_read().seconds\n if no_sec > int(read_time_int):\n read_statistics.update(no_read=read_statistics[0].no_read +\n 1, read_last_at=\n datetime.datetime.now())\n else:\n ReadingStatistics.objects.create(user=self.request.user,\n article=article)",
"def markPMsRead(self):\n\t\turl = \"https://habitica.com/api/v3/user/mark-pms-read\"\n\t\treturn(postUrl(url, self.credentials))",
"def set_book_read(self, id, read):\n\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('UPDATE books SET read = ? WHERE rowid = ?', (read, id))\n if not cur.rowcount:\n raise BookError('Tried to modify book that doesn\\'t exist')\n except sqlite3.Error as e:\n raise BookError(f'Error setting book {id} to read={read}') from e",
"def read(self, request, *args, **kwargs):\n message = self.get_object()\n # Skip reading / unreading own messages\n if request.user.id != message.user_id:\n if request.method == 'POST':\n message.mark_as_read(request.user)\n elif request.method == 'DELETE':\n message.mark_as_unread(request.user)\n return Response(status=status.HTTP_202_ACCEPTED)",
"def test_mark_as_read(self):\n url, parsed = self.prepare_urls(\n 'v1:activity-read', subdomain=self.company.subdomain)\n \n response = self.client.post(url, [], HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, [], HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n content = json.loads(response.content)\n self.assertTrue(content.has_key('count'))\n self.assertTrue(content.has_key('statistics'))",
"def mark_as_read(self):\n\n # TODO: NewsBlur requests that stories are maked as read in batches.\n return self._api_client.mark_stories_as_read([self])",
"def write(self, notification):",
"def cmd_notification_mark(client, args):\n # Converted to list because in current implemented in imgurpython, client method\n # expected a comma separated list of ids\n ids = args.ids.split(',')\n notifications_marked_as_viewed = client.mark_notifications_as_read(args.ids)\n generate_output({'notifications_marked_as_viewed':\n notifications_marked_as_viewed})",
"def test_isread_command(self):\r\n bm = BmarkMock()\r\n bm.tags['toread'] = True\r\n updated = IsRead.run(bm)\r\n self.assertTrue(\r\n 'toread' not in updated.tags,\r\n \"Updated bmark should not have 'toread' tag set\")",
"def trigger_unread_only(self, checked):\n if self.show_unread_only != checked:\n self.get_selected()\n self.current_feed.unread_only = checked\n self.update_item_list()"
] | [
"0.8075644",
"0.74416107",
"0.6954183",
"0.6929599",
"0.68651927",
"0.6858649",
"0.6849861",
"0.66907734",
"0.6574252",
"0.65078586",
"0.647467",
"0.64699465",
"0.6376799",
"0.632871",
"0.6303812",
"0.6294918",
"0.62934786",
"0.6202659",
"0.60688096",
"0.6044562",
"0.6039973",
"0.5975289",
"0.595497",
"0.5896813",
"0.5865129",
"0.58470905",
"0.58335805",
"0.58123803",
"0.5779499",
"0.57715285"
] | 0.744433 | 1 |
Mark all user's notifications as read | def mark_all_notifications_as_read(self):
for n in Notification.objects(owner=self, unread=True):
n.unread = False
n.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mark_all_read(self):\n caller = self.caller\n player = caller.player_ob\n all_msgs = Journal.white_journals.all_unread_by(player)\n # we'll do a bulk create of the through-model that represents how journals are marked as read\n ReadJournalModel = Journal.db_receivers_accounts.through\n bulk_list = []\n for msg in all_msgs:\n bulk_list.append(ReadJournalModel(accountdb=player, msg=msg))\n ReadJournalModel.objects.bulk_create(bulk_list)",
"def mark_as_read(self):\r\n url = '{0}/markAssociatedNotificationsRead'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json",
"def mark_read(self):\n # Obviously remove the exception when Kippt says the support it.\n raise NotImplementedError(\n \"The Kippt API does not yet support marking notifications as read.\"\n )\n\n data = json.dumps({\"action\": \"mark_seen\"})\n r = requests.post(\n \"https://kippt.com/api/notifications\",\n headers=self.kippt.header,\n data=data\n )\n return (r.json())",
"def mark_all_read(self):\n response = self._connection.session.post(self.url + \"/mark_all_as_read\")\n return self._raise_or_return_json(response)",
"def put(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n mark_all_notifications_as_read(user)",
"def mark_notification_as_read(self, notification_id):\n n = Notification.objects.get(eid=notification_id)\n if self.id != n.owner.id:\n return False\n if not n.unread:\n return True\n n.unread = False\n n.save()\n return True",
"def mark_as_read(self):\n params = {\"ffauth_device_id\": self._DiscretelyAuthenticatedObject__device_id,\n \"ffauth_secret\": self._DiscretelyAuthenticatedObject__device_token}\n data = {\"data\": str({\"recipient\": {\"guid\": self._DiscretelyAuthenticatedObject__guid, \"type\": \"user\"}})}\n requests.post(\n self._DiscretelyAuthenticatedObject__portal + \"/_api/1.0/tasks/\" + str(self.id) + \"/mark_as_read\",\n params=params, data=data)",
"def trigger_mark_all_read(self):\n self.current_feed.mark_as_read()\n self.controller.feed_read(self.current_feed)",
"def mark_all_as_read(self, recipient=None):\n qs = self.unread()\n if recipient:\n qs = qs.filter(recipient=recipient)\n\n return qs.update(unread=False)",
"async def patch_notifications(\n self,\n notification_ids: List[UUID],\n read: Optional[bool] = Query(None),\n visited: Optional[bool] = Query(None)):\n return await self._service.mark_notifications_as(\n notification_ids, read, visited)",
"def mark_messages_read(self, mbox, msgset):\n self._add_flag(mbox, msgset, r'(\\Seen)')",
"def mark_all_as_unread(self, recipient=None):\n qs = self.read()\n if recipient:\n qs = qs.filter(recipient=recipient)\n\n return qs.update(unread=True)",
"def mark_all_seen(request):\n for notice in Notice.objects.notices_for(request.user, unseen=True):\n notice.unseen = False\n notice.save()\n return HttpResponseRedirect(reverse(\"notification_notices\"))",
"def markPMsRead(self):\n\t\turl = \"https://habitica.com/api/v3/user/mark-pms-read\"\n\t\treturn(postUrl(url, self.credentials))",
"def test_topic_notification_mark_as_read(self):\n private = utils.create_private_topic()\n comment = utils.create_comment(topic=private.topic)\n TopicNotification.objects.create(\n user=private.user, topic=private.topic,\n comment=comment, is_read=False)\n TopicNotification.mark_as_read(\n user=private.user, topic=private.topic)\n notification = TopicNotification.objects.get(\n user=private.user, topic=private.topic)\n self.assertTrue(notification.is_read)",
"def util_unread(self):\n try:\n query = 'from:[email protected]'\n page_token = None\n p_emails = []\n while True:\n request = self.service.users().messages().list(userId='me',\n q=query, pageToken=page_token)\n response = request.execute()\n if 'messages' not in response:\n break\n p_emails.extend(response['messages'])\n if 'nextPageToken' not in response:\n break\n page_token = response['nextPageToken']\n self.service.users().messages().batchModify(userId='me', body={\n 'addLabelIds': ['UNREAD'],\n 'ids': [e['id'] for e in p_emails]\n }).execute()\n\n except errors.HttpError as error:\n _utils.logger.error(f'An error occurred: ${error}')",
"def test_mark_read(db, session): # pylint: disable=unused-argument\n # add a notification\n user_id = 'notf-user'\n request_id = 226\n request_type = 'registration'\n request_status = 3\n message = 'this is a test notification'\n notification = Notification(user_id=user_id, request_id=request_id, request_type=request_type,\n request_status=request_status, message=message)\n notification.add()\n\n # get notification id\n notification_id = get_notification_id(session, request_id)\n notification_data = get_single_notification(session, notification_id)\n assert notification_data.marked_read is False\n\n # call mark_read()\n Notification.mark_read(notification_id)\n notification_data = get_single_notification(session, notification_id)\n assert notification_data.marked_read is True",
"def read_all(self):\n for user_id in self.user_ids:\n self.read(user_id)",
"def trigger_unread_only(self, checked):\n if self.show_unread_only != checked:\n self.get_selected()\n self.current_feed.unread_only = checked\n self.update_item_list()",
"def unread_messages(self, unread_messages):\n\n self._unread_messages = unread_messages",
"def mark_messages_unread(self, mbox, msgset):\n self._remove_flag(mbox, msgset, r'(\\Seen)')",
"def put(self, notification_id):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n if not mark_notification_as_read(user, notification_id):\n return {'msg': \"Not allowed to mark notification as read\"}, 401",
"def cmd_notification_mark(client, args):\n # Converted to list because in current implemented in imgurpython, client method\n # expected a comma separated list of ids\n ids = args.ids.split(',')\n notifications_marked_as_viewed = client.mark_notifications_as_read(args.ids)\n generate_output({'notifications_marked_as_viewed':\n notifications_marked_as_viewed})",
"def filter_users_by_mute_subscription(\n self, user_ids: List[UserId], mute_all_dataset_notifications=None\n ) -> List[U]:\n ...",
"def test_read_all(self):\n self.assertEqual(self.category.categoryread_set.count(), 0)\n\n response = self.client.post(self.category.get_read_api_url())\n self.assertEqual(response.status_code, 200)\n\n self.category.categoryread_set.get(user=self.user)\n\n # user was resynced\n self.reload_user()\n\n self.assertFalse(self.user.sync_unread_private_threads)\n self.assertEqual(self.user.unread_private_threads, 0)",
"def get_user_notifications(self, login):",
"async def get_user_notifications(self):\n self._old_notifications = self.user_notifications # important for keeping track of what is new.\n\n async with self.web_session.get(self._api_notifications_url, headers=self._headers) as resp:\n if self.check_status(resp.status, self._api_notifications_url):\n data = await resp.json()\n self.user_notifications = create_notification_objects(data.get('notifications'))\n for user_notification in self.user_notifications:\n self.all_notifications[user_notification.id] = user_notification\n return self.user_notifications",
"def mark_as_read(self):\n\n # TODO: NewsBlur requests that stories are maked as read in batches.\n return self._api_client.mark_stories_as_read([self])",
"def trigger_item_read(self, checked):\n if self.selected_item and checked == self.selected_item.unread:\n if self.selected_item.unread:\n self.selected_item.mark_as_read()\n else:\n self.selected_item.mark_as_unread()\n self.controller.item_read(self.selected_item)",
"def feed_read(self, feed):\n if feed != self.current_feed:\n return\n self.action_mark_all_read.setDisabled(True)\n for item in self.current_feed.get_items():\n self.update_item(item)"
] | [
"0.74910504",
"0.7462836",
"0.7359577",
"0.73337513",
"0.7281608",
"0.7210877",
"0.7019616",
"0.69373256",
"0.6911804",
"0.6884166",
"0.64511365",
"0.6383615",
"0.6305257",
"0.61974895",
"0.6158948",
"0.61453354",
"0.60896534",
"0.6042512",
"0.60403097",
"0.59861183",
"0.5946506",
"0.58677846",
"0.585084",
"0.58171505",
"0.58120114",
"0.57989633",
"0.57987565",
"0.5753183",
"0.57255316",
"0.56826913"
] | 0.866048 | 0 |
the main loop of the data server | def main(self):
    while True:
        if not self.data_server_command.empty():
            command_data_server = self.data_server_command.get()
            if command_data_server[0] == 4:
                thread.start_new_thread(self.get_file, (command_data_server[1],))
            else:
                self.data_server_command_def[command_data_server[0]](command_data_server[1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main_loop(self):\n # main loop...don't ever exit\n while True:\n # collect data\n # get the time...the local clock is set with NTP regularly\n self._get_time()\n \n # get the latest metar data from the closest location\n self._get_metar()\n \n # get the latest fence station data\n self._get_fence_station()\n \n # get the lastest roof station data\n #METAR self._get_roof_station()\n \n # publish the data to our data file\n self.write_data_files()\n \n # show the user we are running\n print(\"{:s}\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d %H:%M:%S.%f\")), end=\"\\r\", flush=True)\n \n # wait a bit for the next loop\n time.sleep(3.0)\n \n return",
"def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)",
"def run(self):\r\n self.rpc_server.serve_forever(0.5)",
"def server():",
"def server():",
"def run(self):\n\t\tself.data_source.connect()\n\t\twhile self.running:\n\t\t\tself.data_source.read()",
"def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()",
"def DataServer(data):\n\thttpd, t, baseURL = getServerInThread(data)\n\n\tyield baseURL\n\n\thttpd.shutdown()\n\tt.join(10)",
"def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])",
"def run(self):\n self.connect()\n self.run_forever()",
"async def _main(self):\n while True:\n time.sleep(1)",
"def loop(self):\n pass",
"def run(self):\n self.__server.serve_forever()",
"def run(self):\n self.cmdloop()",
"def _run(self):\n while(self._loop):\n pass",
"def run(self):\n self._server = self._get_server()\n self._server.serve_forever()",
"def run(self):\n # Get data objects (in a dict) from the controller process \n dataDict = self.controller.recv()\n self.orderedStreams = dataDict['orderedStreams']\n\n ID = None\n data = None\n while self.clients:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n ID, data = result\n # Data sequence is unimportant, simply write it out and proceed\n self.writePairs(data)\n\n # Send updated data (stats mainly) via the pipe directly back to\n # the MPController object, close filehandles and finish up.\n self.updateObjectsToController()\n self.closeFileHandles()",
"def handle(self):\n try:\n conn = sqlite.connect(\"temp.db\")\n while True:\n data = self.request.recv(48)\n if not data:\n break\n parts = struct.unpack(\"dddddd\", data)\n print_datapoint(parts)\n store_datapoint(conn, parts)\n except KeyboardInterrupt:\n pass\n finally:\n conn.close()",
"def run(self):\n self._create_data_socket()\n\n self._is_running = True\n\n # self._clear_buffer(data_socket)\n\n # prevent recv from block indefinitely\n self._socket.settimeout(DataThread.TIMEOUT)\n\n while self._is_running:\n try:\n data = self._socket.recv(SIZE_BUFFER)\n if len(data):\n self._adapter.process_message(data)\n except (KeyboardInterrupt, SystemExit, OSError):\n print('Exiting data socket')\n\n except socket.timeout:\n print('NatNetClient data socket timeout!')\n continue\n\n self._close_socket()",
"def run_loop(self):\r\n server_log.info('Server now accepting client connections.')\r\n while not self.clients_done():\r\n asyncore.loop(timeout=config[\"server_timeout\"], count=config[\"server_loop_count\"])",
"def launch_dataserver(self):\n # server thread interface to DB\n self.read_conn = sqlite3.connect(\"file:%s?mode=ro\"%self.dbname, timeout=30, uri=True)\n self.read_curs = self.read_conn.cursor()\n\n # xmlrpc web interface for data updates\n class RequestHandler(SimpleXMLRPCRequestHandler):\n rpc_paths = ('/RPC2',)\n server = SimpleXMLRPCServer(('', self.readport), requestHandler=RequestHandler, allow_none=True)\n #server.register_introspection_functions()\n server.register_function(self.get_newest, 'newest')\n server.register_function(self.get_readtypes, 'readtypes')\n server.register_function(self.get_datapoints, 'datapoints')\n server.register_function(self.get_datapoints_compressed, 'datapoints_compressed')\n server.register_function(self.get_readout_info, 'readout_info')\n server.register_function(self.get_messages, 'messages')\n\n print(\"Launching readserver on\", self.host, self.readport)\n sys.stdout.flush()\n server.serve_forever()",
"def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)",
"def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)",
"def Listen(self):\n while True:\n time.sleep(1)",
"def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()",
"def run(self):\n while self.running:\n self.handle_request()",
"def run(self):\n self.logger.info(\"start consuming api calls\")\n while not self.shutdown:\n self.rpc.listen()",
"def run(self):\n self._connection = self.open_connection()\n self._connection.ioloop.start()",
"def run(self):\n global socket_connections\n\n self.start_server_socket(self.ticks_per_min)\n\n while True: \n\n try: \n # keep track of the time that the server started\n start_time = time.time() \n c, addr = self.server.accept()\n data, addr_2 = c.recvfrom(1024)\n\n self.server.shutdown(socket.SHUT_RDWR)\n self.server.close()\n\n # keep track of the time that the server finishes receiving\n # a request\n end_time = time.time() \n\n # set the timeout of the server to end_time - start_time to get\n # around the GIL\n self.start_server_socket(end_time - start_time)\n\n data = data.decode()\n\n # add the received message to the msg_queue\n if data: \n self.msg_queue.put(data)\n print str(self.id) + \" got some! \" + data\n\n # every time the socket timesout, callback to the clock's instruction\n except Exception, e:\n # shutdown the server first \n try: \n self.server.shutdown(socket.SHUT_RDWR)\n except:\n pass\n self.server.close()\n print \"exception: \" + str(e)\n print \"complete an instruction\"\n self.perform_clock_instruction()\n # restart server\n self.start_server_socket(self.ticks_per_min)",
"def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()"
] | [
"0.7505039",
"0.74601567",
"0.7351386",
"0.7342225",
"0.7342225",
"0.73240626",
"0.7100712",
"0.70430654",
"0.7006645",
"0.6992665",
"0.69915676",
"0.6894115",
"0.68917155",
"0.6885587",
"0.68853796",
"0.6882698",
"0.6868027",
"0.68606335",
"0.6837757",
"0.68368477",
"0.6820294",
"0.68057966",
"0.68057966",
"0.6804059",
"0.6802329",
"0.67841953",
"0.67823917",
"0.67587984",
"0.6745251",
"0.6732322"
] | 0.78023845 | 0 |
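A hedged illustration (not part of the dataset row above): the record's main loop polls a command queue and dispatches each command code to a handler, spawning a thread for code 4. The sketch below reproduces that dispatch pattern in modern Python — the legacy `thread.start_new_thread` call is swapped for `threading.Thread`, and the command codes, handlers, and time-bounded loop are assumptions added only so the example runs and terminates.

import queue
import threading
import time

class ToyDataServer:
    def __init__(self):
        self.data_server_command = queue.Queue()
        # command code -> handler; code 4 is reserved for threaded file fetches
        self.data_server_command_def = {1: self.echo}

    def echo(self, payload):
        print("echo:", payload)

    def get_file(self, path):
        print("fetching:", path)

    def main(self, run_seconds=0.2):
        deadline = time.time() + run_seconds  # bounded so the sketch terminates
        while time.time() < deadline:
            if not self.data_server_command.empty():
                code, payload = self.data_server_command.get()
                if code == 4:
                    threading.Thread(target=self.get_file, args=(payload,)).start()
                else:
                    self.data_server_command_def[code](payload)

server = ToyDataServer()
server.data_server_command.put((1, "hello"))
server.data_server_command.put((4, "report.csv"))
server.main()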
Find vertices in neighborhood of peak vertices. | def peak_neighborhood(apsp, peak, n_size):
    dpeaks = apsp[peak, :]
    nhood = np.where(dpeaks < n_size)[0]
    return nhood | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def peak_indices(self, **kwargs):\n kwarg_defaults = {\n 'width': 5, # ensure small spikes are ignored\n }\n kwarg_defaults.update(kwargs)\n return signal.find_peaks(self.ys, **kwarg_defaults)",
"def find_delaunay_with_max_vertices(bbox, nvertex):\n # find bracketing values\n a1 = a2 = 1e6\n t1 = calculate_mesh(a1, bbox, nvertex)\n afac = np.power(10., -np.sign(t1))\n while (\n np.sign(t1) ==\n np.sign(calculate_mesh(a2, bbox, nvertex))\n ):\n a2 *= afac\n val_at_root = -1\n nvtweak = nvertex\n while val_at_root < 0:\n a = scipy.optimize.brentq(\n calculate_mesh,\n a1,\n a2,\n args=(bbox, nvtweak, ))\n val_at_root = calculate_mesh(a, bbox, nvertex)\n a1 = a * 2\n a2 = a * 0.5\n nvtweak -= 1\n mesh = calculate_mesh(a, bbox, None, get_t=True)\n return mesh, a",
"def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys",
"def peakdet2d(image):\n # define an 8-connected neighborhood\n neighborhood = generate_binary_structure(2,2)\n\n #apply the local maximum filter; all pixel of maximal value \n #in their neighborhood are set to 1\n local_max = maximum_filter(image, footprint=neighborhood)==image\n #local_max is a mask that contains the peaks we are \n #looking for, but also the background.\n #In order to isolate the peaks we must remove the background from the mask.\n\n #we create the mask of the background\n background = (image==0)\n\n #a little technicality: we must erode the background in order to \n #successfully subtract it form local_max, otherwise a line will \n #appear along the background border (artifact of the local maximum filter)\n eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)\n\n #we obtain the final mask, containing only peaks, \n #by removing the background from the local_max mask\n detected_peaks = local_max - eroded_background\n\n return(detected_peaks)",
"def peak_indices(pgram, thresh=0):\n diff = pgram[1:] - pgram[0:-1]\n peaks_offset1 = (diff[0:-1] > 0) & (diff[1:] < 0)\n peaks = np.insert(peaks_offset1, 0, False) # fix the offset; the first point cannot be a peak\n peaks = np.insert(peaks, -1, False) # ... nor the last point\n peaks = peaks & (pgram >= thresh) # apply the threshold\n peak_ixs = np.arange(peaks.size)[peaks] # convert bool array to index list\n order = np.argsort(pgram[peak_ixs])[::-1] # find order of decreasing power\n ordered_peaks = peak_ixs[order] # reorder the peak indices to decreasing power\n return ordered_peaks",
"def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not edge.nxt][0]\n # next(obj for obj in objs if obj.val==5)\n first_vertex = edge.origin\n outer_boundary = []\n while (not edge.get_destination() == first_vertex):\n if(edge.get_destination().is_infinity()):\n edge = edge.twin.nxt\n else:\n outer_boundary.append(edge)\n edge = edge.nxt\n outer_boundary.append(edge)\n return outer_boundary",
"def peak_finder(filt_im, dist, threshold):\n from skimage.feature import peak_local_max\n coordinates = peak_local_max(filt_im, min_distance=dist, threshold_abs=threshold)\n return coordinates",
"def peak(self):\n pass",
"def find_edges(starting_point, max_dist, hi, lo, bgArray):\n try:\n b = fetch_val(bgArray, starting_point)\n except IndexError:\n return None\n offsets = [(0,1), (1,0), (0,-1), (-1,0)]\n edgePoints = []\n for offset in offsets:\n first_result = find_edge(starting_point, offset, max_dist, hi, lo, bgArray)\n if first_result is not None:\n edgePoints.append(first_result[0])\n if b < lo or b > hi:\n # Try to find second point, since starting click was outside threshold\n second_result = find_edge(first_result[0], offset, max_dist - first_result[1], hi, lo, bgArray)\n if second_result is not None:\n edgePoints.append(second_result[0])\n return edgePoints",
"def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours",
"def find_peak_locations(data, tol=prominence_tolerance, ranked=False):\n\n prominences = [(i, calculate_peak_prominence(data, i)) for i in range(len(data))]\n\n # normalize to interval [0,1]\n prom_max = max([x[1] for x in prominences])\n if prom_max == 0 or len(prominences) == 0:\n # failure to find any peaks; probably monotonically increasing / decreasing\n return []\n\n prominences[:] = [(x[0], x[1] / prom_max) for x in prominences]\n\n # take only the tallest peaks above given tolerance\n peak_locs = [x for x in prominences if x[1] > tol]\n\n # if a peak has a flat top, then both 'corners' of that peak will have high prominence; this\n # is rather unavoidable. just check for adjacent peaks with exactly the same prominence and\n # remove the lower one\n to_remove = [\n peak_locs[i]\n for i in range(len(peak_locs) - 2)\n if peak_locs[i][1] == peak_locs[i + 1][1]\n ]\n for r in to_remove:\n peak_locs.remove(r)\n\n if ranked:\n peak_locs.sort(key=lambda x: x[1] * -1)\n else:\n peak_locs[:] = [x[0] for x in peak_locs]\n\n return peak_locs",
"def detect_peaks(image):\r\n\r\n # define an 8-connected neighborhood\r\n neighborhood = ndimage.morphology.generate_binary_structure(2,2)\r\n\r\n #apply the local maximum filter; all pixel of maximal value \r\n #in their neighborhood are set to 1\r\n local_max = ndimage.filters.maximum_filter(image, footprint=neighborhood)==image\r\n #local_max is a mask that contains the peaks we are \r\n #looking for, but also the background.\r\n #In order to isolate the peaks we must remove the background from the mask.\r\n\r\n #we create the mask of the background\r\n background = (image==0)\r\n\r\n #a little technicality: we must erode the background in order to \r\n #successfully subtract it form local_max, otherwise a line will \r\n #appear along the background border (artifact of the local maximum filter)\r\n eroded_background = ndimage.morphology.binary_erosion(background, structure=neighborhood, border_value=1)\r\n\r\n #we obtain the final mask, containing only peaks, \r\n #by removing the background from the local_max mask (xor operation)\r\n detected_peaks = local_max ^ eroded_background\r\n\r\n return detected_peaks",
"def hist_peak_search(hist, bins):\n\n ix = peakutils.indexes(-hist, thres = 0.15/max(-hist), min_dist = 2)\n peaks = list(bins[list(ix)])\n\n return peaks",
"def find_inner_edge(wrap, dist=25, prom=0.08): # used to be named as find_cell_corner\n if len(wrap.shape) == 2:\n wrap_g = wrap\n elif len(wrap.shape) == 3:\n wrap_g = cv.cvtColor(wrap, cv.COLOR_BGR2GRAY)\n\n sum_x = np.sum(wrap_g, axis=0)\n sum_x = sum_x / np.max(sum_x)\n peak_x, _ = signal.find_peaks(-sum_x, distance=dist, prominence=prom)\n\n sum_y = np.sum(wrap_g, axis=1)\n sum_y = sum_y / np.max(sum_y)\n peak_y, _ = signal.find_peaks(-sum_y, distance=dist, prominence=prom)\n\n return peak_x, peak_y",
"def rook_neighbors_face(self, face):\n edges = self.cw_face_edges(face)\n return list(set([ self.left_region[edge] for edge in edges]))",
"def findPeakAndValley(np):\n peakValleyArray = []\n for i in range (1, len(np) - 1):\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] > 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] < 1):\n peakValleyArray.append(i)\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] < 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] > 1):\n peakValleyArray.append(i)\n return peakValleyArray",
"def find_nearby_genes(sub, apeak):\n cutoff = 1e3\n genes_for_peak = []\n for index, _gene in sub.iterrows():\n dist = get_distance_pandas(_gene, apeak)\n if abs(dist) < cutoff:\n genes_for_peak.append(_gene)\n return genes_for_peak",
"def find_nearby_genes(sub, apeak):\n cutoff = 1e3\n genes_for_peak = []\n for index, _gene in sub.iterrows():\n dist = get_distance_pandas(_gene, apeak)\n if abs(dist) < cutoff:\n genes_for_peak.append(_gene)\n return genes_for_peak",
"def findNeighbor(cur, dataList, eps):\n neighbors = []\n for pt in dataList:\n if (cur.x - pt.x) ** 2 + (cur.y - pt.y) ** 2 <= eps ** 2:\n neighbors.append(pt)\n return neighbors",
"def get_neighbour_vertices(self, cur: Union[str, int]) -> list:\n\t\tvertices = [edge[0] if edge[1] == cur else edge[1] for edge in self.get_neighbour_edges(cur)]\n\t\treturn vertices",
"def detectpeaks (image):\n # define an 8-connected neighborhood\n neighborhood = generate_binary_structure(2,2)\n #apply the local maximum filter; all pixel of maximal value \n #in their neighborhood are set to 1\n local_max = maximum_filter(image, footprint=neighborhood)==image\n #local_max is a mask that contains the peaks we are \n #looking for, but also the background.\n #In order to isolate the peaks we must remove the background from the mask.\n #we create the mask of the background\n background = (image==0)\n #a little technicality: we must erode the background in order to \n #successfully subtract it form local_max, otherwise a line will \n #appear along the background border (artifact of the local maximum filter)\n eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)\n #we obtain the final mask, containing only peaks, \n #by removing the background from the local_max mask (xor operation)\n detected_peaks = local_max ^ eroded_background\n return detected_peaks",
"def get_neighbours(self):\n return self.points_to.keys()",
"def peak_local_max_nD(img, size=(70,100,100), min_dist=0):\n def has_neighbor(peak, peak_list, min_dist):\n \"\"\"Find whether a peak already exists within minimum distance of this peak\"\"\"\n for testpeak in peak_list:\n if (distance.euclidean(peak, testpeak) < min_dist):\n return True\n return False\n # Find pixels that represent local maxima. Produces clusters of connected\n # pixels at the centers of objects.\n maxes = local_max(img, size)\n # Connect these pixels in a labelmask.\n conn_comp, info = ndi.label(maxes)\n # Get the centroids of each local max object, update mask and list.\n local_peak_mask = np.zeros_like(img)\n local_peaks = []\n peak_num=1\n\n for id_ in np.unique(conn_comp)[1:]:\n centroid = get_object_centroid(conn_comp, id_)\n # If there is no already-added seed within the minimum distance,\n # add this seed to the mask and list.\n if (not has_neighbor(centroid, local_peaks, min_dist)):\n local_peak_mask[centroid] = peak_num\n local_peaks.append(centroid)\n peak_num = peak_num + 1\n return local_peak_mask, local_peaks",
"def FindPeaks_graph(self):\n import string\n \n maxima = self['FP_LOC'].copy()\n maxima = num.where(maxima)\n maxima = (maxima[1],maxima[0])\n detectimg = self['FP_DETECT'].copy()\n \n id = self._getGraphId()\n root = 'FindPeaks_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n\n doStamp(detectimg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawCross(maxima,length=7,color='green')\n \n strpeaks = string.strip('%i'% (self['M_NPEAKS']))\n text = 'NP=%s' % strpeaks \n \n # Painted.Graffiti(text,commtextpos)\n \n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['FindPeaks'] = epsname\n self['figcomms']['FindPeaks'] = text",
"def find_peaks(self, t_measure):\n self._check_time(t_measure)\n #widths = np.arange(2,7) # range of widths to check by find_peaks_cwt\n #peak_nodes = find_peaks_cwt(self.get_velocities(t_measure), widths, min_snr=2.0,noise_perc=30.0)\n peak_beads = peakutils.peak.indexes(self.get_velocities(t_measure), thres=0.75, min_dist=7)\n return peak_beads",
"def get_neighbour_edges(self, cur: Union[str, int]) -> list:\n\t\treturn [edge for edge in self.edges if cur in edge]",
"def get_neighbourhood(self, source: int) -> Iterable[GraphEdge]:\n return filter(\n lambda e: e.fst == source,\n self.__edges\n )",
"def get_neighbours(self):\n return []",
"def extractPeak( image, nSizeX, nSizeY, nMaxSize, nMaxNbr = 5, nErrorValue = -1 ):\n blobs = []; # will contain the center of the blob and it's max value\n nSmallerMax = 0; # the max value of the smallest peak\n nSmallerIdx = -1;\n nMaxSizeSquared = nMaxSize*nMaxSize;\n for y in range( nSizeY ):\n for x in range( nSizeX ):\n# print( \"x,y: %d,%d\" % (x,y) );\n nVal = image[x+y*nSizeX];\n if( nVal != nErrorValue ):\n if( nVal > nSmallerMax ):\n # update blobs\n # find in blobs\n bFound = False; \n bUpdateSmallerMax = False;\n n = 0;\n while( n < len( blobs ) ):\n if( distSquared( blobs[n][0], blobs[n][1], x, y ) < nMaxSizeSquared ):\n # found it!\n if( nVal > blobs[n][2] ):\n # update this blobs\n blobs[n][0] = x;\n blobs[n][1] = y;\n blobs[n][2] = nVal;\n if( nSmallerMax == nVal ):\n # update smaller max\n bUpdateSmallerMax = True;\n bFound = True;\n break;\n n += 1;\n if( not bFound ):\n # create a new one\n if( len( blobs ) < nMaxNbr ):\n # create from scratch\n blobs.append( [x,y,nVal] );\n bUpdateSmallerMax = True;\n else:\n # reuse smaller\n blobs[nSmallerIdx][0] = x;\n blobs[nSmallerIdx][1] = y;\n blobs[nSmallerIdx][2] = nVal;\n bUpdateSmallerMax = True;\n \n if( bUpdateSmallerMax ):\n nSmallerMax = 0xFFFFFFF;\n for idx, blob in enumerate( blobs ):\n if( blob[2] < nSmallerMax ):\n nSmallerMax = blob[2];\n nSmallerIdx = idx;\n# print( \"blobs: %s\" % str( blobs ) );\n # if( nVal > nSmallerMax ) - end\n # if( nVal != nErrorValue ) - end\n \n # convert to fixed size\n for idx, blob in enumerate( blobs ):\n blobs[idx].append( 50-idx*10 );\n\n return blobs;",
"def find_open_edges_voronoi(graph, grid):\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]]\n p2 = graph.vertices[v[1]]\n cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))\n hit = False\n\n for c in cells:\n # First check if we're off the map\n if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:\n hit = True\n break\n # Next check if we're in collision\n if grid[c[0], c[1]] == 1:\n hit = True\n break\n\n # If the edge does not hit on obstacle\n # add it to the list\n if not hit:\n # array to tuple for future graph creation step)\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n return edges"
] | [
"0.6164366",
"0.6151801",
"0.6076451",
"0.6051318",
"0.6048757",
"0.60092044",
"0.6003077",
"0.59836006",
"0.5955315",
"0.5863139",
"0.58380353",
"0.58234304",
"0.58089036",
"0.58047277",
"0.57400477",
"0.57065547",
"0.56949335",
"0.56949335",
"0.5683954",
"0.5674413",
"0.5657473",
"0.5656645",
"0.56466806",
"0.5603336",
"0.55804807",
"0.556979",
"0.5568237",
"0.5559944",
"0.55596936",
"0.5550554"
] | 0.64697915 | 0 |
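A hedged usage sketch for the record above, where `apsp` is read as an all-pairs geodesic distance matrix and the function returns the indices of vertices within `n_size` of the peak vertex. The 4-vertex distance matrix below is an assumption made only for demonstration.

import numpy as np

def peak_neighborhood(apsp, peak, n_size):
    dpeaks = apsp[peak, :]
    nhood = np.where(dpeaks < n_size)[0]
    return nhood

# symmetric pairwise distances between 4 vertices (toy data)
apsp = np.array([[0.0, 1.0, 2.5, 4.0],
                 [1.0, 0.0, 1.5, 3.0],
                 [2.5, 1.5, 0.0, 1.5],
                 [4.0, 3.0, 1.5, 0.0]])

print(peak_neighborhood(apsp, peak=0, n_size=2.0))  # -> [0 1]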
Compute the Kernel Density Estimate, in target coordinate space, of the mapped vertices. Each target vertex will be smoothed using an isotropic Gaussian kernel of width ```sigma```. We compute, for each target vertex, the number of mapped source vertices. The transformed value of each target is the convolution of the isotropic Gaussian, centered at itself, with the count map. | def kde(sregion, tregion, tdist, mapping, index_map, sigma=1.5):
    tinds = index_map[tregion]
    # mapping of target indices to 0 : # targets
    t2i = dict(zip(tinds, np.arange(len(tinds))))
    # determine number of source vertices mapping to each target
    counts = np.zeros((len(tinds),))
    for i in mapping.index:
        mu = mapping.loc[i, 'mu']
        counts[t2i[mu]] += 1
    # iterate over target vertices, and convolve count map
    # with isotropic Gaussian kernel
    density = np.zeros((counts.shape[0],))
    for i in np.arange(len(tinds)):
        pdf = models.geodesic(tdist[i, :], [sigma])
        d = (pdf*counts).sum()
        density[i] = d
    return density | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val",
"def kernel_density_estimation(x, sigma, n_bins, eps=1e-2):\n N, _, W, H = x.shape\n device = x.device\n ind = torch.linspace(0, n_bins, n_bins+1).unsqueeze(0).unsqueeze(-1).unsqueeze(-1).expand(N, -1, W, H).to(device)\n y = torch.exp((-1./sigma**2)*(x - ind)**2)\n y = threshold_and_normalize_pixels(y, eps=eps)\n return y",
"def gaussian_kernel(sigma, truncate=4.0):\n\n sigma = float(sigma)\n radius = int(truncate * sigma + 0.5)\n\n x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1]\n sigma = sigma**2\n\n k = 2 * np.exp(-0.5 * (x**2 + y**2) / sigma)\n k = k / np.sum(k)\n\n return k",
"def GaussianKernel(sigma: float = 1., width: int = 0):\n assert not ((width is None or width == 0) and\n (sigma is None or sigma == 0)), \\\n \"GaussianKernel :: both sigma ({}) & width ({}) are not valid\".format(\n sigma, width)\n\n if width is None or width == 0:\n width = int(2.0 * 3.0 * sigma + 1.0)\n if width % 2 == 0:\n width += 1\n\n if sigma is None or sigma == 0:\n sigma = (width - 1)/6.\n half = width//2\n x, y = np.meshgrid(np.linspace(-half, half, width),\n np.linspace(-half, half, width), indexing='xy')\n w = np.exp(- (x**2 + y**2) / (2.*(sigma**2)))\n w /= np.sum(w)\n return torch.from_numpy(w.astype(np.float32)).view(1, 1, width, width)",
"def gauss_kern(sigma, size):\r\n size = int(np.floor(size/2))\r\n sizey = size\r\n x, y = scipy.mgrid[-size:size+1, -sizey:sizey+1]\r\n g = scipy.exp(-(x**2+y**2) / (2*(sigma)**2))\r\n return np.ravel(g / g.max())",
"def cal_gaussian_process(b, sigma2, X_train, y_train, X_test):\n n = X_train.shape[0]\n p = X_test.shape[0]\n\n K_n = np.array([[kernel(X_train[i], X_train[j], b) for i in range(n)] for j in range(n)])\n inv = np.linalg.inv(np.diag([sigma2] * n) + K_n)\n miu = np.zeros(p)\n Sigma = np.zeros(p)\n \n for j in range(p): # for every new point x0 in testing data.\n x0 = X_test[j]\n K_Dn = np.zeros(n) # initialize K_Dn \n for i in range(n):\n K_Dn[i] = kernel(X_train[i], x0, b) # calculate every item in K_Dn\n \n miu[j] = K_Dn.dot(inv).dot(y_train)[0] # calculate new distribution parameters\n Sigma[j] = sigma2 + kernel(x0, x0, b) - K_Dn.dot(inv).dot(K_Dn.T)\n \n return miu, Sigma",
"def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s",
"def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel",
"def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)",
"def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B",
"def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)",
"def gaussian_kernel(size, sigma):\n\n m, n = [(s - 1.) / 2. for s in size]\n y, x = np.ogrid[-m:m+1, -n:n+1]\n h = np.exp(-(x*x + y*y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\n sumh = h.sum()\n if sumh != 0: h /= sumh\n return h",
"def smooth(img, sigma):\n if sigma < 0:\n raise ValueError('smoothing kernel size is negative')\n elif sigma == 0:\n return img.get_data()\n else:\n sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))\n return nd.gaussian_filter(img.get_data(), sigma_vox)",
"def GaussianKernel(shape=(3, 3), sigma=0.5):\r\n radius_x, radius_y = [(radius-1.)/2. for radius in shape]\r\n y_range, x_range = np.ogrid[-radius_y:radius_y+1, -radius_x:radius_x+1]\r\n h = np.exp(- (x_range*x_range + y_range*y_range) / (2.*sigma*sigma))\r\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\r\n sumofh = h.sum()\r\n if sumofh != 0:\r\n h /= sumofh\r\n return h",
"def _generate_gaussian_kernel(self, size: int, sigma: float = 1.0, mu: float = 0.0) -> ndarray:\n # create the 1D array of equally spaced distance point of given size\n self.kernel_1d = np.linspace(-(size//2), size//2, size)\n # get the gaussian distribution of the 1D array\n self.kernel_1d = self._gaussian_distribution(\n self.kernel_1d, mu, sigma)\n\n # Compute the outer product of kernel1D tranpose and kernel1D\n self.kernel_2d = np.outer(self.kernel_1d.T, self.kernel_1d)\n # normalize the the outer product to suish the values between 0.0-1.0\n self.kernel_2d *= 1.0/self.kernel_2d.max()\n return self.kernel_2d",
"def gaussian_kernel(dim, sigma):\n kernel = np.zeros(dim)\n\n if dim%2 == 0:\n begin = dim//2-1\n else:\n begin = dim//2\n\n for i in range(dim):\n kernel[i] = gaussian(i-begin, sigma)\n\n return kernel",
"def generate_gaussian_kernel(shape=(3,3),sigma=0.8):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h",
"def gaussian_kernel(N, mu, sigma):\n # Asserting N is odd and sigma is number\n assert assert_odd(N)\n \n # Create the normal here (with ID covariance) \n normal = multivariate_normal(mean=mu, cov=sigma*np.identity(2))\n \n # Create the position matries (x_1,x_2 in 2D)\n X_1 = np.ones((N,N))*np.arange(N) # x_1 pos\n X_2 = X_1.T #x_2 pos, just transpose the above\n \n # Shift the positions so center is at middle\n s = np.floor(N/2) #shift value\n X_1, X_2 = X_1-s, X_2-s # shifted matrices\n \n # Create holder matrix\n X = np.zeros((N,N)) # Below we have the iterator \n for (i,j) in [(i,j) for i in range(N) for j in range(N)]:\n X[i,j] = normal.pdf([X_1[i,j], X_2[i,j]]) # Normal values\n \n # Finally just return the normalized kernel\n return X*(1/np.sum(X))",
"def mask_density(mask):\n return get_number_of_unpruned_weights(mask).float() / get_number_of_weights(mask).float()",
"def gaussian_kernel(fsize, sigma):\n\n _x = _y = (fsize - 1) / 2\n x, y = np.mgrid[-_x:_x + 1, -_y:_y + 1]\n G = np.exp(-0.5 * (x**2 + y**2) / sigma**2)\n\n return G / G.sum()",
"def gauss_kern(size, sigma=1.0):\n h1 = size[0]\n h2 = size[1]\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );\n return g / g.sum()",
"def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()",
"def _poisson_gauss_smooth(counts, bkg):\n from scipy.ndimage import convolve\n Nev = np.sum(counts)\n Np = len(counts)\n\n # Number of pixels per sigma of the kernel gaussian to have more than 150 events/sigma\n Npix_sigma = (150 / Nev) * Np\n\n # For high statistic, we impose a minimum of 4pixel/sigma\n Npix_sigma = np.maximum(Npix_sigma, 4)\n\n # For very low statistic, we impose a maximum lenght of the kernel equal of the number of bin\n # in the counts histogram\n Npix_sigma = np.minimum(Npix_sigma, Np / 6)\n\n # kernel gaussian define between -3 and 3 sigma\n x = np.linspace(-3, 3, 6 * Npix_sigma)\n kernel = np.exp(-0.5 * x ** 2)\n bkg_smooth = convolve(bkg, kernel / np.sum(kernel), mode=\"reflect\")\n return bkg_smooth",
"def gaussian_kernel(shape: Tuple[int, int]=(3, 3), sigma: float=0.5):\n m, n = [int((ss - 1.) / 2.) for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n kernel = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0\n sumh = kernel.sum()\n if sumh != 0:\n kernel /= sumh\n return kernel",
"def gaussian_kernel(windowX, windowY, sigma):\n X,Y = createKernalWindowRanges(windowX, windowY, increment)\n \n gKernel = gaussianNormalised(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(gKernel))\n \n if gSum == 0:\n print \"Warning gaussian_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (gKernel)\n else:\n return (gKernel / gSum)",
"def kde(x, mu, sigma, DIMENSION=2):\n dist_sq = np.sum((x - mu)**2, axis=1)\n kde_val = (1/((sigma**2)*2*np.pi))**(0.5*DIMENSION)*np.exp(-dist_sq/(2*(sigma**2)))\n return np.mean(kde_val)",
"def gauss_kernel(X, test_locs, X_org, test_locs_org, sigma, sigma0, epsilon):\r\n DXT = Pdist2(X, test_locs)\r\n DXT_org = Pdist2(X_org, test_locs_org)\r\n # Kx = torch.exp(-(DXT / sigma0))\r\n Kx = (1 - epsilon) * torch.exp(-(DXT / sigma0) - DXT_org / sigma) + epsilon * torch.exp(-DXT_org / sigma)\r\n return Kx",
"def gaussian_kernel(sigma, radius):\r\n if sigma <= 0:\r\n raise ValueError('sigma must be larger than zero.')\r\n tmp_x = np.arange(-radius, radius + 1)\r\n phi_x = gaussian_expfun(tmp_x, sigma)\r\n phi_x /= phi_x.sum()\r\n return phi_x",
"def computeMuSigma(self):\n totedge = self.countEdges()\n for i in range(self.totbs):\n prob = 1.0 * self.totedge[i+1] / totedge\n self.mu += prob\n self.sigma += prob * (1 - prob)\n if self.sigma > 0:\n self.sigma = math.sqrt(self.sigma)\n print \"Mu = {}, Sigma = {}\".format(self.mu, self.sigma)",
"def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel"
] | [
"0.62467575",
"0.60877776",
"0.60786873",
"0.60424685",
"0.6018096",
"0.5949306",
"0.5885207",
"0.5851151",
"0.5841302",
"0.58193064",
"0.58002514",
"0.5785205",
"0.576055",
"0.57541895",
"0.5740346",
"0.5706156",
"0.5704515",
"0.5691362",
"0.56588477",
"0.5651872",
"0.5633133",
"0.56319815",
"0.5630941",
"0.5625284",
"0.5625188",
"0.5601237",
"0.5600488",
"0.56004745",
"0.5591159",
"0.5588148"
] | 0.69313246 | 0 |
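The record above smooths a per-target-vertex count map by convolving it with an isotropic Gaussian kernel over geodesic distance. The sketch below reproduces only that core step on toy data; the project-specific `models.geodesic(...)` kernel is replaced by an explicit Gaussian pdf of the distance, which is an assumption rather than the dataset's implementation, and `tdist`/`counts` are invented toy values.

import numpy as np

def toy_density(tdist, counts, sigma=1.5):
    # tdist: (T, T) pairwise geodesic distances between target vertices
    # counts: (T,) number of source vertices mapped to each target
    pdf = np.exp(-tdist**2 / (2.0 * sigma**2)) / (sigma * np.sqrt(2.0 * np.pi))
    # row i convolves the count map with a kernel centered at target i
    return pdf @ counts

tdist = np.array([[0.0, 1.0, 2.0],
                  [1.0, 0.0, 1.0],
                  [2.0, 1.0, 0.0]])
counts = np.array([3.0, 0.0, 1.0])
print(toy_density(tdist, counts))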
Returns a Keyword object from keyword_set corresponding to the given string. | def get_keyword(arg: str, keyword_set: set) -> Keyword or None:
    arg = arg.lower().lstrip('-')
    for keyword in keyword_set:
        if arg == keyword.keyword or arg in keyword.aliases:
            return keyword
    return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keyword(self, keyword):\r\n return keywords.Keyword(self, keyword)",
"def from_str ( cls, vstr ):\n return cls ( cls.OV_KEYWORDS[vstr] )",
"def from_string(cls, str_value):\n for m in cls:\n if m.value == str_value:\n return m\n else:\n return None",
"def run_single_keyword(self, keyword_string, language):\n keyword = self.mongo_controller.get_keyword(keyword_string, language, cast=True)\n twitter_results = self.crawler.search(keyword, limit=self.limit_requests)\n return self.__save_tweets(twitter_results)",
"def sdcToClassifier_keyword(self, sdc):\n for srname, engine in self.engineMap.iteritems():\n if(srname in sdc[\"spatialRelation\"].text):\n return engine\n return None",
"def get_keyword(self, collection_id, name):\n sql = \"\"\"SELECT keyword.name, keyword.args, keyword.doc\n FROM keyword_table as keyword\n WHERE keyword.collection_id == ?\n AND keyword.name like ?\n \"\"\"\n cursor = self._execute(sql, (collection_id,name))\n # We're going to assume no library has duplicate keywords\n # While that in theory _could_ happen, it never _should_,\n # and you get what you deserve if it does.\n row = cursor.fetchone()\n if row is not None:\n return {\"name\": row[0],\n \"args\": json.loads(row[1]),\n \"doc\": row[2],\n \"collection_id\": collection_id\n }\n return {}",
"def from_keyword_parameters(cls, keyword_parameters):\n return cls()",
"def from_string(string):\n return Sentence(string.split(\" \"))",
"def test_get_keyword_string(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert the expected result\n self.assertEqual(resource.get_keyword_string(),\n \"culture, life, society, survival\")",
"def _course_key_from_string(self, string):\r\n return self.course_locations[string].course_key",
"def from_string(cls, term_string):\n term_string = term_string.replace(\" \", \"\")\n\n if term_string in cls.term_dict:\n return cls.term_dict[term_string]\n\n if term_string[0] == NALSyntax.StatementSyntax.Start.value:\n \"\"\"\n Compound or Statement Term\n \"\"\"\n assert (term_string[-1] == NALSyntax.StatementSyntax.End.value), \"Compound/Statement term must have ending parenthesis: \" + term_string\n\n copula, copula_idx = NALSyntax.Copula.get_top_level_copula(term_string)\n if copula is None:\n # compound term\n term = CompoundTerm.from_string(term_string)\n else:\n term = StatementTerm.from_string(term_string)\n elif NALSyntax.TermConnector.is_set_bracket_start(term_string[0]):\n # set term\n term = CompoundTerm.from_string(term_string)\n elif term_string[0] == NALSyntax.TermConnector.Array.value:\n if NALSyntax.StatementSyntax.ArrayElementIndexStart.value in term_string:\n term = ArrayTermElementTerm.from_string(term_string)\n else:\n term = ArrayTerm.from_string(term_string)\n elif term_string[0] == VariableTerm.VARIABLE_SYM or term_string[0] == VariableTerm.QUERY_SYM:\n # variable term\n dependency_list_start_idx = term_string.find(\"(\")\n if dependency_list_start_idx == -1:\n variable_name = term_string[1:]\n dependency_list_string = \"\"\n else:\n variable_name = term_string[1:dependency_list_start_idx]\n dependency_list_string = term_string[term_string.find(\"(\") + 1:term_string.find(\")\")]\n\n term = VariableTerm.from_string(variable_name=variable_name,\n variable_type_symbol=term_string[0],\n dependency_list_string=dependency_list_string)\n else:\n term_string = re.sub(\",\\d+\", \"\", term_string)\n term = AtomicTerm(term_string)\n\n cls.term_dict[term_string] = term\n\n return term",
"def from_string(cls, string):\n normalised = cls.normalise_string(string)\n return cls.from_normalised_string(normalised)",
"def from_string (cls, string, access=DEFAULT_ACCESS, accept_value=True):\n hKey, moniker, value = cls._from_string (string, access, accept_value)\n if value is None:\n return cls (moniker, access)\n else:\n return cls (moniker, access).get_value (value)",
"def pykeyword(operation='list', keywordtotest=None):\n\n # If the operation was 'list'\n if operation == 'list':\n # Return an array of keywords\n return str(keyword.kwlist)\n\n # If the operation was 'in'\n elif operation == 'in':\n # Return a boolean for if the string was a keyword\n return keyword.iskeyword(str(keywordtotest))\n\n # Raise a warning\n raise ValueError(\"Invalid operation specified.\")",
"def readable_keyword(s):\n if s and not s.startswith(\"*\") and not s.startswith(\"[\"):\n if s.count(\".\"):\n library, name = s.rsplit(\".\", 1)\n return library + \".\" + name[0:].title()\n else:\n return s\n else:\n return s",
"def from_string(cls, compound_term_string):\n compound_term_string = compound_term_string.replace(\" \", \"\")\n subterms, connector, intervals = cls.parse_toplevel_subterms_and_connector(compound_term_string)\n return cls(subterms, connector,intervals=intervals)",
"def getSpecific(self, keyword, key):",
"def load_set_by_name(set_name):\n return get_default_repo().get_set_by_labels({'name': set_name})",
"def get(self, keyword, not_found=None):\n if (type(keyword) != str):\n raise TypeError(\"The key for metadata items must be a string\")\n if (keyword in self._key_set):\n for item in self._metadata:\n if (item.keyword == keyword):\n return item\n return not_found",
"def from_string(cls, name):\n if hasattr(cls,name):\n return cls.__getattribute__(name)\n else:\n return None",
"def from_string(\n cls: Type[_CromwellWorkflowLabel], workflow_label: str\n ) -> _CromwellWorkflowLabel:\n count_equals = workflow_label.count(\"=\")\n count_escaped_equals = workflow_label.count(\"\\\\=\")\n\n if count_equals - count_escaped_equals == 0:\n return cls(cls.CAPER_STR_LABEL, workflow_label)\n\n if count_equals - count_escaped_equals != 1:\n raise ValueError(\n \"Found more than one unescaped `=` in key=value pair, must only '\"\n \"specify one so parsing is not ambiguous\"\n )\n\n for i, char in enumerate(workflow_label):\n if char == \"=\":\n if workflow_label[i - 1] != \"\\\\\":\n key, value = workflow_label[0:i], workflow_label[i + 1 :]\n return cls(key, value)\n\n # Can skip coverage here, we know the loop above always executes on a string\n # with one non-escaped equals sign in it\n raise ValueError(\"Could not detect key-value pair\") # pragma: no cover",
"def defaultKeywords(self, kwSet):\n return QsciLexerJava.keywords(self, kwSet)",
"def test_set_keywords_1(self):\n data_dict = {\"type\":\"ADD\",\n \"cluster\":\"RETRIEVE\",\n \"subcluster\": \"NONE\",\n \"host_genus\": \"PARSE\",\n \"retrieve_record\": \"RETAIN\"}\n keywords = set([\"retrieve\", \"retain\"])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"ADD\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(data_dict[\"subcluster\"], \"none\")\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"parse\")\n with self.subTest():\n self.assertEqual(data_dict[\"retrieve_record\"], \"retain\")",
"def getKeyWordById(self, id):\n kWord = KeyWord()\n i = 0\n while i < len(self.sentence):\n if self.sentence[i].getId() == id:\n kWord = self.sentence[i]\n i += 1\n return kWord",
"def set_keywords(self, **kwargs):\n keywords = dict()\n\n for key, value in self.allowed_keys.items():\n keywords[key] = value[1]\n\n for key, value in kwargs.items():\n if key not in self.allowed_keys:\n error = 'Keyword %s for %s object not found' % \\\n (key, self.__class__.__name__)\n MASTError(self.__class__.__name__, error)\n\n# raise RuntimeError('Keyword %s for %s object not found' % \\\n# (key, self.__class__.__name__))\n\n if isinstance(value, self.allowed_keys[key][0]):\n keywords[key] = value\n else:\n error = 'Keyword %s value %s invalid; expected type %s, got type %s' % (key, str(value), self.allowed_keys[key][0], type(value))\n MASTError(self.__class__.__name__, error)\n# raise RuntimeError('Keyword %s value invalid' % key)\n\n return keywords",
"def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]\n\n return keywords",
"def from_normalised_string(string):\n if string[0] == '_':\n return RealPred.from_normalised_string(string)\n else:\n return GPred.from_normalised_string(string)",
"def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})",
"def keywords(self):\n from hubspot3.keywords import KeywordsClient\n\n return KeywordsClient(**self.auth, **self.options)",
"def keyword_search(keywords):\n try:\n return itunespy.search(keywords)[0]\n except LookupError:\n return None"
] | [
"0.617394",
"0.5931612",
"0.54410845",
"0.5338358",
"0.53138894",
"0.5306772",
"0.5240785",
"0.5178541",
"0.51635635",
"0.51292783",
"0.511156",
"0.50753915",
"0.49567872",
"0.4942246",
"0.49277085",
"0.49002466",
"0.48902506",
"0.48736504",
"0.48280266",
"0.4818104",
"0.47770098",
"0.47570926",
"0.47494912",
"0.47366005",
"0.47359595",
"0.47276953",
"0.47221842",
"0.4721456",
"0.47209135",
"0.471473"
] | 0.6735547 | 0 |
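A hedged usage sketch for the record above: `get_keyword` expects objects exposing `keyword` and `aliases` attributes, which the record does not define, so the minimal `Keyword` dataclass below is an assumption introduced only to make the lookup runnable.

from dataclasses import dataclass, field

@dataclass(frozen=True)
class Keyword:
    keyword: str
    aliases: frozenset = field(default_factory=frozenset)

def get_keyword(arg, keyword_set):
    arg = arg.lower().lstrip('-')
    for keyword in keyword_set:
        if arg == keyword.keyword or arg in keyword.aliases:
            return keyword
    return None

keyword_set = {Keyword("verbose", frozenset({"v"})),
               Keyword("output", frozenset({"o", "out"}))}
print(get_keyword("--V", keyword_set))      # matches the "verbose" keyword via alias "v"
print(get_keyword("missing", keyword_set))  # None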
Parses a list of strings into a dictionary whose keys are the keys in keyword_dict, and such that the value at the key K is a list of the next keyword_dict[K] strings after K in the list args. All keys in keyword_dict should be lowercase; keywords are not parsed casesensitively. The returned dict holds a list of args that were not successfully parsed under the emptystring key. | def parse(args: list, keyword_set: set) -> dict:
    parsed_dict = {'': []}
    while args:
        keyword = get_keyword(arg=args[0], keyword_set=keyword_set)
        if keyword is not None:
            args.pop(0)
            keyword_name = keyword.keyword_name
            if keyword_name in parsed_dict:
                raise necrobot.exception.DoubledArgException(keyword=keyword.keyword)
            if keyword.param_for is not None:
                parsed_dict[keyword_name] = [keyword.keyword]
            else:
                parsed_dict[keyword_name] = []
            num_args_pulled = 0
            while num_args_pulled < keyword.num_args:
                if not args:
                    raise necrobot.exception.NumParametersException(
                        keyword=keyword,
                        num_expected=keyword.num_args,
                        num_given=num_args_pulled
                    )
                else:
                    num_args_pulled += 1
                    parsed_dict[keyword_name].append(args[0])
                    args.pop(0)
        else:
            parsed_dict[''].append(args[0])
            args.pop(0)
    return parsed_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_kwargs(kwargs_list: List[str]) -> Dict[str, Any]:\n\n kwargs_dict = {}\n\n for kwarg in kwargs_list:\n key = kwarg[2:].split('=')[0]\n value = '='.join(kwarg.split('=')[1:])\n\n try:\n if re.match(r'^(-)?[0-9]+$', value):\n value = int(value)\n\n elif re.match(r'^(-)?[0-9]*.[0-9]+$', value) or re.match(r'^(-)?[0-9]*(\\.)?[0-9]+e(-|\\+)[0-9]+$', value):\n value = float(value)\n\n elif re.match(r'^\\[.*]$', value) or re.match(r'^\\{.*}$', value):\n value = json.loads(value)\n\n elif value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n\n elif value.lower() == 'none':\n value = None\n\n except:\n logging.warning(f'Could not automatically parse argument \"{key}.\" Its type will remain string.')\n\n kwargs_dict[key] = value\n\n return kwargs_dict",
"def split_keywords(keywords):\n try:\n keywords = keywords.replace(u'\\u201c', '\"').replace(u'\\u201d', '\"')\\\n .replace(\"-\", \" \")\n\n except AttributeError:\n # In the event that keywords = nan\n return []\n\n if '\"' in keywords:\n # for handling key phrases\n final_set = []\n imperfect_set = map(lambda x: x.split(' \"'), keywords.split('\" '))\n # imperfect_set will contain a list of lists. Must break down\n\n for sublist in imperfect_set:\n for item in sublist:\n # clear out remaining quotations\n item = item.replace('\"', '').lower() \n # only add if not already there\n if item not in final_set: \n final_set.append(item)\n\n # we may still want individual components of key phrases\n # and permutations of words in those phrases\n if \" \" in item: \n phrase = item.split(\" \")\n if len(phrase) > 2:\n for ii in range(len(phrase) - 1):\n for jj in range(ii + 1, len(phrase)):\n word = \" \".join([phrase[ii], phrase[jj]])\n if word not in final_set:\n final_set.append(word)\n\n else:\n for word in phrase: \n # again, only if not already there\n if word not in final_set:\n final_set.append(word)\n\n else:\n final_set = keywords.split(\" \")\n\n return final_set",
"def arglist2dict(args):\n arg_dict = {}\n\n if len(args) == 0:\n return arg_dict\n\n if not args[0].startswith('--'):\n raise ValueError(f\"Positional keywords are not supported: {args[0]}\")\n\n i = 0\n while i < len(args):\n arg = args[i]\n i = i + 1\n if arg.startswith('--'):\n dest = arg[2:]\n j, arglist = Parser.get_args(args[i:])\n i = i + j\n Parser.update_arg_dict(arg_dict, dest, arglist)\n return arg_dict",
"def load_cli_kwargs(kwargs_list, delimiter='='):\n kwargs = {}\n for kv in kwargs_list:\n k, v = kv.split(delimiter, 1)\n kwargs[k] = v\n return kwargs",
"def parse_unknown_args(args):\n retval = {}\n preceded_by_key = False\n for arg in args:\n if arg.startswith('--'):\n if '=' in arg:\n key = arg.split('=')[0][2:]\n value = arg.split('=')[1]\n retval[key] = value\n else:\n key = arg[2:]\n preceded_by_key = True\n elif preceded_by_key:\n retval[key] = arg\n preceded_by_key = False\n\n return retval",
"def keyword_subset(keyword_arguments, allowed_keywords):\n keywords = set(keyword_arguments.keys())\n keyswords_to_extract = keywords.intersection(allowed_keywords)\n\n new_kw = {key: keyword_arguments[key] for key in keyswords_to_extract}\n\n return new_kw",
"def create_dict_from_list(parse_list, key_ind, *val_inds):\n parse_dict=defaultdict(list)\n for string in parse_list:\n if not val_inds:\n parse_dict[string[key_ind]]=string\n else:\n parse_dict[string[key_ind]]=[string[i] for i in range(len(string)) if i in val_inds]\n return(parse_dict)",
"def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict",
"def parseKwargs(acceptable,kwargs):\n \n output = {}\n\n if kwargs:\n for key in kwargs.keys():\n \n if key in acceptable:\n output[key] = kwargs[key]\n\n return output",
"def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L",
"def populate_keywords(kwds, pkg_id):\n if not kwds:\n return\n for word in kwds:\n # @todo(Check data and use the special character-list\n # variable in the constants' file.)\n word = word.strip(\".:;=-,\\\"'\\n $_%{}()[]^*?& +#`\").lower()\n if len(word) <= 1 or (word in constants.STOP_WORDS) or \\\n has_special_chars(word):\n continue\n insert_keyword(word, pkg_id)",
"def keywords_rege(keywords):\n searches = {}\n for kw in keywords:\n searches[kw] = re.compile(r'\\b' + kw + r'\\b', re.IGNORECASE)\n return searches",
"def join_string_lists(**kwargs) -> dict:\n return {k: \",\".join(v) for k, v in kwargs.items() if v}",
"def token_kwargs(bits, parser):\r\n if not bits:\r\n return {}\r\n kwargs = SortedDict()\r\n while bits:\r\n match = kwarg_re.match(bits[0])\r\n if not match or not match.group(1):\r\n return kwargs\r\n key, value = match.groups()\r\n del bits[:1]\r\n kwargs[parser.compile_filter(key)] = parser.compile_filter(value)\r\n return kwargs",
"def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]",
"def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs",
"def args2dict(args, dict_args={}):\n \n for arg in args:\n #this_entry = re.findall(r'[^\"\\s]\\S*|\".+?\"', arg)\n p_arg = arg.split('=')\n if len(p_arg) > 1:\n dict_args[p_arg[0]] = False if p_arg[1].lower() == 'false' else \\\n True if p_arg[1].lower() == 'true' else \\\n None if p_arg[1].lower() == 'none' else \\\n '='.join(p_arg[1:]) if len(p_arg) > 2 else \\\n p_arg[1]\n \n return(dict_args)",
"def set_keywords(self, **kwargs):\n keywords = dict()\n\n for key, value in self.allowed_keys.items():\n keywords[key] = value[1]\n\n for key, value in kwargs.items():\n if key not in self.allowed_keys:\n error = 'Keyword %s for %s object not found' % \\\n (key, self.__class__.__name__)\n MASTError(self.__class__.__name__, error)\n\n# raise RuntimeError('Keyword %s for %s object not found' % \\\n# (key, self.__class__.__name__))\n\n if isinstance(value, self.allowed_keys[key][0]):\n keywords[key] = value\n else:\n error = 'Keyword %s value %s invalid; expected type %s, got type %s' % (key, str(value), self.allowed_keys[key][0], type(value))\n MASTError(self.__class__.__name__, error)\n# raise RuntimeError('Keyword %s value invalid' % key)\n\n return keywords",
"def listify_values(params):\n return dict((k, listify(v)) for (k, v) in params.iteritems())",
"def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]\n\n return keywords",
"def priority_keyword_merge(*args: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n keyword_lists = [*args]\n base_list = []\n if len(keyword_lists) == 1:\n return keyword_lists[0]\n\n while len(keyword_lists) > 1:\n base_list, priority_list = keyword_lists[0], keyword_lists[1]\n keyword_set = set([x[0] for x in base_list])\n for item in priority_list:\n if item[0] in keyword_set:\n for index, keyword in enumerate(base_list):\n if keyword[0] == item[0]:\n base_list.pop(index)\n break\n base_list.append(item)\n\n keyword_lists.pop(1)\n\n return base_list",
"def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]':\n if not args:\n return {}\n result = {}\n for a in args:\n if not a:\n continue\n arg = Argument(a)\n result[arg.name] = arg\n return result",
"def parse_key_value_arg(self, arg_value, argname):\n result = {}\n for data in arg_value:\n\n # Split at first '=' from left\n key_value_pair = data.split(\"=\", 1)\n\n if len(key_value_pair) != 2:\n raise exceptions.InvalidKeyValuePairArgumentError(\n argname=argname,\n value=key_value_pair)\n\n result[key_value_pair[0]] = key_value_pair[1]\n\n return result",
"def _validate_arglist_and_kwlist(self, p, items, keywords):\n kwnames = set()\n args = []\n kws = []\n self._validate_arglist_list(items, p.lexer.lexer)\n for arg in items:\n if isinstance(arg, ast.keyword):\n kws.append(arg)\n kwnames.add(arg.arg)\n else:\n args.append(arg)\n for kw in keywords:\n if not isinstance(kw, ast.keyword):\n msg = 'only named arguments may follow *expression'\n tok = FakeToken(p.lexer.lexer, p.lineno(2))\n syntax_error(msg, tok)\n if kw.arg in kwnames:\n msg = 'keyword argument repeated'\n tok = FakeToken(p.lexer.lexer, kw.lineno)\n syntax_error(msg, tok)\n kwnames.add(kw.arg)\n kws.extend(keywords)\n\n return args, kws",
"def get_keywords_and_values(words):\n d={}\n triple_keyword_value = 5\n double_keyword_value= 3\n single_keyword_occurance_value = 1\n\n stop_words = set(stopwords.words(\"english\"))\n\n for i in range(0, len(words)-2):\n if words[i] not in stop_words and words[i].isalnum():\n d[words[i]] = d.get(words[i],0.0)+ single_keyword_occurance_value\n if words[i+1] not in stop_words and words[i+1].isalnum():\n d[words[i]+\" \"+words[i+1]] = d.get(words[i]+\" \"+words[i+1],0.0)+double_keyword_value\n if words[i + 2] not in stop_words and words[i + 2].isalnum():\n d[words[i]+\" \"+words[i+1]+\" \"+words[i+2]] = d.get(words[i]+\" \"+words[i+1]+\" \"+words[i+2],0.0)+triple_keyword_value\n\n print(i, len(words))\n\n if words[i+1] not in stop_words and words[i+1].isalnum():\n d[words[i+1]] = d.get(words[i+1],0.0)+ single_keyword_occurance_value\n if words[i+2] not in stop_words and words[i+2].isalnum():\n d[words[i+1]+\" \"+words[i+2]] = d.get(words[i+1]+\" \"+words[i+2],0.0)+double_keyword_value\n if words[i+2] not in stop_words and words[+2].isalnum():\n d[words[i+2]] = d.get(words[i+2],0.0)+ single_keyword_occurance_value\n return d",
"def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result",
"def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")",
"def _convert_tags_to_dict(text_list_tags):\n return OrderedDict([re.findall(r\"\"\"\\s*_(\\w+)\\s+(.+?)\\s*$\"\"\", row)[0] for row in text_list_tags])",
"def parse_args_dict(args=None):\n return vars(parse_args(args))",
"def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict"
] | [
"0.617602",
"0.60784173",
"0.6058244",
"0.5894706",
"0.5815839",
"0.57707494",
"0.57487035",
"0.57175565",
"0.56887645",
"0.5618045",
"0.56060874",
"0.55750597",
"0.553328",
"0.5478511",
"0.54528874",
"0.5370976",
"0.530249",
"0.5263757",
"0.5255872",
"0.523665",
"0.5228101",
"0.520639",
"0.5170644",
"0.5166384",
"0.5165256",
"0.51529545",
"0.51408046",
"0.51381516",
"0.51177645",
"0.51130515"
] | 0.6823548 | 0 |
The API has a schema route that answers. | def test_api_schema(self):
response = self.client.get("/api/schema/")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.get("Content-Type"), "application/vnd.oai.openapi; charset=utf-8"
)
self.assertEqual(
response.get("Content-Disposition"), 'inline; filename="Marsha API.yaml"'
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def schema_view(request):\n generator = schemas.SchemaGenerator(title='Experiment Data Depot')\n return response.Response(generator.get_schema(request=request))",
"def schema(self):",
"def routes(self, body):\n pass",
"def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)",
"def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)",
"def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)",
"def schema() -> None:\n pass",
"def test_openapi_schema(app, client):\n response = client.get(\"/swagger/\")\n assert response.status_code == 200\n assert len(json.loads(response.data)[\"paths\"]) > 0",
"def make_query(graph, ns, request_schema, response_schema):\n @graph.route(\"/v1/foo/get\", Operation.Query, ns)\n @qs(request_schema)\n @response(response_schema)\n def foo_query():\n \"\"\"\n My doc string\n \"\"\"\n request_data = load_query_string_data(request_schema)\n response_data = dict(\n result=True,\n value=request_data[\"required_value\"],\n )\n return dump_response_data(response_schema, response_data, Operation.Query.value.default_code)",
"def index():\n definition = {\n \"swagger\": \"2.0\",\n \"info\": {\n \"title\": flask.current_app.config.get(\"APPNAME\", \"Not specified\"),\n \"version\": flask.current_app.config.get(\"VERSION\", \"Not specified\"),\n },\n \"host\": request.host,\n \"schemes\": [\"http\"],\n \"consumes\": [\"application/json\"],\n \"produces\": [\"application/json\"],\n \"definitions\": registry._definitions,\n \"paths\": {}\n }\n\n rules = list(flask.current_app.url_map.iter_rules())\n for r in sorted(rules, key=operator.attrgetter('rule')):\n if r.rule.startswith('/static'):\n continue\n if r.endpoint in registry._skipped:\n continue\n\n rule = re.sub(r\"<(?:[_a-zA-Z0-9\\(\\)]+:)?([a-zA-Z0-9_]+)>\", r\"{\\1}\", r.rule)\n if rule not in definition['paths']:\n definition['paths'][rule] = {}\n\n methods_handled = r.methods & REST_METHODS\n handler = flask.current_app.view_functions.get(r.endpoint)\n doc = handler.func_doc\n\n if len(methods_handled) == 1:\n method = methods_handled.pop().lower()\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule][method] = validated\n except Exception:\n pass\n\n else:\n # We need to handle multi-method docstrings differently\n # because the documentation needs to define both, and\n # it's a higher level of the swagger hierarchy\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule].update(validated)\n except Exception:\n definition['paths'][rule] = {}\n\n resp = flask.make_response(\n json.dumps(definition, for_json=True))\n resp.headers.set(\"Content-type\", 'application/json')\n resp.headers.set(\"Access-Control-Allow-Origin\", \"*\")\n return resp",
"def schema_handler(self, schema):\n dict_for_render = schema.get('properties', {}).items()\n if schema.get('$ref', None):\n def_name = schema.get('$ref').split('/')[-1]\n dict_for_render = self.definitions[def_name].get('properties', {}).items()\n elif schema.get('properties', None) is None:\n return ''\n\n answer_dict = {}\n json_dict = {}\n for opt_name, opt_value in dict_for_render:\n var_type = opt_value.get('format', None) or opt_value.get('type', None) or 'object'\n json_name = self.indent + f':jsonparameter {var_type} {opt_name}:'\n json_dict[json_name] = self.get_json_props_for_response(var_type, opt_value)\n\n answer_dict[opt_name] = self.get_response_example(opt_name, var_type, opt_value)\n if var_type == 'string':\n answer_dict[opt_name] = answer_dict[opt_name].format(opt_name)\n\n self.write('')\n for line in json.dumps(answer_dict, indent=4).split('\\n'):\n self.write(line, self.indent_depth)\n\n self.write('')\n for json_param_name, json_param_value in json_dict.items():\n desc = f'{json_param_value[\"title\"]}{json_param_value[\"props_str\"]}' or 'None'\n self.write(json_param_name + ' ' + desc)",
"def test_swagger(self):\n response = self.client.get(\"/api/v1/swagger\", query_string=dict(validate_schema=True))\n assert_that(response.status_code, is_(equal_to(200)))\n swagger = loads(response.get_data().decode(\"utf-8\"))\n # we have the swagger docs endpoint too, which is implemented as a query.\n # ignore it here for now.\n del swagger[\"paths\"][\"/swagger/docs\"]\n assert_that(swagger[\"paths\"], is_(equal_to({\n \"/foo/get\": {\n \"get\": {\n \"description\": \"My doc string\",\n \"tags\": [\"foo\"],\n \"responses\": {\n \"default\": {\n \"description\": \"An error occurred\", \"schema\": {\n \"$ref\": \"#/definitions/Error\",\n }\n },\n \"200\": {\n \"description\": \"My doc string\",\n \"schema\": {\n \"$ref\": \"#/definitions/QueryResult\",\n }\n }\n },\n \"parameters\": [\n {\n \"in\": \"header\",\n \"name\": \"X-Response-Skip-Null\",\n \"required\": False,\n \"type\": \"string\",\n \"description\": \"Remove fields with null values from the response.\"\n },\n {\n \"required\": False,\n \"type\": \"string\",\n \"name\": \"optional_value\",\n \"in\": \"query\",\n },\n {\n \"required\": True,\n \"type\": \"string\",\n \"name\": \"required_value\",\n \"in\": \"query\",\n },\n ],\n \"operationId\": \"query\",\n }\n }\n })))",
"def schema(self):\n pass",
"def schema(self):\n # NOTE This is exactly the same as the other thing.\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }",
"def check_schema_existence_api_call(context, schema, version):\n check_schema_existence(context, schema, version, \"api\")",
"def route(self):\n pass",
"def check_schema(self, response):\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result",
"def test_successful_parse_undocumented_endpoints(monkeypatch) -> None:\n monkeypatch.setattr(django_settings, 'SWAGGER_TESTER', {'PATH': yml_path})\n monkeypatch.setattr('django_swagger_tester.static_schema.loader.LoadStaticSchema.get_schema', ret_schema)\n for url in ['/api/v1/cars/incorrect/', '/api/v1/trucks/incorrect/']:\n base = LoadStaticSchema(url, 'get', status_code=200)\n base.get_response_schema()",
"def index():\n endpoints = []\n for api_endpoint in app.url_map.iter_rules():\n if api_endpoint.rule.startswith('/api'):\n url = api_endpoint.rule\n methods = api_endpoint.methods\n endpoints.append((url, str(methods)))\n return jsonify(endpoints)",
"def _Dynamic_GetSchema(self, req, schema, request_id=None):\n # This is not used, but it is required for the method signature.\n del request_id\n\n app_str = req.app()\n self.__ValidateAppId(app_str)\n schema.set_more_results(False)",
"def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)",
"def _CreateRouterMethodSchemas(self, visiting: Set[str]) -> None:\n router_methods = self.router.__class__.GetAnnotatedMethods()\n for method_metadata in router_methods.values():\n args_type = method_metadata.args_type\n if args_type:\n self._CreateSchema(args_type, visiting)\n\n result_type = method_metadata.result_type\n if result_type:\n self._CreateSchema(result_type, visiting)",
"def process_api_declaration(self, resources, resource, context):\n pass",
"def api_index():\n func_list = {}\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__\n return jsonify(func_list)",
"def schema(self):\n raise NotImplementedError",
"def home():\n return(\n f\"Available Routes: <br/>\"\n\n f\"For Precipitation: /api/v1.0/precipitation<br/>\"\n f\"Returns Jsonify dictionary of dates and Precepitation<br/><br/>\"\n\n f\"For list of Stations: /api/v1.0/stations<br/>\"\n f\"Returns Jasonify list of stations <br/><br/>\"\n\n f\"For last year temperatures: /api/v1.0/tobs<br/>\"\n f\"Returns Jsonify dictionary of Temperature Observations for last year<br/><br/>\"\n\n f\"Temperature result from the date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures from given start date of dataset<br/><br/>\"\n\n f\"Temperature result from start date to end date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given date range\"\n\n )",
"def api(self) -> str:",
"def response(schema):\n def _response(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n if issubclass(schema, BaseModel):\n has_root = True if '__root__' in schema.__fields__ else False\n function_res = function(*args, **kwargs)\n\n if not function_res:\n if has_root is True:\n return jsonify([])\n return jsonify({})\n\n if type(function_res) == list:\n res = schema.parse_obj(function_res)\n else:\n res = schema.from_orm(function_res)\n\n res = res.dict()\n\n if has_root is True:\n return jsonify(res['__root__'])\n\n return jsonify(res)\n elif isinstance(schema, dict):\n return jsonify(schema)\n else:\n raise CustomException('invalid response type', code=400)\n\n return wrapper\n return _response",
"def express(self):\n raise NotImplementedError",
"def post(self, *args, **kwargs):\n json_data = request.get_json()\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n\n if 'data' not in json_data:\n raise BadRequest('/data', 'You must provide data with a \"data\" route node')\n if isinstance(json_data['data'], dict):\n if 'type' not in json_data['data']:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in json_data['data']:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if json_data['data']['type'] != related_type_:\n raise InvalidType('/data/type', 'The type field does not match the resource type')\n if isinstance(json_data['data'], list):\n for obj in json_data['data']:\n if 'type' not in obj:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in obj:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if obj['type'] != related_type_:\n raise InvalidType('/data/type', 'The type provided does not match the resource type')\n\n self.before_post(args, kwargs, json_data=json_data)\n\n obj_, updated = self._data_layer.create_relationship(json_data,\n model_relationship_field,\n related_id_field,\n kwargs)\n\n qs = QSManager(request.args, self.schema)\n includes = qs.include\n if relationship_field not in qs.include:\n includes.append(relationship_field)\n schema = compute_schema(self.schema, dict(), qs, includes)\n\n if updated is False:\n return '', 204\n\n result = schema.dump(obj_).data\n if result.get('links', {}).get('self') is not None:\n result['links']['self'] = request.path\n self.after_post(result)\n return result, 200"
] | [
"0.5833563",
"0.5763245",
"0.5693397",
"0.5690656",
"0.5690656",
"0.5690656",
"0.56629115",
"0.5609442",
"0.5589248",
"0.5574372",
"0.5463491",
"0.54347056",
"0.54218733",
"0.5400311",
"0.5400095",
"0.53853744",
"0.5339836",
"0.5331879",
"0.52693224",
"0.5267477",
"0.523229",
"0.5222513",
"0.5205916",
"0.5188325",
"0.51743853",
"0.5171802",
"0.5124183",
"0.51233464",
"0.5121673",
"0.5121636"
] | 0.5968774 | 0 |
Test the `clean_permission` expected behavior. | def test_clean_permission(self):
for permission, expected_string in [
(
PermissionA & PermissionB,
" **(** PermissionA **AND** PermissionB **)** ",
),
(
PermissionA | PermissionB,
" **(** PermissionA **OR** PermissionB **)** ",
),
(
~PermissionA,
" **(NOT** PermissionA **)** ",
),
(
PermissionA,
"PermissionA",
),
(
(PermissionA & PermissionB) | ~PermissionC,
(
" **(** **(** PermissionA **AND** PermissionB **)** "
"**OR** **(NOT** PermissionC **)** **)** "
),
),
]:
with self.subTest(permission=permission):
self.assertEqual(
# mimic `get_permissions` by calling permission
clean_permission(permission()),
expected_string,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))",
"def test_get_permissions(self):\n pass",
"async def permission_valid_check(cls):\n pass",
"def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)",
"def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)",
"def test_permissions(self):\n self.assert_('admin' in get_model_perms(Group))",
"def test_permission_remove_multiple_actions_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous WIKI_CREATE WIKI_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_change_permission(self):\r\n self.assertTrue(self.creator_admin.has_change_permission(self.request))\r\n\r\n self.request.user = self.user\r\n self.assertFalse(self.creator_admin.has_change_permission(self.request))",
"def test_permission_remove_all_actions_for_user(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous *')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_check_permissions(mock_list_permissions, mock_dry_permissions):\n view = views.ListEntryListView()\n\n view.check_permissions(None)\n\n assert mock_dry_permissions.call_count == 1\n assert mock_list_permissions.call_count == 1",
"def test_remove_facility_pt4(self):\n with self.assertRaises(InvalidPermission):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility', obj=[]))",
"def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)",
"def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)",
"def test_permissions(self):\n \n from pages.permissions import PagePermission\n admin = User.objects.get(username='admin')\n page = self.new_page()\n pp = PagePermission(user=page.author)\n self.assertTrue(pp.check('change', page=page, method='GET'))\n self.assertTrue(pp.check('change', page=page, method='POST'))\n \n staff = User.objects.get(username='staff')\n pp = PagePermission(user=staff)\n # weird because nonstaff?\n self.assertTrue(pp.check('change', page=page, method='GET',\n lang='en-us'))\n self.assertFalse(pp.check('change', page=page, method='POST',\n lang='en-us'))\n\n self.assertFalse(pp.check('delete', page=page, method='POST',\n lang='en-us'))\n self.assertFalse(pp.check('add', page=page, method='POST',\n lang='en-us'))\n self.assertFalse(pp.check('freeze', page=page, method='POST',\n lang='en-us'))\n\n self.assertFalse(pp.check('doesnotexist', page=page, method='POST',\n lang='en-us'))",
"def test_wrong_permission(self):\n with self.assertRaises(InvalidPermissionStringError):\n client_has_permission('test', 'asdf')",
"def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"[email protected]\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)",
"def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))",
"def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))",
"def test_filter_user_permissions(self):\n data = {\n \"users\": {\n 1: \"view\",\n 2: \"NONE\",\n }\n }\n\n with self.assertRaises(exceptions.PermissionDenied):\n check_user_permissions(data, 1)\n\n with self.assertRaises(exceptions.PermissionDenied):\n check_user_permissions(data, 2)\n\n check_user_permissions(data, 3)",
"def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)",
"def test_post_no_permission(self):\n self.user.user_permissions.clear()\n response = self._post()\n self.assertRedirectsToLogin(response)\n self._assert_no_change()",
"def test_only_default_perms(self):\n p1 = Permission.objects.get(codename='eat_spam')\n # Change the codename so that clean_permissions has something to clean:\n p1.codename = 'eat_lovelyspam'\n p1.save()\n # Add a permission that isn't a default permission of 'Spam':\n ct = ContentType.objects.get_for_model(Spam)\n p2 = Permission.objects.create(\n name='Can reject spam', codename='reject_spam', content_type=ct\n )\n stream = StringIO()\n with self.patcher(new=Mock(return_value=[p1, p2])):\n utils.clean_permissions(stream)\n self.assertTrue(stream.getvalue())\n p1.refresh_from_db()\n self.assertEqual(p1.codename, 'eat_spam', msg=\"p1.codename should have been reset\")\n p2.refresh_from_db()\n self.assertEqual(p2.codename, 'reject_spam', msg=\"p2.codename should have not been altered\")",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_user_permission_change_and_clear(url):\n test_clear(url)\n resp = requests.post(url + 'auth/register', json={\n 'email': '[email protected]',\n 'password': '123123',\n 'name_first': 'Bruce',\n 'name_last': 'Wayne',\n })\n user = resp.json()\n\n token = user['token']\n u_id = user['u_id']\n permission_id = 123 #other.PERM_ID_FLOCKR_ADMIN\n \n resp = requests.post(url + 'admin/userpermission/change', json={\n 'token': token,\n 'u_id': u_id,\n 'permission_id': permission_id,\n })\n resp = resp.json()\n assert resp == {}\n\n resp = requests.post(url + 'admin/userpermission/change', json={\n 'token': token,\n 'u_id': u_id,\n 'permission_id': permission_id,\n })\n resp = resp.json()\n assert resp == {}\n \n resp = requests.post(url + 'admin/userpermission/change', json={\n 'token': token,\n 'u_id': u_id + 1,\n 'permission_id': permission_id,\n })\n resp = resp.json()\n assert resp['code'] == 400",
"def test_change_permission_with_any(self):\n permission_logic = self.permission_logic_class(\n any_permission=True\n )\n add_permission_logic(Article, permission_logic)\n self._auto_test_permission('change')\n self._auto_test_permission('change', obj=True)\n remove_permission_logic(Article, permission_logic)",
"def test_no_permission(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n req.user = Mock()\n req.user.has_perm.return_value = False\n\n res = self.view(\n req,\n decorator=self.actions(\n self.mock_model, [\"doit\"], permission=\"do_things\")\n )\n\n self.assertEqual(res.status_code, 403)\n req.user.has_perm.assert_called_with(\"do_things\")"
] | [
"0.69687974",
"0.6870181",
"0.6782665",
"0.6718146",
"0.6718146",
"0.6688817",
"0.6600725",
"0.6581123",
"0.6561051",
"0.6554503",
"0.6521542",
"0.65094894",
"0.65094894",
"0.64864784",
"0.6462128",
"0.64617753",
"0.64537555",
"0.64529544",
"0.64493746",
"0.64291537",
"0.6425238",
"0.6408316",
"0.6407004",
"0.63762724",
"0.63762724",
"0.63762724",
"0.63762724",
"0.63631314",
"0.63415354",
"0.63371754"
] | 0.8311579 | 0 |
Test the `extract_permission_docstring` expected behavior. | def test_extract_permission_docstring(self):
for permission, expected_dict in [
(
PermissionA & PermissionB,
{
"PermissionA": "Permission A.",
"PermissionB": "Permission B.",
},
),
(
PermissionA | PermissionB,
{
"PermissionA": "Permission A.",
"PermissionB": "Permission B.",
},
),
(
~PermissionA,
{
"PermissionA": "Permission A.",
},
),
(
PermissionA,
{
"PermissionA": "Permission A.",
},
),
(
(PermissionA & PermissionB) | ~PermissionA,
{
"PermissionA": "Permission A.",
"PermissionB": "Permission B.",
},
),
(
(PermissionA & PermissionB) | ~PermissionC,
{
"PermissionA": "Permission A.",
"PermissionB": "Permission B.",
"PermissionC": "Permission C.",
},
),
]:
with self.subTest(permission=permission):
self.assertEqual(
# mimic `get_permissions` by calling permission
extract_permission_docstring(permission()),
expected_dict,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )",
"def test_get_permissions(self):\n pass",
"def test_clean_permission(self):\n for permission, expected_string in [\n (\n PermissionA & PermissionB,\n \" **(** PermissionA **AND** PermissionB **)** \",\n ),\n (\n PermissionA | PermissionB,\n \" **(** PermissionA **OR** PermissionB **)** \",\n ),\n (\n ~PermissionA,\n \" **(NOT** PermissionA **)** \",\n ),\n (\n PermissionA,\n \"PermissionA\",\n ),\n (\n (PermissionA & PermissionB) | ~PermissionC,\n (\n \" **(** **(** PermissionA **AND** PermissionB **)** \"\n \"**OR** **(NOT** PermissionC **)** **)** \"\n ),\n ),\n ]:\n with self.subTest(permission=permission):\n self.assertEqual(\n # mimic `get_permissions` by calling permission\n clean_permission(permission()),\n expected_string,\n )",
"def docstring_hack():\n pass",
"def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)",
"def check_docstring():\n num_chars = 50\n\n def inner_doc(fn):\n if len(\" \".join(fn.__doc__.split())) >= num_chars:\n return True\n else:\n return False\n\n return inner_doc",
"def test__get_doc():\n docstring = util._get_doc(\"midgard\")\n assert isinstance(docstring, str) and len(docstring) > 0",
"def test__parse_allow(input_data):\n output = parse_allow(input_data)\n vampytest.assert_instance(output, Permission)\n return output",
"def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)",
"def test_0_check_xc_docstring(self):\n self.banner(\"Checking the docstring on your extra credit.\") \n filename = self.find_file('project9_xc.py')\n self.check_docstring(filename)",
"def test_uses_wraps(self):\n @self.actions(\"ctx_name\", [])\n def myview(request, some_id):\n \"\"\"docstring\"\"\"\n\n self.assertEqual(myview.func_name, \"myview\")\n self.assertEqual(myview.func_doc, \"docstring\")",
"def test_doc():\n pass",
"def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)",
"def test_permission_import_ok(self):\n test_name = sys._getframe().f_code.co_name\n user = u'test_user\\u0250'\n self._execute('permission add ' + user + ' WIKI_VIEW')\n self._execute('permission add ' + user + ' TICKET_VIEW')\n rv, output = self._execute('permission export')\n self._execute('permission remove ' + user + ' *')\n rv, output = self._execute('permission import', input=output)\n self.assertEqual(0, rv)\n self.assertEqual('', output)\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_buildWithPolicy(self):\n stdout = StringIO()\n self.patch(sys, 'stdout', stdout)\n docstring = \"text in docstring\"\n\n projectRoot = FilePath(self.mktemp())\n packagePath = projectRoot.child(\"twisted\")\n packagePath.makedirs()\n packagePath.child(\"__init__.py\").setContent(\n \"def foo():\\n\"\n \" '%s'\\n\" % (docstring,))\n packagePath.child(\"_version.py\").setContent(\n genVersion(\"twisted\", 1, 0, 0))\n outputPath = FilePath(self.mktemp())\n\n script = BuildAPIDocsScript()\n script.buildAPIDocs(projectRoot, outputPath)\n\n indexPath = outputPath.child(\"index.html\")\n self.assertTrue(\n indexPath.exists(),\n \"API index %r did not exist.\" % (outputPath.path,))\n self.assertIn(\n '<a href=\"http://twistedmatrix.com/\">Twisted</a>',\n indexPath.getContent(),\n \"Project name/location not in file contents.\")\n\n twistedPath = outputPath.child(\"twisted.html\")\n self.assertTrue(\n twistedPath.exists(),\n \"Package documentation file %r did not exist.\"\n % (twistedPath.path,))\n self.assertIn(\n docstring, twistedPath.getContent(),\n \"Docstring not in package documentation file.\")\n #Here we check that it figured out the correct version based on the\n #source code.\n self.assertIn(\n '<a href=\"http://twistedmatrix.com/trac/browser/tags/releases/'\n 'twisted-1.0.0/twisted\">View Source</a>',\n twistedPath.getContent())\n\n self.assertEqual(stdout.getvalue(), '')",
"def testSummaryDOCstr(self):\n pass",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_user_func_docstrings(self):\n for func in self.student_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))",
"def test_missing_docstring(a, b): # noqa: D213, D407",
"def main_docstring():",
"def documentation_only():\n pass",
"def test_file_storage_module_docstring(self):\n self.assertIsNot(file_storage.__doc__, None,\n \"file_storage.py needs a docstring\")\n self.assertTrue(len(file_storage.__doc__) >= 1,\n \"file_storage.py needs a docstring\")",
"def test_permission_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)",
"def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)",
"def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)",
"def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)"
] | [
"0.7750301",
"0.63256484",
"0.61832505",
"0.6159631",
"0.60564035",
"0.59957445",
"0.5964562",
"0.5904991",
"0.59038615",
"0.58144975",
"0.58118314",
"0.58020514",
"0.5793214",
"0.5759372",
"0.5753072",
"0.57528716",
"0.57407355",
"0.57407355",
"0.57407355",
"0.57407355",
"0.5738862",
"0.5738327",
"0.57380223",
"0.5725937",
"0.571856",
"0.5713811",
"0.5702653",
"0.5700503",
"0.5700503",
"0.5696722"
] | 0.8519844 | 0 |
Test the `format_permissions_and_docstring` expected behavior. | def test_format_permissions_and_docstring(self):
self.assertEqual(
format_permissions_and_docstring(
["permission formatted string"],
{"some": "docstring"},
),
(
"## Permissions\n\n"
"permission formatted string\n"
"### Permission description\n\n"
"- **some** : docstring"
),
)
self.assertEqual(
format_permissions_and_docstring(
["permission formatted string", "another permission"],
{"some": "docstring", "another": "docstring"},
),
(
"## Permissions\n\n"
"- permission formatted string\n"
"- another permission\n"
"### Permission description\n\n"
"- **some** : docstring\n"
"- **another** : docstring"
),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_extract_permission_docstring(self):\n for permission, expected_dict in [\n (\n PermissionA & PermissionB,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n },\n ),\n (\n PermissionA | PermissionB,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n },\n ),\n (\n ~PermissionA,\n {\n \"PermissionA\": \"Permission A.\",\n },\n ),\n (\n PermissionA,\n {\n \"PermissionA\": \"Permission A.\",\n },\n ),\n (\n (PermissionA & PermissionB) | ~PermissionA,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n },\n ),\n (\n (PermissionA & PermissionB) | ~PermissionC,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n \"PermissionC\": \"Permission C.\",\n },\n ),\n ]:\n with self.subTest(permission=permission):\n self.assertEqual(\n # mimic `get_permissions` by calling permission\n extract_permission_docstring(permission()),\n expected_dict,\n )",
"def test_get_permissions(self):\n pass",
"def test_clean_permission(self):\n for permission, expected_string in [\n (\n PermissionA & PermissionB,\n \" **(** PermissionA **AND** PermissionB **)** \",\n ),\n (\n PermissionA | PermissionB,\n \" **(** PermissionA **OR** PermissionB **)** \",\n ),\n (\n ~PermissionA,\n \" **(NOT** PermissionA **)** \",\n ),\n (\n PermissionA,\n \"PermissionA\",\n ),\n (\n (PermissionA & PermissionB) | ~PermissionC,\n (\n \" **(** **(** PermissionA **AND** PermissionB **)** \"\n \"**OR** **(NOT** PermissionC **)** **)** \"\n ),\n ),\n ]:\n with self.subTest(permission=permission):\n self.assertEqual(\n # mimic `get_permissions` by calling permission\n clean_permission(permission()),\n expected_string,\n )",
"def test_custom_permissions(self, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\"])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\"])\n self._empty_notebook(join(course_dir, \"source\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"generate_assignment\", \"ps1\"])\n\n self._empty_notebook(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"autograde\", \"ps1\"])\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--GenerateFeedback.permissions=444\"])\n\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\"))\n assert self._get_permissions(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\")) == '444'",
"def docstring_hack():\n pass",
"def test_permissions(self):\n self.assert_('admin' in get_model_perms(Group))",
"def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)",
"def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)",
"def test_check_permissions(mock_list_permissions, mock_dry_permissions):\n view = views.ListEntryListView()\n\n view.check_permissions(None)\n\n assert mock_dry_permissions.call_count == 1\n assert mock_list_permissions.call_count == 1",
"def test_buildWithPolicy(self):\n stdout = StringIO()\n self.patch(sys, 'stdout', stdout)\n docstring = \"text in docstring\"\n\n projectRoot = FilePath(self.mktemp())\n packagePath = projectRoot.child(\"twisted\")\n packagePath.makedirs()\n packagePath.child(\"__init__.py\").setContent(\n \"def foo():\\n\"\n \" '%s'\\n\" % (docstring,))\n packagePath.child(\"_version.py\").setContent(\n genVersion(\"twisted\", 1, 0, 0))\n outputPath = FilePath(self.mktemp())\n\n script = BuildAPIDocsScript()\n script.buildAPIDocs(projectRoot, outputPath)\n\n indexPath = outputPath.child(\"index.html\")\n self.assertTrue(\n indexPath.exists(),\n \"API index %r did not exist.\" % (outputPath.path,))\n self.assertIn(\n '<a href=\"http://twistedmatrix.com/\">Twisted</a>',\n indexPath.getContent(),\n \"Project name/location not in file contents.\")\n\n twistedPath = outputPath.child(\"twisted.html\")\n self.assertTrue(\n twistedPath.exists(),\n \"Package documentation file %r did not exist.\"\n % (twistedPath.path,))\n self.assertIn(\n docstring, twistedPath.getContent(),\n \"Docstring not in package documentation file.\")\n #Here we check that it figured out the correct version based on the\n #source code.\n self.assertIn(\n '<a href=\"http://twistedmatrix.com/trac/browser/tags/releases/'\n 'twisted-1.0.0/twisted\">View Source</a>',\n twistedPath.getContent())\n\n self.assertEqual(stdout.getvalue(), '')",
"def test_missing_docstring(a, b): # noqa: D213, D407",
"def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")",
"def test_user_func_docstrings(self):\n for func in self.student_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))",
"def test_descriptions_render_correctly(self):\n # help text in fields\n self.assertContains(\n self.response, \"<td>first name - The person's first name</td>\"\n )\n self.assertContains(\n self.response, \"<td>last name - The person's last name</td>\"\n )\n\n # method docstrings\n self.assertContains(self.response, \"<p>Get the full name of the person</p>\")\n\n link = '<a class=\"reference external\" href=\"/admindocs/models/%s/\">%s</a>'\n markup = \"<p>the related %s object</p>\"\n company_markup = markup % (link % (\"admin_docs.company\", \"admin_docs.Company\"))\n\n # foreign keys\n self.assertContains(self.response, company_markup)\n\n # foreign keys with help text\n self.assertContains(self.response, \"%s\\n - place of work\" % company_markup)\n\n # many to many fields\n self.assertContains(\n self.response,\n \"number of related %s objects\"\n % (link % (\"admin_docs.group\", \"admin_docs.Group\")),\n )\n self.assertContains(\n self.response,\n \"all related %s objects\"\n % (link % (\"admin_docs.group\", \"admin_docs.Group\")),\n )\n\n # \"raw\" and \"include\" directives are disabled\n self.assertContains(\n self.response,\n \"<p>"raw" directive disabled.</p>\",\n )\n self.assertContains(\n self.response, \".. raw:: html\\n :file: admin_docs/evilfile.txt\"\n )\n self.assertContains(\n self.response,\n \"<p>"include" directive disabled.</p>\",\n )\n self.assertContains(self.response, \".. include:: admin_docs/evilfile.txt\")\n out = self.docutils_stderr.getvalue()\n self.assertIn('\"raw\" directive disabled', out)\n self.assertIn('\"include\" directive disabled', out)",
"def test_permission_import_ok(self):\n test_name = sys._getframe().f_code.co_name\n user = u'test_user\\u0250'\n self._execute('permission add ' + user + ' WIKI_VIEW')\n self._execute('permission add ' + user + ' TICKET_VIEW')\n rv, output = self._execute('permission export')\n self._execute('permission remove ' + user + ' *')\n rv, output = self._execute('permission import', input=output)\n self.assertEqual(0, rv)\n self.assertEqual('', output)\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def docstring_format(*values):\n\n def _decorator_(function):\n function.__doc__ = function.__doc__.format(*values).replace('_', '\\_')\n return function\n\n return _decorator_",
"def test_user_module_docstring(self):\n self.assertIsNot(user.__doc__, None,\n \"user.py needs a docstring\")\n self.assertTrue(len(user.__doc__) >= 1,\n \"user.py needs a docstring\")",
"def test_buildWithPolicy(self):\n stdout = BytesIO()\n self.patch(sys, \"stdout\", stdout)\n docstring = \"text in docstring\"\n\n projectRoot = FilePath(self.mktemp())\n packagePath = projectRoot.child(\"twisted\")\n packagePath.makedirs()\n packagePath.child(\"__init__.py\").setContent(\n \"def foo():\\n\" \" '{}'\\n\".format(docstring).encode()\n )\n packagePath.child(\"_version.py\").setContent(\n genVersion(\"twisted\", 1, 0, 0).encode()\n )\n outputPath = FilePath(self.mktemp())\n\n script = BuildAPIDocsScript()\n script.buildAPIDocs(projectRoot, outputPath)\n\n indexPath = outputPath.child(\"index.html\")\n self.assertTrue(\n indexPath.exists(), \"API index {} did not exist.\".format(outputPath.path)\n )\n self.assertIn(\n '<a href=\"http://twistedmatrix.com/\">Twisted</a>',\n indexPath.getContent().decode(),\n \"Project name/location not in file contents.\",\n )\n\n twistedPath = outputPath.child(\"twisted.html\")\n self.assertTrue(\n twistedPath.exists(),\n \"Package documentation file {!r} did not exist.\".format(twistedPath.path),\n )\n self.assertIn(\n docstring,\n twistedPath.getContent().decode(),\n \"Docstring not in package documentation file.\",\n )\n # Here we check that it figured out the correct version based on the\n # source code.\n self.assertIn(\n '<a href=\"https://github.com/twisted/twisted/tree/'\n 'twisted-1.0.0/src/twisted/__init__.py\">(source)</a>',\n twistedPath.getContent().decode(),\n )\n\n self.assertEqual(stdout.getvalue(), b\"\")",
"def check_docstring():\n num_chars = 50\n\n def inner_doc(fn):\n if len(\" \".join(fn.__doc__.split())) >= num_chars:\n return True\n else:\n return False\n\n return inner_doc",
"def test_permission_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_user_func_docstrings(self):\n for func in self.user_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))",
"def testSummaryDOCstr(self):\n pass",
"def test_fs_func_docstrings(self):\n for func in self.fs_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))",
"def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)",
"def test_0_check_docstring(self):\n self.banner(\"Looking for your program's docstring.\")\n filename = self.find_file('project9.py')\n self.assertIsNotNone(filename, \"I can't find your project file (project9.py)\")\n self.check_docstring(filename)",
"def test__parse_allow(input_data):\n output = parse_allow(input_data)\n vampytest.assert_instance(output, Permission)\n return output",
"def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)",
"def consistent_documentation():\n\n return 3",
"def documentation_only():\n pass",
"def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)"
] | [
"0.77590525",
"0.63539076",
"0.6225805",
"0.6169162",
"0.6058129",
"0.59855884",
"0.596356",
"0.5924213",
"0.58999294",
"0.5832375",
"0.5824305",
"0.58180207",
"0.5810781",
"0.58056927",
"0.58020186",
"0.5790352",
"0.5752622",
"0.57435054",
"0.5739238",
"0.57340395",
"0.57254606",
"0.5718458",
"0.57050836",
"0.5704162",
"0.5689131",
"0.5667198",
"0.5664994",
"0.56321466",
"0.5616464",
"0.55917954"
] | 0.9066155 | 0 |
Base command for SpoilerChannel. | async def spoilerchannel(self, ctx):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command(self):\n raise NotImplementedError",
"def send_command(self, cmd, shell=None, silent=False):",
"def command():\n pass",
"def send_command_line(self, command):\n raise NotImplementedError",
"def _setup_command(self):\r\n raise NotImplementedError",
"def _command(self, *cmd, handler=None):",
"def wrapper(callback):\n self.commands[name] = SlashCommand(callback, name, description, options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)",
"def shell(self, cmd):\n raise NotImplementedError",
"def cli():\n pass # do nothing here, it just defines the name for other subcommands",
"async def channel(self, ctx):\n pass",
"def cmd(self):",
"def __init__(self, vanillaCommand, usage):\n super(VanillaCommandWrapper, self).__init__(vanillaCommand.b())\n # PAIL: rename\n self.vanillaCommand = vanillaCommand\n self.setPermission(\"minecraft.command.\" + vanillaCommand.getCommand())",
"def __init__(self):\n ElixirCommand.__init__(self)",
"def commands():",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def channel(self):\n raise NotImplementedError",
"def do_command(self, args):\n pass",
"def get_command(self, player):\n return super().get_command(player)",
"def cmd(self, message):\n pass",
"def additional_command(self):\n pass",
"def main(connection, info, args, conf) :\r\n connection.rawsend(\"KICK %s %s :%s\\n\" % (info[\"channel\"], args[1], \" \".join(args[2:])))",
"def command_short():\n pass",
"def shell(self, **options):\n pass",
"def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n base_default_permission: bool = True,\n base_permissions: typing.Dict[int, list] = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None,\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n guild_ids = guild_ids if guild_ids else []\n if not base_permissions:\n base_permissions = {}\n\n def wrapper(cmd):\n decorator_permissions = getattr(cmd, \"__permissions__\", None)\n if decorator_permissions:\n base_permissions.update(decorator_permissions)\n\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n if guild_ids and not all(isinstance(item, int) for item in guild_ids):\n raise IncorrectGuildIDType(\n f\"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed.\"\n )\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"guild_ids\": guild_ids.copy(),\n \"api_options\": [],\n \"default_permission\": base_default_permission,\n \"api_permissions\": base_permissions,\n \"connector\": {},\n \"has_subcommands\": True,\n }\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(base, _cmd, subcommand_group, name or cmd.__name__, _sub)\n\n return wrapper",
"async def szuru(self, ctx: commands.Context):\n pass",
"def _channel_invoke_shell(self) -> None:\n self._shell = True\n self.channel.shell()",
"def Shell(self, cmd): # pylint: disable=invalid-name\n raise NotImplementedError"
] | [
"0.6149058",
"0.61001325",
"0.60898775",
"0.58528775",
"0.581973",
"0.5755966",
"0.57535255",
"0.5715783",
"0.5669034",
"0.5654281",
"0.56286776",
"0.56231105",
"0.56219894",
"0.56180894",
"0.5612029",
"0.5612029",
"0.5612029",
"0.5612029",
"0.55656666",
"0.54957414",
"0.54930294",
"0.54758036",
"0.543079",
"0.54153657",
"0.53947574",
"0.5381985",
"0.53774756",
"0.53647983",
"0.5339366",
"0.53214"
] | 0.6587875 | 0 |
Add a channel to the list of spoiler channels. | async def add(self, ctx, channel: discord.TextChannel):
config = await self.config.guild(ctx.guild).channels()
if channel.id in config:
return await ctx.send("This channel is already a spoiler channel.")
await ctx.send("Channel added to the spoiler channel list.")
config.append(channel.id)
await self.config.guild(ctx.guild).channels.set(config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_channel(self, channel):\n if channel in self.clients:\n return False\n self.clients[channel] = []\n return True",
"def add_channel(self, channel):\n self._channels[channel.fileno] = channel\n self._poller.add(channel.fileno, channel._events)",
"async def addchannel(self, ctx, channel: discord.TextChannel):\n guild = ctx.message.guild\n excluded_channels = await self.config.guild(guild).excluded_channels()\n\n for excluded_channel in excluded_channels:\n if excluded_channel == channel.id:\n await ctx.send(\n \"%s already added to channel exclusion list\" % channel.name\n )\n return\n\n excluded_channels.append(channel.id)\n await self.config.guild(guild).excluded_channels.set(excluded_channels)\n await ctx.send(\"%s added to channel exclusion list\" % channel.name)",
"def addchan(channel):",
"def add(self, name, chan_id):\r\n self.channels.append(Channel(name, chan_id))",
"def addChannel(self, channel):\n c = SubElement(self.root, 'channel')\n self.setattr(c, 'id', channel['id'])\n\n # Display Name\n for display_name in channel['display-name']:\n dn = SubElement(c, 'display-name')\n self.settext(dn, display_name)\n\n # Icon\n if 'icon' in channel:\n self.seticons(c, channel['icon'])\n\n # URL\n if 'url' in channel:\n for url in channel['url']:\n u = SubElement(c, 'url')\n self.settext(u, url, with_lang=False)",
"def add_channel(self, channel: discord.Channel):\n to_exec = \"INSERT INTO channel(channel_id, server_id, enabled) VALUES(%s, %s, 1)\"\n self.__cursor.execute(to_exec, (str(channel.id), str(channel.server.id)))\n self.__connection.commit()",
"async def remove(self, ctx, channel: discord.TextChannel):\n config = await self.config.guild(ctx.guild).channels()\n if not channel.id in config:\n return await ctx.send(\"This channel is not a spoiler channel.\")\n config.remove(channel.id)\n await self.config.guild(ctx.guild).channels.set(config)\n await ctx.send(\"Channel removed from the spoiler channel list.\")",
"def _new_channel_added(self, channel_name):\r\n if self._match_filter(channel_name):\r\n self._add_filter_channel(channel_name)",
"async def spoilerchannel(self, ctx):\n pass",
"def add_channel(self, channel):\n self.task.ai_channels.add_ai_voltage_chan(channel)",
"async def add(self, ctx, *, channel: discord.VoiceChannel):\n auto_channels = await self.config.guild(ctx.guild).auto_channels()\n if auto_channels is None:\n auto_channels = []\n auto_channels.append(channel.id)\n await self.config.guild(ctx.guild).auto_channels.set(auto_channels)\n await ctx.send(\n _(\"Startchannel used for automatic voicechannels added: {channel}\").format(\n channel=channel.name\n )\n )",
"def add_channels(self, channels):\n for i in range(len(channels)):\n self.task.ai_channels.add_ai_voltage_chan(channels[i])",
"def test_add_channel_adds_channel(self):\n channel = Mock()\n with mock.patch.object(self.notifier, \"_silenced_channels\") as silenced_channels:\n self.notifier.add_channel(channel)\n silenced_channels.__setitem__.assert_called_with(channel, self.notifier._current_loop)",
"def set_chanlist(self,loc,newchannel):\n # TODO, add checks and illegal arguments to protect Pi\n # TODO actually add the functionality\n # self.chanlist(loc) = newchannel",
"def register(self, cli: Client, channel: str) -> None:\n subscribers = self._channels_to_subscribers.get(channel, [])\n subscribers.append(cli)\n self._channels_to_subscribers[channel] = subscribers",
"async def add(self, ctx, channel : discord.Channel):\r\n \r\n server = ctx.message.server\r\n if server.id not in self.set:\r\n self.server_init(server)\r\n await self.bot.say(\"Server initialized!\")\r\n if channel.id in self.set[server.id][\"channels\"]:\r\n await self.bot.say(\":x: This channel is already a counting channel!\")\r\n return\r\n self.set[server.id][\"channels\"][channel.id] = {\"last\": None, \"count\": 0, \"goal\": 0, \"strict\": False}\r\n self.save()\r\n await self.bot.edit_channel(channel,topic = \"Next message must start with 1\")\r\n await self.bot.say(\"Channel added!\")",
"async def cmd_galtogglechannel(self, ctx, channel):\n\n # ===== GET CHANNEL ID\n try:\n ch_id = int(channel.lower().replace('<').replace('>').replace('#').strip())\n\n except ValueError:\n ctx.send_help('galtogglechannel', delete_after=Gallery.delete_after)\n \n ret_msg=\"\"\n\n # ===== REMOVE CHANNEL ID FROM LIST\n if ch_id in self.cogset['channel_ids']:\n self.cogset['channel_ids'].remove(ch_id)\n\n ret_msg = f\"<#{ch_id}> is no longer a gallery channel.\"\n\n ###=== DELETE LOGGED MESSAGES FROM DATABASE\n await self.db.execute(pgCmds.DEL_GALL_MSGS_FROM_CH, ch_id, self.cogset['guild_id'])\n\n # ===== ADD CHANNEL ID TO LIST\n else:\n self.cogset['channel_ids'] = list(set(self.cogset['channel_ids']) + {ch_id})\n ret_msg = f\"<#{ch_id}> has been made a gallery channel.\"\n\n # ===== SAVE SETTINGS \n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== END\n await ctx.channel.send(content=ret_msg, delete_after=Gallery.delete_after)\n return",
"async def ccdeny(self, ctx, channel: discord.TextChannel):\n channel_list = await self.config.guild(ctx.guild).channel_deny()\n if channel.id not in channel_list:\n channel_list.append(channel.id)\n await self.config.guild(ctx.guild).channel_deny.set(channel_list)\n await ctx.send(f\"{channel.mention} was added to the deny list for chatchart.\")",
"def new_channel(self, *args, **kwargs):\n logger.debug('creating channel -> connection.channel(%r, %r)' % (args, kwargs))\n if self.enabled:\n channel = self.connection.channel(*args, **kwargs)\n self._channels.append(channel)\n return channel\n else:\n return None",
"def new_channel(session, channel):\n session.create_chan_event.clear()\n key = b64encode(messaging.common.pkc_encrypt(get_random_bytes(\n config.SECURE_CHANNEL_KEY_SIZE_BYTES), session.encryption_key)).decode()\n msg = {\n kk.typ: kk.add_user,\n kk.inviter: session.user,\n kk.invitee: session.user,\n kk.chid: channel,\n kk.chkey: key\n }\n msg[kk.signature] = b64encode(\n messaging.common.create_msg_sig(session, msg)).decode()\n messaging.common.send_msg(session.sock, msg, key=session.symkey)",
"async def votechannel_add(self, ctx, channel: discord.TextChannel, reaction_type=None):\n if reaction_type is None:\n channel_type = \"voting\"\n elif reaction_type.lower() in [\"rate\", \"rating\"]:\n channel_type = \"rating\"\n elif reaction_type.lower() in [\"vote\", \"voting\"]:\n channel_type = \"voting\"\n else:\n raise exceptions.Warning(f\"Unknown reaction type `{reaction_type}`\", help_footer=True)\n\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO voting_channel (guild_id, channel_id, voting_type)\n VALUES (%s, %s, %s)\n ON DUPLICATE KEY UPDATE\n voting_type = VALUES(voting_type)\n \"\"\",\n ctx.guild.id,\n channel.id,\n channel_type,\n )\n self.bot.cache.votechannels.add(channel.id)\n await util.send_success(\n ctx, f\"{channel.mention} is now a voting channel of type `{channel_type}`\"\n )",
"def _onaddchannel(self):\n\n self._fileinfolayout.insertWidget(\n self._fileinfolayout.count() - 1,\n ChannelInfoWidget(self._channels)\n )",
"def _add_channel(self, chan_id: str):\n if not chan_id in self._messages:\n self._messages[chan_id] = {}\n else:\n raise ValueError(\"ReactionListener tried to create space for an already listened channel!\")",
"def connect(self, channel, a, b):\n a.sender.channels.append(channel)\n channel.receivers.append(b)",
"async def blacklist_channel(\n self, ctx: commands.Context, channel: discord.TextChannel\n ):\n if str(channel.id) in self.channel_blacklist:\n self.channel_blacklist.remove(str(channel.id))\n await self._update_db()\n removed = True\n else:\n self.channel_blacklist.append(str(channel.id))\n await self._update_db()\n removed = False\n\n await ctx.send(f\"{'Un' if removed else None}Blacklisted {channel.mention}\")\n return",
"def list_channels(title=None, uri=None):\r\n # Set plugin category. It is displayed in some skins as the name\r\n # of the current section.\r\n xbmcplugin.setPluginCategory(_handle, 'Channels')\r\n\r\n # Set plugin content. It allows Kodi to select appropriate views\r\n # for this type of content -- didn't use this since it's not working well\r\n # with the video item.\r\n # xbmcplugin.setContent(_handle, 'videos')\r\n\r\n # Get channels.\r\n result = _get_data(uri or 'https://api.hotstar.com/o/v1/channel/list?perPage=1000')\r\n # Iterate through categories\r\n\r\n for channel in result['items']:\r\n # Channel JSON structure.\r\n # {\r\n # \"title\": \"Star Vijay\",\r\n # \"categoryId\": 748,\r\n # \"contentId\": 824,\r\n # \"uri\": \"https://api.hotstar.com/o/v1/channel/detail?id=12&avsCategoryId=748&contentId=824&offset=0&size=20\r\n # &pageNo=1&perPage=20\",\r\n # \"description\": \"A Tamil general entertainment channel with family drama, comedy and reality shows.\",\r\n # \"assetType\": \"CHANNEL\",\r\n # \"genre\": [\r\n # \"LiveTV\"\r\n # ],\r\n # \"lang\": [\r\n # \"Tamil\"\r\n # ],\r\n # \"showCnt\": 137\r\n # },\r\n #\r\n _add_directory_item(\r\n parent_title=title,\r\n title=channel['title'],\r\n content_id=channel['contentId'],\r\n genre=channel.get('genre'),\r\n description=channel['description'],\r\n uri=channel['uri'],\r\n action='programs',\r\n image=get_thumbnail_image(channel)\r\n )\r\n\r\n if not uri:\r\n # Add Sports\r\n _add_directory_item(\r\n title='HotStar Sports',\r\n description='Sports',\r\n content_id=821,\r\n genre='Sports',\r\n uri='https://api.hotstar.com/o/v1/page/1327?tas=30',\r\n action='program_details',\r\n country_code='CA'\r\n )\r\n # Movies\r\n _add_directory_item(\r\n title='HotStar Movies',\r\n content_id=821,\r\n genre='Movies',\r\n description='Movies',\r\n uri='https://api.hotstar.com/o/v1/page/1328?tas=30',\r\n action='program_details',\r\n country_code='CA'\r\n )\r\n\r\n # TV\r\n _add_directory_item(\r\n title='HotStar TV',\r\n content_id=821,\r\n description='TV',\r\n genre='TV',\r\n uri='https://api.hotstar.com/o/v1/page/1329?tas=30',\r\n action='program_details',\r\n country_code='CA'\r\n )\r\n\r\n # Genre\r\n _add_directory_item(\r\n title='HotStar Genres',\r\n content_id=821,\r\n description='Genres',\r\n genre='Genre',\r\n uri='https://api.hotstar.com/o/v1/genre/list?perPage=1000',\r\n action='programs',\r\n )\r\n\r\n _add_search_item()\r\n\r\n # Add a sort method for the virtual folder items (alphabetically, ignore articles)\r\n xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL)\r\n\r\n # Finish creating a virtual folder.\r\n xbmcplugin.endOfDirectory(_handle)",
"async def channel_add(\n self, ctx: commands.Context, channel: Union[discord.TextChannel, discord.VoiceChannel], *team_ids: int):\n if set(team_ids) - set(self.teams):\n await ctx.send('Missing data for the following team IDs: %s' % (\n ', '.join(map(str, set(team_ids) - set(self.teams))),))\n return\n\n await asyncio.gather(*[\n self._permit_team_in_channel(self.teams[team_id], channel)\n for team_id in team_ids],\n return_exceptions=True)\n await ctx.send('Added team%s `%s` to channel %s' % (\n nl.s(len(team_ids)),\n '`, `'.join(self.teams[team_id].username for team_id in team_ids),\n channel.mention))",
"def add_chnl(self):\n\n # new widget\n chnl = QWidget()\n \n # load elements\n uic.loadUi(self.chnl_ui, chnl)\n\n # connect remove button\n chnl.rmv.clicked.connect(lambda _: self.rmv_chnl(chnl))\n\n # add to list\n self.chnls.append(chnl)\n\n # add to layout\n self.channels_layout.addWidget(chnl)",
"async def addjoinchannel(self, ctx: commands.Context, channel: discord.TextChannel):\n db_session = self.bot.create_db_session()\n\n existing = db_session.query(Channel).filter(Channel.id == channel.id).one_or_none()\n if existing:\n existing.joinable = True\n else:\n db_session.add(Channel(id=channel.id, name=channel.name, joinable=True))\n\n db_session.commit()\n db_session.close()\n await ctx.send(f\"{channel.mention} was added as a joinable channel.\")"
] | [
"0.72365296",
"0.7023313",
"0.6736699",
"0.6601461",
"0.65783",
"0.6487989",
"0.6431095",
"0.6341024",
"0.63276815",
"0.6260294",
"0.62081283",
"0.6052182",
"0.5912715",
"0.58898664",
"0.57825136",
"0.577486",
"0.57667595",
"0.5757747",
"0.57558197",
"0.5752964",
"0.57452536",
"0.5742773",
"0.57053393",
"0.56241447",
"0.55464023",
"0.5512724",
"0.55072063",
"0.55041945",
"0.54836005",
"0.5459078"
] | 0.83342344 | 0 |
Remove a channel from the list of spoiler channels. | async def remove(self, ctx, channel: discord.TextChannel):
config = await self.config.guild(ctx.guild).channels()
if not channel.id in config:
return await ctx.send("This channel is not a spoiler channel.")
config.remove(channel.id)
await self.config.guild(ctx.guild).channels.set(config)
await ctx.send("Channel removed from the spoiler channel list.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_channel(self, channel):\n self._channels.pop(channel.fileno, None)\n\n try:\n self._poller.remove(channel.fileno, channel._events)\n except (IOError, OSError):\n log.exception(\"Error while removing %r.\" % channel)",
"def remove_channel(self, channel):\n to_exec = \"DELETE FROM channel WHERE channel_id = %s\"\n self.__cursor.execute(to_exec, (str(channel.id),))\n self.__connection.commit()",
"def test_remove_channel_removes_channel(self):\n channel = Mock()\n with mock.patch.object(self.notifier, \"_silenced_channels\") as silenced_channels:\n self.notifier.remove_channel(channel)\n silenced_channels.__delitem__.assert_called_with(channel)",
"def remove(self, irc, msg, args, channel):\n res = self._checkDBhasChannel(channel)\n if res is True:\n SQL = 'DELETE FROM registry WHERE channel = ?'\n SQLargs = (channel,)\n self._SQLexec(SQL, SQLargs)\n self.lock.acquire()\n for x in range(0, len(self.channelscontrol)):\n v0 = str(self.channelscontrol[x][0])\n if v0 == channel:\n self.channelscontrol.pop(x)\n break\n self.lock.release()\n irc.reply(\"Channel removed from DB.\", prefixNick=True)\n else:\n irc.reply(\"Channel does not exist in DB.\", prefixNick=True)",
"async def removechannel(self, ctx, channel: discord.TextChannel):\n guild = ctx.message.guild\n excluded_channels = await self.config.guild(guild).excluded_channels()\n\n if channel.id in excluded_channels:\n excluded_channels.remove(channel.id)\n await self.config.guild(guild).excluded_channels.set(excluded_channels)\n await ctx.send(\"Removed %s from channel exclusion list.\" % channel.name)\n else:\n await ctx.send(\"%s is not excluded channel.\" % channel.name)",
"async def votechannel_remove(self, ctx, *, channel: discord.TextChannel):\n await self.bot.db.execute(\n \"DELETE FROM voting_channel WHERE guild_id = %s and channel_id = %s\",\n ctx.guild.id,\n channel.id,\n )\n self.bot.cache.votechannels.discard(channel.id)\n await util.send_success(ctx, f\"{channel.mention} is no longer a voting channel.\")",
"def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()",
"async def remove(self, ctx, channel : discord.Channel):\r\n \r\n server = ctx.message.server\r\n if server.id not in self.set:\r\n await self.bot.say(\":x: Uninitialized server!\")\r\n return\r\n if channel.id not in self.set[server.id][\"channels\"]:\r\n await self.bot.say(\":x: This is not a counting channel!\")\r\n return\r\n del self.set[server.id][\"channels\"][channel.id]\r\n self.save()\r\n await self.bot.edit_channel(channel,topic = None)\r\n await self.bot.say(\"Channel removed!\")",
"def drop_channel(self, channel):\n return self.clients.pop(channel, None)",
"def remove(self, channels=None):\n if channels is None:\n channels = self.get_channels()\n self.remove_from_frames(\n self.data, self.integration.frames, channels)",
"async def unwatch(self, ctx, channel: discord.TextChannel):\r\n channel_list = await self.config.guild(ctx.guild).watching()\r\n if channel.id in channel_list:\r\n channel_list.remove(channel.id)\r\n else:\r\n return await ctx.send(\"Channel is not being watched.\")\r\n await self.config.guild(ctx.guild).watching.set(channel_list)\r\n await ctx.send(f\"{self.bot.get_channel(channel.id).mention} will not have bad gifs removed.\")",
"async def remove(self, ctx, *, channel: discord.VoiceChannel):\n auto_channels = await self.config.guild(ctx.guild).auto_channels()\n auto_channels.remove(channel.id)\n await self.config.guild(ctx.guild).auto_channels.set(auto_channels)\n await ctx.send(\n _(\"Startchannel used for automatic voicechannels removed: {channel}\").format(\n channel=channel.name\n )\n )",
"async def clear(self, ctx):\n await self.config.guild(ctx.guild).channels.clear()\n await ctx.send(\"Spoiler channel list cleared.\")",
"async def blacklist_channel(\n self, ctx: commands.Context, channel: discord.TextChannel\n ):\n if str(channel.id) in self.channel_blacklist:\n self.channel_blacklist.remove(str(channel.id))\n await self._update_db()\n removed = True\n else:\n self.channel_blacklist.append(str(channel.id))\n await self._update_db()\n removed = False\n\n await ctx.send(f\"{'Un' if removed else None}Blacklisted {channel.mention}\")\n return",
"def cleanup(self, channel=None):\n # falls `channel` angegeben wurden, werden nur diese bereinigt,\n # ansonsten wird alles bereinigt\n if channel:\n # ueberpruefe, ob `channel` eine Zahl ist und erstelle eventuell eine Liste nur mit dieser Zahl\n # dies ist wichtig, weil eine For-Schleife nicht ueber eine Zahl,\n # sondern in meinem Fall nur ueber eine Liste, iterieren kann\n if type(channel) == int:\n channel = [channel]\n for c in channel:\n # loesche den channel `c` aus dem dictionary `self.channels`\n del self.channels[c]\n print(f\"cleanup von channel {c}\")\n else:\n print(\"cleanup\")\n self.channels = {}",
"def remove_channels(self, *channels):\n channels = set(c.id for c in channels)\n conf_to_remove = set()\n\n # Check every FollowConfig\n for chan_conf in self.follows:\n if set(c.id for c in chan_conf.discord_channels) & channels:\n # Remove the given channels from this FollowConfig\n dchans_to_remove = set(c for c in chan_conf.discord_channels if c.id in channels)\n chan_conf.discord_channels = [c for c in chan_conf.discord_channels if c not in dchans_to_remove]\n\n # If this FollowConfig ended up with 0 channel, save it to remove it later\n if not chan_conf.discord_channels:\n conf_to_remove.add(chan_conf)\n\n if conf_to_remove:\n self.follows = [c for c in self.follows if c not in conf_to_remove]",
"async def channel_remove(\n self, ctx: commands.Context, channel: discord.abc.GuildChannel, *team_ids: int):\n if set(team_ids) - set(self.teams):\n await ctx.send('Missing data for the following team IDs: %s' % (\n ', '.join(map(str, set(team_ids) - set(self.teams))),))\n return\n\n await asyncio.gather(*[\n self._forbid_team_in_channel(self.teams[team_id], channel)\n for team_id in team_ids],\n return_exceptions=True)\n await ctx.send('Removed team%s `%s` from channel %s' % (\n nl.s(len(team_ids)),\n '`, `'.join(self.teams[team_id].username for team_id in team_ids),\n channel.mention))",
"def remove(self, channel, nick, comment=\"\"):\n time.sleep(1)\n self.s.send(\"REMOVE %s %s%s\\n\" % (channel, nick, (comment and (\" :\" + comment))))\n logger.log(\"REMOVE %s %s%s\" % (channel, nick, (comment and (\" :\" + comment)))).LogSend()",
"async def add(self, ctx, channel: discord.TextChannel):\n config = await self.config.guild(ctx.guild).channels()\n if channel.id in config:\n return await ctx.send(\"This channel is already a spoiler channel.\")\n await ctx.send(\"Channel added to the spoiler channel list.\")\n config.append(channel.id)\n await self.config.guild(ctx.guild).channels.set(config)",
"def channel_leave(token, channel_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel does not exist\")\n if auth_u_id not in channel[\"all_members\"]:\n raise AccessError(\"Authorised user is not a member of the channel.\")\n channels.remove(channel_id, \"all_members\", auth_u_id)",
"def left(self, channel):\n ss = self.findSessions(channel)[0]\n self.sessions.remove(ss)",
"def unsubscribe(self, inst, channel):\r\n if channel not in self._channels:\r\n raise ValueError(\"Channel {} not exists!\".format(channel))\r\n self._channels[channel].unsubscribe(inst)\r\n return\r\n # TODO: ?delete channels if there is no subscribers\r\n # if len(self._channels[channel].subscribers) == 0:\r\n # del self._channels[channel]\r",
"def remove_servers_channels(self):\n for _hash in self._sections.keys():\n if not re.match(ur'^ server ', _hash) and not re.match(ur'^ channel ', _hash):\n continue\n del self._sections[_hash]",
"def unsubscribe(self, channel, update_handler=None):\n pass",
"def remove_event_detect(self, channel):\n self._check_mode()\n # entferne den channel aus der Liste `self.events`\n self.events.remove(channel)\n print(f\"event detect fuer channel {channel} entfernt\")",
"def unsubscribe(self, client, channel):\n clients = self.clients.get(channel)\n if clients is None:\n return False\n index = None\n for i, pair in enumerate(clients):\n if pair[0] != client:\n continue\n index = i\n break\n if index is not None:\n del self.clients[channel][index]\n return True",
"def remove_from_list(self, params):\n video_id = params.get('video_id', [''])[0]\n return self.netflix_session.remove_from_list(video_id=video_id)",
"async def remove_bot_channels(self, guild_id):\n api_cog = self.bot.get_cog('RR_API')\n channelInfo = await api_cog.get_channel_info(guild_id)\n\n if not channelInfo:\n print(\"Server Name Not in DB, Can't delete channels. Server: \" + str(guild_id))\n return\n if channelInfo['futurechannelid']:\n await self.bot.get_channel(int(channelInfo['futurechannelid'])).delete()\n if channelInfo['pastchannelid']:\n await self.bot.get_channel(int(channelInfo['pastchannelid'])).delete()\n if channelInfo['lootchannelid']:\n await self.bot.get_channel(int(channelInfo['lootchannelid'])).delete()\n if channelInfo['commandschannelid']:\n await self.bot.get_channel(int(channelInfo['commandschannelid'])).delete()\n if channelInfo['categoryid']:\n await self.bot.get_channel(int(channelInfo['categoryid'])).delete()",
"async def removejoinchannel(self, ctx: commands.Context, channel: discord.TextChannel):\n db_session = self.bot.create_db_session()\n\n try:\n existing = db_session.query(Channel).filter(Channel.id == channel.id).one()\n existing.joinable = False\n except NoResultFound:\n await ctx.send(f\"There was no record for {channel.mention}. The channel is not currently joinable.\")\n return\n\n db_session.commit()\n db_session.close()\n await ctx.send(f\"{channel.mention} was removed as a joinable channel.\")",
"def channel_leave(token, channel_id):\n\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # check if user is a member of channel with channel_ID and return AccessError if not\n user_channel = is_user_channel_member(channel_id, curr_id)\n if user_channel is False:\n raise error.AccessError(description=\"user is not a member of this channel\")\n\n # remove user with u_id from the channel (from member_ids)\n curr_channel = database.get_channel_data(channel_id)\n\n curr_channel[\"member_ids\"].remove(curr_id)\n # if user is an owner it removes them as an owner as well\n for owner_id in curr_channel[\"owner_ids\"]:\n if owner_id == curr_id:\n curr_channel[\"owner_ids\"].remove(curr_id)\n\n database.set_channel_data(curr_channel)"
] | [
"0.69637007",
"0.6853917",
"0.6657176",
"0.6604634",
"0.65927154",
"0.654691",
"0.64287716",
"0.6404131",
"0.63939744",
"0.6344553",
"0.6340599",
"0.63152224",
"0.6293544",
"0.6290537",
"0.60735846",
"0.59953487",
"0.5956108",
"0.5943499",
"0.58851695",
"0.58548623",
"0.58102643",
"0.5807849",
"0.58012813",
"0.57825357",
"0.5728961",
"0.5670686",
"0.5658898",
"0.560593",
"0.5591281",
"0.5578778"
] | 0.80995065 | 0 |
Clear the spoiler channel list. | async def clear(self, ctx):
await self.config.guild(ctx.guild).channels.clear()
await ctx.send("Spoiler channel list cleared.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def spoilerchannel(self, ctx):\n pass",
"async def remove(self, ctx, channel: discord.TextChannel):\n config = await self.config.guild(ctx.guild).channels()\n if not channel.id in config:\n return await ctx.send(\"This channel is not a spoiler channel.\")\n config.remove(channel.id)\n await self.config.guild(ctx.guild).channels.set(config)\n await ctx.send(\"Channel removed from the spoiler channel list.\")",
"def clearList(self):\r\n self.addons.clear()",
"def clear_all():\n viewer = connect_to_ginga()\n shell = viewer.shell()\n chnames = shell.get_channel_names()\n for ch in chnames:\n shell.delete_channel(ch)",
"async def clear(self, ctx):\n if ctx.voice_client is None or ctx.voice_client.channel is None:\n await ctx.reply(f\"I am not in a voice channel, invite me first with `{self.bot_config['prefix']}join`.\")\n raise commands.CommandError(\"Bot not connected to a voice channel.\")\n\n if ctx.author.voice is None or ctx.author.voice.channel is None:\n await ctx.reply(\"You need to be in a voice channel to use this command.\")\n raise commands.CommandError(\"Invoker not connected to a voice channel.\")\n\n if ctx.voice_client is not None and ctx.author.voice.channel != ctx.voice_client.channel:\n await ctx.reply(\"You need to be in the same voice channel as the bot to use this command.\")\n raise commands.CommandError(\"Invoker not in same voice channel as bot.\")\n\n controller = SpotifyController.get_instance(ctx.voice_client.channel.id)\n controller.stop_playlist_playback()\n controller.clear_playlist()\n await ctx.send(f\"Queue cleared!\")",
"async def vote_clear(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is None:\n await ctx.send('This channel was never setup for votes.')\n return\n old_votes = session.query(Vote).filter_by(channel_id=ctx.channel.id).all()\n for old_vote in old_votes:\n session.delete(old_vote)\n session.commit()\n await ctx.send(f'Votes for {ctx.channel} cleared!')",
"def reset_bot() :\r\n\r\n open_list.clear()\r\n closed_list.clear()",
"def clear(self):\n self.__list = []",
"def clearList(self):\r\n self.players.clear()",
"def refresh_chanlist(self):\n self._chanlist.delete(0, Tix.END)\n for name in sorted(self._channel_frames.keys(), _k.cmp_channels):\n self._chanlist.insert(Tix.END, name)",
"def clear(self, channel=None, lines=0):\n f = self.get_channel_frame(channel, create=False)\n if f:\n f.clear(lines)",
"def blank(self, channel):\n pass",
"def clear(self):\n self._list.clear()",
"def reset(self):\n for item in TextChannelFilterItem.objects(channel_filter=self):\n item.delete()\n self.reset_counters()\n self.retrain()",
"def resetPlayerList(self):\n self.playerList = []",
"def clearCards(self):\r\n self.cards = []",
"def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()",
"async def async_clear_playlist(self):\n await self._player.async_clear_playlist()",
"def clear_collection(self):\n self._cards = []",
"def clear():",
"def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []",
"def Clear(self):\r\n\r\n self._items = []\r\n self._sizer_element_count = 0",
"def cleanup(self, channel=None):\n # falls `channel` angegeben wurden, werden nur diese bereinigt,\n # ansonsten wird alles bereinigt\n if channel:\n # ueberpruefe, ob `channel` eine Zahl ist und erstelle eventuell eine Liste nur mit dieser Zahl\n # dies ist wichtig, weil eine For-Schleife nicht ueber eine Zahl,\n # sondern in meinem Fall nur ueber eine Liste, iterieren kann\n if type(channel) == int:\n channel = [channel]\n for c in channel:\n # loesche den channel `c` aus dem dictionary `self.channels`\n del self.channels[c]\n print(f\"cleanup von channel {c}\")\n else:\n print(\"cleanup\")\n self.channels = {}",
"async def unwatch(self, ctx, channel: discord.TextChannel):\r\n channel_list = await self.config.guild(ctx.guild).watching()\r\n if channel.id in channel_list:\r\n channel_list.remove(channel.id)\r\n else:\r\n return await ctx.send(\"Channel is not being watched.\")\r\n await self.config.guild(ctx.guild).watching.set(channel_list)\r\n await ctx.send(f\"{self.bot.get_channel(channel.id).mention} will not have bad gifs removed.\")",
"def clear_streams(self):\n self.stop_streams()\n self.streams.clear()",
"def clear(self):\n wait(self.proto.vanish())",
"async def watchlist(self, ctx):\r\n channel_list = await self.config.guild(ctx.guild).watching()\r\n msg = \"Bad gifs will be removed in:\\n\"\r\n for channel in channel_list:\r\n channel_obj = self.bot.get_channel(channel)\r\n if channel_obj is None: # Catch deleted/unexisting channels\r\n continue\r\n msg += f\"{channel_obj.mention}\\n\"\r\n await ctx.send(msg)",
"def clear_playlist(self, playlist_name):\n print(\"clears_playlist needs implementation\")",
"def Clear(self) -> None:",
"async def _clear(self, ctx):\n try:\n a = discord.Streaming\n p = ctx.bot.config[\"prefix\"]\n g = a(\n name=f\"{p}help | v{ctx.bot.version}\", url=\"https://twitch.tv/monstercat\"\n )\n await self.bot.change_presence(activity=g)\n except Exception:\n await ctx.send(f\"```\\n{traceback.format_exc()}```\")\n else:\n await ctx.send(\":white_check_mark: Cleared.\")"
] | [
"0.65423995",
"0.64471906",
"0.64198625",
"0.6385896",
"0.6345822",
"0.6259973",
"0.6207083",
"0.6160827",
"0.6145606",
"0.6049271",
"0.59991276",
"0.5911955",
"0.58956164",
"0.57868534",
"0.5777718",
"0.57751626",
"0.5747458",
"0.57456756",
"0.57445383",
"0.573946",
"0.5738058",
"0.573497",
"0.5727996",
"0.5726277",
"0.5713608",
"0.57109797",
"0.5710374",
"0.5705782",
"0.57049286",
"0.568561"
] | 0.8602701 | 0 |
user mobility function: updates each user's location in every frame. The mobility range comes from the user's mobility type. Meanwhile, a user may only move within the cell range, restricted by the MARGIN. | def user_mobility(user_list):
new_user_list = list()
for user in user_list:
# update loc according to user mobility type
ii = random.randint(-user[3], user[3])
print("user[3]= ", user[3])
print("ii= ", ii)
user[0] += random.randint(-user[3], user[3])
user[1] += random.randint(-user[3], user[3])
# restrict user loc in the cell range
user[0] = max(user[0], LEFT_MARGIN + 4)
user[0] = min(user[0], RIGHT_MARGIN - 4)
user[1] = max(user[1], TOP_MARGIN + 4)
user[1] = min(user[1], BOTTOM_MARGIN - 4)
# update which cell user is in
user[2] = which_cell(user[0], user[1])
new_user_list.append(user)
return new_user_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _move_user(self):\n self.terminal = False\n mobility_speed = 1\n\n for i in range(self.num_user):\n move_x = random.randint(-mobility_speed, mobility_speed)\n user_x_tmp = self.users[i][0] + move_x\n move_y = random.randint(-mobility_speed, mobility_speed)\n user_y_tmp = self.users[i][1] + move_y\n\n if np.abs(user_x_tmp) > np.sqrt(3) / 2.0 * outer_radius or (np.abs(user_x_tmp) + np.abs(user_y_tmp) / np.sqrt(\n 3)) > outer_radius: # and (np.abs(user_x_tmp) > np.sqrt(3)/2.0*inner_radius or (np.abs(user_x_tmp)+np.abs(user_y_tmp)/np.sqrt(3)) > inner_radius):\n self.terminal = True\n user_x = user_x_tmp\n user_y = user_y_tmp\n else:\n user_x = user_x_tmp\n user_y = user_y_tmp\n\n self.users[i] = np.hstack((user_x, user_y))",
"def follow_player(self, player_x, player_y):\n \n # Determines the horizontal and vertical distance between the Player and the\n # Minotaur, converting them into that are measured by the tiles (Dividing the\n # difference by 50 turns each of these values into integers that represent tile\n # distances)\n x_distance = (self.rect.x - player_x) / 50 \n y_distance = (self.rect.y - player_y) / 50 \n \n # If the Minotaur is NOT on the same tile as the Player i.e. touching them\n if x_distance != 0 and y_distance != 0:\n \n # Check if the Player is situated ABOVE the Minotaur and if the Minotaur is\n # not currently animating\n if y_distance > 0 and not self.__animating:\n \n # If the tile above the Minotaur is free, then set direction as up, set\n # animating to True, and change the minotaur_y (Tile space)\n if (self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y - 1] != 1):\n self.__direction = \"UP\"\n self.__animating = True\n self.__minotaur_y -= 1\n \n # If the tile above the Minotaur is NOT free, and the tile to the left\n # of the Minotaur IS free, then set direction to left, set animating\n # to True, and change the minotaur_x (Tile space) accordingly\n elif self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y - 1] and \\\n (self.__maze_arrangement[self.__minotaur_x - 1][self.__minotaur_y] != 1) and \\\n player_x < self.rect.x:\n self.__direction = \"LEFT\"\n self.__animating = True\n self.__minotaur_x -= 1\n \n # If the tile above the Minotaur is NOT free, and the tile to the RIGHT\n # of the Minotaur IS free, then set direction to right, set animating\n # to True, and change the minotaur_x (Tile space) accordingly \n elif self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y - 1] and \\\n (self.__maze_arrangement[self.__minotaur_x + 1][self.__minotaur_y] != 1) and \\\n player_x > self.rect.x:\n self.__direction = \"RIGHT\"\n self.__animating = True\n self.__minotaur_x += 1 \n \n # Check if the Player is situated BELOW the Minotaur and if the Minotaur is\n # not currently animating \n if y_distance < 0 and not self.__animating:\n \n # If the tile below the Minotaur is free, then set direction as down, set\n # animating to True, and change the minotaur_y (Tile space) \n if (self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y + 1] != 1):\n self.__direction = \"DOWN\"\n self.__animating = True\n self.__minotaur_y += 1\n \n # If the tile below the Minotaur is NOT free, and the tile to the RIGHT\n # of the Minotaur IS free, then set direction to right, set animating\n # to True, and change the minotaur_x (Tile space) accordingly \n elif self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y + 1] and \\\n (self.__maze_arrangement[self.__minotaur_x + 1][self.__minotaur_y] != 1) and \\\n player_x > self.rect.x:\n self.__direction = \"RIGHT\"\n self.__animating = True\n self.__minotaur_x += 1\n \n # If the tile below the Minotaur is NOT free, and the tile to the LEFT\n # of the Minotaur IS free, then set direction to left, set animating\n # to True, and change the minotaur_x (Tile space) accordingly \n elif self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y + 1] and \\\n (self.__maze_arrangement[self.__minotaur_x - 1][self.__minotaur_y] != 1) and \\\n player_x < self.rect.x:\n self.__direction = \"LEFT\"\n self.__animating = True\n self.__minotaur_x -= 1 \n \n # Check if the Player is situated RIGHT of the Minotaur and if the Minotaur is\n # not currently animating \n if x_distance < 0 and not self.__animating:\n \n # If the tile to the right of 
the Minotaur is free, then set direction as \n # right, set animating to True, and change the minotaur_x (Tile space) \n if (self.__maze_arrangement[self.__minotaur_x + 1][self.__minotaur_y] != 1):\n self.__direction = \"RIGHT\"\n self.__animating = True\n self.__minotaur_x += 1 \n \n # If the tile to the right of the Minotaur is NOT free, and the tile ABOVE\n # the Minotaur IS free, then set direction to up, set animating\n # to True, and change the minotaur_y (Tile space) accordingly \n elif self.__maze_arrangement[self.__minotaur_x + 1][self.__minotaur_y] and \\\n (self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y - 1] != 1) and \\\n player_y < self.rect.y:\n self.__direction = \"UP\"\n self.__animating = True\n self.__minotaur_y -= 1\n \n # If the tile to the right of the Minotaur is NOT free, and the tile BELOW\n # the Minotaur IS free, then set direction to down, set animating\n # to True, and change the minotaur_y (Tile space) accordingly \n elif self.__maze_arrangement[self.__minotaur_x + 1][self.__minotaur_y] and \\\n (self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y + 1] != 1) and \\\n player_y > self.rect.y:\n self.__direction = \"DOWN\"\n self.__animating = True\n self.__minotaur_y += 1 \n \n # Check if the Player is situated LEFT of the Minotaur and if the Minotaur is\n # not currently animating \n if x_distance > 0 and not self.__animating:\n \n # If the tile to the LEFT of the Minotaur is free, then set direction as \n # left, set animating to True, and change the minotaur_x (Tile space) \n if (self.__maze_arrangement[self.__minotaur_x - 1][self.__minotaur_y] != 1):\n self.__direction = \"LEFT\"\n self.__animating = True\n self.__minotaur_x -= 1\n \n # If the tile to the LEFT of the Minotaur is NOT free, and the tile BELOW\n # the Minotaur IS free, then set direction to down, set animating\n # to True, and change the minotaur_y (Tile space) accordingly \n elif self.__maze_arrangement[self.__minotaur_x - 1][self.__minotaur_y] and \\\n (self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y + 1] != 1) and \\\n player_y > self.rect.y:\n self.__direction = \"DOWN\"\n self.__animating = True\n self.__minotaur_y += 1\n \n # If the tile to the LEFT of the Minotaur is NOT free, and the tile ABOVE\n # the Minotaur IS free, then set direction to up, set animating\n # to True, and change the minotaur_y (Tile space) accordingly \n elif self.__maze_arrangement[self.__minotaur_x - 1][self.__minotaur_y] and \\\n (self.__maze_arrangement[self.__minotaur_x][self.__minotaur_y - 1] != 1) and \\\n player_y > self.rect.y:\n self.__direction = \"UP\"\n self.__animating = True\n self.__minotaur_y -= 1",
"def movement(self):",
"def player_movement(self):",
"def mobility_player(self, board):\n valid_moves = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n rows, columns = np.where(valid_moves == 1)\n max_mobility = -200\n location = (-2, -2)\n for i in range(len(rows)):\n temp_board = np.copy(board)\n temp_board = self.game.flip_opponent_stones((rows[i], columns[i]), temp_board, self.board_size,\n self.computer_num, self.opponent_num)\n mobility_value = self.stone_parity(temp_board)\n if mobility_value > max_mobility:\n max_mobility = mobility_value\n location = (rows[i], columns[i])\n return location",
"def adjust_mario_position(self):\n self.last_x_position = self.mario.rect.right\n self.mario.rect.x += round(self.mario.x_vel)\n self.check_mario_x_collisions()\n\n if self.mario.in_transition_state == False:\n self.mario.rect.y += round(self.mario.y_vel)\n self.check_mario_y_collisions()",
"def assign_upLimit():\r\n player.rect.y = 25",
"def Modifier_UserGPS(self):",
"def _update_battle_position(self, new_cells=[], previous_cells=[]):\n if previous_cells:\n for previous_cell in previous_cells:\n self._battle_area.set_cell(previous_cell.get_name(), False)\n if new_cells:\n for new_cell in new_cells:\n self._battle_area.set_cell(new_cell.get_name(), self)",
"def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)",
"def _update_motion_data(self, msg):\n if self._auv_motion != msg.motion:\n self._target_euler[\"alpha\"] = self._actual_euler[\"alpha\"]\n self._target_euler[\"beta\"] = self._actual_euler[\"beta\"]\n self._target_euler[\"gamma\"] = self._actual_euler[\"gamma\"]\n self._auv_motion = msg.motion\n self._thrusters_actual_speed[\"1\"] = msg.thrusters_speed.thruster_id1_speed\n self._thrusters_actual_speed[\"2\"] = msg.thrusters_speed.thruster_id2_speed\n self._thrusters_actual_speed[\"3\"] = msg.thrusters_speed.thruster_id3_speed\n self._thrusters_actual_speed[\"4\"] = msg.thrusters_speed.thruster_id4_speed\n self._thrusters_actual_speed[\"5\"] = msg.thrusters_speed.thruster_id5_speed\n self._thrusters_actual_speed[\"6\"] = msg.thrusters_speed.thruster_id6_speed\n self._thrusters_actual_speed[\"7\"] = msg.thrusters_speed.thruster_id7_speed\n self._thrusters_actual_speed[\"8\"] = msg.thrusters_speed.thruster_id8_speed",
"def update(self):\r\n if self.able_to_move:\r\n self.pix_pos += self.direction*self.speed\r\n if self.time_to_move():\r\n if self.stored_direction != None:\r\n self.direction = self.stored_direction\r\n self.able_to_move = self.can_move()\r\n # calls to the next function in order to check that the player is within bounds \r\n\r\n self.grid_pos[0] = (self.pix_pos[0]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_width//2)//self.app.cell_width+1\r\n self.grid_pos[1] = (self.pix_pos[1]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_height//2)//self.app.cell_height+1\r\n # keep track of where the player is currently to the grid \r\n\r\n if self.on_coin():\r\n self.eat_coin()\r\n # removes the coin once the player is over the tile\r\n\r\n if self.on_fruit():\r\n self.eat_fruit()\r\n # removes the fruit once the player is over the tile\r",
"def update_players_locations(self):\n self.loc = self.find_value(1)\n self.opponent_loc = self.find_value(2)",
"def get_user_move(board, entities):\n return False",
"def update_screen_loc(self):\n old_loc = list(self.loc)\n\n self.loc = self.next_loc()\n if self.loc != old_loc:\n for action in self.actions:\n if action == Action.running:\n if self.current_speed < self.max_speed:\n self.current_speed += 0.5\n elif action == Action.slide:\n self.velocity -= 0.5\n self.current_speed = self.velocity\n if self.velocity <= 0:\n self.remove_action(Action.slide)\n self.remove_action(Action.damaged)\n elif action == Action.jumping:\n if self.jump_charge > self.jump_start_charge:\n self.jump_charge -= 1\n else:\n self.remove_action(Action.jumping)\n elif action == Action.attack:\n self.attack_charge -= 1\n if self.attack_charge <= 0:\n self.remove_action(Action.attack)\n\n self.redraw = True",
"def moveMyAvatar(self) :\n\t\tif Util.AUTO : self.autoMove(); return\n\n\t\tnewH = self.avatarNP.getH()\n\t\tnewY = 0\n\t\tnewX = 0\n\n\t\tif self.keyMap[\"left\"] :\n\t\t\tnewH += 30 * globalClock.getDt()\n\t\tif self.keyMap[\"right\"] :\n\t\t\tnewH -= 30 * globalClock.getDt()\n\t\tif self.keyMap[\"forward\"] :\n\t\t\tnewY -= 2 * globalClock.getDt()\n \t\tif self.keyMap[\"backward\"] :\n \t\t\tnewY += 2 * globalClock.getDt()\n \t\tif self.keyMap[\"dash\"] :\n \t\t\tnewY -= 20 * globalClock.getDt()\n\t\tif self.keyMap[\"slide-left\"] :\n \t\t\tnewX += 2 * globalClock.getDt()\n\t\tif self.keyMap[\"slide-right\"] :\n \t\t\tnewX -= 2 * globalClock.getDt()\n\n\t\tself.avatarNP.setH(newH)\n\t\tself.avatarNP.setFluidX(self.avatarNP, newX)\n\t\tself.avatarNP.setFluidY(self.avatarNP, newY)",
"def update(self, frame):\n\n if(frame % 1 == 0): \n\n # Calling method to move people, and check and infect them and perform\n # other functions.\n self.putil.move(frame)\n \n # Get all the healthy, immune, infected, and dead people seperately \n healthy_x = self.putil.population.get_all_healthy()[:, index.x_axis]\n healthy_y = self.putil.population.get_all_healthy()[:, index.y_axis]\n infected_x = self.putil.population.get_all_infected()[:, index.x_axis]\n infected_y = self.putil.population.get_all_infected()[:, index.y_axis]\n immune_x = self.putil.population.get_all_recovered()[:, index.x_axis]\n immune_y = self.putil.population.get_all_recovered()[:, index.y_axis]\n dead_x = self.putil.population.get_all_dead()[:, index.x_axis]\n dead_y = self.putil.population.get_all_dead()[:, index.y_axis]\n total_infected = self.putil.size - len(healthy_x)\n total_hospitalized = len(self.putil.persons[self.putil.persons[:,index.hospitalized] == 3])\n currently_infected = len(infected_x)\n\n # Update healthcare status\n if currently_infected > self.putil.total_healthcare_capacity*3/2:\n self.healthcare_status = \"Extreme\"\n elif currently_infected > self.putil.total_healthcare_capacity:\n self.healthcare_status = \"Worse\"\n elif currently_infected > self.putil.total_healthcare_capacity*2/3:\n self.healthcare_status = \"Manageable\"\n else:\n self.healthcare_status = \"Normal\"\n\n # Update Graphs\n data1 = np.c_[healthy_x,healthy_y]\n data2 = np.c_[infected_x,infected_y]\n data3 = np.c_[immune_x,immune_y]\n data4 = np.c_[dead_x,dead_y]\n\n if frame == self.putil.enforce_mask_wearing_at:\n self.mask_wearing_info = \"Active\" \n \n if frame == self.putil.enforce_social_distance_at:\n self.social_distancing_info = \"Active\"\n\n self.text.set_text(\"%i \\n%i \\n%s \\n%s \\n%s \\n%s\" % (frame,len(infected_x), str(len(healthy_x)) + \" or \" + str(round(len(healthy_x)*100/self.putil.size,1)) + \"%\",\n str(len(immune_x)) + \" or \" + str(round(len(immune_x)*100/self.putil.size,1)) + \"%\", str(len(dead_x)) + \" or \" + str(round(len(dead_x)*100/self.putil.size,1)) + \"%\",\n self.healthcare_status))\n self.text2.set_text(\"%s \\n%s \\n%s \\n%s \\n%s\\n\" % (self.putil.size, self.mask_wearing_info, self.social_distancing_info, self.social_distancing_num , total_infected))\n self.scat.set_offsets(data1)\n self.scat2.set_offsets(data2)\n self.scat3.set_offsets(data3)\n self.scat4.set_offsets(data4)\n \n self.infected.append(len(infected_x))\n self.infected_total.append(self.putil.size - len(healthy_x))\n self.deaths.append(len(dead_x))\n self.frames.append(frame)\n self.immunes.append(len(immune_x))\n\n self.currently_infected.set_ydata(self.infected)\n self.currently_infected.set_xdata(self.frames)\n\n self.total_deaths.set_ydata(self.deaths)\n self.total_deaths.set_xdata(self.frames)\n\n self.total_immune.set_ydata(self.immunes)\n self.total_immune.set_xdata(self.frames)\n\n \n \n return self.scat, self.scat2, self.scat3, self.scat4, self.currently_infected,",
"def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path",
"def move(self, usercmd):\n newPosX = self.robot.posX\n newPosY = self.robot.posY\n logging.info(\"Avant action :: newPosX={} / newPosY={}\".\\\n format(newPosX, newPosY))\n step = 1\n cmd = usercmd[0:1]\n if (len(usercmd) != 1):\n stpStr = usercmd[1:]\n if (stpStr.isdigit()):\n step = int(stpStr)\n else:\n step = 0\n if cmd.startswith(\"E\"):\n newPosX = newPosX + step\n elif cmd.startswith(\"W\"):\n newPosX = newPosX - step\n elif cmd.startswith(\"N\"):\n newPosY = newPosY - step\n elif cmd.startswith(\"S\"):\n newPosY = newPosY + step\n elif (cmd == \"Q\"):\n #quit\n print(\"Quit\")\n return False\n logging.info(\"newPosX={} / newPosY={}\".format(newPosX, newPosY))\n oldCar = \"\"\n newCar = \"\"\n if (self.canMove(cmd, self.robot, newPosX, newPosY)):\n oldCar = self.map[newPosY][newPosX]\n logging.info(\"originalMap[{}] : {}\".format(self.robot.posY, \\\n self.originalMap[self.robot.posY]))\n if (self.originalMap[self.robot.posY][self.robot.posX] == \".\"):\n self.map[self.robot.posY][self.robot.posX] = \".\"\n else:\n self.map[self.robot.posY][self.robot.posX] = \" \"\n self.robot.posX = newPosX\n self.robot.posY = newPosY\n self.map[newPosY][newPosX] = \"X\"\n logging.info(\"self.map[{}]={}\".format(newPosY, self.map[newPosY]))\n newCar = self.map[newPosY][newPosX]\n #print(oldCar, newCar)\n if (oldCar == \"U\" and newCar == \"X\"):\n print(\"Bravo, vous avez gagné !!!!!\")\n #Quit\n return False\n return True",
"def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8",
"def runMaze(mazze, rectangles):\n global lvl\n global mobsKilled\n startx, starty, endx, endy = 0, 0, 0, 0\n startx, starty, endx, endy = startEndPoints(mazze)\n screen = pygame.display.set_mode((600,600,)) #pygame display object\n all_entities = pygame.sprite.LayeredUpdates() #sprite group of all sprites\n walls = pygame.sprite.LayeredUpdates() #sprite group for the walls\n players = pygame.sprite.GroupSingle() #sprite group for the player\n monstors = pygame.sprite.LayeredUpdates() #sprite group for the monsters\n exit = pygame.sprite.GroupSingle() #sprite group for the exit point\n floors = pygame.sprite.LayeredUpdates() #sprite group for the floors\n entry = pygame.sprite.GroupSingle() #sprite group for the entry point\n daggers = pygame.sprite.GroupSingle() #sprite group for the attacks\n done = False\n\n mobList = npc.main(rectangles, lvl)\n mobCounter = 0\n for i in mobList:\n i.setListPos(mobCounter)\n mobCounter += 1\n # i.displayStats()\n\n clock = pygame.time.Clock()\n\n x = copy.deepcopy(startx)\n y = copy.deepcopy(starty)\n\n total_level_width = 3000\n total_level_height = 3000\n global facing\n camera = Camera(complex_camera, total_level_width, total_level_height) #camera object used to generate the offsets\n playerOne = Player(x, y, facing) #player object and sprite\n playerOne.add(players)\n while not done:\n try:\n pygame.event.pump()\n except:\n None\n for event in pygame.event.get():\n pressed = pygame.key.get_pressed()\n if event.type == pygame.QUIT:\n done = True\n print(\"You Killed \" + str(mobsKilled) + \" Monsters.\")\n highScores.setHighScore( lvl-1, mobsKilled)\n pygameMazeDraw(screen, mazze, y, x, mobList, walls, monstors, exit, floors, entry) #generates the necessary sprites and objects to later be displayed\n pressed = pygame.key.get_pressed()\n if pressed[pygame.K_w] or pressed[pygame.K_s] or pressed[pygame.K_a] or pressed[pygame.K_d]:\n if pressed[pygame.K_w]:\n testSprite = Player(x,(y-1), facing)\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('wcollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # y -= 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n y -= 1\n #testSprite.add(players)\n players.update(testSprite.pos, facing)\n elif pressed[pygame.K_s]:\n testSprite = Player(x,(y+1), facing)\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('scollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # y += 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n y += 1\n #testSprite.add(players)\n players.update(testSprite.pos, facing)\n elif pressed[pygame.K_a]:\n testSprite = Player((x-1),y, 'left')\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('acollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # x -= 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n x 
-= 1\n #testSprite.add(players)\n players.update(testSprite.pos, 'left')\n elif pressed[pygame.K_d]:\n testSprite = Player((x+1),y, 'right')\n if pygame.sprite.spritecollideany(testSprite, walls) is not None:\n # print('dcollide')\n continue\n elif pygame.sprite.spritecollideany(testSprite, exit) is not None:\n screen.blit(loading, (0,0,))\n pygame.display.flip()\n return main()\n # elif len(pygame.sprite.spritecollide(testSprite, monstors, dokill=True)) > 0:\n # print('monstCollide')\n # x += 1\n # #testSprite.add(players)\n # players.update(testSprite.pos)\n else:\n x += 1\n #testSprite.add(players)\n players.update(testSprite.pos, 'right')\n (players.sprite).add(daggers)\n if pressed[pygame.K_UP] or pressed[pygame.K_DOWN] or pressed[pygame.K_LEFT] or pressed[pygame.K_RIGHT] or pressed[pygame.K_RSHIFT]:\n if pressed[pygame.K_UP] or pressed[pygame.K_RSHIFT]:\n dagger = Dagger(x, y-1, 'up')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n #pygame.sprite.groupcollide(daggers, monstors, True, True)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # pygame.sprite.groupcollide(daggers, monstors, True, True)\n elif pressed[pygame.K_DOWN]:\n dagger = Dagger(x, y+1, 'down')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n #pygame.sprite.groupcollide(daggers, monstors, True, True)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # pygame.sprite.groupcollide(daggers, monstors, True, True)\n elif pressed[pygame.K_LEFT]:\n dagger = Dagger(x-1, y, 'left')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n #pygame.sprite.groupcollide(daggers, monstors, True, True)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # pygame.sprite.groupcollide(daggers, monstors, True, True)\n elif pressed[pygame.K_RIGHT]:\n dagger = Dagger(x+1, y, 'right')\n dagger.add(daggers)\n pygame.sprite.groupcollide(daggers, walls, True, False)\n if len(daggers.sprites()) > 0:\n coll = pygame.sprite.spritecollideany(daggers.sprite, monstors)\n if coll is not None:\n mobList[coll.monst.listPos] = None\n mobsKilled += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n pygame.sprite.groupcollide(daggers, monstors, False, True)\n # pygame.sprite.groupcollide(daggers, walls, True, False)\n # 
pygame.sprite.groupcollide(daggers, monstors, True, True)\n\n #opens up the settings screen but need a way to clear it\n if pressed[pygame.K_o]:\n done = settingsscreen.main()\n # if done==True:\n # print(\"You Killed \" + str(mobsKilled) + \" Monsters.\")\n # highScores.setHighScore( lvl-1, mobsKilled)\n\n all_entities.add(walls)\n if daggers.sprite is None:\n all_entities.add(players)\n all_entities.add(daggers)\n all_entities.add(monstors)\n all_entities.add(exit)\n all_entities.add(floors)\n all_entities.add(entry)\n camera.update(players.sprite)\n #all_entities.draw(screen)\n for e in all_entities:\n screen.blit(e.image, camera.apply(e)) #Applies the offsets to the sprites and draws them to the screen\n\n screen.convert_alpha()\n pygame.display.flip() #updates the screen to show the changes\n all_entities.empty()\n walls.empty()\n floors.empty()\n\n\n clock.tick(100)\n screen.fill((0,0,0,))\n #pygame.event.clear()\n #I don't know where to put this that it would work\n # print(\"You Killed \" + str(mobsKilled) + \" Monsters.\")\n # highScores.setHighScore( lvl-1, mobsKilled)",
"def moveBasedOnCurrentMomentum(self):\n self.xPos-=self.xMomentum\n self.yPos-=self.yMomentum\n self.syncSpriteCoordinates()",
"def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits",
"def movement(self, screen):\n if self.tx is not None and self.ty is not None: # Target is set\n\n X = self.x - self.tx\n Y = self.y - self.ty\n\n if X < 0: # --->\n self.img = pygame.image.load(next(self.walking_east_images))\n self.x += self.velocity\n elif X > 0: # <----\n self.img = pygame.image.load(next(self.walking_west_images))\n self.x -= self.velocity\n if Y > 0: # up\n self.img = pygame.image.load(next(self.walking_north_images))\n self.y -= self.velocity\n elif Y < 0: # dopwn\n self.img = pygame.image.load(next(self.walking_south_images))\n self.y += self.velocity\n screen.blit(self.img, (self.x, self.y))\n\n if X == 0 and Y == 0:\n self.tx, self.ty = None, None\n self.agent.actionCompleted()",
"def update_mug(self, msg):\n # Create a transform from the wrist to the mug\n posn = [0, 0, 0.140]\n ornt = [0, 0.707, 0, 0.707]\n t_i = rospy.Time.now()\n self.send_pose(posn, ornt, \"right_hand\", \"mug_frame\", t_i)\n xform = self.tfBuffer.lookup_transform(\n \"base\", \"mug_frame\", rospy.Time(0))\n load_xform_into_pose(xform.transform, self.marker.pose)\n self.pub.publish(self.marker)",
"def move_humans(self, zombie_distance_field):\r\n blocked = self.get_grid_height() * self.get_grid_width() #getting the distance value of obstacles\r\n new_positions = []\r\n for human in self.humans(): #calculate move for each human\r\n moves = self.eight_neighbors(human[0], human[1]) #getting list of up to 8 possible moves\r\n moves.append((human[0], human[1]))\r\n potential_moves = []\r\n distance = zombie_distance_field[human[0]][human[1]]\r\n for move in moves: #storing potential move if the distance is the max but not that of an obstacle\r\n if zombie_distance_field[move[0]][move[1]] < blocked:\r\n if zombie_distance_field[move[0]][move[1]] > distance:\r\n potential_moves = [move]\r\n distance = zombie_distance_field[move[0]][move[1]]\r\n elif zombie_distance_field[move[0]][move[1]] == distance: #getting multiple moves if valid\r\n potential_moves.append(move) \r\n \r\n new_positions.append(random.choice(potential_moves))\r\n self._human_list = new_positions",
"def move_friendly(self):\n self.friendly_pos[0]+=self.x_speed\n self.friendly_pos[1]+=self.y_speed",
"def update_position(self, u_i, forward):\n # Call the right funcion for the joint sequence update_seq_G for example\n getattr(self, f'update_seq_{self.sequence}')(u_i, forward)\n return self.update_legs()",
"def potential_mobility_player(self, board):\n valid_moves = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n rows, columns = np.where(valid_moves == 1)\n max_potential_mobility = -200\n location = (-2, -2)\n for i in range(len(rows)):\n temp_board = np.copy(board)\n temp_board = self.game.flip_opponent_stones((rows[i], columns[i]), temp_board, self.board_size,\n self.computer_num, self.opponent_num)\n potential_mobility_value = self.stone_parity(temp_board)\n if potential_mobility_value > max_potential_mobility:\n max_potential_mobility = potential_mobility_value\n location = (rows[i], columns[i])\n return location",
"def update(self):\n if self.is_moving_up:\n self.dirty = 1\n if self.is_moving_down:\n self.dirty = 1\n if self.is_moving_right:\n self.dirty = 1\n if self.is_moving_left:\n self.dirty = 1\n \n self.rect.x += self.moveX\n self.logic.wall_hit_logic(self.moveX, \"x\", self.room.wall_list)\n self.room_change.change_room()\n \n self.rect.y += self.moveY\n self.logic.wall_hit_logic(self.moveY, \"y\", self.room.wall_list)\n self.room_change.change_room()"
] | [
"0.6258764",
"0.5653074",
"0.5542158",
"0.5471045",
"0.5376977",
"0.53672373",
"0.5281903",
"0.5248371",
"0.521853",
"0.51883197",
"0.5184659",
"0.5160175",
"0.515324",
"0.51410437",
"0.5134581",
"0.5125037",
"0.5112508",
"0.50968057",
"0.50905675",
"0.5066284",
"0.50647473",
"0.5061",
"0.50504184",
"0.5045799",
"0.50346404",
"0.50282735",
"0.5015196",
"0.49758407",
"0.49723506",
"0.4956536"
] | 0.69230926 | 0 |
draw cell square and paint color according to cell location and outrage ratio. The outrage ratio denotes the balance between cell resources and cell load. The color of each cell is chosen according to its outrage ratio: a dark color means the cell carries a high burden, a light color the opposite. Black means the ratio is larger than 1, meaning that cell is in outrage. | def draw_cells(cell_list):
outrage_ratio = [x[4]/x[3] for x in cell_list]
# print(cell_list)
# print(outrage_ratio)
outrage_ratio = [min(x, 1) for x in outrage_ratio] # larger than 1 is outrage, use black color directly
# print_list = [round(x, 2) for x in outrage_ratio]
# print(print_list)
color_index = [int(x * len(COLOR_LIST)) for x in outrage_ratio]
for cell in cell_list:
this_color_index = color_index[cell[2]-1]
this_cell_color = [i * 255 for i in list(COLOR_LIST[this_color_index-1].rgb)]
pygame.draw.rect(DISPLAY_SURF, this_cell_color, (cell[0], cell[1], CELL_SIZE, CELL_SIZE)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def green_cell(self, x, y):\n r = self.rect_area(x, y) # gets rect area for cell\n pygame.draw.rect(self.screen, (0, 255, 0), r, 3)\n pygame.display.update(r) # updates screen to showcase green rect",
"def draw_grid(self):\n\n screen.fill(GREY)\n\n for row in self.grid:\n for cell in row:\n if cell.root:\n color = GREEN\n elif cell.goal:\n color = RED\n elif cell.value:\n color = DARK_BLUE\n elif cell.visited:\n color = LIGHT_BLUE\n elif cell.f:\n color = LIGHT_GREEN\n elif cell.wall:\n color = GRAY\n else:\n color = WHITE\n\n pygame.draw.rect(screen, color, cell.rect)\n\n x, y = cell.rect.x, cell.rect.y\n\n if cell.g:\n self.draw_score(x + 2, y + 2, cell.g)\n if cell.h:\n self.draw_score(x + 18, y + 2, cell.h)\n if cell.f:\n self.draw_score(x + 2, y + self.cell_size - 10, cell.f)",
"def draw_progress_bar(self, col, row, size, ratio, color=GREEN):\n npixels = size * ratio\n for n in range(int(npixels)):\n self.putpixel(col + n, row, color)\n # flash an addiotional pixel as fraction indicator\n if npixels - int(npixels) > .5 and self.nframes & 1 == 0:\n self.putpixel(col + int(npixels), row, color)",
"def render_cell(self, row, col):\n env = self.env\n size = self.CELL_WIDTH\n panel = self.PANEL_HEIGHT\n margin = self.MARGIN\n screen = self.screen\n\n radius = (size - 2 * margin) // 2\n cellX = (margin + size) * row + margin\n celly = (margin + size) * col + margin + panel\n diskX = margin + size // 2 + row * (size + margin)\n diskY = margin + size // 2 + col * (size + margin) + panel\n\n DrawRect(screen, Colors.GREEN, [cellX, celly, size, size])\n if env.isWhite(row, col):\n DrawCircle(screen, Colors.WHITE, (diskX, diskY), radius)\n if env.isBlack(row, col):\n DrawCircle(screen, Colors.BLACK, (diskX, diskY), radius)",
"def draw_stone(self, row, col, color):\n\n inner_start_x = (row + 1) * CELL_PIXELS - 12\n inner_start_y = (col + 1) * CELL_PIXELS - 12\n inner_end_x = (row + 1) * CELL_PIXELS + 12\n inner_end_y = (col + 1) * CELL_PIXELS + 12\n\n outer_start_x = (row + 1) * CELL_PIXELS - 14\n outer_start_y = (col + 1) * CELL_PIXELS - 14\n outer_end_x = (row + 1) * CELL_PIXELS + 14\n outer_end_y = (col + 1) * CELL_PIXELS + 14\n\n start_pixel_x = (row + 1) * CELL_PIXELS - 17\n start_pixel_y = (col + 1) * CELL_PIXELS - 17\n end_pixel_x = (row + 1) * CELL_PIXELS + 17\n end_pixel_y = (col + 1) * CELL_PIXELS + 17\n\n if color == BLACK:\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='black')\n self.create_oval(outer_start_x, outer_start_y, outer_end_x, outer_end_y, fill='white')\n self.create_oval(inner_start_x, inner_start_y, inner_end_x, inner_end_y, fill='black')\n else:\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='white')\n self.create_oval(outer_start_x, outer_start_y, outer_end_x, outer_end_y, fill='black')\n self.create_oval(inner_start_x, inner_start_y, inner_end_x, inner_end_y, fill='white')",
"def red2blue(self):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n self.cells[x][y] = 2",
"def draw_grid(self, darken=1):\n if not(0 < darken < 1):\n darken = 1\n for x in range(0, int(self.settings['grid_size'])):\n for y in range(0, int(self.settings['grid_size'])):\n if self.grid[x][y] == g.EMPTY:\n if (x + y) % 2 == 0:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (93 * darken, 216 * darken, 228 * darken), r)\n else:\n rr = pygame.Rect((x * self.block_width, y * self.block_width),\n (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (84 * darken, 194 * darken, 205 * darken), rr)\n elif self.grid[x][y] == g.WALL:\n rr = pygame.Rect((x * self.block_width, y * self.block_width), (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (175 * darken, 34 * darken, 6 * darken), rr)\n elif self.grid[x][y] == g.PLAYER:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (17 * darken, 24 * darken, 47 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)\n elif self.grid[x][y] == g.FOOD:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (223 * darken, 163 * darken, 49 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)",
"def draw_occupied_cells(self):\n reds = [cell for cell in self.game.get_cells() if cell.player == 1]\n blacks = [cell for cell in self.game.get_cells() if cell.player == 2]\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=reds,\n edgecolors='black', node_color='red', linewidths=2)\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=blacks,\n edgecolors='black', node_color='black', linewidths=2)",
"def cell(x, y):\n try:\n if cells[y][x]['filled'] == 1:\n return # this has already been processed\n except IndexError:\n return\n cells[y][x]['filled'] = 1 # this cell is now filled\n\n nn = []\n for nx, ny in neighbours(x, y):\n try:\n if cells[ny][nx]['filled']:\n nn.append(cells[ny][nx])\n except IndexError:\n continue\n \n c = 0 # colour weighting\n \n #------ Flippedness\n flipped = sum([i['inverted'] for i in nn if i['inverted']])\n cells[y][x]['inverted'] = (randint(0, 3) + flipped) % 4\n \n #------- Colour calculation\n avg_colour = sum([i['colour'][0] for i in nn]) / len(nn)\n avg_sat = sum([i['colour'][1] for i in nn]) / len(nn)\n avg_bri = sum([i['colour'][2] for i in nn]) / len(nn)\n \n # small chance of going totally random otherwise small variation from neighbours\n if random(100) > 90:\n h = randint(0, 100)\n s = randint(0, 100)\n b = randint(0, 100)\n else:\n h = (avg_colour + randint(-15, 15)) % 100\n s = (avg_sat + randint(-15, 15)) % 100\n b = (avg_bri + randint(-15, 15)) % 100\n cells[y][x]['colour'] = (h, s, b)\n \n #------- Alpha calculation\n d = sqrt((x*cell_size - rx)**2 + (y*cell_size - ry)**2) # distance from epicenter\n mx = sqrt((w-rx*cell_size)**2 + (h-ry*cell_size)**2)\n a = d/sqrt(w**2+h**2)*255\n cells[y][x]['alpha'] = a\n \n for cx,cy in neighbours(x, y):\n cell(cx, cy)",
"def draw_grid(self) -> None:\n grid = self.life.curr_generation\n for row in range(self.cell_height):\n for column in range(self.cell_width):\n if grid[row][column] == 1:\n color = \"green\"\n else:\n color = \"white\"\n pygame.draw.rect(\n self.screen,\n pygame.Color(color),\n (column * self.cell_size, row * self.cell_size, self.cell_size, self.cell_size),\n )",
"def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True",
"def drawHealth(self, maxHealth, currentHealth):\n percentageHealth = currentHealth/maxHealth\n if not currentHealth == 0 and not percentageHealth == 1:\n if(percentageHealth <= 1/3):\n pg.draw.rect(self.screen, pg.Color(\"snow 4\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"red\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70 * percentageHealth, self.tileSize * .15))\n elif(percentageHealth <= 2/3):\n pg.draw.rect(self.screen, pg.Color(\"snow 4\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"yellow\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70 * percentageHealth, self.tileSize * .15))\n elif(percentageHealth < 1):\n pg.draw.rect(self.screen, pg.Color(\"snow 4\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"green\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70 * percentageHealth, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"black\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15), 2)",
"def _draw_square(self, left_x, top_y, side, color, fill):\n self.pen.up()\n self.pen.color(color)\n self.pen.goto(left_x, top_y)\n self.pen.down()\n self.pen.begin_fill()\n for _ in range(4):\n self.pen.forward(side)\n self.pen.right(90)\n self.pen.end_fill()",
"def overwrite_board_square(self, row, col):\n x = self.board_lft_x + col * self.next_square\n y = self.board_top_y - row * self.next_square\n color = self.square_light if (row + col) % 2 == 0 else self.square_dark\n self._draw_square(x, y, self.square_side_size, color, True)",
"def draw_grid(self):\n buf = self.__hbar\n for rInd in range(self.row):\n line = '\\t|'\n for cInd in range(self.col):\n this = ((rInd * self.col) + cInd)\n cell = self.get_cell(this)\n if not cell:\n line += '%s|' % ' '.center(5)\n else:\n if this == self.new_cell:\n tmp = green(str(cell).center(5))\n else:\n tmp = str(cell).center(5)\n line += '%s|' % tmp\n buf += line + '\\n' + self.__hbar\n print(buf)",
"def drawCell(self,land,uland,vland,marked):\n from math import sqrt, pow\n #--Tranlate grid point (u,v) to pixel point\n if not self.changed: self.edit()\n #--u/v max/min are grid range of visible map. \n #--wcell is bit width of cell. 512 is bit width of visible map.\n (umin,umax,vmin,vmax,wcell,wmap) = (-28,27,-27,28,9,512)\n if not ((umin <= uland <= umax) and (vmin <= vland <= vmax)):\n return\n #--x0,y0 is bitmap coordinates of top left of cell in visible map.\n (x0,y0) = (4 + wcell*(uland-umin), 4 + wcell*(vmax-vland))\n #--Default to deep\n mapc = [Fmap.DEEP]*(9*9)\n heights = land and land.getHeights()\n if heights:\n #--Land heights are in 65*65 array, starting from bottom left. \n #--Coordinate conversion. Subtract one extra from height array because it's edge to edge.\n converter = [(65-2)*px/(wcell-1) for px in range(wcell)]\n for yc in range(wcell):\n ycoff = wcell*yc\n yhoff = (65-1-converter[yc])*65\n for xc in range(wcell):\n height = heights[converter[xc]+yhoff]\n if height >= 0: #--Land\n (r0,g0,b0,r1,g1,b1,scale) = (66,48,33,32,23,16,sqrt(height/3000.0))\n scale = int(scale*10)/10.0 #--Make boundaries sharper.\n r = chr(max(0,int(r0 - r1*scale)) & ~1)\n else: #--Sea\n #--Scale color from shallow to deep color.\n (r0,g0,b0,r1,g1,b1,scale) = (37,55,50,12,19,17,-height/2048.0)\n r = chr(max(0,int(r0 - r1*scale)) | 1)\n g = chr(max(0,int(g0 - g1*scale)))\n b = chr(max(0,int(b0 - b1*scale)))\n mapc[xc+ycoff] = r+g+b\n #--Draw it\n mapd = self.mapd\n for yc in range(wcell):\n ycoff = wcell*yc\n ymoff = wmap*(y0+yc)\n for xc in range(wcell):\n cOld = mapd[x0+xc+ymoff]\n cNew = mapc[xc+ycoff]\n rOld = ord(cOld[0])\n #--New or old is sea.\n if (ord(cNew[0]) & 1) or ((rOld & 1) and\n (-2 < (1.467742*rOld - ord(cOld[1])) < 2) and\n (-2 < (1.338710*rOld - ord(cOld[2])) < 2)):\n mapd[x0+xc+ymoff] = cNew\n if marked:\n self.drawBorder(Fmap.MARKED,x0+2,y0+2,x0+7,y0+7,1)\n pass",
"def draw_board(self):\n pygame.draw.rect(background, BLACK, self.outline, 3)\n # Outline is inflated here for future use as a collidebox for the mouse\n self.outline.inflate_ip(20, 20)\n for i in range(self.size-1):\n for j in range(self.size-1):\n rect = pygame.Rect(5+GRID_SIZE+(GRID_SIZE*i), 5+GRID_SIZE+(GRID_SIZE*j), GRID_SIZE, GRID_SIZE)\n pygame.draw.rect(background, COLOR[BLACK], rect, 1)\n if self.size >= 13:\n for i in range(3):\n for j in range(3):\n coords = (5+4*GRID_SIZE+(GRID_SIZE*6*i), 5+4*GRID_SIZE+(GRID_SIZE*6*j))\n pygame.draw.circle(background, COLOR[BLACK], coords, 5, 0)\n screen.blit(background, (0, 0))\n pygame.display.update()",
"def drawmaze(self):\n win=GraphWin(\"Perfect Maze\",600,600) \n win.setBackground(\"White\")\n scale=600/self.N #Used to generalize the size difference for the input of larger numbers. The background resolution/ grid size, N\n\n x1=scale\n y1=0\n x2=scale\n y2=scale\n\n ##VERTICAL LINES ####\n for i in range(self.N,0,-1):\n for j in range(1,self.N):\n if self.East[j][i]: #If East is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2)) #lines | |\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale #Increment causes |->|\n x2+=scale #Increment causes |->|\n y1+=scale #Used to draw two more\n y2+=scale #of the same spaced lines further down.\n x1=scale #Reset\n x2=scale #Reset\n\n\n ##HORIZONTAL LINES##\n x1=0\n y1=scale\n x2=scale\n y2=scale\n\n\n for i in range(self.N,1,-1):\n for j in range(1,self.N+1):\n if self.South[j][i]: #If South is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2))\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale\n x2+=scale\n y1+=scale\n y2+=scale\n x1=0\n x2=scale\n\n const=scale//5 #Very useful const which helps in placing circles on grid.\n x=scale//2\n y=600-scale//2\n #radius=(scale-(4*scale//self.N))/2\n radius=scale//2-(const)\n start=Point(x,y) #START POINT HERE \n circ=Circle(start,radius)\n circ.setFill(\"Red\")\n label=Text(start,\"Start\")\n label.setFill(\"Black\")\n circ.draw(win)\n label.draw(win)\n #print(self.CurrentCell)\n #Using the current cell from the finished algorithm(last place visited), a circle can be placed at that point.\n endpointx=(self.CurrentCell[0]-1)*scale +scale//2 ####MAKING END POINT X\n endpointy=600-(self.CurrentCell[1]-1)*scale-scale//2 ####MAKING END POINT Y\n endpoint=Point(endpointx,endpointy)\n circ2=Circle(endpoint,radius)\n circ2.setFill(\"White\")\n label2=Text(endpoint,\"End\")\n circ2.draw(win)\n label2.draw(win)\n \n ###############CREATE KEY########################\n \n \n keypointx=(self.MazeKey[0]-1)*scale +scale//2 ####MAKING END POINT X\n keypointy=600-(self.MazeKey[1]-1)*scale-scale//2 ####MAKING END POINT Y\n keypoint=Point(keypointx,keypointy)\n circ3=Circle(keypoint,radius)\n circ3.setFill(\"Blue\")\n label3=Text(keypoint,\"Key\")\n circ3.draw(win)\n label3.draw(win)\n pathcol=\"Yellow\"\n##\n\n \n for i in range(1,len(self.EntirePath)): \n pathpointx=(self.EntirePath[i][0]-1)*scale +scale//2 ####MAKING END POINT X\n pathpointy=600-(self.EntirePath[i][1]-1)*scale-scale//2 ####MAKING END POINT Y\n pathpoint=Point(pathpointx,pathpointy)\n drawpath=Circle(pathpoint,radius)\n drawpath.setFill(pathcol)\n if self.EntirePath[i]==self.KeyPath[-1]:\n pathcol=\"Violet\"\n label4=Text(keypoint,\"Key\")\n label4.draw(win) \n drawpath.draw(win)\n drawpath.setWidth(1)\n sleep(0.1)\n \n #drawpath.draw(win)\n \n label5=Text(endpoint,\"Maze Solved \")\n label5.draw(win)\n circ4=Circle(start,radius)\n circ4.setFill(\"Red\")\n circ4.draw(win) \n label6=Text(start,\"Start \")\n label6.draw(win)",
"def draw_gameBoard(self):\n\n # 15 horizontal lines\n for i in range(9):\n start_pixel_x = (i + 1) * CELL_PIXELS\n start_pixel_y = (0 + 1) * CELL_PIXELS\n end_pixel_x = (i + 1) * CELL_PIXELS\n end_pixel_y = (9 + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # 15 vertical lines\n for j in range(9):\n start_pixel_x = (0 + 1) * CELL_PIXELS\n start_pixel_y = (j + 1) * CELL_PIXELS\n end_pixel_x = (9 + 1) * CELL_PIXELS\n end_pixel_y = (j + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # place a \"star\" to particular intersections\n self.draw_star(3, 3)\n self.draw_star(7, 7)",
"def paint_cell(self, col, row, color):\r\n if isinstance(color, Number):\r\n self.A[row, col] = color\r\n else:\r\n self.A[row, col] = self.cdict[color]\r\n self.plot()",
"def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)",
"def drawSquare(t, sz):\n\n t.shape(\"turtle\")\n while 1:\n\t if sz > 200:\n\t \tbreak\n\t for j in range (36):\n\t \tt.left(10)\n\t \tsz = sz + 1 \n\n\t \tif j%2 == 1:\n\t \t\tt.color(\"red\")\n\t \telse:\n\t \t\tt.color(\"blue\")\n\t \tfor i in range(4):\n\t \t\tt.forward(sz)\n\t \t\tt.left(90)\n\t sz = sz + 1",
"def drawCell(self, x, y, bgColor, closedSection=255, borderColor=\"\", bgPattern=\"\"):\n left = self.margin + y * self.cellSize\n right = left + self.cellSize\n top = self.margin + x * self.cellSize\n bottom = top + self.cellSize\n adjustValue = self.cellSize - self.pentPadding\n # sections tuple = ([rectangles], [lines])\n sections = ([], [])\n # main section\n sections[0].append(self.canvas.create_rectangle(\n left + self.pentPadding,\n top + self.pentPadding,\n 1 + right - self.pentPadding,\n 1 + bottom - self.pentPadding,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n # border sections\n if closedSection & (1 << 0):\n sections[0].append(self.canvas.create_rectangle(\n left,\n top,\n 1 + right - adjustValue,\n 1 + bottom - adjustValue,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n if closedSection & (1 << 1):\n sections[0].append(self.canvas.create_rectangle(\n left + self.pentPadding,\n top,\n 1 + right - self.pentPadding,\n 1 + bottom - adjustValue,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n if closedSection & (1 << 2):\n sections[0].append(self.canvas.create_rectangle(\n left + adjustValue,\n top,\n 1 + right,\n 1 + bottom - adjustValue,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n if closedSection & (1 << 3):\n sections[0].append(self.canvas.create_rectangle(\n left,\n top + self.pentPadding,\n 1 + right - adjustValue,\n 1 + bottom - self.pentPadding,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n if closedSection & (1 << 4):\n sections[0].append(self.canvas.create_rectangle(\n left + adjustValue,\n top + self.pentPadding,\n 1 + right,\n 1 + bottom - self.pentPadding,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n if closedSection & (1 << 5):\n sections[0].append(self.canvas.create_rectangle(\n left,\n top + adjustValue,\n 1 + right - adjustValue,\n 1 + bottom,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n if closedSection & (1 << 6):\n sections[0].append(self.canvas.create_rectangle(\n left + self.pentPadding,\n top + adjustValue,\n 1 + right - self.pentPadding,\n 1 + bottom,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n if closedSection & (1 << 7):\n sections[0].append(self.canvas.create_rectangle(\n left + adjustValue,\n top + adjustValue,\n 1 + right,\n 1 + bottom,\n fill=bgColor, outline=borderColor, stipple=bgPattern\n ))\n # main section's borders\n if not closedSection & (1 << 1):\n sections[1].append(self.canvas.create_line(\n left + self.pentPadding,\n top + self.pentPadding,\n 1 + right - self.pentPadding,\n bottom - adjustValue\n ))\n if not closedSection & (1 << 3):\n sections[1].append(self.canvas.create_line(\n left + self.pentPadding,\n top + self.pentPadding,\n right - adjustValue,\n 1 + bottom - self.pentPadding\n ))\n if not closedSection & (1 << 4):\n sections[1].append(self.canvas.create_line(\n left + adjustValue,\n top + self.pentPadding,\n right - self.pentPadding,\n 1 + bottom - self.pentPadding\n ))\n if not closedSection & (1 << 6):\n sections[1].append(self.canvas.create_line(\n left + self.pentPadding,\n top + adjustValue,\n 1 + right - self.pentPadding,\n bottom - self.pentPadding\n ))\n # border sections' borders\n if (closedSection & (1 << 1)) and not (closedSection & (1 << 0)):\n sections[1].append(self.canvas.create_line(\n left + self.pentPadding,\n top,\n right - adjustValue,\n 1 + bottom - adjustValue\n ))\n if (closedSection & (1 << 1)) and not (closedSection & (1 << 2)):\n 
sections[1].append(self.canvas.create_line(\n left + adjustValue,\n top,\n right - self.pentPadding,\n 1 + bottom - adjustValue\n ))\n if (closedSection & (1 << 4)) and not (closedSection & (1 << 2)):\n sections[1].append(self.canvas.create_line(\n left + adjustValue,\n top + self.pentPadding,\n 1 + right,\n bottom - adjustValue\n ))\n if (closedSection & (1 << 4)) and not (closedSection & (1 << 7)):\n sections[1].append(self.canvas.create_line(\n left + adjustValue,\n top + adjustValue,\n 1 + right,\n bottom - self.pentPadding\n ))\n if (closedSection & (1 << 6)) and not (closedSection & (1 << 7)):\n sections[1].append(self.canvas.create_line(\n left + adjustValue,\n top + adjustValue,\n right - self.pentPadding,\n 1 + bottom\n ))\n if (closedSection & (1 << 6)) and not (closedSection & (1 << 5)):\n sections[1].append(self.canvas.create_line(\n left + self.pentPadding,\n top + adjustValue,\n right - adjustValue,\n 1 + bottom\n ))\n if (closedSection & (1 << 3)) and not (closedSection & (1 << 5)):\n sections[1].append(self.canvas.create_line(\n left,\n top + adjustValue,\n 1 + right - adjustValue,\n bottom - self.pentPadding\n ))\n if (closedSection & (1 << 3)) and not (closedSection & (1 << 0)):\n sections[1].append(self.canvas.create_line(\n left,\n top + self.pentPadding,\n 1 + right - adjustValue,\n bottom - adjustValue\n ))\n return sections",
"def shade_locked_cells(self):\n for i in range(9):\n for j in range(9):\n if self.grid_check[i][j] != 0:\n self.color_cell(pos=(i, j), color=LOCKED_CELL)",
"def _draw_red(self, intrusion):\n\n\t\tif intrusion is None:\n\t\t\treturn\n\n\t\tif intrusion not in self.POSSIBLE_INTRUSION_LEVELS:\n\t\t\traise ValueError(\"Given value [{}] for argument \\\"intrusion\\\" is invalid\".format(intrusion))\n\n\t\tfrom_point = Point(0, 0)\n\t\tto_point = Point(0, 0)\n\t\tcolour = Rgb()\n\n\t\tassert(len(self.POSSIBLE_INTRUSION_LEVELS) == 3)\n\n\t\t# Easy: 40 % strong_red / 316 * 316\n\t\tif intrusion == self.POSSIBLE_INTRUSION_LEVELS[0]:\n\t\t\tfrom_point = Point(92, 92)\n\t\t\tto_point = Point(407, 407)\n\t\t\tcolour = Rgb.strong_red()\n\t\t# Medium: 20 % med_red / 224 * 224\n\t\telif intrusion == self.POSSIBLE_INTRUSION_LEVELS[1]:\n\t\t\tfrom_point = Point(138, 138)\n\t\t\tto_point = Point(361, 361)\n\t\t\tcolour = Rgb.med_red()\n\t\t# Hard: 5 % light_red / 112 * 112\n\t\telif intrusion == self.POSSIBLE_INTRUSION_LEVELS[2]:\n\t\t\tfrom_point = Point(194, 194)\n\t\t\tto_point = Point(305, 305)\n\t\t\tcolour = Rgb.light_red()\n\t\telse:\n\t\t\traise NotImplementedError(\"draw_red: Intrusion level not implemented\")\n\n\t\t# TODO TEMP Currently intruded means ALL is red!\n\t\tfrom_point = Point(0, 0)\n\t\tto_point = Point(499, 499)\n\n\t\tself._draw_area(colour, from_point, to_point)",
"def draw(self, win):\n for y in range(len(self.board)):\n for x, color in enumerate(self.board[y]):\n pygame.draw.rect(win, color, (self.x+x*self.cell_size, self.y+y*self.cell_size,\n self.cell_size, self.cell_size), 0)\n\n pygame.draw.rect(win, (0, 0, 0), (self.x, self.y, self.width, self.height), BORDER_THICKNESS)",
"def update(self, grid, colRamp = ['white', 'blue']):\n \n # update the cell colors\n for y in range(len(grid)):\n yl = y + 1\n for x in range(len(grid[y])):\n xl = x + 1\n color = colRamp[int(grid[y][x])]\n self.displayWindow.update((xl, yl), color)\n\n # refresh the window\n self.displayWindow.tkupdate()",
"def draw_cell(self, board, x, y, color):\n r = self.rect_area(x, y) # gets rect area for given cell\n pygame.draw.rect(self.screen, color, r, 3)\n e = self.font.render(str(board[y][x]), 1, (0, 0, 0)) # creates number\n self.screen.blit(e, (self.x_pos + x * 80, self.y_pos + y * 80)) # draws number\n pygame.display.update(r) # updates screen to showcase rect",
"def draw(self, ctx):\n self.set_size(self.width, self.available_height) \n #Drawing cell lines\n for i in range(0, (max(self.available_width,int(self.width)) / self.cell_width) + 1):\n ctx.move_to(i * self.cell_width, 0)\n ctx.line_to(i * self.cell_width, self.available_height)\n ctx.set_line_width(1)\n red = float(self.get_style().fg[gtk.STATE_INSENSITIVE].red) / 65535\n green = float(self.get_style().fg[gtk.STATE_INSENSITIVE].green) / 65535\n blue = float(self.get_style().fg[gtk.STATE_INSENSITIVE].blue) / 65535\n ctx.set_source_rgba(red, green, blue, 0.3)\n ctx.stroke()\n greatest = self.calculate_greatest() \n # Drawing scale lines\n step = greatest / 5\n ctx.save()\n ctx.set_dash([5],5)\n for i in range(int(step), int(greatest),5):\n ctx.move_to(0, self.available_height - (self.available_height - 20) * i / greatest)\n ctx.line_to(max(self.available_width,int(self.width)), self.available_height - (self.available_height - 20) * i / greatest)\n ctx.set_source_rgba(red,green,blue,0.3)\n ctx.stroke()\n\n ctx.restore()\n # Drawing the diagram\n loadingCopy = copy.deepcopy(self.loading)\n colorIndex = 0\n loadingKeys = loadingCopy.keys()\n loadingKeys.sort()\n for key in loadingKeys:\n while loadingCopy[key] != []:\n x1, y1 = loadingCopy[key].pop(0)\n if loadingCopy[key] != []:\n x2, y2 = loadingCopy[key][0]\n else:\n x2 = self.duration\n ctx.line_to (x1 * self.cell_width, self.available_height - (self.available_height - 20) * y1 / greatest)\n ctx.line_to (x2 * self.cell_width, self.available_height - (self.available_height - 20) * y1 / greatest)\n \n ctx.set_line_width(2)\n ctx.set_source_rgba(self.colors[colorIndex][0], self.colors[colorIndex][1], self.colors[colorIndex][2],0.5)\n ctx.stroke()\n colorIndex = (colorIndex + 1) % 11",
"def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()"
] | [
"0.61169076",
"0.60230124",
"0.5847452",
"0.58186793",
"0.5788967",
"0.57456297",
"0.5700501",
"0.5635756",
"0.56212103",
"0.5600866",
"0.55408627",
"0.5493554",
"0.54904616",
"0.5441999",
"0.5429428",
"0.54221565",
"0.5420412",
"0.5406437",
"0.5404276",
"0.5400624",
"0.5391552",
"0.5382282",
"0.53793955",
"0.53761435",
"0.53673065",
"0.5358611",
"0.535543",
"0.53362",
"0.53199494",
"0.5319751"
] | 0.70212466 | 0 |
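A minimal usage sketch for the draw_cells record above. The record relies on module-level globals (DISPLAY_SURF, CELL_SIZE, COLOR_LIST) whose definitions are not part of the record, so everything below is an assumption: COLOR_LIST is guessed to come from the colour package, since the record reads an .rgb attribute (values in 0..1) from its entries, and the cell layout [x, y, cell_id, resources, load] is inferred from the indexing in the record.

```python
# Hypothetical setup for the draw_cells record above (all constants are assumptions).
import pygame
from colour import Color  # assumed provider of COLOR_LIST; entries expose .rgb in 0..1

CELL_SIZE = 40
COLOR_LIST = list(Color("white").range_to(Color("black"), 10))  # light = idle, dark = busy

pygame.init()
DISPLAY_SURF = pygame.display.set_mode((3 * CELL_SIZE, CELL_SIZE))

def load_color(resources, load):
    """Map a load/resources ratio to an RGB triple; ratios above 1 are clamped to black."""
    ratio = min(load / resources, 1)
    idx = min(int(ratio * len(COLOR_LIST)), len(COLOR_LIST) - 1)
    return [int(v * 255) for v in COLOR_LIST[idx].rgb]

# Each cell: [x, y, cell_id, resources, load] -- matches the indexing used in the record.
cells = [[0, 0, 1, 10, 0], [CELL_SIZE, 0, 2, 10, 5], [2 * CELL_SIZE, 0, 3, 10, 12]]
for x, y, _, res, load in cells:
    pygame.draw.rect(DISPLAY_SURF, load_color(res, load), (x, y, CELL_SIZE, CELL_SIZE))
pygame.display.flip()
```

Note that this sketch clamps the colour index to the last entry instead of subtracting 1 as the record does; the intent (light for lightly loaded cells, black for overloaded ones) is the same.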
Calculate each cell's load as the number of users within its range. | def cal_cell_load(cell_list, user_list):
# count users in each cell
cell_load = [0] * CELL_COLUMN * CELL_ROW
for user in user_list:
cell_load[user[2] - 1] += 1
# print(cell_load)
# update the load of each cell in cell list
for i in range(len(cell_list)):
cell_list[i][4] = cell_load[i]
return cell_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def total_load(userloads, user_ids, period):\n series = [userloads[user][period[0]:period[1]] for user in user_ids]\n total = series[0].copy()\n for single_series in series[1:]:\n total += single_series\n return total",
"def total_load_in_experiment_periods(userloads, user_ids):\n periods = experiment_periods()\n return [total_load(userloads, user_ids, period) for period in periods]",
"def get_rate_all_cell(self, users, cells, resource_id, resource_length, transmit_power):\n channels_square = np.random.rayleigh(1,[self.num_user, self.num_cell,self.num_rb]) # the fast fading from the user to all the cells\n distance = np.zeros([self.num_user, self.num_cell])\n fadings = np.zeros([self.num_user,self.num_cell,self.num_rb])\n rates = np.zeros([self.num_user, self.num_cell])\n for i in range(self.num_user):\n for j in cell_id:\n # print(num)\n # print (cells[num][0] - users[0]) ** 2\n # print (cells[num][1] - users[1]) ** 2\n # print np.sqrt((cells[num][0] - users[0]) ** 2 + (cells[num][1] - users[1]) ** 2) * resolution / 20.0\n distance[i,j] = np.sqrt((cells[j][0] - users[i][0]) ** 2 + (cells[j][1] - users[i][1]) ** 2) * resolution # calculate the distance between user and each base station\n fadings = channels_square[i,j]*(distance[i,j]**-4)\n\n for i in range(self.num_user):\n for j in range(resource_length[i]):\n interference_rb_all_cell = 0\n for m in range(np.nonzero(resource_id[:,np.nonzero(resource_id[i])[0][j]])[0].shape[0]):\n if m != i:\n interference_rb_all_cell += fadings[m,:,np.nonzero(resource_id[i])[0][j]] * transmit_power[m]/resource_length[m]\n snr_rb_all_cell = fadings[i,:,np.nonzero(resource_id[i])[0][j]] * transmit_power[i]/resource_length[i]/(interference_rb_all_cell+noise)\n rates[i] += np.log2(1+snr_rb_all_cell)\n\n return rates",
"def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity",
"def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)",
"def total_experiment_load():\n loads = tempfeeder_exp()\n return total_load_in_experiment_periods(loads, loads.user_ids)",
"def mean_experiment_load_for_user_subset(num_users, seed=None):\n loads = tempfeeder_exp()\n if seed is None:\n seed = np.random.randint(1, 2**16)\n user_ids = np.random.RandomState(seed).permutation(loads.user_ids)[:num_users]\n return [l / len(user_ids) for l in total_load_in_experiment_periods(loads, user_ids)]",
"def _load_factor(self):\n return self.size / len(self.buckets)",
"def get_load_factor(self):\n # Your code here\n return self.count/len(self.data)",
"def NSK129(userloads):\n nsk129 = NSK129_user_ids()\n total = userloads.read(nsk129[0])\n for user_id in nsk129[1:]:\n total += userloads.read(user_id)\n return total",
"def occupation_distribution(data):",
"def totalEffectiveLoad(self):\n return sum(s.effectiveLoad() for s in self.dispatcher.statuses)",
"def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]",
"def probe_load(self, user_id, current_time):\r\n if get_param(\"load_metric\") == \"estimate\":\r\n probe_start = current_time - 2 * get_param(\"network_delay\")\r\n start_index = 0\r\n while start_index < len(self.probes) and \\\r\n self.probes[start_index] <= probe_start:\r\n start_index += 1\r\n self.probes = self.probes[start_index:]\r\n estimated_load = (self.queued_tasks + self.running_tasks +\r\n len(self.probes))\r\n self.probes.append(current_time)\r\n return estimated_load\r\n elif get_param(\"load_metric\") == \"per_user_length\":\r\n return len(self.queues[user_id])\r\n elif get_param(\"load_metric\") == \"per_user_estimate\":\r\n relative_weights = get_param(\"relative_weights\")\r\n # First, we compute the number of rounds needed to empty user_id's\r\n # queue and run the potential new task. Based on that number of\r\n # rounds, we examine the queues for all users to determine how\r\n # many tasks will run before the potential task for user_id.\r\n \r\n # Tasks that will be run before a task for the given user_id\r\n # (including any currently running tasks, since we realistically\r\n # assume that we don't know when these will complete).\r\n total_tasks_before = self.running_tasks\r\n # Compute the number of rounds (including the current one) needed to empty\r\n # the queue and ultimately run the task for this user. 1 indicates\r\n # that the task will be run as part of the current round, and so forth.\r\n queue_length = len(self.queues[user_id]) + 1\r\n if self.current_user == user_id:\r\n queue_length += self.task_count\r\n rounds = math.ceil(float(queue_length) /\r\n relative_weights[user_id])\r\n # Whether the user specified by index (below) comes after user_id\r\n # in the scheduling round.\r\n past_user = False\r\n for count in range(len(self.queues)):\r\n index = (count + self.current_user) % len(self.queues)\r\n if past_user:\r\n # The user specified by index comes after user_id, so\r\n # there will be one less scheduling round before\r\n # index.\r\n potential_tasks_before = ((rounds - 1) *\r\n relative_weights[index])\r\n else:\r\n potential_tasks_before = (rounds *\r\n relative_weights[index])\r\n if self.running_tasks > 0 and self.current_user == index:\r\n # Account for tasks that have already run in this round.\r\n potential_tasks_before -= self.task_count\r\n tasks_before = min(len(self.queues[index]),\r\n potential_tasks_before)\r\n \r\n total_tasks_before += tasks_before\r\n\r\n if index == user_id:\r\n past_user = True\r\n return total_tasks_before\r\n else:\r\n return self.queued_tasks + self.running_tasks",
"def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users",
"def estimate_energy_saving_all_users(start, session):\n\n savings = 0.0\n for meter_id in get_all_user_meter_ids(session):\n saving = calc_estimated_energy_saving(meter_id, start)\n if saving is not None:\n savings += saving\n\n return savings",
"def calculate_min_max_tiles(self):",
"def get_load_factor(self):\r\n return self.num_items / self.table_size",
"def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)",
"def _calculate_range_stats(self, x_copy):\n # get the min, max values of the data\n min_val_cur, max_val_cur = torch.aminmax(x_copy)\n\n # calculate new epoch range values\n epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)\n epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)\n\n self.epoch_activation_min.copy_(epoch_min_val)\n self.epoch_activation_max.copy_(epoch_max_val)\n\n # calculate the average batch activation range\n current_batch_range = max_val_cur - min_val_cur\n new_range = (\n self.average_batch_activation_range * self.num_batches_tracked\n + current_batch_range\n ) / (self.num_batches_tracked + 1)\n\n self.average_batch_activation_range = new_range\n self.num_batches_tracked += 1 # new batch was processed\n\n return x_copy",
"def sum_range(self, lower, upper):\n if upper>self.upper:\n upper=self.upper\n if lower<self.lower:\n lower = self.lower\n\n i_l = int(np.floor((lower-self.lower)/self._dx))\n i_u = int(np.floor((upper-self.lower)/self._dx))\n total = 0.0\n for i in range(i_l,i_u):\n total+= self.y[i]\n return total",
"def sum_living_cell(self,x, y, current_round):\n if 1 <= x <= Lx - 2 and 1 <= y <= Ly - 2:#if the square is not on a side of the grid\n sum_living_cell = sum(sum(current_round[x - 1:x + 2, y - 1:y + 2])) #we take the 8 squares around it to comput the sum\n\n elif x == 0: #if the square is on the left side of the grid\n if 1 <= y <= Ly - 2: #if the square isn't on the top or bottom squares of the grid\n sum_living_cell = sum(sum(current_round[x:x + 2, y - 1:y + 2])) #we take the 5 squares around it to comput the sum\n elif y == 0: #if the square is on the top side\n sum_living_cell = sum(sum(current_round[x:x + 2, y:y + 2])) #we take the 3 squares around it to comput the sum\n elif y == Ly - 1: #if the square is on the bottom side\n sum_living_cell = sum(sum(current_round[x:x + 2, y - 1:y+1])) #we take the 3 squares around it to comput the sum\n #and so on for the other side\n elif x == Lx - 1:\n if 1 <= y <= Ly - 2:\n sum_living_cell = sum(sum(current_round[x - 1:x+1, y - 1:y + 2]))\n elif y == 0:\n sum_living_cell = sum(sum(current_round[x - 1:x+1, y:y + 2]))\n elif y == Ly - 1:\n sum_living_cell = sum(sum(current_round[x - 1:x+1, y - 1:y+1]))\n\n elif y == 0:\n if 1 <= x <= Lx - 2:\n sum_living_cell = sum(sum(current_round[x - 1:x + 2, y:y + 2]))\n elif x == 0:\n sum_living_cell = sum(sum(current_round[x:x + 2, y:y + 2]))\n elif x == Lx - 1:\n sum_living_cell = sum(sum(current_round[x - 1:x+1, y:y + 2]))\n\n elif y == Ly - 1:\n if 1 <= x <= Lx - 2:\n sum_living_cell = sum(sum(current_round[x:x + 2, y - 1:y+1]))\n elif x == 0:\n sum_living_cell = sum(sum(current_round[x:x + 2, y - 1:y+1]))\n elif x == Lx - 1:\n sum_living_cell = sum(sum(current_round[x - 1:x+1, y - 1:y+1]))\n else:\n print(\"Square situation not found !\")\n sum_living_cell -= current_round[x][y] #we delete the square that we add on the sum\n return sum_living_cell",
"def load_factor(self) -> float:\n return self.filled_count / self.table_size",
"def test_get_total_users_get(self):\n pass",
"def sum_across_rows(grid):\n pass",
"def get_total_n_cpu(self) -> int:",
"def find_boxes_used_by_subrange(\n self, subrange, box_to_user_rank_starts, box_to_user_rank_lists,\n contributing_boxes_list):\n box_in_subrange = cl.array.zeros(\n contributing_boxes_list.queue,\n contributing_boxes_list.shape[0],\n dtype=np.int8\n )\n knl = self.find_boxes_used_by_subrange_kernel(\n self.traversal.tree.box_id_dtype)\n\n knl(\n contributing_boxes_list,\n subrange[0],\n subrange[1],\n box_to_user_rank_starts,\n box_to_user_rank_lists,\n box_in_subrange\n )\n\n return box_in_subrange",
"def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load",
"def _pload4_total(loadcase_id, load, scale, xyz, F, M, p):\n assert load.line_load_dir == 'NORM', 'line_load_dir = %s' % (load.line_load_dir)\n for elem in load.eids_ref:\n fi, mi = _pload4_helper(loadcase_id, load, scale, elem, xyz, p)\n F += fi\n M += mi\n return F, M",
"def _get_read_range(self):\n\n self.total_size = get_data_size(self.storage, self.read_bucket, self.read_path)\n\n partition_size = floor(self.total_size / self.task_info.num_tasks)\n\n self.lower_bound = self.task_info.task_id * partition_size\n self.upper_bound = self.lower_bound + partition_size\n\n # self.lower_bound, self.upper_bound = adjust_bounds(self.storage, self.read_bucket, self.read_path,\n # self.lower_bound, self.upper_bound, self.total_size)\n\n print(\"Scanning bytes=%d-%d (%d)\"%(self.lower_bound, self.upper_bound,\n self.upper_bound - self.lower_bound))"
] | [
"0.64571244",
"0.5781821",
"0.5779872",
"0.57753253",
"0.56961805",
"0.56795824",
"0.55291647",
"0.5336819",
"0.5334902",
"0.5319453",
"0.5313331",
"0.52933884",
"0.5156394",
"0.51557285",
"0.515272",
"0.51381844",
"0.5076189",
"0.5056973",
"0.50533557",
"0.5039493",
"0.50256586",
"0.5022255",
"0.5021017",
"0.49710876",
"0.4959476",
"0.49585068",
"0.49552506",
"0.495372",
"0.49510303",
"0.49199393"
] | 0.71235216 | 0 |
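A small, self-contained sketch exercising the cal_cell_load record above. CELL_COLUMN and CELL_ROW are assumed grid dimensions (they are not defined in the record), and the list layouts — [x, y, cell_id, resources, load] for cells and [x, y, serving_cell_id] for users — are inferred from the indexing the record uses.

```python
# Assumed grid dimensions; the record only uses their product to size the counter list.
CELL_COLUMN, CELL_ROW = 2, 1

def cal_cell_load(cell_list, user_list):
    # Count how many users each cell serves, then write the count into cell[4].
    cell_load = [0] * CELL_COLUMN * CELL_ROW
    for user in user_list:
        cell_load[user[2] - 1] += 1
    for i in range(len(cell_list)):
        cell_list[i][4] = cell_load[i]
    return cell_list

cells = [[0, 0, 1, 10, 0], [40, 0, 2, 10, 0]]   # [x, y, cell_id, resources, load]
users = [[5, 5, 1], [12, 3, 1], [50, 8, 2]]     # [x, y, serving_cell_id]
print(cal_cell_load(cells, users))
# -> [[0, 0, 1, 10, 2], [40, 0, 2, 10, 1]]  (cell 1 serves two users, cell 2 serves one)
```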
Detects local maxima in a 3D array | def local_maxima_3D(data, order=3):
size = 1 + 2 * order
footprint = np.ones((size, size, size))
footprint[order, order, order] = 0
filtered = ndi.maximum_filter(data, footprint=footprint)
mask_local_maxima = data > filtered
coords = np.asarray(np.where(mask_local_maxima)).T
values = data[mask_local_maxima]
return coords, values | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_max(img, size=(70,100,100)):\n # Apply a maximum filter.\n max_f = ndi.maximum_filter(img, size=size)\n # Find pixels that are local maxima.\n local_max = np.where(max_f == img, 1, 0)\n return(local_max)",
"def find_local_maxima(self, arr):\n\n # walks through array, finding local maxima ranges\n\n # to initalize a new array to all false\n maxima = empty(len(arr), dtype='bool')\n maxima.fill(False)\n\n max_range_start = 0\n increasing = True\n for i in range(len(arr[:-1])):\n\n # update location of maxima start until\n if arr[i] < arr[i + 1]:\n max_range_start = i + 1\n increasing = True\n\n if (arr[i] > arr[i + 1]) and increasing is True:\n increasing = False\n # gets the local maxima midpoint\n midpoint = floor((max_range_start + i) / 2)\n maxima[midpoint] = True\n\n # catches last case\n if increasing:\n midpoint = floor((max_range_start + len(arr) - 1) / 2)\n maxima[midpoint] = True\n\n return maxima",
"def detect_min_max(arr):\n\n max_value = max(np.absolute(np.reshape(arr, -1)))\n peaks_max = []\n peaks_min = []\n x_max = []\n y_max = []\n z_max = []\n x_min = []\n y_min = []\n z_min = []\n\n for j1 in range(10, arr.shape[0]-10):\n for j2 in range(10, arr.shape[1]-10):\n for j3 in range(10, arr.shape[2]-10):\n if (np.absolute(arr[j1, j2, j3]) > 0.3*max_value):\n\n aaaa = [\n arr[j1, j2, j3 + 1], arr[j1, j2 + 1, j3],\n arr[j1 + 1, j2, j3], arr[j1, j2, j3 - 1],\n arr[j1, j2 - 1, j3], arr[j1 - 1, j2, j3],\n arr[j1 + 1, j2 + 1, j3 + 1],\n arr[j1 - 1, j2 - 1, j3 - 1],\n arr[j1 - 1, j2 + 1, j3 + 1], arr[j1, j2 + 1, j3 + 1],\n arr[j1, j2 - 1, j3 - 1], arr[j1, j2 - 1, j3 + 1],\n arr[j1, j2 + 1, j3 - 1], arr[j1 + 1, j2, j3 + 1],\n arr[j1 - 1, j2, j3 - 1], arr[j1 - 1, j2, j3 + 1],\n arr[j1 + 1, j2, j3 - 1], arr[j1 + 1, j2 + 1, j3],\n arr[j1 - 1, j2 - 1, j3], arr[j1 + 1, j2 - 1, j3],\n arr[j1 - 1, j2 + 1, j3], arr\n [j1 + 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 + 1, j3 - 1], arr\n [j1 - 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 - 1, j3 - 1], arr\n [j1 - 1, j2 + 1, j3 - 1]]\n bbbb = [\n arr[j1, j2, j3 + 9], arr[j1, j2 + 9, j3],\n arr[j1 + 9, j2, j3], arr[j1, j2, j3 - 9],\n arr[j1, j2 - 9, j3], arr[j1 - 9, j2, j3]]\n\n if ((arr[j1, j2, j3] > max(aaaa)) and (max(aaaa) > max(bbbb))):\n peaks_max = np.append(peaks_max, arr[j1, j2, j3])\n x_max = np.append(x_max, j1)\n y_max = np.append(y_max, j2)\n z_max = np.append(z_max, j3)\n\n if ((arr[j1, j2, j3] < min(aaaa)) and (min(aaaa) < min(bbbb))):\n peaks_min = np.append(peaks_min, arr[j1, j2, j3])\n x_min = np.append(x_min, j1)\n y_min = np.append(y_min, j2)\n z_min = np.append(z_min, j3)\n\n return peaks_min, np.vstack(\n (x_min, y_min, z_min)), peaks_max, np.vstack(\n (x_max, y_max, z_max))",
"def detect_local_minima(arr):\n # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710\n # define an connected neighborhood\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure\n neighborhood = morphology.generate_binary_structure(len(arr.shape),2)\n # apply the local minimum filter; all locations of minimum value \n # in their neighborhood are set to 1\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter\n local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)\n # local_min is a mask that contains the peaks we are \n # looking for, but also the background.\n # In order to isolate the peaks we must remove the background from the mask.\n # \n # we create the mask of the background\n background = (arr==0)\n # \n # a little technicality: we must erode the background in order to \n # successfully subtract it from local_min, otherwise a line will \n # appear along the background border (artifact of the local minimum filter)\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion\n eroded_background = morphology.binary_erosion(\n background, structure=neighborhood, border_value=1)\n # \n # we obtain the final mask, containing only peaks, \n # by removing the background from the local_min mask\n detected_minima = local_min - eroded_background\n return np.where(detected_minima)",
"def detect_local_minima(arr, mask=None):\n # https://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710\n # define an connected neighborhood\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure\n neighborhood = morphology.generate_binary_structure(len(arr.shape),2)\n # apply the local minimum filter; all locations of minimum value \n # in their neighborhood are set to 1\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter\n local_max = (filters.maximum_filter(arr, footprint=neighborhood)==arr)\n local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)\n # local_min is a mask that contains the peaks we are \n # looking for, but also the background.\n # In order to isolate the peaks we must remove the background from the mask.\n # \n # we create the mask of the background\n background = (arr==0)\n # \n # a little technicality: we must erode the background in order to \n # successfully subtract it from local_min, otherwise a line will \n # appear along the background border (artifact of the local minimum filter)\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion\n eroded_background = morphology.binary_erosion(\n background, structure=neighborhood, border_value=1)\n # \n # we obtain the final mask, containing only peaks, \n # by removing the background from the local_min mask\n detected_maxima = np.bitwise_xor(local_max, eroded_background)\n detected_minima = np.bitwise_xor(local_min, eroded_background)\n detected_maxmin = np.bitwise_or(detected_maxima, detected_minima)\n if mask is not None:\n detected_maxmin[mask==0] = 0\n return np.where(detected_maxmin)",
"def find_all_maxima(arr):\n\n checks = np.r_[True, arr[1:] > arr[:-1]] & np.r_[arr[:-1] > arr[1:], True]\n maxima = np.where(checks)[0]\n return maxima",
"def local_max(x, threshold=1e-5):\n maxima = np.r_[True, x[1:] > x[:-1]] & np.r_[x[:-1] > x[1:] , True]\n # select all local maxima above the threshold\n maxima_f = maxima & np.r_[x > threshold , True][:-1]\n peak_indices = np.where(maxima_f==True)[0]\n return np.array(peak_indices)",
"def local_maxima(X_nm):\n N,M = X_nm.shape\n return (\n np.r_[X_nm[:-1,:] >= X_nm[1:,:],np.zeros((1,M),bool)] & \n np.r_[np.zeros((1,M),bool),X_nm[1:,:] >= X_nm[:-1,:]] & \n np.c_[X_nm[:,:-1] >= X_nm[:,1:],np.zeros((N,1),bool)] &\n np.c_[np.zeros((N,1),bool),X_nm[:,1:] > X_nm[:,:-1]])",
"def peak_local_max_nD(img, size=(70,100,100), min_dist=0):\n def has_neighbor(peak, peak_list, min_dist):\n \"\"\"Find whether a peak already exists within minimum distance of this peak\"\"\"\n for testpeak in peak_list:\n if (distance.euclidean(peak, testpeak) < min_dist):\n return True\n return False\n # Find pixels that represent local maxima. Produces clusters of connected\n # pixels at the centers of objects.\n maxes = local_max(img, size)\n # Connect these pixels in a labelmask.\n conn_comp, info = ndi.label(maxes)\n # Get the centroids of each local max object, update mask and list.\n local_peak_mask = np.zeros_like(img)\n local_peaks = []\n peak_num=1\n\n for id_ in np.unique(conn_comp)[1:]:\n centroid = get_object_centroid(conn_comp, id_)\n # If there is no already-added seed within the minimum distance,\n # add this seed to the mask and list.\n if (not has_neighbor(centroid, local_peaks, min_dist)):\n local_peak_mask[centroid] = peak_num\n local_peaks.append(centroid)\n peak_num = peak_num + 1\n return local_peak_mask, local_peaks",
"def local_maxima2(array, min_distance = 1, periodic=False, edges_allowed=True):\n array = np.asarray(array)\n cval = 0 \n \n if periodic: \n mode = 'wrap' \n elif edges_allowed: \n mode = 'nearest' \n else: \n mode = 'constant' \n cval = array.max()+1 \n max_points = array == ndimage.maximum_filter(array, 1+2*min_distance, mode=mode, cval=cval) \n \n return [indices[max_points] for indices in np.indices(array.shape)][0]",
"def find_local_maxima(tens):\n return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P')))",
"def get_scale_local_maximas_vectorized(cube_coordinates, laplacian_cube):\n x, y, z = [ cube_coordinates[:, ind] for ind in range(3) ]\n \n point_responses = laplacian_cube[x, y, z]\n lowers = point_responses.copy()\n uppers = point_responses.copy()\n not_layer_0 = z > 0\n lower_responses = laplacian_cube[x[not_layer_0], y[not_layer_0], z[not_layer_0]-1]\n lowers[not_layer_0] = lower_responses \n \n not_max_layer = z < (laplacian_cube.shape[2] - 1)\n upper_responses = laplacian_cube[x[not_max_layer], y[not_max_layer], z[not_max_layer]+1]\n uppers[not_max_layer] = upper_responses\n \n lo_check = np.ones(z.shape, dtype=np.bool)\n lo_check[not_layer_0] = (point_responses > lowers)[not_layer_0]\n hi_check = np.ones(z.shape, dtype=np.bool)\n hi_check[not_max_layer] = (point_responses > uppers)[not_max_layer]\n \n return cube_coordinates[lo_check & hi_check]",
"def _get_max_preds_3d(heatmaps):\n assert isinstance(heatmaps, np.ndarray), 'heatmaps should be numpy.ndarray'\n assert heatmaps.ndim == 5, 'heatmaps should be 5-ndim'\n N, K, D, H, W = heatmaps.shape\n heatmaps_reshaped = heatmaps.reshape((N, K, -1))\n idx = np.argmax(heatmaps_reshaped, 2).reshape((N, K, 1))\n maxvals = np.amax(heatmaps_reshaped, 2).reshape((N, K, 1))\n preds = np.zeros((N, K, 3), dtype=np.float32)\n _idx = idx[..., 0]\n preds[..., 2] = _idx // (H * W)\n preds[..., 1] = _idx // W % H\n preds[..., 0] = _idx % W\n preds = np.where(maxvals > 0.0, preds, -1)\n return preds, maxvals",
"def peak_finder(filt_im, dist, threshold):\n from skimage.feature import peak_local_max\n coordinates = peak_local_max(filt_im, min_distance=dist, threshold_abs=threshold)\n return coordinates",
"def peakdet2d(image):\n # define an 8-connected neighborhood\n neighborhood = generate_binary_structure(2,2)\n\n #apply the local maximum filter; all pixel of maximal value \n #in their neighborhood are set to 1\n local_max = maximum_filter(image, footprint=neighborhood)==image\n #local_max is a mask that contains the peaks we are \n #looking for, but also the background.\n #In order to isolate the peaks we must remove the background from the mask.\n\n #we create the mask of the background\n background = (image==0)\n\n #a little technicality: we must erode the background in order to \n #successfully subtract it form local_max, otherwise a line will \n #appear along the background border (artifact of the local maximum filter)\n eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)\n\n #we obtain the final mask, containing only peaks, \n #by removing the background from the local_max mask\n detected_peaks = local_max - eroded_background\n\n return(detected_peaks)",
"def three_array_max(array_list: List[np.ndarray]) -> np.ndarray:\n temp = np.maximum(array_list[0], array_list[1])\n all_maxs = np.maximum(temp, array_list[2])\n\n return all_maxs",
"def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2,2)\n local_max = maximum_filter(image, footprint=neighborhood)==image\n local_max[image<(image.max()*0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num)+1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:,0], centers[:,1]] = True\n\n return ret",
"def find_max_score_location(grid, shape):",
"def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2, 2)\n local_max = maximum_filter(image, footprint=neighborhood) == image\n local_max[image < (image.max() * 0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num) + 1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:, 0], centers[:, 1]] = True\n\n return ret",
"def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2, 2)\n local_max = maximum_filter(image, footprint=neighborhood) == image\n local_max[image < (image.max() * 0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num) + 1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:, 0], centers[:, 1]] = True\n\n return ret",
"def detect_peaks(image):\r\n\r\n # define an 8-connected neighborhood\r\n neighborhood = ndimage.morphology.generate_binary_structure(2,2)\r\n\r\n #apply the local maximum filter; all pixel of maximal value \r\n #in their neighborhood are set to 1\r\n local_max = ndimage.filters.maximum_filter(image, footprint=neighborhood)==image\r\n #local_max is a mask that contains the peaks we are \r\n #looking for, but also the background.\r\n #In order to isolate the peaks we must remove the background from the mask.\r\n\r\n #we create the mask of the background\r\n background = (image==0)\r\n\r\n #a little technicality: we must erode the background in order to \r\n #successfully subtract it form local_max, otherwise a line will \r\n #appear along the background border (artifact of the local maximum filter)\r\n eroded_background = ndimage.morphology.binary_erosion(background, structure=neighborhood, border_value=1)\r\n\r\n #we obtain the final mask, containing only peaks, \r\n #by removing the background from the local_max mask (xor operation)\r\n detected_peaks = local_max ^ eroded_background\r\n\r\n return detected_peaks",
"def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])",
"def is_mountain_array(self, a):\r\n n = len(a)\r\n if n < 3:\r\n return False\r\n # Invalidate monotonic slopes\r\n elif (a[0] > a[1] or\r\n a[n - 2] < a[n - 1]):\r\n return False\r\n\r\n p = None\r\n for i in range(0, n - 1):\r\n\r\n # Search for local maxima\r\n if p is None:\r\n if a[i] > a[i + 1]:\r\n p = i\r\n if a[i] == a[i + 1]:\r\n return False\r\n\r\n # Confirm maxima as global maxima\r\n else:\r\n if a[i] <= a[i + 1]:\r\n return False\r\n\r\n return True",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def get_gridpoint_max(self):\n ind_array = np.indices(self.results_array.shape)\n maxes = []\n\n def get_max(x, y, z):\n \"\"\"\n Would be funnier if I knew a Max.\n \"\"\"\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Number of zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n maxes.append(max(hist_arr))\n\n vget_max = np.vectorize(get_max, otypes=[list])\n vget_max(ind_array[0], ind_array[1], ind_array[2])\n return maxes",
"def batch_maximum(imstack):\n maxpos = np.zeros((imstack.shape[0], 2))\n for i in range(imstack.shape[0]):\n if np.isnan(imstack[i, 0, 0]):\n maxpos[i, 0] = np.nan\n maxpos[i, 1] = np.nan\n else:\n ind = np.unravel_index(\n np.argmax(np.squeeze(imstack[i]), axis=None),\n np.squeeze(imstack[i]).shape,\n )\n maxpos[i, 0] = ind[1]\n maxpos[i, 1] = ind[0]\n return maxpos",
"def findMax(img):\n\td = minMaxLoc(img)\n\treturn {\"maxVal\":d[\"maxVal\"], \"maxLoc\":d[\"maxLoc\"]}",
"def spatial_argmax(logit):\n weights = F.softmax(logit.view(logit.size(0), -1), dim=-1).view_as(logit)\n\n\n \n two = (torch.ones_like(logit)*2).mean(dim=(-2,-1))\n \n\n true = torch.stack((logit.mean(dim=[-2,-1]),(weights.sum(1) * torch.linspace(-1, 1, logit.size(2)).to(logit.device)[None]).sum(1),(weights.sum(2) * torch.linspace(-1, 1, logit.size(1)).to(logit.device)[None]).sum(1)), 1)\n # print(true.size())\n\n false = torch.stack((logit.mean(dim=[-2,-1]),two,two), 1)\n\n # print(false.size())\n\n # print(logit.mean(dim=[-2,-1])[:,None].repeat(1,3).size())\n\n return torch.where(logit.mean(dim=[-2,-1])[:,None].repeat(1,3) > 0,true,false)",
"def get_max_point(image):\r\n max_value= 0\r\n better_point= None\r\n for line in range(len(image)):\r\n for column in range(len(image[0])):\r\n if image[line][column]>max_value:\r\n max_value= image[line][column]\r\n better_point = [line,column]\r\n return better_point",
"def life(arr):\n\tres_arr = arr\n\tmax_x = len(arr[0]) - 1\n\tmax_y = len(arr) - 1\n\n\tfor y, y_value in enumerate(arr):\n\t\tfor x, x_value in enumerate(y_value):\n\t\t\tneighb_count = get_count_life_neighbor(arr, x, y, max_x, max_y)\n\t\t\tif x_value:\n\t\t\t\tif neighb_count < 2 or neighb_count > 3:\n\t\t\t\t\tres_arr[y][x] = False\n\t\t\telse:\n\t\t\t\tif neighb_count == 3:\n\t\t\t\t\tres_arr[y][x] = True\n\treturn res_arr"
] | [
"0.74168485",
"0.7036359",
"0.700396",
"0.6864814",
"0.6845005",
"0.66943693",
"0.6628028",
"0.6617119",
"0.6600229",
"0.6450245",
"0.6397976",
"0.6194478",
"0.6130189",
"0.60908407",
"0.6077834",
"0.60268056",
"0.5982528",
"0.595608",
"0.5951729",
"0.5951729",
"0.59401405",
"0.5939758",
"0.59240496",
"0.5821478",
"0.5811485",
"0.58097357",
"0.57554257",
"0.57378656",
"0.5727378",
"0.5715449"
] | 0.70661926 | 1 |
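A usage sketch for the local_maxima_3D record above, run on a small synthetic volume. The imports (numpy as np and scipy.ndimage as ndi) are inferred from the names used in the record; they are not shown in the record itself.

```python
import numpy as np
from scipy import ndimage as ndi

def local_maxima_3D(data, order=3):
    # A voxel is a local maximum if it exceeds every neighbour within `order` voxels.
    size = 1 + 2 * order
    footprint = np.ones((size, size, size))
    footprint[order, order, order] = 0          # exclude the centre voxel itself
    filtered = ndi.maximum_filter(data, footprint=footprint)
    mask = data > filtered
    return np.asarray(np.where(mask)).T, data[mask]

# Synthetic volume with two isolated peaks.
vol = np.zeros((20, 20, 20))
vol[5, 5, 5] = 3.0
vol[14, 12, 9] = 7.5
coords, values = local_maxima_3D(vol, order=3)
print(coords)   # [[ 5  5  5]
                #  [14 12  9]]
print(values)   # [3.  7.5]
```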
Function to build an HTML report of the number of games per category | def gamecategory_list(request):
if request.method == 'GET':
# Connect to project database
with sqlite3.connect(Connection.db_path) as conn:
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
            # Query for the number of games in each category.
db_cursor.execute("""
SELECT
c.label,
COUNT(g.title) as number
FROM raterprojectapi_categories c
JOIN
raterprojectapi_game_categories gc on gc.categories_id = c.id
JOIN
raterprojectapi_game g on gc.game_id = g.id
GROUP BY c.label
""")
dataset = db_cursor.fetchall()
number_games_per_cat = []
for row in dataset:
                # Create a Categories instance and set its properties. The string in brackets matches the SQL result column.
categories = Categories()
categories.label = row["label"]
categories.count = row["number"]
number_games_per_cat.append(categories)
# Specify the Django template and provide data context
template = 'categories/number_of_games_per_category.html'
context = {
'gamecategory_list': number_games_per_cat
}
return render(request, template, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_count_grouped_by_genre():\n dict_of_genre = reports.count_grouped_by_genre(filename)\n print(\"Game grouped by genre:\")\n for genre, value in dict_of_genre.items():\n print(\"{}: {}\".format(genre, value))\n print()",
"def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")",
"def app_count_data():\n stmt = db.session.query(Appsdata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n #df = pd.read_csv(\n # 'googleplaystore.csv')\n\n #print(\"df:\")\n #print(df)\n\n reduced_df = df.loc[: , ['Category' , 'Installs']]\n #print(\"reduced_df:\")\n #print(reduced_df)\n\n reduced_df['Installs'] = reduced_df['Installs']\n grouped_reduced_df = reduced_df.groupby(['Category']).count()\n # print(\"grouped:\")\n #print(list(grouped_reduced_df.index))\n\n category_list = list(grouped_reduced_df.index)\n installs_count = list(grouped_reduced_df['Installs'])\n\n # Format the data for Plotly\n plot_trace = {\n \"x\": category_list,\n \"y\": installs_count,\n \"type\": \"bar\"\n \n }\n return jsonify(plot_trace)",
"def get_stories(df):\n categories = df.get_categorical().columns\n continuous = df.get_numerical().columns\n\n stories = []\n cat_copy = list(categories)\n for col in categories:\n # Remove the current col\n if col in cat_copy:\n cat_copy.remove(col)\n try:\n # Get comparison variable\n x = cat_copy.pop()\n d = pd.pivot_table(df.data, index=(col), values=[x],\\\n aggfunc='count').reset_index().sort_values(by=x, ascending=False)\n stories.append({\n 'question': \"%s with high count of %s\" %(col, x),\n 'question_html': \"<span class='tag is-primary is-light'>%s</span>\\\n with high count of <span class='tag is-success is-light'>%s</span>\" % (col, x),\n 'answer': d[col].head(1).values[0],\n 'misc': d\n })\n except IndexError as e:\n pass\n \n for num in continuous:\n d = pd.pivot_table(df.data, index=[col], values=[num],\\\n aggfunc=np.sum).reset_index().sort_values(by=num, ascending=False)\n stories.append({\n 'question': \"%s with sum of %s\" % (col, num),\n 'question_html': \"<span class='tag is-primary is-light'>%s</span>\\\n with sum of <span class='tag is-success is-light'>%s</span>\" % (col, num),\n 'answer': round(d[num].head(1).values[0]),\n 'misc': d\n })\n\n return stories",
"def n_count(category):\r\n sql = text('''\r\n WITH uniq AS (\r\n SELECT COUNT(app.id) FROM task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id)\r\n SELECT COUNT(*) FROM uniq\r\n ''')\r\n\r\n results = db.engine.execute(sql, category=category)\r\n count = 0\r\n for row in results:\r\n count = row[0]\r\n return count",
"def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)",
"def writing_count_grouped_by_genre(file_name):\n result = str(reports.count_grouped_by_genre(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")",
"def create_table(categories:list)->str:\n\n total_spent = get_total_spent(categories)\n\n table = str()\n\n for row_num in range(11):\n row_label = 100 - row_num*10\n\n # Row label creation - ie 100| .. 90| ... 80| ...etc\n row = f\"{row_label:>3}|\"\n\n for category in categories:\n percentage = math.floor(category.total_spent/total_spent * 10) * 10\n if percentage >= row_label:\n row += ' o '\n else:\n row += ' '\n \n table += row + ' \\n'\n return table",
"def _print_summary_counts(\n self, out_file, categories, result_events_by_status, extra_rows):\n\n # Get max length for category printed name\n category_with_max_printed_name = max(\n categories, key=lambda x: len(x[1]))\n max_category_name_length = len(category_with_max_printed_name[1])\n\n # If we are provided with extra rows, consider these row name lengths.\n if extra_rows is not None:\n for row in extra_rows:\n name_length = len(row[0])\n if name_length > max_category_name_length:\n max_category_name_length = name_length\n\n self._print_banner(out_file, \"Test Result Summary\")\n\n # Prepend extra rows\n if extra_rows is not None:\n for row in extra_rows:\n extra_label = \"{}:\".format(row[0]).ljust(\n max_category_name_length + 1)\n out_file.write(\"{} {:4}\\n\".format(extra_label, row[1]))\n\n for category in categories:\n result_status_id = category[0]\n result_label = \"{}:\".format(category[1]).ljust(\n max_category_name_length + 1)\n count = len(result_events_by_status[result_status_id])\n out_file.write(\"{} {:4}\\n\".format(\n result_label,\n count))",
"def generate_simple_report(final_dictionary):\n otpt = open('multifind_simple_summary.txt', 'w')\n for cat in final_dictionary:\n category_name = cat[0]\n category_cont = str(len(cat[1]))\n otpt.write(category_name + ' ')\n otpt.write(category_cont + '\\n')\n otpt.close()",
"def create_spend_chart(category_list:list) -> str:\n\n header = 'Percentage spent by category\\n'\n table = create_table(category_list)\n divider = ' ' + '-'*len(category_list*3) + '-'\n footer = create_footer(category_list)\n full_chart = header + table + divider + footer\n\n return full_chart[:-1]",
"def display_categories(\n sortby: str = \"market_cap_desc\",\n limit: int = 15,\n export: str = \"\",\n sheet_name: Optional[str] = None,\n pie: bool = False,\n) -> None:\n fig = OpenBBFigure()\n\n df = gecko.get_top_crypto_categories(sortby)\n df_data = df\n if not df.empty:\n if pie or fig.is_image_export(export):\n df_data[f\"% relative to top {limit}\"] = (\n df_data[\"Market Cap\"] / df_data[\"Market Cap\"].sum()\n ) * 100\n stables_to_display = df_data[df_data[f\"% relative to top {limit}\"] >= 1]\n other_stables = df_data[df_data[f\"% relative to top {limit}\"] < 1]\n values_list = list(stables_to_display[f\"% relative to top {limit}\"].values)\n values_list.append(other_stables[f\"% relative to top {limit}\"].sum())\n labels_list = list(stables_to_display[\"Name\"].values)\n labels_list.append(\"Others\")\n\n fig = plot_pie_chart(\n labels=labels_list,\n values=values_list,\n title=f\"Market Cap distribution of top {limit} crypto categories\",\n )\n if not fig.is_image_export(export):\n fig.show()\n\n df = df.applymap(lambda x: lambda_long_number_format_with_type_check(x))\n print_rich_table(\n df,\n headers=list(df.columns),\n floatfmt=\".2f\",\n show_index=False,\n export=bool(export),\n limit=limit,\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"cgcategories\",\n df_data,\n sheet_name,\n fig,\n )\n else:\n console.print(\"\\nUnable to retrieve data from CoinGecko.\\n\")",
"def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n\n # to be read and inserted into db\n totalads = 0 # total number of ads seen during this session\n totaluniqads = len(ads) # does not support multicategories at this point\n\n # for each category, for each test site, count total number of ads seen\n totalad_category = {} \n # for each category, for each test site, count total number of uniq ads seen\n uniqad_category = {}\n \n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:\n bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\\\n Site-Context, BugCount, BugSrc\\n')\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0 # for each site\n uniq_ads = [] # for each site\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(uuid, test_site,\n refresh_num, train_category, 'N/A', bugcount, bug.get_src()))\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads # global count for total ads\n\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:\n # write some metadata information about this session\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n\n for train_category, cat_dict in results.items(): \n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(train_category,\n test_site, num_of_visit, totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))",
"def category_freq(dataframe):\n total_rows = len(dataframe)\n for col in dataframe:\n # don't include the Name category in our results\n if dataframe[col].name == 'Name':\n continue\n num_categories = len(dataframe.groupby(col))\n print(\n \"---- %s TOTAL CATEGORIES FOR %s ----\"\n % (num_categories, dataframe[col].name))\n # generate series to list occurrences of each column value\n col_vals = dataframe[col].value_counts()\n # store series as DataFrame\n result_df = col_vals.to_frame()\n # generate series to display percentages\n as_percent = 100 * col_vals / float(total_rows)\n # append percentages column to DataFrame\n result_df['percentage'] = as_percent\n print(result_df)",
"def create_spend_chart(categories):\n graph = \"Percentage spent by category\\n\"\n total_spendings = 0\n spendings = {}\n for category in categories:\n spendings[category.name] = 0\n for x in category.ledger:\n if x['amount'] < 0: #the withdraws are the ones with negative values\n spendings[category.name] += x['amount']\n spendings[category.name] = abs(spendings[category.name])\n for amount in spendings:\n total_spendings += spendings[amount]\n for amount in spendings:\n spendings[amount] = round_down(spendings[amount] / total_spendings * 100) #getting the percentage rounded down\n\n for i in range(100, -10, -10):\n \"\"\"getting the main part of the graph\"\"\"\n graph += str(i).rjust(3) + '| '\n for category in categories:\n if spendings[category.name] >= i:\n graph += 'o '\n else:\n graph += ' '\n graph += '\\n'\n graph += ' ' + '-' * (1 + len(categories) * 3) + '\\n'\n\n maxlen = 0\n for category in categories:\n if len(category.name) > maxlen:\n maxlen = len(category.name) # max string length between category names\n for i in range(maxlen):\n \"\"\"getting the labels for the x-axis\"\"\"\n graph += ' '\n for category in categories:\n if len(category.name) > i:\n graph += category.name[i] + ' '\n else:\n graph += ' '\n graph += '\\n '\n return graph[0:-1]",
"def buildStatsTitle(category, benchmarkNames, transactionCount):\n\n title = '{} latency statistics ({} transactions) {}'.format(\n category, transactionCount, ' vs benchmark - ' if benchmarkNames else ''\n )\n element = HTML().div(klass=TIME_POINT_STATS_TITLE)\n element.h3(title, style='display: inline')\n\n if benchmarkNames:\n bechmarkSelector = element.select(onchange='onSelectBenchmark(this)', klass=SELECTOR)\n for benchmarkName in benchmarkNames:\n bechmarkSelector.option(benchmarkName)\n return element",
"async def cgames(self, ctx):\r\n server = ctx.message.server\r\n members = server.members\r\n\r\n freq_list = {}\r\n for member in members:\r\n if member != None and member.game != None and member.game.name != None and not member.bot:\r\n if member.game.name not in freq_list:\r\n freq_list[member.game.name] = 0\r\n freq_list[member.game.name]+=1\r\n\r\n sorted_list = sorted(freq_list.items(), key=operator.itemgetter(1), reverse = True) \r\n\r\n if not freq_list:\r\n await self.bot.say(\"Surprisingly, no one is playing anything.\")\r\n else: \r\n # create display\r\n msg = \"```These are the server's most played games at the moment: \\n\\n\"\r\n msg += \"{:<25s}{:>25s}\\n\".format(\"Game:\", \"# Playing:\")\r\n max_games = min(len(sorted_list), 10)\r\n for i in range(max_games):\r\n game, freq = sorted_list[i]\r\n if len(game) > 25:\r\n trunc_game = game [0:21] + \"...\"\r\n msg+= \"{:<25s}{:>25d}\\n\".format(trunc_game, freq_list[game])\r\n else:\r\n msg+= \"{:<25s}{:>25d}\\n\".format(game, freq_list[game])\r\n msg += \"```\" \r\n await self.bot.say(msg)",
"def category(request):\r\n\tcategory = request.GET.get('category')\r\n\tgames = Game.objects.filter(category=category)\r\n\treturn render(request, 'home.html', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})",
"def get_layout(self, songs: dict) -> html.Div:\n # Prepare the data that is to be shown.\n columns = [\"title\", \"artist\", \"album\", \"release_date\", \"genre\", \"publisher\", \"composer\", \"duration\", \"bit_rate\"]\n complete = 0\n years = []\n genre = {}\n for song in songs:\n if all(i in songs[song].keys() for i in columns):\n complete += 1\n if songs[song].get(\"genre\") not in genre:\n genre[songs[song].get(\"genre\")] = 1 \n else:\n genre[songs[song].get(\"genre\")] += 1\n if \"release_date\" in songs[song]:\n if songs[song].get(\"release_date\") not in years:\n years.append(songs[song].get(\"release_date\"))\n\n if len(genre) >= 10:\n for element in list(genre.keys()):\n if genre[element] <= 10:\n genre.pop(element)\n if None in genre:\n genre[\"Unknown\"] = genre.pop(None)\n\n # Create basic statistics.\n quick_stat = html.Div(\n children=[\n dbc.Alert(\"Music number : \" + str(len(songs)) , color=\"primary\"),\n dbc.Alert(\"Music number with complete data : \" + str(complete) , color=\"success\"),\n dbc.Alert(\"Music number with incomplete data : \" + str(len(songs) - complete) , color=\"danger\"),\n dbc.Alert(\"Music dates range from : \" + str(min(years)) + \" to \" + str(max(years)), color=\"dark\"),\n dbc.Alert(\"Music is from \" + str(len(years)) + \" different years\", color=\"dark\"),\n dbc.Alert(\"Music is from \" + str(len(genre)) + \" different genres\", color=\"dark\"),\n ]\n )\n\n # Create the \"Number of genres\" pie chart.\n pie = dcc.Graph(\n id=\"piechart\",\n figure={\n \"data\": [\n {\n \"labels\": list(genre.keys()),\n \"values\": list(genre.values()),\n \"type\": \"pie\",\n \"marker\": {\"line\": {\"color\": \"white\", \"width\": 1}},\n \"hoverinfo\": \"values\",\n \"textinfo\": \"label\",\n }\n ],\n \"layout\": {\n \"title\": \"Number of genres\",\n \"showlegend\": True,\n \"autosize\": True,\n },\n },\n )\n\n # Make the pie chart appear from a collapsable button.\n collapse = html.Div(\n [\n dbc.Button(\n \"Basic statistics\",\n id=\"collapse-button\",\n className=\"mb-3\",\n color=\"primary\",\n ),\n dbc.Collapse(\n quick_stat,\n id=\"collapse\",\n ),\n ], style={'marginTop': 50, 'textAlign': 'center',}\n )\n\n @app.callback(\n Output(\"collapse\", \"is_open\"),\n [Input(\"collapse-button\", \"n_clicks\")],\n [State(\"collapse\", \"is_open\")],\n )\n def toggle_collapse(n, is_open):\n if n:\n return not is_open\n return is_open\n\n # Group the whole layout in a single HTML division.\n layout_home = html.Div(\n children=[\n nav,\n collapse,\n pie,\n ]\n )\n\n return layout_home",
"def create_files_with_aggregates(df):\n # Save data grouped by title and channel\n df.groupby(['title', 'channel'])\\\n .size()\\\n .reset_index(name='counter')\\\n .sort_values(by=['counter'], ascending=False)\\\n .to_csv('views_by_title&channel.xlsx', index=False)\\\n\n\n # Views by channel\n df['channel'].value_counts().to_csv('views_by_channel.xlsx')\n\n # Views by day\n days = list()\n for t in df['time'].str.split('T'):\n # t[0] => day !!! t[1] => time\n days.append(t[0])\n df['day'] = days\n\n df.groupby(['day']).size().reset_index(name='counter').to_csv('views_by_day.xlsx', index=False)\n\n\n # Views by day of week\n df['day'] = pd.to_datetime(df['day'])\n df['day_of_week'] = df['day'].dt.day_name()\n df.groupby(['day_of_week']).size().reset_index(name='counter').to_csv('views_by_day_week.xlsx', index=False)\n\n create_plots(df)\n return df",
"def summarizeNuclideCategories(self):\n runLog.info(\n \"Nuclide categorization for cross section temperature assignments:\\n\"\n + tabulate.tabulate(\n [\n (\n \"Fuel\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"fuel\"]\n ),\n ),\n (\n \"Coolant\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"coolant\"]\n ),\n ),\n (\n \"Structure\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"structure\"]\n ),\n ),\n ],\n headers=[\"Nuclide Category\", \"Nuclides\"],\n tablefmt=\"armi\",\n )\n )",
"def print_stats(category, a_column, limit_hi, limit_lo, num_outliers):\n print(\"\"\"\\nThe '{}' category:\n Count: {}\n Distinct: {}\n Min_value: {}\n Max_value: {}\n Median: {}\n Mean: {:.3f}\n St. dev.: {:.3f}\n Limit_Low: {:.3f}\n Limit_High: {:.3f}\n # outliers: {:.3f}\n \"\"\"\n .format(category,\n a_column.count(),\n len(a_column.unique()),\n np.min(a_column),\n np.max(a_column),\n np.median(a_column),\n np.mean(a_column),\n np.std(a_column),\n limit_lo,\n limit_hi,\n num_outliers\n )\n )",
"def print_stats(kmer_table):\n print(\"MY OUTPUT\")\n res = kmer_table\n unique = [i for i, j in res.items() if j == 1]\n print(\"Unique: {}\".format(len(unique)))\n print(\"Distinct: {}\".format(len(res)))\n total = sum(res.values())\n print(\"Total: {}\".format(total))\n max_count = max(res.values())\n print(\"Max count: {}\".format(max_count))\n for k, v in res.items():\n if v == max_count:\n print(k, v)\n print('----')\n return None",
"def get_interesting_stories(df):\n categories = df.get_categorical().columns\n continuous = df.get_numerical().columns\n\n stories = []\n cat_copy = list(categories)\n for col in categories:\n # Remove the current col\n if col in cat_copy:\n cat_copy.remove(col)\n try:\n # Get comparison variable\n x = cat_copy.pop()\n d = pd.pivot_table(df.data, index=(col), values=[x],\\\n aggfunc='count').reset_index().sort_values(by=x, ascending=False)\n\n\n # Highest %s was %s on the %s %s\n stories.append({\n 'question': \"Highest %s was %s, %s %s.\" %(x, d[x].head(1).values[0], col, d[col].head(1).values[0]),\n 'question_html': \"Highest <span class='tag is-primary is-light'>%s</span>\\\n was %s of <span class='tag is-success is-light'>%s</span> %s.\" % (x, d[x].head(1).values[0], col, d[col].head(1).values[0]),\n 'answer': d[col].head(1).values[0],\n 'misc': d\n })\n\n # Lowest %s was %s on the %s %s\n stories.append({\n 'question': \"Lowest %s was %s, %s %s.\" %(x, d[x].tail(1).values[0], col, d[col].tail(1).values[0]),\n 'question_html': \"Lowest <span class='tag is-primary is-light'>%s</span>\\\n was %s of <span class='tag is-success is-light'>%s</span> %s.\" % (x, d[x].tail(1).values[0], col, d[col].tail(1).values[0]),\n 'answer': d[col].tail(1).values[0],\n 'misc': d\n })\n\n\n except IndexError as e:\n pass\n \n for num in continuous:\n d = pd.pivot_table(df.data, index=[col], values=[num],\\\n aggfunc=np.sum).reset_index().sort_values(by=num, ascending=False)\n\n\n stories.append({\n 'question': \"Highest %s is %s, for %s %s.\" % (num, d[num].head(1).values[0], col, d[col].head(1).values[0]),\n 'question_html': \"Highest <span class='tag is-primary is-light'>%s</span>\\\n is %s for <span class='tag is-success is-light'>%s</span> %s.\" % (num, d[num].head(1).values[0], col, d[col].head(1).values[0]),\n 'answer': round(d[num].head(1).values[0]),\n 'misc': d\n })\n\n\n stories.append({\n 'question': \"Lowest %s is %s, for %s %s.\" % (num, d[num].tail(1).values[0], col, d[col].tail(1).values[0]),\n 'question_html': \"Lowest <span class='tag is-primary is-light'>%s</span>\\\n is %s for <span class='tag is-success is-light'>%s</span> %s.\" % (num, d[num].tail(1).values[0], col, d[col].tail(1).values[0]),\n 'answer': round(d[num].tail(1).values[0]),\n 'misc': d\n })\n\n return stories",
"def _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types):\n params = _get_secure_message_threads_params(\n survey_id, business_id, conversation_tab, category, all_conversation_types\n )\n url = f'{current_app.config[\"SECURE_MESSAGE_URL\"]}/messages/count'\n response = requests.get(url, headers={\"Authorization\": _get_jwt()}, params=params)\n return response",
"def get_penalty_counts(game):\n game_type = get_game_type_from_season_type(game)\n\n pen_counts = dict()\n pen_counts['home'] = defaultdict(int)\n pen_counts['road'] = defaultdict(int)\n\n game_events_src_path = os.path.join(\n CONFIG['base_data_dir'], 'game_events',\n str(game['season']), str(game_type), \"%d.json\" % game['game_id'])\n events_data = json.loads(open(game_events_src_path).read())\n\n for period in events_data:\n for event in events_data[period]:\n if event['type'] == 'penalty':\n duration = int(event['data']['duration'] / 60)\n if event['data']['team'] == 'home':\n pen_counts['home'][duration] += 1\n else:\n pen_counts['road'][duration] += 1\n\n return pen_counts",
"def countplot_cat1(df, cat1, title_suffix = '', perc = False, num_label = 15, save_plot = False, path_dir = None):\n \n # Count the number of records for each value in category #1\n if perc == True : \n comp_count = df[cat1].value_counts()/len(df[cat1])\n comp_count = comp_count.nlargest(len(comp_count))\n else : \n comp_count = df[cat1].value_counts()\n comp_count = comp_count.nlargest(len(comp_count))\n sns.set(font_scale=1.2)\n plt.figure(figsize=(12,5))\n plot = sns.countplot(x=cat1, data=df)\n plt.title('Count the number of different %s values %s' % (cat1, title_suffix));\n plt.xticks(rotation=90)\n for ind, label in enumerate(plot.get_xticklabels()):\n if ind % num_label == 0: # every num_label label is kept\n label.set_visible(True)\n else:\n label.set_visible(False)\n plt.show()\n if save_plot == True:\n plt.savefig((plot_dir + \"count_of\"+str(cat1)+\".png\"))\n plt.clf()",
"def get_categories():\n page = requests.get(BASE_URL, verify=False)\n soup = BeautifulSoup(page.content)\n output = [{'title': 'Top 10 Courses'}]\n\n for c in soup.find(id='main_aside').findAll('h4'):\n output.append({'title': c.text})\n\n return output",
"def get_about_count_results(soup):\n title = soup.find('div', {'id': 'gs_ab_md'})\n if title:\n title = title.find('div', {'class': 'gs_ab_mdw'})\n if title:\n count_papers = title.text\n if count_papers:\n count_papers = count_papers.split(' ')[1].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n try:\n int(count_papers)\n except:\n count_papers = title.text.split(' ')[0].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n return int(count_papers)",
"def buildStatsTable(self, category, timelineStats, benchmarkTlsMap):\n statsReport = str(self.buildStatsTitle(category, benchmarkTlsMap.keys(), len(timelineStats)))\n if len(timelineStats.deltaSeriesRepo) > 1:\n tabHeader = ''\n tabBody = ''\n tableCount = 0\n for eventName, deltaSeriesCollection in timelineStats.deltaSeriesRepo.iteritems():\n tabId = '{}_{}'.format(eventName, makeUniqueId())\n tabId = tabId.replace(' ', '_').replace('.', '_').replace(':', '_')\n tabHeader += TAB_HEADER_FMT.format(tabId, tabState(tableCount == 0), eventName)\n table = self._buildStatsTable(eventName, deltaSeriesCollection, benchmarkTlsMap)\n tabBody += TAB_BODY_FMT.format(tabId, tabContentState(tableCount == 0), table)\n tableCount += 1\n tabBody = TAB_BODY_PREFIX + tabBody + TAB_BODY_SUFFIX\n statsReport += TAB_CONTAINER_FMT.format(tabHeader, tabBody) + TAB_JS\n else:\n deltaSeriesCollection = timelineStats.getTscDeltaSeriesCollection()\n statsReport += self._buildStatsTable(TSC_EVENT_NAME, deltaSeriesCollection, benchmarkTlsMap)\n return statsReport"
] | [
"0.6554664",
"0.6068233",
"0.5920364",
"0.56900066",
"0.5659495",
"0.5643938",
"0.56211424",
"0.5558111",
"0.55319446",
"0.5501326",
"0.5473281",
"0.5464325",
"0.53525615",
"0.53275794",
"0.5317448",
"0.5313982",
"0.5292075",
"0.5285532",
"0.5283213",
"0.5224503",
"0.52075994",
"0.51816154",
"0.51795495",
"0.5174624",
"0.5166999",
"0.5163937",
"0.51515925",
"0.51471645",
"0.5146536",
"0.51380473"
] | 0.6814326 | 0 |
Determine if value is only a partial url and needs to be a full url. | def partial_url(row, index):
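# A non-empty value that starts with '/' is treated as a partial (site-relative) URL.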
if len(row[index]) != 0:
if row[index][0] == '/':
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_short_url(self, value: str) -> str:\n url_id = self.context.get(\"url_id\") # just in update mode we have id.\n\n if url_id: # for update step old and new short_value could be same.\n try:\n old_short_url = URL.objects.get(id=url_id).short_url\n except URL.DoesNotExist:\n raise serializers.ValidationError(\"url does not exists!\")\n if old_short_url == value:\n return value\n\n if value and url_validator(value):\n raise serializers.ValidationError(\n \"custom short_url could not be URL itself.Please try for sequence of string instead of a valid URL!\"\n )\n return value",
"def IsValidURL(s):\n return RE_COMPLEX_URL.match(s)",
"def url_type(verifield, required):\n return verifield is None or urlparse(verifield) is not None",
"def is_absolute_url(path):\n return path.startswith(\"http\")",
"def is_url(val):\n res = urlparse(val)\n return bool(res.scheme and res.netloc and res.params == \"\")",
"def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False",
"def ISURL(value):\n value = value.strip()\n if ' ' in value: # Disallow spaces inside value.\n return False\n return bool(_url_regexp.match(value))",
"def validate(self, value: str | None, path: list[str], **kwargs: Any) -> str | None:\n value = super().validate(value, path, **kwargs)\n\n if value == \"\" and self.blank:\n return value\n\n if value is None and self.default_is_none:\n return value\n\n try:\n result = urlparse(value)\n except ValueError:\n raise ValidationError(self, path, value, \"url expected\")\n\n if not result.scheme:\n raise ValidationError(self, path, value, \"no url scheme specified\")\n\n if not result.netloc:\n raise ValidationError(self, path, value, \"no url netloc specified\")\n\n if self.schemes and result.scheme not in self.schemes:\n raise ValidationError(\n self, path, value, f\"invalid url scheme: {result.scheme}\"\n )\n\n return value",
"def clean_url_part(self):\n complete_url = \"{url_prefix}{url_part}\".format(\n url_prefix=self.URL_PREFIX,\n url_part=self.cleaned_data['url_part']\n )\n URLValidator(complete_url)\n self.cleaned_data['repo_url'] = complete_url\n return self.cleaned_data['url_part']",
"def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False",
"def check_url(value):\n\n valid = validators.url(value)\n if valid != True:\n return False",
"def item_url(url):\n return all(map(lambda x: str.isdigit(x), str(url.strip('/').split('/')[-1])))",
"def check_url(value):\n\n valid = validators.url(value)\n if valid is not True:\n return False",
"def validate_url(self):\n pass",
"def _is_url(string):\n return \"http\" in string",
"def is_url_requirement(ireq):\n return bool(ireq.original_link)",
"def isRegularURI(self):\n return self._isRegularURI",
"def _validate_url(url):\n if not url or url.count('/') != 1 or url[0] != '@':\n return False\n return True",
"def is_url(url):\n return re.search(r\"^[a-zA-Z][-+\\.\\w]*://[^\\s]+$\", url) is not None and url[:4] != 'uuid'",
"def url(value: Any) -> str:\n url_in = str(value)\n\n if urlparse(url_in).scheme in [\"http\", \"https\"]:\n return cast(str, vol.Schema(vol.Url())(url_in))\n\n raise vol.Invalid(\"Invalid URL\")",
"def test_non_urls():\n assert normalize_url(\"\") is None\n assert normalize_url(\"abc xyz\") is None\n assert normalize_url(\"asb#abc\") is None\n assert normalize_url(\"Яндекс.рф\") is not None\n assert normalize_url(\"google.blog\") is not None\n assert normalize_url(\"http//google.com\") is None\n assert normalize_url(\"http://user@pass:example.com\") is None",
"def is_url_arg(str):\n return (True if URL_REGEX.match(str[1:-1] if is_quoted(str) else str)\n else False)",
"def check_url_format(self):\r\n m = re.match(\"^http://www.tesco.com/direct/[0-9a-zA-Z-]+/[0-9-]+\\.prd$\", self.product_page_url)\r\n n = re.match(\"^http://www.tesco.com/.*$\", self.product_page_url)\r\n return (not not m) or (not not n)",
"def check_url_format(self):\n\n m = re.match(r\"^http://www\\.flipkart\\.com/.*/p/.*$\", self.product_page_url)\n\n return not not m",
"def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True",
"def check_url(url):\n return 'products.json' in url",
"def get_full_url(self, part_url):\n return BASE_URL + part_url",
"def valid(url):\n return 0 < len(urlparse(url)[1])",
"def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')",
"def _is_url(s: str) -> bool:\n\n return urlparse(s).netloc != \"\""
] | [
"0.6601381",
"0.6435911",
"0.637825",
"0.6365589",
"0.63523203",
"0.6233559",
"0.623043",
"0.6205113",
"0.61700535",
"0.6157163",
"0.61503124",
"0.6121164",
"0.61019105",
"0.6038575",
"0.5991075",
"0.59017664",
"0.588379",
"0.588122",
"0.5857775",
"0.5825847",
"0.58074635",
"0.580001",
"0.5755221",
"0.5723801",
"0.57213014",
"0.5676455",
"0.5673034",
"0.5671914",
"0.56553626",
"0.565018"
] | 0.7026639 | 0 |
Script for updating url fields in CSV file. | def main(script):
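# Assumes the csv module is imported and the partial_url/prepend_url helpers are defined elsewhere in this script.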
original_file = open('data/customers_original.csv')
original_object = csv.reader(original_file)
output_csv = []
for row in original_object:
for index in (4, 5, 6):
    if partial_url(row, index):
        row[index] = prepend_url(row[index])
output_csv.append(row)
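# Write the updated rows to a separate file so the original CSV is left untouched.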
with open('data/customers_edited.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for row in output_csv:
writer.writerow(row)
original_file.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def csv_to_field_Urls(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(url_splitter)\n entity.string = splitter.split(value)",
"def csv_url(self, csv_url):\n\n self._csv_url = csv_url",
"def csv_url(self, csv_url):\n\n self._csv_url = csv_url",
"def process_csv():\n csv_rows = []\n fieldnames = ['site',\n 'latitude',\n 'longitude',\n 'city',\n 'region_code',\n 'country_code',\n 'continent_code',\n 'min_ip_hex',\n 'max_ip_hex',\n 'transit_provider',\n 'min_ip',\n 'max_ip',\n 'ip_prefix',\n 'min_ipv6_hex',\n 'max_ipv6_hex',\n 'min_ipv6',\n 'max_ipv6',\n 'ipv6_prefix']\n\n location_map = build_location_map()\n\n # Read in the CSV file and augment the columns\n with open(INPUT_FILE, 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n csv_rows.append(process_row(row, location_map))\n\n # Write the new CSV file with new columns\n with open(OUTPUT_FILE, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in csv_rows:\n writer.writerow(row)\n\n print(\"MLab Sites CSV generated at {0}\".format(OUTPUT_FILE))",
"def process_links():\n from pymongo import Connection\n conn = Connection()\n db = conn['mchs']\n# db.drop_collection('svodki')\n coll = db['svodki']\n coll.ensure_index(\"url\")\n f = open('alllinks.csv', 'r')\n for l in f:\n parts = l.strip().split('\\t')\n if len(parts) < 4: continue\n year, month, day, url = parts\n o = coll.find_one({'url' : url})\n if o is not None: \n print url, 'passed'\n continue\n u = urllib2.urlopen(url)\n data = u.read()\n u.close()\n data = data.decode('cp1251')\n record = {'year' : int(year), 'month' : int(month), 'day' : int(day), 'url' : url, 'text' : data.encode('utf8')}\n coll.save(record)\n # MCHS site is badly designed and it could block us if we will download pages too often\n time.sleep(5)\n print url, 'processed'",
"def update():\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n field_definitions = get_field_definitions(config)\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n for row in csv_data:\n row = clean_csv_values(row)\n if not ping_node(config, row['node_id']):\n print(\"Node \" + row['node_id'] + \" not found or not accessible, skipping update.\")\n continue\n\n # Add the target_id field.\n node = {\n 'type': [\n {'target_id': config['content_type']}\n ]\n }\n\n node_field_values = get_node_field_values(config, row['node_id'])\n\n # Add custom (non-required) fields.\n required_fields = ['node_id']\n custom_fields = list(\n set(csv_column_headers) - set(required_fields))\n for custom_field in custom_fields:\n # Skip updating field if value is empty.\n if len(row[custom_field]) == 0:\n continue\n\n # Entity reference fields: for taxonomy terms, target_type is 'taxonomy_term';\n # for nodes, it's 'node_type'.\n if field_definitions[custom_field]['field_type'] == 'entity_reference':\n if field_definitions[custom_field]['target_type'] == 'taxonomy_term':\n target_type = 'taxonomy_term'\n field_vocabs = get_field_vocabularies(config, field_definitions, custom_field)\n if config['subdelimiter'] in row[custom_field]:\n prepared_tids = []\n delimited_values = row[custom_field].split(config['subdelimiter'])\n for delimited_value in delimited_values:\n tid = prepare_term_id(config, field_vocabs, delimited_value)\n tid = str(tid)\n prepared_tids.append(tid)\n row[custom_field] = config['subdelimiter'].join(prepared_tids)\n else:\n row[custom_field] = prepare_term_id(config, field_vocabs, row[custom_field])\n row[custom_field] = str(row[custom_field])\n\n if field_definitions[custom_field]['target_type'] == 'node':\n target_type = 'node_type'\n\n if field_definitions[custom_field]['cardinality'] == 1:\n subvalues = row[custom_field].split(config['subdelimiter'])\n node[custom_field] = [\n {'target_id': subvalues[0], 'target_type': target_type}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, row['node_id'], '1')\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n # Append to existing values.\n existing_target_ids = get_target_ids(node_field_values[custom_field])\n num_existing_values = len(existing_target_ids)\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n if subvalue in existing_target_ids:\n existing_target_ids.remove(subvalue)\n # Slice the incoming values to a length that matches the field's\n # cardinality minus its existing length. 
Also log fact that we're\n # slicing off values.\n num_values_to_add = field_definitions[custom_field]['cardinality'] - num_existing_values\n subvalues = subvalues[:num_values_to_add]\n if len(subvalues) > 0:\n logging.warning(\"Adding all values in CSV field %s for node %s would exceed maximum number of \" +\n \"allowed values (%s), so only adding %s values.\", custom_field, row['node_id'], field_definitions[custom_field]['cardinality'], num_values_to_add)\n logging.info(\"Updating node %s with %s values from CSV record.\", row['node_id'], num_values_to_add)\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = node_field_values[custom_field] + field_values\n else:\n logging.info(\"Not updating field %s node for %s, provided values do not contain any new values for this field.\", custom_field, row['node_id'])\n else:\n if num_existing_values + 1 <= field_definitions[custom_field]['cardinality']:\n node[custom_field] = node_field_values[custom_field] + [\n {'target_id': row[custom_field],\n 'target_type': 'taxonomy_term'}]\n else:\n logging.warning(\"Not updating field %s node for %s, adding provided value would exceed maxiumum number of allowed values.\", custom_field, row['node_id'])\n # Cardinality is unlimited.\n else:\n # Append to existing values.\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = node_field_values[custom_field] + field_values\n else:\n node[custom_field] = node_field_values[custom_field] + [\n {'target_id': row[custom_field],\n 'target_type': 'taxonomy_term'}]\n\n # Typed relation fields.\n elif field_definitions[custom_field]['field_type'] == 'typed_relation':\n # Create a copy of the existing values in the current field so we can compare\n # them to the incoming values in the CSV file for deduping. 
To compare these\n # values with the incoming ones, we need to remove the 'url' and\n # 'target_uuid' members.\n node_comparison_values = copy.deepcopy(node_field_values[custom_field])\n for comparison_value in node_comparison_values:\n del comparison_value['url']\n del comparison_value['target_uuid']\n\n if field_definitions[custom_field]['target_type'] == 'taxonomy_term':\n target_type = 'taxonomy_term'\n if field_definitions[custom_field]['target_type'] == 'node':\n target_type = 'node_type'\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n # Append to existing values.\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = node_field_values[custom_field] + field_values\n # Append to existing values.\n else:\n value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = node_field_values[custom_field] + [value]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n existing_target_ids = get_target_ids(node_field_values[custom_field])\n num_existing_values = len(existing_target_ids)\n\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n for subvalue in subvalues:\n if subvalue not in node_comparison_values:\n field_values.append(subvalue)\n # Slice the incoming values to a length that matches the field's\n # cardinality minus its existing length. Also log fact that we're\n # slicing off values.\n num_values_to_add = field_definitions[custom_field]['cardinality'] - num_existing_values\n if num_values_to_add > 0:\n logging.warning(\"Adding all values in CSV field %s for node %s would exceed maximum number of \" +\n \"allowed values (%s), so only adding %s values.\", custom_field, row['node_id'], field_definitions[custom_field]['cardinality'], num_values_to_add)\n logging.info(\"Updating node %s with %s values from CSV record.\", row['node_id'], num_values_to_add)\n field_values = field_values[:num_values_to_add]\n node[custom_field] = node_field_values[custom_field] + field_values\n else:\n logging.info(\"Not updating field %s node for %s, provided values do not contain any new values for this field.\", custom_field, row['node_id'])\n else:\n if num_existing_values + 1 <= field_definitions[custom_field]['cardinality']:\n field_value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = node_field_values[custom_field] + field_value\n else:\n logging.warning(\"Not updating field %s node for %s, adding provided value would exceed maxiumum number of allowed values (%s).\",\n custom_field, row['node_id'], field_definitions[custom_field]['cardinality'])\n # Cardinality is 1. 
Do not append to existing values, replace existing value.\n else:\n field_values = split_typed_relation_string(config, row[custom_field], target_type)\n if len(field_values) > 1:\n node[custom_field] = [field_values[0]]\n log_field_cardinality_violation(custom_field, row['node_id'], '1')\n logging.info(\"Updating node %s with 1 values from CSV record.\", row['node_id'])\n\n # Geolocation fields.\n elif field_definitions[custom_field]['field_type'] == 'geolocation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, row['node_id'], field_definitions[custom_field]['cardinality'])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_geolocation_string(config, row[custom_field])\n node[custom_field] = [field_values[0]]\n if len(field_values) > 1:\n log_field_cardinality_violation(custom_field, row['node_id'], field_definitions[custom_field]['cardinality'])\n\n # For non-entity reference and non-typed relation fields (text, etc.).\n else:\n if field_definitions[custom_field]['cardinality'] == 1:\n subvalues = row[custom_field].split(config['subdelimiter'])\n subvalues[0] = truncate_csv_value(custom_field, row['node_id'], field_definitions[custom_field], subvalues[0])\n node[custom_field] = [{'value': subvalues[0]}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, row['node_id'], '1')\n elif field_definitions[custom_field]['cardinality'] > 1:\n # Append to existing values.\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, row['node_id'], field_definitions[custom_field]['cardinality'])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, row['node_id'], field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = node_field_values[custom_field] + field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, row['node_id'], field_definitions[custom_field], row[custom_field])\n node[custom_field] = node_field_values[custom_field] + [{'value': row[custom_field]}]\n # Cardinatlity is unlimited.\n else:\n # Append to existing values.\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in 
subvalues:\n subvalue = truncate_csv_value(custom_field, row['node_id'], field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = node_field_values[custom_field] + field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, row['node_id'], field_definitions[custom_field], row[custom_field])\n node[custom_field] = node_field_values[custom_field] + [{'value': row[custom_field]}]\n\n node_endpoint = config['host'] + '/node/' + row['node_id'] + '?_format=json'\n node_headers = {'Content-Type': 'application/json'}\n node_response = issue_request(config, 'PATCH', node_endpoint, node_headers, node)\n\n if node_response.status_code == 200:\n print(\"Node for \" + config['host'] + '/node/' +\n row['node_id'] + \" updated.\")\n logging.info(\"Node for %s updated.\", config['host'] + '/node/' + row['node_id'])",
"def import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def csv_book_update(csvfile, book):\n row = {'timestamp' : now_nanos(),\n 'secid' : book.security,\n 'trade_valid' : False,\n 'book_valid' : 'True'}\n bid = book.bid[-1]._asdict()\n offer = book.offer[0]._asdict()\n row.update({\n LEVEL_FORMAT.format(1, 'bid', f) : bid[f] for f in LEVEL_FIELDS\n })\n row.update({\n LEVEL_FORMAT.format(1, 'offer', f) : offer[f] for f in LEVEL_FIELDS\n })\n row.update({TRADE_FORMAT.format(f) : None for f in TRADE_FIELDS})\n csvfile.writerow(row)",
"def update_csv(\n self,\n csv_paths: Union[str, List[str]],\n commit_msg: Optional[str] = None,\n graph_type: Optional[str] = \"instance\",\n graph_id: Optional[str] = \"main\",\n ) -> None:\n self._check_connection()\n if commit_msg is None:\n commit_msg = f\"Update csv from {csv_paths} by python client {__version__}\"\n commit = self._generate_commit(commit_msg)\n if isinstance(csv_paths, str):\n csv_paths_list = [csv_paths]\n else:\n csv_paths_list = csv_paths\n\n self._dispatch(\n \"post\",\n self._csv_url(graph_type, graph_id),\n commit,\n file_list=csv_paths_list,\n )",
"def put_books_info_in_csv(url):\n books_urls = get_books_url(url)\n for url in books_urls:\n put_book_info_in_csv(url)\n download_book_image(url)",
"def date_setup(date, page_offset, url,c):\r\n\r\n if date <= 10:\r\n page_offset = 0\r\n url = \"http://data.terapeak.com/?id=0&search=1&view=item_browse&query=iphone+5s&date=2015-02-1&date_range=1&buyer_country_id=1&condition=rollup_3&type%5Bfixed%5D=1&from_start_price=100&to_start_price=800&from_end_price=100&to_end_price=800&seller_country_id=1&txn_site_id=0&numPages=12&siteID=0&offset={0}\".format(page_offset)\r\n u = list(url)\r\n new = str(date)\r\n u[86] = new #this will update the date from date=2014-09-1 to date=2014-09-2\r\n date_ed_url = \"\".join(u)\r\n #print(edited)\r\n page_offset_update(date, page_offset, date_ed_url, c) # the date has now been updated and the page_offset has been reset to 0\r\n else:\r\n with open(\"5s_Feb_2015_.csv\", \"w\", newline='', encoding='UTF-8') as f:\r\n writer = csv.writer(f)\r\n writer.writerows(listof_listof_lists)\r\n print(\"done\")\r\n quit",
"def make_urls(csvfile):\n result = []\n with open(csvfile, 'rU') as infile: \n reader = csv.DictReader(infile, dialect=csv.excel,\n fieldnames=['ID','URL','Latitude','Longitude'])\n for row in reader:\n idnum = row['ID']\n url = row['URL']\n lat = row['Latitude']\n lon = row['Longitude']\n result.append((url, idnum, lat, lon))\n return result",
"def process_links(conn: Connection, path: Path) -> None:\n sql = \"INSERT OR IGNORE INTO Links (src, dest, annotation) VALUES (?, ?, ?)\"\n run_sql_on_csv(conn, path, sql, (int, int, str))",
"def csv_reader(file_obj):\n reader = csv.reader(file_obj)\n for row in reader:\n data = Body(posLinkToken=row[5]).__dict__\n print(\" \".join(row))\n client = APIClient(login=login, password=password, data=data, count=row[4])\n status = client.retail_point_update()\n print(status.status_code, status.content)",
"def import_photos(input_csv=\"../2012_ROOMS_phote.csv\"):\n reader = csv.DictReader(open(input_csv))\n for s in reader:\n number = s[\"Site ID\"]\n site = models.NewSite.all().filter('number =', number).get()\n if not site:\n continue\n if s['Flickr Pages']:\n site.photo_link = s['Flickr Pages']\n site.put()",
"def update_url(url):\n url_db = select(u for u in Url if u.id == url.id).get()\n url_db.date_scanned = datetime.now()",
"def _reformat_csv(self, csv):\n\n # avoid using extra backslashes because sed uses them as delimiter\n date = csv[-19:-9].replace('/', '-')\n cmds = [f'cut -d , -f 1 --complement <{csv} >{csv}.new',\n f'mv {csv}.new {csv}',\n f'sed -i \"1d\" {csv}',\n f'sed -i \"s/AS//g\" {csv}',\n f'sed -i \"s/,/\\t/g\" {csv}',\n f'sed -i \"s/$/\\t{date}/\" {csv}']\n\n utils.run_cmds(cmds)",
"def update_download_info():\n with open(UPLOAD_HISTORY, 'r') as uh:\n downloads.csv = uh.read()\n file_statuses = []\n for ordered_dict in downloads.dict:\n filename = ordered_dict[\"Torrent Name\"]\n file_statuses.append(tadapt.check_download(filename))\n if len(file_statuses) > 0:\n clean_statuses = [a[1] if a[0] else 0.0 for a in file_statuses]\n downloads.append_col(clean_statuses, header='Download percent')",
"def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))",
"def main(hdf5_file_name, csv_file_with_links, add_map=1):\n\n with open(csv_file_with_links, \"r\") as fc:\n dict_reader = csv.DictReader(fc)\n\n with h5py.File(hdf5_file_name, \"r+\") as fwj:\n\n for row_dict in dict_reader:\n map_from = row_dict[\"map_from\"]\n map_to = row_dict[\"map_to\"]\n\n if len(map_from.strip()):\n if len(map_to.strip()):\n if map_from != map_to:\n if map_from in fwj:\n print(map_from, map_to)\n if add_map:\n fwj[map_to] = fwj[map_from]",
"def svn_info_t_URL_set(svn_info_t_self, char_URL): # real signature unknown; restored from __doc__\n pass",
"def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")",
"def database_mass_update(table,file_location):\n with open(file_location,'r') as csv:\n con = lite.connect(DB_FILE, timeout = TIMEOUT)\n con.row_factory = lite.Row\n with con:\n cur = con.cursor()\n lines = csv.readlines()\n for l in lines:\n l = l.split(',')\n if len(l) > 2:\n started = l[0]\n rownum = l[2]\n attempt_num = l[3]\n bs_id = l[1]\n\n else:\n end_time = l[0]\n errorcode = l[1]\n\n cur.execute(\"UPDATE {0} SET Started=? WHERE Rownum=? AND AttemptNum=? AND BSID=?\".format(table), (started, rownum, attempt_num, bs_id))\n\n cur.execute(\"SELECT * FROM {0} WHERE Rownum=? AND AttemptNum=? AND BSID=?\".format(table), (rownum, attempt_num, bs_id))\n columns = cur.fetchone()\n #get search data\n cur.execute(\"SELECT * FROM PulsarSearch WHERE Rownum=?\", (str(bs_id),))\n bs_columns = cur.fetchone()\n\n if int(errorcode) == 0:\n #add processing times and job completion count\n end_s = date_to_sec(str(end_time))\n start_s = date_to_sec(columns['Started'])\n processing = (end_s - start_s)\n\n cur.execute(\"UPDATE {0} SET Proc=?, Ended=?, Exit=? WHERE Rownum=? AND AttemptNum=? AND BSID=?\".format(table), (processing, end_time, errorcode, rownum, attempt_num, bs_id))\n\n tot_proc = float(bs_columns['TotalProc']) + processing\n job_proc = float(bs_columns[table+'Proc']) + processing\n tot_jc = int(bs_columns['TotalJobComp']) + 1\n job_jc = int(bs_columns[table+'JobComp']) + 1\n\n cur.execute(\"UPDATE PulsarSearch SET TotalProc=?, {0}Proc=?, TotalJobComp=?, {0}JobComp=? WHERE Rownum=?\".format(table),\n (str(tot_proc)[:9], str(job_proc)[:9], str(tot_jc)[:9],\n str(job_jc)[:9], bs_id))\n else:\n tot_er = int(bs_columns['TotalErrors']) + 1\n job_er = int(bs_columns[table+'Errors']) + 1\n\n cur.execute(\"UPDATE {0} SET Ended=?, Exit=? WHERE Rownum=? AND \"\n \"AttemptNum=? AND BSID=?\".format(table),\n (end_time, errorcode, rownum, attempt_num, bs_id))\n\n cur.execute(\"UPDATE PulsarSearch SET TotalErrors=?, {0}Errors=? \"\n \"WHERE Rownum=?\".format(table), (tot_er,job_er, bs_id))\n return",
"def csv_trade_update(csvfile, book, trades):\n for trade in trades:\n row = {'timestamp' : now_nanos(),\n 'secid' : book.security,\n 'trade_valid' : True,\n 'book_valid' : 'False'}\n trade_dict = trade._asdict()\n row.update({\n LEVEL_FORMAT.format(1, 'bid', f) : None for f in LEVEL_FIELDS\n })\n row.update({\n LEVEL_FORMAT.format(1, 'offer', f) : None for f in LEVEL_FIELDS\n })\n row.update({TRADE_FORMAT.format(f) : trade_dict[f] for f in TRADE_FIELDS})\n csvfile.writerow(row)",
"def update_url_record(short_url, long_url, sidecar_text = None, owner = None):\r\n updated_record = {}\r\n updated_record['deleted'] = False\r\n updated_record['long_url'] = long_url\r\n updated_record['sidecar_text'] = sidecar_text\r\n updated_record['owner'] = owner\r\n updated_record['last_updated'] = datetime.now(timezone.utc)\r\n\r\n db_filter = {'name': short_url }\r\n\r\n db_collection.update_one(db_filter, { \"$set\": updated_record }, upsert=True)",
"def getcongressURLs():\n\n\tdates = getdates()\n\n\tf = open('congressional_records_URLs2.csv', 'w')\n\ttry:\n\t\tfor dt in range(0, len(dates)):\n\t\t\tdate = str(dates[dt]).replace(\"'\", \"\").replace(\",\", \"-\").replace(\" \", \"\").replace(\"(\", \"\").replace(\")\", \"\")\n\t\t\tfull_url = getfullURL(date)\n\t\t\tf.write(u'%s\\n' % (full_url))\n\tfinally:\n\t\tf.close()",
"def url_entry(url: str) -> None:\r\n if not url:\r\n parent.clone_list_metadata_url = config.clone_list_metadata_download_location\r\n return\r\n else:\r\n if validators.url(url):\r\n self.ui.labelURLError.hide()\r\n parent.clone_list_metadata_url = url\r\n write_config(parent, dat_details, config, self)\r\n else:\r\n self.ui.labelURLError.show()",
"def updateVersionNumber(self, *args):\n file = '//corp-nas01/DC/dc_art/character/GEPPETTO/CharacterTracker.csv'\n \n character = []\n character = cmds.textScrollList(self.UIElements['characterName'] , q=True, si=True)\n \n key = character[0]\n characterInfo = self.trackerInfo[key]\n versionNumber = characterInfo[2][1]\n \n index = 0\n \"\"\" Open the file for reading \"\"\"\n with open(file, 'rb') as f:\n reader = csv.reader(f, delimiter=',', quotechar='\"')\n for row in reader:\n newIndex = index+1\n index = newIndex\n if row[0] == character[0]:\n numLine = index\n versionNumber = int(row[2])\n newVersion = versionNumber + 1\n \n row[2] = newVersion\n print row\n \n \n \n line_to_override = {numLine:[row]} \n\n \"\"\" Open the csv in maya for writing \"\"\"\n writer = csv.writer(open(file, 'wb'), delimiter=',')\n\n data = line_to_override.get(numLine, row)\n writer.writerow(data)\n \n #writer.writerow(version)",
"def _updateScraped(self, table, record_type):\n print ('Updating scraped ids from %s' % table)\n conn = dbo.getConnection()\n query = \"\"\"\n UPDATE records\n SET scraped=True\n FROM %s AS updater\n WHERE updater.id=records.id\n AND records.type=%s\n \"\"\" % (table, \"'{0}'\".format(record_type))\n dbo.execute_query(conn, query)\n dbo.closeConnection(conn)\n return",
"def csv(self, file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n db = self.db\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()"
] | [
"0.6645071",
"0.65307665",
"0.65307665",
"0.618359",
"0.6104837",
"0.60744625",
"0.59018284",
"0.5862033",
"0.57731605",
"0.5760353",
"0.5751486",
"0.574974",
"0.5711782",
"0.5708614",
"0.56546396",
"0.5644092",
"0.5479853",
"0.54011136",
"0.5396848",
"0.5350193",
"0.5342484",
"0.53153205",
"0.52988297",
"0.52971894",
"0.52807355",
"0.5273894",
"0.52590084",
"0.52496254",
"0.5233408",
"0.5232567"
] | 0.6833886 | 0 |
Reads the command-line parameters, checks to make sure they seem right, and returns them | def parse_and_validate_cmd_line():
if len(sys.argv) != 4:
print USAGE_STR.format(sys.argv[0])
sys.exit()
# attempt to parse the parameters tell the user and exit if we can't
num_segments = parse_and_validate_num_segs(sys.argv[1])
# try to parse numThreads
num_threads = parse_and_validate_num_threads(sys.argv[2])
# try to parse and test the data directory
data_dir = parse_and_validate_data_dir(sys.argv[3])
return num_segments, num_threads, data_dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing",
"def checkCommandArgs():\n try:\n int(sys.argv[1]) #sin\n int(sys.argv[2]) #sout\n int(sys.argv[3]) #csin\n except (ValueError, IndexError) as e:\n print (\"One or more port numbers are not ints or were not entered\")\n sys.exit()\n \n for i in range(3):\n if int(sys.argv[i+1]) > PORT_RANGE_UPPER or int(sys.argv[i+1]) < PORT_RANGE_LOWER:\n print(\"One or more port number out of range\")\n sys.exit()\n \n if not os.path.isfile(sys.argv[4]):\n print(\"file does not exist\")\n sys.exit()\n \n return int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), sys.argv[4]",
"def readArgs():\n args = sys.argv\n if len(args) != 3:\n print(\"ERROR - Wrong number of arguments! \\n\")\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n exit(5)\n if args[1] != \"MTS\" and args[1] != \"SCH\":\n print(\"ERROR - Wrong type specified! : \" + args[1])\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n return args",
"def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 4:\n print(\"[ERR] Invalid number of command line arguments!\")\n print(len(sys.argv))\n print(sys.argv[:])\n _usage()\n sys.exit(1)\n\n # Check if lis.config template exists.\n lis_config_template = sys.argv[1]\n if not os.path.exists(lis_config_template):\n print(f\"[ERR] {lis_config_template} does not exist!\")\n sys.exit(1)\n\n # Check if directory for restart files exists. Actual restart file\n # shall be checked later.\n restart_dir = sys.argv[2]\n if not os.path.exists(restart_dir):\n print(f\"[ERR] Directory {restart_dir} does not exist!\")\n sys.exit(1)\n\n # Get start date of new LIS run.\n yyyymmdd = sys.argv[3]\n if len(yyyymmdd) != 8:\n print(\"[ERR] Invalid length for YYYYMMDD, must be 8 characters!\")\n sys.exit(1)\n year = int(yyyymmdd[0:4])\n month = int(yyyymmdd[4:6])\n day = int(yyyymmdd[6:8])\n try:\n startdate = datetime.date(year, month, day)\n except ValueError:\n print(\"[ERR] Invalid YYYYMMDD passed to script!\")\n sys.exit(1)\n\n return lis_config_template, restart_dir, startdate",
"def parsare_argumente():\n for arg in sys.argv:\n if arg == \"-h\":\n display_usage()\n\n in_dir=\"input\"\n out_dir=\"output\"\n n=3\n timeout=10\n for arg in sys.argv[1:]:\n check = arg.split(\"=\")\n if len(check) < 2:\n print(\"invalid\")\n exit()\n if check[0] == \"if\":\n in_dir = ''.join(check[1:])\n elif check[0] == \"of\":\n out_dir = ''.join(check[1:])\n elif check[0] == 'n':\n try:\n n = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n elif check[0] == 't':\n try:\n timeout = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n\n return [in_dir, out_dir, n, timeout]",
"def read_cmd_args():\n\n if len(sys.argv) != 9:\n print(\"[ERR] Invalid number of command line arguments!\")\n usage()\n sys.exit(1)\n\n # FCST_SYR\n try:\n fcst_syr = int(sys.argv[1])\n except ValueError:\n print(f\"[ERR] Invalid argument for FCST_SYR! Received {(sys.argv[1])}\")\n usage()\n sys.exit(1)\n if fcst_syr < 0:\n print(f\"[ERR] Invalid argument for FCST_SYR! Received {(sys.argv[1])}\")\n usage()\n sys.exit(1)\n\n # FCST_EYR\n try:\n fcst_eyr = int(sys.argv[2])\n except ValueError:\n print(f\"[ERR] Invalid argument for FCST_EYR! Received {(sys.argv[2])}\")\n usage()\n sys.exit(1)\n if fcst_eyr < 0:\n print(f\"[ERR] Invalid argument for FCST_EYR! Received {(sys.argv[2])}\")\n usage()\n sys.exit(1)\n\n # MONTH_ABBR\n month_abbr = str(sys.argv[3])\n\n # MONTH_NUM\n try:\n month_num = int(sys.argv[4])\n except ValueError:\n print(f\"[ERR] Invalid argument for MONTH_NUM! Received {(sys.argv[4])}\")\n usage()\n sys.exit(1)\n if month_num < 1:\n print(f\"[ERR] Invalid argument for MONTH_NUM! Received {(sys.argv[4])}\")\n usage()\n sys.exit(1)\n if month_num > 12:\n print(f\"[ERR] Invalid argument for MONTH_NUM! Received {(sys.argv[4])}\")\n usage()\n sys.exit(1)\n\n # FCST_TYPE\n fcst_type = str(sys.argv[5])\n\n # LEAD_MONTHS\n try:\n lead_months = int(sys.argv[6])\n except ValueError:\n print(f\"[ERR] Invalid argument for LEAD_MONTHS! Received {(sys.argv[6])}\")\n usage()\n sys.exit(1)\n if lead_months < 0:\n print(f\"[ERR] Invalid argument for LEAD_MONTHS! Received {(sys.argv[6])}\")\n usage()\n sys.exit(1)\n\n # ENS_NUM\n try:\n ens_num = int(sys.argv[7])\n except ValueError:\n print(f\"[ERR] Invalid argument for ENS_NUM! Received {(sys.argv[7])}\")\n usage()\n sys.exit(1)\n if ens_num < 0:\n print(f\"[ERR] Invalid argument for ENS_NUM! Received {(sys.argv[7])}\")\n usage()\n sys.exit(1)\n\n # CONFIG_FILE\n config_file = sys.argv[8]\n if not os.path.exists(config_file):\n print(f\"[ERR] {config_file} does not exist!\")\n sys.exit(1)\n\n return fcst_syr, fcst_eyr, month_abbr, month_num, fcst_type, lead_months, \\\n \tens_num, config_file",
"def checkArguments ( ) :\r\n\r\n if len( sys.argv ) <= 1 : return None\r\n\r\n\r\n # splits the arguments that contain quotes\r\n \r\n wordList = [ ]\r\n\r\n for argument in sys.argv :\r\n\r\n wordList.extend( argument.split( '\"' ) )\r\n\r\n\r\n # places all the arguments that start with \"--\" at the end, and joins the others into words\r\n\r\n noMinusList = [ ]\r\n\r\n minusList = [ ]\r\n\r\n argument = \"\"\r\n\r\n for word in wordList[ 1 : ] :\r\n\r\n # strips spaces and quotes\r\n \r\n word = word.strip( \" \\\"'\" ) \r\n\r\n if word.startswith( \"--\" ) :\r\n\r\n minusList.append( word )\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n argument = \"\"\r\n\r\n elif argument == \"\" :\r\n\r\n argument = word\r\n\r\n else :\r\n\r\n argument = argument + \" \" + word\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n\r\n # library = 1st argument of the form \"-- ... /\" that exists\r\n\r\n libraryPath = None\r\n\r\n for argument in minusList :\r\n\r\n if ( ( argument.endswith( os.sep ) ) and ( os.path.exists( argument.strip( \"- \" ) ) ) ) :\r\n\r\n libraryPath = argument.strip( \"-\" )\r\n\r\n break\r\n\r\n # recomposes the command line\r\n \r\n sys.argv = wordList[ : 1 ] + noMinusList + minusList \r\n\r\n return libraryPath",
"def check_cli():\n if \"--help\" in sys.argv or \"-h\" in sys.argv:\n _exit(__help__)\n debug = False\n if \"--debug\" in sys.argv:\n debug = True\n sys.argv.remove(\"--debug\")\n input_file_names = sys.argv[1:]\n return debug, input_file_names",
"def parameters_are_valid():\n # The only accepted number of command line arguments is 3: they are\n # aggregator.py, the filename, and the topic\n if len(sys.argv) != 3:\n # Issue error message if invalid number of command line arguments\n print(\"Error: invalid number of arguments\")\n print(\"Usage: aggregator.py filename topic\")\n return False\n else:\n return True",
"def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)",
"def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params",
"def ReadArguments():\n\n args = ParseArguments()\n\n logging.info('Command line arguments...')\n for arg in vars(args):\n logging.info(str(arg) + ': ' + str(getattr(args, arg)))\n logging.info('')\n\n IsTest(args)\n ProcessCacheSize(args)\n ProcessLineSize(args)\n ProcessMulti(args)\n ProcessMemPattern(args)\n ProcessMemFile(args)",
"def read_params():\n \n # Get the input list\n params = argv\n params.pop(0)\n \n #Check if quoting is necessary\n params = check_double_quote(params)\n \n # Declare input file\n fin = read_item(params, '-s=', './opt_probl.txt')\n # Find standard output directory\n fout = read_item(params, '-d=', './opt_sol.txt')\n \n return ( fin, fout )",
"def get_args():\n\n parser = argparse.ArgumentParser(\n description=\"Script tests the HCSR04 sensor under different configurations\"\n )\n\n parser.add_argument(\n \"-t\",\n \"--trig\",\n type=int,\n help=\"Trig Pin (Required - must be an integer, must \\\n use BCM pin values)\",\n required=True,\n )\n\n parser.add_argument(\n \"-e\",\n \"--echo\",\n type=int,\n help=\"Echo Pin (Required - must be an integer, must \\\n use BCM pin values)\",\n required=True,\n )\n\n parser.add_argument(\n \"-sp\",\n \"--speed\",\n type=float,\n help=\"Time between individual reading samples \\\n (Optional - must be a float, default\\\n is 0.1 seconds)\",\n required=False,\n default=0.1,\n )\n\n parser.add_argument(\n \"-ss\",\n \"--samples\",\n type=int,\n help=\"Reading Sample Size (Optional - must be an \\\n integer, default is 11)\",\n required=False,\n default=11,\n )\n\n args = parser.parse_args()\n\n trig = args.trig\n echo = args.echo\n speed = args.speed\n samples = args.samples\n\n return trig, echo, speed, samples",
"def check_args(argv):\n arg_length = len(argv)\n\n usage_str = '\\nUSAGE:\\npython {} [ls_hostname] [ls_listen_port]\\npython {} [ls_hostname] [ls_listen_port] [input_file_name]\\npython {} [ls_hostname] [ls_listen_port] [input_file_name] [output_file_name]\\n'.format(argv[0], argv[0], argv[0])\n\n ls_hostname = DEFAULT_HOSTNAME_LS\n ls_portno = DEFAULT_PORTNO_LS\n\n input_file_str = DEFAULT_INPUT_FILE_STR_HNS\n output_file_str = DEFAULT_OUTPUT_FILE_STR_RESOLVED\n\n # debugging args\n if arg_length is 1:\n pass\n elif arg_length is 2:\n ls_hostname = argv[1]\n # end debugging args\n elif arg_length is 3:\n ls_hostname = argv[1]\n ls_portno = int(argv[2])\n elif arg_length is 4:\n ls_hostname = argv[1]\n ls_portno = int(argv[2])\n\n input_file_str = argv[3]\n elif arg_length is 5:\n ls_hostname = argv[1]\n ls_portno = int(argv[2])\n\n input_file_str = argv[3]\n output_file_str = argv[4]\n else:\n print(usage_str)\n exit()\n\n return (ls_hostname, ls_portno), (input_file_str, output_file_str)",
"def check_argv():\n parser = argparse.ArgumentParser(description=__doc__.strip().split(\"\\n\")[0], add_help=False)\n parser.add_argument(\"keywordslist\", help=\"Numpy output file\")\n parser.add_argument(\"dirs\", help=\"Numpy output file\")\n parser.add_argument(\"npz_train\", help=\"Numpy output file\")\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()",
"def read_parameter(self):\n\n # default parameter\n if len(sys.argv) == 1:\n print(\"\\n||| Python Running\", sys.argv,\n \"\\n||| need to tell me the [inputData, outputData]\"\n \"\\n||| or i would used default parameter\"\n \"\\n||| -i *.tbl\\t*.arwen\\t[inputData]\"\n \"\\n||| -o repair.tbl\\t\\t[outputData]\")\n\n cwd = os.path.dirname(__file__)\n listdir = os.listdir(cwd)\n\n file_tbl = [x for x in listdir if (x.find(\".tbl\") >= 0) and (x.find(\"_repair\") < 0)][0]\n file_arwen = [x for x in listdir if x.find(\".arwen\") >= 0][0]\n\n sys.argv.extend([\n \"-i %s %s\" % (file_tbl, file_arwen),\n \"-o %s_repair.tbl\" % file_tbl[:-4],\n ])\n\n command = reduce(lambda a, b: a + b + \" \", sys.argv[1:])\n command = command.split(\"-\")[1:]\n\n infile = []\n outfile = []\n for item in command:\n if item[0] == 'i':\n infile = item[1:].split()\n elif item[0] == 'o':\n outfile = item[1:].split()\n\n return infile, outfile",
"def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u",
"def check_arguments(self):\n ## only four test operation is permitted, if given anything apart from this, then it should print error message\n if (self.args.snap is False and self.args.snapcheck is False and self.args.check is False and self.args.diff is False and self.args.version is False):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n\n if(((self.args.snap is True and (self.args.pre_snapfile is None or self.args.file is None)) or\n (self.args.snapcheck is True and self.args.file is None) or\n (self.args.check is True and self.args.file is None)) and \n (self.args.testfiles is None or self.args.hostname is None)\n ):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n if self.args.diff is True:\n if (self.args.pre_snapfile is not None and os.path.isfile(self.args.pre_snapfile)) and (\n self.args.post_snapfile is not None and os.path.isfile(self.args.post_snapfile)):\n comp = Comparator()\n comp.compare_diff(\n self.args.pre_snapfile,\n self.args.post_snapfile,\n None)\n sys.exit(1)\n else:\n if (self.args.file is None) and (\n self.args.testfiles is None or self.args.hostname is None):\n self.parser.print_help()\n sys.exit(1)",
"def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale",
"def checkArgs():\n\n #-- 1 --\n # [ if sys.argv[1:] has exactly four elements ->\n # rawAltAz, rawLat, rawLon, rawDT := those elements\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n argList = sys.argv[1:]\n if len(argList) != 4:\n usage (\"Incorrect command line argument count.\" )\n else:\n rawAltAz, rawLat, rawLon, rawDT = argList\n #-- 2 --\n # [ if rawAltAz is a valid set of horizon coordinates ->\n # altAz := those coordinates as a sidereal.AltAz instance\n altAz = checkAltAz ( rawAltAz )\n\n #-- 3 --\n # [ if rawLat is a valid latitude ->\n # lat := that latitude in radians\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n try:\n lat = sidereal.parseLat ( rawLat )\n except SyntaxError, detail:\n usage ( \"Invalid latitude: %s\" % detail )\n\n #-- 4 --\n # [ if rawLon is a valid longitude ->\n # lon := that longitude in radians\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n try:\n lon = sidereal.parseLon ( rawLon )\n except SyntaxError, detail:\n usage ( \"Invalid longitude: %s\" % detail )\n\n #-- 5 --\n # [ if rawDT is a valid date-time string ->\n # dt := that date-time as a datetime.datetime instance\n # else ->\n # sys.stderr +:= error message\n # stop execution ]\n try:\n dt = sidereal.parseDatetime ( rawDT )\n except SyntaxError, detail:\n usage ( \"Invalid timestamp: %s\" % detail )\n\n #-- 6 --\n latLon = sidereal.LatLon ( lat, lon )\n return (altAz, latLon, dt)",
"def read_arguments(argv):\n\tif argv[0] in ('1', '2'):\n\t\tconos_config['endpoint'] = endpoint[argv[0]]\n\telse:\n\t\tusage()\n\n\tif argv[1] in ('dev', 'test', 'int', 'prod'):\n\t\tconos_config['environment'] = argv[1]\n\t\tconos_config['sts_url'] = eval(argv[1] + '_sts_url')\n\t\tconos_config['aicuu_url'] = eval(argv[1] + '_aicuu_url')\n\telse:\n\t\tusage()\n\n\tif len(argv) == 6:\n\t\tconos_config['number_threads'] = '1'\n\telse:\n\t\tif argv[6] in ('1', '2', '3', '4', '5', '6', '7', '8'):\n\t\t\tconos_config['number_threads'] = argv[6]\n\t\telse:\n\t\t\tusage()\n\n\tconos_config['client_id'] = argv[2]\n\tconos_config['client_secret'] = argv[3]\n\tconos_config['input_file'] = argv[4]\n\tconos_config['output_file'] = argv[5]",
"def check_argv():\n parser = argparse.ArgumentParser(description=__doc__.strip().split(\"\\n\")[0], add_help=False)\n parser.add_argument(\"data_dir\", type=str, help=\"data directory\")\n parser.add_argument(\n \"--init_am_n_iter\", type=int, help=\"default: %(default)s\",\n default=default_options_dict[\"init_am_n_iter\"]\n )\n parser.add_argument(\n \"--segment_n_iter\", type=int, help=\"default: %(default)s\",\n default=default_options_dict[\"segment_n_iter\"]\n )\n parser.add_argument(\n \"--K_max\", type=str, help=\"default: %(default)s\",\n default=default_options_dict[\"K_max\"]\n )\n parser.add_argument(\n \"--min_duration\", type=int, help=\"default: %(default)s\",\n default=default_options_dict[\"min_duration\"]\n )\n parser.add_argument(\n \"--n_slices_max\", type=int, help=\"default: %(default)s\",\n default=default_options_dict[\"n_slices_max\"]\n )\n parser.add_argument(\n \"--p_boundary_init\", type=float, help=\"default: %(default)s\",\n default=default_options_dict[\"p_boundary_init\"]\n ) \n parser.add_argument(\n \"--n_cpus\", type=int, help=\"default: %(default)s\",\n default=default_options_dict[\"n_cpus\"]\n )\n parser.add_argument(\n \"--n_batches\", type=int, help=\"default: %(default)s\",\n default=default_options_dict[\"n_batches\"]\n )\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()",
"def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")",
"def get_args( self, ):\r\n for iarg in sys.argv[1:]:\r\n #print iarg\r\n argsplits = iarg.split(\"=\")\r\n parm_name = argsplits[0]\r\n parm_value = argsplits[1]\r\n\r\n # so far only one is captured\r\n if parm_name == \"parameters\":\r\n self.parmeters_x = parm_value #\r\n msg = \"command line arg >>{iarg}\" # log file not open but use alt\r\n AppGlobal.logger.info( msg )\r\n else:\r\n msg = \"no parmeter extensions\"\r\n AppGlobal.logger.info( msg )\r\n return",
"def parseArgs():\n # Configure the option parser for CLI options to the script\n usage = \"usage: %prog [options] userName password configlet xlfile\"\n parser = argparse.ArgumentParser(description=\"Excel File to JSON Configlet Builder\")\n parser.add_argument(\"--userName\", help='Username to log into CVP')\n parser.add_argument(\"--password\", help='Password for CVP user to login')\n parser.add_argument(\"--target\", nargs=\"*\", metavar='TARGET', default=[],\n help='List of CVP appliances to get snapshot from URL,URL')\n parser.add_argument(\"--snapshot\", help='CVP Snapshot containing Show Inventory and Show LLDP neighbor data')\n parser.add_argument(\"--opticType\", default='PSM4', help=\"Optic Type to look for\")\n parser.add_argument(\"--verbose\", default=False, help='Return more information to the command line')\n args = parser.parse_args()\n return checkArgs( args )",
"def parse_parameters():\n parser = argparse.ArgumentParser(description='Program that checks for bad evaluations.')\n parser.usage = 'bettercorrectors [-h] client_id client_secret start_date [end_date] [--sql file]'\n parser.add_argument('client_id', help='the client_id of your intranet application', type=str)\n parser.add_argument('client_secret', help='the client_secret of your intra application', type=str)\n parser.add_argument('start_date', help='the latest date in iso format', type=datetime.fromisoformat)\n parser.add_argument('end_date', help='the closest date in iso format (optional)', type=datetime.fromisoformat,\n default=datetime.now(), nargs='?')\n parser.add_argument('--sql', dest='file', help='''name of the database file in case you want to save results in a \n sqlite database''', type=str)\n args = parser.parse_args()\n return args",
"def get_cmd_args():\n\n\n\t#Creates the Argument Parser\n\tparser = ArgumentParser(description = \"ID Lab qPCR Analysis v\" + VERSION + \" \" + QUALITY)\n\n\t#Adds the input file argument\n\tparser.add_argument('-f', '--file',\n\t\t\t\tnargs = '+',\n\t\t\t\ttype = FileType('r'),\n\t\t\t\trequired = True)\n\n\t#Adds the output directory\n\tparser.add_argument('-o', '--output',\n\t\t\t\trequired = True)\n\n\t#Adds the model argument, to select between the three models\n\tparser.add_argument('-m', '--mod', '--model',\n\t\t\t\tnargs = '?',\n\t\t\t\tchoices = ['relative', 'absolute', 'stability'],\n\t\t\t\trequired = True)\n\n\t#Adds the control genes argument, taking a list of gene names\n\tparser.add_argument('-cg', '--cgenes', '--controlgenes',\n\t\t\t\tnargs = '+',\n\t\t\t\trequired = True)\n\n\t#Adds the optional control sample argument for the stability model, taking a list of sample names\n\tparser.add_argument('-cs', '--csample', '--controlsamples',\n\t\t\t\tnargs = '*')\n\n\t#Adds optional outlier cutoff\n\tparser.add_argument('-oc', '--ocutoff',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.3)\n\n\t#Adds optional max outliers\n\tparser.add_argument('-om', '--omax',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.5)\n\n\t#Adds optional encoding \n\tparser.add_argument('-e', '--encoding',\n\t\t\t\tdefault = 'ISO-8859-1')\n\n\t#Adds optional header size\n\tparser.add_argument('-hd', '--header',\n\t\t\t\tdefault = 47)\n\n\treturn vars(parser.parse_args())",
"def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecProcessScriptFindv0_1.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(),\"Data Input is None\")\n \n self.__searchPath = self.getDataInput().getSearchPath().getValue() \n self.__inputString = self.getDataInput().getInputString().getValue()",
"def test_args(self):\n parser = argparse.ArgumentParser(\n prog=\"sysbottle\", description=\"sysbottle is parsed\"\n )\n subparsers = parser.add_subparsers()\n sysbottle.build(subparsers)\n args = parser.parse_args(\n [\n \"sysbottle\",\n \"abc.txt\",\n \"-c\",\n \"90\",\n \"-q\",\n \"1\",\n \"-d\",\n \"sda\",\n \"-i\",\n \"5\",\n \"-t\",\n \"3\",\n ]\n )\n self.assertTrue(hasattr(args, \"file\"))\n self.assertTrue(hasattr(args, \"cpu\"))\n self.assertTrue(hasattr(args, \"diskQ\"))\n self.assertTrue(hasattr(args, \"disks\"))\n self.assertTrue(hasattr(args, \"iowait\"))\n self.assertTrue(hasattr(args, \"throughput\"))"
] | [
"0.7394097",
"0.7226845",
"0.7086163",
"0.7039422",
"0.69269073",
"0.68884903",
"0.6864421",
"0.6826465",
"0.67924505",
"0.6779108",
"0.67746884",
"0.6754019",
"0.67359424",
"0.667076",
"0.66613334",
"0.6638034",
"0.65763146",
"0.65523595",
"0.65419203",
"0.6500828",
"0.6467564",
"0.6453628",
"0.64466906",
"0.6436634",
"0.6435912",
"0.6423883",
"0.64228994",
"0.64009655",
"0.63791424",
"0.63790613"
] | 0.7317521 | 1 |
Attempts to parse the number of threads passed in on the command line | def parse_and_validate_num_threads(thread_str):
num_threads = 0
try:
num_threads = int(thread_str)
if num_threads < 1:
raise ValidationError(NUMTHREAD_ERR_SMALL_VAL)
elif num_threads > mp.cpu_count():
err_str = NUMMTHREAD_ERR_BIG_VAL.format(mp.cpu_count())
raise ValidationError(err_str)
except ValidationError as err:
print USAGE_STR.format(sys.argv[0])
print NUMTHREAD_ERR.format(err.args[0])
sys.exit()
except ValueError:
print USAGE_STR.format(sys.argv[0])
print NUMTHREAD_ERR.format(NUMTHREAD_ERR_BAD_PARSE)
sys.exit()
except BaseException as err:
print USAGE_STR.format(sys.argv[0])
print NUMTHREAD_ERR.format("Unexpected error")
print "Error was:\n\t", err
return num_threads | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_threads(self, args):\n try:\n threads = int(args[\"threads\"])\n except KeyError:\n raise ValueError(\"Must specify the number of threads the crawler should work with.\")\n except ValueError:\n raise ValueError(\"Threads must be an integer.\")\n if threads < 0:\n raise ValueError(\"Threads must be a positive integer.\")\n # 0 is interpreted as make as many threads as there are cores.\n if threads == 0:\n threads = multiprocessing.cpu_count()\n return threads",
"def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"-threads\", help=\"specifies a thread count for parallel operations\", type=int)\n return parser.parse_args()",
"def parse_cmd_arguments(): # {{{\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-lf', '--logfile', nargs='?',\n const=False, help='Log file on or off (true || false)')\n parser.add_argument('-tc', '--threadcount', nargs='?', default=1,\n help=\"Set number of threads to use. Defaults to 1.\")\n\n ARGS = parser.parse_args()\n\n if ARGS.logfile:\n formatter = logging.Formatter((\"\\n%(asctime)s\"\n \" %(filename)s:%(lineno)-3d\"\n \" %(levelname)s\\n%(message)s\"))\n\n log_file = \"assignment_07\" + '.log'\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n LOGGER.addHandler(file_handler)\n\n return int(ARGS.threadcount) # }}}",
"def setNumThreads(self, num):\r\n self.threads = num",
"def getThreads():\n if sys.platform == 'win32':\n return int(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return int(os.popen('grep -c cores /proc/cpuinfo').read())",
"def parse_args():\n\n argparser = NagiosArgumentParser(description='Check number of threads for given PID (through a given command returning PID)')\n argparser.add_argument('-W', '--warning', type=int, required=True, help='Maximum number of threads threshold for warning') # pylint: disable=bad-whitespace\n argparser.add_argument('-C', '--critical', type=int, required=True, help='Maximum number of threads threshold for error') # pylint: disable=bad-whitespace\n argparser.add_argument('-P', '--pid-cmd', required=True, help='Command to run to select target PID', metavar='\"systemctl show nginx --property=MainPID --value\"') # pylint: disable=bad-whitespace\n argparser.add_argument('-D', '--debug', action='store_true', help='Debug mode: re raise Exception (do not use in production)') # pylint: disable=bad-whitespace\n args = argparser.parse_args()\n\n if args.warning > args.critical:\n argparser.error('Warning threshold cannot be greater than critical one')\n\n if args.warning < 0 or args.critical < 0:\n argparser.error('Warning/critical tresholds must be > 0')\n\n return args",
"def set_option_thread_count(self, integer, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionThreadCount/', {'Integer': integer, 'apikey': apikey})))",
"def add_ncores_arg(ap):\n ap.add_argument(\n \"-n\",\n \"--ncores\",\n dest='ncores',\n help=(\n \"The number of threads to use. Uses 1 if not specified. \"\n \"Uses all available threads if `-n` is given. Else, uses the \"\n \"number indicated, for example: `-n 4` will use 4 threads.\"\n ),\n type=int,\n default=1,\n const=None,\n nargs='?',\n )",
"def test_parse_symmetrical_threads():\n assert tartan.parse_threadcount('W/2 B1 LB/2') == [\n \"#FFFFFF\", \"#FFFFFF\", \"#0000FF\", \"#82CFFD\", \"#82CFFD\", \"#0000FF\"\n ]",
"def get_num_threads():\n\n num_cores = os.cpu_count()\n\n # the specific environment variable takes prescedence\n if \"PRA_NUM_THREADS\" in os.environ:\n return int(os.environ[\"PRA_NUM_THREADS\"])\n\n # we also respect OMP and MKL variables\n env_var = [\n \"OMP_NUM_THREADS\",\n \"MKL_NUM_THREADS\",\n ]\n\n all_limits = [int(getattr(os.environ, var, num_cores)) for var in env_var]\n\n return min(all_limits)",
"def nThreads(self):\n return self._c_param.n_threads",
"def option_thread_count(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionThreadCount/')))",
"def determine_number_of_jobs(\n parallel=False, command_line=None, config_default=None, max_cpus=None\n):\n if not parallel:\n return 1\n\n if command_line is None and \"command_line\" in spack.config.scopes():\n command_line = spack.config.get(\"config:build_jobs\", scope=\"command_line\")\n\n if command_line is not None:\n return command_line\n\n max_cpus = max_cpus or cpus_available()\n\n # in some rare cases _builtin config may not be set, so default to max 16\n config_default = config_default or spack.config.get(\"config:build_jobs\", 16)\n\n return min(max_cpus, config_default)",
"def _get_threads():\n if sys.platform == 'win32':\n # return (int)(os.environ['NUMBER_OF_PROCESSORS'])\n return 0 # save trouble, do not use multiprocessing on windows\n else:\n return (int)(os.popen('grep -c cores /proc/cpuinfo').read())",
"def setNumIterations(*argv):",
"def _threads(self, counter):\r\n\r\n t = self.context.threads\r\n if not isinstance(t, int):\r\n t = 3\r\n if t < counter:\r\n return t\r\n return counter",
"def getNumThreads(cls) -> int:\n return cls.NUMTHREADS",
"def get_threads(self):\r\n threads = self._config.get_int('threads', DEFAULT_THREADS)\r\n if threads < THREADS_MIN_TOTAL:\r\n threads = THREADS_MIN_TOTAL\r\n if threads > THREADS_MAX_TOTAL:\r\n threads = THREADS_MAX_TOTAL\r\n return threads",
"def parse_cli():\n defaults = DefaultOptions()\n parser = CustomArgumentParser(usage=\"%(prog)s [OPTIONS] THREAD [THREAD]...\")\n\n parser.add_argument(\"thread\", nargs=\"*\", help=\"thread URL\")\n parser.add_argument(\n \"-l\", \"--list\", action=\"append\", type=valid_list, default=defaults.LIST\n )\n parser.add_argument(\n \"-q\", \"--quiet\", dest=\"verbosity\", action=\"store_const\",\n const=0, default=defaults.VERBOSITY\n )\n parser.add_argument(\"-p\", \"--path\", dest=\"base_dir\", default=defaults.PATH)\n parser.add_argument(\n \"-f\", \"--filenames\", dest=\"names\", action=\"store_true\",\n default=defaults.USE_NAMES\n )\n parser.add_argument(\n \"-a\", \"--archive\", dest=\"archive\", type=valid_archive,\n default=defaults.ARCHIVE\n )\n parser.add_argument(\n \"--connections\", type=positive_int, default=defaults.CONNECTIONS\n )\n parser.add_argument(\"--retries\", type=int, default=defaults.RETRIES)\n\n args = parser.parse_args()\n # Scan lists for thread links\n for l in args.list:\n with open(l, \"r\") as f:\n args.thread.extend([t.strip() for t in f if not t.startswith(\"#\")])\n # Make sure base_dir is an absolute path\n args.base_dir = os.path.abspath(args.base_dir)\n # Weed out clearly wrong thread URLs\n args.thread = set(fnmatch.filter(args.thread, \"*boards.4chan*.org/*/thread/*\"))\n\n return args",
"def setNThreads(self,n):\n assert(n>0)\n self._c_param.n_threads = n",
"def getThreads():\r\n return multiprocessing.cpu_count()",
"def parse_and_validate_cmd_line():\n if len(sys.argv) != 4:\n print USAGE_STR.format(sys.argv[0])\n sys.exit()\n # attempt to parse the parameters tell the user and exit if we can't\n num_segments = parse_and_validate_num_segs(sys.argv[1])\n # try to parse numThreads\n num_threads = parse_and_validate_num_threads(sys.argv[2])\n # try to parse and test the data directory\n data_dir = parse_and_validate_data_dir(sys.argv[3])\n return num_segments, num_threads, data_dir",
"def AddThreadsPerCore(parser):\n parser.add_argument(\n '--threads-per-core',\n type=int,\n required=False,\n help=\"\"\"\\\n The number of threads per core. The value of this flag can be 1 or 2.\n To disable SMT, set this flag to 1. Only available in Cloud SQL for SQL Server instances.\n \"\"\",\n )",
"def threadCount(*args, numberOfThreads: Union[int, bool]=0, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass",
"def threads_per_core(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"threads_per_core\")",
"def setNumThreads(cls, numThreads: int):\n cls.NUMTHREADS = numThreads",
"def parse_cores(core_str):\n num_cores = os.cpu_count()\n cores = []\n\n # remove spaces\n core_str.replace(\" \", \"\")\n\n # check if not a range\n if '-' not in core_str:\n cores = list(map(int, core_str.strip().split(',')))\n else:\n # parse range e.g. 2-8\n core_str = core_str.strip().split('-')\n for i in range(int(core_str[0]), int(core_str[1]) + 1):\n cores.append(i)\n\n # ensure valid cores specified\n for core in cores:\n if core < 0 or core >= num_cores:\n print(\"Core {} out of range!\".format(core), file=sys.stderr)\n raise Exception()\n\n return cores",
"def parse_parameters(filePath):\r\n numThreads, queue, affinity = 0,\"\",\"\"\r\n \r\n for line in open(filePath):\r\n if \"spec.omp2001.size:\" in line:\r\n if get_last_column_number(line)==\"test\":\r\n print(\"IS TEST SIZE!!1 : \" + filePath)\r\n \r\n if \"spec.omp2001.sw_threads:\" in line:\r\n numThreads = int(get_last_column_number(line))\r\n \r\n if \"spec.omp2001.mach:\" in line:\r\n machine = line.split(\" \")[-1]\r\n columns = machine.split(\".\")\r\n \r\n queue = columns[0]\r\n affinity = columns[1]\r\n \r\n return numThreads, queue, affinity",
"def threads_per_core(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"threads_per_core\")",
"def set_threadpool_size(nthreads):\n os.environ[\"OMP_THREAD_LIMIT\"] = \"0\" if nthreads is None else str(nthreads)"
] | [
"0.7095453",
"0.7019208",
"0.66536057",
"0.6259861",
"0.6224292",
"0.61977744",
"0.6061552",
"0.59712",
"0.5954642",
"0.5913753",
"0.5911587",
"0.590257",
"0.5894001",
"0.5893401",
"0.5878691",
"0.5852902",
"0.5841035",
"0.58334404",
"0.58245236",
"0.57961905",
"0.57697976",
"0.57141274",
"0.5703374",
"0.5695026",
"0.5669754",
"0.5668341",
"0.5633745",
"0.5630821",
"0.55734897",
"0.5549385"
] | 0.73970425 | 0 |
Attempts to parse the number of segments passed in on the command line | def parse_and_validate_num_segs(segment_str):
# try to parse numSegments
num_segments = 0
try:
num_segments = int(segment_str)
divs = math.log(num_segments, 2)
if num_segments < 2:
raise ValidationError(NUMSEG_ERR_SMALL_VAL)
elif int(divs) != divs:
raise ValidationError(NUMSEG_ERR_BAD_POW)
except ValidationError as err:
print USAGE_STR.format(sys.argv[0])
print NUMSEG_ERR.format(err.args[0])
sys.exit()
except ValueError:
print USAGE_STR.format(sys.argv[0])
print NUMSEG_ERR.format(NUMSEG_ERR_BAD_PARSE)
sys.exit()
except BaseException as err:
print USAGE_STR.format(sys.argv[0])
print NUMSEG_ERR.format("Unexpected error")
print "Error was:\n\t", err
sys.exit()
return num_segments | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_segments(s):\n s = s.strip().split()\n return len(s)",
"def parse_and_validate_cmd_line():\n if len(sys.argv) != 4:\n print USAGE_STR.format(sys.argv[0])\n sys.exit()\n # attempt to parse the parameters tell the user and exit if we can't\n num_segments = parse_and_validate_num_segs(sys.argv[1])\n # try to parse numThreads\n num_threads = parse_and_validate_num_threads(sys.argv[2])\n # try to parse and test the data directory\n data_dir = parse_and_validate_data_dir(sys.argv[3])\n return num_segments, num_threads, data_dir",
"def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen",
"def get_segments(file_name):\n count = 1\n total_num_lines = num_lines_in_file(file_name)\n with open(file_name, 'r') as file_in:\n pre_segment = file_in.readline().split()[0]\n segments = [pre_segment]\n num_lines = []\n for line in file_in:\n line = line.split()\n if line[0].startswith(';;'):\n count += 1\n else:\n if len(line) >= LINE_LEN:\n if line[0] == pre_segment:\n count += 1\n else:\n segments.append(line[0])\n pre_segment = line[0]\n num_lines.append(count)\n count = 1\n else:\n count += 1\n last_num_lines_entry = total_num_lines - sum(num_lines)\n num_lines.append(last_num_lines_entry)\n assert len(segments) == len(num_lines), \"%i != %i\" %(len(segments), len(num_lines))\n return segments, num_lines",
"def getSegmentCount(self) -> int:\n ...",
"def segment_n(self):\n return len(self.segment_lengths)",
"def processArgs(self, argv):\n parser = OptionParser(usage=usage)\n parser.add_option(\"-a\", \"--show_ADT\", action=\"store_true\", dest=\"show_ADT\",\n default=self.show_ADT, help=\"Display ADT value if set\")\n parser.add_option(\"-f\", \"--show_file\", action=\"store_true\", dest=\"show_file\",\n default=self.show_file, help=\"Display matching filename if set\")\n parser.add_option(\"-t\", \"--show_time\", action=\"store_true\", dest=\"show_time\",\n default=self.show_time, help=\"Display message time\")\n parser.add_option(\"-v\", \"--show_visitID\", action=\"store_true\", dest=\"show_visitID\",\n default=self.show_visitID, help=\"Display visit ID\")\n parser.add_option(\"-p\", \"--show_pc\",\n action=\"store_true\",\n dest=\"show_pc\",\n default=self.show_pc,\n help=\"Display patient class\")\n\n (options, pargs) = parser.parse_args()\n if len(pargs) < 3:\n parser.error(\"incorrect number of arguments\")\n\n self.show_ADT = parser.values.show_ADT\n self.show_file = parser.values.show_file\n self.show_time = parser.values.show_time\n self.show_visitID = parser.values.show_visitID\n self.show_pc = parser.values.show_pc\n \n self.segments_of_interest = pargs.pop(0)\n if len(self.segments_of_interest) != 3:\n parser.error(\"segment '%s' looks incorrect, expected something like 'PV1'\"\n % self.segments_of_interest)\n\n try:\n nums = pargs.pop(0).split(\",\")\n for num in nums:\n if 'MSH' == self.segments_of_interest:\n num = int(num) - 1\n self.sequences.append(int(num))\n except:\n parser.error(\"sequence must be an integer, separate multiple w/ comma and no spaces\")\n\n for patternOrFile in pargs:\n for file in glob.glob(patternOrFile):\n if not os.path.isfile(file):\n parser.error(\"can't open input file %s\" % file)\n self.filelist.append(file)\n \n # Require at least one file\n if not len(self.filelist):\n parser.error(\"at least one input file is required\")",
"def segment(args):\n from jcvi.formats.base import SetFile\n\n p = OptionParser(segment.__doc__)\n p.add_option(\n \"--chain\",\n default=1,\n type=\"int\",\n help=\"Allow next N genes to be chained\",\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n idsfile, bedfile = args\n bed = Bed(bedfile)\n order = bed.order\n ids = SetFile(idsfile)\n losses = Grouper()\n skip = opts.chain\n for i, a in enumerate(bed):\n a = a.accn\n for j in range(i + 1, i + 1 + skip):\n if j >= len(bed):\n break\n b = bed[j].accn\n if a in ids:\n losses.join(a, a)\n if a in ids and b in ids:\n losses.join(a, b)\n\n losses = list(losses)\n singletons = [x for x in losses if len(x) == 1]\n segments = [x for x in losses if len(x) > 1]\n ns, nm, nt = len(singletons), len(segments), len(losses)\n assert ns + nm == nt\n\n # Summary for all segments\n for x in sorted(singletons) + sorted(segments):\n print(\n \"\\t\".join(\n str(x)\n for x in (\"|\".join(sorted(x)), len(x), estimate_size(x, bed, order))\n )\n )\n\n # Find longest segment stretch\n if segments:\n mx, maxsegment = max([(len(x), x) for x in segments])\n print(\"Longest stretch: run of {0} genes\".format(mx), file=sys.stderr)\n print(\" {0}\".format(\"|\".join(sorted(maxsegment))), file=sys.stderr)\n seg_asize = sum(estimate_size(x, bed, order) for x in segments)\n seg_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in segments\n )\n else:\n seg_asize = seg_bsize = 0\n\n sing_asize = sum(estimate_size(x, bed, order) for x in singletons)\n sing_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in singletons\n )\n total_asize = sing_asize + seg_asize\n total_bsize = sing_bsize + seg_bsize\n print(\n \"Singleton ({0}): {1} - {2} bp\".format(ns, sing_asize, sing_bsize),\n file=sys.stderr,\n )\n print(\n \"Segment ({0}): {1} - {2} bp\".format(nm, seg_asize, seg_bsize), file=sys.stderr\n )\n print(\n \"Total ({0}): {1} - {2} bp\".format(nt, total_asize, total_bsize),\n file=sys.stderr,\n )\n print(\n \"Average ({0}): {1} bp\".format(nt, (total_asize + total_bsize) / 2),\n file=sys.stderr,\n )",
"def partition_Basic(segfile):\n scenelist = Recording.read_segs(segfile)\n segcount = 0\n for l in scenelist.values():\n segcount += len(l)\n return scenelist, segcount",
"def segment_from_command_line(args):\n\n input_file = BedTool(args.input)\n # Segment the input file\n return segment(input_file, args.method, p0=args.p0, prior=args.prior)",
"def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])",
"def parse_sizespec(self):\n\tline_splitted = self.line.split()\n\n\tfor argument_unit in line_splitted:\n\t # words[0] is the identifier of the argument\n\t # and words[1] is the argument value.\n\t words = self.split_argument_unit(argument_unit)\n\t if words[0] == 'N':\n\t\tself.node_number = int(words[1])\n\t elif words[0] == 'L':\n\t\tself.link_number = int(words[1])\n\t # if we do not recognize the argument an exception is raised\n\t else:\n\t raise ArgumentNotFoundError(found = words[0])",
"def get_segm_num(*args):\n return _ida_segment.get_segm_num(*args)",
"def parse_cli():\n\n parser = argparse.ArgumentParser(description='Slice 3D objects into many 2D projections')\n\n parser.add_argument(\n '-om', '--object-module',\n action='append',\n type=str,\n help='An SCAD module that makes up the object being sliced'\n )\n\n parser.add_argument(\n '-km', '--key-module',\n action='append',\n type=str,\n help='An SCAD module that makes up the keying of the sliced object'\n )\n\n parser.add_argument(\n '-i', '--include',\n action='append',\n type=str,\n help='An include that is inserted into the SCAD file'\n )\n\n parser.add_argument(\n '-st', '--start',\n action='store',\n type=float,\n default=0.0,\n help='Minimum object height to slice from'\n )\n\n parser.add_argument(\n '-ed', '--end',\n action='store',\n type=float,\n default=100.0,\n help='Maximum object height to slice to'\n )\n\n parser.add_argument(\n '-s', '--step',\n action='store',\n type=float,\n help='Seperation between slices'\n )\n\n parser.add_argument(\n '-n', '--number',\n action='store',\n type=int,\n help='Number of slices to make'\n )\n\n parser.add_argument(\n '-o', '--output',\n action='store',\n type=str,\n default='./out/slice_$height.dxf',\n help='Output directory and format, must contain \"$height\" and end in \".dxf\"'\n )\n\n parser.add_argument(\n '--scad-filename',\n action='store',\n type=str,\n default='object_slice.scad',\n help='FIlename to save OpenSCAD file that slices a single layer as'\n )\n\n parser.add_argument(\n '-k', '--keep-scad-file',\n action='store_true',\n default=False,\n help='Keep the OpenSCAD file that slices a single layer'\n )\n\n parser.add_argument(\n '--openscad-command',\n action='store',\n type=str,\n default='openscad',\n help='Command used to execute OpenSCAD'\n )\n\n parser.add_argument(\n '-j', '--jobs',\n action='store',\n type=int,\n default=4,\n help='Number oj jobs (processes)'\n )\n\n parser.add_argument(\n '--log-level',\n action='store',\n type=str,\n default='INFO',\n help='Logging level [DEBUG,INFO,WARNING,ERROR,CRITICAL]'\n )\n\n props = parser.parse_args()\n return props",
"def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"-threads\", help=\"specifies a thread count for parallel operations\", type=int)\n return parser.parse_args()",
"def parse_args():\n\tglobal id_width\n\n\t# Parsing the args\n\targs = parser.parse_args()\n\n\t# Retrieving the args\n\tid_width = args.id_width",
"def parse_sequence_lengths(filepath, base_pair_limit):\n\n total_count = 0\n limit_count = 0\n with open(filepath) as f:\n line = f.readline()\n while line:\n if line.startswith('@'):\n total_count += 1\n seq = f.readline()\n sep = f.readline()\n qual = f.readline()\n if len(seq.strip()) > base_pair_limit:\n limit_count += 1\n line = f.readline()\n\n return limit_count / total_count",
"def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))",
"def parse_arguments():\n parser = argparse.ArgumentParser(\n description='Get count of documents in a collection'\n )\n parser = SCRIPTS_LIBRARY.additional_arguments(parser)\n parser.add_argument(\n '--collection', '-c', nargs=1, dest='collection', required=True,\n default=False,\n help=\"\"\"Return count of documents in this collection\"\"\"\n )\n return parser.parse_args()",
"def calculate_number_of_segments(self):\n return sum(len(eg.transcript_file.segments) for eg in self.exemplars)",
"def parse_args():\n parser = common_parser()\n parser.description = (\n \"Given a sequence dict, fasta index or a bed file, scatter over the \"\n \"defined contigs/regions. Each contig/region will be split into \"\n \"multiple overlapping regions, which will be written to a new bed \"\n \"file. Each contig will be placed in a new file, unless the length of \"\n \"the contigs/regions doesn't exceed a given number.\")\n\n parser.add_argument(\"-c\", \"--chunk-size\", type=int, default=1e6,\n metavar=\"SIZE\",\n help=\"The size of the chunks. The first chunk in a \"\n \"region or contig will be exactly length SIZE, \"\n \"subsequent chunks will SIZE + OVERLAP and the final \"\n \"chunk may be anywhere from 0.5 to 1.5 times SIZE \"\n \"plus overlap. If a region (or contig) is smaller \"\n \"than SIZE the original regions will be returned. \"\n \"Defaults to 1e6\")\n parser.add_argument(\"-m\", \"--minimum-bp-per-file\", type=int, default=45e6,\n help=\"The minimum number of bases represented within \"\n \"a single output bed file. If an input contig or \"\n \"region is smaller than this MINIMUM_BP_PER_FILE, \"\n \"then the next contigs/regions will be placed in the \"\n \"same file untill this minimum is met. Defaults to \"\n \"45e6.\")\n parser.add_argument(\"-o\", \"--overlap\", type=int, default=150,\n help=\"The number of bases which each chunk should \"\n \"overlap with the preceding one. Defaults to 150.\")\n parser.add_argument(\"-S\", \"--split-contigs\", action=\"store_true\",\n help=\"If set, contigs are allowed to be split up over \"\n \"multiple files.\")\n args = parser.parse_args()\n return args",
"def conf_load_skeleton_num_of_searches(fin,skeleton):\n err_msg = \"Unknown variable specification. Expected NSEARCHES:(positive int)\"\n spec = fin.readline().split(':')\n if len(spec) != 2 or spec[0] != 'NSEARCHES':\n raise EnvironmentError(err_msg)\n skeleton.num_of_searches = spec[1].strip()",
"def parse_cmd_arguments(): # {{{\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-lf', '--logfile', nargs='?',\n const=False, help='Log file on or off (true || false)')\n parser.add_argument('-tc', '--threadcount', nargs='?', default=1,\n help=\"Set number of threads to use. Defaults to 1.\")\n\n ARGS = parser.parse_args()\n\n if ARGS.logfile:\n formatter = logging.Formatter((\"\\n%(asctime)s\"\n \" %(filename)s:%(lineno)-3d\"\n \" %(levelname)s\\n%(message)s\"))\n\n log_file = \"assignment_07\" + '.log'\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n LOGGER.addHandler(file_handler)\n\n return int(ARGS.threadcount) # }}}",
"def cmd_size(args):",
"def sections(self) -> int:\n return len(self.string.split(\".\"))",
"def parse_args():\r\n desc = \"Check for the longest running requests in bookie\"\r\n parser = argparse.ArgumentParser(description=desc)\r\n\r\n parser.add_argument('-l', '--log', dest='log',\r\n action='store',\r\n default=None,\r\n required=True,\r\n help=\"log file we're reading requests from\")\r\n\r\n parser.add_argument('-n', '--number', dest='count',\r\n action='store',\r\n default=10,\r\n type=int,\r\n required=False,\r\n help=\"how many urls do we wish to see, default 10\")\r\n\r\n\r\n args = parser.parse_args()\r\n return args",
"def getnseg(*args):\n return _ida_segment.getnseg(*args)",
"def parse_arguments(args):",
"def parse_arguments():\n parser = argparse.ArgumentParser(\n description='Program to split PDF book into chapters'\n )\n parser.add_argument(\n 'file', type=str,\n help='Path to the PDF file to be split'\n )\n parser.add_argument(\n 'pages', nargs='+', type=int,\n help='Space delimited list of the fist page of each chapter'\n )\n parser.add_argument(\n '-o', '--offset', type=int, default=0,\n help='Offset applied to all page numbers (line up Index with PDF pages)'\n )\n return parser.parse_args()",
"def parse_arguments():\n parser = argparse.ArgumentParser(description='Generates performance metrics from a set of Paraver traces.')\n parser.add_argument('trace_list', nargs='*',\n help='list of traces to process in .prv or .h5 format. Accepts wild cards and automaticaly filters for valid traces')\n parser.add_argument('-d', '--debug', help='increases output verbosity to debug level', action='store_true')\n parser.add_argument('-s', '--scaling',\n help='defines whether the measurements are weak or strong scaling (default: auto)',\n choices=['weak', 'strong', 'auto'], default='auto')\n parser.add_argument('-dim', '--dimemas', help='runs Dimemas to get ideal execution times', action='store_true',\n default=False)\n parser.add_argument('-p', '--only_parse', action='store_true', help='only parse the trace_list. This option is provided to control parsing parameters')\n parser.add_argument('--chunk_size', metavar='MB', type=int, default=1024, help='parser option: limits maximum size of the file to hold in memory (default 1GB)')\n parser.add_argument('-c', '--comp_lvl', metavar='LVL', default=0, help='parser option: sets the compression level (between 0 and 9). Default is 0 (no compression)')\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n cmdl_args = parser.parse_args()\n\n prv_parser_args['--chunk_size'] = cmdl_args.chunk_size\n prv_parser_args['--comp_lvl'] = cmdl_args.comp_lvl\n if cmdl_args.debug:\n print('==DEBUG== Running in debug mode.')\n prv_parser_args['--verbose'] = True\n\n return cmdl_args"
] | [
"0.6515386",
"0.65148354",
"0.62029254",
"0.58394164",
"0.57485616",
"0.5623078",
"0.56027424",
"0.55949765",
"0.5576298",
"0.5547596",
"0.5544452",
"0.55239826",
"0.55036634",
"0.5494106",
"0.54743",
"0.54707456",
"0.54546857",
"0.54324806",
"0.5409895",
"0.5401737",
"0.5392792",
"0.53780955",
"0.5369399",
"0.5366368",
"0.5326298",
"0.5306437",
"0.5295951",
"0.52812344",
"0.52469754",
"0.5227213"
] | 0.6938135 | 0 |
Return all articles if no audience specified, otherwise only those from that Audience | def articles(self, audience_filter=None):
articles = ArticlePage.objects.live().descendant_of(self)
if audience_filter is not None:
articles = articles.filter(audience__name=audience_filter)
articles = articles.order_by('-date')
return articles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def articles(self):\n return self.get_queryset().filter(content_type__model='article').order_by('-articles__published_at')",
"def articles(self, subject_filter=None):\n articles = ArticlePage.objects.live().descendant_of(self)\n if subject_filter is not None:\n articles = articles.filter(\n Q(subject_1=subject_filter) | Q(subject_2=subject_filter))\n articles = articles.order_by('-date')\n return articles",
"def get_queryset(self):\n queryset = Article.objects.all()\n username = self.request.query_params.get('username', None)\n if username is not None:\n queryset = queryset.filter(author__username__iexact=username)\n tag = self.request.query_params.get('tag', None)\n if tag is not None:\n queryset = queryset.filter(tags__tag_name__iexact=tag)\n search = self.request.query_params.get('search', None)\n if search is not None:\n queryset = queryset.filter(\n Q(title__icontains=search) |\n Q(slug__icontains=search) |\n Q(description__icontains=search) |\n Q(body__contains=search)\n )\n\n return queryset",
"def get_ads():\n return coll_ad.distinct(KEY_AD_ID)",
"def filter_data(article):\n filtered = {\n 'id': article['id'],\n 'title': article['title'],\n 'perex': article['perex'],\n 'body': article['body'],\n 'author': article['author'].get('name', None) \n if article['author'] is not None \n else None,\n 'image': get_image(article),\n 'source': article['source']['name'],\n 'label': article['label']\n }\n\n return filtered",
"def get_articles(cls, CATEGORY=None, TAG=None, NUM=100):\n if CATEGORY:\n article_list = cls.objects.filter(\n Q(status=0) & Q(category__name__icontains=CATEGORY))[:NUM]\n return article_list\n if TAG:\n article_list = cls.objects.filter(\n Q(status=0) & Q(tags__icontains=TAG))[:NUM]\n return article_list\n return cls.objects.filter(status=0)[:NUM]",
"def queryset(self, request, queryset):\n # 返回文章queryset里面 所有指定作者的文章\n author_id = self.value()\n if author_id:\n return queryset.filter(author__id=author_id)\n else:\n return queryset",
"def process_article(sentences: List[Dict[str, str]],\n article: str,\n keyword: str,\n collect_all: bool\n ) -> List[Dict[str, str]]:\n with open(article, 'r') as txt:\n for line in txt.read().split('\\n'):\n if collect_all or keyword.lower() in line.lower():\n sentences.append({\n \"sentence\": line,\n \"keyword\": keyword\n })\n \n return sentences",
"def get_queryset(self):\n acc = self.kwargs['accession'].lstrip('MGYA')\n job_query = Q(pk=acc)\n\n if self.analysis_job_filters:\n job_query &= self.analysis_job_filters\n\n job = get_object_or_404(emg_models.AnalysisJob, job_query)\n\n analysis = None\n try:\n analysis = self.annotation_model.objects \\\n .get(analysis_id=str(job.job_id))\n except self.annotation_model.DoesNotExist:\n # Return an empty EmbeddedDocumentList, the entity exists\n # but it doesn't have annotations\n return EmbeddedDocumentList([], self.annotation_model, self.annotation_model_property)\n\n if hasattr(self, \"annotation_model_property_resolver\"):\n return self.annotation_model_property_resolver(analysis)\n\n return getattr(analysis, self.annotation_model_property)",
"def get_queryset(self):\n queryset = Article.objects.all().order_by('-id')\n title = self.request.query_params.get('title', None)\n limit = self.request.query_params.get('limit', None)\n random = self.request.query_params.get('random', None)\n if title is not None:\n queryset = queryset.filter(title__icontains=title).order_by('-id')\n elif limit is not None:\n queryset = queryset[:limit]\n elif random is not None:\n queryset = queryset[COUNT:COUNT_MAX]\n return queryset",
"def article_viewall(request):\n is_loggedin, username = get_session_variables(request)\n article_list = Article.objects.all()\n\n if article_list:\n return render_to_response('achievement/article_viewall.html', \\\n {'is_loggedin':is_loggedin, \\\n 'username':username, \\\n 'article_list':article_list}, \\\n RequestContext(request))\n else:\n return render_to_response('achievement/noview.html', \\\n {'is_loggedin':is_loggedin, \\\n 'username':username, \\\n 'type': 'Article'}, \\\n RequestContext(request))",
"def fetch_audiences(ga_client: discovery.Resource,\n account_id: str,\n property_id: str) -> Mapping[str, Audience]:\n request = ga_client.management().remarketingAudience().list(\n accountId=account_id,\n webPropertyId=property_id,\n start_index=None,\n max_results=_MAX_RESULTS_PER_CALL)\n result = retry.Retry()(request.execute)()\n items = result['items']\n # If there are more results than could be returned by a single call,\n # continue requesting results until they've all been retrieved.\n while result.get('nextLink', None):\n request.uri = result['nextLink']\n result = retry.Retry()(request.execute)()\n items += result['items']\n return dict((item['name'], Audience(item)) for item in items)",
"def search(self, json_query):\n res = self.es.search(index=self.index, body=json_query)\n #print(json.dumps(res, indent=2))\n # Process the results and return the article objects only\n articel_list = [src['_source'] for src in res['hits']['hits']]\n return articel_list",
"def get_articles(db:Session):\n return db.query(ArticleModel).all()",
"def audiences(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"audiences\")",
"def related_articles(self, num):\n related_articles = None\n try:\n related_articles = Article.objects.values('id', 'title', 'view_times', 'update_time', 'author').\\\n filter(tags__icontains=self.tags_list()[0]).\\\n exclude(id=self.id)[:num]\n except IndexError:\n pass\n\n if not related_articles:\n related_articles = Article.objects.values('id', 'title', 'view_times', 'update_time', 'author').\\\n filter(category=self.category).\\\n exclude(id=self.id)[:num]\n\n return related_articles",
"def findCrawlers_article(artMeta):\n global allCrawlers\n crawlers = []\n for c in allCrawlers:\n if c.canDo_article(artMeta):\n logging.log(5, 'Based on meta data: Crawler %s is OK to crawl article %s' % (c.name, artMeta['title']))\n crawlers.append(c)\n\n return crawlers",
"def get_queryset(self):\n samples = AudioSample.objects.distinct()\n if samples:\n return samples.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')\n else:\n return []",
"def articles(self):\r\n return Articles(self)",
"def get_queryset(self):\n return Article.objects.filter(pub_date__lte=timezone.now())",
"def get_queryset(self):\n queryset = ArticleImage.objects.all()\n id = self.request.query_params.get('artile_id', None)\n if id is not None:\n queryset = queryset.filter(artile_id=id)\n return queryset",
"def GetIndexArticles(self, article):\n index_articles = [a for a in self.site_config.GetFlattenedArticles() if \\\n isinstance(a, IndexArticle)]\n out = [i for i in index_articles if i.Match(article.type_name)]\n\n return out",
"def get_articles(request, search_type, search_for):\n\n template_name = \"home/caos_home_page_search.html\"\n unslug_search = search_for.replace('-', ' ')\n\n if 'category' in search_type:\n get_results = (CaosBlogPage.objects.live()\n .search(unslug_search, fields=['category']))\n query = Query.get(unslug_search)\n query.add_hit()\n\n elif 'author' in search_type:\n get_results = (CaosBlogPage.objects.live()\n .search(unslug_search, fields=['author']))\n query = Query.get(unslug_search)\n query.add_hit()\n\n else:\n get_results = CaosBlogPage.objects.none()\n\n context = {\n 'search_results': get_results,\n }\n\n return render(request, template_name, context)",
"def get_queryset(self):\n # queryset = Article.objects.all()\n user_id = self.kwargs['user_id']\n if user_id is not None:\n queryset = User.objects.filter(user_id=int(user_id))\n return queryset",
"def summary(self, *args, **kwargs):\n article = self.get_object()\n summary_data = self.get_serializer(article).data\n\n keywords = summary_data['keywords']\n related_articles = \\\n Article.objects.filter(Q(keywords__contains=keywords[:1])\n | Q(keywords__contains=keywords[1:2])\n | Q(keywords__contains=keywords[2:3])) \\\n .order_by('-publish_time')[:11] \\\n .values('identifier', 'title', 'images', 'site_name', 'domain', 'publish_time')\n\n related_articles = [related for related in list(related_articles)\n if related['identifier'] != article.identifier]\n\n summary_data['related'] = related_articles\n\n return Response(summary_data)",
"def get_article_by_entity(cls, entities):\n # Exist check by key\n entity_key = \"entity:\" + str(entity.entity_key)\n entity_obj = cls.db.hgetall(entity_key)\n if type(entity_obj) is not dict:\n return None\n\n # Extract\n articles = json.loads(entity_obj[\"articles\"])\n article_list = list()\n for key in articles:\n article_list.append(Article.build(cls.db.get(\"article:\" + key)))\n return article_list",
"def has_aids(self):\n\n return self.exclude(aids=None)",
"def mixed_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": {\"$gt\": 0},\n \"authorsMaleCount\": {\"$gt\": 0},\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query",
"def test_article_search_returns_only_published_articles(self):\n article1 = ArticleFactory(title=\"same\")\n article1.publish()\n article2 = ArticleFactory(title=\"same\")\n request = RequestFactory().get(\"\", {\"q\": \"same\"})\n response = Search.as_view()(request, category=\"articles\")\n self.assertIn(\"results\", response.context_data)\n results = response.context_data[\"results\"]\n self.assertEqual(len(results), 1)\n self.assertIn(article1, results)\n self.assertNotIn(article2, results)",
"def articles ():\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT no\n FROM article\n ORDER BY no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_articles_response (res, limit)"
] | [
"0.53986216",
"0.53402317",
"0.53081733",
"0.5305304",
"0.5211257",
"0.5153814",
"0.5101621",
"0.50824296",
"0.50327814",
"0.5030341",
"0.50282896",
"0.5019423",
"0.49807727",
"0.4972056",
"0.49614388",
"0.49547437",
"0.4866086",
"0.48600337",
"0.48344988",
"0.4825311",
"0.48205602",
"0.4805619",
"0.48013848",
"0.48006612",
"0.47767746",
"0.4766286",
"0.476329",
"0.47597498",
"0.47552946",
"0.47426587"
] | 0.68878406 | 0 |
Return all articles if no subject specified, otherwise only those from that Subject | def articles(self, subject_filter=None):
articles = ArticlePage.objects.live().descendant_of(self)
if subject_filter is not None:
articles = articles.filter(
Q(subject_1=subject_filter) | Q(subject_2=subject_filter))
articles = articles.order_by('-date')
return articles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_subjects(self):\n return self.filter_nodes('//Subjects/Subject')",
"def get_queryset(self):\n return filter_subjects(Subject.objects.all(), self.request.user)",
"def list_courses_subjects(self, all=False):\n q = {'facet': 'true',\n 'facet.field': 'course_subject',\n 'facet.mincount': '1',\n 'facet.sort': 'index', # Sort alphabetically\n 'rows': '0', # we don't need any actual document\n }\n if all:\n q['q'] = '*:*'\n else:\n q['q'] = 'NOT presentation_start:[* TO NOW]'\n results = searcher.search(q, start=0, count=1000) # Do not paginate\n subjects = subjects_facet_to_subjects_domain(results)\n return subjects",
"def subject_list():\n items = []\n\n soup = abcradionational.get_soup(URL + \"/podcasts/subjects\")\n \n subject_heading = abcradionational.get_podcast_heading(soup)\n \n for subject in subject_heading:\n items.append({\n 'label': subject['title'],\n 'path': plugin.url_for('subject_item', url=subject['url']),\n })\n\n return items",
"def get_by_subject(subject_name):\n return filter_by_prefix(Subscription.all(), subject_name + ':')",
"def subject_get_all(context, filters=None, marker=None, limit=None,\n sort_key=None, sort_dir=None,\n member_status='accepted', is_public=None,\n admin_as_user=False, return_tag=False):\n sort_key = ['created_at'] if not sort_key else sort_key\n\n default_sort_dir = 'desc'\n\n if not sort_dir:\n sort_dir = [default_sort_dir] * len(sort_key)\n elif len(sort_dir) == 1:\n default_sort_dir = sort_dir[0]\n sort_dir *= len(sort_key)\n\n filters = filters or {}\n\n visibility = filters.pop('visibility', None)\n showing_deleted = 'changes-since' in filters or filters.get('deleted',\n False)\n\n img_cond, prop_cond, tag_cond = _make_conditions_from_filters(\n filters, is_public)\n\n query = _select_subjects_query(context,\n img_cond,\n admin_as_user,\n member_status,\n visibility)\n\n if visibility is not None:\n if visibility == 'public':\n query = query.filter(models.Subject.is_public == True)\n elif visibility == 'private':\n query = query.filter(models.Subject.is_public == False)\n\n if prop_cond:\n for prop_condition in prop_cond:\n query = query.join(models.SubjectProperty, aliased=True).filter(\n sa_sql.and_(*prop_condition))\n\n if tag_cond:\n for tag_condition in tag_cond:\n query = query.join(models.SubjectTag, aliased=True).filter(\n sa_sql.and_(*tag_condition))\n\n marker_subject = None\n if marker is not None:\n marker_subject = _subject_get(context,\n marker,\n force_show_deleted=showing_deleted)\n\n for key in ['created_at', 'id']:\n if key not in sort_key:\n sort_key.append(key)\n sort_dir.append(default_sort_dir)\n\n query = _paginate_query(query, models.Subject, limit,\n sort_key,\n marker=marker_subject,\n sort_dir=None,\n sort_dirs=sort_dir)\n\n query = query.options(sa_orm.joinedload(\n models.Subject.properties)).options(\n sa_orm.joinedload(models.Subject.locations))\n if return_tag:\n query = query.options(sa_orm.joinedload(models.Subject.tags))\n\n subjects = []\n for subject in query.all():\n subject_dict = subject.to_dict()\n subject_dict = _normalize_locations(context, subject_dict,\n force_show_deleted=showing_deleted)\n if return_tag:\n subject_dict = _normalize_tags(subject_dict)\n subjects.append(subject_dict)\n return subjects",
"def find_all_by_id(self, subject_id) -> any:\n pass",
"def show_all_subjects(self):\n self.load_subjects_in_twSubjects([self.pj[SUBJECTS][x][\"name\"] for x in self.pj[SUBJECTS]])",
"def filter_subjects(queryset, user):\n if user.is_staff:\n return queryset\n return None",
"def get_subjects_urls(self, subjects: Iterable[Subject]) -> List[str]:\n self.logger.debug('Finding subjects urls.')\n all_rows = self.browser.find_elements(*MaterialLocators.SUBJECT_ROW)\n subjects = {(s.name.strip('. '), s.teacher.strip('. ')) for s in subjects}\n subjects_urls = []\n for subject in all_rows:\n name: str = subject.find_element(*MaterialLocators.SUBJECT_NAME).text\n teacher: str = subject.find_element(*MaterialLocators.SUBJECT_TEACHER).text\n if (name.strip('. '), teacher.strip('. ')) in subjects:\n url = subject.find_element(*MaterialLocators.SUBJECT_NAME).get_attribute('href')\n subjects_urls.append(url)\n\n self.logger.debug(f'Found subjects urls for {len(subjects_urls)}/{len(subjects)}.')\n return subjects_urls",
"def get_by_subject(subject):\n return MinimalSubject.all().ancestor(subject).get()",
"def get_post_subjects(self, entities):\n\n post_subjects = []\n for entity in entities:\n post = BlogPost.by_id(entity.post_id)\n post_subjects.append(post.subject)\n return post_subjects",
"def _extract_subjects(self, subjects):\n self._logger.debug(\"Started extracting subjects metadata\")\n\n subject_metadata_list = []\n\n for subject in subjects:\n self._logger.debug(\n \"Started extracting subject metadata from {0}\".format(encode(subject))\n )\n\n scheme = subject.scheme\n\n subject_type = Subject.by_uri.get(scheme)\n if not subject_type:\n # We can't represent this subject because we don't\n # know its scheme. Just treat it as a tag.\n subject_type = Subject.TAG\n\n subject_metadata = SubjectData(\n type=subject_type, identifier=subject.code, name=subject.name, weight=1\n )\n\n subject_metadata_list.append(subject_metadata)\n\n self._logger.debug(\n \"Finished extracting subject metadata from {0}: {1}\".format(\n encode(subject), encode(subject_metadata)\n )\n )\n\n self._logger.debug(\n \"Finished extracting subjects metadata: {0}\".format(\n encode(subject_metadata_list)\n )\n )\n\n return subject_metadata_list",
"def inconsistent_subject(self):\n a = [s for s in self.subjects if len([sa for sa in s.samples if sa.inconsistent_subject]) > 0]\n if len(a) == 0:\n return None\n return a",
"def subjects(self):\n return self.cache.subjects()",
"def email_sent_with_subject(subject):\n return [email.subject == subject for email in mail.outbox]",
"def articles(self):\n return self.get_queryset().filter(content_type__model='article').order_by('-articles__published_at')",
"def select_course_detail_by_subject(self, subject):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.subject LIKE ?\n \"\"\",\n (subject,),\n )\n return cursor.fetchall()",
"def subjects(self):\n if not self._subjects:\n self._subjects = [subject_factory(s, workspace=self, samples=self.samples) for s in self._get_entities(self.subject_property_name)]\n return self._subjects",
"def get_sub_collection_with_subject(self, subject):\n sub_html = self._get_html_for_subject_main(subject.id_)\n ep_html = self._get_html_for_subject_eps(subject.id_)\n sub_collection = (BangumiSubjectCollectionFactory\n .from_html_with_subject(subject, sub_html, ep_html))\n sub_collection.session = self\n return sub_collection",
"def subject_tag_get_all(context, subject_id, session=None):\n _check_subject_id(subject_id)\n session = session or get_session()\n tags = session.query(models.SubjectTag.value).filter_by(\n subject_id=subject_id).filter_by(deleted=False).all()\n return [tag[0] for tag in tags]",
"def articles(self, audience_filter=None):\n articles = ArticlePage.objects.live().descendant_of(self)\n if audience_filter is not None:\n articles = articles.filter(audience__name=audience_filter)\n articles = articles.order_by('-date')\n return articles",
"def query_subject_ids(self, subject_id, nodes=None):\n if nodes == None:\n nodes = self.list_nodes()\n elif isinstance(nodes, str):\n nodes = [nodes]\n\n if \"case\" in nodes:\n subject_node, subject_prop = \"case\", \"case_ids\"\n else:\n subject_node, subject_prop = \"subject\", \"subject_ids\"\n\n # if projects == None: #if no projects specified, get node for all projects\n # projects = list(json_normalize(self.sub.query(\"\"\"{project (first:0){project_id}}\"\"\")['data']['project'])['project_id'])\n # elif isinstance(projects, str):\n # projects = [projects]\n\n query_args = '{}:\"{}\"'.format(subject_prop, subject_id)\n results = {}\n for node in nodes:\n res = self.paginate_query(\n node=node, props=[\"project_id\", \"id\", \"submitter_id\"], args=query_args\n )\n if len(res[\"data\"][node]) > 0:\n results[node] = res[\"data\"][node]\n\n data = {}\n for node in list(results):\n # uuids = [rec['id'] for rec in results[node]]\n dfs = []\n for rec in results[node]:\n project_id = rec[\"project_id\"]\n uuid = rec[\"id\"]\n program, project = project_id.split(\"-\", 1)\n rec = self.sub.export_record(\n program=program,\n project=project,\n uuid=uuid,\n fileformat=\"tsv\",\n filename=None,\n )\n # str_list = rec.split('\\r\\n')\n # headers = str_list[0].split('\\t')\n # data = str_list[1].split('\\t')\n # df = pd.DataFrame(data,columns=headers)\n dfs.append(pd.read_csv(StringIO(rec), sep=\"\\t\", header=0))\n df = pd.concat(dfs, ignore_index=True, sort=False)\n data[node] = df\n\n return data\n\n # visits = list(set([item for sublist in [list(set(list(df['visit_id']))) for df in data.values()] for item in sublist if not pd.isnull(item)]))",
"def clear_subjects(db):\n\t\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tp.subject = None",
"def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]",
"def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]",
"def subject_item(url):\n soup = abcradionational.get_soup(url)\n \n playable_podcast = abcradionational.get_playable_podcast(soup)\n\n items = abcradionational.compile_playable_podcast(playable_podcast)\n\n\n return items",
"async def test_subjects_to_ignore_by_uuid(self):\n first_subject_uuid = first(first(self.reports[\"reports\"])[\"subjects\"].keys())\n self.set_source_parameter(\"subjects_to_ignore\", [first_subject_uuid])\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(response, value=str(int(len(self.entities) / 2)), total=self.expected_software_metrics)",
"def api_list_local_subjects():\n if 'POST' == request.method:\n per_page = get_safe_int(request.form.get('per_page'))\n page_num = get_safe_int(request.form.get('page_num'))\n else:\n per_page = get_safe_int(request.args.get('per_page'))\n page_num = get_safe_int(request.args.get('page_num'))\n\n pagination = SubjectEntity.query.paginate(page_num, per_page, False)\n items = [i.serialize() for i in pagination.items]\n # app.logger.debug(\"per_page: {}, page_num: {}\".format(per_page, page_num))\n return jsonify_success(dict(total_pages=pagination.pages,\n list_of_subjects=items))",
"def get_journalless_articles(self):\n self.setQuery(\"\"\"select ?art ?pmid where {\n ?art <http://purl.org/ontology/bibo/pmid> ?pmid .\n filter not exists { ?art <http://vivoweb.org/ontology/core#hasPublicationVenue> ?o }\n }\"\"\")\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n return [(x['art']['value'], x['pmid']['value']) for x in g['results']['bindings']]\n except:\n return None"
] | [
"0.68461657",
"0.65303606",
"0.6318721",
"0.6296977",
"0.6280416",
"0.6242551",
"0.6236301",
"0.616833",
"0.5941876",
"0.58910584",
"0.585484",
"0.5786363",
"0.57801485",
"0.5764462",
"0.57628167",
"0.57169193",
"0.57076377",
"0.570699",
"0.5700736",
"0.5694488",
"0.5654827",
"0.5628687",
"0.5626616",
"0.5624819",
"0.55899364",
"0.55899364",
"0.5567625",
"0.5567466",
"0.55642045",
"0.5560574"
] | 0.72850305 | 0 |
Get all articles by this author | def author_articles(self):
return ArticlePage.objects.live().filter(author=self).order_by('-date') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_authors(cls):\n authors = from_cache('AuthorsList')\n if not authors:\n authors = SuiAuthor.all().order('name').fetch(400)\n to_cache('AuthorsList', authors)\n return authors",
"def queryset(self, request, queryset):\n # 返回文章queryset里面 所有指定作者的文章\n author_id = self.value()\n if author_id:\n return queryset.filter(author__id=author_id)\n else:\n return queryset",
"def find_all(cls):\n return [AuthorModel(a['name'], str(a['_id']))\n for a in cls.db.newsdb.find()]",
"def articles(self, audience_filter=None):\n articles = ArticlePage.objects.live().descendant_of(self)\n if audience_filter is not None:\n articles = articles.filter(audience__name=audience_filter)\n articles = articles.order_by('-date')\n return articles",
"def articles(self):\n return articles.Articles(self)",
"def articles(self):\r\n return articles.Articles(self)",
"def get_quotes_for_author(self, author: str) -> List[Quote]:\n params = (f'%{author}%',)\n query = '''\n SELECT *\n FROM quotes\n WHERE author LIKE ?\n ORDER BY created_at DESC\n '''\n\n ret = self.__execute_query(query, params)\n\n return self.__build_quotes_from_query_result(ret.fetchall())",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def get_authors(self, blogid=1):\n return self.execute('wp.getAuthors', blogid, self.username, self.password)",
"def articles(self):\n return self.get_queryset().filter(content_type__model='article').order_by('-articles__published_at')",
"def get_articles(db:Session):\n return db.query(ArticleModel).all()",
"def getAuthors(self):\n authors = []\n for each in self.context.getAuthors():\n title = each['title']\n firstname = each['firstname']\n middlename = each['middlename']\n lastname = each['lastname']\n author = Author(title, firstname, middlename, lastname)\n authors.append(author)\n return authors",
"def articles(self, subject_filter=None):\n articles = ArticlePage.objects.live().descendant_of(self)\n if subject_filter is not None:\n articles = articles.filter(\n Q(subject_1=subject_filter) | Q(subject_2=subject_filter))\n articles = articles.order_by('-date')\n return articles",
"def authors(self):\n authors = [\n n.people for n in self.pymbake_person_relationship.all()\n ]\n\n return authors",
"def get_queryset(self):\n author = self.kwargs['author']\n target_author = get_object_or_404(Blog, author=author)\n return Blog.objects.filter(author=target_author)",
"def query_by_author(cls,author):\n bakey = 'BookByAuthor_%s'%author\n bks = from_cache(bakey)\n if not bks:\n bks = map(lambda e:str(e.id()), SuiBook.all(keys_only=True).filter('authors =',author).fetch(100))\n to_cache(bakey,bks)\n return bks",
"def articles(self):\r\n return Articles(self)",
"def load_authors(self):\n authors = self.session.query(Author).join(AuthorStatus) \\\n .filter(Author.status_id == AuthorStatus.id) \\\n .filter(AuthorStatus.status == 'active') \\\n .all()\n return authors",
"def get_authors(self):\n return [aer.author for aer in self.authorentryrank_set.all()]",
"def find_by_name(cls, name):\n authors = []\n for author in cls.db.newsdb.find({'$text': {'$search': name}}):\n authors.append(AuthorModel(author['name'], str(author['_id'])))\n\n return authors",
"def related_articles(self, num):\n related_articles = None\n try:\n related_articles = Article.objects.values('id', 'title', 'view_times', 'update_time', 'author').\\\n filter(tags__icontains=self.tags_list()[0]).\\\n exclude(id=self.id)[:num]\n except IndexError:\n pass\n\n if not related_articles:\n related_articles = Article.objects.values('id', 'title', 'view_times', 'update_time', 'author').\\\n filter(category=self.category).\\\n exclude(id=self.id)[:num]\n\n return related_articles",
"def get_authors(self, instance):\n\n # Get Authors in the specified order\n author_order = Author.objects \\\n .filter(dataset_id=instance.id) \\\n .order_by('order')\n\n # Put in a list\n authors = [a.author for a in author_order]\n\n # Return a list of person urls\n serializers = PersonSerializer(authors, many=True, context={'request': self.context['request']}).data\n return [p[\"url\"] for p in serializers]",
"def books_by_author(self, author):\n request_url = \"%s?author=%s\" % (self.API_URL, author)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books = []\n for book in json_data['docs']:\n books.append(book['title_suggest'])\n return books",
"def query_by_author(cls,aid):\n gds = from_cache('VG_%s'%aid)\n if not gds:\n gds = [str(g.id()) for g in SuiGoods.all(keys_only=True).filter('author =',aid).fetch(1000)]\n to_cache('VG_%s'%aid, gds)\n return SuiGoods.load_by_ids(gds)",
"def get_article_author(self, article_webpage):\n pass",
"def get_queryset(self):\n id = self.kwargs['pk']\n target_author=get_object_or_404(Author, pk = id)\n return Post.objects.filter(author=target_author)",
"def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list",
"def authors(self):\n user_ids = set(r.author.id for r in self.history())\n return User.query.find({'_id': {'$in': list(user_ids)}}).all()",
"def test_author_sorted_articles(self):\n\n self.make_test('articles', ArticleListSerializer, 'author:articles')",
"def Authors(self, default=[{}]):\n tmp = self.data.get('authors', default)\n return [HEP.AuthorReducedObject(i) for i in tmp]"
] | [
"0.7249089",
"0.7019884",
"0.6874972",
"0.67850435",
"0.6751636",
"0.67413604",
"0.671379",
"0.67041487",
"0.6664029",
"0.6659049",
"0.665358",
"0.66036725",
"0.6585151",
"0.6580827",
"0.65239894",
"0.64984363",
"0.6492107",
"0.647211",
"0.64664465",
"0.6441173",
"0.64372087",
"0.643402",
"0.64028144",
"0.6354904",
"0.63491195",
"0.6303197",
"0.62449634",
"0.6237406",
"0.6226154",
"0.6219703"
] | 0.8193258 | 0 |
Switches to displaying the given group of layers. When group is None, the default CircuitPython terminal will be shown. | def show(self, group):
self._current_group = group | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_group_tree(builder, group: ServerGroup, show: bool):\n tree_component_name = group_tree_component[group]\n show_ui_component(builder, tree_component_name, show)\n header_component_name = group_header_component[group]\n show_ui_component(builder, header_component_name, show)",
"def do_standalone_display(self):\n stage = clutter.Stage()\n stage.connect('destroy', clutter.main_quit)\n stage.connect('key-press-event', lambda x,y: clutter.main_quit())\n stage.set_fullscreen(True)\n stage.set_color(clutter.color_from_string('black'))\n stage.add(self.group)\n stage.show_all()\n clutter.main()",
"async def display_group(ctx, owner: str, group_name: str=None, option: str=None):\n\n groups = bg_bot.manager.get_groups(owner, group_name)\n\n if len(groups) == 0:\n await ctx.send(\"No groups exist that match the input criteria.\")\n else:\n embed = discord.Embed(title=\"Open Groups\")\n for group in groups:\n if group.comp:\n open_spots = [group.comp[role]['number'] - len(group.comp[role]['players']) for role in group.comp]\n availability = f'Open Spots\\nTanks: {open_spots[0]}, Healers: {open_spots[1]}, DPS: {open_spots[2]}'\n else:\n availability = f'No specified comp, {group._max - group._total} spots left'\n \n embed.add_field(name=f'{group.name} by {group.owner}: {group.rating} {group.group_type}', value=availability)\n\n await ctx.send(embed=embed)",
"def __init__(self, ccwgroup: str, layer2: bool, **kwargs):\n super().__init__(**kwargs)\n self.ccwgroup = ccwgroup\n self.layer2 = layer2",
"def printOptions():\n\n # For each group, create a group option\n print(\"default\")",
"def setDisplayWireframe():\n for node in nuke.allNodes():\n print node.name()\n goodGeo = [\"Group\", \"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\"]\n if node.Class() in goodGeo:\n if node.Class() == \"Group\":\n node.begin()\n for child in nuke.allNodes():\n if child.Class() in goodGeo:\n child['display'].setValue(1)\n node.end()\n else:\n node['display'].setValue(1)",
"def show_group(self, _, group):\n items = []\n for id in self.execution_manager.get_jobs(group):\n job = self.execution_manager.get(id)\n if job.retries > 0:\n items.append((\"{}\".format(job), self.show_job_details, id, ('retried job', 'retried job select')))\n else:\n items.append((\"{}\".format(job), self.show_job_details, id))\n\n menu_key = \"Jobs {}\".format(group)\n self.menu_structure[menu_key] = (\"jobs\", items)\n self.show_menu(None, menu_key)",
"def display_layers(layers, wide, tall):\n\n colours = {\n \"0\": \" \",\n \"1\": \" # \",\n }\n\n for row in range(tall):\n for col in range(wide):\n pixels = [layer[row][col] for layer in layers]\n line = next(colours[p] for p in pixels if p in colours)\n print(line, end=\"\")\n print()",
"def show_security_group(self, security_group, **_params):\r\n return self.get(self.security_group_path % (security_group),\r\n params=_params)",
"def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self",
"def cli(ctx, group_id):\n return ctx.gi.groups.show_group(group_id)",
"def display_grasps(img, groups,name=\"debug_imgs/grasps\", as_tuple = False):\n if len(groups) > 0:\n box_color = (255, 0, 0)\n line_color = box_color[::-1]\n img_data = np.copy(img.data)\n for i in range(len(groups)):\n if as_tuple:\n cm = groups[0]\n d = groups[1]\n else:\n cm = groups[i].cm\n d = groups[i].dir\n\n img_data = draw_point(img_data, cm)\n\n p0 = tuple((cm - d * cfg.LINE_SIZE/2)[::-1].astype('int32'))\n p1 = tuple((cm + d * cfg.LINE_SIZE/2)[::-1].astype('int32'))\n cv2.line(img_data, p0, p1, line_color, 2)\n #BGR to RGB\n rgb = np.fliplr(img_data.reshape(-1,3)).reshape(img_data.shape)\n plt.imshow(rgb)\n plt.axis('off')\n plt.savefig(name + \".png\")\n if cfg.QUERY:\n plt.show()",
"def display_groups(self, display_groups):\n\n self._display_groups = display_groups",
"def printUsersInGroup(group) -> None:\n click.echo(tabulate(listUsersInDict(group), headers=\"keys\", tablefmt=\"grid\"))",
"def print_group_message(group, contact, message):\n print(f\"{group}: {contact}: {message}\")",
"def edit_group_command(self):\n self.switch_frame(\"Edit Group\")\n id = self.parent.get_frame_id(\"Edit Group\")\n self.parent.frames[id].display_group(self.user.active_group)",
"def main(self, session: Session) -> None:\n buttons = []\n for group in groups_api.get_user_groups(session.user):\n if session.user in group.admins:\n buttons.append(self.ui.create_button_view(group.name,\n lambda s: self.show_small_menu(group,\n session)))\n self.ui.create_button_group_view(session, \"What group do you want to change?\",\n buttons).draw()",
"def app_or_group(group, app):\n def f(qtile):\n try:\n qtile.groupMap[group].cmd_toscreen()\n except KeyError:\n qtile.cmd_spawn(app)\n return f",
"def cli() -> None:\n try:\n group() # pylint: disable=no-value-for-parameter\n except forml.AnyError as err:\n print(err, file=sys.stderr)",
"def player_group(group):\n link = reverse('wouso.interface.profile.views.player_group', args=(group.id,))\n\n return u'<a href=\"%s%s\" title=\"%s\">%s</a>' % (link, group, group.name, group)",
"def setGroup(self, group):\n\t\tself.config.GROUP = group",
"async def send_group_help(self, group):\n self.add_command_formatting(group)\n\n filtered = await self.filter_commands(group.commands, sort=self.sort_commands)\n if filtered:\n note = await self.get_opening_note()\n if note:\n self.paginator.add_line(note, empty=True)\n\n self.paginator.add_line('**%s**' % self.commands_heading)\n for command in filtered:\n self.add_subcommand_formatting(command)\n\n note = self.get_ending_note()\n if note:\n self.paginator.add_line()\n self.paginator.add_line(note)\n\n await self.send_pages()",
"def onGroup(a):\n data = a.data()\n if data:\n try:\n Gui.doCommand('Gui.activateWorkbench(\"' + data + '\")')\n except KeyError:\n pass",
"def change_channel_group(self, channel_group):\n self.model.channel_group = channel_group\n info(\"Switched to channel group {}.\".format(channel_group))\n self.emit('open')",
"def testDisplayClubak(self):\n parser = OptionParser(\"dummy\")\n parser.install_display_options(separator_option=True, dshbak_compat=True)\n options, _ = parser.parse_args([])\n disp = Display(options)\n self.assertEqual(bool(disp.gather), False)\n self.assertEqual(disp.line_mode, False)\n self.assertEqual(disp.label, True)\n self.assertEqual(disp.regroup, False)\n self.assertEqual(bool(disp.groupsource), False)\n self.assertEqual(disp.noprefix, False)\n self.assertEqual(disp.maxrc, False)\n self.assertEqual(disp.node_count, True)\n self.assertEqual(disp.verbosity, VERB_STD)",
"def mask_show(image, mask, groups, name=\"image\"):\n img = cv2.addWeighted(image, 0.4, mask, 0.6, 0)\n img = sg.mark_boundaries(img, groups, color=(1,1,1))\n cv2.imshow(name, img)\n cv2.waitKey(0)",
"def _fcn_cbar_display_grp(self):\n viz = self.cbqt.cbui._cbar_grp.isChecked()\n self.menuDispCbar.setChecked(viz)\n self._fcn_menu_disp_cbar()",
"async def send_group_help(self, group):\n self.add_command_formatting(group)\n\n filtered = await self.filter_commands(group.commands, sort=self.sort_commands)\n if filtered:\n note = await self.get_opening_note()\n if note:\n self.paginator.add_line(note, empty=True)\n\n self.paginator.add_line('**%s**' % self.commands_heading)\n for command in filtered:\n await asyncio.sleep(0)\n self.add_subcommand_formatting(command)\n\n note = self.get_ending_note()\n if note:\n self.paginator.add_line()\n self.paginator.add_line(note)\n\n await self.send_pages()",
"def set_group(self, group: str) -> None:\n self.group = group",
"def print_item(group):\n print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n print(\"\\tLocation: {}\".format(group.location))\n print(\"\\tTags: {}\".format(group.tags))\n if hasattr(group, 'status'):\n print(\"\\tStatus: {}\".format(group.status))\n if hasattr(group, 'state'): # Site\n print(\"\\tStatus: {}\".format(group.state))\n if hasattr(group, 'properties'):\n print_properties(group.properties)\n print(\"\\n\\n\")"
] | [
"0.5787579",
"0.56785434",
"0.5654656",
"0.5551346",
"0.55173004",
"0.5352051",
"0.52590203",
"0.51855826",
"0.5174819",
"0.5154698",
"0.51135373",
"0.5072139",
"0.506285",
"0.5051473",
"0.5040139",
"0.5034684",
"0.5029226",
"0.50125265",
"0.4989949",
"0.49824473",
"0.49776736",
"0.49702728",
"0.49625564",
"0.49500304",
"0.49392453",
"0.4925942",
"0.49084318",
"0.49066067",
"0.48726225",
"0.48535135"
] | 0.6218714 | 0 |
Adjust the rectangle coordinates based on rotation | def _apply_rotation(self, rectangle):
if self._rotation == 90:
return Rectangle(
self._height - rectangle.y2,
rectangle.x1,
self._height - rectangle.y1,
rectangle.x2,
)
if self._rotation == 180:
return Rectangle(
self._width - rectangle.x2,
self._height - rectangle.y2,
self._width - rectangle.x1,
self._height - rectangle.y1,
)
if self._rotation == 270:
return Rectangle(
rectangle.y1,
self._width - rectangle.x2,
rectangle.y2,
self._width - rectangle.x1,
)
return rectangle | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rotate(self):\r\n # Rotate the image.\r\n self.image = pg.transform.rotozoom(self.orig_image, -self.angle, 1)\r\n # Rotate the offset vector.\r\n offset_rotated = self.offset.rotate(self.angle)\r\n print(\"offset_rotated:\", offset_rotated)\r\n # Create a new rect with the center of the sprite + the offset.\r\n self.rect = self.image.get_rect(center=self.pos+offset_rotated)",
"def _update_rect(self):\r\n self.rect.x = int(self.view_pt.x_to_scr(self.x) / self.z)\r\n self.rect.y = int(self.view_pt.y_to_scr(self.y) / self.z)",
"def minimum_rotated_rectangle(self): # -> BaseGeometry:\n ...",
"def rectangleRotation(a, b):\r\n\r\n line2 = (-1, sqrt(a**2 / 2))\r\n line4 = (-1, -sqrt(a**2 / 2))\r\n\r\n line1 = (1, sqrt(b**2 / 2))\r\n line3 = (1, -sqrt(b**2 / 2))\r\n\r\n tot = 0\r\n\r\n print(line2, line1)\r\n print(line3, line4)\r\n\r\n for xpts in range(-b * a, b * a):\r\n for ypts in range(-a * b, a * b):\r\n if (isunder(xpts, ypts, line1[0], line1[1]) and\r\n isunder(xpts, ypts, line2[0], line2[1]) and\r\n not isunder(xpts, ypts, line3[0], line3[1]) and\r\n not isunder(xpts, ypts, line4[0], line4[1])):\r\n tot += 1\r\n return tot",
"def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by",
"def adjust_position(self):\n\n # Adjust position for x-axis\n r = self.rect.x % 30\n if r != 0:\n if r <= 16:\n x = self.rect.x - r\n else:\n x = self.rect.x + (30 - r)\n\n else:\n x = self.rect.x\n\n # Adjust position for y-axis\n r = self.rect.y % 30\n if r != 0:\n if r <= 16:\n y = self.rect.y - r\n else:\n y = self.rect.y + (30 - r)\n else:\n y = self.rect.y\n\n return x, y",
"def rotate(self):\n tmp = self.width\n self.width = self.height\n self.height = tmp\n self.rotated = not self.rotated",
"def update(self):\n self.rect = (self.x, self.y, self.width, self.height)",
"def rot_center(image,rect,angle):\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect",
"def rotate_shape(shape, xy_center, angle_degrees):",
"def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]",
"def rotated_rect(w, h, angle):\n angle = math.radians(angle)\n quadrant = int(math.floor(angle / (math.pi / 2))) & 3\n sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle\n alpha = (sign_alpha % math.pi + math.pi) % math.pi\n\n bb_w = w * math.cos(alpha) + h * math.sin(alpha)\n bb_h = w * math.sin(alpha) + h * math.cos(alpha)\n\n gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)\n\n delta = math.pi - alpha - gamma\n\n length = h if (w < h) else w\n\n d = length * math.cos(alpha)\n a = d * math.sin(alpha) / math.sin(delta)\n\n y = a * math.cos(gamma)\n x = y * math.tan(gamma)\n\n return bb_w - 2 * x, bb_h - 2 * y",
"def move_rectangle(r,dx,dy):\n\n r.corner.x=r.corner.x+dx\n r.corner.y=r.corner.y+dy\n turtle.setx(r.corner.x)\n turtle.sety(r.corner.y)\n for i in range(2):\n turtle.fd(r.width)\n turtle.lt(90)\n turtle.fd(r.height)\n turtle.lt(90)\n return r",
"def rot_center(image, rect, angle):\n\trot_image = pygame.transform.rotate(image, angle)\n\trot_rect = rot_image.get_rect(center=rect.center)\n\treturn rot_image,rot_rect",
"def _spin(self):\n center= self.rect.center\n self.dizzy= self.dizzy + 10 #12\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original\n else:\n rotate= pygame.transform.rotate\n self.image= rotate(self.original, self.dizzy)\n self.rect= self.image.get_rect(center= center)",
"def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y",
"def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y",
"def rotate(self):\r\n self.rot = (self.vel.y * -3)\r\n if self.rot < -90:\r\n self.rot = -90\r\n \r\n new_image = pg.transform.rotate(self.bird_sprites[self.sprite_frame], self.rot)\r\n old_center = self.rect.center\r\n self.image = new_image\r\n self.rect = self.image.get_rect()\r\n self.rect.center = old_center\r\n # self.animate()\r",
"def _rotate_coords(self, x, y, theta, ox, oy):\n s, c = self._pkgs['numpy'].sin(theta), self._pkgs['numpy'].cos(theta)\n x, y = self._pkgs['numpy'].asarray(x) - ox, self._pkgs['numpy'].asarray(y) - oy\n return x * c - y * s + ox, x * s + y * c + oy",
"def draw_rectangle(t, w, h):\r\n for i in range(2):\r\n t.forward(w)\r\n t.left(90)\r\n t.forward(h)\r\n t.left(90)",
"def move_rectangle(old_rec,dx,dy):\n new_rec = Rectangle()\n new_rec.height = old_rec.height\n new_rec.width = old_rec.width\n new_rec.corner = Point()\n new_rec.corner.x = old_rec.corner.x + dx\n new_rec.corner.y = old_rec.corner.y + dy\n return new_rec",
"def drawRectangle(x, y, width, height):\n pen1.up()\n pen1.goto(x, y)\n pen1.down()\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)\n pen1.right(90)\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)",
"def rotate(self):\n\n self.x, self.y = self.scene.player.facing\n rotation = degrees(atan2(self.x, self.y))\n self.image = transform.rotate(self.image, rotation)\n self.rect = self.image.get_rect(left=self.rect.left, top=self.rect.top)\n self.rect.centerx = self.scene.player.rect.centerx + (8 * self.x)\n self.rect.centery = self.scene.player.rect.centery + (8 * self.y)",
"def draw_rect(t, rect):\n t.pu()\n t.goto(rect.corner.x, rect.corner.y)\n t.pd()\n t.setheading(0)\n for i in range(2):\n t.fd(rect.width)\n t.lt(90)\n t.fd(rect.height)\n t.lt(90)",
"def rect(r, theta):\n x = r * math.cos(theta)\n y = r * math.sin(theta)\n return x,y",
"def update(self):\n\t\n\t\tself.rect.y += (self.settings.rectangle_speed *\n\t\t\t\tself.settings.rectangle_direction)",
"def draw_rect(r):\n for i in range(2):\n turtle.fd(r.width)\n turtle.lt(90)\n turtle.fd(r.height)\n turtle.lt(90)",
"def draw_rect(r):\n for i in range(2):\n turtle.fd(r.width)\n turtle.lt(90)\n turtle.fd(r.height)\n turtle.lt(90)",
"def _rotate(self):\n \r\n if self.clr == 1: # (default rotation) \r\n # o o o o \r\n # o x x o x o o x\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0]] #\r\n elif self.clr == 2:\r\n # o o o o \r\n # o x o x x o x o\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0], [-1, 0, 0, 1]] #\r\n _rowOffsets = [[-1, 0, 0, 1], [-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0]] #\n \r\n elif self.clr == 3: # \r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\n \r\n _colOffsets = [[-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0], [ 1, 1, 0,-1]] #\r\n _rowOffsets = [[ 1, 1, 0,-1], [-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0]] #\n \r\n elif self.clr == 4:\r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\r\n _colOffsets = [[-1, 0, 0, 0], [1, 1, 0, -1], [1, 0, 0,0], [-1, -1, 0,1]]\n _rowOffsets = [[-1,-1, 0, 1], [-1,0, 0, 0], [1,1, 0,-1], [1,0, 0, 0]]\n \r\n elif self.clr == 5: # o o\r\n # o x \r\n # x o x o o o o o x o\r\n # o o \r\n _colOffsets = [[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0], [-2,-1, 0, 1]] #\r\n _rowOffsets = [[-2,-1, 0, 1], [ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0]] #\r\n elif self.clr == 6: #\r\n # o o o \r\n # o x o x o x o o x o\r\n # o o o \r\n _colOffsets = [[ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0]] #\r\n elif self.clr == 7: # \r\n # o o o o o o o o\r\n # o x o x o x o x\r\n # \r\n _colOffsets = [[-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0]] #@@\r\n _rowOffsets = [[ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1]] #@@\n \r\n self._colOffsets = _colOffsets[self._rot] #@@\r\n self._rowOffsets = _rowOffsets[self._rot] #@@\r\n self._update() #@@\r",
"def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self"
] | [
"0.7102181",
"0.6861608",
"0.6767532",
"0.65712",
"0.6486802",
"0.643363",
"0.6404822",
"0.626136",
"0.6259617",
"0.62278646",
"0.621917",
"0.6212888",
"0.6200598",
"0.6192391",
"0.61897737",
"0.6155825",
"0.6155825",
"0.61425966",
"0.6127783",
"0.61275494",
"0.6120775",
"0.6085118",
"0.6084364",
"0.60600436",
"0.60531324",
"0.60221875",
"0.6021392",
"0.6021392",
"0.59955025",
"0.5994416"
] | 0.7782071 | 0 |
True when the display brightness is adjusted automatically, based on an ambient light sensor or other method. Note that some displays may have this set to True by default, but not actually implement automatic brightness adjustment. `auto_brightness` is set to False if `brightness` is set manually. | def auto_brightness(self):
return self._auto_brightness | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config_brightness(self):\n orig_brightness, prev_brightness = self.brightness, self.brightness\n self.make_ui_group(False, 'Brightness:', self.brightness)\n\n while True:\n action_left, action_right = (self.button_left.action(),\n self.button_right.action())\n if action_left is RichButton.HOLD:\n return self.brightness is not orig_brightness, False # Config\n if action_right is RichButton.HOLD:\n return self.brightness is not orig_brightness, True # Paint\n if action_left is RichButton.TAP:\n self.brightness = max(0.0, self.brightness - 0.1)\n elif action_right is RichButton.TAP:\n self.brightness = min(1.0, self.brightness + 0.1)\n\n if self.brightness is not prev_brightness:\n self.rect.x = int(board.DISPLAY.width * (self.brightness - 1.0))\n prev_brightness = self.brightness",
"def is_on(self):\n return self._brightness_pct != 0",
"def is_on(self):\n return self._brightness != 0",
"def auto_brightness_callback(self, kwargs):\n\n entity_dict = kwargs['entity_dict']\n immediate = kwargs.get('immediate')\n transition = kwargs.get('transition', 0)\n check_current_brightness = kwargs.get(\n 'check_current_brightness', False)\n ignore_state = kwargs.get('ignore_state')\n\n now = datetime.datetime.now()\n current_mode = self.get_state(entity_dict['mode'])\n state = self.get_state(entity_dict['light'])\n min_brightness = entity_dict['min_brightness']\n max_brightness = entity_dict['max_brightness']\n\n if current_mode != 'Automatic':\n return\n\n if state == 'off' and not ignore_state:\n return\n\n if check_current_brightness:\n current_brightness = self.get_state(\n entity_dict['light'], attribute='brightness')\n if not current_brightness:\n current_brightness = 0\n current_brightness_pct = current_brightness / 2.55\n\n # Iterate over the schedule, determine the brightness to use\n schedule = entity_dict['brightness_schedule']\n for i in range(len(schedule)):\n # Get the next schedule item, go to 0 (wrap around) if we're on the last schedule\n if i+1 == len(schedule):\n next_schedule = schedule[0]\n else:\n next_schedule = schedule[i+1]\n\n # Replace strings max/min_brightness with percents\n if next_schedule['pct'] == 'max_brightness':\n next_schedule_pct = max_brightness\n elif next_schedule['pct'] == 'min_brightness':\n next_schedule_pct = min_brightness\n else:\n next_schedule_pct = next_schedule['pct']\n\n if schedule[i]['pct'] == 'max_brightness':\n this_schedule_pct = max_brightness\n elif schedule[i]['pct'] == 'min_brightness':\n this_schedule_pct = min_brightness\n else:\n this_schedule_pct = schedule[i]['pct']\n\n # Determine if now is during or between two schedules\n in_schedule = self.timestr_delta(\n schedule[i]['start'], now, schedule[i]['end'])\n between_schedule = self.timestr_delta(\n schedule[i]['end'], now, next_schedule['start'])\n\n if in_schedule['now_is_between']:\n # If we're within a schedule entry's time window, match exactly\n target_percent = round(this_schedule_pct)\n transition = 0\n\n # don't eval any ore schedules\n break\n elif between_schedule['now_is_between']:\n # if we are between two schedules, calculate the brightness percentage\n time_diff = between_schedule['start_to_end'].total_seconds()\n bright_diff = this_schedule_pct - next_schedule_pct\n bright_per_second = bright_diff / time_diff\n\n if immediate:\n # If setting an immediate brightness, we want to calculate the brightness percentage and then make a recursive call\n target_percent = round(this_schedule_pct -\n (between_schedule['since_start'].total_seconds(\n ) * bright_per_second))\n transition = 0\n self.run_in(\n self.auto_brightness_callback,\n delay=5,\n entity_dict=entity_dict,\n transition=295\n )\n else:\n if between_schedule['to_end'].total_seconds() <= transition:\n # If we're in a new schedule in the next 5 minutes, use that schedule's brightness\n target_percent = round(next_schedule_pct)\n transition = between_schedule['to_end'].total_seconds(\n )\n else:\n target_percent = round(this_schedule_pct -\n ((between_schedule['since_start'].total_seconds(\n ) + transition) * bright_per_second))\n\n # don't eval any more schedules\n break\n\n # set brightness if a schedule was matched and the percent has changed since the last auto-brightness run\n # Don't change if the brightness was changed from another source (at the switch, hass ui, google assistant, etc.)\n if target_percent:\n last_percent = round(\n float(self.get_state(entity_dict['setpoint'])))\n if 
last_percent != target_percent:\n if check_current_brightness and abs(last_percent - current_brightness_pct) > 5:\n self.log(\n '{}: Brightness changed manually, not moving.'.format(entity_dict['friendly']))\n self.select_option(entity_dict['mode'], 'Manual')\n else:\n self.log(\"Setting {} to auto-brightness - {}% over {} seconds\".format(\n entity_dict['friendly'], round(target_percent, 2), transition))\n self.turn_on(\n entity_id=entity_dict['light'],\n brightness_pct=target_percent,\n transition=transition\n )\n self.set_value(\n entity_dict['setpoint'], round(target_percent))",
"def is_on(self):\n return self._brightness > 0 or self._white_value > 0",
"def lights_are_on(image_path):\n _brightness = get_image_brightness(image_path)\n if _brightness > 10:\n return True\n return False",
"def the_user_changes_the_brightness_of_the_device(brightness):\n web_app.change_property_softassert(\"brightness\",brightness)",
"def _update_brightness(self):\n while self.current_brightness != self.brightness:\n next_color = RGB(r=int(self.color.r * (self.current_brightness/100.0)),\n g=int(self.color.g * (self.current_brightness/100.0)),\n b=int(self.color.b * (self.current_brightness/100.0)))\n self._update_color(next_color)\n diff = self.brightness - self.current_brightness\n # adjust current brightness to +/- 1\n self.current_brightness = self.current_brightness + \\\n (diff) / abs(diff)\n time.sleep(.05)\n # Final update to exact brightness and default if no change in brightness setting\n final_color = RGB(r=int(self.color.r * (self.brightness/100.0)),\n g=int(self.color.g * (self.brightness/100.0)),\n b=int(self.color.b * (self.brightness/100.0)))\n self._update_color(final_color)",
"async def async_turn_on(self, **kwargs: Any) -> None:\n if (brightness := kwargs.get(ATTR_BRIGHTNESS)) is not None:\n # set the brightness, which will also turn on/off light\n if brightness == 255:\n brightness = 256 # this will end up as 16 which is max\n self._device.light_brightness = int(brightness / 16)\n else:\n self._device.light_on = True",
"def test_change_brightness_of_the_device_false():",
"def setBrightness(self, brightness):\n self._logger.debug(\"setBrightness\")",
"def set_brightness(self, brightness: int):\r\n if not self.backlight:\r\n return\r\n\r\n if brightness < 0 or brightness > 100:\r\n # Print an error, probably\r\n return\r\n\r\n self.backlight.brightness = brightness",
"def test_change_brightness_of_the_devicetrue():",
"async def async_set_brightness(self, brightness):\n await self.local_meural.send_control_backlight(brightness)",
"def set_brightness(self, brightness):\n if (self.pwm):\n self.brightness = brightness\n self.pwm.ChangeDutyCycle(brightness)",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def brightness(self):\n return self._brightness",
"def turn_on(self, **kwargs):\n brightness_pct = 100\n if kwargs.get(ATTR_BRIGHTNESS):\n brightness_pct = \\\n brightness_to_percentage(int(kwargs.get(ATTR_BRIGHTNESS)))\n elif self._is_dimmable:\n brightness_pct = 101 # Sets the light to last known brightness.\n self._client.set_brightness(self._id, brightness_pct)",
"def light_is_on(self):\r\n return self._light == \"ON\"",
"def _get_brightness(self):\n result = self._client_cmd('backlight_tool --get_brightness')\n return int(result.stdout.rstrip())"
] | [
"0.67415977",
"0.67098176",
"0.65921164",
"0.6376667",
"0.6299181",
"0.62167054",
"0.5999217",
"0.58982825",
"0.58737326",
"0.58720344",
"0.5764444",
"0.5740519",
"0.5720143",
"0.56394523",
"0.5623431",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5605068",
"0.5542826",
"0.5526366",
"0.551933"
] | 0.7451231 | 0 |
Merge the processed datasets with the name input | def data_merge(path, dataset_name="processed_data"):
files = glob.glob(path+"**//"+dataset_name+".json")
logger.info("Found {} files under the path {}".format(len(files),path))
final_data = []
for file in files:
assert dataset_name in file
data = json.load(open(file,"r",encoding="utf-8"))
final_data += data
data_analysis(final_data)
final_data = json.dumps(final_data,indent=4)
new_file = open(path + "//merged_data.json", "w+", encoding="UTF-8")
new_file.writelines(final_data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_datasets(dslist):\n # We use a variant of our fast stitching routine\n # So first create a sorted list of angles and source files\n container = []\n print 'Passed %d datasets for merging ' % len(dslist)\n proc_info = \"\"\"This dataset was created by collating points from multiple datasets. Data reduction \n information for the individual source datasets is as follows:\"\"\"\n title_info = \"Merge:\"\n for num,dataset in enumerate(dslist):\n storage_info = zip(dataset.axes[0],dataset.storage,dataset.var.storage)\n container.extend(storage_info)\n proc_info += \"\\n\\n===Dataset %s===\\n\" % str(dataset.title)\n try:\n proc_info += dataset.harvest_metadata(\"CIF\")[\"_pd_proc_info_data_reduction\"]\n except KeyError:\n pass\n title_info = title_info + dataset.title + ':'\n # So we have a list of angle,intensity,variance triples which we sort on angle\n container = sorted(container, key=lambda(angle,intensity,variance):angle)\n angles = map(lambda (a,b,c):a,container)\n intensities = map(lambda (a,b,c):b,container)\n variances = map(lambda (a,b,c):c,container)\n rs = Dataset(intensities)\n rs.var = variances\n rs.axes[0] = angles\n rs.axes[0].title = 'Two theta (degrees)'\n rs.title = title_info\n # Add metadata\n AddCifMetadata.add_standard_metadata(rs)\n rs.add_metadata(\"_pd_proc_info_data_reduction\",proc_info,\"CIF\")\n return rs",
"def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))",
"def concatenate_data():",
"def merge(datasets: Sequence[\"Dataset\"]) -> \"Dataset\":\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._append_items(dsj, copy=False)\n\n return ds",
"def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container 
dictionary. At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates 
\"\"\"\n #dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if 
( k == 'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0",
"def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)",
"def load_processed_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n path = os.path.join('downloads', name)\n path_processed = os.path.join(path, 'processed')\n\n if name == 'iris':\n return pd.read_csv(os.path.join(path_processed, 'iris.csv'))\n\n elif name == 'wine':\n return pd.read_csv(os.path.join(path_processed, 'wine.csv'))\n\n elif name == 'titanic':\n return pd.read_csv(os.path.join(path_processed, 'titanic.csv'))\n\n elif name == 'lanl':\n with open(os.path.join(path_processed, 'train_data.pkl'), 'rb') as f:\n x = pkl.load(f)\n with open(os.path.join(path_processed, 'train_targets.pkl'), 'rb') as f:\n y = pkl.load(f)\n return x, y\n\n elif name == 'MNIST' or name == 'FashionMNIST':\n training = torch.load(os.path.join(path_processed, 'training.pt'))\n test = torch.load(os.path.join(path_processed, 'test.pt'))\n return training, test",
"def process_datasets(self):\n\n with open(self.mappings, \"r+\") as json_file:\n emsl_to_jgi = json.load(json_file)\n emsl_to_jgi_copy = copy.deepcopy(emsl_to_jgi)\n\n contaminant_file_loc = emsl_to_jgi[\"contaminant_file_loc\"]\n # run for each dataset\n for dataset_id, values in emsl_to_jgi.items():\n if dataset_id not in [\n \"contaminant_file_loc\",\n \"analysis_activity_file_loc\",\n \"data_objects_file_loc\",\n \"STUDY\",\n \"tools_used\",\n ]:\n raw_file_loc = values[\"raw_file_loc\"]\n self.dataset_name = values[\"dataset_name\"]\n # dataset search against a fasta file\n for genome_directory, locations in values[\n \"genome_directory\"\n ].items():\n # clear object to prepare next job\n ANALYSIS_JOBS_OBJECT.clear()\n\n # create log_dir\n self.save_job_results = os.path.join(\n self.result_loc, dataset_id, genome_directory\n )\n self.log_collected_at = os.path.join(\n os.path.abspath(self.save_job_results), \"analysis_jobs_logs\"\n )\n if not os.path.exists(self.log_collected_at):\n os.makedirs(self.log_collected_at)\n\n files = [locations[\"faa_file_loc\"], contaminant_file_loc]\n contaminated_faa_file_loc = self.contaminate_fasta(files)\n\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"contaminated_faa_file_loc\",\n contaminated_faa_file_loc,\n emsl_to_jgi_copy,\n )\n # convert .faa to .txt\n faa_txt_file = self.convert_faa2txt(\n dataset_id, contaminated_faa_file_loc\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"txt_faa_file_loc\",\n faa_txt_file,\n emsl_to_jgi_copy,\n )\n\n # log & run job\n self.run_n_log_job(\n dataset_id,\n genome_directory,\n contaminated_faa_file_loc,\n raw_file_loc,\n emsl_to_jgi_copy,\n )\n\n # merge analysis\n resultant_file = self.merge_analysis_jobs(\n dataset_id, genome_directory\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"resultant_file_loc\",\n resultant_file,\n emsl_to_jgi_copy,\n )\n\n # capture the job metadata object\n logger.info(\"Jobrun\", extra=LOGGED_ANALYSIS_JOB)\n\n # update emsl_to_jgi.json\n json_file.seek(0) # move back to BOF.\n json_file.truncate()\n json_file.write(json.dumps(emsl_to_jgi_copy, default=str, indent=4))\n pass",
"def combine_files(file_name):\n\n\tif file_name == \"train\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Train/all_level1_train.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Train/all_level1_train.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Train/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop([\"Response\", \"Id\"],1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Response\")\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col + [\"Response\"]\n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Train/all_level1_train.csv\", index = False)\n\n\telif file_name == \"test\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Test/all_level1_test.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Test/all_level1_test.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Test/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop(\"Id\",1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col \n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Test/all_level1_test.csv\", index = False)",
"def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n 
monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")",
"def merge(self , station = '' , datasets = ''):\n \n \n \n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! ***')\n return True\n \n \n \"\"\"\n try:\n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! ***')\n return True\n except:\n print('Failed: ' , station )\n return False \n \"\"\"",
"def _merge_parameter_set_names_array(self):\n parameter_set_names_array = self._create_parameter_set_names_array()\n self.parameter_study = xarray.merge(\n [self.parameter_study.reset_coords(), parameter_set_names_array]).set_coords(_set_coordinate_key)",
"def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)",
"def get_new_datasets(self, output_name):\n return []",
"def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def processed_file_names(self):\n # For 'trainval', we use files from 'train' and 'val' to save\n # memory\n if self.stage == 'trainval' and self.val_mixed_in_train:\n return [\n osp.join('train', self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n if self.stage == 'trainval':\n return [\n osp.join(s, self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n return [\n osp.join(self.stage, self.pre_transform_hash, f'{w}.h5')\n for w in self.cloud_ids]",
"def get_data_loaders_1sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1_sentence_final_generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+1):]\n #history_complete.append(history)\n if len(history) > 3:\n history_chatbot = history[1]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def Concat(datasets):\n\n dataset_num = len(datasets)\n dataset = datasets[0]\n for i in range(1, dataset_num):\n dataset.concatenate(datasets[i])\n return dataset",
"def data_merge(detector_fields):\n print(\"Merging final data...\")\n\n # load files that contain phase and I/O processed data and store as dfs\n phase_data = pd.read_csv(results_folder + 'phases/processed/clean_merged_phases.csv', header=0,\n skipinitialspace=True, usecols=output_fields)\n detection_data = pd.read_csv(results_folder + 'io/io_out.csv', header=0, skipinitialspace=True,\n usecols=detector_fields)\n phase_df = pd.DataFrame(phase_data)\n detection_df = pd.DataFrame(detection_data)\n\n # merge the two files based on their Date and Time fields\n output = pd.merge(phase_df, detection_df, on=['Date', 'Time'])\n\n # store the output with any duplicates dropped and create a final CSV file\n merged_df = output.drop_duplicates()\n merged_df.to_csv(results_folder + 'dataset.csv', sep=',', index=False)\n\n print(\"Data merged!\")\n print(\"Main dataset available: \" + results_folder + 'dataset.csv')\n\n # return location of dataset\n return results_folder + 'dataset.csv'",
"def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)",
"def datasets(self):\n pass",
"def google(name, output, input):\n GoogleDatasetProcessor(name=name,\n output_filename=output,\n source_filenames=input).process()",
"def match_data(self, datasets):\n raise NotImplementedError",
"def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)",
"def update_dataset(self, data_name: str, append: pd.DataFrame):\n df = getattr(self, data_name)\n setattr(self, data_name, df.join(append, how='left'))",
"def concatenate_datasets(filenames_list, img_rows=128, img_cols=128):\n print('Concatenating the datasets created by data augmentation into a single one')\n print('Using the following pairs of images / masks datasets: ')\n print(filenames_list)\n print('\\n')\n\n # total number of images\n n_samples = 600 * len(filenames_list)\n\n # create np.ndarrays for the images and the targets: xCenter, yCenter, xOrientation, yOrientation\n images_dataset = np.ndarray((n_samples, img_rows, img_cols), dtype=np.uint8)\n targets_dataset = np.ndarray((n_samples, 4), dtype=np.float32)\n\n for ds, (img, mask) in enumerate(filenames_list):\n print(\" Processing {}\".format(img))\n images = np.load(\"output/augmented_data/{}.npy\".format(img))\n masks = np.load(\"output/augmented_data/{}.npy\".format(mask))\n\n for idx, mat in enumerate(masks):\n\n # get the center coordinates of the left ventricle (on the resized image)\n row, col = findCenter(img=mat, pixelvalue=1)\n\n # get the orientation of the left ventricle (on the resized image)\n x_v1, y_v1 = findMainOrientation(img=mat, pixelvalue=1)\n\n # save the center coordinates & orientation to the y dataframe (which will be the output of the network)\n targets_dataset[ds*600 + idx] = np.array([row, col, x_v1, y_v1])\n\n # save image in main dataset file\n images_dataset[ds*600 + idx] = images[idx]\n\n print('Concatenated all datasets into one & created target values for (center, orientation)')\n\n print('Splitting the dataset into 70% training & 30% testing')\n images_train, images_test, targets_train, targets_test = train_test_split(images_dataset, targets_dataset,\n test_size=0.3,\n random_state=42,\n shuffle=True)\n\n # save all ndarrays to a .npy files (for faster loading later)\n # Create directory to store files.\n directory = os.path.join(os.getcwd(), 'output/processed_data/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # save training set to file\n np.save('output/processed_data/images_train.npy', images_train)\n np.save('output/processed_data/targets_train.npy', targets_train)\n\n # save testing set to file\n np.save('output/processed_data/images_test.npy', images_test)\n np.save('output/processed_data/targets_test.npy', targets_test)\n print('Saving to .npy files done. See files: ')\n print('output/processed_data/images_train.npy')\n print('output/processed_data/targets_train.npy')\n print('output/processed_data/images_test.npy')\n print('output/processed_data/targets_test.npy')",
"def get_data_loaders_4sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"]\n #history_complete.append(history)\n if len(history_splitted) > (len(persona)-1):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def get_files(self):\n\n # Grab master data - use existing header, remove unhappy columns\n\n self.df_mas_lab_data = pd.read_csv(\n self.master_csv, dtype=str, usecols=self.columns\n )\n\n # Delete rows, where column FACILITY_TYPE != Independent, Hospital,\n # Physician Office\n facility_type_keep_list = [\"Independent\", \"Hospital\", \"Physician Office\"]\n self.df_mas_lab_data = self.df_mas_lab_data[\n self.df_mas_lab_data[\"FACILITY_TYPE\"].isin(facility_type_keep_list)\n ]\n\n # Make everything a string and remove trailing and leading whitespaces\n self.df_mas_lab_data = self.df_mas_lab_data.astype(str)\n self.df_mas_lab_data = self.df_mas_lab_data.applymap(\n lambda x: x.strip() if isinstance(x, str) else x\n )\n\n print_banner(\"Computing all the Data\")\n print(f\"{len(self.df_mas_lab_data)} original master CLIA labs...\")\n\n # Grab other inputed files to make new data file to compare with\n self.df_new_lab_data = pd.concat(\n [\n pd.read_csv(file, names=self.columns, header=None, dtype=str, usecols=self.columns)\n for file in self.new_files\n ]\n )\n\n # Probably not needed for the new data but just in case:\n # Delete rows, where column FACILITY_TYPE != Independent, Hospital,\n # Physician Office\n self.df_new_lab_data = self.df_new_lab_data[\n self.df_new_lab_data[\"FACILITY_TYPE\"].isin(facility_type_keep_list)\n ]\n\n # Make everything a string and remove trailing and leading whitespaces\n self.df_new_lab_data = self.df_new_lab_data.astype(str)\n self.df_new_lab_data = self.df_new_lab_data.applymap(\n lambda x: x.strip() if isinstance(x, str) else x\n )\n\n print(f\"{len(self.df_new_lab_data)} inputted CLIA labs for comparison...\")",
"def __init__(self, out_dir = 'output' ):\n \n self.data = {} # will contain the data for each different dataset \n self.datasets = '' # will contain the input datasets (original dictionary)\n self.datasets_keys = '' # will contain the input datasets names only (i.e. keys of the datasets dictionary)\n self.datasets_all = ['igra2' , 'era5_1' , 'ncar_w' , 'ncar_t', 'bufr' , 'era5_1759' , 'era5_1761' , 'era5_3188'] # all possibly available datasets \n #self.observation_ids_merged = { 'igra2':1 , 'ncar_t':2 , 'ncar_w':2, 'bufr':3, 'era5_1':4 , 'era5_1759' :5 , 'era5_1761':6 , 'era5_3188' :7} # values used to convert original record_id to the merged record_id, see method merge_all_data \n \n self.observation_ids_merged = { 'igra2':1 , 'ncar':2, 'bufr':3, 'era5_1':4 , 'era5_1759' :5 , 'era5_1761':6 , 'era5_3188' :7} # values used to convert original record_id to the merged record_id, see method merge_all_data \n \n self.unique_dates = {} \n self.attributes = {} # will keep the original attributes from the CDM tables, read from the netCDF files \n self.out_dir = out_dir",
"def create_input_files(self, datasets_dict):\n ifname = self.keywords['inputfile']\n dirstem = os.path.dirname(ifname)\n basename = os.path.basename(ifname).split('.')[0]\n createdfiles=list()\n if dirstem == \"\":\n dirstem = os.getcwd()\n dkeys = datasets_dict.keys()\n dkeys.sort()\n dct=1\n for didx in dkeys:\n newfile = MASTFile()\n newfile.data = list(datasets_dict[didx])\n newname=\"%s/loop_%s_%s.inp\" % (dirstem, basename, str(dct).zfill(2))\n newfile.to_file(newname)\n #createdfiles.append(os.path.basename(newname))\n createdfiles.append(newname)\n dct=dct+1\n return createdfiles"
] | [
"0.6107001",
"0.6097907",
"0.60682017",
"0.60533506",
"0.6005032",
"0.58872175",
"0.5818851",
"0.5814989",
"0.5780272",
"0.5769151",
"0.5763386",
"0.5752234",
"0.5731635",
"0.57059276",
"0.5699131",
"0.56304216",
"0.56119347",
"0.5592348",
"0.5589795",
"0.5583951",
"0.5565674",
"0.55623496",
"0.5555347",
"0.5518295",
"0.5514262",
"0.5506352",
"0.5501912",
"0.5494089",
"0.5472766",
"0.5464557"
] | 0.7037669 | 0 |
Deploy new versions of all Lambda functions | def deploy(options, config):
processor = options.processor
# Terraform apply only to the module which contains our lambda functions
targets = set()
packages = []
def _publish_version(packages):
"""Publish Lambda versions"""
for package in packages:
if package.package_name in {'athena_partition_refresh', 'threat_intel_downloader'}:
published = LambdaVersion(
config=config, package=package, clustered_deploy=False).publish_function()
else:
published = LambdaVersion(config=config, package=package).publish_function()
if not published:
return False
return True
def _deploy_rule_processor():
"""Create Rule Processor package and publish versions"""
rule_package = RuleProcessorPackage(config=config, version=current_version)
rule_package.create_and_upload()
return rule_package
def _deploy_alert_processor():
"""Create Alert Processor package and publish versions"""
alert_package = AlertProcessorPackage(config=config, version=current_version)
alert_package.create_and_upload()
return alert_package
def _deploy_athena_partition_refresh():
"""Create Athena Partition Refresh package and publish"""
athena_package = AthenaPackage(config=config, version=current_version)
athena_package.create_and_upload()
return athena_package
def _deploy_apps_function():
"""Create app integration package and publish versions"""
app_integration_package = AppIntegrationPackage(config=config, version=apps_version)
app_integration_package.create_and_upload()
return app_integration_package
def _deploy_threat_intel_downloader():
"""Create Threat Intel downloader package and publish version"""
threat_intel_package = ThreatIntelDownloaderPackage(
config=config,
version=ti_downloader_version
)
threat_intel_package.create_and_upload()
return threat_intel_package
if 'all' in processor:
targets.update({'module.stream_alert_{}'.format(x) for x in config.clusters()})
targets.update({
'module.app_{}_{}'.format(app_name, cluster)
for cluster, info in config['clusters'].iteritems()
for app_name in info['modules'].get('stream_alert_apps', {})
})
packages.append(_deploy_rule_processor())
packages.append(_deploy_alert_processor())
packages.append(_deploy_apps_function())
# Only include the Athena function if it exists and is enabled
athena_config = config['lambda'].get('athena_partition_refresh_config')
if athena_config and athena_config.get('enabled', False):
targets.add('module.stream_alert_athena')
packages.append(_deploy_athena_partition_refresh())
else:
if 'rule' in processor:
targets.update({'module.stream_alert_{}'.format(x) for x in config.clusters()})
packages.append(_deploy_rule_processor())
if 'alert' in processor:
targets.update({'module.stream_alert_{}'.format(x) for x in config.clusters()})
packages.append(_deploy_alert_processor())
if 'apps' in processor:
targets.update({
'module.app_{}_{}'.format(app_name, cluster)
for cluster, info in config['clusters'].iteritems()
for app_name in info['modules'].get('stream_alert_apps', {})
})
packages.append(_deploy_apps_function())
if 'athena' in processor:
targets.add('module.stream_alert_athena')
packages.append(_deploy_athena_partition_refresh())
if 'threat_intel_downloader' in processor:
targets.add('module.threat_intel_downloader')
packages.append(_deploy_threat_intel_downloader())
# Regenerate the Terraform configuration with the new S3 keys
if not terraform_generate(config=config):
return
# Run Terraform: Update the Lambda source code in $LATEST
if not helpers.tf_runner(targets=targets):
sys.exit(1)
# TODO(jack) write integration test to verify newly updated function
# Publish a new production Lambda version
if not _publish_version(packages):
return
# Regenerate the Terraform configuration with the new Lambda versions
if not terraform_generate(config=config):
return
# Apply the changes to the Lambda aliases
helpers.tf_runner(targets=targets) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def task_deploy():\n client = boto3.client(\"lambda\")\n\n def upload_build():\n if function_exists(client):\n update_lambda_function(client)\n else:\n create_lambda_function(client)\n\n return {\"actions\": [upload_build], \"file_dep\": [f\"{DIST_DIR}/build.zip\"]}",
"def deploy():",
"def serverless_lambda_deploy(self, gateway_stack_name, lambda_stack_name, code_key):\n\n domain_name = self.get_stack_domain_name(gateway_stack_name)\n\n if not domain_name:\n print \"[WARN] Custom Domain not found. Depolying to $LATEST\"\n self.lambda_deploy_code(lambda_stack_name, code_key)\n return True\n\n current_live_stage = self.get_live_stack(domain_name)\n\n to_alias = None\n\n if current_live_stage == 'green':\n to_alias = 'blue'\n elif current_live_stage == 'blue':\n to_alias = 'green'\n\n lambdas = self.get_stack_lambdas(lambda_stack_name)\n print \"[INFO] Updating lambda code for %s \" % str(lambdas)\n\n for aws_lambda in lambdas:\n\n self.get_or_set_lambda_alias(aws_lambda)\n function_arn = self.get_lambda_arn(aws_lambda)\n self.update_lambda_code_key(function_arn, code_key)\n code_bucket, code_key = self.get_lambda_code_bucket_and_key(function_arn)\n\n print \"[INFO] Updating lambda code for %s with bucket: %s & key: %s\" % (\n aws_lambda, code_bucket, code_key)\n\n self.lambda_client.update_function_code(\n FunctionName=aws_lambda,\n S3Bucket=code_bucket,\n S3Key=code_key,\n # S3ObjectVersion='string', # TODO Soon!\n Publish=True\n )\n\n response = self.lambda_client.publish_version(\n FunctionName=aws_lambda,\n Description=datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p %S\")\n )\n\n version = response[\"Version\"]\n\n if not to_alias or not version:\n print \"[FAIL] Unable to version deploy\"\n\n self.lambda_client.update_alias(\n FunctionName=aws_lambda,\n Name=to_alias,\n FunctionVersion=version\n )\n\n print \"[INFO] Function %s Updated to version %s linked to alias %s\" % (aws_lambda, version, to_alias)",
"def deployFunc(runType):\n logger.info('Deploying lambda to {} environment'.format(runType))\n runProcess(runType, [\n 'deploy',\n '--config-file',\n 'run_config.yaml',\n '--requirements',\n 'requirements.txt'\n ])\n createEventMapping(runType)",
"def update_lambda() -> None:\n client = boto3.client(\"lambda\", \"us-west-2\")\n\n with SpooledTemporaryFile() as payload:\n with ZipFile(payload, \"w\") as zipfile:\n zipfile.write(\"chortle/__init__.py\")\n zipfile.write(\"chortle/lambda_function.py\")\n zipfile.write(\"chortle/strategy_handler.py\")\n\n payload.seek(0)\n\n client.update_function_code(\n FunctionName=LAMBDA_FUNCTION_NAME, ZipFile=payload.read()\n )\n\n client.update_function_configuration(\n FunctionName=LAMBDA_FUNCTION_NAME,\n Environment={\"Variables\": {\"CHORTLE_DYNAMO_TABLE\": TABLE_NAME}},\n )",
"def update_lambda_function(client):\n try:\n update_options = {\n \"FunctionName\": FUNCTION_NAME,\n \"ZipFile\": read_zip_file(),\n \"Publish\": True,\n }\n client.update_function_code(**update_options)\n except client.exceptions.ServiceException as error:\n print(error)",
"def deploy():\n build()\n copy()\n install()",
"def deploy():\n build()\n collect()\n commit()\n push()",
"def _deploy_function_refs(self):\n for function_ref in self._spec.function_refs.values():\n logger.info(f\"deploy child function {function_ref.name} ...\")\n function_object = function_ref.function_object\n function_object.metadata.name = function_ref.fullname(self)\n function_object.metadata.project = self.metadata.project\n function_object.metadata.tag = self.metadata.tag\n function_object.spec.graph = self.spec.graph\n # todo: may want to copy parent volumes to child functions\n function_object.apply(mlrun.v3io_cred())\n function_ref.db_uri = function_object._function_uri()\n function_object.verbose = self.verbose\n function_object.spec.secret_sources = self.spec.secret_sources\n function_object.deploy()",
"def lambda_handler(Event, Context):\n if 'StateMachineArn' in Event.keys():\n step_function_arn = Event['StateMachineArn']\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))\n\n else:\n stepfunctions = [os.getenv(\"CHARGEBEEDOWNLOADARN\"), os.getenv(\"EXCHANGERATESDOWNLOADARN\")]\n\n for stepfunction in stepfunctions:\n step_function_arn = stepfunction\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))",
"def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()",
"def trigger_deploy_build(\n commit, source_version, lambda_name):\n return trigger_build(\n commit, source_version, lambda_name, 'buildspec-build')",
"def test_all_python_versions_deploy():\n pass",
"def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()",
"def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()",
"def hotdeploy():\n _run_deploy(do_update_requirements=True)\n compile_less()\n compile_webpack()\n collectstatic()\n migrate()\n restart()",
"def deploy():\n _git_pull()\n _migrate()\n _collect_static_files()\n _restart_webserver()",
"def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()",
"def deploy():\n git_pull()\n# build_virtualenv()\n# collectstatic()\n migrate()\n# reload_gunicorn()\n# restart_celery()\n puts(green(\"Deployment done!\"))",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def lambda_handler(event, context):\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n 'message': 'API deployed',\n })\n }",
"def deploy():\n db.drop_all()\n create_DB()\n app.run()",
"def upload_lambda_functions(context: CfnginContext, provider: Provider, **kwargs: Any):\n LOGGER.warning(\n \"%s is deprecated and will be removed in a future release - \"\n \"see documentation for replacement\",\n __name__,\n )\n # TODO add better handling for misconfiguration (e.g. forgetting function names)\n # TODO support defining dockerize_pip options at the top level of args\n custom_bucket = cast(str, kwargs.get(\"bucket\", \"\"))\n if not custom_bucket:\n if not context.bucket_name:\n raise ValueError(\"hook requires bucket argument or top-level cfngin_hook\")\n bucket_name = context.bucket_name\n LOGGER.info(\"using default bucket from CFNgin: %s\", bucket_name)\n else:\n bucket_name = custom_bucket\n LOGGER.info(\"using custom bucket: %s\", bucket_name)\n\n custom_bucket_region = cast(str, kwargs.get(\"bucket_region\", \"\"))\n if not custom_bucket and custom_bucket_region:\n raise ValueError(\"Cannot specify `bucket_region` without specifying `bucket`.\")\n\n bucket_region = select_bucket_region(\n custom_bucket,\n custom_bucket_region,\n context.config.cfngin_bucket_region,\n provider.region or \"us-east-1\",\n )\n\n # Check if we should walk / follow symlinks\n follow_symlinks = kwargs.get(\"follow_symlinks\", False)\n if not isinstance(follow_symlinks, bool):\n raise ValueError(\"follow_symlinks option must be a boolean\")\n\n # Check for S3 object acl. Valid values from:\n # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n payload_acl = cast(\"ObjectCannedACLType\", kwargs.get(\"payload_acl\", \"private\"))\n\n # Always use the global client for s3\n session = context.get_session(region=bucket_region)\n s3_client = session.client(\"s3\")\n\n ensure_s3_bucket(s3_client, bucket_name, bucket_region)\n\n prefix = kwargs.get(\"prefix\", \"\")\n\n results: Dict[str, Any] = {}\n for name, options in kwargs[\"functions\"].items():\n sys_path = (\n os.path.dirname(context.config_path)\n if os.path.isfile(context.config_path)\n else context.config_path\n )\n results[name] = _upload_function(\n s3_client,\n bucket_name,\n prefix,\n name,\n options,\n follow_symlinks,\n payload_acl,\n str(sys_path),\n work_dir=context.work_dir,\n )\n\n return results",
"def deploy():\n update_treesheets()\n restart_treesheets()",
"def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()",
"def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package",
"def deploy(n = 10):\n upload_current_release()\n install_requisites()\n create_redirects()\n make_symlinks()\n symlink_current_release()\n sudo('service nginx reload')\n gc_deploys(n)",
"def deploy():\n upload_static()\n compile_code()\n upload_code()\n upload_supervisor()\n start_server()"
] | [
"0.7347401",
"0.67256033",
"0.66800237",
"0.6473388",
"0.6435472",
"0.6272753",
"0.62598175",
"0.6169625",
"0.61490303",
"0.6129791",
"0.6049756",
"0.6026856",
"0.6001714",
"0.59440047",
"0.5925443",
"0.59033215",
"0.58928",
"0.58922964",
"0.58777225",
"0.5875698",
"0.5875698",
"0.5875698",
"0.58659947",
"0.58421785",
"0.58312446",
"0.58280873",
"0.5820539",
"0.5788837",
"0.5769696",
"0.57535064"
] | 0.6987193 | 1 |
Create Rule Processor package and publish versions | def _deploy_rule_processor():
rule_package = RuleProcessorPackage(config=config, version=current_version)
rule_package.create_and_upload()
return rule_package | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _deploy_alert_processor():\n alert_package = AlertProcessorPackage(config=config, version=current_version)\n alert_package.create_and_upload()\n return alert_package",
"def package_build_process(name, url, branch, path_to_missile=None,\n domain=None, stack=None):\n logfilename = \"build-%s-%s-%s.log\" % (name, branch, datetime.datetime.utcnow().isoformat())\n logfilepath = os.path.expanduser(os.path.join(dd.BUILD_LOGPATH, logfilename))\n sys.stdout = open(logfilepath, \"w\")\n sys.stderr = sys.stdout\n\n args = [\"name\", \"url\", \"branch\", \"path_to_missile\"]\n for arg in args:\n print arg , \": \", locals()[arg]\n\n # with settings(host_string=host, key_filename=key_filename):\n wc = WorkingCopy(name, base_folder=os.path.expanduser(\"~/build\"), repo=url)\n wc.prepare(branch=branch)\n\n latest_version = RedisBackend().get_latest_version(name)\n new_base_version = wc.generate_new_base_version(latest_version)\n\n new_version = wc.get_new_git_version(prefix=new_base_version, suffix=branch)\n # skipping existing build removed\n wc.set_version(new_version)\n if path_to_missile:\n path_to_missile = os.path.join(wc.working_copy, path_to_missile)\n debs_path = os.path.expanduser(dd.BUILD_DEBSPATH)\n result = wc.build(path_to_missile=path_to_missile, output_path=debs_path)\n RedisBackend().delete_lock(\"packages\", name)\n RedisBackend().create_package(name, new_version, result)\n print \"Built new:\", name, branch, new_version\n\n if domain is not None and stack is not None:\n RedisBackend().add_stack_package(domain, stack, name, new_version)\n print \"Added to 'domains:%s:stacks:%s:packages' as {'%s': '%s'}\" % (domain, stack, name, new_version)",
"def _provision_package(self):",
"def build(self, depVersions, output):\n raise NotImplementedError()",
"def build(\n self,\n production: bool, # PyPI or Test-PyPi - USED TO FIND THE NEXT VERSION NUMBER\n force=False, # BUILD even if no changes\n ) -> bool: # sourcery skip: default-mutable-arg, extract-duplicate-method, require-parameter-annotation\n log.info(f\"Build: {self.package_path.name}\")\n\n ok = self.update_package()\n self.status[\"version\"] = self.pkg_version\n if not ok:\n log.warning(f\"{self.package_name}: skipping as build failed\")\n self.status[\"error\"] = \"Build failed\"\n return False\n # If there are changes to the package, then publish it\n if self.is_changed():\n log.info(f\"Found changes to package sources: {self.package_name} {self.pkg_version} \")\n log.trace(f\"Old hash {self.hash} != New hash {self.calculate_hash()}\")\n elif force:\n log.info(f\"Force build: {self.package_name} {self.pkg_version} \")\n\n if self.is_changed() or force:\n # Build the distribution files\n old_ver = self.pkg_version\n self.pkg_version = self.update_pkg_version(production)\n self.status[\"version\"] = self.pkg_version\n # to get the next version\n log.debug(f\"{self.package_name}: bump version for {old_ver} to {self.pkg_version } {'production' if production else 'test'}\")\n self.write_package_json()\n log.trace(f\"New hash: {self.package_name} {self.pkg_version} {self.hash}\")\n if self.poetry_build():\n self.status[\"result\"] = \"Build OK\"\n else:\n log.warning(f\"{self.package_name}: skipping as build failed\")\n self.status[\"error\"] = \"Build failed\"\n return False\n return True",
"def create_package(self, release_tag=''):\n\n code_directory = self.function_config['Code']['Directory']\n package_name = self.function_selected\n hash_release = _get_git_release()\n logger.info(\"Creating package with git release {0}\".format(hash_release))\n\n lp = self.runtime['packager'](\n package_name,\n hash_release + release_tag,\n code_directory,\n target_directory='.')\n\n lp.build_and_save()\n\n self.hash_release = hash_release\n self.local_filename = lp.filename",
"def create(index):\n # Get the project root\n project_root = get_project_root()\n package_name = os.path.basename(project_root)\n logging.info(\"Creating package for current project: \" + package_name)\n Packager(package_name, project_root).create(index)",
"def _package_plugins(ctx):\n print(\"\\n\\n-- Creating Zip Files \\n\")\n\n project_dir = Path(__file__).parent\n plugins_projects = [\n x for x in (project_dir / \"build/build_directory_for_tests/\").iterdir() if x.is_dir()\n ]\n artifacts_dir = project_dir / \"build/artifacts\"\n\n plugins_zip = project_dir / \"build/plugin_zip\"\n if plugins_zip.exists():\n shutil.rmtree(plugins_zip)\n\n plugins_zip.mkdir()\n\n for project in plugins_projects:\n plugins_dirs = [\n x for x in (project / \"plugin\").iterdir() if x.is_dir() and (x / \"assets\").exists()\n ]\n hm_generator = HookManGenerator(\n hook_spec_file_path=project_dir / f\"tests/plugins/{project.name}/hook_specs.py\"\n )\n\n for plugin in plugins_dirs:\n (plugin / \"artifacts\").mkdir()\n if sys.platform == \"win32\":\n shutil.copy2(src=artifacts_dir / f\"{plugin.name}.dll\", dst=plugin / \"artifacts\")\n else:\n shutil.copy2(src=artifacts_dir / f\"lib{plugin.name}.so\", dst=plugin / \"artifacts\")\n\n hm_generator.generate_plugin_package(\n package_name=plugin.name, plugin_dir=plugin, dst_path=plugins_zip\n )",
"def main():\n if PEPQUEUED.exists():\n PEPQUEUED.remove()\n os.chdir(BETABUILD_CHECKOUT_DIR / \"peps\")\n try:\n cmd(\"svn up\")\n except RuntimeError, error:\n log('%s: %s' % (error.__class__.__name__, error))\n cmd(\"./pep2pyramid.py --force -d %s\" % PEPDIR)\n cmd(\"./pep2rss.py %s\" % PEPDIR)\n BUILDQUEUED.touch()\n\n if BUILDINPROCESS.exists():\n # allow new checkins to queue a new build during another build\n # (leave BUILDQUEUED in place)\n return\n\n if BUILDQUEUED.exists():\n revision = BUILDQUEUED.text()\n log('revision %s' % revision)\n BUILDQUEUED.remove()\n update(revision)\n\n #rebuild jobs rss\n cmd(\"%s/jobs2rss.py %s\"%(JOBSDIR, JOBSDIR))\n log('Rebuilding jobs.rss')",
"def do_release(self, version):\n build_dir = self.options.buildroot\n patch_dir = self.options.patch_dir\n\n # variables related to the version\n prev_version = version.prev_version\n\n # If we're operating in the same repo as this script, kindly make it\n # in a subdirectory to avoid polluting things\n if build_dir == os.path.dirname(os.path.abspath(__file__)):\n build_dir = os.path.join(build_dir, 'build')\n\n if not os.path.exists(build_dir):\n logging.debug('Creating build dir: %s', build_dir)\n os.mkdir(build_dir)\n\n os.chdir(build_dir)\n\n package = 'mediawiki-' + version.raw\n package_dir = os.path.join(build_dir, package)\n\n # Export the target. If we're going to patch later, use the branch\n if patch_dir:\n get_git(package_dir, version.branch)\n else:\n get_git(package_dir, version.tag)\n\n if patch_dir:\n maybe_apply_patches(\n package,\n get_patches_for_repo(patch_dir, 'core', version.branch))\n maybe_apply_patches(\n os.path.join(package, 'vendor'),\n get_patches_for_repo(patch_dir, 'vendor', version.branch))\n\n ext_exclude = []\n for ext in get_skins_and_extensions(package_dir):\n if patch_dir:\n maybe_apply_patches(\n os.path.join(package, ext),\n get_patches_for_repo(patch_dir, ext, version.branch))\n ext_exclude.append(\"--exclude\")\n ext_exclude.append(ext)\n\n # Generate the .tar.gz files\n out_files = [\n self.make_tar(\n package=package,\n input_dir=package,\n build_dir=build_dir),\n self.make_tar(\n package='mediawiki-core-' + version.raw,\n input_dir=package,\n build_dir=build_dir,\n add_args=ext_exclude)\n ]\n\n # Patch\n if not self.options.no_previous and prev_version is not None:\n prev_dir = 'mediawiki-' + prev_version\n get_git(os.path.join(build_dir, prev_dir),\n MwVersion(prev_version).tag)\n\n self.make_patch(\n build_dir, package + '.patch.gz', prev_dir, package, 'normal')\n out_files.append(package + '.patch.gz')\n logging.debug('%s.patch.gz written', package)\n if os.path.exists(os.path.join(package, 'languages', 'messages')):\n i18n_patch = 'mediawiki-i18n-' + version.raw + '.patch.gz'\n if (self.make_patch(\n build_dir, i18n_patch, prev_dir, package, 'i18n')):\n out_files.append(i18n_patch)\n logging.info('%s written', i18n_patch)\n else:\n i18n_patch = None\n\n # Sign\n for file_name in out_files:\n if self.options.sign:\n try:\n proc = subprocess.Popen([\n 'gpg', '--detach-sign',\n os.path.join(build_dir, file_name)])\n except OSError as ose:\n logging.error(\"gpg failed, does it exist? Skip with \" +\n \"--dont-sign.\")\n logging.error(\"Error %s: %s\", ose.errno, ose.strerror)\n sys.exit(1)\n if proc.wait() != 0:\n logging.error(\"gpg failed, exiting\")\n sys.exit(1)\n output(version, out_files)\n return 0",
"def init_add_package_to_stack_process(stack, name, version, file_name):\n with settings(host_string=host, key_filename=key_filename):\n s = Stack(MAGIC_DOMAIN, stack, meta_path=\"/var/gachette/\", operator=StackOperatorRedis(redis_host=dd.REDIS_HOST))\n s.add_package(name, version=version, file_name=file_name)\n send_notification(\"stack #%s package %s (%s) added\" % (stack, name, version))",
"def create_packages(self):\n if not self.rewrite:\n # The extra package structure is only required for vendored code used via import rewrites.\n return\n\n for index, _ in enumerate(self._subpath_components):\n relpath = _PACKAGE_COMPONENTS + self._subpath_components[: index + 1] + [\"__init__.py\"]\n touch(os.path.join(self.ROOT, *relpath))",
"def run_pre_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name)\n # do not forget to clean up the staging area\n staging.clear()",
"def run_post_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name, publisher_type=POST_PUBLISHER_TYPE)\n # do not forget to clean up the staging area\n staging.clear()",
"def create_package(self, **kwargs):\n results = self.api.action.package_create(**kwargs)\n self.get_ckan_metadata(True)\n return results",
"def build(ctx):\n if 'cicd' in run('hostname').stdout.strip():\n # Check if we are executing the task from an aws instance\n if requests.get('http://169.254.169.254/latest/meta-data/').status_code == 200:\n git_ref_source = os.environ.get('GIT_SOURCE_BRANCH')\n git_ref_target = os.environ.get('GIT_TARGET_BRANCH')\n run('git fetch --all')\n run('git checkout {}'.format(git_ref_target))\n\n \n tar_name = \"Frontend\"\n #'wordpress-{}-en_CA.tar.gz'.format(WORDPRESS_VERSION)\n #tar_file = open(tar_name, 'wb')\n #tar_file.write(wp_tar.content)\n #tar_file.close()\n\n #run('tar -xzf {}'.format(tar_name))\n \n # Download the postmedia source-code and patches/config\n #clone(git_ref_target, git_ref_source)\n\n # merge (if applicable) and create the release\n if git_ref_source:\n git_pr_id = os.getenv('GIT_PR_ID')\n github_util.put('repos/{}/{}/pulls/{}/merge'.format(GIT_ORG, GIT_REPO, git_pr_id), params={'merge_method': 'squash'})\n version = github_util.get_next_rc()\n github_util.set_release(target_commitish='master', tag=version, prerelease=True)\n build_type = 'release candidate'\n else:\n version = github_util.get_next_hf()\n github_util.set_release(git_ref_target, version)\n build_type = 'hotfix'\n\n # package and upload to S3\n author = os.environ.get('GIT_AUTHOR')\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n tarball = package(notes, version)\n print(\"No upload to S3\")\n #upload(tarball, S3_BUCKET_STAGE)\n else:\n author = input('please enter your name for the release notes: ')\n\n valid_snapshot_name = False\n while not valid_snapshot_name:\n snapshot_name = input('please enter a name for your snapshot: ')\n snapshot_name = snapshot_name.lower()\n snapshot_name = re.sub('-', '_', snapshot_name)\n\n # domain sections cannot be longer than 63 characters, so snapshot\n # name cannot be longer than 26 (63 minus snapshot-20190128-1713-homesanddesign - 37)\n if (len(snapshot_name) <= 26):\n valid_snapshot_name = True\n else:\n print(\"{} is too long. Please enter a new snapshot name of 28 characters or less.\".format(snapshot_name))\n\n build_type = 'snapshot'\n \n version = '{}_{}_{}'.format(build_type, snapshot_name,\n datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"))\n print(\"Building snapshot {}\".format(version))\n git_ref_target = 'master'\n git_ref_source = 'HEAD'\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n os.chdir('/opt/')\n if os.path.exists(WORK_DIR):\n os.system('rm -rf {}'.format(WORK_DIR))\n os.mkdir(WORK_DIR)\n tarball = package(notes, version)\n print (\"No upload to S3\")\n #upload(tarball, S3_BUCKET_DEV)",
"def custom_package_xml_generator(directory, packagename=None, version='45.0', filename='package.xml'):\n\n METADATA_TYPE = {\n 'applications':'CustomApplication', 'aura':'AuraDefinitionBundle', 'classes':'ApexClass', 'customPermissions':'CustomPermission', \n 'flexipages':'FlexiPage', 'flows':'Flow', 'globalValueSets':'GlobalValueSet', 'labels':'CustomLabels', 'layouts':'Layout',\n 'lwc': 'LightningComponentBundle', 'objects':'CustomObject', 'pages':'ApexPage', 'permissionsets':'PermissionSet', 'profiles':'Profile',\n 'staticresources':'StaticResource', 'tabs':'CustomTab', 'triggers':'ApexTrigger', 'contentassets':'ContentAsset', 'pathAssistants':'PathAssistant',\n 'quickActions':'QuickAction', 'remoteSiteSettings':'RemoteSiteSetting', 'workflows':'Workflow', 'dashboards':'Dashboard', 'reports':'Report',\n 'cspTrustedSites':'CspTrustedSite',\n }\n\n \"\"\"\n Non-implemented Metadata:\n 'ApexComponent', 'CustomMetadata' (needs custom manipulation), 'CustomObjectTranslation', 'DuplicateRule', \n 'FlowCategory', 'GlobalValueSetTranslation', 'MatchingRules',\n \"\"\"\n #read directory structure\n\n mdtypedirs = os.listdir(directory)\n\n nested_mdt_object = ['ValidationRule', 'CompactLayout', 'ListView', 'SharingReason', 'RecordType']\n nested_mdt_workflow = ['WorkflowFieldUpdate', 'WorkflowKnowledgePublish', 'WorkflowTask', 'WorkflowAlert', 'WorkflowSend', 'WorkflowOutboundMessage', 'WorkflowRule']\n\n # start our xml structure\n root = xml.Element('Package')\n root.set('xmlns','http://soap.sforce.com/2006/04/metadata')\n\n for mdtype in mdtypedirs:\n # create child node for each type of component\n if mdtype in METADATA_TYPE.keys():\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = str(METADATA_TYPE[mdtype])\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n \n if mdtype == 'objects':\n for nest_mdtyp in nested_mdt_object:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n if mdtype == 'workflows':\n for nest_mdtyp in nested_mdt_workflow:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n #Custom behavior for custom labels\n if mdtype == 'labels':\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = 'CustomLabel'\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n # add the final xml node package.api_version\n eversion = xml.SubElement(root, 'version')\n eversion.text = str(version)\n\n #package name\n if packagename != None:\n efname = xml.SubElement(root, 'fullName')\n efname.text = str(packagename)\n\n #pretty format for xml\n xmlstring = xml.tostring(root)\n reparsed = minidom.parseString(xmlstring)\n prettyxml = reparsed.toprettyxml(indent=' ', newl='\\n', encoding='UTF-8')\n \n #generate xml file from string\n try:\n with open(os.path.join(directory, filename), \"bw\") as xml_file:\n xml_file.write(prettyxml)\n except IOError:\n pass",
"def publish(\n self,\n db: PysonDB,\n *,\n production: bool, # PyPI or Test-PyPi\n build=False, #\n force=False, # publish even if no changes\n dry_run=False, # do not actually publish\n clean: bool = False, # clean up afterwards\n ) -> (\n bool\n ): # sourcery skip: assign-if-exp, default-mutable-arg, extract-method, remove-unnecessary-else, require-parameter-annotation, swap-if-else-branches, swap-if-expression\n log.info(f\"Publish: {self.package_path.name}\")\n # count .pyi files in the package\n filecount = len(list(self.package_path.rglob(\"*.pyi\")))\n if filecount == 0:\n log.debug(f\"{self.package_name}: starting build as no .pyi files found\")\n build = True\n\n if build or force or self.is_changed():\n self.build(production=production, force=force)\n\n if not self._publish:\n log.debug(f\"{self.package_name}: skip publishing\")\n return False\n\n self.update_pkg_version(production=production)\n # Publish the package to PyPi, Test-PyPi or Github\n if self.is_changed() or force:\n if self.mpy_version == \"latest\":\n log.warning(\"version: `latest` package will only be available on Github, and not published to PyPi.\")\n self.status[\"result\"] = \"Published to GitHub\"\n else:\n self.update_hashes() # resets is_changed to False\n if not dry_run:\n pub_ok = self.poetry_publish(production=production)\n else:\n log.warning(f\"{self.package_name}: Dry run, not publishing to {'' if production else 'Test-'}PyPi\")\n pub_ok = True\n if not pub_ok:\n log.warning(f\"{self.package_name}: Publish failed for {self.pkg_version}\")\n self.status[\"error\"] = \"Publish failed\"\n return False\n self.status[\"result\"] = \"Published to PyPi\" if production else \"Published to Test-PyPi\"\n self.update_hashes()\n if dry_run:\n log.warning(f\"{self.package_name}: Dry run, not saving to database\")\n else:\n # get the package state and add it to the database\n db.add(self.to_dict())\n db.commit()\n return True\n else:\n log.info(f\"No changes to package : {self.package_name} {self.pkg_version}\")\n\n if clean:\n self.clean()\n return True",
"def _setup(self):\n mkdir_p(self.output_folder)\n if self.symlink_dir:\n mkdir_p(self.symlink_dir)\n try:\n selected_versions = self._resolve_dependencies()\n if selected_versions:\n self._write_lock(selected_versions)\n print('\\n\\nVersions Selected for downloading:\\n')\n print('\\t' + '\\n\\t'.join(['{}: {}'.format(req, ver) for req, ver in selected_versions.items()]) + '\\n')\n for pkg_name, version in selected_versions.items():\n pkg_metadata = self._get_metadata(pkg_name)\n version_metadata = pkg_metadata.get('versions', dict()).get(str(version), dict())\n self._download_package(version_metadata)\n except (RequirementMatchError, DependencyError) as e:\n print(e.message)\n return self.created()",
"def create_model_package(ModelPackageName=None, ModelPackageDescription=None, InferenceSpecification=None, ValidationSpecification=None, SourceAlgorithmSpecification=None, CertifyForMarketplace=None):\n pass",
"def publish():\n pass",
"def package(self, platform):\n \n # Validate the platform.\n validations.validate_platform(platform)\n \n # Get the name of our build directory, and select a temporary directory to\n # perform our work in.\n build_dir = self.get_build_directory()\n temp_dir = tempfile.mkdtemp()\n \n \n # Copy the existing vesselinfo file into place.\n source_vesselinfo_fn = os.path.join(build_dir, 'vesselinfo')\n destination_vesselinfo_fn = os.path.join(temp_dir, 'vesselinfo')\n \n if not os.path.isfile(source_vesselinfo_fn):\n raise validations.ValidationError('There is no vesselinfo file for the given build ID.')\n \n shutil.copy(source_vesselinfo_fn, destination_vesselinfo_fn)\n \n \n # Create packages for the requested installers.\n packager.package_installers(temp_dir, [platform])\n \n # Move each file in the temporary directory to the destination directory.\n for file_to_move in os.listdir(temp_dir):\n source_filename = os.path.abspath(os.path.join(temp_dir, file_to_move))\n destination_filename = os.path.abspath(os.path.join(build_dir, file_to_move))\n shutil.move(source_filename, destination_filename)\n\n # Remove the temporary directory.\n shutil.rmtree(temp_dir)",
"def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk = hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n \n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)",
"def create_package(args, api, command_obj, resume=False):\n set_subcommand_file(args.output_dir)\n if resume:\n retrieve_subcommands()\n # read the metadata.json information\n message = ('Reading the metadata.json files.........\\n')\n u.log_message(message, log_file=session_file,\n console=args.verbosity)\n package_dir = args.package_dir\n output_dir = args.output_dir\n metadata_file = os.path.join(package_dir, METADATA_FILE)\n metadata = None\n\n with open(metadata_file) as metadata_handler:\n metadata = json.load(metadata_handler)\n # recurse into components/directories, if any\n if metadata.get(\"kind\") == \"package\" and 'components' in metadata:\n components = metadata.get(\"components\")\n for component in components:\n message = ('Inspecting component %s.........\\n' % component)\n u.log_message(message, log_file=session_file,\n console=args.verbosity)\n args.package_dir = os.path.join(package_dir, component)\n create_package(args, api, command_obj, resume=resume)\n args.package_dir = package_dir\n else:\n # create libraries or scripts\n imports = []\n category = str(metadata.get(\"category\", DFT_CATEGORY))\n if metadata.get(\"imports\") is not None:\n lib_imports = metadata.get(\"imports\")\n for lib_import in lib_imports:\n args.package_dir = os.path.join(package_dir, lib_import)\n if args.embed_libs:\n library_ref = create_package( \\\n args, api, command_obj, resume=resume)\n u.log_created_resources(\"imports\",\n output_dir, library_ref)\n else:\n try:\n # try to read the library id, if it is already there\n library_ref = read_library_id(os.path.join( \\\n output_dir, os.path.basename(args.package_dir)))\n except IOError:\n library_ref = create_package( \\\n args, api, command_obj, resume=resume)\n library_ref = read_library_id(os.path.join( \\\n output_dir, os.path.basename(args.package_dir)))\n imports.append(library_ref)\n args.package_dir = package_dir\n # read the metadata.json information\n message = ('Creating the %s.........\\n' % metadata.get(\"kind\"))\n u.log_message(message, log_file=session_file,\n console=args.verbosity)\n if metadata.get(\"kind\") in WHIZZML_RESOURCES:\n whizzml_code = os.path.normpath(os.path.join(args.package_dir, \\\n metadata.get(\"source_code\", \"%s.whizzml\" % \\\n metadata.get(\"kind\"))))\n if args.embed_libs and metadata.get(\"kind\") == WHIZZML_LIBRARY:\n return whizzml_code\n\n args.output_dir = os.path.join(output_dir, \\\n os.path.basename(package_dir))\n # creating command to create the resource\n command = COMMANDS[metadata.get(\"kind\")] % (whizzml_code,\n args.output_dir)\n command_args = command.split()\n bigml.util.check_dir(args.output_dir)\n\n # getting inputs and outputs for the script from metadata\n if \"inputs\" in metadata:\n inputs_file = os.path.join(args.output_dir, \"inputs.json\")\n u.write_to_utf8(inputs_file, json.dumps(metadata.get(\"inputs\")))\n command_args.extend([\"--declare-inputs\", inputs_file])\n if \"outputs\" in metadata:\n outputs_file = os.path.join(args.output_dir, \"outputs.json\")\n u.write_to_utf8(outputs_file, json.dumps(metadata.get(\"outputs\")))\n command_args.extend([\"--declare-outputs\", outputs_file])\n if \"description\" in metadata:\n desc_file = os.path.join(args.output_dir, \"description.txt\")\n u.write_to_utf8(desc_file, metadata.get(\"description\"))\n command_args.extend([\"--description\", desc_file])\n if metadata.get(\"name\"):\n command_args.extend([\"--name\", metadata.get(\"name\")])\n if args.tag:\n for tag in args.tag:\n command_args.extend([\"--tag\", tag])\n 
command_args.extend([\"--category\", category])\n\n # adding imports, if any\n if imports:\n if args.embed_libs:\n # imports to be embedded are in the same output directory\n command_args.extend( \\\n [\"--embedded-imports\", os.path.join(output_dir,\n \"imports\")])\n else:\n # imports to be refereced by ID\n command_args.extend([\"--imports\", \",\".join(imports)])\n command_args.extend([\"--verbosity\", str(args.verbosity)])\n command_obj.propagate(command_args)\n # u.add_api_context(command_args, args)\n if args.upgrade:\n command_args.extend([\"--upgrade\"])\n\n if resume:\n next_command = subcommand_list.pop()\n if different_command(next_command, command):\n resume = False\n u.sys_log_message(command, log_file=subcommand_file)\n execute_dispatcher(args=command_args)\n elif not subcommand_list:\n execute_dispatcher(args=['execute', '--resume'])\n resume = False\n else:\n u.sys_log_message(command, log_file=subcommand_file)\n execute_dispatcher(args=command_args)\n args.output_dir = output_dir\n return whizzml_code\n return \"\"",
"def package(build, dist, symlink, no_sanity_check, sanity_check, folder, requirement):\n folder = runez.resolved_path(folder)\n if not os.path.isdir(folder):\n sys.exit(\"Folder %s does not exist\" % runez.short(folder))\n\n if no_sanity_check:\n sanity_check = None\n\n finalizer = PackageFinalizer(folder, build, dist, symlink, sanity_check, requirement)\n problem = finalizer.resolve()\n if problem:\n sys.exit(problem)\n\n report = finalizer.finalize()\n if report:\n inform(\"\")\n inform(report)\n inform(\"\")\n\n inform(\"Packaged %s successfully\" % runez.bold(runez.short(folder)))",
"def transformation_catalog():\n tc = TransformationCatalog()\n\n # Add docker container\n #crisis_container = Container(\n # 'crisis_container',\n # Container.DOCKER,\n # image = \"docker://slnagark/crisis_wf:latest\",\n # arguments=\"--runtime=nvidia --shm-size=1gb\"\n # ).add_env(TORCH_HOME=\"/tmp\")\n \n crisis_container = Container(\n 'galaxy_container',\n Container.SINGULARITY,\n image = str(Path(\".\").parent.resolve() / \"containers/crisis-computing_latest.sif\"),\n image_site = \"local\",\n mounts=[\"${DONUT_USER_HOME}:${DONUT_USER_HOME}\"]\n ).add_env(TORCH_HOME=\"/tmp\")\n\n\n # preprocessing scripts\n preprocess_images = Transformation(\n \"preprocess_images\",\n site = \"local\",\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_images.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n preprocess_tweets = Transformation(\n \"preprocess_tweets\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_tweets.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n \n # HPO, training and inference scripts for ResNet-50\n hpo_train_resnet = Transformation(\n \"hpo_train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_resnet = Transformation(\n \"train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n resnet_inference = Transformation(\n \"resnet_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/resnet_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # HPO, training and inference scripts for Bi-LSTM\n\n hpo_train_bilstm = Transformation(\n \"hpo_train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n #.add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_bilstm = Transformation(\n \"train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n \n bilstm_inference = Transformation(\n \"bilstm_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/bilstm_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 
10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # late fusion script\n late_fusion = Transformation(\n \"late_fusion\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/late_fusion.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n\n tc.add_containers(crisis_container)\n tc.add_transformations(preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion)\n tc.write()\n\n return preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion",
"def lifecycle_package(self, cc_name, cc_version, cc_path, language):\n if self.version in BasicEnv.binary_versions_v2:\n label = cc_name+\"_\"+cc_version\n res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode package {}.tar.gz --path {} --lang {} --label {}\"\n .format(self.version, cc_name, cc_path, language, label))\n res = res >> 8\n print(\"res\", res)\n return",
"def do_package(package):\n\tn_ucr = extFile(package, 'univention-config-registry')\n\tif not os.path.exists(n_ucr):\n\t\treturn\n\n\tf_ucr = open(n_ucr, 'r')\n\n\tfor item in univention.config_registry.parseRfc822(f_ucr.read()):\n\t\ttyp = item['Type'][0]\n\t\tif typ == 'file':\n\t\t\tf = item['File'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'subfile':\n\t\t\tf = item['Subfile'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\telif typ == 'multifile':\n\t\t\tf = item['Multifile'][0]\n\t\t\tif os.path.exists(f):\n\t\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'script':\n\t\t\tf = item['Script'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'scripts'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'scripts'))\n\t\t\tdoIt('chmod', 'a+x', destPath(f, package, 'scripts'))\n\t\telif typ == 'module':\n\t\t\tf = item['Module'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'modules'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'modules'))\n\t\telse:\n\t\t\tprint >>sys.stderr, 'Unknown type: %s' % typ\n\t\t\treturn\n\n\tf_ucr.close()\n\n\tdoIt('install', '-d', destDir('', package, 'info'))\n\tdoIt('install', '-m644', n_ucr, destPath(package+'.info', package, 'info'))\n\tmapping_file = extFile( package, 'univention-config-registry-mapping')\n\tif os.path.exists(mapping_file):\n\t\tdoIt('install', '-d', destDir('', package, 'mapping'))\n\t\tdoIt('install', '-m644', mapping_file, destPath(package+'.univention-config-registry-mapping', package, 'mapping'))\n\n\tdata = {\n\t\t\t'pkg': quote(package),\n\t\t\t'info': quote(\"/etc/univention/templates/info/%s.info\" % package),\n\t\t\t'removed': quote(\"/etc/univention/templates/removed/%s.info\" % package),\n\t\t\t}\n\n\tf_preinst = open(extFile(package, 'preinst.debhelper'), 'a')\n\tf_preinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_preinst.write('if [ \"$1\" = \"install\" ] ; then\\n')\n\tf_preinst.write(' [ -e %(removed)s ] && [ ! 
-e %(info)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_preinst.write('fi\\n')\n\tf_preinst.write('# End automatically added section\\n')\n\tf_preinst.close()\n\n\tf_postinst = open(extFile(package, 'postinst.debhelper'), 'a')\n\tf_postinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_postinst.write('if [ \"$1\" = \"abort-remove\" ]; then\\n')\n\tf_postinst.write(' [ -e %(removed)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_postinst.write('fi\\n')\n\tf_postinst.write('[ -x /usr/sbin/univention-config-registry ] && univention-config-registry register %(pkg)s || true\\n' % data)\n\tf_postinst.write('# End automatically added section\\n')\n\tf_postinst.close()\n\n\tf_prerm = open(extFile(package, 'prerm.debhelper'), 'a')\n\tf_prerm.write('# Automatically added by univention-install-config-registry\\n')\n\tf_prerm.write('if [ \"$1\" = \"remove\" ] && [ -e %(info)s ] ; then\\n' % data)\n\tf_prerm.write(' [ -x /usr/sbin/univention-config-registry ] && univention-config-registry unregister %(pkg)s || true\\n' % data)\n\tf_prerm.write(' mv %(info)s %(removed)s || true\\n' % data)\n\tf_prerm.write('fi\\n')\n\tf_prerm.write('# End automatically added section\\n')\n\tf_prerm.close()\n\n\tdoIt('perl', '-e', 'use Debian::Debhelper::Dh_Lib;addsubstvar(\"%s\", \"misc:Depends\", \"univention-config (>= 7.0.25)\");' % package)",
"def update_package_files(self) -> None:\n # create the package folder\n self.package_path.mkdir(parents=True, exist_ok=True)\n\n self.clean() # Delete any previous *.py? files\n self.copy_stubs()\n self.create_readme()\n self.create_license()",
"def build_it(swagger_file: str, main_rel: bool):\n global _release_in_process\n reset_all()\n relname = load_stable(swagger_file)\n _release_in_process = relname\n path = prep_model_root(model_package)\n relpath = path / relname\n prep_rel_package(str(relpath))\n write_modules(str(relpath))\n if main_rel:\n # this is the main release; make the root package default to it\n make_root_init(model_package, relname)\n _release_in_process = None"
] | [
"0.57952726",
"0.5456674",
"0.53661186",
"0.5360605",
"0.5353057",
"0.53490925",
"0.5340909",
"0.5304026",
"0.52926666",
"0.52684975",
"0.5263543",
"0.5253568",
"0.5247544",
"0.52424407",
"0.524119",
"0.5240253",
"0.52080166",
"0.5200726",
"0.5174414",
"0.51352537",
"0.51273733",
"0.5118481",
"0.5118309",
"0.5116631",
"0.51067454",
"0.50982696",
"0.5096976",
"0.5086586",
"0.50697076",
"0.5067839"
] | 0.76288223 | 0 |
Create Alert Processor package and publish versions | def _deploy_alert_processor():
alert_package = AlertProcessorPackage(config=config, version=current_version)
alert_package.create_and_upload()
return alert_package | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _provision_package(self):",
"def create(index):\n # Get the project root\n project_root = get_project_root()\n package_name = os.path.basename(project_root)\n logging.info(\"Creating package for current project: \" + package_name)\n Packager(package_name, project_root).create(index)",
"def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package",
"def package_software(self, version: str) -> None:\n logger.info(f\"Generating a release package [{version}]\")\n pass",
"def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )",
"def publish():\n pass",
"def init_add_package_to_stack_process(stack, name, version, file_name):\n with settings(host_string=host, key_filename=key_filename):\n s = Stack(MAGIC_DOMAIN, stack, meta_path=\"/var/gachette/\", operator=StackOperatorRedis(redis_host=dd.REDIS_HOST))\n s.add_package(name, version=version, file_name=file_name)\n send_notification(\"stack #%s package %s (%s) added\" % (stack, name, version))",
"def package(self, platform):\n \n # Validate the platform.\n validations.validate_platform(platform)\n \n # Get the name of our build directory, and select a temporary directory to\n # perform our work in.\n build_dir = self.get_build_directory()\n temp_dir = tempfile.mkdtemp()\n \n \n # Copy the existing vesselinfo file into place.\n source_vesselinfo_fn = os.path.join(build_dir, 'vesselinfo')\n destination_vesselinfo_fn = os.path.join(temp_dir, 'vesselinfo')\n \n if not os.path.isfile(source_vesselinfo_fn):\n raise validations.ValidationError('There is no vesselinfo file for the given build ID.')\n \n shutil.copy(source_vesselinfo_fn, destination_vesselinfo_fn)\n \n \n # Create packages for the requested installers.\n packager.package_installers(temp_dir, [platform])\n \n # Move each file in the temporary directory to the destination directory.\n for file_to_move in os.listdir(temp_dir):\n source_filename = os.path.abspath(os.path.join(temp_dir, file_to_move))\n destination_filename = os.path.abspath(os.path.join(build_dir, file_to_move))\n shutil.move(source_filename, destination_filename)\n\n # Remove the temporary directory.\n shutil.rmtree(temp_dir)",
"def main(ctx, debug, config, index, python, delivery, packager):\n global PACKAGER\n PACKAGER = PexPackager if packager == \"pex\" else VenvPackager\n\n runez.system.AbortException = SystemExit\n if ctx.invoked_subcommand != \"package\":\n cli = TrackedSettings(delivery, index, python)\n base = find_base()\n CFG.set_base(base, config_path=config, cli=cli)\n\n runez.log.setup(\n debug=debug,\n console_format=\"%(levelname)s %(message)s\" if debug else \"%(message)s\",\n console_level=logging.WARNING,\n console_stream=sys.stderr,\n locations=None,\n )",
"def _deploy_rule_processor():\n rule_package = RuleProcessorPackage(config=config, version=current_version)\n rule_package.create_and_upload()\n return rule_package",
"def create_package(self, **kwargs):\n results = self.api.action.package_create(**kwargs)\n self.get_ckan_metadata(True)\n return results",
"def main(pkg_dir, years):\n pkgname = os.path.basename(pkg_dir)\n identifier = clean_name('archlinux_pkg_' + pkgname)\n metadata = {\n #'collection': ['test_collection', 'open_source_software'],\n #'collection': ['open_source_software'],\n 'collection': ['archlinuxarchive'],\n 'mediatype': 'software',\n 'publisher': 'Arch Linux',\n 'creator': 'Arch Linux',\n 'subject': ['archlinux', 'archlinux package'],\n }\n metadata['title'] = pkgname + \" package archive from Arch Linux\"\n metadata['subject'].append(pkgname)\n upload_pkg(identifier, pkgname, metadata, pkg_dir, years)",
"def publish_updates():\n run_subprocess(['osg-batch-update'])",
"def package(target, source, env):\n\n # Print out.\n print('')\n print(\"#######################\")\n print(\"# Packaging the files #\")\n print(\"#######################\")\n\n # List of distribution files.\n type_list = [env['DIST_TYPE']]\n if type_list[0] == 'ALL':\n type_list = ['zip', 'tar']\n\n # Loop over the distribution files.\n for dist_type in type_list:\n # The file name.\n if dist_type == 'zip':\n file = env['DIST_FILE'] + '.zip'\n elif dist_type == 'tar':\n file = env['DIST_FILE'] + '.tar.bz2'\n elif dist_type == 'dmg':\n file = env['DIST_FILE'] + '.dmg'\n\n # Print out.\n print(\"\\n\\nCreating the package distribution \" + repr(file) + \".\\n\")\n\n # Create the special Mac OS X DMG file and then stop execution.\n if dist_type == 'dmg':\n # Create the Mac OS X universal application.\n print(\"\\n# Creating the Mac OS X universal application.\\n\\n\")\n cmd = '%s setup.py py2app' % sys.executable\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Create the dmg image.\n print(\"\\n\\n# Creating the DMG image.\\n\\n\")\n cmd = 'hdiutil create -ov -fs HFS+ -volname \"relax\" -srcfolder dist/relax.app ../%s' % file\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Stop executing.\n return\n\n # Open the Zip distribution file.\n if dist_type == 'zip':\n archive = ZipFile(path.pardir + path.sep + file, 'w', compression=8)\n\n # Open the Tar distribution file.\n elif dist_type == 'tar':\n if search('.bz2$', file):\n archive = TarFile.bz2open(path.pardir + path.sep + file, 'w')\n elif search('.gz$', file):\n archive = TarFile.gzopen(path.pardir + path.sep + file, 'w')\n else:\n archive = TarFile.open(path.pardir + path.sep + file, 'w')\n\n # Base directory.\n base = getcwd() + sep\n\n # Walk through the directories.\n for root, dirs, files in walk(getcwd()):\n # Skip the subversion directories.\n if search(\"\\.svn\", root):\n continue\n\n # Add the files in the current directory to the archive.\n for i in range(len(files)):\n # Skip any '.sconsign' files, hidden files, byte-compiled '*.pyc' files, or binary objects '.o', '.os', 'obj', 'lib', and 'exp'.\n if search(\"\\.sconsign\", files[i]) or search(\"^\\.\", files[i]) or search(\"\\.pyc$\", files[i]) or search(\"\\.o$\", files[i]) or search(\"\\.os$\", files[i]) or search(\"\\.obj$\", files[i]) or search(\"\\.lib$\", files[i]) or search(\"\\.exp$\", files[i]):\n continue\n\n # Create the file name (without the base directory).\n name = path.join(root, files[i])\n name = name[len(base):]\n print('relax-' + version + path.sep + name)\n\n # The archive file name.\n arcname = 'relax-' + version + path.sep + name\n\n # Zip archives.\n if dist_type == 'zip':\n archive.write(filename=name, arcname=arcname)\n\n # Tar archives.\n if dist_type == 'tar':\n archive.add(name=name, arcname=arcname)\n\n # Close the archive.\n archive.close()\n\n # Final printout.\n print(\"\\n\\n\\n\")",
"def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? \".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))",
"def generate(self):\n\n if self.check_data():\n pkg_dict = self.create_package_dict()\n pkg_dir = pkg_dict['dir'] + pkg_dict['name']\n if os.path.exists(pkg_dir):\n but = QMessageBox().question(self, 'Message', \"Такой проект уже существует! Хотите перезаписать?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if but == QMessageBox.Yes:\n shutil.rmtree(pkg_dir)\n self.pkg = package.RosPackage(pkg_dict)\n self.msg2Statusbar.emit('Успешная генерация')\n else:\n self.pkg = package.RosPackage(pkg_dict)\n self.msg2Statusbar.emit('Успешная генерация')",
"def _package_plugins(ctx):\n print(\"\\n\\n-- Creating Zip Files \\n\")\n\n project_dir = Path(__file__).parent\n plugins_projects = [\n x for x in (project_dir / \"build/build_directory_for_tests/\").iterdir() if x.is_dir()\n ]\n artifacts_dir = project_dir / \"build/artifacts\"\n\n plugins_zip = project_dir / \"build/plugin_zip\"\n if plugins_zip.exists():\n shutil.rmtree(plugins_zip)\n\n plugins_zip.mkdir()\n\n for project in plugins_projects:\n plugins_dirs = [\n x for x in (project / \"plugin\").iterdir() if x.is_dir() and (x / \"assets\").exists()\n ]\n hm_generator = HookManGenerator(\n hook_spec_file_path=project_dir / f\"tests/plugins/{project.name}/hook_specs.py\"\n )\n\n for plugin in plugins_dirs:\n (plugin / \"artifacts\").mkdir()\n if sys.platform == \"win32\":\n shutil.copy2(src=artifacts_dir / f\"{plugin.name}.dll\", dst=plugin / \"artifacts\")\n else:\n shutil.copy2(src=artifacts_dir / f\"lib{plugin.name}.so\", dst=plugin / \"artifacts\")\n\n hm_generator.generate_plugin_package(\n package_name=plugin.name, plugin_dir=plugin, dst_path=plugins_zip\n )",
"def test_publish_deployment_run(self):\n pass",
"def custom_package_xml_generator(directory, packagename=None, version='45.0', filename='package.xml'):\n\n METADATA_TYPE = {\n 'applications':'CustomApplication', 'aura':'AuraDefinitionBundle', 'classes':'ApexClass', 'customPermissions':'CustomPermission', \n 'flexipages':'FlexiPage', 'flows':'Flow', 'globalValueSets':'GlobalValueSet', 'labels':'CustomLabels', 'layouts':'Layout',\n 'lwc': 'LightningComponentBundle', 'objects':'CustomObject', 'pages':'ApexPage', 'permissionsets':'PermissionSet', 'profiles':'Profile',\n 'staticresources':'StaticResource', 'tabs':'CustomTab', 'triggers':'ApexTrigger', 'contentassets':'ContentAsset', 'pathAssistants':'PathAssistant',\n 'quickActions':'QuickAction', 'remoteSiteSettings':'RemoteSiteSetting', 'workflows':'Workflow', 'dashboards':'Dashboard', 'reports':'Report',\n 'cspTrustedSites':'CspTrustedSite',\n }\n\n \"\"\"\n Non-implemented Metadata:\n 'ApexComponent', 'CustomMetadata' (needs custom manipulation), 'CustomObjectTranslation', 'DuplicateRule', \n 'FlowCategory', 'GlobalValueSetTranslation', 'MatchingRules',\n \"\"\"\n #read directory structure\n\n mdtypedirs = os.listdir(directory)\n\n nested_mdt_object = ['ValidationRule', 'CompactLayout', 'ListView', 'SharingReason', 'RecordType']\n nested_mdt_workflow = ['WorkflowFieldUpdate', 'WorkflowKnowledgePublish', 'WorkflowTask', 'WorkflowAlert', 'WorkflowSend', 'WorkflowOutboundMessage', 'WorkflowRule']\n\n # start our xml structure\n root = xml.Element('Package')\n root.set('xmlns','http://soap.sforce.com/2006/04/metadata')\n\n for mdtype in mdtypedirs:\n # create child node for each type of component\n if mdtype in METADATA_TYPE.keys():\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = str(METADATA_TYPE[mdtype])\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n \n if mdtype == 'objects':\n for nest_mdtyp in nested_mdt_object:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n if mdtype == 'workflows':\n for nest_mdtyp in nested_mdt_workflow:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n #Custom behavior for custom labels\n if mdtype == 'labels':\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = 'CustomLabel'\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n # add the final xml node package.api_version\n eversion = xml.SubElement(root, 'version')\n eversion.text = str(version)\n\n #package name\n if packagename != None:\n efname = xml.SubElement(root, 'fullName')\n efname.text = str(packagename)\n\n #pretty format for xml\n xmlstring = xml.tostring(root)\n reparsed = minidom.parseString(xmlstring)\n prettyxml = reparsed.toprettyxml(indent=' ', newl='\\n', encoding='UTF-8')\n \n #generate xml file from string\n try:\n with open(os.path.join(directory, filename), \"bw\") as xml_file:\n xml_file.write(prettyxml)\n except IOError:\n pass",
"def package():\n pass",
"def run_pre_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name)\n # do not forget to clean up the staging area\n staging.clear()",
"def create(self):\n LOGGER.info('Creating package for %s', self.PACKAGE_NAME)\n\n if os.path.exists(self.temp_package_path):\n shutil.rmtree(self.temp_package_path)\n\n # Copy the default package directory\n self._copy_directory(self.PACKAGE_NAME)\n\n # Copy the user-specified config directory\n # Ensure this is copied to the 'conf' destination directory\n self._copy_directory(self.config.config_path, destination='conf')\n\n # Copy in any user-specified files\n self._copy_user_config_files()\n\n if not self._resolve_libraries():\n LOGGER.error('Failed to install necessary libraries')\n return False\n\n # Zip it all up\n # Build these in the top-level of the terraform directory as streamalert.zip\n result = shutil.make_archive(\n os.path.join(self.config.build_directory, self.PACKAGE_NAME),\n 'zip',\n self.temp_package_path\n )\n\n LOGGER.info('Successfully created package: %s', result)\n\n # Remove temp files\n shutil.rmtree(self.temp_package_path)\n\n return result",
"def run_post_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name, publisher_type=POST_PUBLISHER_TYPE)\n # do not forget to clean up the staging area\n staging.clear()",
"def run(self):\n report_info = self.api_client.create_task(self.host_id,\n CompatibilityReport.Spec(self.targetRelease))\n print(\"Compatibility Report API Task ID : \", report_info.get_task_id())",
"def update_package_files(self) -> None:\n # create the package folder\n self.package_path.mkdir(parents=True, exist_ok=True)\n\n self.clean() # Delete any previous *.py? files\n self.copy_stubs()\n self.create_readme()\n self.create_license()",
"def svn_fs_invoke_pack_notify(*args):\r\n return _fs.svn_fs_invoke_pack_notify(*args)",
"def app_with_populated_format_versions(scope=\"package\"):\n app = create_app(\"test\")\n with app.app_context():\n db.create_all()\n\n storage_service = test_helpers.create_test_storage_service()\n storage_location = test_helpers.create_test_storage_location(\n storage_service_id=storage_service.id\n )\n _ = test_helpers.create_test_pipeline(storage_service_id=storage_service.id)\n fetch_job = test_helpers.create_test_fetch_job(\n storage_service_id=storage_service.id\n )\n\n aip1 = test_helpers.create_test_aip(\n uuid=AIP_1_UUID,\n create_date=datetime.strptime(AIP_1_CREATION_DATE, AIP_DATE_FORMAT),\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n\n aip2 = test_helpers.create_test_aip(\n uuid=AIP_2_UUID,\n create_date=datetime.strptime(AIP_2_CREATION_DATE, AIP_DATE_FORMAT),\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n\n _ = test_helpers.create_test_file(\n size=ORIGINAL_FILE_SIZE,\n uuid=ORIGINAL_FILE_1_UUID,\n name=\"original.jpg\",\n puid=JPEG_1_01_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_01_FORMAT_VERSION,\n aip_id=aip1.id,\n )\n\n _ = test_helpers.create_test_file(\n size=PRESERVATION_FILE_SIZE,\n uuid=PRESERVATION_FILE_1_UUID,\n name=\"preservation.jpg\",\n puid=JPEG_1_02_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_02_FORMAT_VERSION,\n aip_id=aip2.id,\n )\n\n _ = test_helpers.create_test_file(\n size=None,\n uuid=ORIGINAL_FILE_2_UUID,\n name=\"original.iso\",\n puid=\"fmt/468\",\n file_format=\"ISO Disk Image File\",\n format_version=None,\n aip_id=aip2.id,\n )\n\n yield app\n\n db.drop_all()",
"def main():\n produce()",
"def __create_vnf_package(cls, context, vnf_package_info):\n vnf_package = objects.VnfPackage(\n context=context,\n id=vnf_package_info.get('id'),\n onboarding_state=fields.PackageOnboardingStateType.CREATED,\n operational_state=fields.PackageOperationalStateType.DISABLED,\n usage_state=fields.PackageUsageStateType.NOT_IN_USE,\n tenant_id=context.project_id\n )\n vnf_package.create()\n return vnf_package",
"def package_created_event(order, event_bus_name):\n\n return {\n \"Time\": datetime.datetime.now(),\n \"Source\": \"ecommerce.warehouse\",\n \"Resources\": [order[\"orderId\"]],\n \"DetailType\": \"PackageCreated\",\n \"Detail\": json.dumps(order),\n \"EventBusName\": event_bus_name\n }"
] | [
"0.6084183",
"0.59676665",
"0.57402325",
"0.5731163",
"0.5620225",
"0.56090224",
"0.5588369",
"0.55562985",
"0.5465473",
"0.544694",
"0.5439961",
"0.5364832",
"0.5348297",
"0.53421897",
"0.5295498",
"0.52798015",
"0.5262978",
"0.52464217",
"0.5217506",
"0.52039903",
"0.5193559",
"0.5193551",
"0.518446",
"0.5176383",
"0.5170595",
"0.51642525",
"0.5163572",
"0.5157595",
"0.5147011",
"0.5128508"
] | 0.77806085 | 0 |
Create Athena Partition Refresh package and publish | def _deploy_athena_partition_refresh():
athena_package = AthenaPackage(config=config, version=current_version)
athena_package.create_and_upload()
return athena_package | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()",
"def deploy():\n update_treesheets()\n restart_treesheets()",
"def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()",
"def _provision_package(self):",
"def test_feathr_online_store_agg_features():\n\n online_test_table = get_online_test_table_name(\"nycTaxiCITableMaven\")\n test_workspace_dir = Path(\n __file__).parent.resolve() / \"test_user_workspace\"\n # os.chdir(test_workspace_dir)\n\n # The `feathr_runtime_location` was commented out in this config file, so feathr should use\n # Maven package as the dependency and `noop.jar` as the main file\n client: FeathrClient = basic_test_setup(os.path.join(test_workspace_dir, \"feathr_config_maven.yaml\"))\n\n \n \n location_id = TypedKey(key_column=\"DOLocationID\",\n key_column_type=ValueType.INT32,\n description=\"location id in NYC\",\n full_name=\"nyc_taxi.location_id\")\n\n feature_query = FeatureQuery(\n feature_list=[\"f_location_avg_fare\"], key=location_id)\n settings = ObservationSettings(\n observation_path=\"wasbs://[email protected]/sample_data/green_tripdata_2020-04.csv\",\n event_timestamp_column=\"lpep_dropoff_datetime\",\n timestamp_format=\"yyyy-MM-dd HH:mm:ss\")\n\n now = datetime.now()\n # set output folder based on different runtime\n if client.spark_runtime == 'databricks':\n output_path = ''.join(['dbfs:/feathrazure_cijob','_', str(now.minute), '_', str(now.second), \".avro\"])\n else:\n output_path = ''.join(['abfss://[email protected]/demo_data/output','_', str(now.minute), '_', str(now.second), \".avro\"])\n\n\n client.get_offline_features(observation_settings=settings,\n feature_query=feature_query,\n output_path=output_path)\n\n # assuming the job can successfully run; otherwise it will throw exception\n client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS)\n return\n backfill_time = BackfillTime(start=datetime(\n 2020, 5, 20), end=datetime(2020, 5, 20), step=timedelta(days=1))\n redisSink = RedisSink(table_name=online_test_table)\n settings = MaterializationSettings(\"TestJobName\",\n sinks=[redisSink],\n feature_names=[\n \"f_location_avg_fare\", \"f_location_max_fare\"],\n backfill_time=backfill_time)\n client.materialize_features(settings)\n # just assume the job is successful without validating the actual result in Redis. Might need to consolidate\n # this part with the test_feathr_online_store test case\n client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS)\n\n res = client.get_online_features(online_test_table, '265', [\n 'f_location_avg_fare', 'f_location_max_fare'])\n # just assume there are values. We don't hard code the values for now for testing\n # the correctness of the feature generation should be guaranteed by feathr runtime.\n # ID 239 and 265 are available in the `DOLocationID` column in this file:\n # https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2020-04.csv\n # View more details on this dataset: https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page\n assert len(res) == 2\n assert res[0] != None\n assert res[1] != None\n res = client.multi_get_online_features(online_test_table,\n ['239', '265'],\n ['f_location_avg_fare', 'f_location_max_fare'])\n assert res['239'][0] != None\n assert res['239'][1] != None\n assert res['265'][0] != None\n assert res['265'][1] != None",
"def createPartitions(config, logger):\n databaseConnection, databaseCursor = connectToDatabase(config, logger)\n try:\n for aDatabaseObjectClass in databaseObjectClassListForWeeklyPartitions:\n weekIterator = mondayPairsIteratorFactory(config.startDate, config.endDate)\n aDatabaseObject = aDatabaseObjectClass(logger=logger)\n aDatabaseObject.createPartitions(databaseCursor, weekIterator)\n databaseConnection.commit()\n except:\n databaseConnection.rollback()\n socorro_util.reportExceptionAndAbort(logger)",
"def main(\n date,\n input_bucket,\n input_prefix,\n output_bucket,\n output_prefix,\n output_version,\n sample_id,\n lag_days,\n):\n spark = SparkSession.builder.appName(\"clients_daily\").getOrCreate()\n # Per https://issues.apache.org/jira/browse/PARQUET-142 ,\n # don't write _SUCCESS files, which interfere w/ReDash discovery\n spark.conf.set(\"mapreduce.fileoutputcommitter.marksuccessfuljobs\", \"false\")\n main_summary = load_main_summary(spark, input_bucket, input_prefix)\n day_frame, start_date = extract_submission_window_for_activity_day(\n main_summary, date, lag_days\n )\n if sample_id:\n day_frame = day_frame.where(\"sample_id = '{}'\".format(sample_id))\n with_searches = extract_search_counts(day_frame)\n results = to_profile_day_aggregates(with_searches)\n partition_count = get_partition_count_for_writing(bool(sample_id))\n output_base_path = \"{}/v{}/\".format(\n format_spark_path(output_bucket, output_prefix), output_version\n )\n write_one_activity_day(results, start_date, output_base_path, partition_count)",
"def create():\n upgrade()\n populate()",
"def publish(self, if_exists=\"raise\"):\n\n if if_exists == \"replace\":\n self.delete(mode=\"prod\")\n\n self.client[\"bigquery_prod\"].query(\n (self.table_folder / \"publish.sql\").open(\"r\", encoding=\"utf-8\").read()\n ).result()\n\n self.update()\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"published\",\n )",
"def deploy():",
"def repository_create_hosted():\n pass",
"def __init__(\n self,\n *,\n dag_id: str,\n cloud_workspace: CloudWorkspace,\n publisher_id: str,\n format_specification: str,\n bq_dataset_id: str = \"onix\",\n bq_table_name: str = \"onix\",\n bq_dataset_description: str = \"Thoth ONIX Feed\",\n bq_table_description: str = None,\n api_dataset_id: str = \"onix\",\n host_name: str = \"https://export.thoth.pub\",\n schema_folder: str = default_schema_folder(),\n observatory_api_conn_id: str = AirflowConns.OBSERVATORY_API,\n catchup: bool = False,\n start_date: pendulum.DateTime = pendulum.datetime(2022, 12, 1),\n schedule: str = \"@weekly\",\n ):\n super().__init__(\n dag_id,\n start_date=start_date,\n schedule=schedule,\n airflow_conns=[observatory_api_conn_id],\n catchup=catchup,\n tags=[\"oaebu\"],\n )\n\n self.dag_id = dag_id\n self.cloud_workspace = cloud_workspace\n self.publisher_id = publisher_id\n self.bq_dataset_id = bq_dataset_id\n self.bq_table_name = bq_table_name\n self.bq_dataset_description = bq_dataset_description\n self.bq_table_description = bq_table_description\n self.api_dataset_id = api_dataset_id\n self.host_name = host_name\n self.format_specification = format_specification\n self.schema_folder = schema_folder\n self.observatory_api_conn_id = observatory_api_conn_id\n\n check_workflow_inputs(self)\n\n self.add_setup_task(self.check_dependencies)\n self.add_task(self.download)\n self.add_task(self.upload_downloaded)\n self.add_task(self.transform)\n self.add_task(self.upload_transformed)\n self.add_task(self.bq_load)\n self.add_task(self.add_new_dataset_releases)\n self.add_task(self.cleanup)",
"def __init__(\n self,\n dag_id: str = DAG_ID,\n start_date: pendulum.DateTime = pendulum.datetime(2018, 5, 14),\n schedule_interval: str = \"@weekly\",\n dataset_id: str = \"crossref\",\n dataset_description: str = \"The Crossref Events dataset: https://www.eventdata.crossref.org/guide/\",\n queue: str = \"remote_queue\",\n merge_partition_field: str = \"id\",\n schema_folder: str = default_schema_folder(),\n batch_load: bool = True,\n airflow_vars: List = None,\n mailto: str = \"[email protected]\",\n max_threads: int = min(32, os.cpu_count() + 4),\n max_processes: int = os.cpu_count(),\n ):\n\n if airflow_vars is None:\n airflow_vars = [\n AirflowVars.DATA_PATH,\n AirflowVars.PROJECT_ID,\n AirflowVars.DATA_LOCATION,\n AirflowVars.DOWNLOAD_BUCKET,\n AirflowVars.TRANSFORM_BUCKET,\n ]\n super().__init__(\n dag_id,\n start_date,\n schedule_interval,\n dataset_id,\n merge_partition_field,\n schema_folder,\n dataset_description=dataset_description,\n queue=queue,\n batch_load=batch_load,\n airflow_vars=airflow_vars,\n load_bigquery_table_kwargs={\"ignore_unknown_values\": True},\n )\n self.mailto = mailto\n self.max_threads = max_threads\n self.max_processes = max_processes\n\n self.add_setup_task(self.check_dependencies)\n self.add_task_chain(\n [self.download, self.upload_downloaded, self.transform, self.upload_transformed, self.bq_load_partition]\n )\n self.add_task_chain([self.bq_delete_old, self.bq_append_new, self.cleanup], trigger_rule=\"none_failed\")",
"def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")",
"def test_backup_restore_with_recreate(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n rest = RestConnection(self.backupset.cluster_host)\n rest.delete_bucket()\n bucket_name = \"default\"\n rest_helper = RestHelper(rest)\n rest.create_bucket(bucket=bucket_name, ramQuotaMB=512)\n bucket_ready = rest_helper.vbucket_map_ready(bucket_name)\n if not bucket_ready:\n self.fail(\"Bucket {0} is not created after 120 seconds.\".format(bucket_name))\n self.log.info(\"Deleted {0} bucket and recreated it - restoring it now..\"\\\n .format(bucket_name))\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")",
"def test_partition_tables_default_partition(sdc_builder, sdc_executor, gcp):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name_1 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name_1 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name_2 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name_2 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_el_var = \"${record:attribute('sdc.dataset.name')}\"\n table_el_var = \"${record:attribute('sdc.table.name')}\"\n records_count = 20\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[{\"type\": \"POKEMON\", \"field\": \"name\"},\n {\"type\": \"INTEGER\", \"field\": \"id\"}])\n\n # Build Stream Selector\n selector = pipeline_builder.add_stage('Stream Selector')\n\n # Build Expression Evaluators\n expression_evaluator_1 = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator_1.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name_1},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name_1}]\n )\n\n expression_evaluator_2 = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator_2.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name_2},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name_2}]\n )\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_el_var,\n table=table_el_var,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[\n {\"dataset\": dataset_name_1,\n \"table\": table_name_1,\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"MONTH\",\n \"timePartitionExpiration\": 0},\n {\"defaultPartition\": True,\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"YEAR\",\n \"timePartitionExpiration\": 0}\n ])\n\n dev_data_generator >> selector >> expression_evaluator_1 >> bigquery\n selector >> expression_evaluator_2 >> bigquery\n\n selector.condition = [dict(outputLane=selector.output_lanes[0], predicate='${record:value(\\'/id\\')%2==0}'),\n dict(outputLane=selector.output_lanes[1], predicate='default')]\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref_1 = DatasetReference(gcp.project_id, dataset_name_1)\n dataset_ref_2 = DatasetReference(gcp.project_id, dataset_name_2)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table_1 = bigquery_client.get_table(f'{dataset_name_1}.{table_name_1}')\n data_from_bigquery_1 = [tuple(row.values()) for row in bigquery_client.list_rows(table_1)]\n data_from_bigquery_1.sort()\n\n table_2 = bigquery_client.get_table(f'{dataset_name_2}.{table_name_2}')\n 
data_from_bigquery_2 = [tuple(row.values()) for row in bigquery_client.list_rows(table_2)]\n data_from_bigquery_2.sort()\n\n # Assert table is partitioned as well\n assert table_1.time_partitioning.type_ == 'MONTH'\n assert table_2.time_partitioning.type_ == 'YEAR'\n assert len(data_from_bigquery_1) + len(data_from_bigquery_2) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref_1)\n _clean_up_bigquery(bigquery_client, dataset_ref_2)\n _clean_up_gcs(gcp, bucket, bucket_name)",
"def gbf_pub_update():\r\n LOG.info(\"Start: Update datasets in RLIDGeo warehouse.\")\r\n month_stamps = [\r\n datetime.date.today().strftime(\"%Y_%m\"),\r\n (\r\n datetime.date.today().replace(day=1)\r\n - datetime.timedelta(days=1)\r\n ).strftime(\"%Y_%m\"),\r\n ]\r\n for month_stamp in month_stamps:\r\n snapshot_db_path = SNAPSHOT_DB_PATH.format(month_stamp)\r\n if not os.path.exists(snapshot_db_path):\r\n LOG.warning(\"Snapshot database %s does not exist.\", snapshot_db_path)\r\n continue\r\n\r\n for _dataset in DATASETS:\r\n arcetl.features.update_from_dicts(\r\n dataset_path=_dataset.path(\"pub\"),\r\n update_features=source_rows(snapshot_db_path, _dataset.path(\"source\")),\r\n id_field_names=_dataset.id_field_names,\r\n field_names=_dataset.field_names,\r\n delete_missing_features=False,\r\n use_edit_session=False,\r\n )\r\n LOG.info(\"End: Update.\")",
"def publish():\n pass",
"def hello_pubsub(event, context):\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n print(pubsub_message)\n x = json.loads(pubsub_message)\n vm_name = x[\"jsonPayload\"][\"resource\"][\"name\"]\n vm_zone = x[\"jsonPayload\"][\"resource\"][\"zone\"]\n project_id = x[\"resource\"][\"labels\"][\"project_id\"]\n print(\"vm_name=\"+vm_name)\n print(\"vm_zone=\"+vm_zone)\n print(\"project_id=\"+project_id)\n \n compute = discovery.build('compute', 'v1')\n \n print(\"getting metadata fingerprint\")\n request = compute.instances().get(project= project_id, zone= vm_zone, instance= vm_name)\n response = request.execute()\n pprint(response)\n metadata_fingerprint= response['metadata']['fingerprint']\n print(\"existing metadata fingerprint = \" + metadata_fingerprint)\n\n vm_status=response['status']\n print(\"vm_status = \" + vm_status)\n while vm_status != 'RUNNING' :\n print(\"going to sleep for 1 second...\")\n time.sleep(1)\n request = compute.instances().get(project= project_id, zone= vm_zone, instance= vm_name)\n response = request.execute()\n vm_status=response['status']\n print(\"vm_status = \" + vm_status)\n\n print(\"Setting VM metadata\")\n metadata_body = {\n \"fingerprint\": metadata_fingerprint,\n \"items\": [\n {\n \"key\": \"startup-script-url\",\n \"value\": \"gs://mybucket/my_script.sh\"\n }\n ]\n }\n\n request = compute.instances().setMetadata(project=project_id, zone=vm_zone, instance=vm_name, body=metadata_body)\n response = request.execute()\n pprint(response)\n\n print(\"Restarting VM\")\n request = compute.instances().reset(project=project_id, zone=vm_zone, instance=vm_name)\n response = request.execute()\n pprint(response)",
"def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()",
"def deploy():\n build()\n collect()\n commit()\n push()",
"def main_process_function(project_id, config_file, retention, backup_type, expiration):\n print(\"Running bigquery dataset export for project:{}\".format(project_id))\n # Reading backup-parameters from json config\n with open(config_file) as f:\n master_config = json.load(f)\n backup_config = master_config[\"backup\"]\n\n location = backup_config[\"location\"]\n schema_path = backup_config[\"schema_uri\"]\n table_path = backup_config[\"table_uri\"]\n project_backup_config = backup_config[\"projects_dict\"][project_id]\n mapped_list = []\n\n # Get timestamp\n timestamp = datetime.now().strftime(\"%Y-%m-%d\")\n\n # Creating Big Query Client\n client = bigquery.Client(project=project_id)\n\n # Getting mapped relation between datasets and their tables\n if backup_type == \"all\":\n # Get all datasets\n datasets = list_all_datasets(client=client)\n # Map dataset->[tables]\n dataset_tables_map = get_datasets_tables_dict(\n client=client, project_id=project_id, datasets=datasets\n )\n mapped_list.append(dataset_tables_map)\n elif backup_type == \"config\":\n # Extract the backup pattern from config\n backup_pattern = project_backup_config[\"backup_pattern\"]\n for key, value in backup_pattern.items():\n dataset_tables_map = {}\n if value == \"all\":\n # Map dataset->[tables]\n dataset_tables_map = get_datasets_tables_dict(\n client=client, project_id=project_id, datasets=[key]\n )\n mapped_list.append(dataset_tables_map)\n else:\n # Map dataset->[tables]\n dataset_tables_map[key] = value\n mapped_list.append(dataset_tables_map)\n else:\n print(\n \"Please provide a valid backup_type option. Choose from ['all', 'config']\"\n )\n return None\n\n # Performing dataset export to gcs (data, schema)\n if mapped_list:\n for datasets_tables_dict in mapped_list:\n for bq_dataset_name in datasets_tables_dict.keys():\n print(\"Backup Operation on dataset: {}\".format(bq_dataset_name))\n for bq_table_name in datasets_tables_dict[bq_dataset_name]:\n print(\"Backing up table: {}\".format(bq_table_name))\n try:\n # Getting dataset and table objects\n dataset_ref = bigquery.DatasetReference(\n project_id, bq_dataset_name\n )\n table_ref = dataset_ref.table(bq_table_name)\n table_obj = client.get_table(table_ref)\n\n # Specifying extract-job parameters\n gcs_table_path = table_path.format(\n bucket_name=project_backup_config[\"bucket_name\"],\n retention=retention,\n dataset_name=bq_dataset_name,\n timestamp=timestamp,\n table_file_name=bq_table_name + \"-*.json\",\n )\n job_config = bigquery.ExtractJobConfig()\n job_config.compression = bigquery.Compression.GZIP\n job_config.destination_format = (\n bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON\n )\n\n # Exporting table-data to gcs\n extract_job = client.extract_table(\n table_ref,\n gcs_table_path,\n job_config=job_config,\n location=location,\n )\n extract_job.result()\n\n # Extracting table-schema\n table_schema = table_obj.schema\n table_schema = [\n {\n \"name\": item.name,\n \"mode\": item.mode,\n \"type\": item.field_type,\n }\n for item in table_schema\n ]\n json_schema = json.dumps(table_schema)\n\n # Defining schema-path\n gcs_schema_path = schema_path.format(\n bucket_name=project_backup_config[\"bucket_name\"],\n retention=retention,\n dataset_name=bq_dataset_name,\n timestamp=timestamp,\n schema_file_name=bq_table_name + \"-schema.json\",\n )\n\n # Writing table-schema to gcs\n sa_credentials = os.getenv(\"GOOGLE_APPLICATION_CREDENTIALS\")\n fs = gcsfs.GCSFileSystem(\n project=project_id, token=sa_credentials\n )\n with fs.open(\n 
gcs_schema_path,\n \"w\",\n metadata={\"Content-Type\": \"application/json\"},\n ) as f:\n f.write(json_schema)\n except Exception as error:\n print(\n \"Exception occurred for project {} at function {} inside export-loop: {}\".format(\n project_id, \"main_process_function\", error\n )\n )\n # Deleting backup data based on the backup_data_policy\n backup_data_policy = {\n \"daily\": 1,\n \"weekly\": 7,\n \"monthly\": 30,\n \"yearly\": 365,\n }\n if str(expiration).title() == \"True\":\n try:\n bucket_name = project_backup_config[\"bucket_name\"]\n storage_client = storage.Client(project_id)\n client_bucket = storage_client.get_bucket(bucket_name)\n delete_date = (\n datetime.now()\n - timedelta(days=backup_data_policy[retention])\n ).strftime(\"%Y-%m-%d\")\n delete_path = \"{retention}/{dataset_name}/{timestamp}\".format(\n retention=retention,\n dataset_name=bq_dataset_name,\n timestamp=delete_date,\n )\n for file in client_bucket.list_blobs(prefix=delete_path):\n file.delete()\n print(\"Deleted '{}'\".format(file.name))\n except Exception as error:\n print(\n \"Exception occurred at function {} inside expiration-loop: {}\".format(\n \"main_process_function\", error\n )\n )\n else:\n pass\n return None\n else:\n print(\"The mapping between datasets and their tables is empty.\")\n return None",
"def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package",
"def deploy_plan(plan_name):\n pass",
"def test_partition_tables_types(sdc_builder, sdc_executor, gcp, partition_type, file_format):\n\n if Version(sdc_builder.version) < Version('5.5.0') and file_format == 'JSON':\n pytest.skip('JSON staging introduced in 5.5.0')\n\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n records_count = 20\n\n partition = {\"dataset\": dataset_name,\n \"table\": table_name,\n \"partitionType\": partition_type,\n \"timePartitionExpiration\": 0}\n\n if partition_type == 'INGESTION':\n # it could be whatever, we do not partition on any column here\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = 'STRING'\n elif partition_type == 'TIMESTAMP':\n partition[\"columnName\"] = \"partition_column\"\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = 'DATETIME'\n elif partition_type in ['DATE', 'DATETIME']:\n partition[\"columnName\"] = \"partition_column\"\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = partition_type\n elif partition_type == 'INTEGER':\n partition[\"columnName\"] = \"partition_column\"\n partition[\"integerPartitionStart\"] = -1000\n partition[\"integerPartitionStep\"] = 100\n partition[\"integerPartitionEnd\"] = 1000\n data_type = partition_type\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[\n {\"type\": data_type, \"field\": \"partition_column\"},\n {\"type\": \"POKEMON\", \"field\": \"name\"}\n ])\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n staging_file_format=file_format,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[partition])\n\n dev_data_generator >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n # Assert table is partitioned as well\n if partition_type == 'INTEGER':\n assert table.range_partitioning.field == 'partition_column'\n assert table.range_partitioning.range_.start == -1000\n assert table.range_partitioning.range_.interval == 100\n assert table.range_partitioning.range_.end == 1000\n elif partition_type == 'INGESTION':\n assert table.time_partitioning.type_ == 'MONTH'\n else:\n assert table.time_partitioning.field == 'partition_column'\n assert table.time_partitioning.type_ == 'MONTH'\n # And that we have records in the table\n assert len(data_from_bigquery) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n 
_clean_up_gcs(gcp, bucket, bucket_name)",
"def deploy(parameters):\n\n print(\"In deploy module\")",
"def load_dim_applicant():\n config = configparser.ConfigParser() \n config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg') \n \n loadType = config.get('PARAM', 'LOADTYPE')\n\n spark = create_spark_session() \n # Lectura de datos de staging layer\n df_stg_applicant = spark.read.parquet(config['AWS']['S3_BUCKET']+\"/staging/applications\")\n \n if loadType==\"full\":\n #DIM APPLICANT \n # transformando la dimensión \n df_dim = df_stg_applicant.select(\"user_id\").distinct() \\\n .withColumn(\"applicant_key\", expr(\"uuid()\")) \\\n .union(spark.createDataFrame([(\"-1\",\"-1\")],\n [\"user_id\", \"applicant_key\"]))\\\n .withColumn(\"created_date\", current_date()) \\\n .select(\"applicant_key\", \"user_id\", \"created_date\")\n df_dim.show(5)\n \n # carga dimensión\n df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + \"/presentation/dim_applicant\", mode=\"overwrite\")\n \n else: \n # Lectura de datos\n df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ \"/presentation/dim_applicant\")\n\n # transformando la dimensión\n df_dim = df_stg_benefits.select(\"user_id\").distinct() \\\n .withColumn(\"created_date\", current_date()) \\\n .select(\"user_id\", \"created_date\")\n \n # indentificando incremental de datos\n df_delta_dim = df_dim.join(df_act_dim, df_dim[\"user_id\"] == df_act_dim[\"user_id\"],\"leftanti\") \\\n .withColumn(\"applicant_key\", expr(\"uuid()\")) \\\n .select(\"applicant_key\", \"user_id\", \"created_date\")\n\n df_new_dim = df_act_dim.union(df_delta_dim)\n\n df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + \"/tmp/dim_applicant\", mode=\"overwrite\")\n\n spark.read.parquet(config['AWS']['S3_BUCKET']+ \"/tmp/dim_applicant\") \\\n .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ \"/presentation/dim_applicant\", mode=\"overwrite\")",
"def load_dim_job_payment():\n config = configparser.ConfigParser() \n config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg') \n \n loadType = config.get('PARAM', 'LOADTYPE')\n\n spark = create_spark_session() \n # Lectura de datos de staging layer\n df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+\"/staging/job_posts\")\n \n if loadType==\"full\":\n #DIM JOB PAYMENT \n # transformando la dimensión\n df_dim = df_stg_jobposts.select(\"payment_period\", \"currency\", \"salary_minimum\", \"salary_maximum\").distinct() \\\n .withColumn(\"job_payment_key\", expr(\"uuid()\")) \\\n .select(\"job_payment_key\", \"payment_period\", \"currency\", \"salary_minimum\", \"salary_maximum\")\\\n .union(spark.createDataFrame([(\"-1\",\"-\",\"-\", 0, 0)],\n [\"job_payment_key\", \"payment_period\", \"currency\", \"salary_minimum\", \"salary_maximum\"]))\\\n .withColumn(\"created_date\", current_date())\n df_dim.show(5)\n \n # carga dimensión\n df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + \"/presentation/dim_job_payment\", mode=\"overwrite\")\n \n else: \n # Lectura de datos \n df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ \"/presentation/dim_job_payment\")\n\n # transformando la dimensión\n df_dim = df_stg_jobposts.select(\"payment_period\", \"currency\", \"salary_minimum\", \"salary_maximum\").distinct() \\\n .withColumn(\"created_date\", current_date()) \\\n .select(\"payment_period\", \"currency\", \"salary_minimum\", \"salary_maximum\", \"created_date\")\n \n # indentificando incremental de datos\n df_delta_dim = df_dim.join(df_act_dim, [df_dim[\"payment_period\"] == df_act_dim[\"payment_period\"],\n df_dim[\"currency\"] == df_act_dim[\"currency\"],\n df_dim[\"salary_minimum\"] == df_act_dim[\"salary_minimum\"],\n df_dim[\"salary_maximum\"] == df_act_dim[\"salary_maximum\"]],\"leftanti\") \\\n .withColumn(\"job_payment_key\", expr(\"uuid()\")) \\\n .select(\"job_payment_key\", \"payment_period\", \"currency\", \"salary_minimum\", \"salary_maximum\", \"created_date\") \n\n df_new_dim = df_act_dim.union(df_delta_dim)\n\n df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + \"/tmp/dim_job_payment\", mode=\"overwrite\")\n\n spark.read.parquet(config['AWS']['S3_BUCKET']+ \"/tmp/dim_job_payment\") \\\n .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ \"/presentation/dim_job_payment\", mode=\"overwrite\")",
"def instantiate_s3_export(rds_snapshots, s3_bucket, IamRoleArn, KmsKeyId, today):\n year = today.strftime(\"%Y\")\n month = today.strftime(\"%m\")\n get_latest_snapshot_name,get_latest_snapshot_time = rds_snapshots['DBSnapshotIdentifier'], rds_snapshots['SnapshotCreateTime']\n return rds_client.start_export_task(\n ExportTaskIdentifier='MWP-snapshot-monthly-%s' % today.strftime(\"%b%Y\"),\n SourceArn=rds_snapshots['DBSnapshotArn'],\n S3BucketName=s3_bucket,\n S3Prefix='{year}/{month}'.format(year=year, month=month),\n IamRoleArn=IamRoleArn,\n KmsKeyId=KmsKeyId,\n # ExportOnly=[\n # 'string',\n # ]\n )",
"def recreate():\n drop()\n create()"
] | [
"0.52746564",
"0.52375937",
"0.5230696",
"0.5223075",
"0.5196875",
"0.51740795",
"0.51623076",
"0.51501435",
"0.51136917",
"0.5090603",
"0.50678575",
"0.50417167",
"0.5035841",
"0.49755764",
"0.4975141",
"0.49224213",
"0.4905247",
"0.48547333",
"0.48272827",
"0.48148355",
"0.48111963",
"0.47942388",
"0.47928405",
"0.479239",
"0.47813937",
"0.47749168",
"0.47726429",
"0.4758737",
"0.47552592",
"0.475452"
] | 0.7906745 | 0 |
Create app integration package and publish versions | def _deploy_apps_function():
app_integration_package = AppIntegrationPackage(config=config, version=apps_version)
app_integration_package.create_and_upload()
return app_integration_package | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app_with_populated_format_versions(scope=\"package\"):\n app = create_app(\"test\")\n with app.app_context():\n db.create_all()\n\n storage_service = test_helpers.create_test_storage_service()\n storage_location = test_helpers.create_test_storage_location(\n storage_service_id=storage_service.id\n )\n _ = test_helpers.create_test_pipeline(storage_service_id=storage_service.id)\n fetch_job = test_helpers.create_test_fetch_job(\n storage_service_id=storage_service.id\n )\n\n aip1 = test_helpers.create_test_aip(\n uuid=AIP_1_UUID,\n create_date=datetime.strptime(AIP_1_CREATION_DATE, AIP_DATE_FORMAT),\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n\n aip2 = test_helpers.create_test_aip(\n uuid=AIP_2_UUID,\n create_date=datetime.strptime(AIP_2_CREATION_DATE, AIP_DATE_FORMAT),\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n\n _ = test_helpers.create_test_file(\n size=ORIGINAL_FILE_SIZE,\n uuid=ORIGINAL_FILE_1_UUID,\n name=\"original.jpg\",\n puid=JPEG_1_01_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_01_FORMAT_VERSION,\n aip_id=aip1.id,\n )\n\n _ = test_helpers.create_test_file(\n size=PRESERVATION_FILE_SIZE,\n uuid=PRESERVATION_FILE_1_UUID,\n name=\"preservation.jpg\",\n puid=JPEG_1_02_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_02_FORMAT_VERSION,\n aip_id=aip2.id,\n )\n\n _ = test_helpers.create_test_file(\n size=None,\n uuid=ORIGINAL_FILE_2_UUID,\n name=\"original.iso\",\n puid=\"fmt/468\",\n file_format=\"ISO Disk Image File\",\n format_version=None,\n aip_id=aip2.id,\n )\n\n yield app\n\n db.drop_all()",
"def create(index):\n # Get the project root\n project_root = get_project_root()\n package_name = os.path.basename(project_root)\n logging.info(\"Creating package for current project: \" + package_name)\n Packager(package_name, project_root).create(index)",
"def test_publish_deployment_run(self):\n pass",
"def deploy():\n build()\n collect()\n commit()\n push()",
"def deploy():\n build()\n copy()\n install()",
"def _provision_package(self):",
"def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)",
"def deploy_app(device_id, app_id, app_version):\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/applications\"}\n versions = esapp.App(kargs).get_app_version_by_id(app_id)\n\n kargs.update({\"url_path\": \"/tasks\"})\n if not app_version in versions:\n sys.exit(\"Fail: app_version \\\"%s\\\" not found, available list:%s\" \\\n %(str(app_version), str(jsn.dumps(versions))))\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.create_app_task(device_id, app_version, app_id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n sys.exit(\"Fail: error response\")\n\n try:\n click.echo(\"Success to create a task id: %s\" %(str(dict_resp[\"task_id\"])))\n except Exception as e:\n sys.exit(\"Fail: %s %s\" %(str(e), str(dict_resp)))\n\n if 'status' in dict_resp and dict_resp['status'].lower() != 'success':\n sys.exit(1)",
"def deploy_app(self, app_info):\n raise NotImplementedError",
"def deploy():",
"def deploy_api(dist_file, apt_req_file):\n _set_credentials()\n provision()\n _deploy_apt_requirements(apt_req_file)\n _deploy_python_package(dist_file)\n _sighup_api()\n _verify_api_heartbeat()\n send_build_stat(PROJECT_NAME, env.stage)",
"def release_pypi():\n local('python setup.py clean sdist register upload')",
"def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()",
"def upload(version=minv.__version__, release=\"1\"):\n version = version or minv.__version__\n put(\n join(\n env.builder_path,\n \"build/RPMS/minv-%s-%s.noarch.rpm\" % (version, release)\n ), \"\"\n )\n put(\"minv/package/minv_install_postgresql.sh\", \"\")\n sudo(\"chmod a+x minv_install_postgresql.sh\")\n with lcd(env.ink_path):\n for rpm in RPMS:\n put(rpm, \"\")",
"def install():\n execute(generate)\n execute(upload)",
"def installApp(dev, apkFile=None, appPackage=None, outFile=None, local=False):\n certFile = scriptRoot + '/certs/localtest.me.pem'\n with ServerContext(LocalMarketServer(certFile, config.officialServer)) as server:\n if apkFile:\n server.setApk(apkFile.read())\n elif appPackage:\n print('Downloading apk')\n apps = listApps(True)\n if appPackage not in apps:\n raise Exception('Unknown app: %s' % appPackage)\n server.setApk(apps[appPackage].release.asset)\n\n print('Starting task')\n xpdData = server.getXpd()\n\n print('Starting communication')\n # Point the camera to the web api\n result = installer.install(dev, server.host, server.port, xpdData, printStatus)\n if result.code != 0:\n raise Exception('Communication error %d: %s' % (result.code, result.message))\n\n result = server.getResult()\n\n if not local:\n try:\n RemoteAppStore(config.appengineServer).sendStats(result)\n except:\n pass\n\n print('Task completed successfully')\n\n if outFile:\n print('Writing to output file')\n json.dump(result, outFile, indent=2)\n\n return result",
"def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )",
"def test_create_deployment(self):\n pass",
"def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)",
"def task_deploy():\n client = boto3.client(\"lambda\")\n\n def upload_build():\n if function_exists(client):\n update_lambda_function(client)\n else:\n create_lambda_function(client)\n\n return {\"actions\": [upload_build], \"file_dep\": [f\"{DIST_DIR}/build.zip\"]}",
"def _publish(client, manifest_path, marketplace, skip, overrides):\n try:\n manifest_json = check_app_manifest(manifest_path, overrides, marketplace)\n app_url = \"{}://{}\".format(manifest_json[\"schemes\"][0], manifest_json[\"host\"])\n app_ip = urlparse(app_url).hostname\n\n if not skip:\n address = get_zerotier_address(marketplace)\n\n if address != app_ip:\n wrong_ip = click.style(\"It seems that the IP address that you put in your manifest file (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\") is different than your current 21market IP (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\")\\nAre you sure you want to continue publishing with \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\"?\")\n if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):\n switch_host = click.style(\"Please edit \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" and replace \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" with \") +\\\n click.style(\"[{}].\", bold=True)\n logger.info(switch_host.format(manifest_path, app_ip, address))\n return\n\n except exceptions.ValidationError as ex:\n # catches and re-raises the same exception to enhance the error message\n publish_docs_url = click.style(\"https://21.co/learn/21-publish/\", bold=True)\n publish_instructions = \"For instructions on publishing your app, please refer to {}\".format(publish_docs_url)\n raise exceptions.ValidationError(\n \"The following error occurred while reading your manifest file at {}:\\n{}\\n\\n{}\"\n .format(manifest_path, ex.args[0], publish_instructions),\n json=ex.json)\n\n app_name = manifest_json[\"info\"][\"title\"]\n app_endpoint = \"{}://{}{}\".format(manifest_json[\"schemes\"][0],\n manifest_json[\"host\"],\n manifest_json[\"basePath\"])\n\n logger.info(\n (click.style(\"Publishing {} at \") + click.style(\"{}\", bold=True) + click.style(\" to {}.\"))\n .format(app_name, app_endpoint, marketplace))\n payload = {\"manifest\": manifest_json, \"marketplace\": marketplace}\n try:\n response = client.publish(payload)\n except ServerRequestError as e:\n if e.status_code == 403 and e.data.get(\"error\") == \"TO600\":\n logger.info(\n \"The endpoint {} specified in your manifest has already been registered in \"\n \"the marketplace by another user.\\nPlease check your manifest file and make \"\n \"sure your 'host' field is correct.\\nIf the problem persists please contact \"\n \"[email protected].\".format(app_endpoint), fg=\"red\")\n return\n else:\n raise e\n\n if response.status_code == 201:\n response_data = response.json()\n mkt_url = response_data['mkt_url']\n permalink = response_data['permalink']\n logger.info(\n click.style(\n \"\\n\"\n \"You have successfully published {} to {}. \"\n \"You should be able to view the listing within a few minutes at {}\\n\\n\"\n \"Users will be able to purchase it, using 21 buy, at {} \",\n fg=\"magenta\")\n .format(app_name, marketplace, permalink, mkt_url)\n )",
"def upload_package(self, __contents):\n raise NotImplementedError",
"def test_create_deployment_entire(self):\n pass",
"def deploy(version):\n toolkit.readmegen(version)",
"def full_deploy(api_version='HEAD', renderer_version='HEAD',\n markup_renderer_version=None):\n setup()\n\n api.full_deploy(api_version)\n renderer.full_deploy(renderer_version)\n markup_renderer.full_deploy(markup_renderer_version)\n\n upload_nginx_conf()\n upload_uwsgi_conf()\n install_systemd_services()",
"def publish():\n pass",
"def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)",
"def upload():\n sh('python setup.py register sdist upload')",
"def PostModelVersionsDeployment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_release_deployment_run(self):\n pass"
] | [
"0.64178437",
"0.6273049",
"0.6126758",
"0.6119737",
"0.61063516",
"0.60995317",
"0.60610634",
"0.60608906",
"0.60279197",
"0.6006815",
"0.6002025",
"0.5996725",
"0.59186065",
"0.5889281",
"0.58661443",
"0.5863472",
"0.5857245",
"0.5812294",
"0.580601",
"0.58002794",
"0.57939917",
"0.5748665",
"0.5746625",
"0.5727573",
"0.5713684",
"0.5693241",
"0.56869227",
"0.56787485",
"0.56765956",
"0.5673568"
] | 0.7573291 | 0 |
Create Threat Intel downloader package and publish version | def _deploy_threat_intel_downloader():
threat_intel_package = ThreatIntelDownloaderPackage(
config=config,
version=ti_downloader_version
)
threat_intel_package.create_and_upload()
return threat_intel_package | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_pack():\n\n local(\"mkdir -p versions\")\n current = dt.now()\n current = current.now()\n tgz = \"web_static_{}.tgz\".format(current.strftime(\"%Y%m%d%H%M%S\"))\n working = local(\"tar -cavf versions/{} web_static\".format(tgz))\n\n if working.failed:\n return None\n else:\n return \"versions/{}\".format(tgz)",
"def do_pack():\n local(\"sudo mkdir -p versions\")\n date_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n name_file = \"versions/web_static{}.tgz\".format(date_time)\n local(\"sudo tar -cvzf {} web_static\".format(name_file))\n return name_file",
"def do_pack():\n d = datetime.now()\n local(\"mkdir -p versions\")\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz\\\n'.format(d.year, d.month, d.day, d.hour, d.minute, d.second)\n status = local(\"tar -cvzf\" + file_name + \" ./web_static/\", capture=True)\n if status.succeeded:\n return file_name\n return None",
"def do_pack():\n local(\"mkdir -p versions\")\n time = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file = local(\"tar -czvf versions/web_static_%s.tgz web_static\" % time)\n if file:\n return \"versions/web_static_{}.tgz\".format(time)\n else:\n return None",
"def do_pack():\n now = datetime.now()\n file_name = \"web_static_{}{}{}{}{}{}.tgz\".format(\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second\n )\n try:\n local(\"sudo tar -cvzf {} ./web_static\".format(file_name))\n local(\"sudo mkdir -p versions\")\n local(\"sudo mv ./{} versions/\".format(file_name))\n except:\n return (None)\n return (\"versions/{}\".format(file_name))",
"def do_pack():\n a = datetime.now()\n file_name = \"versions/web_static_{}{}{}{}{}{}.tgz\".format(a.year,\n a.month,\n a.day,\n a.hour,\n a.minute,\n a.second)\n try:\n print(\"Packing web_static to \" + file_name)\n local(\"mkdir -p versions\")\n\n local(\"tar -cvzf \" + file_name + \" web_static\")\n return file_name\n except:\n return None",
"def do_pack():\n a = datetime.now()\n file_name = \"versions/web_static_{}{}{}{}{}{}.tgz\\\n\".format(a.year if a.year > 999 else \"0\" + str(a.year),\n a.month if a.month > 9 else \"0\" + str(a.month),\n a.day if a.day > 9 else \"0\" + str(a.day),\n a.hour if a.hour > 9 else \"0\" + str(a.hour),\n a.minute if a.minute > 9 else \"0\" + str(a.minute),\n a.second if a.second > 9 else \"0\" + str(a.second))\n try:\n print(\"Packing web_static to \" + file_name)\n local(\"mkdir -p versions\")\n\n local(\"tar -cvzf \" + file_name + \" web_static\")\n return file_name\n except:\n return None",
"def do_pack():\n try:\n if not os.path.exists(\"versions\"):\n local(\"mkdir versions\")\n date = datetime.now()\n date = date.strftime(\"%Y%m%d%H%M%S\")\n new_versions = \"versions/web_static_{}.tgz\".format(date)\n local(\"tar -cvzf {} web_static\".format(new_versions))\n return new_versions\n except:\n return None",
"def do_pack():\n try:\n now = time.strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p ./versions')\n local('tar -cvzf versions/web_static_{}.tgz web_static'.format(now))\n return(\"versions/web_static_{}.tgz\".format(now))\n except:\n return None",
"def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result",
"def do_pack():\n date = (datetime.strftime(datetime.now(), \"%Y%m%d%H%M%S\"))\n name = \"versions/web_static_{}.tgz\".format(date)\n\n if not os.path.exists(\"./versions/\"):\n os.makedirs(\"./versions/\")\n try:\n local(\"tar -cvzf {} web_static\".format(name))\n return (name)\n except:\n return (None)",
"def do_pack():\n\n now = datetime.now()\n time_now = now.strftime(\"%Y%m%d%H%M%S\")\n archive_name = \"versions/web_static_\" + time_now + \".tgz\"\n local('mkdir -p versions')\n archive_command = local(\"tar -zcvf \" + archive_name + \" web_static\")\n\n if archive_command.succeeded:\n return archive_name\n\n return None",
"def do_pack():\n time_test = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_name = \"versions/web_static_\" + time_test + \".tgz\"\n command1 = \"mkdir -p versions\"\n command2 = \"tar -czvf \" + file_name + \" web_static\"\n local(command1)\n com = local(command2)\n if com.return_code == 0:\n return file_name\n else:\n return None",
"def do_pack():\n time = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n file_name = \"versions/web_static_{}.tgz\".format(time)\n try:\n local(\"mkdir -p ./versions\")\n local(\"tar --create --verbose -z --file={} ./web_static\"\n .format(file_name))\n return file_name\n except:\n return None",
"def do_pack():\n time = datetime.now()\n file = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n time.year,\n time.month,\n time.day,\n time.hour,\n time.minute,\n time.second\n )\n local('mkdir -p versions')\n if local('tar -cvzf ' + file + ' web_static').succeeded:\n return file\n return None",
"def do_pack():\n try:\n date = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n if isdir(\"versions\") is False:\n local(\"mkdir versions\")\n file_name = \"versions/web_static_{}.tgz\".format(date)\n local(\"tar -cvzf {} web_static\".format(file_name))\n return file_name\n except BaseException:\n return None",
"def do_pack():\n\n sd = '{0:%Y%m%d%H%M%S}'.format(datetime.now())\n fname = 'versions/web_static_' + sd + '.tgz'\n local('mkdir -p versions')\n rs = local('tar -cvzf ' + fname + ' web_static')\n\n if rs.succeeded:\n return fname\n return None",
"def do_pack():\n with api.settings(warn_only=True):\n isdir = os.path.isdir(\"versions\")\n if not isdir:\n mkdir = api.local(\"mkdir versions\")\n if mkdir.failed:\n return None\n sfx = datetime.now().strftime(\"%Y%m%d%M%S\")\n path = \"versions/web_static_{:s}.tgz\".format(sfx)\n tar = api.local(\"tar -cvzf {:s} web_static\".format(path))\n if tar.failed:\n return None\n size = os.stat(path).st_size\n print(\"wb_static packed: {} -> {}Bytes\".format(path, size))\n return path",
"def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None",
"def do_pack():\n\n datenow = datetime.now()\n full_date = datenow.strftime(\"%Y%m%d%H%M%S\")\n\n try:\n if not os.path.isdir(\"versions\"):\n local(\"mkdir versions\")\n local_command = local(\"tar -cvzf versions/web_static_{}.tgz web_static\"\n .format(full_date))\n return local_command\n except Exception:\n return None",
"def do_pack():\n try:\n if os.path.isdir(\"versions\") is False:\n os.mkdir(\"versions\")\n time = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n packed = 'versions/web_static_' + time + '.tgz'\n fabric.api.local(\"tar -cvzf {} web_static\".format(packed))\n return packed\n except:\n return None",
"def do_pack():\n with settings(warn_only=True):\n res = local(\"mkdir -p versions\")\n date = dt.now()\n pathname = \"versions/web_static_\"\n pathname += str(date.year)\n pathname += str(date.month)\n pathname += str(date.day)\n pathname += str(date.hour)\n pathname += str(date.minute)\n pathname += str(date.second)\n pathname += \".tgz\"\n res2 = local(\"tar -cvzf \" + pathname + \" web_static\")\n if res2.return_code == 0:\n return pathname",
"def do_pack():\n files = 'versions/web_static_{}{}{}{}{}{}.tgz'\\\n .format(T.year, T.month, T.day, T.hour, T.minute, T.second)\n local('mkdir -p versions')\n execute = local(\"tar -cvzf \" + files + \" ./web_static/\")\n if execute.succeeded:\n return files\n return None",
"def do_pack():\n\n local('mkdir -p versions')\n\n time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_time = 'versions/web_static_{}.tgz'.format(time)\n\n compressed = local(\"tar -cvzf \" + file_time + \" web_static/\")\n\n if compressed.succeeded:\n return file_time\n return None",
"def do_pack():\n date = datetime.datetime.now()\n archive = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second)\n local('mkdir -p versions')\n check = local('tar -cvzf {} web_static'.format(archive))\n if check.failed:\n return None\n else:\n return archive",
"def do_pack():\n time_f = '%Y%m%d%H%M%S'\n try:\n if not os.path.exists('versions'):\n local('mkdir versions')\n to = 'versions/web_static_{}.tgz'.format(\n datetime.now().strftime(time_f))\n\n local('tar -cvzf {} web_static'.format(to))\n return(to)\n except:\n return (None)",
"def do_pack():\n makedirs('versions', exist_ok=True)\n date = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n time.year, time.month, time.day, time.minute, time.second)\n check = local(\"tar -cvzf \" + date + \" ./web_static/\")\n if check.succeeded:\n return date\n return None",
"def do_pack():\n now = datetime.datetime.now()\n path = 'versions/web_static_' +\\\n '{}{}{}{}{}{}'.format(now.year, now.month,\n now.day, now.hour,\n now.minute, now.second) + '.tgz'\n\n local('mkdir -p versions')\n success = local('tar -cvzf {:s} web_static'.format(path), capture=True)\n if success.return_code == 0:\n return path",
"def do_pack():\n local(\"mkdir -p versions\", capture=True)\n time = datetime.now()\n date = time.strftime(\"%Y%m%d%H%M%S\")\n path = \"versions/web_static_{}.tgz\".format(date)\n if local(\"tar -czvf {} web_static/\".format(path), capture=False):\n return path\n else:\n return None",
"def do_pack():\n with api.settings(warn_only=True):\n isdir = os.path.isdir('versions')\n if not isdir:\n mkdir = api.local('mkdir versions')\n if mkdir.failed:\n return False\n suffix = datetime.now().strftime('%Y%m%d%M%S')\n path = 'versions/web_static_{}.tgz'.format(suffix)\n tar = api.local('tar -cvzf {} web_static'.format(path))\n if tar.failed:\n return False\n size = os.stat(path).st_size\n print('web_static packed: {} -> {}Bytes'.format(path, size))\n return path"
] | [
"0.68054",
"0.6779139",
"0.6761292",
"0.6759235",
"0.6692559",
"0.6660694",
"0.6659605",
"0.663814",
"0.6613684",
"0.65913004",
"0.65760773",
"0.65745074",
"0.6542268",
"0.6528658",
"0.6519053",
"0.6510979",
"0.6508965",
"0.64934427",
"0.64717263",
"0.64318633",
"0.64158547",
"0.64138556",
"0.6413225",
"0.6406644",
"0.640041",
"0.6370909",
"0.6370618",
"0.6357414",
"0.6355094",
"0.6258757"
] | 0.71114427 | 0 |
Converts an integer to the string representation of an IP address. | def int2ip(n: int) -> str:
return socket.inet_ntoa(struct.pack("!I", n)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def int_2_ip_str(ip_int):\n return socket.inet_ntoa(struct.pack(\"!I\", ip_int))",
"def int2ip(ipint: int) -> str:\n try:\n return socket.inet_ntoa(struct.pack(\"!I\", ipint))\n except struct.error:\n return socket.inet_ntop(\n socket.AF_INET6,\n struct.pack(\"!QQ\", ipint >> 64, ipint & 0xFFFFFFFFFFFFFFFF),\n )",
"def int_to_ip(self, ip_int):\n return socket.inet_ntoa(struct.pack(\"=I\", ip_int))",
"def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])",
"def int_ip_to_ip_str(ip_integer, ipv4=True):\n ip_byte_strings = []\n if ipv4:\n for i in range(3, -1, -1):\n byte = (ip_integer >> (i * 8)) & 0xFF\n ip_byte_strings.append(str(byte))\n ip_str = '.'.join(ip_byte_strings)\n return ip_str\n else:\n return None",
"def int32_to_ip(int32):\n return str(ipaddress.IPv4Address(int32))",
"def ip_to_str(address):\n return socket.inet_ntop(socket.AF_INET, address)",
"def int2ip(int_num):\n try:\n return inet_ntoa(pack(\"!I\", int_num))\n except Exception:\n return False",
"def AioNodeToIpAddressString(node):\n ip = aio_node_to_ip_address.AioNodeToIpAddress(node)\n return '%d.%d.%d.%d' % (ip.a, ip.b, ip.c, ip.d)",
"def ipwrap(address: Any) -> str:\n try:\n if not isinstance(address, int):\n ipaddress.IPv6Address(address)\n return f\"[{address}]\"\n except ValueError:\n pass\n\n return str(address)",
"def AioMessageTypeToIpAddressString(message_type):\n ip = network_config.AioMessageTypeToIpAddress(message_type)\n return '%d.%d.%d.%d' % (ip.a, ip.b, ip.c, ip.d)",
"def test_Int_to_IP(self):\n self.assertEqual(helpers.int_to_IP(0), '00000000000000000000000000000000')\n self.assertEqual(helpers.int_to_IP(2291809961), '10001000100110100011111010101001')",
"def format_ip(addr):\n return \\\n str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))",
"def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']",
"def convert_ipv4_to_str(n_int):\n return \".\".join([str(n_int >> n & 0xFF) for n in [24, 16, 8, 0]])",
"def intToCommaStr(int_ip):\n import sys\n if sys.version_info < (2, 7):\n return str(int_ip)\n else:\n int_ip = int(int_ip)\n return '{:,d}'.format(int_ip)",
"def binary_to_ip(self, binary_num):\n ip_parts = []\n for i in range (0, 4):\n first = 8*i\n second = first + 8\n ip_parts.append(str(int(binary_num[first:second], 2)))\n\n final_ip = '.'.join(ip_parts)\n return final_ip",
"def inet_to_str(inet) -> str:\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)",
"def longToIp(longIp):\n stringIp = socket.inet_ntoa(struct.pack(\"!L\", longIp))\n return stringIp",
"def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)",
"def inet_to_str(inet):\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)",
"def inet_to_str(inet):\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)",
"def inet_to_str(inet):\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)",
"def inet_to_str(inet):\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)",
"def inet_to_str(inet):\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)",
"def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))",
"def inet_to_str(inet):\n\t# first try ipv4 then ipv6\n\ttry:\n\t\treturn socket.inet_ntop(socket.AF_INET, inet)\n\texcept ValueError:\n\t\treturn socket.inet_ntop(socket.AF_INET6, inet)",
"def get_ip_type1(self) -> str:\n hex_ip = hexlify(self.message)[152:160]\n ip_addr = int(hex_ip[6:8] + hex_ip[4:6] + hex_ip[2:4] + hex_ip[0:2], 16)\n return inet_ntoa(pack(\"<L\", ip_addr))",
"def get_ip_type2(self) -> str:\n hex_ip = hexlify(self.message)[154:162]\n ip_addr = int(hex_ip[0:2] + hex_ip[2:4] + hex_ip[4:6] + hex_ip[6:8], 16)\n return inet_ntoa(pack(\">L\", ip_addr))",
"def pack_ip(self, str_ip):\n return struct.pack(\">BBBB\", *[ int(c) for c in str_ip.split(\".\") ])"
] | [
"0.8371373",
"0.8149403",
"0.81202126",
"0.76974213",
"0.7516056",
"0.73901826",
"0.7243528",
"0.72142303",
"0.71925503",
"0.71764463",
"0.7035509",
"0.7021221",
"0.6959364",
"0.6925901",
"0.6877639",
"0.68566036",
"0.65964144",
"0.6507255",
"0.6496198",
"0.6474135",
"0.6445155",
"0.6445155",
"0.6445155",
"0.6445155",
"0.6445155",
"0.64232403",
"0.640159",
"0.6390635",
"0.6348876",
"0.6343695"
] | 0.824994 | 1 |
Generates SSH server keys using RSA, writes them to the correct files, then returns the bytes that were written. This will overwrite any existing key files. | def generate_rsa_server_keys() -> Tuple[bytes, bytes]:
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
# Generate the key
key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=2048
)
    # Serialize the private key in the OpenSSH PEM format (what PrivateFormat.OpenSSH produces).
private_key = key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.OpenSSH,
crypto_serialization.NoEncryption())
# Get the public key in the standard OpenSSH format.
public_key = key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
# Write the two keys.
with open('host_rsa', 'wb') as f:
f.write(private_key)
with open('host_rsa.pub', 'wb') as f:
f.write(public_key)
# Return them.
return private_key, public_key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generateSSHKey(self, private_filepath, public_filepath):\n self.log.debug(\"Writing SSH keys to: \" + private_filepath + \" and \" + public_filepath)\n\n (ssh_dir, filename) = os.path.split(os.path.expanduser(private_filepath))\n if not os.path.exists(ssh_dir):\n self.log.debug(\"SSH Directory doesn't exist, creating \" + ssh_dir)\n os.makedirs(ssh_dir)\n\n key = paramiko.RSAKey.generate(1024)\n key.write_private_key_file(os.path.expanduser(private_filepath))\n \n with open(os.path.expanduser(public_filepath),\"w\") as public:\n public.write(\"%s %s\" % (key.get_name(), key.get_base64()))\n\n public.close()",
"def get_rsa_server_keys() -> Tuple[keys.Key, keys.Key]:\n try:\n # Load existing keys\n return keys.Key.fromFile('host_rsa'), keys.Key.fromFile('host_rsa.pub')\n\n except (FileNotFoundError, keys.BadKeyError):\n # Keys need to be generated.\n private_key, public_key = generate_rsa_server_keys()\n logger.info(\"New server keys were generated.\")\n\n return (\n keys.Key.fromString(private_key, type=\"PRIVATE_OPENSSH\"),\n keys.Key.fromString(public_key)\n )",
"def gen_keys_old(name):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n if not os.path.isfile('%s/%s.pem'%(d,name)):\n open('%s/%s.pem'%(d,name),'w').write(Crypto.PublicKey.RSA.generate(1024,os.urandom).exportKey('PEM'))",
"def generateKeys(self, keys_path, minion_id):\n #Change directory to keys path\n os.chdir(keys_path)\n #Give permission to the salt user\n self.console_manager.printRed(\"Giving permission to the salt user\")\n command = ['sudo', 'chmod', 'a+rwx', '.']\n self.console_manager.runCommandFromShell(command)\n #Generate keys\n self.console_manager.printRed(''.join([\"Generating keys for minion id: \", minion_id]))\n command = ['sudo', 'salt-key', ''.join(['--gen-keys=', minion_id])]\n self.console_manager.runCommandFromShell(command)\n #Give permission to the salt user\n self.console_manager.printRed(\"Allowing vagrant to handle private keys\")\n command = ['sudo', 'chmod', 'a+rwx', ''.join([minion_id, '.pub']), ''.join([minion_id, '.pem'])]\n self.console_manager.runCommandFromShell(command)\n #Add public key to the accepted minion folder\n self.console_manager.printRed(\"Copying the minion public key to the salt master public keys folder\")\n command = ['sudo', 'cp', ''.join([minion_id, '.pub']), ''.join(['/var/lib/salt/pki/master/minions/', minion_id])]\n self.console_manager.runCommandFromShell(command)\n command = ['sudo', 'cp', ''.join([minion_id, '.pub']), ''.join(['/etc/salt/pki/master/minions/', minion_id])]\n self.console_manager.runCommandFromShell(command)\n return",
"def create_pem_keys(self):\n self.random_rsa()\n\n return self.keys",
"def create_ssh_keys(self):\n self.random_ssh()\n\n return self.keys",
"def generateKeys(bits=256):\n #print \"generating first prime number\"\n p = generatePrime(bits/2)\n #print \"generating second prime number\"\n q = generatePrime(bits/2)\n \n assert p != q\n #print p, \"\\n\", q\n assert gcd(p*q, (p-1)*(q-1)) == 1\n \n priv = PrivateKey(p, q)\n pub = PublicKey(p, q)\n \n priv.saveToFile()\n pub.saveToFile()\n \n return priv, pub",
"def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)",
"def generateKeys(filename: str=\"monRSA\", keylength: int=10):\n minn = int(\"1\".ljust(int(keylength/2), '0'))\n maxx = int(\"9\".ljust(int(keylength/2), '9'))\n if args.verbose : print(\"min max of the possble primes :\", minn, maxx)\n pos1 = randint(minn, maxx)\n pos2 = randint(minn, maxx)\n\n if args.verbose : print(\"position of the primes chosen :\", pos1, pos2)\n p = primesieve.nth_prime(pos1)\n q = primesieve.nth_prime(pos2)\n \n # fixed values used to generate my key paire (i don't care if you hack me)\n # p = primesieve.nth_prime(97885344)\n # q = primesieve.nth_prime(85785656)\n \n # smaller primes used for testing\n # p = nth_prime(1256)\n # q = nth_prime(1478)\n \n if args.verbose : print(\"p\", p)\n if args.verbose : print(\"q\", q)\n n = p*q\n if args.verbose : print(\"n\", n)\n if args.verbose : print(\"length\", len(str(n)))\n nn = (p-1)*(q-1)\n if args.verbose : print(\"nn\",nn)\n temp = genED(nn)\n e = temp[0]\n if args.verbose : print(\"e\",e)\n d = temp[1]\n if args.verbose : print(\"d\",d)\n ed = temp[2]\n if args.verbose : print(\"ed\",ed)\n generateKeyFile(n, e, \"public\", filename)\n generateKeyFile(n, d, \"private\", filename)",
"def _rsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n content.write_string('ssh-rsa')\n content.write_mpint(numbers.public_numbers.n)\n content.write_mpint(numbers.public_numbers.e)\n content.write_mpint(numbers.d)\n content.write_mpint(numbers.iqmp)\n content.write_mpint(numbers.p)\n content.write_mpint(numbers.q)\n return content.data",
"def gen_keys(lname,dsa=False):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n for n in lname:\n if not os.path.isfile('%s/%s.pem'%(d,n)):\n key = Crypto.PublicKey.DSA.generate(512, os.urandom) if dsa else Crypto.PublicKey.RSA.generate(1024,os.urandom)\n open('%s/%s.pem'%(d,n),'w').write(key.exportKey('PEM'))",
"def generate(self, module):\n\n # If size is wrong, delete the key. A new key will be generated in the next step.\n if self.key_current_size != self.size and not self.ignore_size:\n self.remove()\n self.key_exists = False\n else:\n self.changed = False\n\n # If there is no key or user has set \"force\"\n if not self.key_exists or self.force:\n if self.type == \"RSA\":\n self.key = crypto_rsa.generate_private_key(public_exponent=65537, key_size=self.size, backend=crypto_default_backend())\n elif self.type == \"DSA\":\n self.key = crypto_dsa.generate_private_key(key_size=self.size, backend=crypto_default_backend())\n elif self.type == \"ECDSA\":\n if self.size == 256:\n self.curve = crypto_ec.SECP256R1()\n elif self.size == 384:\n self.curve = crypto_ec.SECP384R1()\n elif self.size == 521:\n self.curve = crypto_ec.SECP521R1()\n self.key = crypto_ec.generate_private_key(curve=self.curve, backend=crypto_default_backend())\n elif self.type == \"ED25519\":\n self.size = 128\n self.curve = \"EC25519\"\n else:\n raise HostkeyError(\"Unknown key type.\")\n\n if self.type != \"ED25519\":\n self.privkey = self.key.private_bytes(crypto_serialization.Encoding.PEM, crypto_serialization.PrivateFormat.PKCS8, crypto_serialization.NoEncryption())\n self.pubkey = self.key.public_key().public_bytes(crypto_serialization.Encoding.OpenSSH, crypto_serialization.PublicFormat.OpenSSH)\n\n try:\n privfile = os.open(self.fullpath, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, self.mode)\n os.write(privfile, self.privkey)\n os.close(privfile)\n pubfile = os.open(self.fullpath + \".pub\", os.O_WRONLY | os.O_CREAT | os.O_TRUNC, self.mode)\n os.write(pubfile, self.pubkey)\n os.close(pubfile)\n self.changed = True\n except IOError:\n self.remove()\n raise HostkeyError(get_exception())\n else:\n # use ssh-keygen to generate ED25519 Hostkeys\n # Keyfile must not exist, as there is no \"force-overwrite\" in ssh-keygen\n self.remove()\n retcode = subprocess.call([\"ssh-keygen\", \"-q\", \"-t\", \"ed25519\", \"-N\", '', \"-f\", self.fullpath])\n self.changed = True\n else:\n self.changed = False\n\n file_args = module.load_file_common_arguments(module.params)\n file_args['path'] = self.fullpath\n if module.set_fs_attributes_if_different(file_args, False):\n self.changed = True\n file_args['path'] = self.fullpath + \".pub\"\n file_args['mode'] = self.pubmode\n if module.set_fs_attributes_if_different(file_args, False):\n self.changed = True",
"def generate_rsa_key_pair(self):\n\t\tprint \"Started rsa key generation\"\n\t\tkey = RSA.generate(self.key_size, randfunc=self.random_number_generator)\n\t\t\t\n\t\tpub_key = key.publickey().exportKey()\n\t\tprint pub_key\n\t\t\n\n\t\tpriv_key = key.exportKey()\n\t\tprint \"Private key\", priv_key \n\t\tprint \"Note: Normally, the private key should be protected. For the purposes of this demo, I'm printing it to terminal.\"",
"def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)",
"def test_export_keys_no_private_key_written(self):\n mock_open_obj = mock_open()\n fake_public_key_path = 'fake-public-key-path'\n fake_private_key_path = 'fake-private-key-path'\n\n # file sizes get popped, so this will return 0 for the second call\n fake_file_sizes = [0, 10]\n\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_gpg.return_value = mock_gpg\n with patch('iceit.crypto.open', mock_open_obj, create=True):\n with patch('os.path.getsize') as mock_getsize:\n mock_getsize.side_effect = lambda x: fake_file_sizes.pop()\n encryptor = self.test_init()\n\n with self.assertRaises(IceItException):\n encryptor.export_keys(public_key_dest=fake_public_key_path, private_key_dest=fake_private_key_path)",
"def generate_keystream(self):",
"def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context",
"def push_key_to_slave(master_node, slave_vms):\n if(config.HOST_TYPE == 'linux'):\n command = 'cat /root/.ssh/id_rsa.pub'\n _, stdout, stderr = master_node.conn.execute_command(command)\n key_data = stdout[0]\n for slave_node in slave_vms:\n # Create directory if it doesn't exists\n slave_node.makedir('/root/.ssh')\n command = 'echo {} >> /root/.ssh/authorized_keys'.format(key_data)\n status, stdout, stderr = slave_node.conn.execute_command(command)\n if status:\n log.info(stdout)\n log.error(stderr)\n # disable strict host ksy checking\n data = 'Host *\\nStrictHostKeyChecking no\\nUserKnownHostsFile=/dev/null'\n command = \"echo -e '{}' > /root/.ssh/config\".format(data)\n status, stdout, stderr = master_node.conn.execute_command(command)\n else:\n command = 'cmd /c type \"C:\\\\Program Files (x86)\\\\freeSSHd\\\\RSAKey.cfg\"'\n _, stdout, stderr = master_node.conn.execute_command(command)\n key_data = \"\"\n for line in stdout:\n champ=key_data+line.strip('\\n')\n for slave_node in slave_vms:\n # Create directory if it doesn't exists\n slave_node.makedir('C:\\\\.ssh')\n command = 'cmd /c echo {} >> C:\\\\.ssh\\\\authorized_keys'.format(key_data)\n status, stdout, stderr = slave_node.conn.execute_command(command)\n if status:\n log.info(stdout)\n log.error(stderr)",
"def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()",
"def test_export_keys_no_public_key_written(self):\n mock_open_obj = mock_open()\n fake_public_key_path = 'fake-public-key-path'\n fake_private_key_path = 'fake-private-key-path'\n\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_gpg.return_value = mock_gpg\n with patch('iceit.crypto.open', mock_open_obj, create=True):\n with patch('os.path.getsize') as mock_getsize:\n mock_getsize.return_value = 0\n encryptor = self.test_init()\n\n with self.assertRaises(IceItException):\n encryptor.export_keys(public_key_dest=fake_public_key_path, private_key_dest=fake_private_key_path)",
"def setup_ssh_keys(output_keyfile=\"id_rsa\", ssh_type=\"rsa\", quickname=None):\n with settings(warn_only=True):\n local(\"mkdir -p $HOME/.ssh\")\n with cd(\"$HOME/.ssh\"):\n local(\"ssh-keygen -t %s -f %s\" % (ssh_type, output_keyfile))\n for host in env.hosts:\n local(\"scp %s.pub %s:temp_id_key.pub\" % (output_keyfile, host))\n with settings(warn_only=True):\n run(\"mkdir -p $HOME/.ssh\")\n run(\"cat $HOME/temp_id_key.pub >> ~/.ssh/authorized_keys\")\n run(\"rm $HOME/temp_id_key.pub\")\n run(\"chmod 600 $HOME/.ssh/authorized_keys\")\n run(\"chmod 700 $HOME/.ssh\")\n run(\"chmod go-w $HOME\")\n if quickname:\n update_ssh_shortcut(output_keyfile, quickname)",
"def generate(self, force=False):\n if not self.check_force_generate(force):\n return False\n\n mkdirs(self.path)\n\n command = [openssl, 'genrsa', '-out', self.key_file]\n if self.password:\n command += ['-passout', 'pass:{}'.format(self.password)]\n command += [str(self.key_size)]\n\n self.log.info('Generating RSA key')\n if not run_command(command):\n raise RuntimeError('RSA key generation failed')\n\n if not self.exists():\n raise RuntimeError(\n 'Key generation succeeded but key file does not exist. '\n 'This should not happen', self\n )",
"def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key",
"def write_keys(path, keys):\n p_keys = pickle.dumps(keys)\n b_keys = base64.b64encode(p_keys)\n with open(path, \"wb+\") as walletfile:\n walletfile.write(b_keys)",
"def gen_nacl_keys(path=paths.nacl_keys, *args, **kwargs) -> tuple:\n prvk, pubk = NaclCipher.generate_keys(path)\n prvk_b64 = prvk.encode(encoder=Base64Encoder)\n pubk_b64 = pubk.encode(encoder=Base64Encoder)\n\n return prvk_b64, pubk_b64",
"def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile",
"def generate_key():\n return get_random_bytes(KEY_SIZE)",
"def test_export_keys_success(self):\n mock_open_obj = mock_open()\n fake_public_key_path = 'fake-public-key-path'\n fake_private_key_path = 'fake-private-key-path'\n fake_public_key_string = 'my fake key string'\n fake_private_key_string = 'my fake private key string'\n\n def return_fake_key_data(key_id, private=False):\n if private:\n return fake_private_key_string\n else:\n return fake_public_key_string\n\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_gpg.export_keys.side_effect = return_fake_key_data\n mock_gpg.return_value = mock_gpg\n with patch('iceit.crypto.open', mock_open_obj, create=True):\n with patch('os.path.getsize') as mock_getsize:\n mock_getsize.return_value = 10\n encryptor = self.test_init()\n\n encryptor.export_keys(public_key_dest=fake_public_key_path, private_key_dest=fake_private_key_path)\n\n # assert the calls to write the public key\n (pub_name, pub_args, pub_kwargs) = mock_open_obj.mock_calls[0]\n self.assertEqual(pub_args[0], fake_public_key_path)\n self.assertEqual(pub_args[1], 'w')\n\n # assert the calls to write the public key data\n (pub_name, pub_args, pub_kwargs) = mock_open_obj.mock_calls[2]\n self.assertEqual(pub_args[0], fake_public_key_string)\n\n # assert the calls to write the private key\n (pub_name, pub_args, pub_kwargs) = mock_open_obj.mock_calls[4]\n self.assertEqual(pub_args[0], fake_private_key_path)\n self.assertEqual(pub_args[1], 'w')\n\n # assert the calls to write the private key data\n (pub_name, pub_args, pub_kwargs) = mock_open_obj.mock_calls[6]\n self.assertEqual(pub_args[0], fake_private_key_string)",
"def generateKeyFile(n: int, e: int, typ: str, filename: str):\n print(\"Generating\", typ, \"key\")\n message = str(hex(n) + '\\n' + hex(e))\n message_bytes = message.encode('ascii')\n base64_bytes = base64.b64encode(message_bytes)\n \n key = str(base64_bytes.decode(\"ascii\")) # we decode to remove the wierd characters\n \n if typ == \"private\" :\n f = open(filename + \".priv\", \"w\")\n f.write(\"---begin \" + filename + \" private key---\\n\")\n f.write(key+'\\n')\n f.write(\"---end \" + filename + \" key---\")\n f.close()\n elif typ == \"public\" :\n f = open(filename + \".pub\", \"w\")\n f.write(\"---begin \" + filename + \" public key---\\n\")\n f.write(key+'\\n')\n f.write(\"---end \" + filename + \" key---\")\n f.close()\n else :\n print(\"wrong type\")\n return",
"def generate_key(self):\n key = rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n )\n return key"
] | [
"0.7006498",
"0.6714016",
"0.6606957",
"0.6420449",
"0.62895983",
"0.62841547",
"0.6251957",
"0.624364",
"0.62175035",
"0.62054116",
"0.62047213",
"0.615605",
"0.6127582",
"0.612011",
"0.61119735",
"0.61077297",
"0.6107519",
"0.6101342",
"0.6089132",
"0.60842407",
"0.59800094",
"0.5962113",
"0.5957934",
"0.590548",
"0.5896622",
"0.5870128",
"0.586314",
"0.58450854",
"0.5802899",
"0.5783838"
] | 0.7349047 | 0 |
Aborts the given connections. | def abort_many(self, connections: typing.Iterable[twisted.conch.ssh.transport.SSHServerTransport]):
for conn in connections:
conn.transport.connectionLost(self.connectionAborted) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def abortConnection():\n pass",
"def killall(connections):\n for connection in connections:\n try: connection.close()\n except: pass",
"def abort(self) -> None:\n self._connector.abort_transaction()",
"def terminate(self):\r\n for call in self._deathCandidates.itervalues():\r\n call.cancel()\r\n\r\n self._deathCandidates = {}\r\n\r\n for connection in self._connections.copy():\r\n connection.destroy()\r\n assert len(self._connections) == 0\r\n\r\n Endpoint.terminate(self)",
"def killconnections(self):\n for conn in self._connections:\n try:conn.close()\n except:pass\n self._connections=[]",
"def disconnect(self):\n for connection in six.itervalues(self):\n connection.disconnect()",
"def disconnect(self):\n for conn in self.all_connections():\n conn.disconnect()",
"def stop(self):\n self.connection.abort()",
"def stop(self):\n self.connection.abort()",
"def abort(self):\n if self.transaction:\n token = self.transaction\n self.transaction = None\n self.client.abort(self.creds, token, self.environment)",
"def disconnect(self):\n for connection in six.itervalues(self.hosts):\n connection.disconnect()",
"def close_connections(self):\n self._connection.close_connection()",
"def abort(self, conn: twisted.conch.ssh.transport.SSHServerTransport):\n conn.transport.connectionLost(self.connectionAborted)",
"def _doAbort(self):\n self._cmdAbort()",
"def abort(self, protocol, exc):\n print \"*** bad data from Player %d: %s\" % (protocol.peer_id, exc)\n print \"*** aborting!\"\n for p in self.protocols.itervalues():\n p.loseConnection()\n reactor.stop()\n print \"*** all protocols disconnected\"",
"def abort_transaction(self) -> None:\n pass",
"def disconnect_all(self):\n all_conns = chain([_x[0] for _x in self._available_connections], self._in_use_connections)\n for connection in all_conns:\n try:\n connection.disconnect()\n except Exception as err:\n self.class_logger.warning(\"Error occurred while disconnecting connection: %s\" % (err, ))\n self._available_connections = []\n self._in_use_connections = set()",
"def shutdown(self):\n self._msg_disp.abort()\n self._conn_mgr.shutdown_connections()",
"def close_connection(self) -> None:\n self.batch.__exit__(*sys.exc_info())",
"def tpc_abort(self, transaction):\n raise NotImplementedError",
"def ctxAbort(*args, **kwargs)->None:\n pass",
"def abort(self, transaction):\n raise NotImplementedError",
"def Abort(self):\n handler = self.get_command_object(\"Abort\")\n handler()",
"def _interrupt(self, threadId, connection):\n try:\n connection.close()\n except pymysql.Error:\n pass",
"def n_close(conns):\n\n\tfor conn in conns:\n\t\tconn.close()",
"def delete_connection(connectionId=None):\n pass",
"def close_connections(self):\n self.db_connection.close_connections()",
"def delete_connections(\n self,\n host_groups=None, # type: List[models.ReferenceType]\n hosts=None, # type: List[models.ReferenceType]\n volumes=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n host_group_names=None, # type: List[str]\n host_names=None, # type: List[str]\n volume_names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n host_group_names=host_group_names,\n host_names=host_names,\n volume_names=volume_names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._connections_api.api20_connections_delete_with_http_info\n _process_references(host_groups, ['host_group_names'], kwargs)\n _process_references(hosts, ['host_names'], kwargs)\n _process_references(volumes, ['volume_names'], kwargs)\n return self._call_api(endpoint, kwargs)",
"def __close_connection_with_clients(self):\n for current_client in self.__connected_clients:\n self.__close_connection_with_client(current_client)",
"def abort(self):\r\n LOG(\"Aborting execution\")\r\n self.controller.abort()"
] | [
"0.72578377",
"0.66971624",
"0.64351416",
"0.6316366",
"0.63013047",
"0.6207053",
"0.6194674",
"0.61273026",
"0.61273026",
"0.61241126",
"0.608286",
"0.6022746",
"0.5989",
"0.59806645",
"0.5919448",
"0.5896091",
"0.5892902",
"0.5887989",
"0.5864397",
"0.58221346",
"0.57808983",
"0.5772336",
"0.5704112",
"0.5693362",
"0.56644183",
"0.5655946",
"0.5649315",
"0.5631402",
"0.5597118",
"0.559143"
] | 0.7448409 | 0 |
Starts the watchdog thread. | def start(self):
self._watchdog_thread.start() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()",
"def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()",
"def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()",
"def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)",
"def watchdog(self):\n pass",
"def start(self):\n self._setup_thread()\n self.thread.start()",
"def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()",
"def start(self):\n self._thread.start()",
"def start(self):\n self.stop() # Stop current process\n self._watchdog = threading.Thread(target=self._watch)\n self._defunctdog = threading.Thread(target=self._defunct)\n self._stdin_thread = threading.Thread(target=self.stdin_thread)\n logfile = settings.get_path(\"logs\") + '/' + self.name + '.log'\n try:\n self.stderr = open(logfile, 'w')\n except IOError:\n log.warning(\"There is no where ({0}) to put log of {1}\".format(logfile, self.name))\n self.stderr = None\n self._running.set()\n # if self.name == 'vlcvideo':\n # log.debug('HIGH PRIORITY')\n # self._popen = Popen( 'chrt -r 80 '+self.command, bufsize=0, executable=None, stdin=PIPE, stdout=PIPE, stderr=self.stderr,\n # close_fds=False, shell=True, cwd=None, env=None,\n # universal_newlines=False, startupinfo=None, creationflags=0, preexec_fn=None) \n # # preexec_fn=lambda : os.nice(-20)\n # else: \n self._popen = Popen(shlex.split(self.command), bufsize=0, executable=None, stdin=PIPE, stdout=PIPE,\n stderr=self.stderr,\n close_fds=False, shell=False, cwd=None, env=None,\n universal_newlines=False, startupinfo=None, creationflags=0, preexec_fn=None)\n # preexec_fn=lambda : os.nice(-20)\n self._watchdog.start()\n self._defunctdog.start()\n self._stdin_thread.start()\n register_thread(self)\n if self.onOpen:\n self.onEvent([self.onOpen])",
"def start(self):\n self.thread.start()",
"def startFactory(self):\n self.watchdog.start()\n super().startFactory()",
"def start(self):\n\n if self.thread is None:\n self.thread = threading.Thread(\n target=self.__run__,\n daemon=True,\n )\n\n self.thread.start()\n LOGGER.debug(\n \"Starting thread `%s` for event loop `%s`.\",\n self.ident,\n self.thread.ident,\n )",
"def start_thread(self):\n self.stop_thread()\n self.running = True\n self.run_thread = threading.Thread(target=self.run, daemon=True)\n self.run_thread.start()",
"def start(self):\n logging.info(\"ICMPecho health monitor plugin: Starting to watch \"\n \"instances.\")\n\n self.monitor_thread = threading.Thread(target = self.start_monitoring,\n name = self.thread_name)\n self.monitor_thread.daemon = True\n self.monitor_thread.start()",
"def starting(self) -> None:\n self._prepopulate_runnables()\n self._loop_handler = threading.Thread(target=self._loop)\n self._loop_handler.daemon = True\n self._loop_handler.start()",
"def start(self):\n threading.Thread(target=self.serve_forever).start()",
"def start(self):\n if self.isAlive == False:\n try:\n time.sleep(1)\n os.remove(os.path.join(self.inbox, 'stop_service.txt'))\n except:\n pass\n try:\n time.sleep(1)\n os.remove(os.path.join(self.inbox, 'ReadDirectoryChangesW.txt'))\n except:\n pass\n return\n \n serviceconfig.logger.debug('*** \"%s\": Starting the worker thread' % self.inbox)\n self.queue = Queue()\n t = Thread(target=self.worker)\n t.start()\n \n \"\"\"\n If files were dropped during the recovering process,\n we need to handle those files\n \"\"\"\n timer = Timer(1, self.triggerChangeEvent, kwargs={})\n timer.start()\n \n while self.isAlive:\n self.queue.put(win32file.ReadDirectoryChangesW (\n self.hDir,\n 1024,\n True,\n win32con.FILE_NOTIFY_CHANGE_FILE_NAME |\n win32con.FILE_NOTIFY_CHANGE_DIR_NAME |\n win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |\n win32con.FILE_NOTIFY_CHANGE_SIZE |\n win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |\n win32con.FILE_NOTIFY_CHANGE_SECURITY,\n None,\n None\n ))\n self.queue.join()\n timer.join()\n \n \"\"\"\n Delete the stop_service.txt file generated by stopping the service\n \"\"\"\n try:\n os.remove(os.path.join(self.inbox, 'stop_service.txt'))\n except:\n pass",
"def start(self):\n self._class_setup()\n\n self._inotify_fd = InotifyFileWatcher._libc.inotify_init()\n if self._inotify_fd < 0:\n error = OSError('failed call to inotify_init')\n error.errno = ctypes.get_errno()\n error.strerror = errno.errorcode[ctypes.get_errno()]\n raise error\n self._inotify_poll = select.poll()\n self._inotify_poll.register(self._inotify_fd, select.POLLIN)\n self._add_watch_for_path(self._directory)",
"def start_background_thread(self):\n self.runner = Runner(queue=queue, app_id=self.app_id)\n self.runner.start()\n # TODO: stop the thread at some point?",
"def start(self):\n listening_thread = Thread(\n target=self.sock.start_listening, daemon=True)\n listening_thread.start()\n sending_thread = Thread(target=self.sock.start_sending, daemon=True)\n sending_thread.start()\n\n ack_watch_thread = Thread(target=self.watch_for_acks, daemon=True)\n ack_watch_thread.start()\n\n ack_timeout_thread = Thread(\n target=self.watch_for_ack_timeout, daemon=True)\n ack_timeout_thread.start()\n\n self.report()",
"def start(self):\n if self.__running:\n raise RuntimeError('already started')\n self.__hook_thread = threading.Thread(target=self.__run_hook, args=(self.__build_runner(),))\n self.__running = True\n self.__hook_thread.start()",
"def start(self):\n \n self.thread.start()\n self.state = \"running\"",
"def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)",
"def background(self):\n self.thread = threading.Thread(target=self.run)\n self.thread.setDaemon(True)\n self.thread.start()",
"def run(self):\n self.monitor.start()",
"def run(self):\n\t\tt = threading.Thread(target=self.__temperature_thread)\n\t\tt.daemon = True\n\t\tt.start()",
"def start_daemon(self, *args, **kwargs):\n pass",
"def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)",
"def _start_thread(self, fn, daemon=False):\n daemon = Thread(target=fn, daemon=daemon)\n daemon.start()",
"def start(self) -> None:\n pass # for pydocstyle\n\n def serve() -> None:\n \"\"\"Serve forever.\"\"\"\n prefix = f\"In {ThreadedServer.__name__}.{serve.__name__}\"\n try:\n print(\n f\"{prefix}: Starting to serve {self.scenarios_dir} forever on: \"\n f\"http://localhost:{self.port}\",\n file=self.stdout,\n )\n\n self._httpd.serve_forever()\n\n print(f\"{prefix}: Stopped serving forever.\", file=self.stdout)\n\n except Exception as error:\n print(\n f\"{prefix}: Caught an exception in the HTTPD server \"\n f\"(it will be raised at shutdown): {error}\",\n file=self.stderr,\n )\n\n with self._server_exception_lock:\n self._server_exception = error\n\n self._work_thread = threading.Thread(target=serve)\n self._work_thread.start()"
] | [
"0.71217984",
"0.69169587",
"0.6903784",
"0.6872781",
"0.67613095",
"0.6656482",
"0.66208094",
"0.66070104",
"0.65288895",
"0.6528326",
"0.65111154",
"0.6494165",
"0.64625794",
"0.64450014",
"0.64385915",
"0.6376325",
"0.6373174",
"0.63639104",
"0.6355011",
"0.633504",
"0.6303347",
"0.62937397",
"0.62700707",
"0.6250083",
"0.6228534",
"0.6214563",
"0.6211892",
"0.6208017",
"0.62075555",
"0.6187285"
] | 0.85973275 | 0 |
Shuts down the watchdog thread. | def stop(self):
self._watchdog_flag.set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)",
"def stop(self):\n if self.is_running():\n self._stdin_queue.put_nowait(None) # Ask to stop the stdin_thread\n try:\n self._popen.terminate() # Send SIGTERM to the player, asking to stop\n log.debug('SIGTERM ' + self.name)\n except:\n pass\n self._watchdog.join(timeout=0.2) # Waiting maximum of 250 ms before killing brutaly the processus\n if self._watchdog.is_alive():\n self._popen.kill() # Send SIGNKILL to brutaly kill the process\n log.warning('KILLED ' + self.name)\n unregister_thread(self)\n self.join() # Wait for watchdog thread to terminate",
"def shutdown(self):\n\t\tself._log.info('shutting down DHT')\n\t\tself._threads.shutdown() # Trigger shutdown of maintainance threads\n\t\tself._krpc.shutdown() # Stop listening for incoming connections\n\t\tself._nodes.shutdown()\n\t\tself._threads.join() # Trigger shutdown of maintainance threads",
"def shutdown(self):\n self.socket_thread.stop()",
"def shutdown(self) -> None:",
"def shutdown(self) -> None:",
"def stop(self):\n logging.info(\"Shutting down thread...\")\n self.disconnect()\n self.running = False",
"async def shutdown_gracefully(self) -> None:",
"async def shutdown_gracefully(self) -> None:",
"def shutdown(self):\n try:\n self.driver.stop()\n except:\n logging.exception(\"Could not stop driver on shutdown\")\n\n self.arduino.stop()",
"def shutdown(self):\n self._shutdown(None, None)\n self._running = False",
"def shutdown(self):\n\n pass",
"def shutdown(self) -> None:\n pass",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def shutup():\n try:\n ttsEng.shutup()\n except Exception, e:\n logging.error(e)",
"def shutdown(self):\n ...",
"def shutdown(self):\n self.action('shutdown')",
"def shutdown(self) -> None:\n if self.is_alive():\n self.terminate()\n else:\n logger.warning(\"DHT shutdown has no effect: dht process is already not alive\")",
"def shutdown(self):",
"def tear_down_cleanup(self):\n self.hass.stop()",
"def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()",
"def shutdown(self):\n\n if self._pyro_daemon:\n self._pyro_daemon.shutdown()",
"def quit(self):\n self.switch_off()\n self._blinking_thread.stop()",
"def tear_down_all(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)\n self.dut.kill_all()",
"def shutdown(self):\n self._ifccountslock.acquire()\n self._ifccounts.clear()\n self._ifccountslock.release()\n self._objslock.acquire()\n if len(self._objs) == 0:\n self._objslock.release()\n return\n logger.info(\"Stopping EMANE daemons.\")\n self.deinstallnetifs()\n self.stopdaemons()\n self.stopeventmonitor()\n self._objslock.release()",
"async def shutdown(self):\n if self._unsub_stop:\n self._unsub_stop()\n self._unsub_stop = None\n await self.device.shutdown()",
"def terminate(self):\n print('Terminating Revshell thread.')\n self.server.close()",
"def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()"
] | [
"0.67894304",
"0.6773918",
"0.66595274",
"0.66030633",
"0.6601126",
"0.6601126",
"0.65712124",
"0.65464056",
"0.65464056",
"0.6538498",
"0.6519675",
"0.6505519",
"0.6490607",
"0.6484351",
"0.6484351",
"0.6484351",
"0.6464716",
"0.64617556",
"0.6446465",
"0.64330095",
"0.6422914",
"0.63994914",
"0.6396398",
"0.63840336",
"0.6348962",
"0.6336325",
"0.63235587",
"0.6318178",
"0.6303711",
"0.62912047"
] | 0.7268581 | 0 |
Called when the factory is starting up. Starts the watchdog thread. | def startFactory(self):
self.watchdog.start()
super().startFactory() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\n self._watchdog_thread.start()",
"def watchdog(self):\n pass",
"def starting(self) -> None:\n self._prepopulate_runnables()\n self._loop_handler = threading.Thread(target=self._loop)\n self._loop_handler.daemon = True\n self._loop_handler.start()",
"def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()",
"def start(self):\n self._setup_thread()\n self.thread.start()",
"def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()",
"def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()",
"def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()",
"def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)",
"def start_daemon(self, *args, **kwargs):\n pass",
"def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)",
"def start(self):\n self._class_setup()\n\n self._inotify_fd = InotifyFileWatcher._libc.inotify_init()\n if self._inotify_fd < 0:\n error = OSError('failed call to inotify_init')\n error.errno = ctypes.get_errno()\n error.strerror = errno.errorcode[ctypes.get_errno()]\n raise error\n self._inotify_poll = select.poll()\n self._inotify_poll.register(self._inotify_fd, select.POLLIN)\n self._add_watch_for_path(self._directory)",
"def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()",
"def start(self):\n listening_thread = Thread(\n target=self.sock.start_listening, daemon=True)\n listening_thread.start()\n sending_thread = Thread(target=self.sock.start_sending, daemon=True)\n sending_thread.start()\n\n ack_watch_thread = Thread(target=self.watch_for_acks, daemon=True)\n ack_watch_thread.start()\n\n ack_timeout_thread = Thread(\n target=self.watch_for_ack_timeout, daemon=True)\n ack_timeout_thread.start()\n\n self.report()",
"def start(self):\n\n if self.thread is None:\n self.thread = threading.Thread(\n target=self.__run__,\n daemon=True,\n )\n\n self.thread.start()\n LOGGER.debug(\n \"Starting thread `%s` for event loop `%s`.\",\n self.ident,\n self.thread.ident,\n )",
"def startup(self):\n pass",
"def start(self):\n with self._lock:\n if not self.started():\n self._started = None\n getattr(self.factory, 'start_' + self.class_name())(self)",
"def start(self):\n self._thread.start()",
"def setUp(self):\n self.server = ResourceManagerServer(log_to_screen=False)\n self._server_thread = Thread(target=self.server.start)\n\n self._server_thread.start()\n time.sleep(self.SERVER_STARTUP_TIME)",
"def start(self):\n if self.__running:\n raise RuntimeError('already started')\n self.__hook_thread = threading.Thread(target=self.__run_hook, args=(self.__build_runner(),))\n self.__running = True\n self.__hook_thread.start()",
"def start(self):\n print('start watching {}'.format(self.conf_directory))\n self.conf_observer.start()",
"def start( self ):\n\n self.service()",
"def run(self):\n\t\tt = threading.Thread(target=self.__temperature_thread)\n\t\tt.daemon = True\n\t\tt.start()",
"def start(self) -> None:\n pass # for pydocstyle\n\n def serve() -> None:\n \"\"\"Serve forever.\"\"\"\n prefix = f\"In {ThreadedServer.__name__}.{serve.__name__}\"\n try:\n print(\n f\"{prefix}: Starting to serve {self.scenarios_dir} forever on: \"\n f\"http://localhost:{self.port}\",\n file=self.stdout,\n )\n\n self._httpd.serve_forever()\n\n print(f\"{prefix}: Stopped serving forever.\", file=self.stdout)\n\n except Exception as error:\n print(\n f\"{prefix}: Caught an exception in the HTTPD server \"\n f\"(it will be raised at shutdown): {error}\",\n file=self.stderr,\n )\n\n with self._server_exception_lock:\n self._server_exception = error\n\n self._work_thread = threading.Thread(target=serve)\n self._work_thread.start()",
"def start(self):\n logging.info(\"ICMPecho health monitor plugin: Starting to watch \"\n \"instances.\")\n\n self.monitor_thread = threading.Thread(target = self.start_monitoring,\n name = self.thread_name)\n self.monitor_thread.daemon = True\n self.monitor_thread.start()",
"def run_forever(self):\n self.factory.manager.run_forever()",
"def start(self):\n self.thread.start()",
"def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()",
"def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()",
"def start(self):\n thread.start_new_thread(Pyro4.naming.startNSloop, tuple())\n\n self.ns = Pyro4.locateNS()\n if self.ns == None:\n logging.error('Cannot locate Pyro NS.')\n return\n\n daemon = export(self)\n thread.start_new_thread(daemon.requestLoop, tuple())\n thread.start_new_thread(self.healthcheck, tuple())\n logging.info('%s started' % self.name)"
] | [
"0.79512626",
"0.67765105",
"0.6722657",
"0.648831",
"0.6435419",
"0.6422165",
"0.63980657",
"0.627728",
"0.62633985",
"0.6260767",
"0.622428",
"0.6201285",
"0.62006295",
"0.6145395",
"0.61417264",
"0.61376876",
"0.6122323",
"0.60849005",
"0.60740936",
"0.6072932",
"0.60684186",
"0.6057753",
"0.6051294",
"0.6012194",
"0.6005794",
"0.60011154",
"0.6000948",
"0.59902346",
"0.59902346",
"0.5987232"
] | 0.7969129 | 0 |
Called when the factory is being shut down. Ends the watchdog thread. | def stopFactory(self):
self.watchdog.stop()
super().stopFactory() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shutdown(self):\n logger.info(\"Shutting down the factory\")\n self.to_shutdown = True",
"def shutdown(self) -> None:",
"def shutdown(self) -> None:",
"def shutdown(self):\n\n pass",
"def shutdown(self):\n ...",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def shutdown(self):",
"def shutdown(self) -> None:\n pass",
"def shutdown(self):\n #=======================================================================\n #\n # TODO: Place any cleanup code here.\n #\n #=======================================================================\n return",
"def shutdown(self):\n try:\n self.driver.stop()\n except:\n logging.exception(\"Could not stop driver on shutdown\")\n\n self.arduino.stop()",
"def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)",
"def stop(self):\n self._watchdog_flag.set()",
"def shutdown(self):\n raise NotImplementedError",
"def shutdown(self):\n self.exit_event.set()",
"def shutdown(self):\n\n reactor.callLater(0, reactor.stop)",
"def shutdown(self):\n\n raise NotImplementedError",
"def shutdown(self):\n raise NotImplementedError('Abstract Method: shutdown')",
"def shutdown(self):\n self._shutdown(None, None)\n self._running = False",
"def _shutdown(self):",
"def shutdown(self):\n self._ifccountslock.acquire()\n self._ifccounts.clear()\n self._ifccountslock.release()\n self._objslock.acquire()\n if len(self._objs) == 0:\n self._objslock.release()\n return\n logger.info(\"Stopping EMANE daemons.\")\n self.deinstallnetifs()\n self.stopdaemons()\n self.stopeventmonitor()\n self._objslock.release()",
"def shutdown(self):\n self.logger.info(\"Received graceful shutdown request\")\n self.stop()",
"def tear_down_cleanup(self):\n self.hass.stop()",
"def shutdown(self):\n self.exit_app()",
"def shutdown(self):\n\n if self._pyro_daemon:\n self._pyro_daemon.shutdown()",
"def end(self):\n self.shutdown = True",
"def shutdown(self):\n try:\n if self.working and self.exiting.acquire():\n self.bot('shutting down...')\n self.working = False\n self._handleEvent(self.getEvent('EVT_STOP'))\n if self._cron:\n self._cron.stop()\n self.bot('shutting down database connections...')\n self.storage.shutdown()\n except Exception, e:\n self.error(e)",
"def shutdown() -> None: # TODO Better place for this code\n # TODO Safe landing\n pass",
"def on_shutdown(self) -> None:\n pass"
] | [
"0.7936102",
"0.74216986",
"0.74216986",
"0.7417915",
"0.7410274",
"0.7405886",
"0.7405886",
"0.7405886",
"0.73579687",
"0.7253566",
"0.71567553",
"0.7110801",
"0.71059704",
"0.7041802",
"0.70151883",
"0.6975538",
"0.6938473",
"0.6936065",
"0.6928262",
"0.6928152",
"0.6919136",
"0.69170505",
"0.6907151",
"0.68973243",
"0.68788147",
"0.6876767",
"0.687633",
"0.6872054",
"0.6859566",
"0.6858895"
] | 0.76305586 | 1 |
Return a dictionary of prime numbers. Reads prime numbers from an OpenSSH-compatible moduli file. | def getPrimes(self):
try:
primes_file = open(self.primes_path, 'r')
except FileNotFoundError:
logger.warning(f"Unable to open moduli file '{self.primes_path}'. This will reduce the number of"
f"available key exchange algorithms, and may affect compatibility.")
return {}
try:
primes = {}
for line in primes_file:
line = line.strip()
if not line or line[0] == '#':
continue
tim, typ, tst, tri, size, gen, mod = line.split()
size = int(size) + 1
gen = int(gen)
mod = int(mod, 16)
if size not in primes:
primes[size] = []
primes[size].append((gen, mod))
return primes
finally:
primes_file.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_prime_numbers():\n print('Loading prime numbers...')\n path_to_primes_file = f'{os.getcwd()}{os.sep}PrimeNumbers.txt'\n primes = list(loadtxt(path_to_primes_file, dtype=str, comments=\"#\", delimiter=\", \", unpack=False))\n return [int(p) for p in primes]",
"def get_prime_array(number_of_primes) -> array:\n p = array('i')\n with open(f'prime{number_of_primes}.bin', 'rb') as prime_file:\n p.fromfile(prime_file, number_of_primes) \n return p",
"def load_priors(file_name):\n with open(file_name, \"r\") as fp:\n priors = json.load(fp)\n return priors",
"def generatePrime(ls_primes, feed_pipe,return_dict):\n local_primes = []\n while True:\n n = feed_pipe.recv()\n if n == -1: # sentinel given by distributor.\n break\n else:\n is_prime = True\n\n ##check for divisibility\n ## no need to check for 2 since all are odd numbers\n for prime in ls_primes[1:]:\n if n%prime == 0:\n is_prime = False\n break\n\n ##if the number is prime, append to global list\n if is_prime:\n local_primes.append(n)\n if len(local_primes) >0:\n return_dict[os.getpid()] = local_primes\n return return_dict\n return 0",
"def parsePrimesFromTxt(filename :str, keySep:str ,itemSep:str) -> dict:\r\n try:\r\n with open(filename,\"r\") as file:\r\n data = file.read()\r\n if itemSep != \",\" :\r\n data = data.replace(itemSep,\",\")\r\n if keySep != \":\":\r\n data = data.replace(keySep,\":\")\r\n data = data.lstrip(\"{\").rstrip(\"}\")\r\n return literal_eval(\"{\"+data+\"}\")\r\n except Exception as e:\r\n print(\"Exception Occurred\",e)",
"def getPrime(self, group=17):\n default_group = 17\n\n primes = {\n 5: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF,\n 14: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF,\n 15: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF,\n 16: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF,\n 17:\n 
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF,\n 18:\n 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F5
6E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF\n }\n\n if group in primes.keys():\n return primes[group]\n else:\n print(\"Error: No prime with group %i. Using default.\" % group)\n return primes[default_group]",
"def gen_primes():\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n\n D = {}\n\n # The running integer that's checked for primeness\n\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1",
"def primes(n):\n primfac = {}\n primfac = defaultdict(lambda: 0, primfac)\n while (n % 2) == 0:\n primfac[2] += 1 \n n //= 2\n d = 3\n while d*d <= n:\n while (n % d) == 0:\n primfac[d] += 1 # supposing you want multiple factors repeated\n n //= d\n d += 2\n if n > 1:\n primfac[n] = 1\n return primfac",
"def find_prime_divisors(self, num):\n # If the number is prime, it is only divisible by itself.\n if pe_005.is_prime(num) or num < 2:\n return {num: 1}\n\n # If there were no primes searched for, then search for primes.\n if len(self._primes) <= 0:\n self.find_primes(num)\n\n results = dict()\n # Loop through the sorted primes list and stop when the prime is larger than the given number.\n for prime in self._primes[::-1]:\n if num <= 0:\n break\n\n # Count the number of divisions of the prime number into the current number.\n count, num = pe_005.count_divisions(num, prime)\n if count > 0:\n results[prime] = count\n\n return results",
"def readpar(file):\n par={}\n with open(file) as f:\n for line in f:\n if \"Gamma\" or \" \" in line:\n break # ignore header line\n for line in f:\n line=line.rstrip() # remove blank lines and whitespace\n if line and not \"title\" in line:\n (key, val) = line.split(\":\")\n par[str(key)] = val\n return par",
"def main():\n primes = getNPrime(100)\n\n write_primes(primes, 'output.csv')\n\n prime_list = read_primes('output.csv')\n\n print(prime_list)",
"def primes():\r\n try:\r\n args = request.args\r\n start_num, end_num = validate_request(args)\r\n # cache key\r\n key = f'primes:{start_num}:{end_num}'\r\n rv = cache.get(key)\r\n if rv is None: # not in cache\r\n job = get_primes_list.queue(start_num, end_num)\r\n print(job.get_id())\r\n cache.set(key, job.get_id(), timeout=3600)\r\n return jsonify(job.get_id()), 200\r\n else:\r\n return jsonify(rv), 200\r\n except Exception as e:\r\n raise InvalidUsage(\"Error Processing request {}\".format(e))",
"def primes(numOfPrimes):\n\n primes = []\n # we want to start at 2003, which is the first prime after 2000, seeing as\n # we absolutely need to fit all 2000 keys on the hash table,\n i = 2003\n\n while len(primes) < numOfPrimes:\n isPrime = True\n\n for k in range(2, i):\n if i % k == 0:\n isPrime = False\n break\n\n if isPrime:\n primes.append(i)\n i += 1\n\n return primes",
"def get_primes(self, startnum=2):\n i = startnum\n while True:\n if self.is_prime(i):\n yield i\n i += 1",
"def allprimes():\n\n key = [] # The empty list is initiated\n\n for val in range(2, 101): # Set to obtain all prime values from 2 to 100\n if val >= 2: # They are then stored into the list\n for n in range(2, val): # The values have to be greater than 2 as 1 cannot\n if not (val % n): # be included\n break # Pulls all prime numbers by iterating through them\n else: # If a number does not obtain a remainder that means\n key.append(val) # it cannot be divisable by anything but it's own\n # number it is appended as a prime number\n return key",
"def gen_primes():\n\n # Maps composites (=non-primes) to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\" indefinitely,\n # but only as long as required by the current number being tested.\n D = {}\n\n q = 1 # the running integer that is checked for primeness\n while (q := q+1):\n if q not in D:\n # q is a new prime. Yield it and mark its first multiple that is\n # not already marked in previous iterations\n yield q\n D[q*q] = [q]\n else:\n # q is composite. D[q] is the list of primes that divide it. Since\n # we have reached q, we no longer need it in the map, but we will\n # mark the next multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p+q, []).append(p)\n del D[q]",
"def gen_primes():\n\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current number\n # being tested\n\n D = {}\n\n # The runing integer that is checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next multiples\n # of its witnesses to prepare for larger numbers\n\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1",
"def primes():\n D = {} # map composite integers to primes witnessing their compositeness\n q = 2 # first integer to test for primality\n while True:\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory\n q += 1",
"def getNums():\n key = allprimes() # Empty list for key is created\n\n # Runs code endlessly as no instruction was\n while True: # given to end the code\n num = input(\"Please enter a number:\") # Changed number to integer as it's outputted\n try: # as a string from input\n selected_num = int(num) # Asked for number with try function\n except:\n print(\"\\n Please input only a number!\") # Only accepts a number\n continue\n if selected_num > 100: # Limits number to 100 as that was limit\n print(\"Please only select a number up to 100.\")\n continue\n if selected_num in key:\n print(\"You have picked a prime number please select another number.\")\n continue\n for i, number in enumerate(key): # Iterator function to run through key\n complementary = selected_num - number # Initiated formula\n if complementary in key[i:]: # Obtained complimentary number if available\n print(str(selected_num) + \" = {} + {}\".format(number, complementary))\n break # Printed values as requested for assignment",
"def get_prime_factors_by_number(self, number):\n if int(number) < 2:\n print \"this method needs number >= 2\"\n return {}\n ret = {}\n import math\n # use math.sqrt for speedup\n if number >= 4:\n number_sqrt = math.sqrt(number)\n else:\n number_sqrt = 2\n primes = self.get_primes_by_limit_number(number_sqrt)\n num = number\n for p in primes:\n if num == 1:\n break\n while num % p == 0:\n num /= p\n if p in ret:\n ret[p] = ret[p] + 1\n else:\n ret[p] = 1\n if num == number:\n # in this case, number is prime\n ret[number] = 1\n elif num != 1:\n ret[num] = 1\n return ret",
"def getPrime(bits):\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p",
"def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes",
"def prime_factors(number: int) -> dict:\n f = {}\n i = 2\n while number > 1 and number >= i:\n if number % i == 0:\n if i not in f:\n f[i] = 1\n else:\n f[i] += 1\n number //= i\n else:\n i += 1\n return f",
"def readInitialAndPriors(infile):\n init_priors = OrderedDict()\n f = open(infile).readlines()\n for line in f:\n print(line.split())\n if line.split()[0] == 'norm_range':\n name, lower, upper = line.split()\n init_priors[name] = (float(lower), float(upper))\n elif line.split()[0].startswith('s1') or line.split()[0].startswith('s2'):\n name, initial = line.split()\n init_priors[name] = OrderedDict()\n init_priors[name]['initial'] = float(initial)\n else:\n name, initial, sigma_walkers, prior_l, prior_h = line.split()\n init_priors[name] = OrderedDict()\n init_priors[name]['initial'] = float(initial)\n init_priors[name]['sigma_walkers'] = float(sigma_walkers)\n init_priors[name]['prior_l'] = float(prior_l)\n init_priors[name]['prior_h'] = float(prior_h)\n if 'norm_range' not in init_priors.keys():\n init_priors['norm_range'] = None\n return init_priors",
"def primes(n):\n return [i for i, v in enumerate(prime_cache(n)) if v]",
"def factorize(n:int,primesDict:dict = primesDict):\r\n\r\n \r\n if isPrime(n,primesDict):\r\n return {n:1}\r\n\r\n factors = {}\r\n\r\n lastPrime = getLastPrime(primesDict)\r\n print (lastPrime,\"Lastprimes\")\r\n if lastPrime < n:\r\n print (\"Creating DictS\")\r\n\r\n prma(n,lastPrime,primesDict)\r\n\r\n for i in primesDict:\r\n if n%i == 0 :\r\n count = 0\r\n while n % i**(count+1) == 0 :\r\n count+=1 \r\n factors[i]= count\r\n\r\n return factors",
"def getPrimeFactors(num):\n n = num\n primes = {}\n\n p = 2\n sqrt = math.sqrt(num)\n\n def checkAndUpdate(inc):\n nonlocal n\n nonlocal p\n nonlocal primes\n if n % p == 0:\n if str(p) in primes.keys():\n primes[str(p)] += 1\n else:\n primes[str(p)] = 1\n n /= p\n else:\n p += inc\n \n while p == 2 and p <= n:\n checkAndUpdate(1)\n while p <= n and p <= sqrt:\n checkAndUpdate(2)\n if len(primes.keys()) == 0:\n primes[str(num)] = 1\n elif n != 1:\n primes[str(n)] = 1\n return primes",
"def primeFactorsGivenPrimes(n, primes):\n factors = {}\n for p in primes: \n while n % p == 0:\n n //= p\n factors[p] = factors.get(p,0)+1\n if n < p*p:\n if n > 1:\n factors[n] = factors.get(n,0)+1\n return factors\n return factors",
"def generarPrimo(self, bits):\n while True:\n p = primes.bigppr(bits)\n if p & 3 == 3:\n return p",
"def load_pecfile_dict(pec_file):\n file_dict = {}\n while True:\n record = pec_file.readline()[:-1]\n if record == '':\n break\n else:\n record = pec.PecRecSeq(record)\n if record.sequence in file_dict:\n return 1\n else:\n file_dict[record.sequence] = record\n return file_dict"
] | [
"0.72578806",
"0.60250926",
"0.57705534",
"0.5733895",
"0.57035697",
"0.56756115",
"0.56674397",
"0.5618918",
"0.55990535",
"0.559901",
"0.55590177",
"0.5556551",
"0.5540792",
"0.552214",
"0.5515603",
"0.54981625",
"0.5493559",
"0.54897684",
"0.54535276",
"0.54260296",
"0.5398784",
"0.53821063",
"0.5376927",
"0.53646433",
"0.5360266",
"0.53300047",
"0.5319358",
"0.5262675",
"0.5246016",
"0.5237983"
] | 0.7742141 | 0 |
Bans a host from connecting. | def ban_host(self, host, hard=False, duration=None):
# TODO: Timed bans?
logger.verbose("Banning IP {0}".format(host))
self.ip_bans.add(host, hard) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ban(sock, user):\r\n chat(sock, \"/ban {}\".format(user))",
"def connect(self, host):\n return False",
"def handle_ping(self, host):\n self.send(\"PONG :{}\".format(host))",
"def k(self, irc, msg, args, nicks):\n\n if(self._checkCPO(irc, msg)):\n \n hostmasks = []\n \n for nick in nicks:\n prefix = irc.state.nickToHostmask(nick)\n user = ircutils.userFromHostmask(prefix)\n host = ircutils.hostFromHostmask(prefix)\n \n hostmask = '*!*@%s' % host\n hostmasks.append(hostmask)\n \n irc.queueMsg(ircmsgs.bans(msg.args[0], hostmasks))\n irc.queueMsg(ircmsgs.kicks(msg.args[0], nicks, 'Your behavior is not conducive to the desired environment.'))\n \n def unban():\n irc.queueMsg(ircmsgs.unbans(msg.args[0], hostmasks))\n \n schedule.addEvent(unban, time.time() + 900)\n \n irc.noReply()",
"def ban(sock, chan, user):\n chat(sock, \".ban {}\\r\\n\".format(user))\n console.info(\"banned user {} from channel {}\".format(user, chan))",
"def connect_without_host_data(self, host: Host, bounce: bool):\n print_light_grey('Host data not found, trying to find a connection path...')\n\n if bounce:\n bounce_host = DiscoverHost(self.account_obj, bounce=True).get_bounce()\n host = DiscoverHost(self.account_obj, bounce=True).discover_host(host, bounce_host)\n\n if not DoConnectAndSave(host, self.account_obj).bounce_regular_connect(bounce_host):\n sys.exit(0)\n else:\n host = DiscoverHost(self.account_obj, bounce=False).discover_host(host)\n\n if not DoConnectAndSave(host, self.account_obj).regular_connect():\n sys.exit(0)",
"def ban (phenny, input):\n if not input.admin: return\n text = input.group().split()\n argc = len(text)\n if argc < 2: return\n opt = text[1]\n banmask = opt\n channel = input.sender\n if opt.startswith('#'):\n if argc < 3: return\n channel = opt\n banmask = text[2]\n banmask = configureHostMask(banmask)\n if banmask == '': return\n phenny.write(['MODE', channel, '+b', banmask])",
"def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()",
"def get(self, ip_addr: str) -> typing.Optional[HostBan]:\n ban = self.bans.get(ip_addr, None)\n if ban is None:\n return None\n if ban.expired:\n del self.bans[ip_addr]\n return None\n return ban",
"def connect(self, host):\n if not self.app.connect(host):\n command = \"Connect({0})\".format(host).encode(\"utf-8\")\n self.exec_command(command)\n self.last_host = host",
"def _host_blocked(self, host: str) -> bool:\n bad_masks = self.config.core.host_blocks\n for bad_mask in bad_masks:\n bad_mask = bad_mask.strip()\n if not bad_mask:\n continue\n if (re.match(bad_mask + '$', host, re.IGNORECASE) or\n bad_mask == host):\n return True\n return False",
"async def bans(self, ctx):\n try:\n bans = await self.bot.get_bans(ctx.message.server)\n except discord.Forbidden:\n await self.bot.say('I do not have the proper permissions')\n except discord.HTTPException:\n await self.bot.say('Getting bans failed')\n else:\n await self.bot.say('\\N{SMALL ORANGE DIAMOND}'.join(user.name for user in bans))",
"async def ping(self, ctx, ip):\n\n # Check for valid IP else do DNS lookup\n valid_ip = re.compile(\"[0-9]{,3}\\.[0-9]{,3}\\.[0-9]{,3}\")\n valid_hostname = re.compile(\".*\\.[a-zA-Z]{2,}\")\n valid = False\n\n if valid_ip.match(ip):\n valid = True\n elif valid_hostname.match(ip):\n valid = True\n try:\n await self.bot.say('Doing DNS lookup...')\n ip = socket.gethostbyname(ip)\n\n if valid == True:\n start = time.time()\n response = os.system(\"sudo ping -c 1 -w3 \" + ip)\n duration = time.time() - start\n duration = round(duration * 1000, 0)\n if response == 0:\n await self.bot.say(ip + ' is up and responding in ' +\n str(duration) + 'ms.')\n else:\n await self.bot.say(ip + ' is not reachable.')\n else:\n await self.bot.say(ip + ' is not a valid IP or Domain.')\n\n except socket.gaierror:\n await self.bot.say('Whoops! That Address cant be resolved!')",
"def ban_all():\n sudo(\"varnishadm 'ban req.url ~ .'\")",
"def setupHostConnection(self, host_ip):\n if host_ip != self.ip and host_ip != '':\n host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n indicator = host_socket.connect_ex((host_ip, 9090))\n if indicator != 0:\n return False\n else:\n new_host_msg = Message(\"NHST\", self.ip, '\\0')\n host_socket.sendall(new_host_msg.generateByteMessage())\n print('NHST message sent to Host at ' + host_ip)\n area_message = self.parseMessage(host_socket)\n if(area_message.type == 'AREA'):\n print('AREA message received from ' + area_message.origin)\n payload_array = area_message.payload.split(':')\n curr_host_ip = area_message.origin\n host_min_x = int(payload_array[0])\n host_max_x = int(payload_array[1])\n self.x_min = host_max_x\n self.x_max = self.x_min + 50\n if host_max_x > self.curr_x_max:\n self.curr_x_max = host_max_x\n if self.x_min == host_max_x:\n self.l_neighbor = curr_host_ip\n if host_min_x <= self.curr_x_min:\n self.curr_x_min = host_min_x\n self.curr_x_min_ip = curr_host_ip\n new_thread = Thread(target=lambda: self.listenToHost(host_socket))\n new_thread.daemon = True\n new_thread.start()\n new_connection = Connection(host_ip, host_socket, new_thread)\n self.connections.append(new_connection)\n return True\n else:\n print('Invalid message type received from ' + area_message.origin + ' - Host corrupt')\n return False\n return True",
"def ping():\n # TODO: this ought to live in ISlaveControl, maybe with disconnect()\n # or something. However the event that is emitted is most useful in\n # the Builder column, so it kinda fits here too.",
"def ping(host):\n\n # Ping parameters as function of OS\n parameters = \"-n 1\" if system_name().lower() == \"windows\" else \"-c 1\"\n\n # Pinging\n return system_call(\"ping \" + parameters + \" \" + host) == 0",
"def ban(self, mask, target, args):\n self.bot.send('MODE %s +b %s' % (as_channel(args['<channel>']), args['<nick>']))",
"def run(self):\n if not self.stopResponder:\n try:\n if not exploit.is_authorized(self.target_ip):\n print 'Tyring to get authorization to {0}...'.format(self.target_ip)\n exploit.auto_connect(self.src_ip, self.target_ip)\n time.sleep(5)\n self.run()\n except Exception as e:\n #print ('[!] Error occured,' , e)\n if not exploit.is_reachable(self.target_ip):\n \tprint '[!] Target no longer reachable'\n else:\n \tprint ('[!] Error occured,' , e)\n #uncomment line below if you would like it to keep retrying to connect to host even if it was unreachable\n self.run()\n #print('Exiting responder...')",
"def ping(self):\n pass",
"def verifyWalabotIsConnected():\n while True:\n try:\n wlbt.ConnectAny()\n except wlbt.WalabotError as err:\n input(\"- Connect Walabot and press 'Enter'.\")\n else:\n print('- Connection to Walabot established.')\n return",
"def ping (self,host):\n # Ping parameters as function of OS\n ping_str = \"-n 1\" if platform.system().lower()==\"windows\" else \"-c 1\"\n\n # Ping\n return os.system(\"ping \" + ping_str + \" \" + host) == 0",
"def is_spam(self, host):\n \"\"\"\n Run async spam checking on host\n\n :param host: domain or ip address\n :return: None\n \"\"\"\n # Severs where host is blacklisted\n self.blacklisted = []\n # Generate ars for checker\n args = [(host, i) for i in self.serverlist]\n # Init Pool\n self.pool = Pool(self.threads)\n # Spawn pool\n self.pool.map(self.check, args)\n return self.blacklisted",
"def _hostOK(self, host):\n if os.system(\"ping -c 1 $node &> /dev/null\"):\n # No access to host\n return False\n elif os.system(\"ssh -n -a -x $node 'ls' &> /dev/null\"):\n # No route to host\n return False\n else:\n return True",
"def ping(self) -> None:\n ...",
"async def unban(self, ctx, name: str):\n try:\n bans = await self.bot.get_bans(ctx.message.server)\n user = discord.utils.get(bans, name=name)\n if user is not None:\n await self.bot.unban(ctx.message.server, user)\n except discord.Forbidden:\n await self.bot.say('I do not have the proper permissions')\n except discord.HTTPException:\n await self.bot.say('Unbanning failed')\n else:\n await self.bot.say('\\N{OK HAND SIGN}')",
"def connect(self):\r\n try:\r\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(self.ip))\r\n print (\"connected!\")\r\n except Exception as error:\r\n print (\"connect() - error - {}\".format(error))",
"def ban_command(server, output):\n for target in output.message.split()[1:]:\n if target in server.ops:\n server.tell(output.name, 'Operators cannot be banned')\n continue\n server.banip(target)\n server.ban(target)\n return",
"def ping(host):\n import os, platform\n\n # Ping parameters as function of OS\n ping_str = \"-n 1\" if platform.system().lower()==\"windows\" else \"-c 1\"\n\n # Ping\n return os.system(\"ping \" + ping_str + \" \" + host) == 0",
"def ping(host):\n import os, platform\n\n # Ping parameters as function of OS\n ping_str = \"-n 1\" if platform.system().lower()==\"windows\" else \"-c 1\"\n\n # Ping\n return os.system(\"ping \" + ping_str + \" \" + host) == 0"
] | [
"0.6156989",
"0.59887886",
"0.59198314",
"0.569596",
"0.56383455",
"0.5601381",
"0.5566465",
"0.5516461",
"0.550819",
"0.54816896",
"0.5463091",
"0.54218864",
"0.5382467",
"0.5342919",
"0.5336694",
"0.53214467",
"0.53200334",
"0.5309691",
"0.52745163",
"0.5259725",
"0.52515334",
"0.52503043",
"0.5246818",
"0.52465206",
"0.523794",
"0.5200418",
"0.5190665",
"0.51859224",
"0.51765925",
"0.51765925"
] | 0.6886727 | 0 |
Return 10 feeds with custom query params | def get_custom_feeds(request):
start = int(request.paginate_number) * 10
end = start + 10
feeds = Feed.objects.all().order_by('-id')[start: end]
return get_feed_list(feeds) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all(self, start_at, limit, order=None):",
"def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data",
"def get_queryset(self):\n queryset = Article.objects.all().order_by('-id')\n title = self.request.query_params.get('title', None)\n limit = self.request.query_params.get('limit', None)\n random = self.request.query_params.get('random', None)\n if title is not None:\n queryset = queryset.filter(title__icontains=title).order_by('-id')\n elif limit is not None:\n queryset = queryset[:limit]\n elif random is not None:\n queryset = queryset[COUNT:COUNT_MAX]\n return queryset",
"def top_ten(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__lte=datetime.date.today())\n movies = movies.order_by('-rating')[:10]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)",
"def feeds(request, page='1'):\n\n __time_update(request.user)\n\n feeds = Feed.objects.filter(user=request.user)\n\n bottom = (int(page) - 1) * 10\n top = bottom + 10\n\n prev = 'none' if page == '1' else \\\n 'none' if len(feeds[bottom - 1: bottom]) == 0 else \\\n int(page) - 1\n\n next = 'none' if len(feeds[top: top + 1]) == 0 else \\\n int(page) + 1\n\n return render_to_response('feeds.html', {\n 'username': request.user.username,\n 'feeds': feeds[bottom: top],\n 'prev': prev, 'next': next})",
"def get_list(self, **kwargs):\n self.fields = self.get_fields(**kwargs)\n fields = \", \".join(self.fields)\n kwargs[\"query\"] = 'SELECT {0}'.format(fields)\n start = kwargs.pop(\"offset\", None)\n end = kwargs.pop(\"count\", None)\n data = self.filter(**kwargs)\n\n return self.paginate(data, start=start, end=end)",
"def get_all(self, start=0, count=-1, filter='', query='', sort=''):\n return self._client.get_all(start, count, filter=filter, sort=sort, query=query)",
"def get_all_from_top_ten(title,users,max = 3):\n \"\"\" ten prolific users \"\"\"\n \"\"\" max : number of user with related followers \"\"\"\n getAllUsers(users,all_users,users_set,proceeded_users,max,user_cpt,title)\n for data in users_set:\n print(data.id)",
"def api_get_threads(request, count):\n\n if PARAMETER_TAG in request.GET:\n tag_name = request.GET[PARAMETER_TAG]\n if tag_name is not None:\n tag = get_object_or_404(Tag, name=tag_name)\n threads = tag.threads.filter(archived=False)\n else: \n threads = Thread.objects.filter(archived=False)\n\n if PARAMETER_OFFSET in request.GET:\n offset = request.GET[PARAMETER_OFFSET]\n offset = int(offset) if offset is not None else 0\n else:\n offset = 0\n\n threads = threads.order_by('-bump_time')\n threads = threads[offset:offset + int(count)]\n\n opening_posts = []\n for thread in threads:\n opening_post = thread.get_opening_post()\n\n # TODO Add tags, replies and images count\n opening_posts.append(_get_post_data(opening_post.id,\n include_last_update=True))\n\n return HttpResponse(content=json.dumps(opening_posts))",
"def get_feed(request):\n user = request.user\n\n # Get feed data and paginate it.\n feed = Post.objects.filter(community__communitymember__user=user).order_by(\n \"-created_at\"\n )\n\n # Check for filter phrase.\n phrase = request.GET.get(\"phrase\")\n if phrase is not None:\n if phrase != \"\" and not phrase.isspace():\n for term in phrase.split():\n feed = feed.filter(Q(title__icontains=term))\n else:\n feed = feed.none()\n\n return JsonResponse(\n json_paginator(request, feed, lambda d: d.serialize(request)),\n status=200,\n )",
"def get_summaries(query, **kwargs):\n kwargs.update(stop=40)\n results = search(query, **kwargs)\n return results",
"def list():\n\n page_limit = app.config['PAGINATION_LIMIT']\n page = request.args.get('page') if 'page' in request.args else 1\n per_page = request.args.get('per_page') if 'per_page' in request.args else page_limit\n\n # TODO: Can be done in much more elegant way\n try:\n page = int(page)\n except:\n page = 1\n\n try:\n per_page = int(per_page)\n except:\n per_page = page_limit\n if per_page > page_limit:\n per_page = page_limit\n\n # Get all rows and order by published datetime and paginate by page count and per_page\n posts = YTSearch.query.order_by(desc(YTSearch.published_at)) \\\n .paginate(page, per_page, error_out=True)\n\n # Get JSON data from list of objects\n result = [i.serialize() for i in posts.items]\n return jsonify({'data': result, 'has_next': posts.has_next, 'next_page': posts.next_num,\n 'has_prev': posts.has_prev, 'prev_page': posts.prev_num, 'length': len(result)}), 200",
"def get_all(self, start=0, count=-1, filter='', sort=''):\n return self._client.get_all(start=start, count=count, filter=filter, sort=sort)",
"def get_all(self, start=0, count=-1, sort='', query='', view=''):\n return self._client.get_all(start, count, sort=sort, query=query, view=view)",
"def how_many_comments(comment_queryset, count=10):\n\n if count == \"all\":\n return comment_queryset\n return comment_queryset[:int(count)]",
"def construct_page_requests(key, max_pages, rank, tid):\n template = lambda i : f\"https://search.bilibili.com/all?keyword={key}&from_source=nav_search_new&order={rank}&duration=0&tids_1={tid}&page={i}\"\n return [template(i) for i in range(1, max_pages+1)]",
"def list(self, request, *args, **kwargs):\n\n queryset = self.filter_queryset(self.get_queryset())\n\n page = request.query_params.get('page', 1)\n paginator = Paginator(queryset, 8)\n\n try:\n queryset = paginator.page(page)\n\n except PageNotAnInteger:\n queryset = paginator.page(1)\n\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n page = int(page)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response({'items': serializer.data, 'page': page, 'pages': paginator.num_pages})",
"def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])",
"def get_restaurants():\n restaurants = []\n start = 0\n\n while(True):\n response = requests.get(REQUEST_URL + \"&start=\" + str(start), \\\n headers=HEADERS)\n response_body = json.loads(response.text)\n if (response_body[\"results_shown\"] < 1):\n break\n \n restaurants += response_body[\"restaurants\"] \n start += 20\n\n return restaurants",
"def get_rss(limit):\n rss_data = feedparser.parse(URL)\n if limit == 1:\n title = rss_data.entries[0].title\n link = rss_data.entries[0].link\n rss_print(title, link)\n else:\n for i in range(0, limit):\n title = rss_data.entries[i].title\n link = rss_data.entries[i].link\n\n print(Back.CYAN + str(i + 1) + \"\\t\")\n rss_print(title, link)",
"def get_series(self, page=0, filters=''):",
"def getTopMovies(endpoint, date, count=10):\n\n try:\n response = urlreq.urlopen(endpoint.format(date))\n soup = BeautifulSoup(response.read(), \"html.parser\")\n table = soup.find('table', border=\"0\", cellpadding=\"5\", cellspacing=\"1\")\n tdata = []\n\n for i, row in enumerate(table.find_all('tr')[1:], start=1):\n if i > count:\n break\n\n cells = row.find_all('td')\n tdict = {}\n\n tdict['rank'] = i\n tdict['title'] = cells[2].text.strip()\n tdict['daily_gross'] = int(re.sub(r'[^\\d]', '', cells[4].text))\n tdict['theaters'] = int(re.sub(r'[^\\d]', '', cells[7].text))\n tdict['todate_gross'] = int(re.sub(r'[^\\d]', '', cells[9].text))\n tdict['release_day'] = int(cells[10].text)\n\n tdata.append(tdict)\n\n tdata = pd.DataFrame(tdata)\n tdata['gross_date'] = date\n return tdata\n\n except urlerr.URLError as err:\n print(\"\\nThere was an error retrieving daily revenue information\")\n print(err)\n return None\n except Exception:\n print(\"\\nThere's something wrong with the BOMojo daily revenue page\")\n return None",
"def _make_paged_query(\n conn, search_base, search_scope, ad_query, attr_list, page_size\n):\n result = []\n page_result_control = SimplePagedResultsControl(\n size=page_size,\n cookie=''\n )\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n\n while True:\n r_type, r_data, r_msgid, serverctrls = conn.result3(msgid)\n result.extend(r_data)\n\n if serverctrls:\n if serverctrls[0].cookie:\n page_result_control.size = page_size\n page_result_control.cookie = serverctrls[0].cookie\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n else:\n break\n\n return result",
"def page10(self):\n result = request1001.GET('/Cars_Sample_App/search.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/car.do?query=carEnquiries&cid=2'), ))\n\n return result",
"def query_newsfeed(user, **kwargs):\n page = kwargs.get(\"page\", 0)\n max_items = kwargs.get(\"max_items\", 5)\n if page and max_items:\n start_item = (page-1)*max_items\n end_item = page*max_items\n else:\n start_item = \"\"\n end_item = \"\"\n notification_query = \"\"\"\n SELECT a.* \n FROM notifications_notification a \n WHERE ( ( NOT EXISTS (\n SELECT 1 \n FROM notifications_notification b\n WHERE b.target_object_id = a.target_object_id \n AND b.timestamp > a.timestamp\n AND b.recipient_id=%(user_id)d\n ) ) AND a.recipient_id=%(user_id)d )\n GROUP BY a.target_object_id\n ORDER BY a.timestamp DESC\n \"\"\"\n if start_item >= 0 and end_item :\n notification_query += \"LIMIT %(start_item)d,%(end_item)s\"\n \n notification_query = notification_query % {\"user_id\" : user.id, \n \"start_item\" : start_item, \n \"end_item\" : end_item,\n }\n notification_list = Notification.objects.raw(notification_query)\n return notification_list",
"def get_list(self, *args, **kwargs):\r\n request_params = {\r\n 'headers': {\r\n 'User-Agent':'Google-Bot'\r\n },\r\n 'params': {\r\n 'page':kwargs.get('page', self.page),\r\n 'per_page':kwargs.get('per_page', self.per_page)\r\n }\r\n }\r\n if kwargs.get('proxy', None):\r\n request_params['proxies'] = kwargs['proxies']\r\n\r\n response = getattr(requests, 'get')('{api_endpoint}'.format(**kwargs), **request_params)\r\n return response.json()",
"def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> dict:\n\n next_link_url = None\n\n if odata_filter:\n count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)\n\n if count_data.status_code == 400:\n print(\"Received an error while retrieving data from %s:\" % url + '?$filter=' + odata_filter)\n pprint(count_data.json()['error'])\n return {}\n\n count_data = count_data.json()\n if count_data['@odata.count'] <= 0:\n print(\"No results found!\")\n return {}\n else:\n count_data = requests.get(url, headers=authenticated_headers, verify=False).json()\n\n if 'value' in count_data:\n data = count_data['value']\n else:\n data = count_data\n\n if '@odata.nextLink' in count_data:\n # Grab the base URI\n next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']\n\n i = 1\n while next_link_url is not None:\n # Break if we have reached the maximum number of pages to be returned\n if max_pages:\n if i >= max_pages:\n break\n else:\n i = i + 1\n response = requests.get(next_link_url, headers=authenticated_headers, verify=False)\n next_link_url = None\n if response.status_code == 200:\n requested_data = response.json()\n if requested_data['@odata.count'] <= 0:\n print(\"No results found!\")\n return {}\n\n # The @odata.nextLink key is only present in data if there are additional pages. We check for it and if it\n # is present we get a link to the page with the next set of results.\n if '@odata.nextLink' in requested_data:\n next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \\\n requested_data['@odata.nextLink']\n\n if 'value' in requested_data:\n data += requested_data['value']\n else:\n data += requested_data\n else:\n print(\"Unknown error occurred. Received HTTP response code: \" + str(response.status_code) +\n \" with error: \" + response.text)\n raise Exception(\"Unknown error occurred. Received HTTP response code: \" + str(response.status_code)\n + \" with error: \" + response.text)\n\n return data",
"def generatePosts(self,**kwargs):\n oldestTimeSoFar = None\n while True:\n if oldestTimeSoFar is None:\n items = self.getPosts(**kwargs)\n else:\n items = self.getPosts(before_time=oldestTimeSoFar,**kwargs)\n if not items:\n return\n for item in items:\n yield item\n oldestTimeSoFar = item['published_at']\n time.sleep(0.5)",
"def fetch(api_key, query='', page=1, from_date=False, to_date=False):\n fetch_articles(api_key, query, page, from_date, to_date)",
"def query(q, top_n=12):\n print('Query: ' + q + '; Top N: ' + str(top_n))\n\n driver = None\n bad_request = False\n urls = set()\n try:\n driver = Fetcher.get_selenium_driver()\n driver.get('https://api.duckduckgo.com/?q=' + q + '&kl=wt-wt')\n except:\n print('An error occurred while searching query: ' + q)\n bad_request = True\n finally:\n try:\n if not bad_request:\n results = driver.find_elements_by_class_name('result__a')\n result_size = len(results)\n print('Result Size: ' + str(result_size))\n while result_size > 0 and len(urls) < top_n:\n for element in results:\n new_url = element.get_attribute('href')\n # TODO: Filter URLs if required\n urls.add(new_url)\n if len(urls) == top_n:\n break\n\n # Infinite Scroll\n if len(urls) < top_n:\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n results = driver.find_elements_by_class_name('result__a')\n results = results[result_size:]\n result_size = len(results)\n print('Moved to Next Page. Result Size: ' + str(result_size))\n except:\n print('An error occurred while searching query: ' + q + ' and fetching results')\n finally:\n if driver:\n Fetcher.close_selenium_driver(driver)\n print('Search Completed')\n return urls"
] | [
"0.6454365",
"0.63795257",
"0.6170948",
"0.6094697",
"0.6044983",
"0.59835553",
"0.5974764",
"0.596247",
"0.5931192",
"0.59081125",
"0.5877002",
"0.5860104",
"0.585344",
"0.5846139",
"0.58022046",
"0.5781544",
"0.5773379",
"0.57530355",
"0.574533",
"0.57441443",
"0.5736443",
"0.5735765",
"0.5702383",
"0.56920195",
"0.56837785",
"0.5681229",
"0.5680031",
"0.56792545",
"0.5675607",
"0.56665945"
] | 0.7382841 | 0 |
Create a new bookmark to a feed | def create_bookmark_for_feed(request):
try:
feed = Feed.objects.get(id=request.feed.id)
Bookmarked.objects.create(
user=request.user.username,
feed=feed,
)
except (ValidationError, Feed.DoesNotExist) as e:
exc = e
logger(__name__, "Could not add Bookmark due to {}".format(str(exc)))
errors = _get_errors(exc)
return feeds_pb2.OperationStatus(
op_status=feeds_pb2.Status.Value('FAILURE'),
details={'errors': feeds_pb2.RepeatedString(data=errors)},
)
return feeds_pb2.OperationStatus(
op_status=feeds_pb2.Status.Value('SUCCESS'),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createBookmark(self, address: ghidra.program.model.address.Address, category: unicode, note: unicode) -> ghidra.program.model.listing.Bookmark:\n ...",
"def bookmark_entry(request, entry_id):\n entry = get_object_or_404(Entry, id=entry_id)\n entry.bookmarks.add(request.user)\n return redirect(\"feed_entries\", feed_id=entry.feed.id)",
"def bookmark(search_me, source, url):\n user_id = request.form['userid']\n search_term = db_search_terms.find_one({\"value\": search_me.lower()})\n search_id = search_term.get(\"_id\")\n data = {\"user\": user_id,\n \"search_id\": search_id,\n \"source\": source,\n \"url\": url, \n \"date_saved\": datetime.utcnow()}\n x = db_bookmarks.insert(data, check_keys=False)\n msg = {\"status\" : { \"type\" : \"success\" , \"message\" : \"Bookmark created\"}}\n return jsonify(msg)",
"def bookmark(user_id, item_id):\n Bookmark.objects.get_or_create(user=User.objects.get(pk=user_id),\n item=Item.objects.get(pk=item_id))",
"def __create_bookmark(self, url, username):\r\n b = Bmark(\r\n url=url,\r\n username=username\r\n )\r\n tagname = gen_random_word(5)\r\n b.tags[tagname] = Tag(tagname)\r\n return b",
"def create_bookmark(record_settings):\n bookmark_name = record_settings['bookmark']\n # Set bookmark to None to avoid future loading attempting to bookmark\n record_settings['bookmark'] = None\n quickstreams = read_quickstreams()\n while bookmark_name in quickstreams or bookmark_name == '':\n bookmark_name = input(\"Name in use or blank, enter another: \")\n\n quickstreams[bookmark_name.lower()] = record_settings\n save_quickstreams(quickstreams)",
"def bookmark(self, request, pk=None):\n user = request.auth.user\n try:\n event = Event.objects.get(pk=pk)\n except Event.DoesNotExist:\n return Response(\n {'message': 'Event does not exist.'},\n status=status.HTTP_400_BAD_REQUEST\n )\n if request.method == \"POST\":\n try:\n event.bookmarks.add(user)\n event.bookmarked = True\n return Response({}, status=status.HTTP_201_CREATED)\n except Exception as ex:\n return Response({'message': ex.args[0]})\n elif request.method == \"DELETE\":\n try:\n event.bookmarks.remove(user)\n event.bookmarked = False\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n except Exception as ex:\n return Response({'message': ex.args[0]})",
"def obj_create(self, bundle, request=None, **kwargs):\n return super(BookmarkResource, self).obj_create(bundle, request, user=request.user)",
"def make_bookmark(user=None):\r\n bmark = Bmark(random_url(),\r\n username=u\"admin\",\r\n desc=random_string(),\r\n ext=random_string(),\r\n tags=u\"bookmarks\")\r\n\r\n if user:\r\n bmark.username = user.username\r\n bmark.user = user\r\n\r\n DBSession.add(bmark)\r\n DBSession.flush()\r\n return bmark",
"def bmark_add(request):\r\n rdict = request.matchdict\r\n try:\r\n if 'url' in request.params or 'hash_id' in request.params:\r\n params = request.params\r\n elif 'url' in request.json_body or 'hash_id' in request.json_body:\r\n params = request.json_body\r\n else:\r\n raise ValueError('No url provided')\r\n except ValueError:\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: No url provided'\r\n })\r\n\r\n user = request.user\r\n\r\n if 'url' not in params and 'hash_id' not in rdict:\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: missing url',\r\n })\r\n\r\n elif 'hash_id' in rdict:\r\n try:\r\n mark = BmarkMgr.get_by_hash(\r\n rdict['hash_id'],\r\n username=user.username\r\n )\r\n mark = _update_mark(mark, params)\r\n\r\n except NoResultFound:\r\n request.response.status_code = 404\r\n return _api_response(request, {\r\n 'error': 'Bookmark with hash id {0} not found.'.format(\r\n rdict['hash_id'])\r\n })\r\n\r\n else:\r\n # check if we already have this\r\n try:\r\n mark = BmarkMgr.get_by_url(params['url'],\r\n username=user.username)\r\n mark = _update_mark(mark, params)\r\n\r\n except NoResultFound:\r\n # then let's store this thing\r\n # if we have a dt param then set the date to be that manual\r\n # date\r\n if 'dt' in request.params:\r\n # date format by delapi specs:\r\n # CCYY-MM-DDThh:mm:ssZ\r\n fmt = \"%Y-%m-%dT%H:%M:%SZ\"\r\n stored_time = datetime.strptime(request.params['dt'], fmt)\r\n else:\r\n stored_time = None\r\n\r\n # check to see if we know where this is coming from\r\n inserted_by = params.get('inserted_by', u'unknown_api')\r\n\r\n mark = BmarkMgr.store(\r\n params['url'],\r\n user.username,\r\n params.get('description', u''),\r\n params.get('extended', u''),\r\n params.get('tags', u''),\r\n dt=stored_time,\r\n inserted_by=inserted_by,\r\n )\r\n\r\n # we need to process any commands associated as well\r\n commander = Commander(mark)\r\n mark = commander.process()\r\n\r\n # if we have content, stick it on the object here\r\n if 'content' in params:\r\n content = StringIO(params['content'])\r\n content.seek(0)\r\n parsed = ReadContent.parse(content,\r\n content_type=u\"text/html\",\r\n url=mark.hashed.url)\r\n\r\n mark.readable = Readable()\r\n mark.readable.content = parsed.content\r\n mark.readable.content_type = parsed.content_type\r\n mark.readable.status_code = parsed.status\r\n mark.readable.status_message = parsed.status_message\r\n\r\n # we need to flush here for new tag ids, etc\r\n DBSession.flush()\r\n\r\n mark_data = dict(mark)\r\n mark_data['tags'] = [dict(mark.tags[tag]) for tag in mark.tags.keys()]\r\n\r\n return _api_response(request, {\r\n 'bmark': mark_data,\r\n 'location': request.route_url('bmark_readable',\r\n hash_id=mark.hash_id,\r\n username=user.username),\r\n })",
"def _make_presentable(self, bookmark):\n return Bookmark(\n id= bookmark.id,\n name=bookmark.name,\n url=bookmark.url,\n date_created=bookmark.date_created\n )",
"def add_bookmark(bookmark_info):\n BookmarkHandler.handle_bookmark(\n bookmark_info[\"USER_ID\"], bookmark_info[\"PROJECT_ID\"], status=True\n )",
"def _add_bookmark(self, user=None):\r\n if user:\r\n DBSession.add(user)\r\n username = user.username\r\n else:\r\n username = u'admin'\r\n\r\n b = Bmark(\r\n url=gen_random_word(12),\r\n username=username,\r\n tags=gen_random_word(4),\r\n )\r\n\r\n b.clicks = randint(0, MAX_CLICKS)\r\n b.hash_id = gen_random_word(5)\r\n\r\n DBSession.add(b)\r\n DBSession.flush()\r\n b.hashed.clicks = b.clicks\r\n DBSession.flush()\r\n transaction.commit()",
"def test_add_bookmark(self):\r\n # we need to know what the current admin's api key is so we can try to\r\n # add\r\n res = DBSession.execute(\r\n \"SELECT api_key FROM users WHERE username = 'admin'\").fetchone()\r\n key = res['api_key']\r\n\r\n test_bmark = {\r\n 'url': u'http://bmark.us',\r\n 'description': u'Bookie',\r\n 'extended': u'Extended notes',\r\n 'tags': u'bookmarks',\r\n 'api_key': key,\r\n }\r\n\r\n res = self.testapp.post('/api/v1/admin/bmark',\r\n params=test_bmark,\r\n status=200)\r\n\r\n self.assertTrue(\r\n '\"location\":' in res.body,\r\n \"Should have a location result: \" + res.body)\r\n self.assertTrue(\r\n 'description\": \"Bookie\"' in res.body,\r\n \"Should have Bookie in description: \" + res.body)\r\n self._check_cors_headers(res)",
"def link(request, link_id):\n bkmrk_id = Bookmark.decode_id(link_id)\n bookmark = get_object_or_404(Bookmark, pk=bkmrk_id)\n if request.user.is_authenticated():\n Click.objects.create(human=request.user, bookmark=bookmark)\n else:\n Click.objects.create(bookmark=bookmark)\n return redirect(bookmark.url)",
"def post(self, request, slug):\n serializer_context = {'request': request}\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise NotFound(\"An article with this slug does not exist\")\n bookmark = Bookmarks.objects.filter(\n user=request.user.profile, article=article).first()\n if not bookmark:\n bookmarks = Bookmarks(article=article, user=request.user.profile)\n bookmarks.save()\n serializer = self.serializer_class(\n article,\n context=serializer_context\n )\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response({\n \"msg\": \"Article with the slug '{}' is already in bookmarks\".format(slug)\n }, status=status.HTTP_202_ACCEPTED)",
"def create_bookmark(\n self,\n name: str,\n query: str,\n results: str = None,\n notes: str = None,\n labels: List[str] = None,\n ) -> Optional[str]:\n self.check_connected() # type: ignore\n # Generate or use resource ID\n bkmark_id = str(uuid4())\n bookmark_url = self.sent_urls[\"bookmarks\"] + f\"/{bkmark_id}\" # type: ignore\n data_items: Dict[str, Union[str, List]] = {\n \"displayName\": name,\n \"query\": query,\n }\n if results:\n data_items[\"queryResult\"] = results\n if notes:\n data_items[\"notes\"] = notes\n if labels:\n data_items[\"labels\"] = labels\n data = _build_sent_data(data_items, props=True)\n params = {\"api-version\": \"2020-01-01\"}\n response = httpx.put(\n bookmark_url,\n headers=get_api_headers(self.token), # type: ignore\n params=params,\n content=str(data),\n timeout=get_http_timeout(),\n )\n if response.status_code == 200:\n print(\"Bookmark created.\")\n return response.json().get(\"name\")\n raise CloudError(response=response)",
"def __addBookmarked(self):\n an = self.getActiveName()\n if an is not None and an not in self.bookmarked:\n self.bookmarked.append(an)",
"def create(request):\n\t#Create bookmark if request method type is POST\n\tif request.method == 'POST':\n\t\tserializer = BookmarkSerializers(data = request.data)\n\t\t\n\t\tif serializer.is_valid():\n\t\t\tbookmark = serializer.save()\n\n\t\t\treturn Response('Book mark saved successfull', status = status.HTTP_200_OK)\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn Response(serializer.errors, status = status.HTTP_409_CONFLICT)\n\t\n\t#Get Customer list with associated Bookmarks for request type GET\n\telif request.method == 'GET':\n\t\tcustomers = Customer.objects.all()\n\t\tserializer = CustomerSerializers(customers, many=True)\n\t\t\n\t\treturn Response(serializer.data)",
"def _pushbookmark(pushop):\n if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:\n return\n pushop.stepsdone.add(b'bookmarks')\n ui = pushop.ui\n remote = pushop.remote\n\n for b, old, new in pushop.outbookmarks:\n action = b'update'\n if not old:\n action = b'export'\n elif not new:\n action = b'delete'\n\n with remote.commandexecutor() as e:\n r = e.callcommand(\n b'pushkey',\n {\n b'namespace': b'bookmarks',\n b'key': b,\n b'old': hex(old),\n b'new': hex(new),\n },\n ).result()\n\n if r:\n ui.status(bookmsgmap[action][0] % b)\n else:\n ui.warn(bookmsgmap[action][1] % b)\n # discovery can have set the value form invalid entry\n if pushop.bkresult is not None:\n pushop.bkresult = 1",
"def add(self,\n url: Url,\n title: str = \"\",\n comments: str = \"\",\n tags: str = \"\",\n added: str = \"\") -> bool:\n\n bookmark = self.find_url(url)\n\n if bookmark:\n self._execute(\n \"\"\"UPDATE bookmarks\n SET title=?, tags=?, comments=?,\n updated=CURRENT_TIMESTAMP,\n deleted=NULL WHERE rowid=?\"\"\",\n (\n title,\n tags,\n comments,\n bookmark[\"rowid\"]\n )\n )\n\n return True\n\n if added and added.isnumeric():\n add_date = cherrypy.engine.publish(\n \"clock:from_timestamp\",\n int(added)\n ).pop()\n else:\n add_date = cherrypy.engine.publish(\n \"clock:now\",\n ).pop()\n\n add_date_formatted = cherrypy.engine.publish(\n \"clock:format\",\n add_date,\n \"%Y-%m-%d %H:%M:%S\"\n ).pop()\n\n self._execute(\n \"\"\"INSERT INTO bookmarks\n (domain, url, added, title, tags, comments)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\",\n (\n url.domain,\n url.address,\n add_date_formatted,\n title,\n tags,\n comments\n )\n )\n\n if tags:\n cherrypy.engine.publish(\n \"cache:clear\",\n \"bookmarks:all_tags\"\n )\n\n cherrypy.engine.publish(\n \"scheduler:add\",\n 2,\n \"bookmarks:add:fulltext\"\n )\n\n cherrypy.engine.publish(\n \"scheduler:add\",\n 5,\n \"bookmarks:tags:all\",\n for_precache=True\n )\n\n return True",
"def add_bookmark(request):\r\n \r\n if request.method == 'POST':\r\n if request.POST.get('path') and request.POST.get('title'):\r\n next = urllib.unquote(request.POST.get('path'))\r\n try:\r\n bookmark = Bookmark.objects.get(user=request.user)\r\n except Bookmark.DoesNotExist:\r\n bookmark = Bookmark(user=request.user)\r\n bookmark.save()\r\n try:\r\n bookmarkitem = BookmarkItem.objects.get(bookmark=bookmark, link=urllib.unquote(request.POST.get('path')))\r\n msg = ['error', 'A pagina já esta cadastrada como Atalho.']\r\n except BookmarkItem.DoesNotExist:\r\n try:\r\n bookmarkitem = BookmarkItem(bookmark=bookmark, title=request.POST.get('title'), link=urllib.unquote(request.POST.get('path')))\r\n bookmarkitem.save()\r\n msg = ['success', 'A pagina foi adicionada aos Atalhos.']\r\n except:\r\n msg = ['error', 'A pagina não pode ser adicionada aos Atalhos']\r\n else:\r\n msg = ['error', 'A pagina não pode ser adicionada aos Atalhos']\r\n next = request.POST.get('path')\r\n else:\r\n msg = ['error', 'A pagina não pode ser adicionada aos Atalhos']\r\n next = ADMIN_URL\r\n \r\n # MESSAGE & REDIRECT\r\n if not request.session.get('grappelli'):\r\n request.session['grappelli'] = {}\r\n request.session['grappelli']['message'] = msg\r\n request.session.modified = True\r\n return HttpResponseRedirect(next)",
"def POST(url: str, **kwargs: str) -> None:\n\n title = kwargs.get(\"title\", \"\")\n tags = kwargs.get(\"tags\", \"\")\n comments = kwargs.get(\"comments\", \"\")\n added = kwargs.get(\"added\", \"\")\n\n result = cherrypy.engine.publish(\n \"scheduler:add\",\n 2,\n \"bookmarks:add\",\n Url(url),\n title,\n comments,\n tags,\n added\n ).pop()\n\n if not result:\n raise cherrypy.HTTPError(400)\n\n cherrypy.response.status = 204",
"def test_save_bookmark_endpoint(self, mock_publish, mock_update, mock_read):\n\n payload = {\n \"link\": \"https://google.com\",\n \"name\": \"Google\"\n }\n\n mock_read.return_value = {\"bookmarks\": [{\"link\":\"www.google.com\", \"name\": \"Google\"}]}\n mock_update.return_value = {\"status\": 200}\n mock_publish.return_value = {\"status_code\": 200}\n\n response = self.client.post(\n reverse(\"create_bookmark\",\n kwargs={\n \"org_id\": \"614679ee1a5607b13c00bcb7\",\n \"room_id\": \"6150e4fc05c9716b90f33f33\"\n }\n ),\n data=payload)\n \n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response.data)\n self.assertTrue(response.data[\"event\"] == \"bookmark_create\")",
"def add_feed(self, url, feed):\n print \"Adding the podcast: %s\" % url\n self.t.click(\"Sidebar\")\n self.shortcut('n')\n time.sleep(2)\n type(url + \"\\n\")\n time.sleep(10) #give it 10 seconds to add and update the feed\n self.click_podcast(feed)\n time.sleep(3)",
"def get_bookmark(self):\n bookmark = {\n 'status_id': None,\n 'message_id': None\n }\n coll = self._db.get_collection(COLLECTION_BOOKMARK)\n cursor = coll.find_one(filter={\n 'account': self._bookmark_account\n })\n if cursor is not None:\n bookmark.update({\n 'status_id': cursor['status_id'],\n 'message_id': cursor['message_id']\n })\n return bookmark",
"def test_bookmark_model_can_create_bookmark(self):\n\n bookmark = Bookmark.objects.create(**self.bookmark_data)\n self.assertEqual(bookmark.profile, self.user.profile)",
"def save_bookmark(self, url, desc, ext, tags, dt=None):\r\n # If a bookmark has the tag \"private\" then we ignore it to prevent\r\n # leaking user data.\r\n if tags and 'private' in tags.lower().split(' '):\r\n return None\r\n\r\n check_hash = generate_hash(url)\r\n\r\n # We should make sure that this url isn't already bookmarked before\r\n # adding it...if the hash matches, you must skip!\r\n if check_hash not in self.hash_list:\r\n bmark = BmarkMgr.store(\r\n url,\r\n self.username,\r\n desc,\r\n ext,\r\n tags,\r\n dt=dt,\r\n inserted_by=IMPORTED\r\n )\r\n\r\n # Add this hash to the list so that we can skip dupes in the\r\n # same import set.\r\n self.hash_list.add(check_hash)\r\n return bmark\r\n\r\n # If we don't store a bookmark then just return None back to the\r\n # importer.\r\n return None",
"def importBookmark(self, bookmarkFile, saveAsPdfName=None):\n outlines = readBookmarkFromFile(bookmarkFile)\n output = PdfFileWriter()\n for i in range(0, self._pdfReader.getNumPages()):\n output.addPage(self._pdfReader.getPage(i))\n _writeOutlinesToPdf(outlines, output, None)\n\n if saveAsPdfName == None:\n saveAsPdfName = self.pdfFileName[0:-4] + '_bookmark.pdf'\n stream = open(saveAsPdfName, 'wb')\n output.write(stream)\n print (\"Add bookmarks in %s to %s finished!\" % (bookmarkFile, saveAsPdfName))",
"def setAddBookmarks(self,value):\n self.PDFreactorConfiguration.in1[\"addBookmarks\"] = value"
] | [
"0.7388493",
"0.7282241",
"0.726063",
"0.6870702",
"0.68682045",
"0.6795213",
"0.64987797",
"0.644406",
"0.6403909",
"0.6362326",
"0.6347653",
"0.6318558",
"0.62411666",
"0.6172005",
"0.6101691",
"0.6096739",
"0.6004369",
"0.5997753",
"0.5956816",
"0.5872486",
"0.5831158",
"0.58048236",
"0.5791174",
"0.5771953",
"0.5749465",
"0.57225096",
"0.5704691",
"0.56808704",
"0.5587623",
"0.55635387"
] | 0.7652424 | 0 |
Validate and Create new Feed Source | def create_new_feed_source(link):
try:
response = parse_new_feeds(link)
if response["status"]:
if "logo" in response["details"]:
logo_link = response["details"]["logo"]
elif "image" in response["details"]:
logo_link = response["details"]["image"]["href"]
else:
logo_link = ''
FeedSource.objects.create(
name=response["details"]["title"],
link=link,
logo_link=logo_link,
details=json.dumps(response["details"]),
)
else:
return feeds_pb2.OperationStatus(
op_status=feeds_pb2.Status.Value('FAILURE'),
details={'errors': feeds_pb2.RepeatedString(data=['Could not parse given link'])},
)
except ValidationError as e:
exc = e
logger(__name__, "Could not add Feed Source due to {}".format(str(exc)))
errors = _get_errors(exc)
return feeds_pb2.OperationStatus(
op_status=feeds_pb2.Status.Value('FAILURE'),
details={'errors': feeds_pb2.RepeatedString(data=errors)},
)
return feeds_pb2.OperationStatus(
op_status=feeds_pb2.Status.Value('SUCCESS'),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_new_feed(feed, source):\n try:\n with transaction.atomic():\n slug = feed.get(\"id\") + feed.get('title')\n new_feed = Feed.objects.create(\n feed_id=feed.get(\"id\"),\n title=feed.get(\"title\"),\n summary=feed.get(\"summary\", \"\"),\n author=feed.get(\"author\", \"\"),\n slug=slugify(slug[0:254]),\n link=get_link_from_feed(feed),\n links=get_links_from_feed(feed),\n source=source,\n )\n FeedDetail.objects.create(\n feed=new_feed,\n content_json=json.dumps(feed),\n )\n source.last_active_on = datetime.now()\n source.save()\n except ValidationError as e:\n exc = e\n logger(__name__, \"Could not create new Feed due to {}\".format(str(exc)))\n raise ValidationError(str(exc))\n logger(__name__, \"Successfull create new feed\")\n return",
"def source_add(request: HttpRequest) -> HttpResponse:\n if request.method == 'POST':\n # Process with adding category\n form = SourceForm(request.POST)\n if form.is_valid():\n logger.debug('Adding a source to database %s', form.cleaned_data)\n source = form.save(commit=False)\n\n # Following metadata will be parsed from Feed.s\n psource = feedparser.parse(source.url)\n source.name = psource.channel.get('title')\n source.description = psource.channel.get('description')\n\n source.save()\n messages.success(request, 'Source successfully added')\n return redirect('rss-index')",
"def post(self):\n s = ScuttlebuttService()\n try:\n feed_dict = simplejson.loads(self.request.body)\n feed = s.CreateFeed(feed_dict)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(feed.ToDict()))\n except simplejson.JSONDecodeError:\n # HTTP 400 for bad syntax.\n self.response.set_status(\n 400, 'Failed to create source. Invalid JSON: %s' % self.request.body)\n except Exception, e:\n # HTTP 422 for syntactically correct but semantically wrong.\n self.response.set_status(422, 'Error creating source: %s' % e)",
"def test_add_flow_request_with_sources(self):\n res = self._add_flow_request(flow_request=self.flow_request)\n self.assertEqual(res.status_code, 201)\n flow_request = res.json()\n destination = Destination.objects.get(name='Destination 1')\n self.assertEqual(flow_request['flow_id'], self.flow_request['flow_id'])\n self.assertEqual(flow_request['status'], 'PE')\n self.assertDictEqual(flow_request['profile'], self.flow_request['profile'])\n self.assertEqual(FlowRequest.objects.all().count(), 4)\n self.assertEqual(ConfirmationCode.objects.all().count(), 1)\n self.assertEqual(FlowRequest.objects.get(flow_id=flow_request['flow_id']).destination, destination)\n self.assertEqual(FlowRequest.objects.get(flow_id=flow_request['flow_id']).sources.count(), 1)\n source = FlowRequest.objects.get(flow_id=flow_request['flow_id']).sources.first()\n self.assertDictEqual(\n {'source_id': source.source_id, 'name': source.name},\n {'source_id': SOURCE_1_ID, 'name': SOURCE_1_NAME}\n )",
"def update_feed_source(request):\n try:\n feed = FeedSource.objects.get(id=request.id)\n feed.status = not feed.status\n feed.save()\n except (ValidationError, FeedSource.DoesNotExist) as e:\n exc = e\n logger(__name__, \"Could not update Feed Source due to {}\".format(str(exc)))\n errors = _get_errors(exc)\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('FAILURE'),\n details={'errors': feeds_pb2.RepeatedString(data=errors)},\n )\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('SUCCESS'),\n )",
"def test_add_flow_request_with_no_sources(self):\n res = self._add_flow_request(flow_request=self.flow_request_without_sources)\n self.assertEqual(res.status_code, 201)\n flow_request = res.json()\n destination = Destination.objects.get(name='Destination 1')\n self.assertEqual(flow_request['flow_id'], self.flow_request['flow_id'])\n self.assertEqual(flow_request['status'], 'PE')\n self.assertDictEqual(flow_request['profile'], self.flow_request['profile'])\n self.assertEqual(FlowRequest.objects.all().count(), 4)\n self.assertEqual(ConfirmationCode.objects.all().count(), 1)\n self.assertEqual(FlowRequest.objects.get(flow_id=flow_request['flow_id']).destination, destination)\n flow_request_sources = FlowRequest.objects.get(flow_id=flow_request['flow_id']).sources.all()\n all_sources = Source.objects.all()\n self.assertEqual(list(flow_request_sources), list(all_sources))",
"def test_feed_creation(self):\n items = []\n feed = Feed(items)\n assert isinstance(feed, Feed)\n assert items == feed.items",
"def test_ach_create_failure(self):\n\n with self.assertRaises(MarqetaError):\n self.client.funding_sources.ach.create({})",
"def add_feed(request):\n\n __time_update(request.user)\n\n try:\n url = request.POST['url']\n if not url.startswith('http://'):\n url = 'http://' + url\n except KeyError:\n return render_to_response('add_feed.html',\n {'username': request.user.username})\n\n try:\n Feed.objects.get(url=url, user=request.user)\n return render_to_response('message.html', {'message':\n 'There is already such feed',\n 'back': '/feeds'})\n except Feed.DoesNotExist:\n pass\n\n feed = feedparser.parse(url)\n\n # If were errors loading XML\n try:\n # Title field in Feed\n title = feed.feed.title\n except AttributeError:\n # Display warning message\n return render_to_response('message.html', {'message':\n 'Wrong feed URL or connection Error.',\n 'back': '/add_feed'})\n\n # Time field in Feed\n time = datetime.now()\n\n # Create Feed and save it\n feed_obj = Feed(title=title, url=url, time=time,\n user=request.user)\n feed_obj.save()\n\n __add_entries(feed.entries, feed_obj)\n\n return redirect('/feeds')",
"def on_new_source(self, source_params, new_sources):\n if new_sources:\n new_model = False\n model = self.dialog_base.data_sources_list_view.model()\n if not model:\n model = QStandardItemModel(self.dialog_base.data_sources_list_view)\n new_model = True\n for key, count in new_sources.iteritems():\n if key in self.sources:\n self.sources[key].count = count\n else:\n new_item = SourceItem(key, source_params, count)\n new_item.setCheckable(True)\n new_item.setCheckState(Qt.Checked)\n model.appendRow(new_item)\n self.sources[key] = new_item\n geometry_params = VectorsGeometriesParams(source_params, key)\n self.query_geometries(geometry_params)\n if new_model:\n model.itemChanged.connect(self.on_source_checked)\n self.dialog_base.data_sources_list_view.setModel(model)\n self.on_task_complete()",
"def addNewFeed(feed):\n # config exist?\n configfile_path = confighome+\"config\"\n print(\"::checking for config\")\n if fileAccessible(configfile_path,'r'):\n print(\"::reading config\")\n appendFeed(feed,configfile_path)\n elif fileAccessible(configfile_path,'w'):\n createNewConfig(feed,configfile_path)\n else:\n print(\"::unable to read\")",
"def test_project_funding_source_creation(self):\n name = 'A project function source name'\n description = 'A project funding source description'\n project_funding_source = self.create_project_funding_source(\n name=name,\n description=description,\n )\n self.assertTrue(isinstance(project_funding_source, ProjectFundingSource))\n self.assertEqual(project_funding_source.__str__(), project_funding_source.name)\n self.assertEqual(project_funding_source.name, name)\n self.assertEqual(project_funding_source.description, description)",
"def valid_source_format(self, valid_source_format):\n\n self._valid_source_format = valid_source_format",
"def test_add_asset_share_feed(self):\n pass",
"def test_add_source_type(self):\n # check if documentalist has access to create form\n self.login_documentalist()\n response = self.client.get('/type/new' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n form_data = { \n 'status': '0',\n 'acronym': 'site',\n 'name': 'Website',\n 'language' : 'pt-br',\n 'sourcetypelocal_set-TOTAL_FORMS': '0', \n 'sourcetypelocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/type/new', form_data, follow=True )\n \n self.assertRedirects(response, '/types')\n self.assertContains(response, \"Website\")",
"def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass",
"def setUp(self):\n self.new_source = News_Source(\"CBSN\",\"CBSN NEWS\",\"CBSN is the leading free news platform\",\"cbsn.com\",\"business\",\"us\", \"en\")",
"def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )",
"def newspaper_article(source, article, keywords=[]):\n\n src = None\n try:\n src = Source.objects.get(name=source)\n except Source.DoesNotExist:\n #This is jank but can be touched up manually\n src = Source(name=source, url=article['url'])\n src.save()\n print 'source added to db with name: ' + source\n \n #unpacks article into article constructor\n try: \n art = Article(source=src, **article)\n art.save()\n make_keywords(art, keywords)\n except IntegrityError:\n print 'not unique headline for ' + article['headline'] + ' skipping.'",
"def storeFeeds(self, url, feeds):\n for feed in feeds:\n _date = time.localtime()\n if 'published_parsed' in feed:\n _date = feed['published_parsed']\n date = datetime(_date.tm_year, _date.tm_mon, _date.tm_mday)\n doc = {\n '_id': md5_new(feed.id).hexdigest(),\n 'title': feed.title,\n 'date': date,\n 'link': feed.link,\n 'summary': feed.summary,\n 'type': url,\n 'status': 'new',\n }\n try:\n self.feedsCol.insert(doc)\n except DuplicateKeyError:\n pass",
"def add_by_url(self, feed_url, name=None):\n feed_data = {\"url\": feed_url}\n if name:\n feed_data['name'] = name\n else:\n f = feedparser.parse(feed_url)\n feed_data['name'] = f.feed.title\n feed = Feed(feed_data, self)\n feed._save()\n self.feeds.append(feed)",
"def test_feed_item_creation(self):\n title = 'title'\n link = 'link'\n description = 'description'\n item = FeedItem(title, link, description)\n assert isinstance(item, FeedItem)\n assert item.title == title\n assert item.link == link\n assert item.description == description",
"def test_invalid_source_couchdb(self):\n with self.assertRaises(CloudantFeedException) as cm:\n invalid_feed = [x for x in InfiniteFeed(self.client)]\n self.assertEqual(str(cm.exception),\n 'Infinite _db_updates feed not supported for CouchDB.')",
"def create(self, validated_data):",
"def validate_source(cls, source_data: Dict[str, dict], verbose: bool = True):\n cls._validate_source_data(source_data=source_data, verbose=verbose)",
"def source():\n\n source = models.Source(name=u\"Joe's Funerals.com\", url=u\"http://www.joesfunerals.com\")\n return source",
"def _set_source(source, context):\n if isinstance(source, (str, list, dict, Dataset)):\n return Source(source, context)\n elif isinstance(source, Source):\n return source\n else:\n raise ValueError('Wrong source')",
"def __init__(self, source):\n self.source = source",
"def __init__(self, source):\n self.source = source",
"def createFeedItems(self):\r\n for item in self.item_data:\r\n self.initCreateFeedItem(item)\r\n self.createItem(item)"
] | [
"0.7013951",
"0.64773726",
"0.62411964",
"0.5868654",
"0.5822305",
"0.5581811",
"0.5531281",
"0.55312055",
"0.55255294",
"0.5506018",
"0.54661417",
"0.5444872",
"0.54306674",
"0.53630376",
"0.5349686",
"0.53175807",
"0.52853334",
"0.52751404",
"0.5265009",
"0.52596027",
"0.52385145",
"0.5226836",
"0.52140033",
"0.52040964",
"0.51803756",
"0.5155651",
"0.5117151",
"0.50957835",
"0.50957835",
"0.5095662"
] | 0.75302297 | 0 |
Update Feed Source Active Status | def update_feed_source(request):
try:
feed = FeedSource.objects.get(id=request.id)
feed.status = not feed.status
feed.save()
except (ValidationError, FeedSource.DoesNotExist) as e:
exc = e
logger(__name__, "Could not update Feed Source due to {}".format(str(exc)))
errors = _get_errors(exc)
return feeds_pb2.OperationStatus(
op_status=feeds_pb2.Status.Value('FAILURE'),
details={'errors': feeds_pb2.RepeatedString(data=errors)},
)
return feeds_pb2.OperationStatus(
op_status=feeds_pb2.Status.Value('SUCCESS'),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateStatus(self, status):\n pass",
"def _update_on_active(self):\n pass",
"def _update_status(self):\n self._db_update({'status': self.status})",
"def update_from_latest_data(self) -> None:\n self._attr_is_on = self.coordinator.data[self.entity_description.uid][\"active\"]",
"def UpdateStatus(self, status):\r\n self.status.update(status)",
"def __updateStreamStatus(self):\n while(True):\n for server,streams in self._streamsByServer.items():\n activeStreams = server.getActiveStreams()\n # Update each streams state\n for stream in streams:\n stream.lock.acquire()\n stream.setStreamState(server,Stream.STATE.DOWN)\n if (stream.name in activeStreams):\n stream.setStreamState(server,Stream.STATE.UP)\n stream.setStreamAddress(server,activeStreams[stream.name])\n stream.lock.release()\n time.sleep(StreamManager.SECS_BETWEEN_STATUS_CHECKS)",
"def updateOneFeed(self):\n feeds = backend.Feed.query.order_by(\"check_date\").limit(1).all()\n if feeds:\n feed = feeds[0]\n print feed.check_date\n # Only check if it has not been checked in at least 10 minutes\n if (datetime.datetime.now() - feed.check_date).seconds > 600:\n print \"Scheduled update of: \",feed.xmlurl\n fetcher_in.put(['update', feed.xmlurl, feed.etag, feed.check_date])",
"def refresh_status(self):\n\n pass",
"def set_channel_status(self, keyfind, valfind, updatedict, origin):\n\n self.get_channel_obj(keyfind, valfind, origin).set_status(updatedict)",
"def update(self):\n _LOGGER.debug(\"Updating status using the client AC instance...\")\n self.ac.update_status()\n _LOGGER.debug(\"Status updated using the client AC instance\")",
"def test_meeting_live_stream_status_update(self):\n pass",
"def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False",
"def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))",
"def update_activity():\n pass",
"def refresh_status() -> None:\n ...",
"def update_status(stdscr):\n safe_put(stdscr, \"Checking node status, please wait ...\", (2, 1))\n users = taunet.users.all()\n i = 0\n j = len(users)\n for user in users:\n i += 1\n safe_put(stdscr, \"({i}/{j}) {name}\".format(i=i, j=j, name=user.name.ljust(30)), (2, 39))\n is_online(user)\n stdscr.refresh()\n stdscr.clear()\n stdscr.refresh()",
"def change_status(self, status, application_id):",
"def update(self):\n self._is_on = self._is_on",
"def update_status(self) -> None:\n try:\n (rc, mid) = self.mqttc.publish(\n self.config.status_topic, json.dumps(self.status), qos=0, retain=False\n )\n if rc == mqtt.MQTT_ERR_SUCCESS:\n logging.info(\n f\"The request for a status update has been successfully accepted: mid={mid}\"\n )\n else:\n logging.warning(\"The request for a status update has been rejected\")\n except ValueError as e:\n logging.warning(f\"Cannot send status update: {e}\")",
"def status(self, id):",
"def make_active(self, request, queryset):\n queryset.update(is_active=True)",
"def _update_on_active(self):\n if self.user:\n self.set_user(self.user.name, self.user.balance, self.user.credit)\n else:\n self.set_unknown_user()\n\n for product in self.owner.products:\n self.on_scan(product)",
"def update_from_existing(self, existing_status=None):\n if isinstance(existing_status, self.__class__):\n self.status = self.status or existing_status.status\n self.owner = self.owner or existing_status.owner\n self.urgency = self.urgency or existing_status.urgency",
"def update_status(conn, episode_info, status=\"watched_status\"):\n\tp_key = get_p_key(episode_info)\n\t\n\tstatus_update = f'UPDATE shows SET watched_status = {episode_info[status]} WHERE p_key = \"{p_key}\";'\n\t\n\texecute_sql(conn, status_update)",
"def update(self) -> None:\n active = None\n if self.type == \"on_off\":\n self._data = self._tm_client.api.data\n if self._data:\n active = self._data.active_torrent_count > 0\n\n elif self.type == \"turtle_mode\":\n active = self._tm_client.api.get_alt_speed_enabled()\n\n if active is None:\n return\n\n self._state = STATE_ON if active else STATE_OFF",
"def update_status_info (cls, nffg, status,\n log=logging.getLogger(\"UPDATE-STATUS\")):\n log.debug(\"Add %s status for NFs and Flowrules...\" % status)\n for nf in nffg.nfs:\n nf.status = status\n for infra in nffg.infras:\n for flowrule in infra.flowrules():\n flowrule.status = status\n return nffg",
"def change_status(publisher_name, package_name, status=PackageStateEnum.active):\n try:\n data = Package.query.join(Publisher). \\\n filter(Publisher.name == publisher_name,\n Package.name == package_name).one()\n data.status = status\n db.session.add(data)\n db.session.commit()\n return True\n except Exception as e:\n app.logger.error(e)\n return False",
"def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()",
"def change_status(self, inf, status):\n self.interfaces[inf]['status'] = status",
"def OnUpdateActive(self, event):\n self.StatusbarUpdate()"
] | [
"0.64761364",
"0.64385563",
"0.6301665",
"0.627237",
"0.6260324",
"0.6197054",
"0.61570495",
"0.6132218",
"0.5923778",
"0.589606",
"0.5825407",
"0.5817488",
"0.57928306",
"0.5789204",
"0.578507",
"0.5777448",
"0.57570195",
"0.57291734",
"0.5704006",
"0.5671347",
"0.5657238",
"0.5644809",
"0.564184",
"0.561363",
"0.56111676",
"0.5608233",
"0.559477",
"0.5583241",
"0.55595803",
"0.5546768"
] | 0.65877944 | 0 |